Merge branch 'irq/numa' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux...
author David S. Miller <davem@davemloft.net>
Tue, 15 Mar 2011 22:06:35 +0000 (15:06 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 15 Mar 2011 22:06:35 +0000 (15:06 -0700)
2493 files changed:
.gitignore
.mailmap
Documentation/ABI/testing/sysfs-platform-at91 [new file with mode: 0644]
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/drm.tmpl
Documentation/DocBook/filesystems.tmpl
Documentation/devicetree/bindings/ata/fsl-sata.txt [moved from Documentation/powerpc/dts-bindings/fsl/sata.txt with 100% similarity]
Documentation/devicetree/bindings/eeprom.txt [moved from Documentation/powerpc/dts-bindings/eeprom.txt with 100% similarity]
Documentation/devicetree/bindings/gpio/8xxx_gpio.txt [moved from Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt with 100% similarity]
Documentation/devicetree/bindings/gpio/gpio.txt [moved from Documentation/powerpc/dts-bindings/gpio/gpio.txt with 100% similarity]
Documentation/devicetree/bindings/gpio/led.txt [moved from Documentation/powerpc/dts-bindings/gpio/led.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/fsl-i2c.txt [moved from Documentation/powerpc/dts-bindings/fsl/i2c.txt with 100% similarity]
Documentation/devicetree/bindings/marvell.txt [moved from Documentation/powerpc/dts-bindings/marvell.txt with 100% similarity]
Documentation/devicetree/bindings/mmc/fsl-esdhc.txt [moved from Documentation/powerpc/dts-bindings/fsl/esdhc.txt with 100% similarity]
Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt [moved from Documentation/powerpc/dts-bindings/mmc-spi-slot.txt with 100% similarity]
Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt [moved from Documentation/powerpc/dts-bindings/fsl/upm-nand.txt with 100% similarity]
Documentation/devicetree/bindings/mtd/mtd-physmap.txt [moved from Documentation/powerpc/dts-bindings/mtd-physmap.txt with 100% similarity]
Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt [moved from Documentation/powerpc/dts-bindings/fsl/can.txt with 100% similarity]
Documentation/devicetree/bindings/net/can/sja1000.txt [moved from Documentation/powerpc/dts-bindings/can/sja1000.txt with 100% similarity]
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt [moved from Documentation/powerpc/dts-bindings/fsl/tsec.txt with 100% similarity]
Documentation/devicetree/bindings/net/mdio-gpio.txt [moved from Documentation/powerpc/dts-bindings/gpio/mdio.txt with 100% similarity]
Documentation/devicetree/bindings/net/phy.txt [moved from Documentation/powerpc/dts-bindings/phy.txt with 100% similarity]
Documentation/devicetree/bindings/pci/83xx-512x-pci.txt [moved from Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/4xx/cpm.txt [moved from Documentation/powerpc/dts-bindings/4xx/cpm.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/4xx/emac.txt [moved from Documentation/powerpc/dts-bindings/4xx/emac.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt [moved from Documentation/powerpc/dts-bindings/4xx/ndfc.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt [moved from Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/4xx/reboot.txt [moved from Documentation/powerpc/dts-bindings/4xx/reboot.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/board.txt [moved from Documentation/powerpc/dts-bindings/fsl/board.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt [moved from Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/diu.txt [moved from Documentation/powerpc/dts-bindings/fsl/diu.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/dma.txt [moved from Documentation/powerpc/dts-bindings/fsl/dma.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/ecm.txt [moved from Documentation/powerpc/dts-bindings/ecm.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/gtm.txt [moved from Documentation/powerpc/dts-bindings/fsl/gtm.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/guts.txt [moved from Documentation/powerpc/dts-bindings/fsl/guts.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/lbc.txt [moved from Documentation/powerpc/dts-bindings/fsl/lbc.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/mcm.txt [moved from Documentation/powerpc/dts-bindings/fsl/mcm.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt [moved from Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt [moved from Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt [moved from Documentation/powerpc/dts-bindings/fsl/mpc5200.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/mpic.txt [moved from Documentation/powerpc/dts-bindings/fsl/mpic.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt [moved from Documentation/powerpc/dts-bindings/fsl/msi-pic.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/pmc.txt [moved from Documentation/powerpc/dts-bindings/fsl/pmc.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/sec.txt [moved from Documentation/powerpc/dts-bindings/fsl/sec.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/fsl/ssi.txt [moved from Documentation/powerpc/dts-bindings/fsl/ssi.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt [moved from Documentation/powerpc/dts-bindings/nintendo/gamecube.txt with 100% similarity]
Documentation/devicetree/bindings/powerpc/nintendo/wii.txt [moved from Documentation/powerpc/dts-bindings/nintendo/wii.txt with 100% similarity]
Documentation/devicetree/bindings/spi/fsl-spi.txt [moved from Documentation/powerpc/dts-bindings/fsl/spi.txt with 100% similarity]
Documentation/devicetree/bindings/spi/spi-bus.txt [moved from Documentation/powerpc/dts-bindings/spi-bus.txt with 100% similarity]
Documentation/devicetree/bindings/usb/fsl-usb.txt [moved from Documentation/powerpc/dts-bindings/fsl/usb.txt with 100% similarity]
Documentation/devicetree/bindings/usb/usb-ehci.txt [moved from Documentation/powerpc/dts-bindings/usb-ehci.txt with 100% similarity]
Documentation/devicetree/bindings/xilinx.txt [moved from Documentation/powerpc/dts-bindings/xilinx.txt with 100% similarity]
Documentation/devicetree/booting-without-of.txt [moved from Documentation/powerpc/booting-without-of.txt with 90% similarity]
Documentation/feature-removal-schedule.txt
Documentation/filesystems/ntfs.txt
Documentation/hwmon/jc42
Documentation/hwmon/k10temp
Documentation/kernel-parameters.txt
Documentation/networking/00-INDEX
Documentation/networking/Makefile
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
Documentation/networking/dns_resolver.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/phonet.txt
Documentation/scheduler/sched-stats.txt
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/workqueue.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/kernel/irq.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/irq_i8259.c
arch/alpha/kernel/irq_impl.h
arch/alpha/kernel/irq_pyxis.c
arch/alpha/kernel/irq_srm.c
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_dp264.c
arch/alpha/kernel/sys_eb64p.c
arch/alpha/kernel/sys_eiger.c
arch/alpha/kernel/sys_jensen.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/sys_mikasa.c
arch/alpha/kernel/sys_noritake.c
arch/alpha/kernel/sys_rawhide.c
arch/alpha/kernel/sys_rx164.c
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_takara.c
arch/alpha/kernel/sys_titan.c
arch/alpha/kernel/sys_wildfire.c
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/compressed/.gitignore
arch/arm/common/Kconfig
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/include/asm/hardware/sp810.h
arch/arm/include/asm/io.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/tlbflush.h
arch/arm/kernel/head.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/kprobes-decode.c
arch/arm/kernel/module.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/pmu.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-davinci/cpufreq.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-davinci/gpio-tnetv107x.c
arch/arm/mach-davinci/include/mach/clkdev.h
arch/arm/mach-ep93xx/core.c
arch/arm/mach-ep93xx/gpio.c
arch/arm/mach-footbridge/include/mach/debug-macro.S
arch/arm/mach-imx/mach-mx25_3ds.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-ixp4xx/include/mach/timex.h
arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
arch/arm/mach-mxs/clock-mx23.c
arch/arm/mach-mxs/clock-mx28.c
arch/arm/mach-mxs/clock.c
arch/arm/mach-mxs/gpio.c
arch/arm/mach-mxs/include/mach/clock.h
arch/arm/mach-omap1/Kconfig
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/include/mach/entry-macro.S
arch/arm/mach-omap1/irq.c
arch/arm/mach-omap1/lcd_dma.c
arch/arm/mach-omap1/time.c
arch/arm/mach-omap1/timer32k.c
arch/arm/mach-omap2/board-cm-t3517.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/board-rm680.c
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/dma.c
arch/arm/mach-omap2/include/mach/entry-macro.S
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/mailbox.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-omap2/pm24xx.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
arch/arm/mach-omap2/prcm_mpu44xx.h
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/timer-gp.c
arch/arm/mach-omap2/voltage.c
arch/arm/mach-pxa/colibri-evalboard.c
arch/arm/mach-pxa/colibri-pxa300.c
arch/arm/mach-pxa/include/mach/colibri.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/pm.c
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/tosa-bt.c
arch/arm/mach-pxa/tosa.c
arch/arm/mach-realview/Kconfig
arch/arm/mach-realview/platsmp.c
arch/arm/mach-s3c2440/Kconfig
arch/arm/mach-s3c2440/include/mach/gta02.h
arch/arm/mach-s3c64xx/clock.c
arch/arm/mach-s3c64xx/dma.c
arch/arm/mach-s3c64xx/gpiolib.c
arch/arm/mach-s3c64xx/mach-smdk6410.c
arch/arm/mach-s3c64xx/setup-keypad.c
arch/arm/mach-s3c64xx/setup-sdhci.c
arch/arm/mach-s5p6442/include/mach/map.h
arch/arm/mach-s5p64x0/include/mach/gpio.h
arch/arm/mach-s5p64x0/include/mach/map.h
arch/arm/mach-s5pc100/include/mach/map.h
arch/arm/mach-s5pv210/include/mach/map.h
arch/arm/mach-s5pv210/mach-aquila.c
arch/arm/mach-s5pv210/mach-goni.c
arch/arm/mach-s5pv310/Kconfig
arch/arm/mach-s5pv310/include/mach/map.h
arch/arm/mach-s5pv310/include/mach/sysmmu.h
arch/arm/mach-sa1100/collie.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-g3evm.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
arch/arm/mach-shmobile/include/mach/head-mackerel.txt
arch/arm/mach-shmobile/intc-sh7372.c
arch/arm/mach-shmobile/intc-sh73a0.c
arch/arm/mach-spear3xx/include/mach/spear320.h
arch/arm/mach-tegra/gpio.c
arch/arm/mach-tegra/include/mach/clk.h
arch/arm/mach-tegra/include/mach/clkdev.h
arch/arm/mach-tegra/include/mach/kbc.h [new file with mode: 0644]
arch/arm/mach-tegra/irq.c
arch/arm/mach-versatile/Kconfig
arch/arm/mach-vexpress/platsmp.c
arch/arm/mach-vexpress/v2m.c
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/init.c
arch/arm/mm/proc-v7.S
arch/arm/oprofile/common.c
arch/arm/plat-mxc/include/mach/uncompress.h
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/counter_32k.c
arch/arm/plat-omap/dma.c
arch/arm/plat-omap/include/plat/common.h
arch/arm/plat-omap/mailbox.c
arch/arm/plat-pxa/mfp.c
arch/arm/plat-s5p/Kconfig
arch/arm/plat-s5p/Makefile
arch/arm/plat-s5p/dev-uart.c
arch/arm/plat-s5p/include/plat/sysmmu.h [deleted file]
arch/arm/plat-s5p/sysmmu.c
arch/arm/plat-samsung/dev-ts.c
arch/arm/plat-samsung/dev-uart.c
arch/arm/plat-samsung/include/plat/pm.h
arch/arm/plat-spear/include/plat/uncompress.h
arch/arm/plat-spear/include/plat/vmalloc.h
arch/arm/tools/mach-types
arch/avr32/include/asm/pgalloc.h
arch/blackfin/include/asm/bfin_serial.h
arch/blackfin/lib/outs.S
arch/blackfin/mach-common/cache.S
arch/cris/kernel/vmlinux.lds.S
arch/m32r/kernel/irq.c
arch/m68k/amiga/config.c
arch/m68k/atari/ataints.c
arch/m68k/atari/config.c
arch/m68k/atari/debug.c
arch/m68k/include/asm/atarihw.h
arch/m68k/include/asm/string.h
arch/m68k/lib/string.c
arch/m68knommu/kernel/vmlinux.lds.S
arch/m68knommu/lib/Makefile
arch/m68knommu/lib/memmove.c [new file with mode: 0644]
arch/m68knommu/platform/5249/intc2.c
arch/m68knommu/platform/68328/entry.S
arch/m68knommu/platform/68360/commproc.c
arch/m68knommu/platform/68360/config.c
arch/m68knommu/platform/68360/entry.S
arch/m68knommu/platform/68360/ints.c
arch/m68knommu/platform/coldfire/entry.S
arch/microblaze/include/asm/irqflags.h
arch/microblaze/include/asm/pgtable.h
arch/microblaze/kernel/cpu/pvr.c
arch/microblaze/kernel/head.S
arch/microblaze/kernel/hw_exception_handler.S
arch/microblaze/kernel/setup.c
arch/microblaze/lib/fastcopy.S
arch/parisc/kernel/pdc_cons.c
arch/powerpc/include/asm/lppaca.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/mmu-book3e.h
arch/powerpc/include/asm/page.h
arch/powerpc/kernel/cpu_setup_6xx.S
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/kernel/process.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/platforms/iseries/dt.c
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/pseries/lpar.c
arch/s390/Kconfig
arch/s390/boot/compressed/misc.c
arch/s390/crypto/sha_common.c
arch/s390/include/asm/atomic.h
arch/s390/include/asm/cache.h
arch/s390/include/asm/cacheflush.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/traps.c
arch/s390/lib/uaccess_std.c
arch/s390/mm/pgtable.c
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/boot/Makefile
arch/sh/boot/compressed/Makefile
arch/sh/boot/compressed/misc.c
arch/sh/include/asm/pgtable.h
arch/sh/include/asm/sections.h
arch/sh/include/asm/sh_eth.h
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/topology.c
arch/sh/lib/delay.c
arch/sh/mm/cache.c
arch/sparc/include/asm/pcr.h
arch/sparc/kernel/iommu.c
arch/sparc/kernel/pcr.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/una_asm_32.S
arch/sparc/lib/bitext.c
arch/x86/boot/compressed/mkpiggy.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/cacheflush.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/jump_label.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/perf_event_p4.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/smpboot_hooks.h
arch/x86/include/asm/system_64.h [deleted file]
arch/x86/include/asm/uv/uv_bau.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/alternative.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/head_32.S
arch/x86/kernel/irq.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/svm.c
arch/x86/mm/numa_64.c
arch/x86/mm/pageattr.c
arch/x86/platform/olpc/olpc_dt.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
block/blk-core.c
block/blk-flush.c
block/blk-lib.c
block/blk-throttle.c
block/cfq-iosched.c
block/elevator.c
block/genhd.c
block/ioctl.c
crypto/Makefile
crypto/authencesn.c [new file with mode: 0644]
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/debugfs.c
drivers/acpi/osl.c
drivers/acpi/video_detect.c
drivers/acpi/wakeup.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/pata_hpt366.c
drivers/ata/pata_hpt37x.c
drivers/ata/pata_hpt3x2n.c
drivers/ata/pata_mpc52xx.c
drivers/atm/idt77105.c
drivers/atm/solos-pci.c
drivers/base/power/runtime.c
drivers/block/Makefile
drivers/block/aoe/Makefile
drivers/block/cciss.c
drivers/block/drbd/drbd_nl.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ldisc.c
drivers/cdrom/cdrom.c
drivers/char/Makefile
drivers/char/agp/Kconfig
drivers/char/agp/amd-k7-agp.c
drivers/char/agp/amd64-agp.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/bfin_jtag_comm.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/pcmcia/cm4000_cs.c
drivers/char/pcmcia/ipwireless/main.c
drivers/char/tpm/tpm_tis.c
drivers/char/virtio_console.c [moved from drivers/tty/hvc/virtio_console.c with 98% similarity]
drivers/clocksource/acpi_pm.c
drivers/clocksource/tcb_clksrc.c
drivers/cpufreq/cpufreq.c
drivers/dma/amba-pl08x.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/ipu/ipu_idmac.c
drivers/edac/amd64_edac.c
drivers/firmware/dmi_scan.c
drivers/gpio/langwell_gpio.c
drivers/gpio/pca953x.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_mm.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_pm.c
drivers/gpu/drm/nouveau/nouveau_temp.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv40_graph.c
drivers/gpu/drm/nouveau/nv50_evo.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nv50_vm.c
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvc0_grctx.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/evergreen_blit_shaders.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/mkregtable.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r100_track.h
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r300_reg.h
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_blit_shaders.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_reg.h
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/reg_srcs/r300
drivers/gpu/drm/radeon/reg_srcs/r420
drivers/gpu/drm/radeon/reg_srcs/rs600
drivers/gpu/drm/radeon/reg_srcs/rv515
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/stub/Kconfig
drivers/gpu/vga/vgaarb.c
drivers/hwmon/Kconfig
drivers/hwmon/ad7414.c
drivers/hwmon/adt7411.c
drivers/hwmon/applesmc.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/emc1403.c
drivers/hwmon/jc42.c
drivers/hwmon/k10temp.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/lm63.c
drivers/hwmon/lm85.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-stu300.c
drivers/idle/intel_idle.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/amso1100/c2_vq.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/input/gameport/gameport.c
drivers/input/input.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/tegra-kbc.c [new file with mode: 0644]
drivers/input/keyboard/tnetv107x-keypad.c
drivers/input/misc/ixp4xx-beeper.c
drivers/input/misc/rotary_encoder.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/serio/ct82c710.c
drivers/input/serio/serio.c
drivers/input/serio/serport.c
drivers/input/sparse-keymap.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/bu21013_ts.c
drivers/input/touchscreen/tnetv107x-ts.c
drivers/input/touchscreen/wacom_w8001.c
drivers/isdn/hardware/eicon/istream.c
drivers/isdn/hisax/isdnl2.c
drivers/isdn/hysdn/hysdn_defs.h
drivers/isdn/hysdn/hysdn_init.c
drivers/isdn/hysdn/hysdn_net.c
drivers/isdn/hysdn/hysdn_procconf.c
drivers/isdn/icn/icn.c
drivers/leds/leds-pwm.c
drivers/md/dm-log-userspace-transfer.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/tuners/tda8290.c
drivers/media/dvb/dvb-usb/dib0700_devices.c
drivers/media/dvb/dvb-usb/lmedm04.c
drivers/media/dvb/frontends/dib7000m.c
drivers/media/dvb/frontends/dib7000m.h
drivers/media/dvb/mantis/mantis_pci.c
drivers/media/rc/ir-lirc-codec.c
drivers/media/rc/ir-raw.c
drivers/media/rc/keymaps/rc-rc6-mce.c
drivers/media/rc/mceusb.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-main.c
drivers/media/rc/streamzap.c
drivers/media/video/au0828/au0828-video.c
drivers/media/video/cx18/cx18-cards.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx18/cx18-driver.h
drivers/media/video/cx18/cx18-dvb.c
drivers/media/video/cx23885/cx23885-i2c.c
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/gspca/zc3xx.c
drivers/media/video/hdpvr/hdpvr-core.c
drivers/media/video/hdpvr/hdpvr-i2c.c
drivers/media/video/hdpvr/hdpvr.h
drivers/media/video/ir-kbd-i2c.c
drivers/media/video/ivtv/ivtv-irq.c
drivers/media/video/mem2mem_testdev.c
drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
drivers/media/video/s2255drv.c
drivers/media/video/saa7115.c
drivers/memstick/core/memstick.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptscsih.c
drivers/mfd/asic3.c
drivers/mfd/davinci_voicecodec.c
drivers/mfd/tps6586x.c
drivers/mfd/ucb1x00-ts.c
drivers/mfd/wm8994-core.c
drivers/misc/bmp085.c
drivers/misc/tifm_core.c
drivers/misc/vmw_balloon.c
drivers/mmc/core/core.c
drivers/mmc/core/sdio.c
drivers/mmc/host/bfin_sdh.c
drivers/mmc/host/jz4740_mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/msm_sdcc.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/ushc.c
drivers/mtd/nand/r852.c
drivers/mtd/sm_ftl.c
drivers/mtd/ubi/build.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/arm/ks8695net.c
drivers/net/atl1c/atl1c_hw.c
drivers/net/atl1c/atl1c_hw.h
drivers/net/atl1c/atl1c_main.c
drivers/net/atl1e/atl1e_ethtool.c
drivers/net/atl1e/atl1e_hw.c
drivers/net/atl1e/atl1e_hw.h
drivers/net/atl1e/atl1e_main.c
drivers/net/atlx/atl1.c
drivers/net/atlx/atl2.c
drivers/net/ax88796.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bna/bnad.c
drivers/net/bna/bnad.h
drivers/net/bnx2.c
drivers/net/bnx2.h
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_dcb.c
drivers/net/bnx2x/bnx2x_dcb.h
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_hsi.h
drivers/net/bnx2x/bnx2x_init.h
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_link.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_reg.h
drivers/net/bnx2x/bnx2x_stats.c
drivers/net/bonding/Makefile
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_3ad.h
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c [new file with mode: 0644]
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/at91_can.c
drivers/net/can/c_can/Kconfig [new file with mode: 0644]
drivers/net/can/c_can/Makefile [new file with mode: 0644]
drivers/net/can/c_can/c_can.c [new file with mode: 0644]
drivers/net/can/c_can/c_can.h [new file with mode: 0644]
drivers/net/can/c_can/c_can_platform.c [new file with mode: 0644]
drivers/net/can/janz-ican3.c
drivers/net/can/mcp251x.c
drivers/net/can/mscan/Kconfig
drivers/net/can/pch_can.c
drivers/net/can/softing/Kconfig [new file with mode: 0644]
drivers/net/can/softing/Makefile [new file with mode: 0644]
drivers/net/can/softing/softing.h [new file with mode: 0644]
drivers/net/can/softing/softing_cs.c [new file with mode: 0644]
drivers/net/can/softing/softing_fw.c [new file with mode: 0644]
drivers/net/can/softing/softing_main.c [new file with mode: 0644]
drivers/net/can/softing/softing_platform.h [new file with mode: 0644]
drivers/net/cnic.c
drivers/net/cnic.h
drivers/net/cnic_if.h
drivers/net/cs89x0.c
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb4/cxgb4_main.c
drivers/net/cxgb4vf/cxgb4vf_main.c
drivers/net/cxgb4vf/t4vf_hw.c
drivers/net/davinci_emac.c
drivers/net/depca.c
drivers/net/dl2k.c
drivers/net/dm9000.c
drivers/net/dnet.c
drivers/net/e1000/e1000_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_osdep.h
drivers/net/e1000e/defines.h
drivers/net/e1000e/e1000.h
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/lib.c
drivers/net/e1000e/netdev.c
drivers/net/e1000e/phy.c
drivers/net/enc28j60.c
drivers/net/enic/Makefile
drivers/net/enic/enic.h
drivers/net/enic/enic_dev.c [new file with mode: 0644]
drivers/net/enic/enic_dev.h [new file with mode: 0644]
drivers/net/enic/enic_main.c
drivers/net/enic/vnic_dev.c
drivers/net/enic/vnic_dev.h
drivers/net/enic/vnic_devcmd.h
drivers/net/enic/vnic_rq.h
drivers/net/eql.c
drivers/net/fec.c
drivers/net/forcedeth.c
drivers/net/ftmac100.c [new file with mode: 0644]
drivers/net/ftmac100.h [new file with mode: 0644]
drivers/net/hamradio/bpqether.c
drivers/net/igb/e1000_82575.c
drivers/net/igb/e1000_82575.h
drivers/net/igb/e1000_defines.h
drivers/net/igb/e1000_hw.h
drivers/net/igb/e1000_mbx.c
drivers/net/igb/e1000_nvm.c
drivers/net/igb/e1000_nvm.h
drivers/net/igb/e1000_regs.h
drivers/net/igb/igb.h
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c
drivers/net/igbvf/ethtool.c
drivers/net/igbvf/igbvf.h
drivers/net/igbvf/netdev.c
drivers/net/igbvf/vf.c
drivers/net/ipg.c
drivers/net/ixgb/ixgb.h
drivers/net/ixgb/ixgb_ethtool.c
drivers/net/ixgb/ixgb_main.c
drivers/net/ixgbe/ixgbe.h
drivers/net/ixgbe/ixgbe_82598.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_common.c
drivers/net/ixgbe/ixgbe_common.h
drivers/net/ixgbe/ixgbe_dcb.c
drivers/net/ixgbe/ixgbe_dcb.h
drivers/net/ixgbe/ixgbe_dcb_82598.c
drivers/net/ixgbe/ixgbe_dcb_82598.h
drivers/net/ixgbe/ixgbe_dcb_82599.c
drivers/net/ixgbe/ixgbe_dcb_82599.h
drivers/net/ixgbe/ixgbe_dcb_nl.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_fcoe.c
drivers/net/ixgbe/ixgbe_fcoe.h
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_mbx.c
drivers/net/ixgbe/ixgbe_mbx.h
drivers/net/ixgbe/ixgbe_phy.c
drivers/net/ixgbe/ixgbe_phy.h
drivers/net/ixgbe/ixgbe_sriov.c
drivers/net/ixgbe/ixgbe_sriov.h
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ixgbe/ixgbe_x540.c
drivers/net/ixgbevf/defines.h
drivers/net/ixgbevf/ethtool.c
drivers/net/ixgbevf/ixgbevf.h
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/ixgbevf/regs.h
drivers/net/jme.c
drivers/net/jme.h
drivers/net/loopback.c
drivers/net/macb.c
drivers/net/macvtap.c
drivers/net/mii.c
drivers/net/mlx4/main.c
drivers/net/mv643xx_eth.c
drivers/net/myri10ge/myri10ge.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_ctx.c
drivers/net/netxen/netxen_nic_ethtool.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/niu.c
drivers/net/pch_gbe/pch_gbe.h
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/pcmcia/fmvj18x_cs.c
drivers/net/phy/Kconfig
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/ppp_generic.c
drivers/net/pptp.c
drivers/net/qla3xxx.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_main.c
drivers/net/r6040.c
drivers/net/r8169.c
drivers/net/s2io.c
drivers/net/sfc/efx.c
drivers/net/sfc/efx.h
drivers/net/sfc/ethtool.c
drivers/net/sfc/falcon.c
drivers/net/sfc/falcon_boards.c
drivers/net/sfc/falcon_xmac.c
drivers/net/sfc/filter.c
drivers/net/sfc/io.h
drivers/net/sfc/mcdi.c
drivers/net/sfc/mcdi.h
drivers/net/sfc/mcdi_mac.c
drivers/net/sfc/mcdi_pcol.h
drivers/net/sfc/mcdi_phy.c
drivers/net/sfc/mdio_10g.c
drivers/net/sfc/mdio_10g.h
drivers/net/sfc/mtd.c
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/phy.h
drivers/net/sfc/qt202x_phy.c
drivers/net/sfc/regs.h
drivers/net/sfc/rx.c
drivers/net/sfc/selftest.c
drivers/net/sfc/selftest.h
drivers/net/sfc/siena.c
drivers/net/sfc/spi.h
drivers/net/sfc/tenxpress.c
drivers/net/sfc/tx.c
drivers/net/sfc/txc43128_phy.c
drivers/net/sfc/workarounds.h
drivers/net/sh_eth.c
drivers/net/sh_eth.h
drivers/net/sis900.c
drivers/net/skge.c
drivers/net/sky2.c
drivers/net/smc91x.c
drivers/net/smsc911x.c
drivers/net/stmmac/stmmac_main.c
drivers/net/sungem.c
drivers/net/sungem.h
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/tlan.c
drivers/net/tlan.h
drivers/net/tun.c
drivers/net/typhoon.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/dm9601.c
drivers/net/usb/hso.c
drivers/net/usb/kaweth.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/via-velocity.c
drivers/net/via-velocity.h
drivers/net/virtio_net.c
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-traffic.c
drivers/net/vxge/vxge-traffic.h
drivers/net/vxge/vxge-version.h
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/adm8211.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/at76c50x-usb.h
drivers/net/wireless/ath/ar9170/Kconfig
drivers/net/wireless/ath/ar9170/ar9170.h
drivers/net/wireless/ath/ar9170/main.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/attach.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/base.h
drivers/net/wireless/ath/ath5k/caps.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath5k/debug.h
drivers/net/wireless/ath/ath5k/dma.c
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/eeprom.h
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/pcu.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath5k/reg.h
drivers/net/wireless/ath/ath5k/trace.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9485_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/virtual.c [deleted file]
drivers/net/wireless/ath/ath9k/wmi.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/ath/carl9170/fwcmd.h
drivers/net/wireless/ath/carl9170/fwdesc.h
drivers/net/wireless/ath/carl9170/hw.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/rx.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/ath/carl9170/version.h
drivers/net/wireless/ath/carl9170/wlan.h
drivers/net/wireless/ath/key.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/regd.h
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43/xmit.h
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.h
drivers/net/wireless/iwlegacy/Kconfig [new file with mode: 0644]
drivers/net/wireless/iwlegacy/Makefile [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c [moved from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c with 99% similarity]
drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h [moved from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h with 95% similarity]
drivers/net/wireless/iwlegacy/iwl-3945-fh.h [moved from drivers/net/wireless/iwlwifi/iwl-3945-fh.h with 98% similarity]
drivers/net/wireless/iwlegacy/iwl-3945-hw.h [moved from drivers/net/wireless/iwlwifi/iwl-3945-hw.h with 96% similarity]
drivers/net/wireless/iwlegacy/iwl-3945-led.c [moved from drivers/net/wireless/iwlwifi/iwl-3945-led.c with 73% similarity]
drivers/net/wireless/iwlegacy/iwl-3945-led.h [moved from drivers/net/wireless/iwlwifi/iwl-3945-led.h with 95% similarity]
drivers/net/wireless/iwlegacy/iwl-3945-rs.c [moved from drivers/net/wireless/iwlwifi/iwl-3945-rs.c with 96% similarity]
drivers/net/wireless/iwlegacy/iwl-3945.c [moved from drivers/net/wireless/iwlwifi/iwl-3945.c with 90% similarity]
drivers/net/wireless/iwlegacy/iwl-3945.h [moved from drivers/net/wireless/iwlwifi/iwl-3945.h with 97% similarity]
drivers/net/wireless/iwlegacy/iwl-4965-calib.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-calib.h [moved from drivers/net/wireless/iwlwifi/iwl-legacy.h with 80% similarity]
drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-hw.h [moved from drivers/net/wireless/iwlwifi/iwl-4965-hw.h with 97% similarity]
drivers/net/wireless/iwlegacy/iwl-4965-led.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-led.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-lib.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-rs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-rx.c [moved from drivers/net/wireless/iwlwifi/iwl-agn-rx.c with 59% similarity]
drivers/net/wireless/iwlegacy/iwl-4965-sta.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-tx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-ucode.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965.c [moved from drivers/net/wireless/iwlwifi/iwl-4965.c with 71% similarity]
drivers/net/wireless/iwlegacy/iwl-4965.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-commands.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-core.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-core.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-csr.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-debug.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-debugfs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-dev.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-devtrace.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-devtrace.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-eeprom.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-eeprom.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-fh.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-hcmd.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-helpers.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-io.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-led.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-led.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-legacy-rs.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-power.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-power.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-prph.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-rx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-scan.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-spectrum.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-sta.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-sta.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-tx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl3945-base.c [moved from drivers/net/wireless/iwlwifi/iwl3945-base.c with 89% similarity]
drivers/net/wireless/iwlegacy/iwl4965-base.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.c
drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
drivers/net/wireless/iwlwifi/iwl-agn-led.c
drivers/net/wireless/iwlwifi/iwl-agn-led.h
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.h
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-commands.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/iwlwifi/iwl-hcmd.c
drivers/net/wireless/iwlwifi/iwl-led.c
drivers/net/wireless/iwlwifi/iwl-led.h
drivers/net/wireless/iwlwifi/iwl-legacy.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-power.c
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-sta.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwmc3200wifi/cfg80211.c
drivers/net/wireless/iwmc3200wifi/rx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/dev.h
drivers/net/wireless/libertas/host.h
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas/mesh.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/scan.c
drivers/net/wireless/p54/Kconfig
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/eeprom.h
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/lmac.h
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/p54.h
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54spi_eeprom.h
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800lib.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00ht.c
drivers/net/wireless/rt2x00/rt2x00lib.h
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h
drivers/net/wireless/rt2x00/rt2x00reg.h
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/Makefile
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/debug.h
drivers/net/wireless/rtlwifi/efuse.c
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/pci.h
drivers/net/wireless/rtlwifi/ps.c
drivers/net/wireless/rtlwifi/rtl8192c/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c [moved from drivers/net/wireless/rtlwifi/rtl8192ce/fw.c with 94% similarity]
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h [moved from drivers/net/wireless/rtlwifi/rtl8192ce/fw.h with 100% similarity]
drivers/net/wireless/rtlwifi/rtl8192c/main.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
drivers/net/wireless/rtlwifi/rtl8192ce/def.h
drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
drivers/net/wireless/rtlwifi/rtl8192ce/led.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
drivers/net/wireless/rtlwifi/rtl8192cu/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/def.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/dm.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/dm.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/led.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/led.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/mac.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/phy.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/phy.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/reg.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/rf.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/sw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/table.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/table.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/trx.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/usb.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/usb.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/wl1251/acx.c
drivers/net/wireless/wl1251/acx.h
drivers/net/wireless/wl1251/event.c
drivers/net/wireless/wl1251/main.c
drivers/net/wireless/wl1251/ps.c
drivers/net/wireless/wl1251/rx.c
drivers/net/wireless/wl1251/tx.c
drivers/net/wireless/wl1251/wl1251.h
drivers/net/wireless/wl1251/wl12xx_80211.h
drivers/net/wireless/wl12xx/Kconfig
drivers/net/wireless/wl12xx/acx.c
drivers/net/wireless/wl12xx/acx.h
drivers/net/wireless/wl12xx/boot.c
drivers/net/wireless/wl12xx/boot.h
drivers/net/wireless/wl12xx/cmd.c
drivers/net/wireless/wl12xx/cmd.h
drivers/net/wireless/wl12xx/conf.h
drivers/net/wireless/wl12xx/debugfs.c
drivers/net/wireless/wl12xx/event.c
drivers/net/wireless/wl12xx/event.h
drivers/net/wireless/wl12xx/init.c
drivers/net/wireless/wl12xx/init.h
drivers/net/wireless/wl12xx/io.h
drivers/net/wireless/wl12xx/main.c
drivers/net/wireless/wl12xx/ps.c
drivers/net/wireless/wl12xx/ps.h
drivers/net/wireless/wl12xx/rx.c
drivers/net/wireless/wl12xx/rx.h
drivers/net/wireless/wl12xx/scan.c
drivers/net/wireless/wl12xx/sdio.c
drivers/net/wireless/wl12xx/spi.c
drivers/net/wireless/wl12xx/tx.c
drivers/net/wireless/wl12xx/tx.h
drivers/net/wireless/wl12xx/wl12xx.h
drivers/net/wireless/wl12xx/wl12xx_80211.h
drivers/net/wireless/zd1211rw/zd_chip.c
drivers/net/wireless/zd1211rw/zd_chip.h
drivers/net/wireless/zd1211rw/zd_def.h
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/wireless/zd1211rw/zd_mac.h
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/wireless/zd1211rw/zd_usb.h
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/pn544.c
drivers/of/pdt.c
drivers/parport/share.c
drivers/pci/pci-sysfs.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/pxa2xx_base.h
drivers/pcmcia/pxa2xx_colibri.c
drivers/pcmcia/pxa2xx_lubbock.c
drivers/platform/x86/Kconfig
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus_acpi.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/intel_scu_ipcutil.c
drivers/platform/x86/tc1100-wmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pps/clients/pps-ktimer.c
drivers/pps/clients/pps_parport.c
drivers/pps/generators/Kconfig
drivers/pps/generators/pps_gen_parport.c
drivers/pps/kapi.c
drivers/rapidio/rio-scan.c
drivers/rapidio/rio-sysfs.c
drivers/regulator/mc13xxx-regulator-core.c
drivers/regulator/wm831x-dcdc.c
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-at32ap700x.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91sam9.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-dev.c
drivers/rtc/rtc-ds1286.c
drivers/rtc/rtc-ds1305.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1374.c
drivers/rtc/rtc-ds3232.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-m48t59.c
drivers/rtc/rtc-mrst.c
drivers/rtc/rtc-msm6242.c
drivers/rtc/rtc-mv.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-proc.c
drivers/rtc/rtc-rp5c01.c
drivers/rtc/rtc-rs5c372.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-sa1100.c
drivers/rtc/rtc-sh.c
drivers/rtc/rtc-test.c
drivers/rtc/rtc-vr41xx.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/xpram.c
drivers/s390/char/keyboard.c
drivers/s390/char/tape.h
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/cio/qdio_main.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/smsgiucv.c
drivers/scsi/arcmsr/arcmsr.h
drivers/scsi/arcmsr/arcmsr_attr.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/cxgbi/cxgb3i/Kconfig
drivers/scsi/cxgbi/cxgb4i/Kconfig
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/sh/intc/chip.c
drivers/spi/pxa2xx_spi_pci.c
drivers/spi/spi_sh_msiof.c
drivers/ssb/main.c
drivers/ssb/pci.c
drivers/ssb/pcmcia.c
drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
drivers/staging/brcm80211/sys/wl_mac80211.c
drivers/staging/brcm80211/sys/wlc_mac80211.c
drivers/staging/brcm80211/sys/wlc_pub.h
drivers/staging/comedi/Kconfig
drivers/staging/comedi/drivers/mite.c
drivers/staging/comedi/drivers/ni_6527.c
drivers/staging/comedi/drivers/ni_65xx.c
drivers/staging/comedi/drivers/ni_660x.c
drivers/staging/comedi/drivers/ni_670x.c
drivers/staging/comedi/drivers/ni_labpc.c
drivers/staging/comedi/drivers/ni_pcidio.c
drivers/staging/comedi/drivers/ni_pcimio.c
drivers/staging/hv/blkvsc_drv.c
drivers/staging/hv/netvsc.c
drivers/staging/hv/netvsc_drv.c
drivers/staging/iio/adc/ad7476_core.c
drivers/staging/iio/adc/ad7887_core.c
drivers/staging/iio/adc/ad799x_core.c
drivers/staging/iio/dac/ad5446.c
drivers/staging/intel_sst/intelmid_v2_control.c
drivers/staging/lirc/lirc_zilog.c
drivers/staging/msm/msm_fb.c
drivers/staging/olpc_dcon/olpc_dcon.c
drivers/staging/pohmelfs/config.c
drivers/staging/rt2860/rt_main_dev.c
drivers/staging/rt2860/usb_main_dev.c
drivers/staging/rtl8712/hal_init.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/sm7xx/smtcfb.c
drivers/staging/speakup/kobjects.c
drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
drivers/staging/tidspbridge/core/io_sm.c
drivers/staging/tidspbridge/core/tiomap3430.c
drivers/staging/tidspbridge/include/dspbridge/io_sm.h
drivers/staging/usbip/stub.h
drivers/staging/usbip/stub_dev.c
drivers/staging/usbip/stub_rx.c
drivers/staging/usbip/vhci.h
drivers/staging/usbip/vhci_hcd.c
drivers/staging/usbip/vhci_rx.c
drivers/staging/vme/bridges/Module.symvers [deleted file]
drivers/staging/winbond/wbusb.c
drivers/staging/xgifb/vb_setmode.c
drivers/staging/zram/zram_drv.c
drivers/target/Makefile
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_iblock.c
drivers/target/target_core_mib.c [deleted file]
drivers/target/target_core_mib.h [deleted file]
drivers/target/target_core_pscsi.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/thermal/Kconfig
drivers/thermal/thermal_sys.c
drivers/tty/hvc/Makefile
drivers/tty/n_gsm.c
drivers/tty/n_hdlc.c
drivers/tty/serial/68328serial.c
drivers/tty/serial/68360serial.c
drivers/tty/serial/8250.c
drivers/tty/serial/Kconfig
drivers/tty/serial/bfin_5xx.c
drivers/tty/serial/max3100.c
drivers/tty/serial/max3107.c
drivers/tty/serial/sb1250-duart.c
drivers/tty/serial/serial_cs.c
drivers/tty/sysrq.c
drivers/tty/tty_io.c
drivers/tty/vt/selection.c
drivers/tty/vt/vc_screen.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/endpoint.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/ci13xxx_udc.c
drivers/usb/gadget/ci13xxx_udc.h
drivers/usb/gadget/composite.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/f_phonet.c
drivers/usb/gadget/pch_udc.c
drivers/usb/gadget/printer.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/host/Kconfig
drivers/usb/host/ehci-au1xxx.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-fsl.h
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-mxc.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/xhci-dbg.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/usbled.c
drivers/usb/misc/uss720.c
drivers/usb/musb/blackfin.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_dma.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_host.c
drivers/usb/musb/musbhsdma.h
drivers/usb/musb/omap2430.c
drivers/usb/otg/Kconfig
drivers/usb/otg/nop-usb-xceiv.c
drivers/usb/otg/ulpi.c
drivers/usb/serial/ch341.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/io_tables.h
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/keyspan.h
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/moto_modem.c
drivers/usb/serial/option.c
drivers/usb/serial/oti6858.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcaux.c
drivers/usb/serial/siemens_mpi.c
drivers/usb/serial/sierra.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/usb_debug.c
drivers/usb/serial/usb_wwan.c
drivers/usb/serial/visor.c
drivers/usb/storage/unusual_cypress.h
drivers/usb/storage/unusual_devs.h
drivers/vhost/net.c
drivers/vhost/vhost.h
drivers/video/arkfb.c
drivers/video/aty/aty128fb.c
drivers/video/aty/atyfb_base.c
drivers/video/aty/radeon_pm.c
drivers/video/backlight/ltv350qv.c
drivers/video/bf537-lq035.c
drivers/video/chipsfb.c
drivers/video/console/fbcon.c
drivers/video/console/vgacon.c
drivers/video/da8xx-fb.c
drivers/video/fbmem.c
drivers/video/fbsysfs.c
drivers/video/geode/gxfb_core.c
drivers/video/geode/lxfb_core.c
drivers/video/i810/i810_main.c
drivers/video/jz4740_fb.c
drivers/video/mx3fb.c
drivers/video/nuc900fb.c
drivers/video/nvidia/nvidia.c
drivers/video/ps3fb.c
drivers/video/pxa168fb.c
drivers/video/pxa3xx-gcu.c
drivers/video/s3fb.c
drivers/video/savage/savagefb_driver.c
drivers/video/sh_mobile_hdmi.c
drivers/video/sh_mobile_lcdcfb.c
drivers/video/sm501fb.c
drivers/video/tmiofb.c
drivers/video/uvesafb.c
drivers/video/via/viafbdev.c
drivers/video/vt8623fb.c
drivers/video/xen-fbfront.c
drivers/w1/masters/omap_hdq.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/m54xx_wdt.c [moved from drivers/watchdog/m548x_wdt.c with 80% similarity]
drivers/watchdog/sbc_fitpc2_wdt.c
drivers/xen/manage.c
firmware/Makefile
firmware/WHENCE
firmware/bnx2/bnx2-mips-09-6.2.1a.fw.ihex [moved from firmware/bnx2/bnx2-mips-09-6.2.1.fw.ihex with 62% similarity]
fs/afs/write.c
fs/aio.c
fs/block_dev.c
fs/btrfs/acl.c
fs/btrfs/compression.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/extent_map.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/ordered-data.c
fs/btrfs/print-tree.c
fs/btrfs/relocation.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/Makefile
fs/cifs/README
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifsacl.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsencrypt.h [deleted file]
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/link.c
fs/cifs/md4.c [deleted file]
fs/cifs/md5.c [deleted file]
fs/cifs/md5.h [deleted file]
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smbdes.c
fs/cifs/smbencrypt.c
fs/cifs/transport.c
fs/dcache.c
fs/dlm/lowcomms.c
fs/ecryptfs/dentry.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/eventfd.c
fs/eventpoll.c
fs/exec.c
fs/exofs/inode.c
fs/exofs/namei.c
fs/ext2/namei.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/fat/namei_vfat.c
fs/fcntl.c
fs/file_table.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/gfs2/dentry.c
fs/gfs2/glock.c
fs/gfs2/main.c
fs/hfs/dir.c
fs/hfsplus/extents.c
fs/hfsplus/part_tbl.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
fs/inode.c
fs/internal.h
fs/ioctl.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jfs/namei.c
fs/lockd/host.c
fs/minix/namei.c
fs/namei.c
fs/namespace.c
fs/nfs/callback.c
fs/nfs/callback.h
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/client.c
fs/nfs/delegation.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs3acl.c
fs/nfs/nfs3xdr.c
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.c
fs/nfs/write.c
fs/nfs_common/nfsacl.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/mdt.c
fs/nilfs2/namei.c
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/ntfs/mft.c
fs/ocfs2/dcache.c
fs/ocfs2/journal.h
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/open.c
fs/partitions/ldm.c
fs/partitions/mac.c
fs/posix_acl.c
fs/proc/array.c
fs/proc/base.c
fs/proc/consoles.c
fs/proc/inode.c
fs/proc/proc_devtree.c
fs/proc/proc_sysctl.c
fs/reiserfs/namei.c
fs/reiserfs/xattr.c
fs/squashfs/block.c
fs/squashfs/xz_wrapper.c
fs/squashfs/zlib_wrapper.c
fs/super.c
fs/sysv/namei.c
fs/udf/namei.c
fs/ufs/namei.c
fs/xfs/linux-2.6/xfs_discard.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/quota/xfs_qm.c
fs/xfs/xfs_alloc.h
fs/xfs/xfs_bmap.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_trans.c
include/asm-generic/pgtable.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_pciids.h
include/drm/radeon_drm.h
include/keys/rxrpc-type.h
include/linux/Kbuild
include/linux/audit.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/caif/Kbuild [new file with mode: 0644]
include/linux/ceph/messenger.h
include/linux/console.h
include/linux/cpu_rmap.h [new file with mode: 0644]
include/linux/dcbnl.h
include/linux/dccp.h
include/linux/ethtool.h
include/linux/freezer.h
include/linux/fs.h
include/linux/gfp.h
include/linux/huge_mm.h
include/linux/icmpv6.h
include/linux/ieee80211.h
include/linux/if.h
include/linux/if_link.h
include/linux/igmp.h
include/linux/inetdevice.h
include/linux/input/bu21013.h
include/linux/input/matrix_keypad.h
include/linux/ip_vs.h
include/linux/irq.h
include/linux/kernel.h
include/linux/klist.h
include/linux/kmemcheck.h
include/linux/list.h
include/linux/mfd/wm8994/core.h
include/linux/micrel_phy.h [new file with mode: 0644]
include/linux/mmc/sh_mmcif.h
include/linux/module.h
include/linux/moduleparam.h
include/linux/mroute.h
include/linux/mroute6.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/Kbuild
include/linux/netfilter/ipset/Kbuild [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_ahash.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_bitmap.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_getport.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_hash.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_list.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_timeout.h [new file with mode: 0644]
include/linux/netfilter/ipset/pfxlen.h [new file with mode: 0644]
include/linux/netfilter/nf_conntrack_snmp.h [new file with mode: 0644]
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/netfilter/x_tables.h
include/linux/netfilter/xt_AUDIT.h [new file with mode: 0644]
include/linux/netfilter/xt_CT.h
include/linux/netfilter/xt_NFQUEUE.h
include/linux/netfilter/xt_TCPOPTSTRIP.h
include/linux/netfilter/xt_TPROXY.h
include/linux/netfilter/xt_addrtype.h [new file with mode: 0644]
include/linux/netfilter/xt_cluster.h
include/linux/netfilter/xt_comment.h
include/linux/netfilter/xt_connlimit.h
include/linux/netfilter/xt_conntrack.h
include/linux/netfilter/xt_devgroup.h [new file with mode: 0644]
include/linux/netfilter/xt_quota.h
include/linux/netfilter/xt_set.h [new file with mode: 0644]
include/linux/netfilter/xt_socket.h
include/linux/netfilter/xt_time.h
include/linux/netfilter/xt_u32.h
include/linux/netfilter_bridge/ebt_802_3.h
include/linux/netfilter_bridge/ebt_among.h
include/linux/netfilter_bridge/ebt_arp.h
include/linux/netfilter_bridge/ebt_ip.h
include/linux/netfilter_bridge/ebt_ip6.h
include/linux/netfilter_bridge/ebt_limit.h
include/linux/netfilter_bridge/ebt_log.h
include/linux/netfilter_bridge/ebt_mark_m.h
include/linux/netfilter_bridge/ebt_nflog.h
include/linux/netfilter_bridge/ebt_pkttype.h
include/linux/netfilter_bridge/ebt_stp.h
include/linux/netfilter_bridge/ebt_ulog.h
include/linux/netfilter_bridge/ebt_vlan.h
include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
include/linux/netfilter_ipv4/ipt_ECN.h
include/linux/netfilter_ipv4/ipt_SAME.h
include/linux/netfilter_ipv4/ipt_TTL.h
include/linux/netfilter_ipv4/ipt_addrtype.h
include/linux/netfilter_ipv4/ipt_ah.h
include/linux/netfilter_ipv4/ipt_ecn.h
include/linux/netfilter_ipv4/ipt_ttl.h
include/linux/netfilter_ipv6/ip6t_HL.h
include/linux/netfilter_ipv6/ip6t_REJECT.h
include/linux/netfilter_ipv6/ip6t_ah.h
include/linux/netfilter_ipv6/ip6t_frag.h
include/linux/netfilter_ipv6/ip6t_hl.h
include/linux/netfilter_ipv6/ip6t_ipv6header.h
include/linux/netfilter_ipv6/ip6t_mh.h
include/linux/netfilter_ipv6/ip6t_opts.h
include/linux/netfilter_ipv6/ip6t_rt.h
include/linux/netlink.h
include/linux/nfsacl.h
include/linux/nl80211.h
include/linux/oprofile.h
include/linux/pci.h
include/linux/phonet.h
include/linux/pkt_sched.h
include/linux/pm.h
include/linux/pm_wakeup.h
include/linux/posix_acl.h
include/linux/ptrace.h
include/linux/res_counter.h
include/linux/rio_regs.h
include/linux/rtc.h
include/linux/sched.h
include/linux/security.h
include/linux/skbuff.h
include/linux/sockios.h
include/linux/ssb/ssb_regs.h
include/linux/sunrpc/bc_xprt.h
include/linux/sunrpc/svc_xprt.h
include/linux/syscalls.h
include/linux/sysctl.h
include/linux/sysrq.h
include/linux/thermal.h
include/linux/tipc.h
include/linux/tipc_config.h
include/linux/tracepoint.h
include/linux/usb/cdc.h
include/linux/usb/hcd.h
include/linux/usb/msm_hsusb_hw.h
include/linux/usb/serial.h
include/linux/virtio_config.h
include/linux/virtio_console.h
include/linux/workqueue.h
include/linux/xfrm.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/bluetooth/smp.h [new file with mode: 0644]
include/net/cfg80211.h
include/net/dcbnl.h
include/net/dn.h
include/net/dn_fib.h
include/net/dn_route.h
include/net/dst.h
include/net/dst_ops.h
include/net/flow.h
include/net/genetlink.h
include/net/icmp.h
include/net/ieee80211_radiotap.h
include/net/inet_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netevent.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_extend.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_conntrack_l3proto.h
include/net/netfilter/nf_conntrack_timestamp.h [new file with mode: 0644]
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netfilter/nf_tproxy_core.h
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/phonet/pep.h
include/net/phonet/phonet.h
include/net/protocol.h
include/net/route.h
include/net/sch_generic.h
include/net/sock.h
include/net/tcp.h
include/net/transp_v6.h
include/net/udp.h
include/net/udplite.h
include/net/xfrm.h
include/pcmcia/ds.h
include/scsi/scsi.h
include/sound/wm8903.h
include/target/target_core_base.h
include/target/target_core_transport.h
include/trace/events/block.h
include/trace/ftrace.h
init/calibrate.c
kernel/audit.c
kernel/auditfilter.c
kernel/capability.c
kernel/cpuset.c
kernel/cred.c
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/resend.c
kernel/module.c
kernel/params.c
kernel/perf_event.c
kernel/power/main.c
kernel/power/process.c
kernel/power/snapshot.c
kernel/printk.c
kernel/ptrace.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/sys.c
kernel/sysctl.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/tick-sched.c
kernel/time/timer_list.c
kernel/timer.c
kernel/trace/blktrace.c
kernel/trace/trace_events.c
kernel/trace/trace_export.c
kernel/trace/trace_syscalls.c
kernel/tracepoint.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/cpu_rmap.c [new file with mode: 0644]
lib/list_debug.c
lib/nlattr.c
lib/radix-tree.c
lib/rbtree.c
lib/swiotlb.c
lib/textsearch.c
mm/Kconfig
mm/huge_memory.c
mm/kmemleak-test.c
mm/kmemleak.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/mremap.c
mm/page_alloc.c
mm/pgtable-generic.c
mm/swapfile.c
mm/truncate.c
mm/vmscan.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/9p/trans_rdma.c
net/Kconfig
net/Makefile
net/atm/clip.c
net/batman-adv/Makefile
net/batman-adv/aggregation.c
net/batman-adv/aggregation.h
net/batman-adv/bat_debugfs.c
net/batman-adv/bat_debugfs.h
net/batman-adv/bat_sysfs.c
net/batman-adv/bat_sysfs.h
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/ring_buffer.c
net/batman-adv/ring_buffer.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c
net/batman-adv/unicast.h
net/batman-adv/vis.c
net/batman-adv/vis.h
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/core.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/capi.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c [moved from net/bluetooth/l2cap.c with 75% similarity]
net/bluetooth/l2cap_sock.c [new file with mode: 0644]
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/tty.c
net/bluetooth/sco.c
net/bridge/Kconfig
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/br_stp_timer.c
net/bridge/netfilter/ebt_ip6.c
net/bridge/netfilter/ebtables.c
net/caif/cfcnfg.c
net/caif/cfdgml.c
net/caif/cfserl.c
net/caif/cfutill.c
net/caif/cfveil.c
net/caif/chnl_net.c
net/ceph/messenger.c
net/ceph/pagevec.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/dst.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/flow.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dcb/dcbnl.c
net/dccp/ccids/ccid2.c
net/dccp/input.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/decnet/af_decnet.c
net/decnet/dn_fib.c
net/decnet/dn_nsp_out.c
net/decnet/dn_route.c
net/decnet/dn_rules.c
net/decnet/dn_table.c
net/dns_resolver/dns_key.c
net/dsa/dsa.c
net/dsa/mv88e6060.c
net/econet/af_econet.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/datagram.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/fib_hash.c [deleted file]
net/ipv4/fib_lookup.h
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_timewait_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arpt_mangle.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_LOG.c
net/ipv4/netfilter/ipt_addrtype.c [deleted file]
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/netfilter/nf_nat_amanda.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/netfilter/nf_nat_standalone.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_output.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_westwood.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/exthdrs.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/mip6.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_LOG.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_state.c
net/key/af_key.c
net/l2tp/l2tp_ip.c
net/llc/llc_input.c
net/mac80211/Kconfig
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.h
net/mac80211/driver-trace.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_pid.h
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/work.c
net/mac80211/wpa.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/Kconfig [new file with mode: 0644]
net/netfilter/ipset/Makefile [new file with mode: 0644]
net/netfilter/ipset/ip_set_bitmap_ip.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_bitmap_ipmac.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_bitmap_port.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_core.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_getport.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ip.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipport.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipportip.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipportnet.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_net.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_netport.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_list_set.c [new file with mode: 0644]
net/netfilter/ipset/pfxlen.c [new file with mode: 0644]
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_est.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_lc.c
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_pe.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/ipvs/ip_vs_proto.c
net/netfilter/ipvs/ip_vs_proto_ah_esp.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_proto_tcp.c
net/netfilter/ipvs/ip_vs_proto_udp.c
net/netfilter/ipvs/ip_vs_rr.c
net/netfilter/ipvs/ip_vs_sched.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/ipvs/ip_vs_wrr.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_broadcast.c [new file with mode: 0644]
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netbios_ns.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_snmp.c [new file with mode: 0644]
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_timestamp.c [new file with mode: 0644]
net/netfilter/nf_log.c
net/netfilter/nf_queue.c
net/netfilter/nf_tproxy_core.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/netfilter/xt_AUDIT.c [new file with mode: 0644]
net/netfilter/xt_CLASSIFY.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_LED.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TEE.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_addrtype.c [new file with mode: 0644]
net/netfilter/xt_connlimit.c
net/netfilter/xt_conntrack.c
net/netfilter/xt_cpu.c
net/netfilter/xt_devgroup.c [new file with mode: 0644]
net/netfilter/xt_iprange.c
net/netfilter/xt_ipvs.c
net/netfilter/xt_set.c [new file with mode: 0644]
net/netfilter/xt_socket.c
net/netlabel/netlabel_user.h
net/netlink/af_netlink.c
net/packet/af_packet.c
net/phonet/Kconfig
net/phonet/af_phonet.c
net/phonet/pep.c
net/phonet/socket.c
net/rds/ib_send.c
net/rds/loop.c
net/rds/rds.h
net/rose/af_rose.c
net/rose/rose_route.c
net/rxrpc/ar-input.c
net/rxrpc/ar-key.c
net/rxrpc/ar-peer.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_cmp.c
net/sched/em_meta.c
net/sched/em_nbyte.c
net/sched/em_text.c
net/sched/em_u32.c
net/sched/ematch.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_choke.c [new file with mode: 0644]
net/sched/sch_drr.c
net/sched/sch_dsmark.c
net/sched/sch_fifo.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c [new file with mode: 0644]
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_red.c
net/sched/sch_sfb.c [new file with mode: 0644]
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sctp/tsnmap.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/svcsock.c
net/tipc/Kconfig
net/tipc/addr.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/net.c
net/tipc/net.h
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c
net/tipc/node_subscr.h
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/tipc/subscr.c
net/unix/af_unix.c
net/wanrouter/wanmain.c
net/wireless/core.c
net/wireless/ethtool.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/reg.h
net/wireless/util.c
net/wireless/wext-compat.c
net/x25/x25_facilities.c
net/x25/x25_in.c
net/x25/x25_link.c
net/xfrm/Makefile
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_hash.h
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c [new file with mode: 0644]
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/basic/fixdep.c
scripts/package/builddeb
security/capability.c
security/commoncap.c
security/keys/Makefile
security/keys/encrypted.c [moved from security/keys/encrypted_defined.c with 99% similarity]
security/keys/encrypted.h [moved from security/keys/encrypted_defined.h with 100% similarity]
security/keys/internal.h
security/keys/key.c
security/keys/keyring.c
security/keys/request_key.c
security/keys/trusted.c [moved from security/keys/trusted_defined.c with 99% similarity]
security/keys/trusted.h [moved from security/keys/trusted_defined.h with 100% similarity]
security/security.c
security/selinux/hooks.c
security/selinux/include/xfrm.h
security/selinux/ss/conditional.c
security/selinux/ss/policydb.c
security/selinux/xfrm.c
sound/arm/aaci.c
sound/atmel/ac97c.c
sound/core/hrtimer.c
sound/core/jack.c
sound/drivers/mtpav.c
sound/oss/Makefile
sound/pci/au88x0/au88x0_core.c
sound/pci/azt3328.c
sound/pci/hda/hda_eld.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/oxygen/oxygen.h
sound/pci/oxygen/oxygen_mixer.c
sound/pci/oxygen/xonar_cs43xx.c
sound/pci/oxygen/xonar_dg.c
sound/pcmcia/pdaudiocf/pdaudiocf.h
sound/pcmcia/vx/vxp_ops.c
sound/soc/atmel/snd-soc-afeb9260.c
sound/soc/blackfin/bf5xx-ssm2602.c
sound/soc/codecs/cq93vc.c
sound/soc/codecs/cx20442.c
sound/soc/codecs/wm8903.c
sound/soc/codecs/wm8903.h
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8995.c
sound/soc/codecs/wm9081.c
sound/soc/codecs/wm_hubs.c
sound/soc/davinci/davinci-evm.c
sound/soc/imx/eukrea-tlv320.c
sound/soc/omap/ams-delta.c
sound/soc/pxa/corgi.c
sound/soc/pxa/e740_wm9705.c
sound/soc/pxa/e750_wm9705.c
sound/soc/pxa/e800_wm9712.c
sound/soc/pxa/em-x270.c
sound/soc/pxa/mioa701_wm9713.c
sound/soc/pxa/palm27x.c
sound/soc/pxa/poodle.c
sound/soc/pxa/spitz.c
sound/soc/pxa/tosa.c
sound/soc/pxa/zylonite.c
sound/soc/samsung/neo1973_gta02_wm8753.c
sound/soc/samsung/neo1973_wm8753.c
sound/soc/samsung/s3c24xx_simtec_hermes.c
sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
sound/soc/samsung/s3c24xx_uda134x.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/usb/caiaq/audio.c
sound/usb/caiaq/midi.c
sound/usb/card.c
sound/usb/mixer.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-kmem.c
tools/perf/builtin-lock.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-timechart.c
tools/perf/builtin-top.c
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/include/linux/bitops.h
tools/perf/util/map.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/probe-event.c
tools/perf/util/session.c
tools/perf/util/svghelper.c
tools/perf/util/symbol.c
tools/perf/util/types.h
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/browsers/map.c
tools/perf/util/values.c
tools/power/x86/turbostat/turbostat.c

index 8faa6c0..5d56a3f 100644 (file)
@@ -28,6 +28,7 @@ modules.builtin
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
index 581fd39..1eba28a 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -23,6 +23,7 @@ Andy Adamson <andros@citi.umich.edu>
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
 Axel Dyks <xl@xlsigned.net>
+Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
diff --git a/Documentation/ABI/testing/sysfs-platform-at91 b/Documentation/ABI/testing/sysfs-platform-at91
new file mode 100644 (file)
index 0000000..4cc6a86
--- /dev/null
@@ -0,0 +1,25 @@
+What:          /sys/devices/platform/at91_can/net/<iface>/mb0_id
+Date:          January 2011
+KernelVersion: 2.6.38
+Contact:       Marc Kleine-Budde <kernel@pengutronix.de>
+Description:
+               Value representing the can_id of mailbox 0.
+
+               Default: 0x7ff (standard frame)
+
+               Due to a chip bug (errata 50.2.6.3 & 50.3.5.3 in
+               "AT91SAM9263 Preliminary 6249H-ATARM-27-Jul-09") the
+               contents of mailbox 0 may be sent under certain
+               conditions (even if disabled or in rx mode).
+
+               The workaround in the errata suggests not to use the
+               mailbox and load it with an unused identifier.
+
+               In order to use an extended can_id add the
+               CAN_EFF_FLAG (0x80000000U) to the can_id. Example:
+
+               - standard id 0x7ff:
+               echo 0x7ff      > /sys/class/net/can0/mb0_id
+
+               - extended id 0x1fffffff:
+               echo 0x9fffffff > /sys/class/net/can0/mb0_id
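
As a minimal illustration of how the extended-id value in the example above is formed, the following user-space sketch ORs the CAN_EFF_FLAG bit into a 29-bit identifier and prints the corresponding write to the sysfs attribute; the interface name can0 is taken from the example, everything else is illustrative:

#include <stdio.h>

#define CAN_EFF_FLAG 0x80000000U  /* extended frame format flag, as in <linux/can.h> */

int main(void)
{
        unsigned int id = 0x1fffffff;             /* 29-bit extended identifier */
        unsigned int mb0_id = id | CAN_EFF_FLAG;  /* value to write into mb0_id */

        /* Prints: echo 0x9fffffff > /sys/class/net/can0/mb0_id */
        printf("echo 0x%x > /sys/class/net/can0/mb0_id\n", mb0_id);
        return 0;
}
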
index 35447e0..36f63d4 100644 (file)
@@ -217,8 +217,8 @@ X!Isound/sound_firmware.c
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Iinclude/linux/serial_core.h
-!Edrivers/serial/serial_core.c
-!Edrivers/serial/8250.c
+!Edrivers/tty/serial/serial_core.c
+!Edrivers/tty/serial/8250.c
   </chapter>
 
   <chapter id="fbdev">
index 2861055..c279158 100644 (file)
@@ -73,8 +73,8 @@
       services.
     </para>
     <para>
-      The core of every DRM driver is struct drm_device.  Drivers
-      will typically statically initialize a drm_device structure,
+      The core of every DRM driver is struct drm_driver.  Drivers
+      will typically statically initialize a drm_driver structure,
       then pass it to drm_init() at load time.
     </para>
 
@@ -84,7 +84,7 @@
     <title>Driver initialization</title>
     <para>
       Before calling the DRM initialization routines, the driver must
-      first create and fill out a struct drm_device structure.
+      first create and fill out a struct drm_driver structure.
     </para>
     <programlisting>
       static struct drm_driver driver = {
index 5e87ad5..f51f285 100644 (file)
      </sect1>
   </chapter>
 
+  <chapter id="fs_events">
+     <title>Events based on file descriptors</title>
+!Efs/eventfd.c
+  </chapter>
+
   <chapter id="sysfs">
      <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
@@ -13,7 +13,6 @@ Table of Contents
 
   I - Introduction
     1) Entry point for arch/powerpc
-    2) Board support
 
   II - The DT block format
     1) Header
@@ -41,13 +40,6 @@ Table of Contents
   VI - System-on-a-chip devices and nodes
     1) Defining child nodes of an SOC
     2) Representing devices without a current OF specification
-      a) PHY nodes
-      b) Interrupt controllers
-      c) 4xx/Axon EMAC ethernet nodes
-      d) Xilinx IP cores
-      e) USB EHCI controllers
-      f) MDIO on GPIOs
-      g) SPI busses
 
   VII - Specifying interrupt information for devices
     1) interrupts property
@@ -123,7 +115,7 @@ Revision Information
 I - Introduction
 ================
 
-During the recent development of the Linux/ppc64 kernel, and more
+During the development of the Linux/ppc64 kernel, and more
 specifically, the addition of new platform types outside of the old
 IBM pSeries/iSeries pair, it was decided to enforce some strict rules
 regarding the kernel entry and bootloader <-> kernel interfaces, in
@@ -146,7 +138,7 @@ section III, but, for example, the kernel does not require you to
 create a node for every PCI device in the system. It is a requirement
 to have a node for PCI host bridges in order to provide interrupt
 routing information and memory/IO ranges, among others. It is also
-recommended to define nodes for on chip devices and other busses that
+recommended to define nodes for on chip devices and other buses that
 don't specifically fit in an existing OF specification. This creates a
 great flexibility in the way the kernel can then probe those and match
 drivers to device, without having to hard code all sorts of tables. It
@@ -158,7 +150,7 @@ it with special cases.
 1) Entry point for arch/powerpc
 -------------------------------
 
-   There is one and one single entry point to the kernel, at the start
+   There is one single entry point to the kernel, at the start
    of the kernel image. That entry point supports two calling
    conventions:
 
@@ -210,12 +202,6 @@ it with special cases.
         with all CPUs. The way to do that with method b) will be
         described in a later revision of this document.
 
-
-2) Board support
-----------------
-
-64-bit kernels:
-
    Board supports (platforms) are not exclusive config options. An
    arbitrary set of board supports can be built in a single kernel
    image. The kernel will "know" what set of functions to use for a
@@ -234,48 +220,11 @@ it with special cases.
         containing the various callbacks that the generic code will
         use to get to your platform specific code
 
-        c) Add a reference to your "ppc_md" structure in the
-        "machines" table in arch/powerpc/kernel/setup_64.c if you are
-        a 64-bit platform.
-
-        d) request and get assigned a platform number (see PLATFORM_*
-        constants in arch/powerpc/include/asm/processor.h
-
-32-bit embedded kernels:
-
-  Currently, board support is essentially an exclusive config option.
-  The kernel is configured for a single platform.  Part of the reason
-  for this is to keep kernels on embedded systems small and efficient;
-  part of this is due to the fact the code is already that way. In the
-  future, a kernel may support multiple platforms, but only if the
+  A kernel image may support multiple platforms, but only if the
   platforms feature the same core architecture.  A single kernel build
   cannot support both configurations with Book E and configurations
   with classic Powerpc architectures.
 
-  32-bit embedded platforms that are moved into arch/powerpc using a
-  flattened device tree should adopt the merged tree practice of
-  setting ppc_md up dynamically, even though the kernel is currently
-  built with support for only a single platform at a time.  This allows
-  unification of the setup code, and will make it easier to go to a
-  multiple-platform-support model in the future.
-
-NOTE: I believe the above will be true once Ben's done with the merge
-of the boot sequences.... someone speak up if this is wrong!
-
-  To add a 32-bit embedded platform support, follow the instructions
-  for 64-bit platforms above, with the exception that the Kconfig
-  option should be set up such that the kernel builds exclusively for
-  the platform selected.  The processor type for the platform should
-  enable another config option to select the specific board
-  supported.
-
-NOTE: If Ben doesn't merge the setup files, may need to change this to
-point to setup_32.c
-
-
-   I will describe later the boot process and various callbacks that
-   your platform should implement.
-
 
 II - The DT block format
 ========================
@@ -300,8 +249,8 @@ the block to RAM before passing it to the kernel.
 1) Header
 ---------
 
-   The kernel is entered with r3 pointing to an area of memory that is
-   roughly described in arch/powerpc/include/asm/prom.h by the structure
+   The kernel is passed the physical address pointing to an area of memory
+   that is roughly described in include/linux/of_fdt.h by the structure
    boot_param_header:
 
 struct boot_param_header {
@@ -339,7 +288,7 @@ struct boot_param_header {
    All values in this header are in big endian format, the various
    fields in this header are defined more precisely below. All
    "offset" values are in bytes from the start of the header; that is
-   from the value of r3.
+   from the physical base address of the device tree block.
 
    - magic
 
@@ -437,7 +386,7 @@ struct boot_param_header {
 
 
              ------------------------------
-       r3 -> |  struct boot_param_header  |
+     base -> |  struct boot_param_header  |
              ------------------------------
              |      (alignment gap) (*)   |
              ------------------------------
@@ -457,7 +406,7 @@ struct boot_param_header {
       -----> ------------------------------
       |
       |
-      --- (r3 + totalsize)
+      --- (base + totalsize)
 
   (*) The alignment gaps are not necessarily present; their presence
       and size are dependent on the various alignment requirements of
@@ -500,7 +449,7 @@ the device-tree structure. It is typically used to represent "path" in
 the device-tree. More details about the actual format of these will be
 below.
 
-The kernel powerpc generic code does not make any formal use of the
+The kernel generic code does not make any formal use of the
 unit address (though some board support code may do) so the only real
 requirement here for the unit address is to ensure uniqueness of
 the node unit name at a given level of the tree. Nodes with no notion
@@ -518,20 +467,21 @@ path to the root node is "/".
 
 Every node which actually represents an actual device (that is, a node
 which isn't only a virtual "container" for more nodes, like "/cpus"
-is) is also required to have a "device_type" property indicating the
-type of node .
+is) is also required to have a "compatible" property indicating the
+specific hardware and an optional list of devices it is fully
+backwards compatible with.
 
 Finally, every node that can be referenced from a property in another
-node is required to have a "linux,phandle" property. Real open
-firmware implementations provide a unique "phandle" value for every
-node that the "prom_init()" trampoline code turns into
-"linux,phandle" properties. However, this is made optional if the
-flattened device tree is used directly. An example of a node
+node is required to have either a "phandle" or a "linux,phandle"
+property. Real Open Firmware implementations provide a unique
+"phandle" value for every node that the "prom_init()" trampoline code
+turns into "linux,phandle" properties. However, this is made optional
+if the flattened device tree is used directly. An example of a node
 referencing another node via "phandle" is when laying out the
 interrupt tree which will be described in a further version of this
 document.
 
-This "linux, phandle" property is a 32-bit value that uniquely
+The "phandle" property is a 32-bit value that uniquely
 identifies a node. You are free to use whatever values or system of
 values, internal pointers, or whatever to generate these, the only
 requirement is that every node for which you provide that property has
@@ -694,7 +644,7 @@ made of 3 cells, the bottom two containing the actual address itself
 while the top cell contains address space indication, flags, and pci
 bus & device numbers.
 
-For busses that support dynamic allocation, it's the accepted practice
+For buses that support dynamic allocation, it's the accepted practice
 to then not provide the address in "reg" (keep it 0) though while
 providing a flag indicating the address is dynamically allocated, and
 then, to provide a separate "assigned-addresses" property that
@@ -711,7 +661,7 @@ prom_parse.c file of the recent kernels for your bus type.
 The "reg" property only defines addresses and sizes (if #size-cells is
 non-0) within a given bus. In order to translate addresses upward
 (that is into parent bus addresses, and possibly into CPU physical
-addresses), all busses must contain a "ranges" property. If the
+addresses), all buses must contain a "ranges" property. If the
 "ranges" property is missing at a given level, it's assumed that
 translation isn't possible, i.e., the registers are not visible on the
 parent bus.  The format of the "ranges" property for a bus is a list
@@ -727,9 +677,9 @@ example, for a PCI host controller, that would be a CPU address. For a
 PCI<->ISA bridge, that would be a PCI address. It defines the base
 address in the parent bus where the beginning of that range is mapped.
 
-For a new 64-bit powerpc board, I recommend either the 2/2 format or
+For new 64-bit board support, I recommend either the 2/2 format or
 Apple's 2/1 format which is slightly more compact since sizes usually
-fit in a single 32-bit word.   New 32-bit powerpc boards should use a
+fit in a single 32-bit word.   New 32-bit board support should use a
 1/1 format, unless the processor supports physical addresses greater
 than 32-bits, in which case a 2/1 format is recommended.
 
@@ -754,7 +704,7 @@ of their actual names.
 While earlier users of Open Firmware like OldWorld macintoshes tended
 to use the actual device name for the "name" property, it's nowadays
 considered a good practice to use a name that is closer to the device
-class (often equal to device_type). For example, nowadays, ethernet
+class (often equal to device_type). For example, nowadays, Ethernet
 controllers are named "ethernet", an additional "model" property
 defining precisely the chip type/model, and "compatible" property
 defining the family in case a single driver can drive more than one
@@ -772,7 +722,7 @@ is present).
 4) Note about node and property names and character set
 -------------------------------------------------------
 
-While open firmware provides more flexible usage of 8859-1, this
+While Open Firmware provides more flexible usage of 8859-1, this
 specification enforces more strict rules. Nodes and properties should
 be comprised only of ASCII characters 'a' to 'z', '0' to
 '9', ',', '.', '_', '+', '#', '?', and '-'. Node names additionally
@@ -792,7 +742,7 @@ address which can extend beyond that limit.
 --------------------------------
   These are all that are currently required. However, it is strongly
   recommended that you expose PCI host bridges as documented in the
-  PCI binding to open firmware, and your interrupt tree as documented
+  PCI binding to Open Firmware, and your interrupt tree as documented
   in OF interrupt tree specification.
 
   a) The root node
@@ -802,20 +752,12 @@ address which can extend beyond that limit.
     - model : this is your board name/model
     - #address-cells : address representation for "root" devices
     - #size-cells: the size representation for "root" devices
-    - device_type : This property shouldn't be necessary. However, if
-      you decide to create a device_type for your root node, make sure it
-      is _not_ "chrp" unless your platform is a pSeries or PAPR compliant
-      one for 64-bit, or a CHRP-type machine for 32-bit as this will
-      matched by the kernel this way.
-
-  Additionally, some recommended properties are:
-
     - compatible : the board "family" generally finds its way here,
       for example, if you have 2 board models with a similar layout,
       that typically get driven by the same platform code in the
-      kernel, you would use a different "model" property but put a
-      value in "compatible". The kernel doesn't directly use that
-      value but it is generally useful.
+      kernel, you would specify the exact board model in the
+      compatible property followed by an entry that represents the SoC
+      model.
 
   The root node is also generally where you add additional properties
   specific to your board like the serial number if any, that sort of
@@ -841,8 +783,11 @@ address which can extend beyond that limit.
 
   So under /cpus, you are supposed to create a node for every CPU on
   the machine. There is no specific restriction on the name of the
-  CPU, though It's common practice to call it PowerPC,<name>. For
+  CPU, though it's common to call it <architecture>,<core>. For
   example, Apple uses PowerPC,G5 while IBM uses PowerPC,970FX.
+  However, the Generic Names convention suggests that it would be
+  better to simply use 'cpu' for each cpu node and use the compatible
+  property to identify the specific cpu core.
 
   Required properties:
 
@@ -923,7 +868,7 @@ compatibility.
 
   e) The /chosen node
 
-  This node is a bit "special". Normally, that's where open firmware
+  This node is a bit "special". Normally, that's where Open Firmware
   puts some variable environment information, like the arguments, or
   the default input/output devices.
 
@@ -940,11 +885,7 @@ compatibility.
       console device if any. Typically, if you have serial devices on
       your board, you may want to put the full path to the one set as
       the default console in the firmware here, for the kernel to pick
-      it up as its own default console. If you look at the function
-      set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see
-      that the kernel tries to find out the default console and has
-      knowledge of various types like 8250 serial ports. You may want
-      to extend this function to add your own.
+      it up as its own default console.
 
   Note that u-boot creates and fills in the chosen node for platforms
   that use it.
@@ -955,23 +896,23 @@ compatibility.
 
   f) the /soc<SOCname> node
 
-  This node is used to represent a system-on-a-chip (SOC) and must be
-  present if the processor is a SOC. The top-level soc node contains
-  information that is global to all devices on the SOC. The node name
-  should contain a unit address for the SOC, which is the base address
-  of the memory-mapped register set for the SOC. The name of an soc
+  This node is used to represent a system-on-a-chip (SoC) and must be
+  present if the processor is a SoC. The top-level soc node contains
+  information that is global to all devices on the SoC. The node name
+  should contain a unit address for the SoC, which is the base address
+  of the memory-mapped register set for the SoC. The name of an SoC
   node should start with "soc", and the remainder of the name should
   represent the part number for the soc.  For example, the MPC8540's
   soc node would be called "soc8540".
 
   Required properties:
 
-    - device_type : Should be "soc"
     - ranges : Should be defined as specified in 1) to describe the
-      translation of SOC addresses for memory mapped SOC registers.
-    - bus-frequency: Contains the bus frequency for the SOC node.
+      translation of SoC addresses for memory mapped SoC registers.
+    - bus-frequency: Contains the bus frequency for the SoC node.
       Typically, the value of this field is filled in by the boot
       loader.
+    - compatible : Exact model of the SoC
 
 
   Recommended properties:
@@ -1155,12 +1096,13 @@ while all this has been defined and implemented.
 
   - An example of code for iterating nodes & retrieving properties
     directly from the flattened tree format can be found in the kernel
-    file arch/ppc64/kernel/prom.c, look at scan_flat_dt() function,
+    file drivers/of/fdt.c.  Look at the of_scan_flat_dt() function,
     its usage in early_init_devtree(), and the corresponding various
     early_init_dt_scan_*() callbacks. That code can be re-used in a
     GPL bootloader, and as the author of that code, I would be happy
     to discuss possible free licensing to any vendor who wishes to
     integrate all or part of this code into a non-GPL bootloader.
+    (reference needed; who is 'I' here? ---gcl Jan 31, 2011)
 
 
 
@@ -1203,18 +1145,19 @@ MPC8540.
 2) Representing devices without a current OF specification
 ----------------------------------------------------------
 
-Currently, there are many devices on SOCs that do not have a standard
-representation pre-defined as part of the open firmware
-specifications, mainly because the boards that contain these SOCs are
-not currently booted using open firmware.   This section contains
-descriptions for the SOC devices for which new nodes have been
-defined; this list will expand as more and more SOC-containing
-platforms are moved over to use the flattened-device-tree model.
+Currently, there are many devices on SoCs that do not have a standard
+representation defined as part of the Open Firmware specifications,
+mainly because the boards that contain these SoCs are not currently
+booted using Open Firmware.  Binding documentation for new devices
+should be added to the Documentation/devicetree/bindings directory.
+That directory will expand as device tree support is added to more and
+more SoCs.
+
 
 VII - Specifying interrupt information for devices
 ===================================================
 
-The device tree represents the busses and devices of a hardware
+The device tree represents the buses and devices of a hardware
 system in a form similar to the physical bus topology of the
 hardware.
 
index b959659..a9c4245 100644 (file)
@@ -35,6 +35,17 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>
 
 ---------------------------
 
+What:  AR9170USB
+When:  2.6.40
+
+Why:   This driver is deprecated and the firmware is no longer
+       maintained. The replacement driver "carl9170" has been
+       around for a while, so the devices are still supported.
+
+Who:   Christian Lamparter <chunkeey@googlemail.com>
+
+---------------------------
+
 What:  IRQF_SAMPLE_RANDOM
 Check: IRQF_SAMPLE_RANDOM
 When:  July 2009
@@ -603,3 +614,35 @@ Why:       The adm9240, w83792d and w83793 hardware monitoring drivers have
 Who:   Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
+
+What:  xt_connlimit rev 0
+When:  2012
+Who:   Jan Engelhardt <jengelh@medozas.de>
+Files: net/netfilter/xt_connlimit.c
+
+----------------------------
+
+What:  noswapaccount kernel command line parameter
+When:  2.6.40
+Why:   The original implementation of memsw feature enabled by
+       CONFIG_CGROUP_MEM_RES_CTLR_SWAP could be disabled by the noswapaccount
+       kernel parameter (introduced in 2.6.29-rc1). Later on, this decision
+       turned out not to be ideal because we cannot have the feature compiled
+       in and disabled by default while letting only interested users enable
+       it (e.g. general distribution kernels might need it). Therefore we have
+       added the swapaccount[=0|1] parameter (introduced in 2.6.37) which
+       provides both possibilities. If we remove noswapaccount we will have
+       fewer command line parameters with the same functionality and we
+       can also clean up the parameter handling a bit.
+Who:   Michal Hocko <mhocko@suse.cz>
+
+----------------------------
+
+What:  ipt_addrtype match include file
+When:  2012
+Why:   superseded by xt_addrtype
+Who:   Florian Westphal <fw@strlen.de>
+Files: include/linux/netfilter_ipv4/ipt_addrtype.h
+
+----------------------------
index 6ef8cf3..933bc66 100644 (file)
@@ -460,6 +460,8 @@ Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 2.1.30:
        - Fix writev() (it kept writing the first segment over and over again
          instead of moving onto subsequent segments).
+       - Fix crash in ntfs_mft_record_alloc() when mapping the new extent mft
+         record failed.
 2.1.29:
        - Fix a deadlock when mounting read-write.
 2.1.28:
index 0e76ef1..a22ecf4 100644 (file)
@@ -51,7 +51,8 @@ Supported chips:
   * JEDEC JC 42.4 compliant temperature sensor chips
     Prefix: 'jc42'
     Addresses scanned: I2C 0x18 - 0x1f
-    Datasheet: -
+    Datasheet:
+       http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
 
 Author:
        Guenter Roeck <guenter.roeck@ericsson.com>
@@ -60,7 +61,11 @@ Author:
 Description
 -----------
 
-This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
+This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
+which are used on many DDR3 memory modules for mobile devices and servers. Some
+systems use the sensor to prevent memory overheating by automatically throttling
+the memory controller.
+
 The driver auto-detects the chips listed above, but can be manually instantiated
 to support other JC 42.4 compliant chips.
 
@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
 which applies to all limits. This register can be written by writing into
 temp1_crit_hyst. Other hysteresis attributes are read-only.
 
+If the BIOS has configured the sensor for automatic temperature management, it
+is likely that it has locked the registers, i.e., that the temperature limits
+cannot be changed.
+
 Sysfs entries
 -------------
 
 temp1_input            Temperature (RO)
-temp1_min              Minimum temperature (RW)
-temp1_max              Maximum temperature (RW)
-temp1_crit             Critical high temperature (RW)
+temp1_min              Minimum temperature (RO or RW)
+temp1_max              Maximum temperature (RO or RW)
+temp1_crit             Critical high temperature (RO or RW)
 
-temp1_crit_hyst                Critical hysteresis temperature (RW)
+temp1_crit_hyst                Critical hysteresis temperature (RO or RW)
 temp1_max_hyst         Maximum hysteresis temperature (RO)
 
 temp1_min_alarm                Temperature low alarm
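
As a small worked example of the sysfs interface listed above, the sketch below reads temp1_input and converts it from millidegrees Celsius (the unit hwmon temperature attributes use) to degrees. The hwmon0 path is a placeholder; the actual hwmon number depends on the system:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/hwmon/hwmon0/temp1_input"; /* placeholder hwmon number */
        FILE *f = fopen(path, "r");
        long millideg;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%ld", &millideg) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        /* hwmon temperature attributes report millidegrees Celsius. */
        printf("DIMM temperature: %.3f degC\n", millideg / 1000.0);
        return 0;
}
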
index 6526eee..d2b56a4 100644 (file)
@@ -9,6 +9,8 @@ Supported chips:
   Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+* AMD Family 12h processors: "Llano"
+* AMD Family 14h processors: "Brazos" (C/E/G-Series)
 
   Prefix: 'k10temp'
   Addresses scanned: PCI space
@@ -17,10 +19,14 @@ Supported chips:
     http://support.amd.com/us/Processor_TechDocs/31116.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41256.pdf
+  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+    http://support.amd.com/us/Processor_TechDocs/43170.pdf
   Revision Guide for AMD Family 10h Processors:
     http://support.amd.com/us/Processor_TechDocs/41322.pdf
   Revision Guide for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41788.pdf
+  Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+    http://support.amd.com/us/Processor_TechDocs/47534.pdf
   AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
     http://support.amd.com/us/Processor_TechDocs/43373.pdf
   AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -34,7 +40,7 @@ Description
 -----------
 
 This driver permits reading of the internal temperature sensor of AMD
-Family 10h and 11h processors.
+Family 10h/11h/12h/14h processors.
 
 All these processors have a sensor, but on those for Socket F or AM2+,
 the sensor may return inconsistent values (erratum 319).  The driver
index b72e071..f4a04c0 100644 (file)
@@ -43,11 +43,11 @@ parameter is applicable:
        AVR32   AVR32 architecture is enabled.
        AX25    Appropriate AX.25 support is enabled.
        BLACKFIN Blackfin architecture is enabled.
+       DRM     Direct Rendering Management support is enabled.
+       DYNAMIC_DEBUG Build in debug messages and enable them at runtime
        EDD     BIOS Enhanced Disk Drive Services (EDD) is enabled
        EFI     EFI Partitioning (GPT) is enabled
        EIDE    EIDE/ATAPI support is enabled.
-       DRM     Direct Rendering Management support is enabled.
-       DYNAMIC_DEBUG Build in debug messages and enable them at runtime
        FB      The frame buffer device is enabled.
        GCOV    GCOV profiling is enabled.
        HW      Appropriate hardware is enabled.
@@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
 and is between 256 and 4096 characters. It is defined in the file
 ./include/asm/setup.h as COMMAND_LINE_SIZE.
 
+Finally, the [KMG] suffix is commonly described after a number of kernel
+parameter values. These 'K', 'M', and 'G' letters represent the _binary_
+multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
+bytes respectively. Such letter suffixes can also be entirely omitted.
+
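
As a quick worked example of the suffixes above: log_buf_len=1M requests
2^20 = 1048576 bytes for the printk buffer, and log_buf_len=1048576 with the
suffix omitted requests exactly the same amount.
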
 
        acpi=           [HW,ACPI,X86]
                        Advanced Configuration and Power Interface
@@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
                        Format:
                        <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
 
-       crashkernel=nn[KMG]@ss[KMG]
-                       [KNL] Reserve a chunk of physical memory to
-                       hold a kernel to switch to with kexec on panic.
+       crashkernel=size[KMG][@offset[KMG]]
+                       [KNL] Using kexec, Linux can switch to a 'crash kernel'
+                       upon panic. This parameter reserves the physical
+                       memory region [offset, offset + size] for that kernel
+                       image. If '@offset' is omitted, then a suitable offset
+                       is selected automatically. Check
+                       Documentation/kdump/kdump.txt for further details.
 
        crashkernel=range1:size1[,range2:size2,...][@offset]
                        [KNL] Same as above, but depends on the memory
                        in the running system. The syntax of range is
                        start-[end] where start and end are both
                        a memory unit (amount[KMG]). See also
-                       Documentation/kdump/kdump.txt for a example.
+                       Documentation/kdump/kdump.txt for an example.
 
        cs89x0_dma=     [HW,NET]
                        Format: <dma>
@@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
                        6 (KERN_INFO)           informational
                        7 (KERN_DEBUG)          debug-level messages
 
-       log_buf_len=n   Sets the size of the printk ring buffer, in bytes.
-                       Format: { n | nk | nM }
-                       n must be a power of two.  The default size
-                       is set in the kernel config file.
+       log_buf_len=n[KMG]      Sets the size of the printk ring buffer,
+                       in bytes.  n must be a power of two.  The default
+                       size is set in the kernel config file.
 
        logo.nologo     [FB] Disables display of the built-in Linux logo.
                        This may be used to provide more screen space for
index fe5c099..4edd78d 100644 (file)
@@ -40,8 +40,6 @@ decnet.txt
        - info on using the DECnet networking layer in Linux.
 depca.txt
        - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-       - the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
        - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
        - info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
        - serial IP load balancing
-ethertap.txt
-       - the Ethertap user space packet reception and transmission driver
 ewrk3.txt
        - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
        - TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
        - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-       - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
        - general info on X.25 development.
 x25-iface.txt
index 5aba7a3..24c308d 100644 (file)
@@ -4,6 +4,8 @@ obj- := dummy.o
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
index 77f0cdd..18afcd8 100644 (file)
@@ -1,4 +1,4 @@
-[state: 21-11-2010]
+[state: 27-01-2011]
 
 BATMAN-ADV
 ----------
@@ -67,15 +67,16 @@ All  mesh  wide  settings  can be found in batman's own interface
 folder:
 
 #  ls  /sys/class/net/bat0/mesh/
-#  aggregated_ogms  bonding  fragmentation  orig_interval
-#  vis_mode
+#  aggregated_ogms  gw_bandwidth  hop_penalty
+#  bonding          gw_mode       orig_interval
+#  fragmentation    gw_sel_class  vis_mode
 
 
 There is a special folder for debugging informations:
 
 #  ls /sys/kernel/debug/batman_adv/bat0/
-#  originators  socket  transtable_global  transtable_local
-#  vis_data
+#  gateways     socket        transtable_global  vis_data
+#  originators  softif_neigh  transtable_local
 
 
 Some of the files contain all sort of status information  regard-
@@ -230,9 +231,8 @@ CONTACT
 Please send us comments, experiences, questions, anything :)
 
 IRC:            #batman   on   irc.freenode.org
-Mailing-list:   b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
-                (optional   subscription   at
-                 https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list:   b.a.t.m.a.n@open-mesh.org (optional  subscription
+          at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
 
 You can also contact the Authors:
 
index 5dc6387..25d2f41 100644 (file)
@@ -49,7 +49,8 @@ Table of Contents
 3.3    Configuring Bonding Manually with Ifenslave
 3.3.1          Configuring Multiple Bonds Manually
 3.4    Configuring Bonding Manually via Sysfs
-3.5    Overriding Configuration for Special Cases
+3.5    Configuration with Interfaces Support
+3.6    Overriding Configuration for Special Cases
 
 4. Querying Bonding Configuration
 4.1    Bonding Configuration
@@ -161,8 +162,8 @@ onwards) do not have /usr/include/linux symbolically linked to the
 default kernel source include directory.
 
 SECOND IMPORTANT NOTE:
-       If you plan to configure bonding using sysfs, you do not need
-to use ifenslave.
+       If you plan to configure bonding using sysfs or using the
+/etc/network/interfaces file, you do not need to use ifenslave.
 
 2. Bonding Driver Options
 =========================
@@ -779,22 +780,26 @@ resend_igmp
 
        You can configure bonding using either your distro's network
 initialization scripts, or manually using either ifenslave or the
-sysfs interface.  Distros generally use one of two packages for the
-network initialization scripts: initscripts or sysconfig.  Recent
-versions of these packages have support for bonding, while older
+sysfs interface.  Distros generally use one of three packages for the
+network initialization scripts: initscripts, sysconfig or interfaces.
+Recent versions of these packages have support for bonding, while older
 versions do not.
 
        We will first describe the options for configuring bonding for
-distros using versions of initscripts and sysconfig with full or
-partial support for bonding, then provide information on enabling
+distros using versions of initscripts, sysconfig and interfaces with full
+or partial support for bonding, then provide information on enabling
 bonding without support from the network initialization scripts (i.e.,
 older versions of initscripts or sysconfig).
 
-       If you're unsure whether your distro uses sysconfig or
-initscripts, or don't know if it's new enough, have no fear.
+       If you're unsure whether your distro uses sysconfig,
+initscripts or interfaces, or don't know if it's new enough, have no fear.
 Determining this is fairly straightforward.
 
-       First, issue the command:
+       First, look for a file called interfaces in the /etc/network directory.
+If this file is present on your system, then your system uses interfaces. See
+Configuration with Interfaces Support.
+
+       Otherwise, issue the command:
 
 $ rpm -qf /sbin/ifup
 
@@ -1327,8 +1332,62 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval
 echo +eth2 > /sys/class/net/bond1/bonding/slaves
 echo +eth3 > /sys/class/net/bond1/bonding/slaves
 
-3.5 Overriding Configuration for Special Cases
+3.5 Configuration with Interfaces Support
+-----------------------------------------
+
+        This section applies to distros which use the /etc/network/interfaces
+file to describe network interface configuration, most notably Debian and its
+derivatives.
+
+       The ifup and ifdown commands on Debian don't support bonding out of
+the box. The ifenslave-2.6 package should be installed to provide bonding
+support.  Once installed, this package will provide bond-* options to be used
+in /etc/network/interfaces.
+
+       Note that the ifenslave-2.6 package will load the bonding module and use
+the ifenslave command when appropriate.
+
+Example Configurations
+----------------------
+
+In /etc/network/interfaces, the following stanza will configure bond0, in
+active-backup mode, with eth0 and eth1 as slaves.
+
+auto bond0
+iface bond0 inet dhcp
+       bond-slaves eth0 eth1
+       bond-mode active-backup
+       bond-miimon 100
+       bond-primary eth0 eth1
+
+If the above configuration doesn't work, you might have a system using
+upstart for system startup. This is most notably true for recent
+Ubuntu versions. The following stanza in /etc/network/interfaces will
+produce the same result on those systems.
+
+auto bond0
+iface bond0 inet dhcp
+       bond-slaves none
+       bond-mode active-backup
+       bond-miimon 100
+
+auto eth0
+iface eth0 inet manual
+       bond-master bond0
+       bond-primary eth0 eth1
+
+auto eth1
+iface eth1 inet manual
+       bond-master bond0
+       bond-primary eth0 eth1
+
+For a full list of the bond-* options supported in /etc/network/interfaces, and
+some more advanced examples tailored to your particular distro, see the files in
+/usr/share/doc/ifenslave-2.6.
+
+3.6 Overriding Configuration for Special Cases
 ----------------------------------------------
+
 When using the bonding driver, the physical port which transmits a frame is
 typically selected by the bonding driver, and is not relevant to the user or
 system administrator.  The output port is simply selected using the policies of
index aefd1e6..04ca063 100644 (file)
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
        create  dns_resolver    foo:*   *       /usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
      returned also.
 
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
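
A hedged userspace sketch of that read path, using the keyctl_read_alloc()
convenience wrapper from libkeyutils; it assumes a request-key upcall is
configured for the dns_resolver type, and the hostname is only a placeholder:

  #include <stdio.h>
  #include <stdlib.h>
  #include <keyutils.h>          /* link with -lkeyutils */

  int main(void)
  {
        void *payload;
        long len;
        key_serial_t key;

        /* triggers the dns_resolver upcall if the key is not already cached */
        key = request_key("dns_resolver", "example.com", NULL,
                          KEY_SPEC_SESSION_KEYRING);
        if (key < 0)
                return 1;

        /* same data that "keyctl read" would print */
        len = keyctl_read_alloc(key, &payload);
        if (len < 0)
                return 1;

        printf("%.*s\n", (int)len, (char *)payload);
        free(payload);
        return 0;
  }
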
+
 =========
 MECHANISM
 =========
index d99940d..d3d653a 100644 (file)
@@ -187,7 +187,7 @@ tcp_cookie_size - INTEGER
 tcp_dsack - BOOLEAN
        Allows TCP to send "duplicate" SACKs.
 
-tcp_ecn - BOOLEAN
+tcp_ecn - INTEGER
        Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
        used when both ends of the TCP flow support it. It is useful to
        avoid losses due to congestion (when the bottleneck router supports
@@ -280,6 +280,17 @@ tcp_max_orphans - INTEGER
        more aggressively. Let me to remind again: each orphan eats
        up to ~64K of unswappable memory.
 
+tcp_max_ssthresh - INTEGER
+       Limited Slow-Start for TCP with large congestion windows (cwnd) defined in
+       Limited Slow-Start for TCP with large congestion windows (cwnd), as
+       defined in RFC 3742. Limited slow-start is a mechanism to limit growth
+       of the cwnd in the region where cwnd is larger than tcp_max_ssthresh.
+       TCP increases cwnd by at most tcp_max_ssthresh segments, and by at least
+       tcp_max_ssthresh/2 segments per RTT when the cwnd is above tcp_max_ssthresh.
+       If a TCP connection has increased its cwnd to thousands (or tens of
+       thousands) of segments, and thousands of packets were being dropped
+       during slow-start, you can set tcp_max_ssthresh to improve performance
+       for new TCP connections.
+       Default: 0 (off)
+
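
A minimal sketch of setting the new knob from a program, assuming the standard
/proc/sys path for net.ipv4 sysctls; the value 100 is only an illustrative
threshold in segments:

  #include <stdio.h>

  int main(void)
  {
        /* path assumes the usual procfs layout for net.ipv4 sysctls */
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_max_ssthresh", "w");

        if (!f)
                return 1;
        fprintf(f, "100\n");
        return fclose(f) ? 1 : 0;
  }
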
 tcp_max_syn_backlog - INTEGER
        Maximal number of remembered connection requests, which are
        still did not receive an acknowledgment from connecting client.
index 24ad2ad..8100358 100644 (file)
@@ -154,9 +154,28 @@ connections, one per accept()'d socket.
     write(cfd, msg, msglen);
   }
 
-Connections are established between two endpoints by a "third party"
-application. This means that both endpoints are passive; so connect()
-is not possible.
+Connections are traditionally established between two endpoints by a
+"third party" application. This means that both endpoints are passive.
+
+
+As of Linux kernel version 2.6.39, it is also possible to connect
+two endpoints directly, using connect() on the active side. This is
+intended to support the newer Nokia Wireless Modem API, as found in
+e.g. the Nokia Slim Modem in the ST-Ericsson U8500 platform:
+
+  struct sockaddr_spn spn;
+  int fd;
+
+  fd = socket(PF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
+  memset(&spn, 0, sizeof(spn));
+  spn.spn_family = AF_PHONET;
+  spn.spn_obj = ...;
+  spn.spn_dev = ...;
+  spn.spn_resource = 0xD9;
+  connect(fd, (struct sockaddr *)&spn, sizeof(spn));
+  /* normal I/O here ... */
+  close(fd);
+
 
 WARNING:
 When polling a connected pipe socket for writability, there is an
@@ -181,45 +200,9 @@ The pipe protocol provides two socket options at the SOL_PNPIPE level:
     interface index of the network interface created by PNPIPE_ENCAP,
     or zero if encapsulation is off.
 
-
-Phonet Pipe-controller Implementation
--------------------------------------
-
-Phonet Pipe-controller is enabled by selecting the CONFIG_PHONET_PIPECTRLR Kconfig
-option. It is useful when communicating with those Nokia Modems which do not
-implement Pipe controller in them e.g. Nokia Slim Modem used in ST-Ericsson
-U8500 platform.
-
-The implementation is based on the Data Connection Establishment Sequence
-depicted in 'Nokia Wireless Modem API - Wireless_modem_user_guide.pdf'
-document.
-
-It allows a phonet sequenced socket (host-pep) to initiate a Pipe connection
-between itself and a remote pipe-end point (e.g. modem).
-
-The implementation adds socket options at SOL_PNPIPE level:
-
- PNPIPE_PIPE_HANDLE
-       It accepts an integer argument for setting value of pipe handle.
-
-  PNPIPE_ENABLE accepts one integer value (int). If set to zero, the pipe
-    is disabled. If the value is non-zero, the pipe is enabled. If the pipe
-    is not (yet) connected, ENOTCONN is error is returned.
-
-The implementation also adds socket 'connect'. On calling the 'connect', pipe
-will be created between the source socket and the destination, and the pipe
-state will be set to PIPE_DISABLED.
-
-After a pipe has been created and enabled successfully, the Pipe data can be
-exchanged between the host-pep and remote-pep (modem).
-
-User-space would typically follow below sequence with Pipe controller:-
--socket
--bind
--setsockopt for PNPIPE_PIPE_HANDLE
--connect
--setsockopt for PNPIPE_ENCAP_IP
--setsockopt for PNPIPE_ENABLE
+  PNPIPE_HANDLE is a read-only integer value. It contains the underlying
+    identifier ("pipe handle") of the pipe. This is only defined for
+    socket descriptors that are already connected or being connected.
 
 
 Authors
index 01e6940..1cd5d51 100644 (file)
@@ -1,3 +1,7 @@
+Version 15 of schedstats dropped three sched_yield() counters:
+yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is
+identical to version 14.
+
 Version 14 of schedstats includes support for sched_domains, which hit the
 mainline kernel in 2.6.20 although it is identical to the stats from version
 12 which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel
@@ -28,32 +32,25 @@ to write their own scripts, the fields are described here.
 
 CPU statistics
 --------------
-cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12
-
-NOTE: In the sched_yield() statistics, the active queue is considered empty
-    if it has only one process in it, since obviously the process calling
-    sched_yield() is that process.
+cpu<N> 1 2 3 4 5 6 7 8 9
 
-First four fields are sched_yield() statistics:
-     1) # of times both the active and the expired queue were empty
-     2) # of times just the active queue was empty
-     3) # of times just the expired queue was empty
-     4) # of times sched_yield() was called
+First field is a sched_yield() statistic:
+     1) # of times sched_yield() was called
 
 Next three are schedule() statistics:
-     5) # of times we switched to the expired queue and reused it
-     6) # of times schedule() was called
-     7) # of times schedule() left the processor idle
+     2) # of times we switched to the expired queue and reused it
+     3) # of times schedule() was called
+     4) # of times schedule() left the processor idle
 
 Next two are try_to_wake_up() statistics:
-     8) # of times try_to_wake_up() was called
-     9) # of times try_to_wake_up() was called to wake up the local cpu
+     5) # of times try_to_wake_up() was called
+     6) # of times try_to_wake_up() was called to wake up the local cpu
 
 Next three are statistics describing scheduling latency:
-    10) sum of all time spent running by tasks on this processor (in jiffies)
-    11) sum of all time spent waiting to run by tasks on this processor (in
+     7) sum of all time spent running by tasks on this processor (in jiffies)
+     8) sum of all time spent waiting to run by tasks on this processor (in
         jiffies)
-    12) # of timeslices run on this cpu
+     9) # of timeslices run on this cpu
 
 
 Domain statistics
index 16ae430..0caf77e 100644 (file)
@@ -296,6 +296,7 @@ Conexant 5066
 =============
   laptop       Basic Laptop config (default)
   hp-laptop    HP laptops, e g G60
+  asus         Asus K52JU, Lenovo G560
   dell-laptop  Dell laptops
   dell-vostro  Dell Vostro
   olpc-xo-1_5  OLPC XO 1.5
index 996a27d..01c513f 100644 (file)
@@ -190,9 +190,9 @@ resources, scheduled and executed.
        * Long running CPU intensive workloads which can be better
          managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-       A freezeable wq participates in the freeze phase of the system
+       A freezable wq participates in the freeze phase of the system
        suspend operations.  Work items on the wq are drained and no
        new work item starts execution until thawed.
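
A short, hypothetical kernel-side sketch showing the renamed flag in use with
alloc_workqueue(); none of the names below come from this series:

  #include <linux/init.h>
  #include <linux/errno.h>
  #include <linux/module.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *example_wq;
  static struct work_struct example_work;

  static void example_work_fn(struct work_struct *work)
  {
        /* runs in process context; drained and frozen across system suspend */
  }

  static int __init example_init(void)
  {
        example_wq = alloc_workqueue("example", WQ_FREEZABLE, 0);
        if (!example_wq)
                return -ENOMEM;
        INIT_WORK(&example_work, example_work_fn);
        queue_work(example_wq, &example_work);
        return 0;
  }

  static void __exit example_exit(void)
  {
        /* flushes pending work items before tearing the workqueue down */
        destroy_workqueue(example_wq);
  }

  module_init(example_init);
  module_exit(example_exit);
  MODULE_LICENSE("GPL");
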
 
index 55592f8..a41c1e0 100644 (file)
@@ -885,7 +885,7 @@ S:  Supported
 
 ARM/QUALCOMM MSM MACHINE SUPPORT
 M:     David Brown <davidb@codeaurora.org>
-M:     Daniel Walker <dwalker@codeaurora.org>
+M:     Daniel Walker <dwalker@fifo99.com>
 M:     Bryan Huntsman <bryanh@codeaurora.org>
 L:     linux-arm-msm@vger.kernel.org
 F:     arch/arm/mach-msm/
@@ -978,6 +978,8 @@ S:  Maintained
 F:     arch/arm/plat-samsung/
 F:     arch/arm/plat-s3c24xx/
 F:     arch/arm/plat-s5p/
+F:     drivers/*/*s3c2410*
+F:     drivers/*/*/*s3c2410*
 
 ARM/S3C2410 ARM ARCHITECTURE
 M:     Ben Dooks <ben-linux@fluff.org>
@@ -1008,6 +1010,15 @@ L:       linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M:     Kyungmin Park <kyungmin.park@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-s5pv210/mach-aquila.c
+F:     arch/arm/mach-s5pv210/mach-goni.c
+F:     arch/arm/mach-exynos4/mach-universal_c210.c
+F:     arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M:     Kyungmin Park <kyungmin.park@samsung.com>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1203,7 +1214,7 @@ ATHEROS AR9170 WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@web.de>
 L:     linux-wireless@vger.kernel.org
 W:     http://wireless.kernel.org/en/users/Drivers/ar9170
-S:     Maintained
+S:     Obsolete
 F:     drivers/net/wireless/ath/ar9170/
 
 CARL9170 LINUX COMMUNITY WIRELESS DRIVER
@@ -1465,6 +1476,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
+M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -1690,7 +1702,15 @@ M:       Andy Whitcroft <apw@canonical.com>
 S:     Supported
 F:     scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M:     Harry Wei <harryxiyou@gmail.com>
+L:     xiyoulinuxkernelgroup@googlegroups.com
+L:     linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
+M:     Christian Benvenuti <benve@cisco.com>
 M:     Vasanthy Kolluri <vkolluri@cisco.com>
 M:     Roopa Prabhu <roprabhu@cisco.com>
 M:     David Wang <dwang2@cisco.com>
@@ -2024,7 +2044,7 @@ F:        Documentation/scsi/dc395x.txt
 F:     drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:     Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:     dccp@vger.kernel.org
 W:     http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:     Maintained
@@ -2124,6 +2144,7 @@ S:        Supported
 F:     fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
+M:     Vinod Koul <vinod.koul@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
 S:     Supported
 F:     drivers/dma/
@@ -2772,6 +2793,15 @@ F:       Documentation/isdn/README.gigaset
 F:     drivers/isdn/gigaset/
 F:     include/linux/gigaset_dev.h
 
+GPIO SUBSYSTEM
+M:     Grant Likely <grant.likely@secretlab.ca>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+T:     git git://git.secretlab.ca/git/linux-2.6.git
+F:     Documentation/gpio/gpio.txt
+F:     drivers/gpio/
+F:     include/linux/gpio*
+
 GRETH 10/100/1G Ethernet MAC device driver
 M:     Kristoffer Glembo <kristoffer@gaisler.com>
 L:     netdev@vger.kernel.org
@@ -2861,7 +2891,6 @@ M:        Guenter Roeck <guenter.roeck@ericsson.com>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
-T:     quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
@@ -3139,6 +3168,12 @@ S:       Maintained
 F:     net/ieee802154/
 F:     drivers/ieee802154/
 
+IKANOS/ADI EAGLE ADSL USB DRIVER
+M:     Matthieu Castet <castet.matthieu@free.fr>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
+S:     Maintained
+F:     drivers/usb/atm/ueagle-atm.c
+
 INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
 M:     Mimi Zohar <zohar@us.ibm.com>
 S:     Supported
@@ -3327,7 +3362,6 @@ F:        drivers/net/wimax/i2400m/
 F:     include/linux/wimax/i2400m.h
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:     Reinette Chatre <reinette.chatre@intel.com>
 M:     Wey-Yi Guy <wey-yi.w.guy@intel.com>
 M:     Intel Linux Wireless <ilw@linux.intel.com>
 L:     linux-wireless@vger.kernel.org
@@ -3496,7 +3530,7 @@ F:        drivers/hwmon/jc42.c
 F:     Documentation/hwmon/jc42
 
 JFS FILESYSTEM
-M:     Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M:     Dave Kleikamp <shaggy@kernel.org>
 L:     jfs-discussion@lists.sourceforge.net
 W:     http://jfs.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -3674,6 +3708,28 @@ F:       include/linux/key-type.h
 F:     include/keys/
 F:     security/keys/
 
+KEYS-TRUSTED
+M:     David Safford <safford@watson.ibm.com>
+M:     Mimi Zohar <zohar@us.ibm.com>
+L:     linux-security-module@vger.kernel.org
+L:     keyrings@linux-nfs.org
+S:     Supported
+F:     Documentation/keys-trusted-encrypted.txt
+F:     include/keys/trusted-type.h
+F:     security/keys/trusted.c
+F:     security/keys/trusted.h
+
+KEYS-ENCRYPTED
+M:     Mimi Zohar <zohar@us.ibm.com>
+M:     David Safford <safford@watson.ibm.com>
+L:     linux-security-module@vger.kernel.org
+L:     keyrings@linux-nfs.org
+S:     Supported
+F:     Documentation/keys-trusted-encrypted.txt
+F:     include/keys/encrypted-type.h
+F:     security/keys/encrypted.c
+F:     security/keys/encrypted.h
+
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
 W:     http://kgdb.wiki.kernel.org/
@@ -4237,10 +4293,7 @@ S:       Maintained
 F:     net/sched/sch_netem.c
 
 NETERION 10GbE DRIVERS (s2io/vxge)
-M:     Ramkrishna Vepa <ramkrishna.vepa@exar.com>
-M:     Sivakumar Subramani <sivakumar.subramani@exar.com>
-M:     Sreenivasa Honnur <sreenivasa.honnur@exar.com>
-M:     Jon Mason <jon.mason@exar.com>
+M:     Jon Mason <jdmason@kudzu.us>
 L:     netdev@vger.kernel.org
 W:     http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
 W:     http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -4562,7 +4615,7 @@ F:        drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Grant Likely <grant.likely@secretlab.ca>
-L:     devicetree-discuss@lists.ozlabs.org
+L:     devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
 W:     http://fdt.secretlab.ca
 T:     git git://git.secretlab.ca/git/linux-2.6.git
 S:     Maintained
@@ -5106,6 +5159,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
 P:     rt2x00 project
 M:     Ivo van Doorn <IvDoorn@gmail.com>
 M:     Gertjan van Wingerde <gwingerde@gmail.com>
+M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 L:     users@rt2x00.serialmonkey.com (moderated for non-subscribers)
 W:     http://rt2x00.serialmonkey.com/
@@ -5126,6 +5180,7 @@ F:        drivers/char/random.c
 
 RAPIDIO SUBSYSTEM
 M:     Matt Porter <mporter@kernel.crashing.org>
+M:     Alexandre Bounine <alexandre.bounine@idt.com>
 S:     Maintained
 F:     drivers/rapidio/
 
@@ -5228,7 +5283,7 @@ S:        Maintained
 F:     drivers/net/wireless/rtl818x/rtl8180/
 
 RTL8187 WIRELESS DRIVER
-M:     Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:     Herton Ronaldo Krzesinski <herton@canonical.com>
 M:     Hin-Tak Leung <htl10@users.sourceforge.net>
 M:     Larry Finger <Larry.Finger@lwfinger.net>
 L:     linux-wireless@vger.kernel.org
@@ -5522,12 +5577,11 @@ S:      Supported
 F:     drivers/scsi/be2iscsi/
 
 SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
-M:     Sathya Perla <sathyap@serverengines.com>
-M:     Subbu Seetharaman <subbus@serverengines.com>
-M:     Sarveshwar Bandi <sarveshwarb@serverengines.com>
-M:     Ajit Khaparde <ajitk@serverengines.com>
+M:     Sathya Perla <sathya.perla@emulex.com>
+M:     Subbu Seetharaman <subbu.seetharaman@emulex.com>
+M:     Ajit Khaparde <ajit.khaparde@emulex.com>
 L:     netdev@vger.kernel.org
-W:     http://www.serverengines.com
+W:     http://www.emulex.com
 S:     Supported
 F:     drivers/net/benet/
 
@@ -5587,18 +5641,20 @@ F:      include/linux/sfi*.h
 
 SIMTEC EB110ATX (Chalice CATS)
 P:     Ben Dooks
-M:     Vincent Sanders <support@simtec.co.uk>
+P:     Vincent Sanders <vince@simtec.co.uk>
+M:     Simtec Linux Team <linux@simtec.co.uk>
 W:     http://www.simtec.co.uk/products/EB110ATX/
 S:     Supported
 
 SIMTEC EB2410ITX (BAST)
 P:     Ben Dooks
-M:     Vincent Sanders <support@simtec.co.uk>
+P:     Vincent Sanders <vince@simtec.co.uk>
+M:     Simtec Linux Team <linux@simtec.co.uk>
 W:     http://www.simtec.co.uk/products/EB2410ITX/
 S:     Supported
-F:     arch/arm/mach-s3c2410/
-F:     drivers/*/*s3c2410*
-F:     drivers/*/*/*s3c2410*
+F:     arch/arm/mach-s3c2410/mach-bast.c
+F:     arch/arm/mach-s3c2410/bast-ide.c
+F:     arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:     Kevin Hilman <khilman@deeprootsystems.com>
@@ -6027,13 +6083,11 @@ F:      sound/soc/codecs/twl4030*
 TIPC NETWORK LAYER
 M:     Jon Maloy <jon.maloy@ericsson.com>
 M:     Allan Stephens <allan.stephens@windriver.com>
-L:     tipc-discussion@lists.sourceforge.net
+L:     netdev@vger.kernel.org (core kernel code)
+L:     tipc-discussion@lists.sourceforge.net (user apps, general discussion)
 W:     http://tipc.sourceforge.net/
-W:     http://tipc.cslab.ericsson.net/
-T:     git git://tipc.cslab.ericsson.net/pub/git/tipc.git
 S:     Maintained
 F:     include/linux/tipc*.h
-F:     include/net/tipc/
 F:     net/tipc/
 
 TILE ARCHITECTURE
@@ -6065,7 +6119,7 @@ S:        Maintained
 F:     security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
-M:     Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:     Herton Ronaldo Krzesinski <herton@canonical.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/topstar-laptop.c
@@ -6573,6 +6627,16 @@ S:       Maintained
 F:     drivers/char/virtio_console.c
 F:     include/linux/virtio_console.h
 
+VIRTIO CORE, NET AND BLOCK DRIVERS
+M:     Rusty Russell <rusty@rustcorp.com.au>
+M:     "Michael S. Tsirkin" <mst@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/virtio/
+F:     drivers/net/virtio_net.c
+F:     drivers/block/virtio_blk.c
+F:     include/linux/virtio_*.h
+
 VIRTIO HOST (VHOST)
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 L:     kvm@vger.kernel.org
@@ -6746,12 +6810,12 @@ S:      Maintained
 F:     drivers/net/wireless/wl1251/*
 
 WL1271 WIRELESS DRIVER
-M:     Luciano Coelho <luciano.coelho@nokia.com>
+M:     Luciano Coelho <coelho@ti.com>
 L:     linux-wireless@vger.kernel.org
-W:     http://wireless.kernel.org
+W:     http://wireless.kernel.org/en/users/Drivers/wl12xx
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
 S:     Maintained
-F:     drivers/net/wireless/wl12xx/wl1271*
+F:     drivers/net/wireless/wl12xx/
 F:     include/linux/wl12xx.h
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
index 1f47495..504f788 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc8
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index 47f63d4..cc31bec 100644 (file)
@@ -11,6 +11,7 @@ config ALPHA
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
        select AUTO_IRQ_AFFINITY if SMP
+       select GENERIC_HARDIRQS_NO_DEPRECATED
        help
          The Alpha is a 64-bit general-purpose processor designed and
          marketed by the Digital Equipment Corporation of blessed memory,
index 9ab234f..a19d600 100644 (file)
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc[irq];
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct irq_chip *chip;
        static int last_cpu;
        int cpu = last_cpu + 1;
 
-       if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+       if (!data)
+               return 1;
+       chip = irq_data_get_irq_chip(data);
+
+       if (!chip->irq_set_affinity || irq_user_affinity[irq])
                return 1;
 
        while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       cpumask_copy(desc->affinity, cpumask_of(cpu));
-       get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+       cpumask_copy(data->affinity, cpumask_of(cpu));
+       chip->irq_set_affinity(data, cpumask_of(cpu), false);
        return 0;
 }
 #endif /* CONFIG_SMP */
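
The Alpha changes here and below repeat one pattern: irq_chip callbacks take a
struct irq_data * instead of a bare irq number, and descriptor flags are set
via irq_set_status_flags(). A self-contained sketch of that pattern, with
hypothetical names:

  #include <linux/init.h>
  #include <linux/irq.h>

  /* stand-in for whatever register write masks or unmasks the line */
  static void example_hw_set_mask(unsigned int irq, int masked)
  {
        /* program the platform's interrupt mask register here */
  }

  static void example_irq_mask(struct irq_data *d)
  {
        /* the Linux irq number is still available as d->irq */
        example_hw_set_mask(d->irq, 1);
  }

  static void example_irq_unmask(struct irq_data *d)
  {
        example_hw_set_mask(d->irq, 0);
  }

  static struct irq_chip example_irq_chip = {
        .name           = "EXAMPLE",
        .irq_mask       = example_irq_mask,
        .irq_mask_ack   = example_irq_mask,
        .irq_unmask     = example_irq_unmask,
  };

  static void __init example_init_irqs(unsigned int first, unsigned int last)
  {
        unsigned int i;

        for (i = first; i <= last; ++i) {
                set_irq_chip_and_handler(i, &example_irq_chip, handle_level_irq);
                irq_set_status_flags(i, IRQ_LEVEL);
        }
  }
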
index 2d0679b..411ca11 100644 (file)
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-       struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-       if (desc) {
-               desc->status |= IRQ_DISABLED;
-               set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-                       handle_simple_irq, "RTC");
-               setup_irq(RTC_IRQ, &timer_irqaction);
-       }
+       set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+                                     handle_simple_irq, "RTC");
+       setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions.  */
index 956ea0e..c7cc981 100644 (file)
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
        spin_lock(&i8259_irq_lock);
-       i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+       i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
        spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
        spin_lock(&i8259_irq_lock);
-       __i8259a_disable_irq(irq);
+       __i8259a_disable_irq(d->irq);
        spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        spin_lock(&i8259_irq_lock);
        __i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
        .name           = "XT-PIC",
-       .unmask         = i8259a_enable_irq,
-       .mask           = i8259a_disable_irq,
-       .mask_ack       = i8259a_mask_and_ack_irq,
+       .irq_unmask     = i8259a_enable_irq,
+       .irq_mask       = i8259a_disable_irq,
+       .irq_mask_ack   = i8259a_mask_and_ack_irq,
 };
 
 void __init
index b63ccd7..d507a23 100644 (file)
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
index 2863458..b30227f 100644 (file)
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-       pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-       pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-       unsigned long bit = 1UL << (irq - 16);
+       unsigned long bit = 1UL << (d->irq - 16);
        unsigned long mask = cached_irq_mask &= ~bit;
 
        /* Disable the interrupt.  */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
        .name           = "PYXIS",
-       .mask_ack       = pyxis_mask_and_ack_irq,
-       .mask           = pyxis_disable_irq,
-       .unmask         = pyxis_enable_irq,
+       .irq_mask_ack   = pyxis_mask_and_ack_irq,
+       .irq_mask       = pyxis_disable_irq,
+       .irq_unmask     = pyxis_enable_irq,
 };
 
 void 
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
                if ((ignore_mask >> i) & 1)
                        continue;
                set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        setup_irq(16+7, &isa_cascade_irqaction);
index 0e57e82..82a47bb 100644 (file)
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
        spin_lock(&srm_irq_lock);
-       cserve_ena(irq - 16);
+       cserve_ena(d->irq - 16);
        spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
        spin_lock(&srm_irq_lock);
-       cserve_dis(irq - 16);
+       cserve_dis(d->irq - 16);
        spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
 static struct irq_chip srm_irq_type = {
        .name           = "SRM",
-       .unmask         = srm_enable_irq,
-       .mask           = srm_disable_irq,
-       .mask_ack       = srm_disable_irq,
+       .irq_unmask     = srm_enable_irq,
+       .irq_mask       = srm_disable_irq,
+       .irq_mask_ack   = srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
                if (i < 64 && ((ignore_mask >> i) & 1))
                        continue;
                set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
index 7bef617..88d95e8 100644 (file)
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-       alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-       alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-       alcor_disable_irq(irq);
+       alcor_disable_irq(d);
 
        /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-       *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+       *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
        *(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-       i8259a_mask_and_ack_irq(irq);
+       i8259a_mask_and_ack_irq(d);
 
        /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
        *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
        .name           = "ALCOR",
-       .unmask         = alcor_enable_irq,
-       .mask           = alcor_disable_irq,
-       .mask_ack       = alcor_mask_and_ack_irq,
+       .irq_unmask     = alcor_enable_irq,
+       .irq_mask       = alcor_disable_irq,
+       .irq_mask_ack   = alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
                if (i >= 16+20 && i <= 16+30)
                        continue;
                set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
-       i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+       i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
        init_i8259a_irqs();
        common_init_isa_dma();
index b0c9164..57eb630 100644 (file)
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-       cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+       cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-       cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+       cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
        .name           = "CABRIOLET",
-       .unmask         = cabriolet_enable_irq,
-       .mask           = cabriolet_disable_irq,
-       .mask_ack       = cabriolet_disable_irq,
+       .irq_unmask     = cabriolet_enable_irq,
+       .irq_mask       = cabriolet_disable_irq,
+       .irq_mask_ack   = cabriolet_disable_irq,
 };
 
 static void 
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
                for (i = 16; i < 35; ++i) {
                        set_irq_chip_and_handler(i, &cabriolet_irq_type,
                                handle_level_irq);
-                       irq_to_desc(i)->status |= IRQ_LEVEL;
+                       irq_set_status_flags(i, IRQ_LEVEL);
                }
        }
 
index edad5f7..481df4e 100644 (file)
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask |= 1UL << irq;
+       cached_irq_mask |= 1UL << d->irq;
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask &= ~(1UL << irq);
+       cached_irq_mask &= ~(1UL << d->irq);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask |= 1UL << (irq - 16);
+       cached_irq_mask |= 1UL << (d->irq - 16);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask &= ~(1UL << (irq - 16));
+       cached_irq_mask &= ~(1UL << (d->irq - 16));
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                  bool force)
+{
        spin_lock(&dp264_irq_lock);
-       cpu_set_irq_affinity(irq, *affinity);
+       cpu_set_irq_affinity(d->irq, *affinity);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                    bool force)
+{
        spin_lock(&dp264_irq_lock);
-       cpu_set_irq_affinity(irq - 16, *affinity);
+       cpu_set_irq_affinity(d->irq - 16, *affinity);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
-       .name           = "DP264",
-       .unmask         = dp264_enable_irq,
-       .mask           = dp264_disable_irq,
-       .mask_ack       = dp264_disable_irq,
-       .set_affinity   = dp264_set_affinity,
+       .name                   = "DP264",
+       .irq_unmask             = dp264_enable_irq,
+       .irq_mask               = dp264_disable_irq,
+       .irq_mask_ack           = dp264_disable_irq,
+       .irq_set_affinity       = dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
-       .name           = "CLIPPER",
-       .unmask         = clipper_enable_irq,
-       .mask           = clipper_disable_irq,
-       .mask_ack       = clipper_disable_irq,
-       .set_affinity   = clipper_set_affinity,
+       .name                   = "CLIPPER",
+       .irq_unmask             = clipper_enable_irq,
+       .irq_mask               = clipper_disable_irq,
+       .irq_mask_ack           = clipper_disable_irq,
+       .irq_set_affinity       = clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
index ae5f29d..402e908 100644 (file)
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-       eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+       eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-       eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+       eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
        .name           = "EB64P",
-       .unmask         = eb64p_enable_irq,
-       .mask           = eb64p_disable_irq,
-       .mask_ack       = eb64p_disable_irq,
+       .irq_unmask     = eb64p_enable_irq,
+       .irq_mask       = eb64p_disable_irq,
+       .irq_mask_ack   = eb64p_disable_irq,
 };
 
 static void 
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
        init_i8259a_irqs();
 
        for (i = 16; i < 32; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-       }               
+               irq_set_status_flags(i, IRQ_LEVEL);
+       }
 
        common_init_isa_dma();
        setup_irq(16+5, &isa_cascade_irqaction);
index 1121bc5..0b44a54 100644 (file)
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
        eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
        eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
        .name           = "EIGER",
-       .unmask         = eiger_enable_irq,
-       .mask           = eiger_disable_irq,
-       .mask_ack       = eiger_disable_irq,
+       .irq_unmask     = eiger_enable_irq,
+       .irq_mask       = eiger_disable_irq,
+       .irq_mask_ack   = eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
        init_i8259a_irqs();
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
index 34f55e0..00341b7 100644 (file)
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_enable_irq(1);
+       if (d->irq == 7)
+               i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_disable_irq(1);
+       if (d->irq == 7)
+               i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_mask_and_ack_irq(1);
+       if (d->irq == 7)
+               i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
        .name           = "LOCAL",
-       .unmask         = jensen_local_enable,
-       .mask           = jensen_local_disable,
-       .mask_ack       = jensen_local_mask_ack,
+       .irq_unmask     = jensen_local_enable,
+       .irq_mask       = jensen_local_disable,
+       .irq_mask_ack   = jensen_local_mask_ack,
 };
 
 static void 
index 2bfc9f1..e619107 100644 (file)
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
        volatile unsigned long *ctl;
+       unsigned int irq = d->irq;
        struct io7 *io7;
 
        ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
                       __func__, irq);
                return;
        }
-               
+
        spin_lock(&io7->irq_lock);
        *ctl |= 1UL << 24;
        mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
        volatile unsigned long *ctl;
+       unsigned int irq = d->irq;
        struct io7 *io7;
 
        ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
                       __func__, irq);
                return;
        }
-               
+
        spin_lock(&io7->irq_lock);
        *ctl &= ~(1UL << 24);
        mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq) 
-{ 
-       return; 
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq) 
-{ 
-       return 0; 
+marvel_irq_noop(struct irq_data *d)
+{
+       return;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
        .name           = "LEGACY",
-       .mask           = marvel_irq_noop,
-       .unmask         = marvel_irq_noop,
+       .irq_mask       = marvel_irq_noop,
+       .irq_unmask     = marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
        .name           = "LSI",
-       .unmask         = io7_enable_irq,
-       .mask           = io7_disable_irq,
-       .mask_ack       = io7_disable_irq,
+       .irq_unmask     = io7_enable_irq,
+       .irq_mask       = io7_disable_irq,
+       .irq_mask_ack   = io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
        .name           = "MSI",
-       .unmask         = io7_enable_irq,
-       .mask           = io7_disable_irq,
-       .ack            = marvel_irq_noop,
+       .irq_unmask     = io7_enable_irq,
+       .irq_mask       = io7_disable_irq,
+       .irq_ack        = marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
        /* Set up the lsi irqs.  */
        for (i = 0; i < 128; ++i) {
-               irq_to_desc(base + i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        /* Disable the implemented irqs in hardware.  */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
        /* Set up the msi irqs.  */
        for (i = 128; i < (128 + 512); ++i) {
-               irq_to_desc(base + i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        for (i = 0; i < 16; ++i)
index bcc1639..cf7f43d 100644 (file)
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-       mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+       mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-       mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+       mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
        .name           = "MIKASA",
-       .unmask         = mikasa_enable_irq,
-       .mask           = mikasa_disable_irq,
-       .mask_ack       = mikasa_disable_irq,
+       .irq_unmask     = mikasa_enable_irq,
+       .irq_mask       = mikasa_disable_irq,
+       .irq_mask_ack   = mikasa_disable_irq,
 };
 
 static void 
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
        mikasa_update_irq_hw(0);
 
        for (i = 16; i < 32; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index e88f4ae..92bc188 100644 (file)
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-       noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+       noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-       noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+       noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
        .name           = "NORITAKE",
-       .unmask         = noritake_enable_irq,
-       .mask           = noritake_disable_irq,
-       .mask_ack       = noritake_disable_irq,
+       .irq_unmask     = noritake_enable_irq,
+       .irq_mask       = noritake_disable_irq,
+       .irq_mask_ack   = noritake_disable_irq,
 };
 
 static void 
@@ -127,8 +127,8 @@ noritake_init_irq(void)
        outw(0, 0x54c);
 
        for (i = 16; i < 48; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index 6a51364..936d414 100644 (file)
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
   (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void 
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
        unsigned int mask, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void 
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
        unsigned int mask, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
        unsigned int mask, mask1, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
        .name           = "RAWHIDE",
-       .unmask         = rawhide_enable_irq,
-       .mask           = rawhide_disable_irq,
-       .mask_ack       = rawhide_mask_and_ack_irq,
+       .irq_unmask     = rawhide_enable_irq,
+       .irq_mask       = rawhide_disable_irq,
+       .irq_mask_ack   = rawhide_mask_and_ack_irq,
 };
 
 static void 
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
        }
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index 89e7e37..cea22a6 100644 (file)
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-       rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-       rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
        .name           = "RX164",
-       .unmask         = rx164_enable_irq,
-       .mask           = rx164_disable_irq,
-       .mask_ack       = rx164_disable_irq,
+       .irq_unmask     = rx164_enable_irq,
+       .irq_mask       = rx164_disable_irq,
+       .irq_mask_ack   = rx164_disable_irq,
 };
 
 static void 
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
        rx164_update_irq_hw(0);
        for (i = 16; i < 40; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
index 5c4423d..a349538 100644 (file)
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
        .name           = "SABLE/LYNX",
-       .unmask         = sable_lynx_enable_irq,
-       .mask           = sable_lynx_disable_irq,
-       .mask_ack       = sable_lynx_mask_and_ack_irq,
+       .irq_unmask     = sable_lynx_enable_irq,
+       .irq_mask       = sable_lynx_disable_irq,
+       .irq_mask_ack   = sable_lynx_mask_and_ack_irq,
 };
 
 static void 
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
        long i;
 
        for (i = 0; i < nr_of_irqs; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &sable_lynx_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        common_init_isa_dma();
index f8a1e8a..42a5331 100644 (file)
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
        takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
        takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
        .name           = "TAKARA",
-       .unmask         = takara_enable_irq,
-       .mask           = takara_disable_irq,
-       .mask_ack       = takara_disable_irq,
+       .irq_unmask     = takara_enable_irq,
+       .irq_mask       = takara_disable_irq,
+       .irq_mask_ack   = takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
                takara_update_irq_hw(i, -1);
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        common_init_isa_dma();
index e02494b..8c13a0c 100644 (file)
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask |= 1UL << (irq - 16);
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask &= ~(1UL << (irq - 16));
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+                      bool force)
 { 
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cpu_set_irq_affinity(irq - 16, *affinity);
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
 static struct irq_chip titan_irq_type = {
-       .name           = "TITAN",
-       .unmask         = titan_enable_irq,
-       .mask           = titan_disable_irq,
-       .mask_ack       = titan_disable_irq,
-       .set_affinity   = titan_set_irq_affinity,
+       .name                   = "TITAN",
+       .irq_unmask             = titan_enable_irq,
+       .irq_mask               = titan_disable_irq,
+       .irq_mask_ack           = titan_disable_irq,
+       .irq_set_affinity       = titan_set_irq_affinity,
 };
 
 static irqreturn_t
index eec5259..ca60a38 100644 (file)
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
 }
 
 static void
-wildfire_enable_irq(unsigned int irq)
+wildfire_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_enable_irq(irq);
+               i8259a_enable_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
 }
 
 static void
-wildfire_disable_irq(unsigned int irq)
+wildfire_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_disable_irq(irq);
+               i8259a_disable_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
 }
 
 static void
-wildfire_mask_and_ack_irq(unsigned int irq)
+wildfire_mask_and_ack_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_mask_and_ack_irq(irq);
+               i8259a_mask_and_ack_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip wildfire_irq_type = {
        .name           = "WILDFIRE",
-       .unmask         = wildfire_enable_irq,
-       .mask           = wildfire_disable_irq,
-       .mask_ack       = wildfire_mask_and_ack_irq,
+       .irq_unmask     = wildfire_enable_irq,
+       .irq_mask       = wildfire_disable_irq,
+       .irq_mask_ack   = wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
        for (i = 0; i < 16; ++i) {
                if (i == 2)
                        continue;
-               irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
        }
 
-       irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
                handle_level_irq);
+       irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
        for (i = 40; i < 64; ++i) {
-               irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
        }
 
-       setup_irq(32+irq_bias, &isa_enable);    
+       setup_irq(32+irq_bias, &isa_enable);
 }
 
 static void __init
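
All of the Alpha hunks above follow the same genirq conversion: the chip callbacks now take a struct irq_data * and read the Linux IRQ number from d->irq, the struct irq_chip fields gain the irq_ prefix, and IRQ_LEVEL is set through irq_set_status_flags() instead of poking irq_to_desc()->status directly. A minimal sketch of the resulting shape, with hypothetical example_* names standing in for the per-platform mask bookkeeping:

#include <linux/irq.h>

static unsigned long example_cached_mask = ~0UL;

/* Hypothetical: would write the platform's interrupt mask register. */
static void example_update_hw(unsigned long mask)
{
}

static void example_irq_unmask(struct irq_data *d)
{
	example_update_hw(example_cached_mask &= ~(1UL << (d->irq - 16)));
}

static void example_irq_mask(struct irq_data *d)
{
	example_update_hw(example_cached_mask |= 1UL << (d->irq - 16));
}

static struct irq_chip example_irq_chip = {
	.name		= "EXAMPLE",
	.irq_unmask	= example_irq_unmask,
	.irq_mask	= example_irq_mask,
	.irq_mask_ack	= example_irq_mask,
};

static void __init example_init_irq(void)
{
	long i;

	for (i = 16; i < 48; ++i) {
		set_irq_chip_and_handler(i, &example_irq_chip, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}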
index 5cff165..166efa2 100644 (file)
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622
          visible impact on the overall performance or power consumption of the
          processor.
 
+config ARM_ERRATA_751472
+       bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 751472 Cortex-A9 (prior
+         to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
+         completion of a following broadcasted operation if the second
+         operation is received by a CPU before the ICIALLUIS has completed,
+         potentially leading to corrupted entries in the cache or TLB.
+
+config ARM_ERRATA_753970
+       bool "ARM errata: cache sync operation may be faulty"
+       depends on CACHE_PL310
+       help
+         This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+
+         Under some conditions the effect of the cache sync operation on
+         the store buffer still remains when the operation completes. This
+         means that the store buffer is always asked to drain and is
+         prevented from merging any further writes. The workaround is to
+         replace the normal offset of the cache sync operation (0x730)
+         with another offset targeting an unmapped PL310 register (0x740).
+         This has the same effect as the cache sync operation: the store
+         buffer is drained and we wait for all buffers to empty.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1391,7 +1416,7 @@ config AEABI
 
 config OABI_COMPAT
        bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
-       depends on AEABI && EXPERIMENTAL
+       depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
        default y
        help
          This option preserves the old syscall interface along with the
index c22c1ad..6f7b292 100644 (file)
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux        += --be8
 endif
 
-OBJCOPYFLAGS   :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS   :=-O binary -R .comment -S
 GZFLAGS                :=-9
 #KBUILD_CFLAGS +=-pipe
 # Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:
index ab204db..c602896 100644 (file)
@@ -1,3 +1,7 @@
 font.c
-piggy.gz
+lib1funcs.S
+piggy.gzip
+piggy.lzo
+piggy.lzma
+vmlinux
 vmlinux.lds
index 778655f..ea5ee4d 100644 (file)
@@ -6,6 +6,8 @@ config ARM_VIC
 
 config ARM_VIC_NR
        int
+       default 4 if ARCH_S5PV210
+       default 3 if ARCH_S5P6442 || ARCH_S5PC100
        default 2
        depends on ARM_VIC
        help
index 5aeec1e..16bd480 100644 (file)
@@ -36,6 +36,7 @@
 #define L2X0_RAW_INTR_STAT             0x21C
 #define L2X0_INTR_CLEAR                        0x220
 #define L2X0_CACHE_SYNC                        0x730
+#define L2X0_DUMMY_REG                 0x740
 #define L2X0_INV_LINE_PA               0x770
 #define L2X0_INV_WAY                   0x77C
 #define L2X0_CLEAN_LINE_PA             0x7B0
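
The L2X0_DUMMY_REG offset added here is the other half of the ARM_ERRATA_753970 option introduced above: writing the unmapped 0x740 register still drains the PL310 store buffer without tripping the r3p0 cache-sync erratum. A hedged sketch of how a cache-sync helper could select the offset (the helper name and the compile-time selection are illustrative, not part of this patch):

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

#ifdef CONFIG_ARM_ERRATA_753970
#define EXAMPLE_SYNC_OFFSET	L2X0_DUMMY_REG	/* unmapped, still drains */
#else
#define EXAMPLE_SYNC_OFFSET	L2X0_CACHE_SYNC
#endif

/* Assumes l2x0_base was ioremap()ed by the platform's cache init code. */
static inline void example_cache_sync(void __iomem *l2x0_base)
{
	writel_relaxed(0, l2x0_base + EXAMPLE_SYNC_OFFSET);
}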
index a101f10..e0d1c0c 100644 (file)
 #define SCPCELLID2             0xFF8
 #define SCPCELLID3             0xFFC
 
+#define SCCTRL_TIMEREN0SEL_REFCLK      (0 << 15)
+#define SCCTRL_TIMEREN0SEL_TIMCLK      (1 << 15)
+
+#define SCCTRL_TIMEREN1SEL_REFCLK      (0 << 17)
+#define SCCTRL_TIMEREN1SEL_TIMCLK      (1 << 17)
+
 static inline void sysctl_soft_reset(void __iomem *base)
 {
+       /* switch to slow mode */
+       writel(0x2, base + SCCTRL);
+
        /* writing any value to SCSYSSTAT reg will reset system */
        writel(0, base + SCSYSSTAT);
 }
index 20e0f7c..d66605d 100644 (file)
@@ -95,6 +95,15 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
        return (void __iomem *)addr;
 }
 
+/* IO barriers */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
@@ -125,17 +134,17 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
  * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
  */
 #ifdef __io
-#define outb(v,p)              __raw_writeb(v,__io(p))
-#define outw(v,p)              __raw_writew((__force __u16) \
-                                       cpu_to_le16(v),__io(p))
-#define outl(v,p)              __raw_writel((__force __u32) \
-                                       cpu_to_le32(v),__io(p))
+#define outb(v,p)      ({ __iowmb(); __raw_writeb(v,__io(p)); })
+#define outw(v,p)      ({ __iowmb(); __raw_writew((__force __u16) \
+                                       cpu_to_le16(v),__io(p)); })
+#define outl(v,p)      ({ __iowmb(); __raw_writel((__force __u32) \
+                                       cpu_to_le32(v),__io(p)); })
 
-#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __v; })
+#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
 #define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \
-                       __raw_readw(__io(p))); __v; })
+                       __raw_readw(__io(p))); __iormb(); __v; })
 #define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \
-                       __raw_readl(__io(p))); __v; })
+                       __raw_readl(__io(p))); __iormb(); __v; })
 
 #define outsb(p,d,l)           __raw_writesb(__io(p),d,l)
 #define outsw(p,d,l)           __raw_writesw(__io(p),d,l)
@@ -192,14 +201,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define writel_relaxed(v,c)    ((void)__raw_writel((__force u32) \
                                        cpu_to_le32(v),__mem_pci(c)))
 
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define __iormb()              rmb()
-#define __iowmb()              wmb()
-#else
-#define __iormb()              do { } while (0)
-#define __iowmb()              do { } while (0)
-#endif
-
 #define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
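
With the barriers moved ahead of the port accessor definitions, the x86-style inb()/outb() family now carries the same ordering guarantees as readl()/writel() when CONFIG_ARM_DMA_MEM_BUFFERABLE is enabled. A small illustrative fragment (driver, port, and buffer names are hypothetical):

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

/* inb() now expands to __raw_readb() followed by __iormb(), so the status
 * read is ordered before the subsequent access to the DMA buffer. */
static int example_poll_and_read(unsigned long port, const u8 *dma_buf)
{
	u8 status = inb(port);

	if (!(status & 0x01))
		return -EAGAIN;

	return dma_buf[0];	/* ordered after the completed port read */
}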
index 3a0893a..bf13b81 100644 (file)
@@ -15,10 +15,6 @@ struct meminfo;
 struct sys_timer;
 
 struct machine_desc {
-       /*
-        * Note! The first two elements are used
-        * by assembler code in head.S, head-common.S
-        */
        unsigned int            nr;             /* architecture number  */
        const char              *name;          /* architecture name    */
        unsigned long           boot_params;    /* tagged list          */
index 23c2e8e..d0ee74b 100644 (file)
  * translation for translating DMA addresses.  Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(void *x)
+static inline unsigned long virt_to_phys(const volatile void *x)
 {
        return __virt_to_phys((unsigned long)(x));
 }
index 9763be0..22de005 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
+#include <linux/pagemap.h>
+
 #include <asm/domain.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
index f41a6f5..82dfe5d 100644 (file)
 #define __ASMARM_TLB_H
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
 
 #include <linux/pagemap.h>
+
+#define tlb_flush(tlb) ((void) tlb)
+
 #include <asm-generic/tlb.h>
 
 #else /* !CONFIG_MMU */
 
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb)     0
+#define FREE_PTE_NR            500
+#else
+#define tlb_fast_mode(tlb)     1
+#define FREE_PTE_NR            0
+#endif
 
 /*
  * TLB handling.  This allows us to remove pages from the page
 struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
+       struct vm_area_struct   *vma;
        unsigned long           range_start;
        unsigned long           range_end;
+       unsigned int            nr;
+       struct page             *pages[FREE_PTE_NR];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+/*
+ * This is unnecessarily complex.  There are three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas.  See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
+ *  3. Unmapping argument pages.  See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+       if (tlb->fullmm || !tlb->vma)
+               flush_tlb_mm(tlb->mm);
+       else if (tlb->range_end > 0) {
+               flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+               tlb->range_start = TASK_SIZE;
+               tlb->range_end = 0;
+       }
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+       if (!tlb->fullmm) {
+               if (addr < tlb->range_start)
+                       tlb->range_start = addr;
+               if (addr + PAGE_SIZE > tlb->range_end)
+                       tlb->range_end = addr + PAGE_SIZE;
+       }
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       tlb_flush(tlb);
+       if (!tlb_fast_mode(tlb)) {
+               free_pages_and_swap_cache(tlb->pages, tlb->nr);
+               tlb->nr = 0;
+       }
+}
+
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 
        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;
+       tlb->vma = NULL;
+       tlb->nr = 0;
 
        return tlb;
 }
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-       if (tlb->fullmm)
-               flush_tlb_mm(tlb->mm);
+       tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
        check_pgt_cache();
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 {
-       if (!tlb->fullmm) {
-               if (addr < tlb->range_start)
-                       tlb->range_start = addr;
-               if (addr + PAGE_SIZE > tlb->range_end)
-                       tlb->range_end = addr + PAGE_SIZE;
-       }
+       tlb_add_flush(tlb, addr);
 }
 
 /*
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
        if (!tlb->fullmm) {
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
+               tlb->vma = vma;
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 static inline void
 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-       if (!tlb->fullmm && tlb->range_end > 0)
-               flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+       if (!tlb->fullmm)
+               tlb_flush(tlb);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+       if (tlb_fast_mode(tlb)) {
+               free_page_and_swap_cache(page);
+       } else {
+               tlb->pages[tlb->nr++] = page;
+               if (tlb->nr >= FREE_PTE_NR)
+                       tlb_flush_mmu(tlb);
+       }
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+       unsigned long addr)
+{
+       pgtable_page_dtor(pte);
+       tlb_add_flush(tlb, addr);
+       tlb_remove_page(tlb, pte);
 }
 
-#define tlb_remove_page(tlb,page)      free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)  pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr)  __pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)  pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)         do { } while (0)
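
The reworked asm/tlb.h turns the ARM mmu_gather into a batching structure: it records the vma and the address range to flush, and on SMP/ARMv7 it defers page freeing until the TLB entries have actually been invalidated. A condensed sketch of the call sequence the core mm code drives through these hooks (error handling and the PTE walk are elided):

#include <linux/mm.h>
#include <asm/tlb.h>

static void example_unmap_range(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* fullmm == 0 */

	tlb_start_vma(tlb, vma);	/* flush caches, remember the vma */
	/*
	 * ... for each unmapped PTE: tlb_remove_tlb_entry(tlb, ptep, addr)
	 * grows the recorded range, and freed pages are queued through
	 * tlb_remove_page(tlb, page) until FREE_PTE_NR forces a flush ...
	 */
	tlb_end_vma(tlb, vma);			/* flush the recorded range */
	tlb_finish_mmu(tlb, start, end);	/* final flush, free batched pages */
}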
index ce7378e..d2005de 100644 (file)
 #ifndef _ASMARM_TLBFLUSH_H
 #define _ASMARM_TLBFLUSH_H
 
-
-#ifndef CONFIG_MMU
-
-#define tlb_flush(tlb) ((void) tlb)
-
-#else /* CONFIG_MMU */
+#ifdef CONFIG_MMU
 
 #include <asm/glue.h>
 
index f17d9a0..f06ff9f 100644 (file)
@@ -391,25 +391,24 @@ ENDPROC(__turn_mmu_on)
 
 
 #ifdef CONFIG_SMP_ON_UP
+       __INIT
 __fixup_smp:
-       mov     r4, #0x00070000
-       orr     r3, r4, #0xff000000     @ mask 0xff070000
-       orr     r4, r4, #0x41000000     @ val 0x41070000
-       and     r0, r9, r3
-       teq     r0, r4                  @ ARM CPU and ARMv6/v7?
+       and     r3, r9, #0x000f0000     @ architecture version
+       teq     r3, #0x000f0000         @ CPU ID supported?
        bne     __fixup_smp_on_up       @ no, assume UP
 
-       orr     r3, r3, #0x0000ff00
-       orr     r3, r3, #0x000000f0     @ mask 0xff07fff0
+       bic     r3, r9, #0x00ff0000
+       bic     r3, r3, #0x0000000f     @ mask 0xff00fff0
+       mov     r4, #0x41000000
        orr     r4, r4, #0x0000b000
-       orr     r4, r4, #0x00000020     @ val 0x4107b020
-       and     r0, r9, r3
-       teq     r0, r4                  @ ARM 11MPCore?
+       orr     r4, r4, #0x00000020     @ val 0x4100b020
+       teq     r3, r4                  @ ARM 11MPCore?
        moveq   pc, lr                  @ yes, assume SMP
 
        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
-       tst     r0, #1 << 31
-       movne   pc, lr                  @ bit 31 => SMP
+       and     r0, r0, #0xc0000000     @ multiprocessing extensions and
+       teq     r0, #0x80000000         @ not part of a uniprocessor system?
+       moveq   pc, lr                  @ yes, assume SMP
 
 __fixup_smp_on_up:
        adr     r0, 1f
@@ -417,18 +416,7 @@ __fixup_smp_on_up:
        sub     r3, r0, r3
        add     r4, r4, r3
        add     r5, r5, r3
-2:     cmp     r4, r5
-       movhs   pc, lr
-       ldmia   r4!, {r0, r6}
- ARM(  str     r6, [r0, r3]    )
- THUMB(        add     r0, r0, r3      )
-#ifdef __ARMEB__
- THUMB(        mov     r6, r6, ror #16 )       @ Convert word order for big-endian.
-#endif
- THUMB(        strh    r6, [r0], #2    )       @ For Thumb-2, store as two halfwords
- THUMB(        mov     r6, r6, lsr #16 )       @ to be robust against misaligned r3.
- THUMB(        strh    r6, [r0]        )
-       b       2b
+       b       __do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
        .align
@@ -442,7 +430,31 @@ smp_on_up:
        ALT_SMP(.long   1)
        ALT_UP(.long    0)
        .popsection
+#endif
 
+       .text
+__do_fixup_smp_on_up:
+       cmp     r4, r5
+       movhs   pc, lr
+       ldmia   r4!, {r0, r6}
+ ARM(  str     r6, [r0, r3]    )
+ THUMB(        add     r0, r0, r3      )
+#ifdef __ARMEB__
+ THUMB(        mov     r6, r6, ror #16 )       @ Convert word order for big-endian.
 #endif
+ THUMB(        strh    r6, [r0], #2    )       @ For Thumb-2, store as two halfwords
+ THUMB(        mov     r6, r6, lsr #16 )       @ to be robust against misaligned r3.
+ THUMB(        strh    r6, [r0]        )
+       b       __do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+       stmfd   sp!, {r4 - r6, lr}
+       mov     r4, r0
+       add     r5, r0, r1
+       mov     r3, #0
+       bl      __do_fixup_smp_on_up
+       ldmfd   sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
 
 #include "head-common.S"
index c9f3f04..44b84fe 100644 (file)
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
        u32 didr;
 
        /* Do we implement the extended CPUID interface? */
-       if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-               pr_warning("CPUID feature registers not supported. "
-                               "Assuming v6 debug is present.\n");
+       if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+           "CPUID feature registers not supported. "
+           "Assuming v6 debug is present.\n"))
                return ARM_DEBUG_ARCH_V6;
-       }
 
        ARM_DBG_READ(c0, 0, didr);
        return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
        return debug_arch;
 }
 
+static int debug_arch_supported(void)
+{
+       u8 arch = get_debug_arch();
+       return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
 /* Determine number of BRP register available. */
 static int get_num_brp_resources(void)
 {
@@ -268,6 +273,9 @@ out:
 
 int hw_breakpoint_slots(int type)
 {
+       if (!debug_arch_supported())
+               return 0;
+
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
@@ -828,19 +836,32 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-       int i;
+       int i, cpu = smp_processor_id();
+       u32 dbg_power;
+       cpumask_t *cpumask = info;
 
        /*
         * v7 debug contains save and restore registers so that debug state
-        * can be maintained across low-power modes without leaving
-        * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-        * we can write to the debug registers out of reset, so we must
-        * unlock the OS Lock Access Register to avoid taking undefined
-        * instruction exceptions later on.
+        * can be maintained across low-power modes without leaving the debug
+        * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+        * the debug registers out of reset, so we must unlock the OS Lock
+        * Access Register to avoid taking undefined instruction exceptions
+        * later on.
         */
        if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+               /*
+                * Ensure sticky power-down is clear (i.e. debug logic is
+                * powered up).
+                */
+               asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+               if ((dbg_power & 0x1) == 0) {
+                       pr_warning("CPU %d debug is powered down!\n", cpu);
+                       cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+                       return;
+               }
+
                /*
                 * Unconditionally clear the lock by writing a value
                 * other than 0xC5ACCE55 to the access register.
@@ -879,10 +900,11 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
        u32 dscr;
+       cpumask_t cpumask = { CPU_BITS_NONE };
 
        debug_arch = get_debug_arch();
 
-       if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+       if (!debug_arch_supported()) {
                pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
                return 0;
        }
@@ -899,18 +921,24 @@ static int __init arch_hw_breakpoint_init(void)
                pr_info("%d breakpoint(s) reserved for watchpoint "
                                "single-step.\n", core_num_reserved_brps);
 
+       /*
+        * Reset the breakpoint resources. We assume that a halting
+        * debugger will leave the world in a nice state for us.
+        */
+       on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+       if (!cpumask_empty(&cpumask)) {
+               core_num_brps = 0;
+               core_num_reserved_brps = 0;
+               core_num_wrps = 0;
+               return 0;
+       }
+
        ARM_DBG_READ(c1, 0, dscr);
        if (dscr & ARM_DSCR_HDBGEN) {
+               max_watchpoint_len = 4;
                pr_warning("halting debug mode enabled. Assuming maximum "
-                               "watchpoint size of 4 bytes.");
+                          "watchpoint size of %u bytes.", max_watchpoint_len);
        } else {
-               /*
-                * Reset the breakpoint resources. We assume that a halting
-                * debugger will leave the world in a nice state for us.
-                */
-               smp_call_function(reset_ctrl_regs, NULL, 1);
-               reset_ctrl_regs(NULL);
-
                /* Work out the maximum supported watchpoint length. */
                max_watchpoint_len = get_max_wp_len();
                pr_info("maximum watchpoint size is %u bytes.\n",
index 2c1f005..8f6ed43 100644 (file)
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
                return space_cccc_1100_010x(insn, asi);
 
-       } else if ((insn & 0x0e000000) == 0x0c400000) {
+       } else if ((insn & 0x0e000000) == 0x0c000000) {
 
                return space_cccc_110x(insn, asi);
 
index 2cfe816..6d4105e 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@ struct mod_unwind_map {
        const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+       const Elf_Shdr *sechdrs, const char *name)
+{
+       const Elf_Shdr *s, *se;
+       const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+       for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+               if (strcmp(name, secstrs + s->sh_name) == 0)
+                       return s;
+
+       return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                    struct module *mod)
 {
+       const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-       const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+       const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
        struct mod_unwind_map maps[ARM_SEC_MAX];
        int i;
 
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                                                 maps[i].txt_sec->sh_addr,
                                                 maps[i].txt_sec->sh_size);
 #endif
+       s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+       if (s && !is_smp())
+               fixup_smp((void *)s->sh_addr, s->sh_size);
        return 0;
 }
 
index 5efa264..d150ad1 100644 (file)
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
-       if (tail >= buftail.fp)
+       if (tail + 1 >= buftail.fp)
                return NULL;
 
        return buftail.fp - 1;
index b8af96e..2c79eec 100644 (file)
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
                           irq, cpu);
        return err;
 #else
-       return 0;
+       return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-       int i, err = 0;
+       int i, irqs, err = 0;
        struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-       if (!pdev) {
-               err = -ENODEV;
-               goto out;
-       }
+       if (!pdev)
+               return -ENODEV;
+
+       irqs = pdev->num_resources;
+
+       /*
+        * If we have a single PMU interrupt that we can't shift, assume that
+        * we're running on a uniprocessor machine and continue.
+        */
+       if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+               return 0;
 
-       for (i = 0; i < pdev->num_resources; ++i) {
+       for (i = 0; i < irqs; ++i) {
                err = set_irq_affinity(platform_get_irq(pdev, i), i);
                if (err)
                        break;
        }
 
-out:
        return err;
 }
 
index 19c6816..b13e70f 100644 (file)
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
                while (!(arch_ctrl.len & 0x1))
                        arch_ctrl.len >>= 1;
 
-               if (idx & 0x1)
-                       reg = encode_ctrl_reg(arch_ctrl);
-               else
+               if (num & 0x1)
                        reg = bp->attr.bp_addr;
+               else
+                       reg = encode_ctrl_reg(arch_ctrl);
        }
 
 put:
index 420b8d6..5ea4fb7 100644 (file)
@@ -226,8 +226,8 @@ int cpu_architecture(void)
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
-               if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-                   (mmfr0 & 0x000000f0) == 0x00000030)
+               if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+                   (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
index 907d5a6..abaf844 100644 (file)
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
        unsigned long handler = (unsigned long)ka->sa.sa_handler;
        unsigned long retcode;
        int thumb = 0;
-       unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+       unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+       cpsr |= PSR_ENDSTATE;
 
        /*
         * Maybe we need to deliver a 32-bit signal to a 26-bit task.
index fd91566..60636f4 100644 (file)
@@ -36,6 +36,7 @@ static void twd_set_mode(enum clock_event_mode mode,
                /* timer load already set up */
                ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
                        | TWD_TIMER_CONTROL_PERIODIC;
+               __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                /* period set, and timer enabled in 'next_event' hook */
@@ -81,7 +82,7 @@ int twd_timer_ack(void)
 
 static void __cpuinit twd_calibrate_rate(void)
 {
-       unsigned long load, count;
+       unsigned long count;
        u64 waitjiffies;
 
        /*
@@ -116,10 +117,6 @@ static void __cpuinit twd_calibrate_rate(void)
                printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
                        (twd_timer_rate / 1000000) % 100);
        }
-
-       load = twd_timer_rate / HZ;
-
-       __raw_writel(load, twd_base + TWD_TIMER_LOAD);
 }
 
 /*
index 86b66f3..6146279 100644 (file)
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)       x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -43,6 +49,7 @@ SECTIONS
                _sinittext = .;
                        HEAD_TEXT
                        INIT_TEXT
+                       ARM_EXIT_KEEP(EXIT_TEXT)
                _einittext = .;
                ARM_CPU_DISCARD(PROC_INFO)
                __arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
                __init_begin = _stext;
                INIT_DATA
+               ARM_EXIT_KEEP(EXIT_DATA)
 #endif
        }
 
@@ -162,6 +170,7 @@ SECTIONS
                . = ALIGN(PAGE_SIZE);
                __init_begin = .;
                INIT_DATA
+               ARM_EXIT_KEEP(EXIT_DATA)
                . = ALIGN(PAGE_SIZE);
                __init_end = .;
 #endif
@@ -247,6 +256,8 @@ SECTIONS
        }
 #endif
 
+       NOTES
+
        BSS_SECTION(0, 0, 0)
        _end = .;
 
index 343de73..4a68c2b 100644 (file)
@@ -132,7 +132,7 @@ out:
        return ret;
 }
 
-static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+static int davinci_cpu_init(struct cpufreq_policy *policy)
 {
        int result = 0;
        struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
index 9eec630..beda8a4 100644 (file)
@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
        .resource       = da850_mcasp_resources,
 };
 
+struct platform_device davinci_pcm_device = {
+       .name   = "davinci-pcm-audio",
+       .id     = -1,
+};
+
 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
 {
+       platform_device_register(&davinci_pcm_device);
+
        /* DA830/OMAP-L137 has 3 instances of McASP */
        if (cpu_is_davinci_da830() && id == 1) {
                da830_mcasp1_device.dev.platform_data = pdata;
index d102986..3fa3e28 100644 (file)
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
 
        spin_lock_irqsave(&ctlr->lock, flags);
 
-       gpio_reg_set_bit(&regs->enable, gpio);
+       gpio_reg_set_bit(regs->enable, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
 
        spin_lock_irqsave(&ctlr->lock, flags);
 
-       gpio_reg_clear_bit(&regs->enable, gpio);
+       gpio_reg_clear_bit(regs->enable, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
 
        spin_lock_irqsave(&ctlr->lock, flags);
 
-       gpio_reg_set_bit(&regs->direction, gpio);
+       gpio_reg_set_bit(regs->direction, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
        spin_lock_irqsave(&ctlr->lock, flags);
 
        if (value)
-               gpio_reg_set_bit(&regs->data_out, gpio);
+               gpio_reg_set_bit(regs->data_out, gpio);
        else
-               gpio_reg_clear_bit(&regs->data_out, gpio);
+               gpio_reg_clear_bit(regs->data_out, gpio);
 
-       gpio_reg_clear_bit(&regs->direction, gpio);
+       gpio_reg_clear_bit(regs->direction, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
        unsigned gpio = chip->base + offset;
        int ret;
 
-       ret = gpio_reg_get_bit(&regs->data_in, gpio);
+       ret = gpio_reg_get_bit(regs->data_in, gpio);
 
        return ret ? 1 : 0;
 }
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
        spin_lock_irqsave(&ctlr->lock, flags);
 
        if (value)
-               gpio_reg_set_bit(&regs->data_out, gpio);
+               gpio_reg_set_bit(regs->data_out, gpio);
        else
-               gpio_reg_clear_bit(&regs->data_out, gpio);
+               gpio_reg_clear_bit(regs->data_out, gpio);
 
        spin_unlock_irqrestore(&ctlr->lock, flags);
 }
index 730c49d..14a5048 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
        return 1;
index ffdf87b..8207954 100644 (file)
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(ep93xx_i2s_release);
 static struct resource ep93xx_ac97_resources[] = {
        {
                .start  = EP93XX_AAC_PHYS_BASE,
-               .end    = EP93XX_AAC_PHYS_BASE + 0xb0 - 1,
+               .end    = EP93XX_AAC_PHYS_BASE + 0xac - 1,
                .flags  = IORESOURCE_MEM,
        },
        {
index f3dc76f..bec34b8 100644 (file)
@@ -427,6 +427,13 @@ void __init ep93xx_gpio_init(void)
 {
        int i;
 
+       /* Set Ports C, D, E, G, and H for GPIO use */
+       ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
+                                EP93XX_SYSCON_DEVCFG_GONK |
+                                EP93XX_SYSCON_DEVCFG_EONIDE |
+                                EP93XX_SYSCON_DEVCFG_GONIDE |
+                                EP93XX_SYSCON_DEVCFG_HONIDE);
+
        for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
                gpiochip_add(&ep93xx_gpio_banks[i].chip);
 }
index 3c9e0c4..30b971d 100644 (file)
@@ -17,8 +17,8 @@
        /* For NetWinder debugging */
                .macro  addruart, rp, rv
                mov     \rp, #0x000003f8
-               orr     \rv, \rp, #0x7c000000   @ physical
-               orr     \rp, \rp, #0xff000000   @ virtual
+               orr     \rv, \rp, #0xff000000   @ virtual
+               orr     \rp, \rp, #0x7c000000   @ physical
                .endm
 
 #define UART_SHIFT     0
index aa76cfd..8382e79 100644 (file)
@@ -180,7 +180,7 @@ static const uint32_t mx25pdk_keymap[] = {
        KEY(3, 3, KEY_POWER),
 };
 
-static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = {
+static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
        .keymap         = mx25pdk_keymap,
        .keymap_size    = ARRAY_SIZE(mx25pdk_keymap),
 };
index 4dc68d6..9fd8942 100644 (file)
@@ -432,7 +432,7 @@ static struct clocksource clocksource_ixp4xx = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-unsigned long ixp4xx_timer_freq = FREQ;
+unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
@@ -496,7 +496,7 @@ static struct clock_event_device clockevent_ixp4xx = {
 
 static void __init ixp4xx_clockevent_init(void)
 {
-       clockevent_ixp4xx.mult = div_sc(FREQ, NSEC_PER_SEC,
+       clockevent_ixp4xx.mult = div_sc(IXP4XX_TIMER_FREQ, NSEC_PER_SEC,
                                        clockevent_ixp4xx.shift);
        clockevent_ixp4xx.max_delta_ns =
                clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
index 2c3f93c..c9e930f 100644 (file)
@@ -10,6 +10,7 @@
  * 66.66... MHz. We do a convoluted calculation of CLOCK_TICK_RATE because the
  * timer register ignores the bottom 2 bits of the LATCH value.
  */
-#define FREQ 66666000
-#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
+#define IXP4XX_TIMER_FREQ 66666000
+#define CLOCK_TICK_RATE \
+       (((IXP4XX_TIMER_FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
 
index bfdbe4b..852f7c9 100644 (file)
@@ -265,6 +265,11 @@ void qmgr_release_queue(unsigned int queue)
               qmgr_queue_descs[queue], queue);
        qmgr_queue_descs[queue][0] = '\x0';
 #endif
+
+       while ((addr = qmgr_get_entry(queue)))
+               printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+                      queue, addr);
+
        __raw_writel(0, &qmgr_regs->sram[queue]);
 
        used_sram_bitmap[0] &= ~mask[0];
@@ -275,10 +280,6 @@ void qmgr_release_queue(unsigned int queue)
        spin_unlock_irq(&qmgr_lock);
 
        module_put(THIS_MODULE);
-
-       while ((addr = qmgr_get_entry(queue)))
-               printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
-                      queue, addr);
 }
 
 static int qmgr_init(void)
index b1a362e..ca72a05 100644 (file)
@@ -304,7 +304,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)             \
        reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
        reg &= ~BM_CLKCTRL_##dr##_DIV;                                  \
        reg |= div << BP_CLKCTRL_##dr##_DIV;                            \
-       if (reg | (1 << clk->enable_shift)) {                           \
+       if (reg & (1 << clk->enable_shift)) {                           \
                pr_err("%s: clock is gated\n", __func__);               \
                return -EINVAL;                                         \
        }                                                               \
@@ -347,7 +347,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent)   \
 {                                                                      \
        if (parent != clk->parent) {                                    \
                __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,            \
-                        HW_CLKCTRL_CLKSEQ_TOG);                        \
+                        CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);    \
                clk->parent = parent;                                   \
        }                                                               \
                                                                        \
index 56312c0..fd1c4c5 100644 (file)
@@ -355,12 +355,12 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)           \
        } else {                                                        \
                reg &= ~BM_CLKCTRL_##dr##_DIV;                          \
                reg |= div << BP_CLKCTRL_##dr##_DIV;                    \
-               if (reg | (1 << clk->enable_shift)) {                   \
+               if (reg & (1 << clk->enable_shift)) {                   \
                        pr_err("%s: clock is gated\n", __func__);       \
                        return -EINVAL;                                 \
                }                                                       \
        }                                                               \
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);          \
+       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
                                                                        \
        for (i = 10000; i; i--)                                         \
                if (!(__raw_readl(CLKCTRL_BASE_ADDR +                   \
@@ -483,7 +483,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent)   \
 {                                                                      \
        if (parent != clk->parent) {                                    \
                __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,            \
-                        HW_CLKCTRL_CLKSEQ_TOG);                        \
+                        CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);    \
                clk->parent = parent;                                   \
        }                                                               \
                                                                        \
@@ -609,7 +609,6 @@ static struct clk_lookup lookups[] = {
        _REGISTER_CLOCK("duart", NULL, uart_clk)
        _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
        _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
-       _REGISTER_CLOCK("fec.0", NULL, fec_clk)
        _REGISTER_CLOCK("rtc", NULL, rtc_clk)
        _REGISTER_CLOCK("pll2", NULL, pll2_clk)
        _REGISTER_CLOCK(NULL, "hclk", hbus_clk)
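
Both the mx23 and mx28 set_rate() hunks above fix the same bitwise slip: reg | (1 << clk->enable_shift) is non-zero for every register value, so the gating check was always taken and set_rate() always failed with "clock is gated"; reg & (1 << clk->enable_shift) actually tests the enable bit. The corrected test in isolation (names illustrative):

#include <linux/types.h>

/* Returns non-zero only when the gate bit is actually set; the old '|'
 * form evaluated to true for any register contents. */
static inline int example_clk_is_gated(u32 reg, unsigned int enable_shift)
{
	return (reg & (1u << enable_shift)) != 0;
}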
index e7d2269..a7093c8 100644 (file)
@@ -57,7 +57,6 @@ static void __clk_disable(struct clk *clk)
                if (clk->disable)
                        clk->disable(clk);
                __clk_disable(clk->parent);
-               __clk_disable(clk->secondary);
        }
 }
 
@@ -68,7 +67,6 @@ static int __clk_enable(struct clk *clk)
 
        if (clk->usecount++ == 0) {
                __clk_enable(clk->parent);
-               __clk_enable(clk->secondary);
 
                if (clk->enable)
                        clk->enable(clk);
index d7ad7a6..cb0c0e8 100644 (file)
@@ -139,6 +139,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
        struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq);
        u32 gpio_irq_no_base = port->virtual_irq_start;
 
+       desc->irq_data.chip->irq_ack(&desc->irq_data);
+
        irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
                        __raw_readl(port->base + PINCTRL_IRQEN(port->id));
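
The added irq_ack of the parent interrupt is the standard shape of a chained GPIO demultiplexer: acknowledge the SoC-level (banked) interrupt first, then dispatch each pending pin. A self-contained sketch of that pattern with illustrative example_* register names (not the real PINCTRL layout):

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define EXAMPLE_IRQSTAT	0x00	/* hypothetical register offsets */
#define EXAMPLE_IRQEN	0x04

struct example_port {
	void __iomem *base;
	int virtual_irq_start;
};

static void example_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
	struct example_port *port = get_irq_data(irq);
	u32 stat;

	/* ack the banked (parent) interrupt before demuxing the pins */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	stat = __raw_readl(port->base + EXAMPLE_IRQSTAT) &
	       __raw_readl(port->base + EXAMPLE_IRQEN);

	while (stat) {
		int bit = fls(stat) - 1;

		generic_handle_irq(port->virtual_irq_start + bit);
		stat &= ~(1u << bit);
	}
}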
 
index 041e276..592c9ab 100644 (file)
@@ -29,8 +29,6 @@ struct clk {
        int id;
        /* Source clock this clk depends on */
        struct clk *parent;
-       /* Secondary clock to enable/disable with this clock */
-       struct clk *secondary;
        /* Reference count of clock enable/disable */
        __s8 usecount;
        /* Register bit position for clock's enable/disable control. */
index 8d2f2da..e0a0281 100644 (file)
@@ -9,6 +9,7 @@ config ARCH_OMAP730
        depends on ARCH_OMAP1
        bool "OMAP730 Based System"
        select CPU_ARM926T
+       select OMAP_MPU_TIMER
        select ARCH_OMAP_OTG
 
 config ARCH_OMAP850
@@ -22,6 +23,7 @@ config ARCH_OMAP15XX
        default y
        bool "OMAP15xx Based System"
        select CPU_ARM925T
+       select OMAP_MPU_TIMER
 
 config ARCH_OMAP16XX
        depends on ARCH_OMAP1
index 6ee1950..ba6009f 100644 (file)
@@ -3,12 +3,11 @@
 #
 
 # Common support
-obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o
+obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
 obj-y += clock.o clock_data.o opp_data.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
-obj-$(CONFIG_OMAP_MPU_TIMER)   += time.o
 obj-$(CONFIG_OMAP_32K_TIMER)   += timer32k.o
 
 # Power Management
index c9be6d4..bfb4fb1 100644 (file)
 #include <mach/irqs.h>
 #include <asm/hardware/gic.h>
 
-/*
- * We use __glue to avoid errors with multiple definitions of
- * .globl omap_irq_flags as it's included from entry-armv.S but not
- * from entry-common.S.
- */
-#ifdef __glue
-               .pushsection .data
-               .globl  omap_irq_flags
-omap_irq_flags:
-               .word   0
-               .popsection
-#endif
-
                .macro  disable_fiq
                .endm
 
index 4770158..731dd33 100644 (file)
@@ -57,6 +57,7 @@ struct omap_irq_bank {
        unsigned long wake_enable;
 };
 
+u32 omap_irq_flags;
 static unsigned int irq_bank_count;
 static struct omap_irq_bank *irq_banks;
 
@@ -176,7 +177,6 @@ static struct irq_chip omap_irq_chip = {
 
 void __init omap_init_irq(void)
 {
-       extern unsigned int omap_irq_flags;
        int i, j;
 
 #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
index c9088d8..4538093 100644 (file)
@@ -37,7 +37,7 @@ int omap_lcd_dma_running(void)
         * On OMAP1510, internal LCD controller will start the transfer
         * when it gets enabled, so assume DMA running if LCD enabled.
         */
-       if (cpu_is_omap1510())
+       if (cpu_is_omap15xx())
                if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
                        return 1;
 
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
 
 void omap_set_lcd_dma_b1_rotation(int rotate)
 {
-       if (cpu_is_omap1510()) {
+       if (cpu_is_omap15xx()) {
                printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
                BUG();
                return;
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
 
 void omap_set_lcd_dma_b1_mirror(int mirror)
 {
-       if (cpu_is_omap1510()) {
+       if (cpu_is_omap15xx()) {
                printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
                BUG();
        }
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
 
 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
 {
-       if (cpu_is_omap1510()) {
+       if (cpu_is_omap15xx()) {
                printk(KERN_ERR "DMA virtual resulotion is not supported "
                                "in 1510 mode\n");
                BUG();
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
 
 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
 {
-       if (cpu_is_omap1510()) {
+       if (cpu_is_omap15xx()) {
                printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
                BUG();
        }
@@ -177,7 +177,7 @@ static void set_b1_regs(void)
                        bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
                        /* 1510 DMA requires the bottom address to be 2 more
                         * than the actual last memory access location. */
-                       if (cpu_is_omap1510() &&
+                       if (cpu_is_omap15xx() &&
                                lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
                                        bottom += 2;
                        ei = PIXSTEP(0, 0, 1, 0);
@@ -241,7 +241,7 @@ static void set_b1_regs(void)
                return; /* Suppress warning about uninitialized vars */
        }
 
-       if (cpu_is_omap1510()) {
+       if (cpu_is_omap15xx()) {
                omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
                omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
                omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
@@ -343,7 +343,7 @@ void omap_free_lcd_dma(void)
                BUG();
                return;
        }
-       if (!cpu_is_omap1510())
+       if (!cpu_is_omap15xx())
                omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
                            OMAP1610_DMA_LCD_CCR);
        lcd_dma.reserved = 0;
@@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void)
         * connected. Otherwise the OMAP internal controller will
         * start the transfer when it gets enabled.
         */
-       if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+       if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
                return;
 
        w = omap_readw(OMAP1610_DMA_LCD_CTRL);
@@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma);
 void omap_setup_lcd_dma(void)
 {
        BUG_ON(lcd_dma.active);
-       if (!cpu_is_omap1510()) {
+       if (!cpu_is_omap15xx()) {
                /* Set some reasonable defaults */
                omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
                omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
                omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
        }
        set_b1_regs();
-       if (!cpu_is_omap1510()) {
+       if (!cpu_is_omap15xx()) {
                u16 w;
 
                w = omap_readw(OMAP1610_DMA_LCD_CCR);
@@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void)
        u16 w;
 
        lcd_dma.active = 0;
-       if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+       if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
                return;
 
        w = omap_readw(OMAP1610_DMA_LCD_CCR);
index ed7a61f..6885d2f 100644 (file)
 #include <mach/hardware.h>
 #include <asm/leds.h>
 #include <asm/irq.h>
+#include <asm/sched_clock.h>
+
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
 #include <plat/common.h>
 
+#ifdef CONFIG_OMAP_MPU_TIMER
+
 #define OMAP_MPU_TIMER_BASE            OMAP_MPU_TIMER1_BASE
 #define OMAP_MPU_TIMER_OFFSET          0x100
 
@@ -67,7 +71,7 @@ typedef struct {
 ((volatile omap_mpu_timer_regs_t*)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE +       \
                                 (n)*OMAP_MPU_TIMER_OFFSET))
 
-static inline unsigned long omap_mpu_timer_read(int nr)
+static inline unsigned long notrace omap_mpu_timer_read(int nr)
 {
        volatile omap_mpu_timer_regs_t* timer = omap_mpu_timer_base(nr);
        return timer->read_tim;
@@ -212,6 +216,32 @@ static struct clocksource clocksource_mpu = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static DEFINE_CLOCK_DATA(cd);
+
+static inline unsigned long long notrace _omap_mpu_sched_clock(void)
+{
+       u32 cyc = mpu_read(&clocksource_mpu);
+       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+#ifndef CONFIG_OMAP_32K_TIMER
+unsigned long long notrace sched_clock(void)
+{
+       return _omap_mpu_sched_clock();
+}
+#else
+static unsigned long long notrace omap_mpu_sched_clock(void)
+{
+       return _omap_mpu_sched_clock();
+}
+#endif
+
+static void notrace mpu_update_sched_clock(void)
+{
+       u32 cyc = mpu_read(&clocksource_mpu);
+       update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 static void __init omap_init_clocksource(unsigned long rate)
 {
        static char err[] __initdata = KERN_ERR
@@ -219,17 +249,13 @@ static void __init omap_init_clocksource(unsigned long rate)
 
        setup_irq(INT_TIMER2, &omap_mpu_timer2_irq);
        omap_mpu_timer_start(1, ~0, 1);
+       init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
 
        if (clocksource_register_hz(&clocksource_mpu, rate))
                printk(err, clocksource_mpu.name);
 }
 
-/*
- * ---------------------------------------------------------------------------
- * Timer initialization
- * ---------------------------------------------------------------------------
- */
-static void __init omap_timer_init(void)
+static void __init omap_mpu_timer_init(void)
 {
        struct clk      *ck_ref = clk_get(NULL, "ck_ref");
        unsigned long   rate;
@@ -246,6 +272,66 @@ static void __init omap_timer_init(void)
        omap_init_clocksource(rate);
 }
 
+#else
+static inline void omap_mpu_timer_init(void)
+{
+       pr_err("Bogus timer, should not happen\n");
+}
+#endif /* CONFIG_OMAP_MPU_TIMER */
+
+#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
+static unsigned long long (*preferred_sched_clock)(void);
+
+unsigned long long notrace sched_clock(void)
+{
+       if (!preferred_sched_clock)
+               return 0;
+
+       return preferred_sched_clock();
+}
+
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+       if (use_32k_sched_clock)
+               preferred_sched_clock = omap_32k_sched_clock;
+       else
+               preferred_sched_clock = omap_mpu_sched_clock;
+}
+#else
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+}
+#endif
+
+static inline int omap_32k_timer_usable(void)
+{
+       int res = false;
+
+       if (cpu_is_omap730() || cpu_is_omap15xx())
+               return res;
+
+#ifdef CONFIG_OMAP_32K_TIMER
+       res = omap_32k_timer_init();
+#endif
+
+       return res;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * Timer initialization
+ * ---------------------------------------------------------------------------
+ */
+static void __init omap_timer_init(void)
+{
+       if (omap_32k_timer_usable()) {
+               preferred_sched_clock_init(1);
+       } else {
+               omap_mpu_timer_init();
+               preferred_sched_clock_init(0);
+       }
+}
+
 struct sys_timer omap_timer = {
        .init           = omap_timer_init,
 };
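
[Editor's note — a minimal sketch, not part of the patch, of the sched_clock() plumbing these OMAP1 timer hunks add: a free-running 32-bit counter is sampled both by sched_clock() itself and by the periodic update callback registered with init_sched_clock(). The counter base, its read helper and MY_COUNTER_RATE below are hypothetical stand-ins for the MPU timer registers.]

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

#define MY_COUNTER_RATE	12000000	/* hypothetical: 12 MHz free-running counter */

static void __iomem *my_counter_base;	/* hypothetical: ioremap()ed elsewhere */

static DEFINE_CLOCK_DATA(cd);

static u32 notrace my_counter_read(void)
{
	return readl_relaxed(my_counter_base);
}

unsigned long long notrace sched_clock(void)
{
	u32 cyc = my_counter_read();

	/* Extend the 32-bit counter into a monotonic 64-bit nanosecond count. */
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}

/* Called periodically so the epoch in 'cd' never falls a full counter wrap behind. */
static void notrace my_update_sched_clock(void)
{
	u32 cyc = my_counter_read();

	update_sched_clock(&cd, cyc, (u32)~0);
}

static void __init my_sched_clock_init(void)
{
	/* 32 counter bits, counting at MY_COUNTER_RATE Hz */
	init_sched_clock(&cd, my_update_sched_clock, 32, MY_COUNTER_RATE);
}
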
index 20cfbcc..13d7b8f 100644 (file)
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
+#include <plat/common.h>
 #include <plat/dmtimer.h>
 
-struct sys_timer omap_timer;
-
 /*
  * ---------------------------------------------------------------------------
  * 32KHz OS timer
@@ -181,14 +180,14 @@ static __init void omap_init_32k_timer(void)
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-static void __init omap_timer_init(void)
+bool __init omap_32k_timer_init(void)
 {
+       omap_init_clocksource_32k();
+
 #ifdef CONFIG_OMAP_DM_TIMER
        omap_dm_timer_init();
 #endif
        omap_init_32k_timer();
-}
 
-struct sys_timer omap_timer = {
-       .init           = omap_timer_init,
-};
+       return true;
+}
index 5b0c777..8f9a64d 100644 (file)
@@ -124,8 +124,9 @@ static inline void cm_t3517_init_hecc(void) {}
 #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
 #define RTC_IO_GPIO            (153)
 #define RTC_WR_GPIO            (154)
-#define RTC_RD_GPIO            (160)
+#define RTC_RD_GPIO            (53)
 #define RTC_CS_GPIO            (163)
+#define RTC_CS_EN_GPIO         (160)
 
 struct v3020_platform_data cm_t3517_v3020_pdata = {
        .use_gpio       = 1,
@@ -145,6 +146,16 @@ static struct platform_device cm_t3517_rtc_device = {
 
 static void __init cm_t3517_init_rtc(void)
 {
+       int err;
+
+       err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en");
+       if (err) {
+               pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
+               return;
+       }
+
+       gpio_direction_output(RTC_CS_EN_GPIO, 1);
+
        platform_device_register(&cm_t3517_rtc_device);
 }
 #else
@@ -214,12 +225,12 @@ static struct mtd_partition cm_t3517_nand_partitions[] = {
        },
        {
                .name           = "linux",
-               .offset         = MTDPART_OFS_APPEND,   /* Offset = 0x280000 */
+               .offset         = MTDPART_OFS_APPEND,   /* Offset = 0x2A0000 */
                .size           = 32 * NAND_BLOCK_SIZE,
        },
        {
                .name           = "rootfs",
-               .offset         = MTDPART_OFS_APPEND,   /* Offset = 0x680000 */
+               .offset         = MTDPART_OFS_APPEND,   /* Offset = 0x6A0000 */
                .size           = MTDPART_SIZ_FULL,
        },
 };
@@ -256,11 +267,19 @@ static void __init cm_t3517_init_irq(void)
 static struct omap_board_mux board_mux[] __initdata = {
        /* GPIO186 - Green LED */
        OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-       /* RTC GPIOs: IO, WR#, RD#, CS# */
+
+       /* RTC GPIOs: */
+       /* IO - GPIO153 */
        OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+       /* WR# - GPIO154 */
        OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-       OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+       /* RD# - GPIO53 */
+       OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+       /* CS# - GPIO163 */
        OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+       /* CS EN - GPIO160 */
+       OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
        /* HSUSB1 RESET */
        OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
        /* HSUSB2 RESET */
index 00bb1fc..9a2a31e 100644 (file)
@@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = {
 
 static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev)
 {
-       twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1);
-       twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
-
        if (gpio_is_valid(dssdev->reset_gpio))
                gpio_set_value_cansleep(dssdev->reset_gpio, 1);
        return 0;
@@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[];
 static int devkit8000_twl_gpio_setup(struct device *dev,
                unsigned gpio, unsigned ngpio)
 {
+       int ret;
+
        omap_mux_init_gpio(29, OMAP_PIN_INPUT);
        /* gpio + 0 is "mmc0_cd" (input/IRQ) */
        mmc[0].gpio_cd = gpio + 0;
@@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
        /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
        gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
-        /* gpio + 1 is "LCD_PWREN" (out, active high) */
-       devkit8000_lcd_device.reset_gpio = gpio + 1;
-       gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN");
-       /* Disable until needed */
-       gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0);
+       /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
+       devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
+       ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
+                       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
+       if (ret < 0) {
+               devkit8000_lcd_device.reset_gpio = -EINVAL;
+               printk(KERN_ERR "Failed to request GPIO for LCD_PWREN\n");
+       }
 
        /* gpio + 7 is "DVI_PD" (out, active low) */
        devkit8000_dvi_device.reset_gpio = gpio + 7;
-       gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown");
-       /* Disable until needed */
-       gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0);
+       ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
+                       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
+       if (ret < 0) {
+               devkit8000_dvi_device.reset_gpio = -EINVAL;
+               printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
+       }
 
        return 0;
 }
@@ -275,8 +280,7 @@ static struct twl4030_gpio_platform_data devkit8000_gpio_data = {
        .irq_base       = TWL4030_GPIO_IRQ_BASE,
        .irq_end        = TWL4030_GPIO_IRQ_END,
        .use_leds       = true,
-       .pullups        = BIT(1),
-       .pulldowns      = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
+       .pulldowns      = BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13)
                                | BIT(15) | BIT(16) | BIT(17),
        .setup          = devkit8000_twl_gpio_setup,
 };
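
[Editor's note — a small sketch, not part of the patch, of the gpio_request_one() pattern the devkit8000 hunks switch to: request, direction and initial level collapsed into one call, with the return value actually checked. The GPIO number and label here are hypothetical.]

#include <linux/gpio.h>
#include <linux/kernel.h>

#define MY_PANEL_PWREN_GPIO	170	/* hypothetical line number */

static int my_panel_gpio_setup(void)
{
	int ret;

	/* Request the line as an output driven low, in a single call. */
	ret = gpio_request_one(MY_PANEL_PWREN_GPIO,
			       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "panel_pwren");
	if (ret < 0) {
		pr_err("failed to request panel GPIO: %d\n", ret);
		return ret;
	}

	/* Later, when the panel is enabled, raise the line. */
	gpio_set_value_cansleep(MY_PANEL_PWREN_GPIO, 1);

	return 0;
}
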
index e001a04..e944025 100644 (file)
@@ -409,8 +409,6 @@ static void __init omap4_panda_init(void)
        platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
        omap_serial_init();
        omap4_twl6030_hsmmc_init(mmc);
-       /* OMAP4 Panda uses internal transceiver so register nop transceiver */
-       usb_nop_xceiv_register();
        omap4_ehci_init();
        usb_musb_init(&musb_board_data);
 }
index cb77be7..39a71bb 100644 (file)
@@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
 static struct regulator_init_data rm680_vemmc = {
        .constraints =  {
                .name                   = "rm680_vemmc",
-               .min_uV                 = 2900000,
-               .max_uV                 = 2900000,
-               .apply_uV               = 1,
                .valid_modes_mask       = REGULATOR_MODE_NORMAL
                                        | REGULATOR_MODE_STANDBY,
                .valid_ops_mask         = REGULATOR_CHANGE_STATUS
index 337392c..acb7ae5 100644 (file)
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
        dd = clk->dpll_data;
 
        /* DPLL divider must result in a valid jitter correction val */
-       fint = clk->parent->rate / (n + 1);
+       fint = clk->parent->rate / n;
        if (fint < DPLL_FINT_BAND1_MIN) {
 
                pr_debug("rejecting n=%d due to Fint failure, "
index e8cb32f..de9ec8d 100644 (file)
@@ -34,7 +34,6 @@
 #include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
 #include "prm44xx.h"
-#include "prm44xx.h"
 #include "prm-regbits-44xx.h"
 #include "control.h"
 #include "scrm44xx.h"
index e20b986..58e42f7 100644 (file)
@@ -423,6 +423,12 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 {
        struct clkdm_dep *cd;
 
+       if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+               pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+                      clkdm1->name, clkdm2->name, __func__);
+               return -EINVAL;
+       }
+
        if (!clkdm1 || !clkdm2)
                return -EINVAL;
 
@@ -458,6 +464,12 @@ int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 {
        struct clkdm_dep *cd;
 
+       if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+               pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+                      clkdm1->name, clkdm2->name, __func__);
+               return -EINVAL;
+       }
+
        if (!clkdm1 || !clkdm2)
                return -EINVAL;
 
@@ -500,6 +512,12 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
        if (!clkdm1 || !clkdm2)
                return -EINVAL;
 
+       if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+               pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+                      clkdm1->name, clkdm2->name, __func__);
+               return -EINVAL;
+       }
+
        cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
        if (IS_ERR(cd)) {
                pr_debug("clockdomain: hardware cannot set/clear wake up of "
@@ -527,6 +545,12 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
        struct clkdm_dep *cd;
        u32 mask = 0;
 
+       if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+               pr_err("clockdomain: %s: %s: not yet implemented\n",
+                      clkdm->name, __func__);
+               return -EINVAL;
+       }
+
        if (!clkdm)
                return -EINVAL;
 
@@ -830,8 +854,7 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
         * dependency code and data for OMAP4.
         */
        if (cpu_is_omap44xx()) {
-               WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
-                         "support is not yet implemented\n");
+               pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
        } else {
                if (atomic_read(&clkdm->usecount) > 0)
                        _clkdm_add_autodeps(clkdm);
@@ -872,8 +895,7 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm)
         * dependency code and data for OMAP4.
         */
        if (cpu_is_omap44xx()) {
-               WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
-                         "support is not yet implemented\n");
+               pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
        } else {
                if (atomic_read(&clkdm->usecount) > 0)
                        _clkdm_del_autodeps(clkdm);
index 51920fc..10622c9 100644 (file)
@@ -30,8 +30,6 @@
 #include "cm1_44xx.h"
 #include "cm2_44xx.h"
 
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
 #include "prm44xx.h"
 #include "prcm44xx.h"
index d2f15f5..34922b2 100644 (file)
@@ -264,7 +264,7 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
        if (IS_ERR(od)) {
                pr_err("%s: Cant build omap_device for %s:%s.\n",
                        __func__, name, oh->name);
-               return IS_ERR(od);
+               return PTR_ERR(od);
        }
 
        mem = platform_get_resource(&od->pdev, IORESOURCE_MEM, 0);
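
[Editor's note — a tiny sketch, not part of the patch, of the IS_ERR()/PTR_ERR() convention behind the one-line fix above: IS_ERR() only says whether the pointer encodes an error, while PTR_ERR() recovers the negative errno that should be returned. my_build_thing() is a hypothetical allocator.]

#include <linux/err.h>

static void *my_build_thing(void)
{
	return ERR_PTR(-ENOMEM);	/* placeholder failure for the sketch */
}

static int my_init_thing(void)
{
	void *t = my_build_thing();

	if (IS_ERR(t))
		return PTR_ERR(t);	/* propagate -ENOMEM, not the value of IS_ERR() */

	/* ... use t ... */
	return 0;
}
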
index befa321..81985a6 100644 (file)
  */
 
 #ifdef MULTI_OMAP2
-
-/*
- * We use __glue to avoid errors with multiple definitions of
- * .globl omap_irq_base as it's included from entry-armv.S but not
- * from entry-common.S.
- */
-#ifdef __glue
-               .pushsection .data
-               .globl  omap_irq_base
-omap_irq_base:
-               .word   0
-               .popsection
-#endif
-
                /*
                 * Configure the interrupt base on the first interrupt.
                 * See also omap_irq_base_init for setting omap_irq_base.
index e66687b..c203204 100644 (file)
@@ -314,14 +314,13 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
        return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
 }
 
+void __iomem *omap_irq_base;
+
 /*
  * Initialize asm_irq_base for entry-macro.S
  */
 static inline void omap_irq_base_init(void)
 {
-       extern void __iomem *omap_irq_base;
-
-#ifdef MULTI_OMAP2
        if (cpu_is_omap24xx())
                omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
        else if (cpu_is_omap34xx())
@@ -330,7 +329,6 @@ static inline void omap_irq_base_init(void)
                omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
        else
                pr_err("Could not initialize omap_irq_base\n");
-#endif
 }
 
 void __init omap2_init_common_infrastructure(void)
index 394413d..24b8850 100644 (file)
@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
                omap_mbox_type_t irq)
 {
        struct omap_mbox2_priv *p = mbox->priv;
-       u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-       l = mbox_read_reg(p->irqdisable);
-       l &= ~bit;
-       mbox_write_reg(l, p->irqdisable);
+       u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+
+       if (!cpu_is_omap44xx())
+               bit = mbox_read_reg(p->irqdisable) & ~bit;
+
+       mbox_write_reg(bit, p->irqdisable);
 }
 
 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -334,7 +336,7 @@ static struct omap_mbox mbox_iva_info = {
        .priv   = &omap2_mbox_iva_priv,
 };
 
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4)
index df8d2f2..6c84659 100644 (file)
@@ -160,7 +160,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
        struct omap_mux *mux = NULL;
        struct omap_mux_entry *e;
        const char *mode_name;
-       int found = 0, found_mode, mode0_len = 0;
+       int found = 0, found_mode = 0, mode0_len = 0;
        struct list_head *muxmodes = &partition->muxmodes;
 
        mode_name = strchr(muxname, '.');
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
        list_for_each_entry(e, &partition->muxmodes, node) {
                struct omap_mux *m = &e->mux;
 
-               (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+               (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
                                          m, &omap_mux_dbg_signal_fops);
        }
 }
@@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags,
        if (!partition->base) {
                pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
                        __func__, partition->phys);
+               kfree(partition);
                return -ENODEV;
        }
 
index 125f565..a5a83b3 100644 (file)
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
 
                }
 
-       (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+       (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
                                   &enable_off_mode, &pm_dbg_option_fops);
-       (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+       (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
                                   &sleep_while_idle, &pm_dbg_option_fops);
-       (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+       (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
                                   &wakeup_timer_seconds, &pm_dbg_option_fops);
        (void) debugfs_create_file("wakeup_timer_milliseconds",
-                       S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+                       S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
                        &pm_dbg_option_fops);
        pm_dbg_init_done = 1;
 
index 9e5dc8e..97feb3a 100644 (file)
@@ -134,7 +134,7 @@ static void omap2_enter_full_retention(void)
 
        /* Block console output in case it is on one of the OMAP UARTs */
        if (!is_suspending())
-               if (try_acquire_console_sem())
+               if (!console_trylock())
                        goto no_sleep;
 
        omap_uart_prepare_idle(0);
@@ -151,7 +151,7 @@ static void omap2_enter_full_retention(void)
        omap_uart_resume_idle(0);
 
        if (!is_suspending())
-               release_console_sem();
+               console_unlock();
 
 no_sleep:
        if (omap2_pm_debug) {
index 8cbbead..2f864e4 100644 (file)
@@ -168,9 +168,10 @@ static void omap3_core_restore_context(void)
  * once during boot sequence, but this works as we are not using secure
  * services.
  */
-static void omap3_save_secure_ram_context(u32 target_mpu_state)
+static void omap3_save_secure_ram_context(void)
 {
        u32 ret;
+       int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
 
        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                /*
@@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state)
                pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
                ret = _omap_save_secure_sram((u32 *)
                                __pa(omap3_secure_ram_storage));
-               pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
+               pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
                /* Following is for error tracking, it should not happen */
                if (ret) {
                        printk(KERN_ERR "save_secure_sram() returns %08x\n",
@@ -398,7 +399,7 @@ void omap_sram_idle(void)
        if (!is_suspending())
                if (per_next_state < PWRDM_POWER_ON ||
                    core_next_state < PWRDM_POWER_ON)
-                       if (try_acquire_console_sem())
+                       if (!console_trylock())
                                goto console_still_active;
 
        /* PER */
@@ -481,7 +482,7 @@ void omap_sram_idle(void)
        }
 
        if (!is_suspending())
-               release_console_sem();
+               console_unlock();
 
 console_still_active:
        /* Disable IO-PAD and IO-CHAIN wakeup */
@@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void)
                local_fiq_disable();
 
                omap_dma_global_context_save();
-               omap3_save_secure_ram_context(PWRDM_POWER_ON);
+               omap3_save_secure_ram_context();
                omap_dma_global_context_restore();
 
                local_irq_enable();
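
[Editor's note — a minimal sketch, not part of the patch, of the console_trylock() pattern these PM hunks adopt: the low-power path is only taken if the console lock can be grabbed without sleeping, so a console that is busy printing keeps its UART active. The two UART helpers are hypothetical stubs.]

#include <linux/console.h>

static void my_uart_prepare_idle(void) { /* hypothetical */ }
static void my_uart_resume_idle(void)  { /* hypothetical */ }

static void my_enter_retention(void)
{
	/* console_trylock() returns nonzero only if the lock was taken. */
	if (!console_trylock())
		return;		/* console busy: skip the sleep attempt */

	my_uart_prepare_idle();
	/* ... enter the low-power state here ... */
	my_uart_resume_idle();

	console_unlock();
}
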
index d523389..cf600e2 100644 (file)
@@ -19,7 +19,6 @@
 #include <plat/prcm.h>
 
 #include "powerdomain.h"
-#include "prm-regbits-34xx.h"
 #include "prm.h"
 #include "prm-regbits-24xx.h"
 #include "prm-regbits-34xx.h"
index 729a644..3300ff6 100644 (file)
@@ -38,8 +38,8 @@
 #define OMAP4430_PRCM_MPU_CPU1_INST            0x0800
 
 /* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS      0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS      0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS      0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS      0x0018
 
 
 /*
index 302da74..32e91a9 100644 (file)
@@ -812,7 +812,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
 
        oh->dev_attr = uart;
 
-       acquire_console_sem(); /* in case the earlycon is on the UART */
+       console_lock(); /* in case the earlycon is on the UART */
 
        /*
         * Because of early UART probing, UART did not get idled
@@ -838,7 +838,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
        omap_uart_block_sleep(uart);
        uart->timeout = DEFAULT_TIMEOUT;
 
-       release_console_sem();
+       console_unlock();
 
        if ((cpu_is_omap34xx() && uart->padconf) ||
            (uart->wk_en && uart->wk_mask)) {
index 77ecebf..1a777e3 100644 (file)
@@ -282,6 +282,7 @@ error:
                dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
                        "interrupt handler. Smartreflex will"
                        "not function as desired\n", __func__);
+               kfree(name);
                kfree(sr_info);
                return ret;
 }
@@ -780,8 +781,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val)
        struct omap_sr *sr_info = (struct omap_sr *) data;
 
        if (!sr_info) {
-               pr_warning("%s: omap_sr struct for sr_%s not found\n",
-                       __func__, sr_info->voltdm->name);
+               pr_warning("%s: omap_sr struct not found\n", __func__);
                return -EINVAL;
        }
 
@@ -795,8 +795,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
        struct omap_sr *sr_info = (struct omap_sr *) data;
 
        if (!sr_info) {
-               pr_warning("%s: omap_sr struct for sr_%s not found\n",
-                       __func__, sr_info->voltdm->name);
+               pr_warning("%s: omap_sr struct not found\n", __func__);
                return -EINVAL;
        }
 
@@ -834,7 +833,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 
        if (!pdata) {
                dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_free_devinfo;
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -880,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                ret = sr_late_init(sr_info);
                if (ret) {
                        pr_warning("%s: Error in SR late init\n", __func__);
-                       return ret;
+                       goto err_release_region;
                }
        }
 
@@ -891,17 +891,20 @@ static int __init omap_sr_probe(struct platform_device *pdev)
         * not try to create rest of the debugfs entries.
         */
        vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
-       if (!vdd_dbg_dir)
-               return -EINVAL;
+       if (!vdd_dbg_dir) {
+               ret = -EINVAL;
+               goto err_release_region;
+       }
 
        dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
        if (IS_ERR(dbg_dir)) {
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
                        __func__);
-               return PTR_ERR(dbg_dir);
+               ret = PTR_ERR(dbg_dir);
+               goto err_release_region;
        }
 
-       (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+       (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
                                (void *)sr_info, &pm_sr_fops);
        (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
                        &sr_info->err_weight);
@@ -914,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
        if (IS_ERR(nvalue_dir)) {
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
                        "for n-values\n", __func__);
-               return PTR_ERR(nvalue_dir);
+               ret = PTR_ERR(nvalue_dir);
+               goto err_release_region;
        }
 
        omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -923,24 +927,16 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                        " corresponding vdd vdd_%s. Cannot create debugfs"
                        "entries for n-values\n",
                        __func__, sr_info->voltdm->name);
-               return -ENODATA;
+               ret = -ENODATA;
+               goto err_release_region;
        }
 
        for (i = 0; i < sr_info->nvalue_count; i++) {
-               char *name;
-               char volt_name[32];
-
-               name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
-               if (!name) {
-                       dev_err(&pdev->dev, "%s: Unable to allocate memory"
-                               " for n-value directory name\n",  __func__);
-                       return -ENOMEM;
-               }
+               char name[NVALUE_NAME_LEN + 1];
 
-               strcpy(name, "volt_");
-               sprintf(volt_name, "%d", volt_data[i].volt_nominal);
-               strcat(name, volt_name);
-               (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+               snprintf(name, sizeof(name), "volt_%d",
+                        volt_data[i].volt_nominal);
+               (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
                                &(sr_info->nvalue_table[i].nvalue));
        }
 
@@ -966,7 +962,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
        }
 
        sr_info = _sr_lookup(pdata->voltdm);
-       if (!sr_info) {
+       if (IS_ERR(sr_info)) {
                dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
                        __func__);
                return -EINVAL;
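
[Editor's note — a compact sketch, not part of the patch, of the stack-buffer snprintf() pattern the smartreflex probe now uses for its per-voltage debugfs names, replacing the kzalloc()/strcpy()/strcat() sequence. NVALUE_NAME_LEN and the nominal voltage value are placeholders.]

#include <linux/kernel.h>

#define NVALUE_NAME_LEN	40

static void my_nvalue_name(unsigned long volt_nominal)
{
	char name[NVALUE_NAME_LEN + 1];

	/* One bounded call, no allocation to leak on the error paths. */
	snprintf(name, sizeof(name), "volt_%lu", volt_nominal);

	pr_debug("debugfs entry name: %s\n", name);
}
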
index 4e48e78..0fc550e 100644 (file)
 #include <asm/mach/time.h>
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
+#include <asm/sched_clock.h>
 
 #include "timer-gp.h"
 
+#include <plat/common.h>
+
 /* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
 #define MAX_GPTIMER_ID         12
 
@@ -176,14 +179,19 @@ static void __init omap2_gp_clockevent_init(void)
 /* 
  * When 32k-timer is enabled, don't use GPTimer for clocksource
  * instead, just leave default clocksource which uses the 32k
- * sync counter.  See clocksource setup in see plat-omap/common.c. 
+ * sync counter.  See clocksource setup in plat-omap/counter_32k.c
  */
 
-static inline void __init omap2_gp_clocksource_init(void) {}
+static void __init omap2_gp_clocksource_init(void)
+{
+       omap_init_clocksource_32k();
+}
+
 #else
 /*
  * clocksource
  */
+static DEFINE_CLOCK_DATA(cd);
 static struct omap_dm_timer *gpt_clocksource;
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
@@ -198,6 +206,15 @@ static struct clocksource clocksource_gpt = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static void notrace dmtimer_update_sched_clock(void)
+{
+       u32 cyc;
+
+       cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+       update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 /* Setup free-running counter for clocksource */
 static void __init omap2_gp_clocksource_init(void)
 {
@@ -218,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
 
        omap_dm_timer_set_load_start(gpt, 1, 0);
 
+       init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
        if (clocksource_register_hz(&clocksource_gpt, tick_rate))
                printk(err2, clocksource_gpt.name);
 }
index ed6079c..12be525 100644 (file)
@@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
        strcat(name, vdd->voltdm.name);
 
        vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
+       kfree(name);
        if (IS_ERR(vdd->debug_dir)) {
                pr_warning("%s: Unable to create debugfs directory for"
                        " vdd_%s\n", __func__, vdd->voltdm.name);
index 6b2c800..28f667e 100644 (file)
@@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void)
                        GPIO0_COLIBRI_PXA270_SD_DETECT;
        if (machine_is_colibri300())    /* PXA300 Colibri */
                colibri_mci_platform_data.gpio_card_detect =
-                       GPIO39_COLIBRI_PXA300_SD_DETECT;
+                       GPIO13_COLIBRI_PXA300_SD_DETECT;
        else                            /* PXA320 Colibri */
                colibri_mci_platform_data.gpio_card_detect =
                        GPIO28_COLIBRI_PXA320_SD_DETECT;
index fddb16d..66dd81c 100644 (file)
@@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {
        GPIO4_MMC1_DAT1,
        GPIO5_MMC1_DAT2,
        GPIO6_MMC1_DAT3,
-       GPIO39_GPIO,    /* SD detect */
+       GPIO13_GPIO,    /* GPIO13_COLIBRI_PXA300_SD_DETECT */
 
        /* UHC */
        GPIO0_2_USBH_PEN,
index 388a96f..cb4236e 100644 (file)
@@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {}
 #define GPIO113_COLIBRI_PXA270_TS_IRQ  113
 
 /* GPIO definitions for Colibri PXA300/310 */
-#define GPIO39_COLIBRI_PXA300_SD_DETECT        39
+#define GPIO13_COLIBRI_PXA300_SD_DETECT        13
 
 /* GPIO definitions for Colibri PXA320 */
 #define GPIO28_COLIBRI_PXA320_SD_DETECT        28
index 405b92a..35572c4 100644 (file)
@@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = {
        .pwm_id         = 0,
        .max_brightness = 0xfe,
        .dft_brightness = 0x7e,
-       .pwm_period_ns  = 3500,
+       .pwm_period_ns  = 3500 * 1024,
        .init           = palm27x_backlight_init,
        .notify         = palm27x_backlight_notify,
        .exit           = palm27x_backlight_exit,
index 978e1b2..1807c9a 100644 (file)
@@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state)
 #endif
 
        /* skip registers saving for standby */
-       if (state != PM_SUSPEND_STANDBY) {
+       if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
                pxa_cpu_pm_fns->save(sleep_save);
                /* before sleeping, calculate and save a checksum */
                for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
@@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state)
        pxa_cpu_pm_fns->enter(state);
        cpu_init();
 
-       if (state != PM_SUSPEND_STANDBY) {
+       if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
                /* after sleeping, validate the checksum */
                for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
                        checksum += sleep_save[i];
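
[Editor's note — a brief sketch, not part of the patch, of the optional-callback guard the PXA suspend hunks add: save/restore hooks in an ops structure may legitimately be NULL and are checked before use, while the mandatory enter hook is not. The structure and helper here are hypothetical.]

struct my_pm_ops {
	void (*save)(unsigned long *buf);	/* optional */
	void (*restore)(unsigned long *buf);	/* optional */
	void (*enter)(int state);		/* required */
};

static void my_pm_enter(struct my_pm_ops *ops, unsigned long *buf, int state)
{
	if (ops->save)
		ops->save(buf);

	ops->enter(state);

	if (ops->restore)
		ops->restore(buf);
}
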
index fbc5b77..b166b1d 100644 (file)
@@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = {
        &pxa25x_device_assp,
        &pxa25x_device_pwm0,
        &pxa25x_device_pwm1,
+       &pxa_device_asoc_platform,
 };
 
 static struct sys_device pxa25x_sysdev[] = {
index c31e601..b9b1e5c 100644 (file)
@@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev)
                goto err_rfk_alloc;
        }
 
-       rfkill_set_led_trigger_name(rfk, "tosa-bt");
-
        rc = rfkill_register(rfk);
        if (rc)
                goto err_rfkill;
index af152e7..f2582ec 100644 (file)
@@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = {
        .dev.platform_data = &sharpsl_rom_data,
 };
 
+static struct platform_device wm9712_device = {
+       .name   = "wm9712-codec",
+       .id     = -1,
+};
+
 static struct platform_device *devices[] __initdata = {
        &tosascoop_device,
        &tosascoop_jc_device,
@@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = {
        &tosaled_device,
        &tosa_bt_device,
        &sharpsl_rom_device,
+       &wm9712_device,
 };
 
 static void tosa_poweroff(void)
index b4575ae..7ca138a 100644 (file)
@@ -2,52 +2,56 @@ menu "RealView platform type"
        depends on ARCH_REALVIEW
 
 config MACH_REALVIEW_EB
-       bool "Support RealView/EB platform"
+       bool "Support RealView(R) Emulation Baseboard"
        select ARM_GIC
        help
-         Include support for the ARM(R) RealView Emulation Baseboard platform.
+         Include support for the ARM(R) RealView(R) Emulation Baseboard
+         platform.
 
 config REALVIEW_EB_A9MP
-       bool "Support Multicore Cortex-A9"
+       bool "Support Multicore Cortex-A9 Tile"
        depends on MACH_REALVIEW_EB
        select CPU_V7
        help
-         Enable support for the Cortex-A9MPCore tile on the Realview platform.
+         Enable support for the Cortex-A9MPCore tile fitted to the
+         RealView(R) Emulation Baseboard platform.
 
 config REALVIEW_EB_ARM11MP
-       bool "Support ARM11MPCore tile"
+       bool "Support ARM11MPCore Tile"
        depends on MACH_REALVIEW_EB
        select CPU_V6
        select ARCH_HAS_BARRIERS if SMP
        help
-         Enable support for the ARM11MPCore tile on the Realview platform.
+         Enable support for the ARM11MPCore tile fitted to the RealView(R)
+         Emulation Baseboard platform.
 
 config REALVIEW_EB_ARM11MP_REVB
-       bool "Support ARM11MPCore RevB tile"
+       bool "Support ARM11MPCore RevB Tile"
        depends on REALVIEW_EB_ARM11MP
        help
-         Enable support for the ARM11MPCore RevB tile on the Realview
-         platform. Since there are device address differences, a
-         kernel built with this option enabled is not compatible with
-         other revisions of the ARM11MPCore tile.
+         Enable support for the ARM11MPCore Revision B tile on the
+         RealView(R) Emulation Baseboard platform. Since there are device
+         address differences, a kernel built with this option enabled is
+         not compatible with other revisions of the ARM11MPCore tile.
 
 config MACH_REALVIEW_PB11MP
-       bool "Support RealView/PB11MPCore platform"
+       bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
        select CPU_V6
        select ARM_GIC
        select HAVE_PATA_PLATFORM
        select ARCH_HAS_BARRIERS if SMP
        help
-         Include support for the ARM(R) RealView MPCore Platform Baseboard.
-         PB11MPCore is a platform with an on-board ARM11MPCore and has
+         Include support for the ARM(R) RealView(R) Platform Baseboard for
+         the ARM11MPCore.  This platform has an on-board ARM11MPCore and has
          support for PCI-E and Compact Flash.
 
 config MACH_REALVIEW_PB1176
-       bool "Support RealView/PB1176 platform"
+       bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
        select CPU_V6
        select ARM_GIC
        help
-         Include support for the ARM(R) RealView ARM1176 Platform Baseboard.
+         Include support for the ARM(R) RealView(R) Platform Baseboard for
+         ARM1176JZF-S.
 
 config REALVIEW_PB1176_SECURE_FLASH
        bool "Allow access to the secure flash memory block"
@@ -59,23 +63,24 @@ config REALVIEW_PB1176_SECURE_FLASH
          block (64MB @ 0x3c000000) is required.
 
 config MACH_REALVIEW_PBA8
-       bool "Support RealView/PB-A8 platform"
+       bool "Support RealView(R) Platform Baseboard for Cortex(tm)-A8"
        select CPU_V7
        select ARM_GIC
        select HAVE_PATA_PLATFORM
        help
-         Include support for the ARM(R) RealView Cortex-A8 Platform Baseboard.
-         PB-A8 is a platform with an on-board Cortex-A8 and has support for
-         PCI-E and Compact Flash.
+         Include support for the ARM(R) RealView(R) Platform Baseboard for
+         Cortex(tm)-A8.  This platform has an on-board Cortex-A8 and has
+         support for PCI-E and Compact Flash.
 
 config MACH_REALVIEW_PBX
-       bool "Support RealView/PBX platform"
+       bool "Support RealView(R) Platform Baseboard Explore"
        select ARM_GIC
        select HAVE_PATA_PLATFORM
        select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
        select ZONE_DMA if SPARSEMEM
        help
-         Include support for the ARM(R) RealView PBX platform.
+         Include support for the ARM(R) RealView(R) Platform Baseboard
+         Explore.
 
 config REALVIEW_HIGH_PHYS_OFFSET
        bool "High physical base address for the RealView platform"
index a22bf67..6959d13 100644 (file)
@@ -41,7 +41,7 @@ volatile int __cpuinitdata pen_release = -1;
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void __cpuinit write_pen_release(int val)
 {
        pen_release = val;
        smp_wmb();
index a0cb258..50825a3 100644 (file)
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
        select POWER_SUPPLY
        select MACH_NEO1973
        select S3C2410_PWM
+       select S3C_DEV_USB_HOST
        help
           Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
 
index 953331d..3a56a22 100644 (file)
 #define GTA02v3_GPIO_nUSB_FLT  S3C2410_GPG(10) /* v3 + v4 only */
 #define GTA02v3_GPIO_nGSM_OC   S3C2410_GPG(11) /* v3 + v4 only */
 
-#define GTA02_GPIO_AMP_SHUT    S3C2440_GPJ1    /* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10       S3C2440_GPJ2
-#define GTA02_GPIO_HP_IN       S3C2440_GPJ2    /* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0                S3C2440_GPJ3    /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN     S3C2440_GPJ4
-#define GTA02_GPIO_3D_RESET    S3C2440_GPJ5
-#define GTA02_GPIO_nDL_GSM     S3C2440_GPJ6    /* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0  S3C2440_GPJ7
-#define GTA02v1_GPIO_BAT_ID    S3C2440_GPJ8
-#define GTA02_GPIO_KEEPACT     S3C2440_GPJ8
-#define GTA02v1_GPIO_HP_IN     S3C2440_GPJ10
-#define GTA02_CHIP_PWD         S3C2440_GPJ11   /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12   /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT    S3C2410_GPJ(1)  /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10       S3C2410_GPJ(2)
+#define GTA02_GPIO_HP_IN       S3C2410_GPJ(2)  /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0                S3C2410_GPJ(3)  /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN     S3C2410_GPJ(4)
+#define GTA02_GPIO_3D_RESET    S3C2410_GPJ(5)
+#define GTA02_GPIO_nDL_GSM     S3C2410_GPJ(6)  /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0  S3C2410_GPJ(7)
+#define GTA02v1_GPIO_BAT_ID    S3C2410_GPJ(8)
+#define GTA02_GPIO_KEEPACT     S3C2410_GPJ(8)
+#define GTA02v1_GPIO_HP_IN     S3C2410_GPJ(10)
+#define GTA02_CHIP_PWD         S3C2410_GPJ(11) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
 
 #define GTA02_IRQ_GSENSOR_1    IRQ_EINT0
 #define GTA02_IRQ_MODEM                IRQ_EINT1
index dd37820..fdfc4d5 100644 (file)
@@ -150,6 +150,12 @@ static struct clk init_clocks_off[] = {
                .parent         = &clk_p,
                .enable         = s3c64xx_pclk_ctrl,
                .ctrlbit        = S3C_CLKCON_PCLK_IIC,
+       }, {
+               .name           = "i2c",
+               .id             = 1,
+               .parent         = &clk_p,
+               .enable         = s3c64xx_pclk_ctrl,
+               .ctrlbit        = S3C6410_CLKCON_PCLK_I2C1,
        }, {
                .name           = "iis",
                .id             = 0,
index 135db1b..c35585c 100644 (file)
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
 
        regptr = regs + PL080_Cx_BASE(0);
 
-       for (ch = 0; ch < 8; ch++, chno++, chptr++) {
-               printk(KERN_INFO "%s: registering DMA %d (%p)\n",
-                      __func__, chno, regptr);
+       for (ch = 0; ch < 8; ch++, chptr++) {
+               pr_debug("%s: registering DMA %d (%p)\n",
+                        __func__, chno + ch, regptr);
 
                chptr->bit = 1 << ch;
-               chptr->number = chno;
+               chptr->number = chno + ch;
                chptr->dmac = dmac;
                chptr->regs = regptr;
                regptr += PL080_Cx_STRIDE;
@@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
        /* for the moment, permanently enable the controller */
        writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
 
-       printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
+       printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
+              irq, regs, chno, chno+8);
 
        return 0;
 
index fd99a82..92b0908 100644 (file)
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
        .get_pull       = s3c_gpio_getpull_updown,
 };
 
-int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 {
        return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
 }
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
        },
 };
 
-int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
 {
        return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
 }
index e85192a..a80a316 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/smsc911x.h>
 #include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #ifdef CONFIG_SMDK6410_WM1190_EV1
 #include <linux/mfd/wm8350/core.h>
@@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = {
 /* VDD_UH_MMC, LDO5 on J5 */
 static struct regulator_init_data smdk6410_vdduh_mmc = {
        .constraints = {
-               .name = "PVDD_UH/PVDD_MMC",
+               .name = "PVDD_UH+PVDD_MMC",
                .always_on = 1,
        },
 };
@@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
 /* S3C64xx internal logic & PLL */
 static struct regulator_init_data wm8350_dcdc1_data = {
        .constraints = {
-               .name = "PVDD_INT/PVDD_PLL",
+               .name = "PVDD_INT+PVDD_PLL",
                .min_uV = 1200000,
                .max_uV = 1200000,
                .always_on = 1,
@@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
 
 static struct regulator_init_data wm8350_dcdc4_data = {
        .constraints = {
-               .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
+               .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
                .min_uV = 3000000,
                .max_uV = 3000000,
                .always_on = 1,
@@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
 /* OTGi/1190-EV1 HPVDD & AVDD */
 static struct regulator_init_data wm8350_ldo4_data = {
        .constraints = {
-               .name = "PVDD_OTGI/HPVDD/AVDD",
+               .name = "PVDD_OTGI+HPVDD+AVDD",
                .min_uV = 1200000,
                .max_uV = 1200000,
                .apply_uV = 1,
@@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {
 
 static struct regulator_init_data wm1192_dcdc3 = {
        .constraints = {
-               .name = "PVDD_MEM/PVDD_GPS",
+               .name = "PVDD_MEM+PVDD_GPS",
                .always_on = 1,
        },
 };
@@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
 
 static struct regulator_init_data wm1192_ldo1 = {
        .constraints = {
-               .name = "PVDD_LCD/PVDD_EXT",
+               .name = "PVDD_LCD+PVDD_EXT",
                .always_on = 1,
        },
        .consumer_supplies = wm1192_ldo1_consumers,
index f8ed0d2..1d4d0ee 100644 (file)
@@ -17,7 +17,7 @@
 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 {
        /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
-       s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
+       s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
 
        /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
        s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
index 1a94203..f344a22 100644 (file)
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
        else
                ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
 
-       printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
+       pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
        writel(ctrl2, r + S3C_SDHCI_CONTROL2);
        writel(ctrl3, r + S3C_SDHCI_CONTROL3);
 }
index 203dd5a..058dab4 100644 (file)
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5p6442/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * S5P6442 - Memory map definitions
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5P6442_PA_CHIPID      (0xE0000000)
-#define S5P_PA_CHIPID          S5P6442_PA_CHIPID
+#define S5P6442_PA_SDRAM       0x20000000
 
-#define S5P6442_PA_SYSCON      (0xE0100000)
-#define S5P_PA_SYSCON          S5P6442_PA_SYSCON
+#define S5P6442_PA_I2S0                0xC0B00000
+#define S5P6442_PA_I2S1                0xF2200000
 
-#define S5P6442_PA_GPIO                (0xE0200000)
+#define S5P6442_PA_CHIPID      0xE0000000
 
-#define S5P6442_PA_VIC0                (0xE4000000)
-#define S5P6442_PA_VIC1                (0xE4100000)
-#define S5P6442_PA_VIC2                (0xE4200000)
+#define S5P6442_PA_SYSCON      0xE0100000
 
-#define S5P6442_PA_SROMC       (0xE7000000)
-#define S5P_PA_SROMC           S5P6442_PA_SROMC
+#define S5P6442_PA_GPIO                0xE0200000
 
-#define S5P6442_PA_MDMA                0xE8000000
-#define S5P6442_PA_PDMA                0xE9000000
+#define S5P6442_PA_VIC0                0xE4000000
+#define S5P6442_PA_VIC1                0xE4100000
+#define S5P6442_PA_VIC2                0xE4200000
 
-#define S5P6442_PA_TIMER       (0xEA000000)
-#define S5P_PA_TIMER           S5P6442_PA_TIMER
+#define S5P6442_PA_SROMC       0xE7000000
 
-#define S5P6442_PA_SYSTIMER    (0xEA100000)
+#define S5P6442_PA_MDMA                0xE8000000
+#define S5P6442_PA_PDMA                0xE9000000
 
-#define S5P6442_PA_WATCHDOG    (0xEA200000)
+#define S5P6442_PA_TIMER       0xEA000000
 
-#define S5P6442_PA_UART                (0xEC000000)
+#define S5P6442_PA_SYSTIMER    0xEA100000
 
-#define S5P_PA_UART0           (S5P6442_PA_UART + 0x0)
-#define S5P_PA_UART1           (S5P6442_PA_UART + 0x400)
-#define S5P_PA_UART2           (S5P6442_PA_UART + 0x800)
-#define S5P_SZ_UART            SZ_256
+#define S5P6442_PA_WATCHDOG    0xEA200000
 
-#define S5P6442_PA_IIC0                (0xEC100000)
+#define S5P6442_PA_UART                0xEC000000
 
-#define S5P6442_PA_SDRAM       (0x20000000)
-#define S5P_PA_SDRAM           S5P6442_PA_SDRAM
+#define S5P6442_PA_IIC0                0xEC100000
 
 #define S5P6442_PA_SPI         0xEC300000
 
-/* I2S */
-#define S5P6442_PA_I2S0                0xC0B00000
-#define S5P6442_PA_I2S1                0xF2200000
-
-/* PCM */
 #define S5P6442_PA_PCM0                0xF2400000
 #define S5P6442_PA_PCM1                0xF2500000
 
-/* compatibiltiy defines. */
+/* Compatibility Defines */
+
+#define S3C_PA_IIC             S5P6442_PA_IIC0
 #define S3C_PA_WDT             S5P6442_PA_WATCHDOG
+
+#define S5P_PA_CHIPID          S5P6442_PA_CHIPID
+#define S5P_PA_SDRAM           S5P6442_PA_SDRAM
+#define S5P_PA_SROMC           S5P6442_PA_SROMC
+#define S5P_PA_SYSCON          S5P6442_PA_SYSCON
+#define S5P_PA_TIMER           S5P6442_PA_TIMER
+
+/* UART */
+
 #define S3C_PA_UART            S5P6442_PA_UART
-#define S3C_PA_IIC             S5P6442_PA_IIC0
+
+#define S5P_PA_UART(x)         (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0           S5P_PA_UART(0)
+#define S5P_PA_UART1           S5P_PA_UART(1)
+#define S5P_PA_UART2           S5P_PA_UART(2)
+
+#define S5P_SZ_UART            SZ_256
 
 #endif /* __ASM_ARCH_MAP_H */
index 5486c8f..adb5f29 100644 (file)
@@ -23,7 +23,7 @@
 #define S5P6440_GPIO_A_NR      (6)
 #define S5P6440_GPIO_B_NR      (7)
 #define S5P6440_GPIO_C_NR      (8)
-#define S5P6440_GPIO_F_NR      (2)
+#define S5P6440_GPIO_F_NR      (16)
 #define S5P6440_GPIO_G_NR      (7)
 #define S5P6440_GPIO_H_NR      (10)
 #define S5P6440_GPIO_I_NR      (16)
@@ -36,7 +36,7 @@
 #define S5P6450_GPIO_B_NR      (7)
 #define S5P6450_GPIO_C_NR      (8)
 #define S5P6450_GPIO_D_NR      (8)
-#define S5P6450_GPIO_F_NR      (2)
+#define S5P6450_GPIO_F_NR      (16)
 #define S5P6450_GPIO_G_NR      (14)
 #define S5P6450_GPIO_H_NR      (10)
 #define S5P6450_GPIO_I_NR      (16)
index a9365e5..95c9125 100644 (file)
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5p64x0/include/mach/map.h
  *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
  * S5P64X0 - Memory map definitions
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5P64X0_PA_SDRAM       (0x20000000)
+#define S5P64X0_PA_SDRAM       0x20000000
 
-#define S5P64X0_PA_CHIPID      (0xE0000000)
-#define S5P_PA_CHIPID          S5P64X0_PA_CHIPID
-
-#define S5P64X0_PA_SYSCON      (0xE0100000)
-#define S5P_PA_SYSCON          S5P64X0_PA_SYSCON
-
-#define S5P64X0_PA_GPIO                (0xE0308000)
-
-#define S5P64X0_PA_VIC0                (0xE4000000)
-#define S5P64X0_PA_VIC1                (0xE4100000)
+#define S5P64X0_PA_CHIPID      0xE0000000
 
-#define S5P64X0_PA_SROMC       (0xE7000000)
-#define S5P_PA_SROMC           S5P64X0_PA_SROMC
-
-#define S5P64X0_PA_PDMA                (0xE9000000)
-
-#define S5P64X0_PA_TIMER       (0xEA000000)
-#define S5P_PA_TIMER           S5P64X0_PA_TIMER
+#define S5P64X0_PA_SYSCON      0xE0100000
 
-#define S5P64X0_PA_RTC         (0xEA100000)
+#define S5P64X0_PA_GPIO                0xE0308000
 
-#define S5P64X0_PA_WDT         (0xEA200000)
+#define S5P64X0_PA_VIC0                0xE4000000
+#define S5P64X0_PA_VIC1                0xE4100000
 
-#define S5P6440_PA_UART(x)     (0xEC000000 + ((x) * S3C_UART_OFFSET))
-#define S5P6450_PA_UART(x)     ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
+#define S5P64X0_PA_SROMC       0xE7000000
 
-#define S5P_PA_UART0           S5P6450_PA_UART(0)
-#define S5P_PA_UART1           S5P6450_PA_UART(1)
-#define S5P_PA_UART2           S5P6450_PA_UART(2)
-#define S5P_PA_UART3           S5P6450_PA_UART(3)
-#define S5P_PA_UART4           S5P6450_PA_UART(4)
-#define S5P_PA_UART5           S5P6450_PA_UART(5)
+#define S5P64X0_PA_PDMA                0xE9000000
 
-#define S5P_SZ_UART            SZ_256
+#define S5P64X0_PA_TIMER       0xEA000000
+#define S5P64X0_PA_RTC         0xEA100000
+#define S5P64X0_PA_WDT         0xEA200000
 
-#define S5P6440_PA_IIC0                (0xEC104000)
-#define S5P6440_PA_IIC1                (0xEC20F000)
-#define S5P6450_PA_IIC0                (0xEC100000)
-#define S5P6450_PA_IIC1                (0xEC200000)
+#define S5P6440_PA_IIC0                0xEC104000
+#define S5P6440_PA_IIC1                0xEC20F000
+#define S5P6450_PA_IIC0                0xEC100000
+#define S5P6450_PA_IIC1                0xEC200000
 
-#define S5P64X0_PA_SPI0                (0xEC400000)
-#define S5P64X0_PA_SPI1                (0xEC500000)
+#define S5P64X0_PA_SPI0                0xEC400000
+#define S5P64X0_PA_SPI1                0xEC500000
 
-#define S5P64X0_PA_HSOTG       (0xED100000)
+#define S5P64X0_PA_HSOTG       0xED100000
 
 #define S5P64X0_PA_HSMMC(x)    (0xED800000 + ((x) * 0x100000))
 
-#define S5P64X0_PA_I2S         (0xF2000000)
+#define S5P64X0_PA_I2S         0xF2000000
 #define S5P6450_PA_I2S1                0xF2800000
 #define S5P6450_PA_I2S2                0xF2900000
 
-#define S5P64X0_PA_PCM         (0xF2100000)
+#define S5P64X0_PA_PCM         0xF2100000
 
-#define S5P64X0_PA_ADC         (0xF3000000)
+#define S5P64X0_PA_ADC         0xF3000000
 
-/* compatibiltiy defines. */
+/* Compatibility Defines */
 
 #define S3C_PA_HSMMC0          S5P64X0_PA_HSMMC(0)
 #define S3C_PA_HSMMC1          S5P64X0_PA_HSMMC(1)
 #define S3C_PA_RTC             S5P64X0_PA_RTC
 #define S3C_PA_WDT             S5P64X0_PA_WDT
 
+#define S5P_PA_CHIPID          S5P64X0_PA_CHIPID
+#define S5P_PA_SROMC           S5P64X0_PA_SROMC
+#define S5P_PA_SYSCON          S5P64X0_PA_SYSCON
+#define S5P_PA_TIMER           S5P64X0_PA_TIMER
+
 #define SAMSUNG_PA_ADC         S5P64X0_PA_ADC
 
+/* UART */
+
+#define S5P6440_PA_UART(x)     (0xEC000000 + ((x) * S3C_UART_OFFSET))
+#define S5P6450_PA_UART(x)     ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
+
+#define S5P_PA_UART0           S5P6450_PA_UART(0)
+#define S5P_PA_UART1           S5P6450_PA_UART(1)
+#define S5P_PA_UART2           S5P6450_PA_UART(2)
+#define S5P_PA_UART3           S5P6450_PA_UART(3)
+#define S5P_PA_UART4           S5P6450_PA_UART(4)
+#define S5P_PA_UART5           S5P6450_PA_UART(5)
+
+#define S5P_SZ_UART            SZ_256
+
 #endif /* __ASM_ARCH_MAP_H */
index 328467b..ccbe6b7 100644 (file)
@@ -1,4 +1,7 @@
 /* linux/arch/arm/mach-s5pc100/include/mach/map.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
  *
  * Copyright 2009 Samsung Electronics Co.
  *     Byungho Min <bhmin@samsung.com>
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-/*
- * map-base.h has already defined virtual memory address
- * S3C_VA_IRQ          S3C_ADDR(0x00000000)    irq controller(s)
- * S3C_VA_SYS          S3C_ADDR(0x00100000)    system control
- * S3C_VA_MEM          S3C_ADDR(0x00200000)    system control (not used)
- * S3C_VA_TIMER                S3C_ADDR(0x00300000)    timer block
- * S3C_VA_WATCHDOG     S3C_ADDR(0x00400000)    watchdog
- * S3C_VA_UART         S3C_ADDR(0x01000000)    UART
- *
- * S5PC100 specific virtual memory address can be defined here
- * S5PC1XX_VA_GPIO     S3C_ADDR(0x00500000)    GPIO
- *
- */
+#define S5PC100_PA_SDRAM               0x20000000
+
+#define S5PC100_PA_ONENAND             0xE7100000
+#define S5PC100_PA_ONENAND_BUF         0xB0000000
+
+#define S5PC100_PA_CHIPID              0xE0000000
 
-#define S5PC100_PA_ONENAND_BUF (0xB0000000)
-#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
+#define S5PC100_PA_SYSCON              0xE0100000
 
-/* Chip ID */
+#define S5PC100_PA_OTHERS              0xE0200000
 
-#define S5PC100_PA_CHIPID      (0xE0000000)
-#define S5P_PA_CHIPID          S5PC100_PA_CHIPID
+#define S5PC100_PA_GPIO                        0xE0300000
 
-#define S5PC100_PA_SYSCON      (0xE0100000)
-#define S5P_PA_SYSCON          S5PC100_PA_SYSCON
+#define S5PC100_PA_VIC0                        0xE4000000
+#define S5PC100_PA_VIC1                        0xE4100000
+#define S5PC100_PA_VIC2                        0xE4200000
 
-#define S5PC100_PA_OTHERS      (0xE0200000)
-#define S5PC100_VA_OTHERS      (S3C_VA_SYS + 0x10000)
+#define S5PC100_PA_SROMC               0xE7000000
 
-#define S5PC100_PA_GPIO                (0xE0300000)
-#define S5PC1XX_VA_GPIO                S3C_ADDR(0x00500000)
+#define S5PC100_PA_CFCON               0xE7800000
 
-/* Interrupt */
-#define S5PC100_PA_VIC0                (0xE4000000)
-#define S5PC100_PA_VIC1                (0xE4100000)
-#define S5PC100_PA_VIC2                (0xE4200000)
-#define S5PC100_VA_VIC         S3C_VA_IRQ
-#define S5PC100_VA_VIC_OFFSET  0x10000
-#define S5PC1XX_VA_VIC(x)      (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
+#define S5PC100_PA_MDMA                        0xE8100000
+#define S5PC100_PA_PDMA0               0xE9000000
+#define S5PC100_PA_PDMA1               0xE9200000
 
-#define S5PC100_PA_SROMC       (0xE7000000)
-#define S5P_PA_SROMC           S5PC100_PA_SROMC
+#define S5PC100_PA_TIMER               0xEA000000
+#define S5PC100_PA_SYSTIMER            0xEA100000
+#define S5PC100_PA_WATCHDOG            0xEA200000
+#define S5PC100_PA_RTC                 0xEA300000
 
-#define S5PC100_PA_ONENAND     (0xE7100000)
+#define S5PC100_PA_UART                        0xEC000000
 
-#define S5PC100_PA_CFCON       (0xE7800000)
+#define S5PC100_PA_IIC0                        0xEC100000
+#define S5PC100_PA_IIC1                        0xEC200000
 
-/* DMA */
-#define S5PC100_PA_MDMA                (0xE8100000)
-#define S5PC100_PA_PDMA0       (0xE9000000)
-#define S5PC100_PA_PDMA1       (0xE9200000)
+#define S5PC100_PA_SPI0                        0xEC300000
+#define S5PC100_PA_SPI1                        0xEC400000
+#define S5PC100_PA_SPI2                        0xEC500000
 
-/* Timer */
-#define S5PC100_PA_TIMER       (0xEA000000)
-#define S5P_PA_TIMER           S5PC100_PA_TIMER
+#define S5PC100_PA_USB_HSOTG           0xED200000
+#define S5PC100_PA_USB_HSPHY           0xED300000
 
-#define S5PC100_PA_SYSTIMER    (0xEA100000)
+#define S5PC100_PA_HSMMC(x)            (0xED800000 + ((x) * 0x100000))
 
-#define S5PC100_PA_WATCHDOG    (0xEA200000)
-#define S5PC100_PA_RTC         (0xEA300000)
+#define S5PC100_PA_FB                  0xEE000000
 
-#define S5PC100_PA_UART                (0xEC000000)
+#define S5PC100_PA_FIMC0               0xEE200000
+#define S5PC100_PA_FIMC1               0xEE300000
+#define S5PC100_PA_FIMC2               0xEE400000
 
-#define S5P_PA_UART0           (S5PC100_PA_UART + 0x0)
-#define S5P_PA_UART1           (S5PC100_PA_UART + 0x400)
-#define S5P_PA_UART2           (S5PC100_PA_UART + 0x800)
-#define S5P_PA_UART3           (S5PC100_PA_UART + 0xC00)
-#define S5P_SZ_UART            SZ_256
+#define S5PC100_PA_I2S0                        0xF2000000
+#define S5PC100_PA_I2S1                        0xF2100000
+#define S5PC100_PA_I2S2                        0xF2200000
 
-#define S5PC100_PA_IIC0                (0xEC100000)
-#define S5PC100_PA_IIC1                (0xEC200000)
+#define S5PC100_PA_AC97                        0xF2300000
 
-/* SPI */
-#define S5PC100_PA_SPI0                0xEC300000
-#define S5PC100_PA_SPI1                0xEC400000
-#define S5PC100_PA_SPI2                0xEC500000
+#define S5PC100_PA_PCM0                        0xF2400000
+#define S5PC100_PA_PCM1                        0xF2500000
 
-/* USB HS OTG */
-#define S5PC100_PA_USB_HSOTG   (0xED200000)
-#define S5PC100_PA_USB_HSPHY   (0xED300000)
+#define S5PC100_PA_SPDIF               0xF2600000
 
-#define S5PC100_PA_FB          (0xEE000000)
+#define S5PC100_PA_TSADC               0xF3000000
 
-#define S5PC100_PA_FIMC0       (0xEE200000)
-#define S5PC100_PA_FIMC1       (0xEE300000)
-#define S5PC100_PA_FIMC2       (0xEE400000)
+#define S5PC100_PA_KEYPAD              0xF3100000
 
-#define S5PC100_PA_I2S0                (0xF2000000)
-#define S5PC100_PA_I2S1                (0xF2100000)
-#define S5PC100_PA_I2S2                (0xF2200000)
+/* Compatibility Defines */
 
-#define S5PC100_PA_AC97                0xF2300000
+#define S3C_PA_FB                      S5PC100_PA_FB
+#define S3C_PA_HSMMC0                  S5PC100_PA_HSMMC(0)
+#define S3C_PA_HSMMC1                  S5PC100_PA_HSMMC(1)
+#define S3C_PA_HSMMC2                  S5PC100_PA_HSMMC(2)
+#define S3C_PA_IIC                     S5PC100_PA_IIC0
+#define S3C_PA_IIC1                    S5PC100_PA_IIC1
+#define S3C_PA_KEYPAD                  S5PC100_PA_KEYPAD
+#define S3C_PA_ONENAND                 S5PC100_PA_ONENAND
+#define S3C_PA_ONENAND_BUF             S5PC100_PA_ONENAND_BUF
+#define S3C_PA_RTC                     S5PC100_PA_RTC
+#define S3C_PA_TSADC                   S5PC100_PA_TSADC
+#define S3C_PA_USB_HSOTG               S5PC100_PA_USB_HSOTG
+#define S3C_PA_USB_HSPHY               S5PC100_PA_USB_HSPHY
+#define S3C_PA_WDT                     S5PC100_PA_WATCHDOG
 
-/* PCM */
-#define S5PC100_PA_PCM0                0xF2400000
-#define S5PC100_PA_PCM1                0xF2500000
+#define S5P_PA_CHIPID                  S5PC100_PA_CHIPID
+#define S5P_PA_FIMC0                   S5PC100_PA_FIMC0
+#define S5P_PA_FIMC1                   S5PC100_PA_FIMC1
+#define S5P_PA_FIMC2                   S5PC100_PA_FIMC2
+#define S5P_PA_SDRAM                   S5PC100_PA_SDRAM
+#define S5P_PA_SROMC                   S5PC100_PA_SROMC
+#define S5P_PA_SYSCON                  S5PC100_PA_SYSCON
+#define S5P_PA_TIMER                   S5PC100_PA_TIMER
 
-#define S5PC100_PA_SPDIF       0xF2600000
+#define SAMSUNG_PA_ADC                 S5PC100_PA_TSADC
+#define SAMSUNG_PA_CFCON               S5PC100_PA_CFCON
+#define SAMSUNG_PA_KEYPAD              S5PC100_PA_KEYPAD
 
-#define S5PC100_PA_TSADC       (0xF3000000)
+#define S5PC100_VA_OTHERS              (S3C_VA_SYS + 0x10000)
 
-/* KEYPAD */
-#define S5PC100_PA_KEYPAD      (0xF3100000)
+#define S3C_SZ_ONENAND_BUF             (SZ_256M - SZ_32M)
 
-#define S5PC100_PA_HSMMC(x)    (0xED800000 + ((x) * 0x100000))
+/* UART */
 
-#define S5PC100_PA_SDRAM       (0x20000000)
-#define S5P_PA_SDRAM           S5PC100_PA_SDRAM
+#define S3C_PA_UART                    S5PC100_PA_UART
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART            S5PC100_PA_UART
-#define S3C_PA_IIC             S5PC100_PA_IIC0
-#define S3C_PA_IIC1            S5PC100_PA_IIC1
-#define S3C_PA_FB              S5PC100_PA_FB
-#define S3C_PA_G2D             S5PC100_PA_G2D
-#define S3C_PA_G3D             S5PC100_PA_G3D
-#define S3C_PA_JPEG            S5PC100_PA_JPEG
-#define S3C_PA_ROTATOR         S5PC100_PA_ROTATOR
-#define S5P_VA_VIC0            S5PC1XX_VA_VIC(0)
-#define S5P_VA_VIC1            S5PC1XX_VA_VIC(1)
-#define S5P_VA_VIC2            S5PC1XX_VA_VIC(2)
-#define S3C_PA_USB_HSOTG       S5PC100_PA_USB_HSOTG
-#define S3C_PA_USB_HSPHY       S5PC100_PA_USB_HSPHY
-#define S3C_PA_HSMMC0          S5PC100_PA_HSMMC(0)
-#define S3C_PA_HSMMC1          S5PC100_PA_HSMMC(1)
-#define S3C_PA_HSMMC2          S5PC100_PA_HSMMC(2)
-#define S3C_PA_KEYPAD          S5PC100_PA_KEYPAD
-#define S3C_PA_WDT             S5PC100_PA_WATCHDOG
-#define S3C_PA_TSADC           S5PC100_PA_TSADC
-#define S3C_PA_ONENAND         S5PC100_PA_ONENAND
-#define S3C_PA_ONENAND_BUF     S5PC100_PA_ONENAND_BUF
-#define S3C_SZ_ONENAND_BUF     S5PC100_SZ_ONENAND_BUF
-#define S3C_PA_RTC             S5PC100_PA_RTC
-
-#define SAMSUNG_PA_ADC         S5PC100_PA_TSADC
-#define SAMSUNG_PA_CFCON       S5PC100_PA_CFCON
-#define SAMSUNG_PA_KEYPAD      S5PC100_PA_KEYPAD
+#define S5P_PA_UART(x)                 (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0                   S5P_PA_UART(0)
+#define S5P_PA_UART1                   S5P_PA_UART(1)
+#define S5P_PA_UART2                   S5P_PA_UART(2)
+#define S5P_PA_UART3                   S5P_PA_UART(3)
 
-#define S5P_PA_FIMC0           S5PC100_PA_FIMC0
-#define S5P_PA_FIMC1           S5PC100_PA_FIMC1
-#define S5P_PA_FIMC2           S5PC100_PA_FIMC2
+#define S5P_SZ_UART                    SZ_256
 
-#endif /* __ASM_ARCH_C100_MAP_H */
+#endif /* __ASM_ARCH_MAP_H */
index 3611492..1dd5883 100644 (file)
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5pv210/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * S5PV210 - Memory map definitions
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5PV210_PA_SROM_BANK5  (0xA8000000)
+#define S5PV210_PA_SDRAM               0x20000000
 
-#define S5PC110_PA_ONENAND     (0xB0000000)
-#define S5P_PA_ONENAND         S5PC110_PA_ONENAND
+#define S5PV210_PA_SROM_BANK5          0xA8000000
 
-#define S5PC110_PA_ONENAND_DMA (0xB0600000)
-#define S5P_PA_ONENAND_DMA     S5PC110_PA_ONENAND_DMA
+#define S5PC110_PA_ONENAND             0xB0000000
+#define S5PC110_PA_ONENAND_DMA         0xB0600000
 
-#define S5PV210_PA_CHIPID      (0xE0000000)
-#define S5P_PA_CHIPID          S5PV210_PA_CHIPID
+#define S5PV210_PA_CHIPID              0xE0000000
 
-#define S5PV210_PA_SYSCON      (0xE0100000)
-#define S5P_PA_SYSCON          S5PV210_PA_SYSCON
+#define S5PV210_PA_SYSCON              0xE0100000
 
-#define S5PV210_PA_GPIO                (0xE0200000)
+#define S5PV210_PA_GPIO                        0xE0200000
 
-/* SPI */
-#define S5PV210_PA_SPI0                0xE1300000
-#define S5PV210_PA_SPI1                0xE1400000
+#define S5PV210_PA_SPDIF               0xE1100000
 
-#define S5PV210_PA_KEYPAD      (0xE1600000)
+#define S5PV210_PA_SPI0                        0xE1300000
+#define S5PV210_PA_SPI1                        0xE1400000
 
-#define S5PV210_PA_IIC0                (0xE1800000)
-#define S5PV210_PA_IIC1                (0xFAB00000)
-#define S5PV210_PA_IIC2                (0xE1A00000)
+#define S5PV210_PA_KEYPAD              0xE1600000
 
-#define S5PV210_PA_TIMER       (0xE2500000)
-#define S5P_PA_TIMER           S5PV210_PA_TIMER
+#define S5PV210_PA_ADC                 0xE1700000
 
-#define S5PV210_PA_SYSTIMER    (0xE2600000)
+#define S5PV210_PA_IIC0                        0xE1800000
+#define S5PV210_PA_IIC1                        0xFAB00000
+#define S5PV210_PA_IIC2                        0xE1A00000
 
-#define S5PV210_PA_WATCHDOG    (0xE2700000)
+#define S5PV210_PA_AC97                        0xE2200000
 
-#define S5PV210_PA_RTC         (0xE2800000)
-#define S5PV210_PA_UART                (0xE2900000)
+#define S5PV210_PA_PCM0                        0xE2300000
+#define S5PV210_PA_PCM1                        0xE1200000
+#define S5PV210_PA_PCM2                        0xE2B00000
 
-#define S5P_PA_UART0           (S5PV210_PA_UART + 0x0)
-#define S5P_PA_UART1           (S5PV210_PA_UART + 0x400)
-#define S5P_PA_UART2           (S5PV210_PA_UART + 0x800)
-#define S5P_PA_UART3           (S5PV210_PA_UART + 0xC00)
+#define S5PV210_PA_TIMER               0xE2500000
+#define S5PV210_PA_SYSTIMER            0xE2600000
+#define S5PV210_PA_WATCHDOG            0xE2700000
+#define S5PV210_PA_RTC                 0xE2800000
 
-#define S5P_SZ_UART            SZ_256
+#define S5PV210_PA_UART                        0xE2900000
 
-#define S3C_VA_UARTx(x)                (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+#define S5PV210_PA_SROMC               0xE8000000
 
-#define S5PV210_PA_SROMC       (0xE8000000)
-#define S5P_PA_SROMC           S5PV210_PA_SROMC
+#define S5PV210_PA_CFCON               0xE8200000
 
-#define S5PV210_PA_CFCON       (0xE8200000)
+#define S5PV210_PA_HSMMC(x)            (0xEB000000 + ((x) * 0x100000))
 
-#define S5PV210_PA_MDMA                0xFA200000
-#define S5PV210_PA_PDMA0       0xE0900000
-#define S5PV210_PA_PDMA1       0xE0A00000
+#define S5PV210_PA_HSOTG               0xEC000000
+#define S5PV210_PA_HSPHY               0xEC100000
 
-#define S5PV210_PA_FB          (0xF8000000)
+#define S5PV210_PA_IIS0                        0xEEE30000
+#define S5PV210_PA_IIS1                        0xE2100000
+#define S5PV210_PA_IIS2                        0xE2A00000
 
-#define S5PV210_PA_FIMC0       (0xFB200000)
-#define S5PV210_PA_FIMC1       (0xFB300000)
-#define S5PV210_PA_FIMC2       (0xFB400000)
+#define S5PV210_PA_DMC0                        0xF0000000
+#define S5PV210_PA_DMC1                        0xF1400000
 
-#define S5PV210_PA_HSMMC(x)    (0xEB000000 + ((x) * 0x100000))
+#define S5PV210_PA_VIC0                        0xF2000000
+#define S5PV210_PA_VIC1                        0xF2100000
+#define S5PV210_PA_VIC2                        0xF2200000
+#define S5PV210_PA_VIC3                        0xF2300000
 
-#define S5PV210_PA_HSOTG       (0xEC000000)
-#define S5PV210_PA_HSPHY       (0xEC100000)
+#define S5PV210_PA_FB                  0xF8000000
 
-#define S5PV210_PA_VIC0                (0xF2000000)
-#define S5PV210_PA_VIC1                (0xF2100000)
-#define S5PV210_PA_VIC2                (0xF2200000)
-#define S5PV210_PA_VIC3                (0xF2300000)
+#define S5PV210_PA_MDMA                        0xFA200000
+#define S5PV210_PA_PDMA0               0xE0900000
+#define S5PV210_PA_PDMA1               0xE0A00000
 
-#define S5PV210_PA_SDRAM       (0x20000000)
-#define S5P_PA_SDRAM           S5PV210_PA_SDRAM
+#define S5PV210_PA_MIPI_CSIS           0xFA600000
 
-/* S/PDIF */
-#define S5PV210_PA_SPDIF       0xE1100000
+#define S5PV210_PA_FIMC0               0xFB200000
+#define S5PV210_PA_FIMC1               0xFB300000
+#define S5PV210_PA_FIMC2               0xFB400000
 
-/* I2S */
-#define S5PV210_PA_IIS0                0xEEE30000
-#define S5PV210_PA_IIS1                0xE2100000
-#define S5PV210_PA_IIS2                0xE2A00000
+/* Compatibility Defines */
 
-/* PCM */
-#define S5PV210_PA_PCM0                0xE2300000
-#define S5PV210_PA_PCM1                0xE1200000
-#define S5PV210_PA_PCM2                0xE2B00000
+#define S3C_PA_FB                      S5PV210_PA_FB
+#define S3C_PA_HSMMC0                  S5PV210_PA_HSMMC(0)
+#define S3C_PA_HSMMC1                  S5PV210_PA_HSMMC(1)
+#define S3C_PA_HSMMC2                  S5PV210_PA_HSMMC(2)
+#define S3C_PA_HSMMC3                  S5PV210_PA_HSMMC(3)
+#define S3C_PA_IIC                     S5PV210_PA_IIC0
+#define S3C_PA_IIC1                    S5PV210_PA_IIC1
+#define S3C_PA_IIC2                    S5PV210_PA_IIC2
+#define S3C_PA_RTC                     S5PV210_PA_RTC
+#define S3C_PA_USB_HSOTG               S5PV210_PA_HSOTG
+#define S3C_PA_WDT                     S5PV210_PA_WATCHDOG
 
-/* AC97 */
-#define S5PV210_PA_AC97                0xE2200000
+#define S5P_PA_CHIPID                  S5PV210_PA_CHIPID
+#define S5P_PA_FIMC0                   S5PV210_PA_FIMC0
+#define S5P_PA_FIMC1                   S5PV210_PA_FIMC1
+#define S5P_PA_FIMC2                   S5PV210_PA_FIMC2
+#define S5P_PA_MIPI_CSIS0              S5PV210_PA_MIPI_CSIS
+#define S5P_PA_ONENAND                 S5PC110_PA_ONENAND
+#define S5P_PA_ONENAND_DMA             S5PC110_PA_ONENAND_DMA
+#define S5P_PA_SDRAM                   S5PV210_PA_SDRAM
+#define S5P_PA_SROMC                   S5PV210_PA_SROMC
+#define S5P_PA_SYSCON                  S5PV210_PA_SYSCON
+#define S5P_PA_TIMER                   S5PV210_PA_TIMER
 
-#define S5PV210_PA_ADC         (0xE1700000)
+#define SAMSUNG_PA_ADC                 S5PV210_PA_ADC
+#define SAMSUNG_PA_CFCON               S5PV210_PA_CFCON
+#define SAMSUNG_PA_KEYPAD              S5PV210_PA_KEYPAD
 
-#define S5PV210_PA_DMC0                (0xF0000000)
-#define S5PV210_PA_DMC1                (0xF1400000)
+/* UART */
 
-#define S5PV210_PA_MIPI_CSIS   0xFA600000
+#define S3C_VA_UARTx(x)                        (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART            S5PV210_PA_UART
-#define S3C_PA_HSMMC0          S5PV210_PA_HSMMC(0)
-#define S3C_PA_HSMMC1          S5PV210_PA_HSMMC(1)
-#define S3C_PA_HSMMC2          S5PV210_PA_HSMMC(2)
-#define S3C_PA_HSMMC3          S5PV210_PA_HSMMC(3)
-#define S3C_PA_IIC             S5PV210_PA_IIC0
-#define S3C_PA_IIC1            S5PV210_PA_IIC1
-#define S3C_PA_IIC2            S5PV210_PA_IIC2
-#define S3C_PA_FB              S5PV210_PA_FB
-#define S3C_PA_RTC             S5PV210_PA_RTC
-#define S3C_PA_WDT             S5PV210_PA_WATCHDOG
-#define S3C_PA_USB_HSOTG       S5PV210_PA_HSOTG
-#define S5P_PA_FIMC0           S5PV210_PA_FIMC0
-#define S5P_PA_FIMC1           S5PV210_PA_FIMC1
-#define S5P_PA_FIMC2           S5PV210_PA_FIMC2
-#define S5P_PA_MIPI_CSIS0      S5PV210_PA_MIPI_CSIS
+#define S3C_PA_UART                    S5PV210_PA_UART
 
-#define SAMSUNG_PA_ADC         S5PV210_PA_ADC
-#define SAMSUNG_PA_CFCON       S5PV210_PA_CFCON
-#define SAMSUNG_PA_KEYPAD      S5PV210_PA_KEYPAD
+#define S5P_PA_UART(x)                 (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0                   S5P_PA_UART(0)
+#define S5P_PA_UART1                   S5P_PA_UART(1)
+#define S5P_PA_UART2                   S5P_PA_UART(2)
+#define S5P_PA_UART3                   S5P_PA_UART(3)
+
+#define S5P_SZ_UART                    SZ_256
 
 #endif /* __ASM_ARCH_MAP_H */
index 461aa03..557add4 100644 (file)
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = {
 
 static struct regulator_init_data aquila_ldo3_data = {
        .constraints    = {
-               .name           = "VUSB/MIPI_1.1V",
+               .name           = "VUSB+MIPI_1.1V",
                .min_uV         = 1100000,
                .max_uV         = 1100000,
                .apply_uV       = 1,
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = {
 
 static struct regulator_init_data aquila_ldo8_data = {
        .constraints    = {
-               .name           = "VUSB/VADC_3.3V",
+               .name           = "VUSB+VADC_3.3V",
                .min_uV         = 3300000,
                .max_uV         = 3300000,
                .apply_uV       = 1,
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = {
 
 static struct regulator_init_data aquila_ldo9_data = {
        .constraints    = {
-               .name           = "VCC/VCAM_2.8V",
+               .name           = "VCC+VCAM_2.8V",
                .min_uV         = 2800000,
                .max_uV         = 2800000,
                .apply_uV       = 1,
@@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = {
        .buck1_set1     = S5PV210_GPH0(3),
        .buck1_set2     = S5PV210_GPH0(4),
        .buck2_set3     = S5PV210_GPH0(5),
-       .buck1_max_voltage1 = 1200000,
-       .buck1_max_voltage2 = 1200000,
-       .buck2_max_voltage = 1200000,
+       .buck1_voltage1 = 1200000,
+       .buck1_voltage2 = 1200000,
+       .buck1_voltage3 = 1200000,
+       .buck1_voltage4 = 1200000,
+       .buck2_voltage1 = 1200000,
+       .buck2_voltage2 = 1200000,
 };
 #endif
 
index e22d511..056f5c7 100644 (file)
@@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = {
 
 static struct regulator_init_data goni_ldo3_data = {
        .constraints    = {
-               .name           = "VUSB/MIPI_1.1V",
+               .name           = "VUSB+MIPI_1.1V",
                .min_uV         = 1100000,
                .max_uV         = 1100000,
                .apply_uV       = 1,
@@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = {
 
 static struct regulator_init_data goni_ldo8_data = {
        .constraints    = {
-               .name           = "VUSB/VADC_3.3V",
+               .name           = "VUSB+VADC_3.3V",
                .min_uV         = 3300000,
                .max_uV         = 3300000,
                .apply_uV       = 1,
@@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = {
 
 static struct regulator_init_data goni_ldo9_data = {
        .constraints    = {
-               .name           = "VCC/VCAM_2.8V",
+               .name           = "VCC+VCAM_2.8V",
                .min_uV         = 2800000,
                .max_uV         = 2800000,
                .apply_uV       = 1,
@@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = {
        .buck1_set1     = S5PV210_GPH0(3),
        .buck1_set2     = S5PV210_GPH0(4),
        .buck2_set3     = S5PV210_GPH0(5),
-       .buck1_max_voltage1 = 1200000,
-       .buck1_max_voltage2 = 1200000,
-       .buck2_max_voltage = 1200000,
+       .buck1_voltage1 = 1200000,
+       .buck1_voltage2 = 1200000,
+       .buck1_voltage3 = 1200000,
+       .buck1_voltage4 = 1200000,
+       .buck2_voltage1 = 1200000,
+       .buck2_voltage2 = 1200000,
 };
 #endif
 
index 09c4c21..b2a9acc 100644 (file)
@@ -122,6 +122,7 @@ config MACH_SMDKV310
        select S3C_DEV_HSMMC2
        select S3C_DEV_HSMMC3
        select S5PV310_DEV_PD
+       select S5PV310_DEV_SYSMMU
        select S5PV310_SETUP_I2C1
        select S5PV310_SETUP_SDHCI
        help
index 74d4006..901657f 100644 (file)
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5pv310/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * S5PV310 - Memory map definitions
 
 #include <plat/map-s5p.h>
 
-#define S5PV310_PA_SYSRAM              (0x02025000)
+#define S5PV310_PA_SYSRAM              0x02025000
 
-#define S5PV310_PA_SROM_BANK(x)                (0x04000000 + ((x) * 0x01000000))
-
-#define S5PC210_PA_ONENAND             (0x0C000000)
-#define S5P_PA_ONENAND                 S5PC210_PA_ONENAND
-
-#define S5PC210_PA_ONENAND_DMA         (0x0C600000)
-#define S5P_PA_ONENAND_DMA             S5PC210_PA_ONENAND_DMA
-
-#define S5PV310_PA_CHIPID              (0x10000000)
-#define S5P_PA_CHIPID                  S5PV310_PA_CHIPID
-
-#define S5PV310_PA_SYSCON              (0x10010000)
-#define S5P_PA_SYSCON                  S5PV310_PA_SYSCON
+#define S5PV310_PA_I2S0                        0x03830000
+#define S5PV310_PA_I2S1                        0xE3100000
+#define S5PV310_PA_I2S2                        0xE2A00000
 
-#define S5PV310_PA_PMU                 (0x10020000)
+#define S5PV310_PA_PCM0                        0x03840000
+#define S5PV310_PA_PCM1                        0x13980000
+#define S5PV310_PA_PCM2                        0x13990000
 
-#define S5PV310_PA_CMU                 (0x10030000)
-
-#define S5PV310_PA_WATCHDOG            (0x10060000)
-#define S5PV310_PA_RTC                 (0x10070000)
-
-#define S5PV310_PA_DMC0                        (0x10400000)
-
-#define S5PV310_PA_COMBINER            (0x10448000)
-
-#define S5PV310_PA_COREPERI            (0x10500000)
-#define S5PV310_PA_GIC_CPU             (0x10500100)
-#define S5PV310_PA_TWD                 (0x10500600)
-#define S5PV310_PA_GIC_DIST            (0x10501000)
-#define S5PV310_PA_L2CC                        (0x10502000)
-
-/* DMA */
-#define S5PV310_PA_MDMA                0x10810000
-#define S5PV310_PA_PDMA0       0x12680000
-#define S5PV310_PA_PDMA1       0x12690000
-
-#define S5PV310_PA_GPIO1               (0x11400000)
-#define S5PV310_PA_GPIO2               (0x11000000)
-#define S5PV310_PA_GPIO3               (0x03860000)
-
-#define S5PV310_PA_MIPI_CSIS0          0x11880000
-#define S5PV310_PA_MIPI_CSIS1          0x11890000
+#define S5PV310_PA_SROM_BANK(x)                (0x04000000 + ((x) * 0x01000000))
 
-#define S5PV310_PA_HSMMC(x)            (0x12510000 + ((x) * 0x10000))
+#define S5PC210_PA_ONENAND             0x0C000000
+#define S5PC210_PA_ONENAND_DMA         0x0C600000
 
-#define S5PV310_PA_SROMC               (0x12570000)
-#define S5P_PA_SROMC                   S5PV310_PA_SROMC
+#define S5PV310_PA_CHIPID              0x10000000
 
-/* S/PDIF */
-#define S5PV310_PA_SPDIF       0xE1100000
+#define S5PV310_PA_SYSCON              0x10010000
+#define S5PV310_PA_PMU                 0x10020000
+#define S5PV310_PA_CMU                 0x10030000
 
-/* I2S */
-#define S5PV310_PA_I2S0                0x03830000
-#define S5PV310_PA_I2S1                0xE3100000
-#define S5PV310_PA_I2S2                0xE2A00000
+#define S5PV310_PA_WATCHDOG            0x10060000
+#define S5PV310_PA_RTC                 0x10070000
 
-/* PCM */
-#define S5PV310_PA_PCM0                0x03840000
-#define S5PV310_PA_PCM1                0x13980000
-#define S5PV310_PA_PCM2                0x13990000
+#define S5PV310_PA_DMC0                        0x10400000
 
-/* AC97 */
-#define S5PV310_PA_AC97                0x139A0000
+#define S5PV310_PA_COMBINER            0x10448000
 
-#define S5PV310_PA_UART                        (0x13800000)
+#define S5PV310_PA_COREPERI            0x10500000
+#define S5PV310_PA_GIC_CPU             0x10500100
+#define S5PV310_PA_TWD                 0x10500600
+#define S5PV310_PA_GIC_DIST            0x10501000
+#define S5PV310_PA_L2CC                        0x10502000
 
-#define S5P_PA_UART(x)                 (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0                   S5P_PA_UART(0)
-#define S5P_PA_UART1                   S5P_PA_UART(1)
-#define S5P_PA_UART2                   S5P_PA_UART(2)
-#define S5P_PA_UART3                   S5P_PA_UART(3)
-#define S5P_PA_UART4                   S5P_PA_UART(4)
-
-#define S5P_SZ_UART                    SZ_256
-
-#define S5PV310_PA_IIC(x)              (0x13860000 + ((x) * 0x10000))
-
-#define S5PV310_PA_TIMER               (0x139D0000)
-#define S5P_PA_TIMER                   S5PV310_PA_TIMER
-
-#define S5PV310_PA_SDRAM               (0x40000000)
-#define S5P_PA_SDRAM                   S5PV310_PA_SDRAM
+#define S5PV310_PA_MDMA                        0x10810000
+#define S5PV310_PA_PDMA0               0x12680000
+#define S5PV310_PA_PDMA1               0x12690000
 
 #define S5PV310_PA_SYSMMU_MDMA         0x10A40000
 #define S5PV310_PA_SYSMMU_SSS          0x10A50000
 #define S5PV310_PA_SYSMMU_TV           0x12E20000
 #define S5PV310_PA_SYSMMU_MFC_L                0x13620000
 #define S5PV310_PA_SYSMMU_MFC_R                0x13630000
-#define S5PV310_SYSMMU_TOTAL_IPNUM     16
-#define S5P_SYSMMU_TOTAL_IPNUM         S5PV310_SYSMMU_TOTAL_IPNUM
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART                    S5PV310_PA_UART
+#define S5PV310_PA_GPIO1               0x11400000
+#define S5PV310_PA_GPIO2               0x11000000
+#define S5PV310_PA_GPIO3               0x03860000
+
+#define S5PV310_PA_MIPI_CSIS0          0x11880000
+#define S5PV310_PA_MIPI_CSIS1          0x11890000
+
+#define S5PV310_PA_HSMMC(x)            (0x12510000 + ((x) * 0x10000))
+
+#define S5PV310_PA_SROMC               0x12570000
+
+#define S5PV310_PA_UART                        0x13800000
+
+#define S5PV310_PA_IIC(x)              (0x13860000 + ((x) * 0x10000))
+
+#define S5PV310_PA_AC97                        0x139A0000
+
+#define S5PV310_PA_TIMER               0x139D0000
+
+#define S5PV310_PA_SDRAM               0x40000000
+
+#define S5PV310_PA_SPDIF               0xE1100000
+
+/* Compatibility Defines */
+
 #define S3C_PA_HSMMC0                  S5PV310_PA_HSMMC(0)
 #define S3C_PA_HSMMC1                  S5PV310_PA_HSMMC(1)
 #define S3C_PA_HSMMC2                  S5PV310_PA_HSMMC(2)
 #define S3C_PA_IIC7                    S5PV310_PA_IIC(7)
 #define S3C_PA_RTC                     S5PV310_PA_RTC
 #define S3C_PA_WDT                     S5PV310_PA_WATCHDOG
+
+#define S5P_PA_CHIPID                  S5PV310_PA_CHIPID
 #define S5P_PA_MIPI_CSIS0              S5PV310_PA_MIPI_CSIS0
 #define S5P_PA_MIPI_CSIS1              S5PV310_PA_MIPI_CSIS1
+#define S5P_PA_ONENAND                 S5PC210_PA_ONENAND
+#define S5P_PA_ONENAND_DMA             S5PC210_PA_ONENAND_DMA
+#define S5P_PA_SDRAM                   S5PV310_PA_SDRAM
+#define S5P_PA_SROMC                   S5PV310_PA_SROMC
+#define S5P_PA_SYSCON                  S5PV310_PA_SYSCON
+#define S5P_PA_TIMER                   S5PV310_PA_TIMER
+
+/* UART */
+
+#define S3C_PA_UART                    S5PV310_PA_UART
+
+#define S5P_PA_UART(x)                 (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0                   S5P_PA_UART(0)
+#define S5P_PA_UART1                   S5P_PA_UART(1)
+#define S5P_PA_UART2                   S5P_PA_UART(2)
+#define S5P_PA_UART3                   S5P_PA_UART(3)
+#define S5P_PA_UART4                   S5P_PA_UART(4)
+
+#define S5P_SZ_UART                    SZ_256
 
 #endif /* __ASM_ARCH_MAP_H */
index 662fe85..598fc5c 100644 (file)
@@ -13,6 +13,9 @@
 #ifndef __ASM_ARM_ARCH_SYSMMU_H
 #define __ASM_ARM_ARCH_SYSMMU_H __FILE__
 
+#define S5PV310_SYSMMU_TOTAL_IPNUM     16
+#define S5P_SYSMMU_TOTAL_IPNUM         S5PV310_SYSMMU_TOTAL_IPNUM
+
 enum s5pv310_sysmmu_ips {
        SYSMMU_MDMA,
        SYSMMU_SSS,
@@ -32,7 +35,7 @@ enum s5pv310_sysmmu_ips {
        SYSMMU_MFC_R,
 };
 
-static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = {
+static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
        "SYSMMU_MDMA"   ,
        "SYSMMU_SSS"    ,
        "SYSMMU_FIMC0"  ,
index d43c5ef..bd3e1bf 100644 (file)
@@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = {
 struct platform_device collie_locomo_device = {
        .name           = "locomo",
        .id             = 0,
+       .dev            = {
+               .platform_data  = &locomo_info,
+       },
        .num_resources  = ARRAY_SIZE(locomo_resources),
        .resource       = locomo_resources,
 };
index 4d1b4c5..0c8f6cf 100644 (file)
@@ -60,6 +60,8 @@ endchoice
 
 config MACH_AG5EVM
        bool "AG5EVM board"
+       select ARCH_REQUIRE_GPIOLIB
+       select SH_LCD_MIPI_DSI
        depends on ARCH_SH73A0
 
 config MACH_MACKEREL
index c18a740..4303a86 100644 (file)
 #include <linux/input/sh_keysc.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
-
+#include <linux/sh_clk.h>
+#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mipi_dsi.h>
 #include <sound/sh_fsi.h>
-
 #include <mach/hardware.h>
 #include <mach/sh73a0.h>
 #include <mach/common.h>
@@ -183,11 +184,165 @@ static struct platform_device mmc_device = {
        .resource       = sh_mmcif_resources,
 };
 
+/* IrDA */
+static struct resource irda_resources[] = {
+       [0] = {
+               .start  = 0xE6D00000,
+               .end    = 0xE6D01FD4 - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = gic_spi(95),
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device irda_device = {
+       .name           = "sh_irda",
+       .id             = 0,
+       .resource       = irda_resources,
+       .num_resources  = ARRAY_SIZE(irda_resources),
+};
+
+static unsigned char lcd_backlight_seq[3][2] = {
+       { 0x04, 0x07 },
+       { 0x23, 0x80 },
+       { 0x03, 0x01 },
+};
+
+static void lcd_backlight_on(void)
+{
+       struct i2c_adapter *a;
+       struct i2c_msg msg;
+       int k;
+
+       a = i2c_get_adapter(1);
+       for (k = 0; a && k < 3; k++) {
+               msg.addr = 0x6d;
+               msg.buf = &lcd_backlight_seq[k][0];
+               msg.len = 2;
+               msg.flags = 0;
+               if (i2c_transfer(a, &msg, 1) != 1)
+                       break;
+       }
+}
+
+static void lcd_backlight_reset(void)
+{
+       gpio_set_value(GPIO_PORT235, 0);
+       mdelay(24);
+       gpio_set_value(GPIO_PORT235, 1);
+}
+
+static void lcd_on(void *board_data, struct fb_info *info)
+{
+       lcd_backlight_on();
+}
+
+static void lcd_off(void *board_data)
+{
+       lcd_backlight_reset();
+}
+
+/* LCDC0 */
+static const struct fb_videomode lcdc0_modes[] = {
+       {
+               .name           = "R63302(QHD)",
+               .xres           = 544,
+               .yres           = 961,
+               .left_margin    = 72,
+               .right_margin   = 600,
+               .hsync_len      = 16,
+               .upper_margin   = 8,
+               .lower_margin   = 8,
+               .vsync_len      = 2,
+               .sync           = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+       },
+};
+
+static struct sh_mobile_lcdc_info lcdc0_info = {
+       .clock_source = LCDC_CLK_PERIPHERAL,
+       .ch[0] = {
+               .chan = LCDC_CHAN_MAINLCD,
+               .interface_type = RGB24,
+               .clock_divider = 1,
+               .flags = LCDC_FLAGS_DWPOL,
+               .lcd_size_cfg.width = 44,
+               .lcd_size_cfg.height = 79,
+               .bpp = 16,
+               .lcd_cfg = lcdc0_modes,
+               .num_cfg = ARRAY_SIZE(lcdc0_modes),
+               .board_cfg = {
+                       .display_on = lcd_on,
+                       .display_off = lcd_off,
+               },
+       }
+};
+
+static struct resource lcdc0_resources[] = {
+       [0] = {
+               .name   = "LCDC0",
+               .start  = 0xfe940000, /* P4-only space */
+               .end    = 0xfe943fff,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = intcs_evt2irq(0x580),
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device lcdc0_device = {
+       .name           = "sh_mobile_lcdc_fb",
+       .num_resources  = ARRAY_SIZE(lcdc0_resources),
+       .resource       = lcdc0_resources,
+       .id             = 0,
+       .dev    = {
+               .platform_data  = &lcdc0_info,
+               .coherent_dma_mask = ~0,
+       },
+};
+
+/* MIPI-DSI */
+static struct resource mipidsi0_resources[] = {
+       [0] = {
+               .start  = 0xfeab0000,
+               .end    = 0xfeab3fff,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = 0xfeab4000,
+               .end    = 0xfeab7fff,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct sh_mipi_dsi_info mipidsi0_info = {
+       .data_format    = MIPI_RGB888,
+       .lcd_chan       = &lcdc0_info.ch[0],
+       .vsynw_offset   = 20,
+       .clksrc         = 1,
+       .flags          = SH_MIPI_DSI_HSABM,
+};
+
+static struct platform_device mipidsi0_device = {
+       .name           = "sh-mipi-dsi",
+       .num_resources  = ARRAY_SIZE(mipidsi0_resources),
+       .resource       = mipidsi0_resources,
+       .id             = 0,
+       .dev    = {
+               .platform_data  = &mipidsi0_info,
+       },
+};
+
 static struct platform_device *ag5evm_devices[] __initdata = {
        &eth_device,
        &keysc_device,
        &fsi_device,
        &mmc_device,
+       &irda_device,
+       &lcdc0_device,
+       &mipidsi0_device,
 };
 
 static struct map_desc ag5evm_io_desc[] __initdata = {
@@ -224,6 +379,8 @@ void __init ag5evm_init_irq(void)
        __raw_writew(__raw_readw(PINTCR0A) | (2<<10), PINTCR0A);
 }
 
+#define DSI0PHYCR      0xe615006c
+
 static void __init ag5evm_init(void)
 {
        sh73a0_pinmux_init();
@@ -287,6 +444,26 @@ static void __init ag5evm_init(void)
        gpio_request(GPIO_FN_FSIAISLD, NULL);
        gpio_request(GPIO_FN_FSIAOSLD, NULL);
 
+       /* IrDA */
+       gpio_request(GPIO_FN_PORT241_IRDA_OUT, NULL);
+       gpio_request(GPIO_FN_PORT242_IRDA_IN,  NULL);
+       gpio_request(GPIO_FN_PORT243_IRDA_FIRSEL, NULL);
+
+       /* LCD panel */
+       gpio_request(GPIO_PORT217, NULL); /* RESET */
+       gpio_direction_output(GPIO_PORT217, 0);
+       mdelay(1);
+       gpio_set_value(GPIO_PORT217, 1);
+       mdelay(100);
+
+       /* LCD backlight controller */
+       gpio_request(GPIO_PORT235, NULL); /* RESET */
+       gpio_direction_output(GPIO_PORT235, 0);
+       lcd_backlight_reset();
+
+       /* MIPI-DSI clock setup */
+       __raw_writel(0x2a809010, DSI0PHYCR);
+
 #ifdef CONFIG_CACHE_L2X0
        /* Shared attribute override enable, 64K*8way */
        l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
index 3cf0951..81d6536 100644 (file)
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)
 
        lcdc_info.clock_source                  = LCDC_CLK_BUS;
        lcdc_info.ch[0].interface_type          = RGB18;
-       lcdc_info.ch[0].clock_divider           = 2;
+       lcdc_info.ch[0].clock_divider           = 3;
        lcdc_info.ch[0].flags                   = 0;
        lcdc_info.ch[0].lcd_size_cfg.width      = 152;
        lcdc_info.ch[0].lcd_size_cfg.height     = 91;
index 686b304..ef4613b 100644 (file)
@@ -347,7 +347,6 @@ static void __init g3evm_init(void)
        gpio_request(GPIO_FN_IRDA_OUT, NULL);
        gpio_request(GPIO_FN_IRDA_IN, NULL);
        gpio_request(GPIO_FN_IRDA_FIRSEL, NULL);
-       set_irq_type(evt2irq(0x480), IRQ_TYPE_LEVEL_LOW);
 
        sh7367_add_standard_devices();
 
index 7b15d21..1657eac 100644 (file)
  *     SW1     |       SW33
  *             | bit1 | bit2 | bit3 | bit4
  * -------------+------+------+------+-------
- * MMC0          OFF   |  OFF |  ON  |  ON  |  X
- * MMC1          ON    |  OFF |  ON  |  X   | ON
- * SDHI1  OFF  |  ON  |   X  |  OFF | ON
+ * MMC0   OFF  |  OFF |   X  |  ON  |  X       (Use MMCIF)
+ * SDHI1  OFF  |  ON  |   X  |  OFF |  X       (Use MFD_SH_MOBILE_SDHI)
  *
  */
 
@@ -304,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
                .lcd_cfg = mackerel_lcdc_modes,
                .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
                .interface_type         = RGB24,
-               .clock_divider          = 2,
+               .clock_divider          = 3,
                .flags                  = 0,
                .lcd_size_cfg.width     = 152,
                .lcd_size_cfg.height    = 91,
index 9aa8d68..e9731b5 100644 (file)
@@ -234,7 +234,9 @@ static int pllc2_set_rate(struct clk *clk, unsigned long rate)
 
        value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
 
-       __raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
+       __raw_writel(value | ((idx + 19) << 24), PLLC2CR);
+
+       clk->rate = clk->freq_table[idx].frequency;
 
        return 0;
 }
index 720a714..7e58904 100644 (file)
@@ -118,8 +118,16 @@ static unsigned long pll_recalc(struct clk *clk)
 {
        unsigned long mult = 1;
 
-       if (__raw_readl(PLLECR) & (1 << clk->enable_bit))
+       if (__raw_readl(PLLECR) & (1 << clk->enable_bit)) {
                mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1);
+               /* handle CFG bit for PLL1 and PLL2 */
+               switch (clk->enable_bit) {
+               case 1:
+               case 2:
+                       if (__raw_readl(clk->enable_reg) & (1 << 20))
+                               mult *= 2;
+               }
+       }
 
        return clk->parent->rate * mult;
 }
@@ -212,7 +220,7 @@ enum { DIV4_I, DIV4_ZG, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2,
 static struct clk div4_clks[DIV4_NR] = {
        [DIV4_I] = DIV4(FRQCRA, 20, 0xfff, CLK_ENABLE_ON_INIT),
        [DIV4_ZG] = DIV4(FRQCRA, 16, 0xbff, CLK_ENABLE_ON_INIT),
-       [DIV4_M3] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
+       [DIV4_M3] = DIV4(FRQCRA, 12, 0xfff, CLK_ENABLE_ON_INIT),
        [DIV4_B] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
        [DIV4_M1] = DIV4(FRQCRA, 4, 0xfff, 0),
        [DIV4_M2] = DIV4(FRQCRA, 0, 0xfff, 0),
@@ -255,10 +263,10 @@ static struct clk div6_clks[DIV6_NR] = {
 };
 
 enum { MSTP001,
-       MSTP125, MSTP116,
+       MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
        MSTP219,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-       MSTP331, MSTP329, MSTP323, MSTP312,
+       MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
        MSTP411, MSTP410, MSTP403,
        MSTP_NR };
 
@@ -267,8 +275,14 @@ enum { MSTP001,
 
 static struct clk mstp_clks[MSTP_NR] = {
        [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+       [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
+       [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
        [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
+       [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
        [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
+       [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
        [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
        [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
        [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
@@ -279,6 +293,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
+       [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
        [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
        [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
@@ -288,16 +303,32 @@ static struct clk mstp_clks[MSTP_NR] = {
 
 #define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
 #define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
 
 static struct clk_lookup lookups[] = {
        /* main clocks */
        CLKDEV_CON_ID("r_clk", &r_clk),
 
+       /* DIV6 clocks */
+       CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
+       CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
+       CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
+       CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
+       CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
+       CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
+       CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
+
        /* MSTP32 clocks */
        CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
+       CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
        CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
        CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
+       CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+       CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
        CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
        CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
        CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
@@ -308,6 +339,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
        CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
        CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
+       CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
        CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
        CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
index efd3687..3029aba 100644 (file)
@@ -6,13 +6,10 @@ LIST "RWT Setting"
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500
 
-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2
 
 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002
 
 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
 
 WAIT 1, 0xFE40009C
 
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B
 
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087
 
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
 
 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
 
 WAIT 1, 0xFE40009C
 
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
 
 LIST "SCIF0 - Serial port for earlyprintk"
 EB 0xE6053098, 0x11
index efd3687..3029aba 100644 (file)
@@ -6,13 +6,10 @@ LIST "RWT Setting"
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500
 
-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2
 
 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002
 
 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
 
 WAIT 1, 0xFE40009C
 
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B
 
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087
 
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
 
 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
 
 WAIT 1, 0xFE40009C
 
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
 
 LIST "SCIF0 - Serial port for earlyprintk"
 EB 0xE6053098, 0x11
index f78a1ea..ca5f9d1 100644 (file)
@@ -365,6 +365,7 @@ static struct intc_desc intca_desc __initdata = {
 
 enum {
        UNUSED_INTCS = 0,
+       ENABLED_INTCS,
 
        INTCS,
 
@@ -413,7 +414,7 @@ enum {
        CMT4,
        DSITX1_DSITX1_0,
        DSITX1_DSITX1_1,
-       /* MFIS2 */
+       MFIS2_INTCS, /* Priority always enabled using ENABLED_INTCS */
        CPORTS2R,
        /* CEC */
        JPU6E,
@@ -477,7 +478,7 @@ static struct intc_vect intcs_vectors[] = {
        INTCS_VECT(CMT4, 0x1980),
        INTCS_VECT(DSITX1_DSITX1_0, 0x19a0),
        INTCS_VECT(DSITX1_DSITX1_1, 0x19c0),
-       /* MFIS2 */
+       INTCS_VECT(MFIS2_INTCS, 0x1a00),
        INTCS_VECT(CPORTS2R, 0x1a20),
        /* CEC */
        INTCS_VECT(JPU6E, 0x1a80),
@@ -543,7 +544,7 @@ static struct intc_mask_reg intcs_mask_registers[] = {
          { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
            CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, 0 } },
        { 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */
-         { 0, CPORTS2R, 0, 0,
+         { MFIS2_INTCS, CPORTS2R, 0, 0,
            JPU6E, 0, 0, 0 } },
        { 0xffd20104, 0, 16, /* INTAMASK */
          { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -571,7 +572,8 @@ static struct intc_prio_reg intcs_prio_registers[] = {
        { 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, 0 } },
        { 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, DSITX1_DSITX1_0,
                                               DSITX1_DSITX1_1, 0 } },
-       { 0xffd50038, 0, 16, 4, /* IPROS3 */ { 0, CPORTS2R, 0, 0 } },
+       { 0xffd50038, 0, 16, 4, /* IPROS3 */ { ENABLED_INTCS, CPORTS2R,
+                                              0, 0 } },
        { 0xffd5003c, 0, 16, 4, /* IPRPS3 */ { JPU6E, 0, 0, 0 } },
 };
 
@@ -590,6 +592,7 @@ static struct resource intcs_resources[] __initdata = {
 
 static struct intc_desc intcs_desc __initdata = {
        .name = "sh7372-intcs",
+       .force_enable = ENABLED_INTCS,
        .resource = intcs_resources,
        .num_resources = ARRAY_SIZE(intcs_resources),
        .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
index 322d8d5..5d0e150 100644 (file)
@@ -252,10 +252,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
 
 void __init sh73a0_init_irq(void)
 {
-       void __iomem *gic_base = __io(0xf0001000);
+       void __iomem *gic_dist_base = __io(0xf0001000);
+       void __iomem *gic_cpu_base = __io(0xf0000100);
        void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
 
-       gic_init(0, 29, gic_base, gic_base);
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
 
        register_intc_controller(&intcs_desc);
 
index cacf17a..53677e4 100644 (file)
@@ -62,7 +62,7 @@
 #define SPEAR320_SMII1_BASE            0xAB000000
 #define SPEAR320_SMII1_SIZE            0x01000000
 
-#define SPEAR320_SOC_CONFIG_BASE       0xB4000000
+#define SPEAR320_SOC_CONFIG_BASE       0xB3000000
 #define SPEAR320_SOC_CONFIG_SIZE       0x00000070
 /* Interrupt registers offsets and masks */
 #define INT_STS_MASK_REG               0x04
index bd06620..ad80488 100644 (file)
@@ -207,9 +207,9 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
 
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-               __set_irq_handler_unlocked(irq, handle_level_irq);
+               __set_irq_handler_unlocked(d->irq, handle_level_irq);
        else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-               __set_irq_handler_unlocked(irq, handle_edge_irq);
+               __set_irq_handler_unlocked(d->irq, handle_edge_irq);
 
        return 0;
 }
index d772395..a217f68 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __MACH_CLK_H
 #define __MACH_CLK_H
 
+struct clk;
+
 void tegra_periph_reset_deassert(struct clk *c);
 void tegra_periph_reset_assert(struct clk *c);
 
index 412f5c6..66cd3f4 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
        return 1;
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
new file mode 100644 (file)
index 0000000..04c7798
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Platform definitions for tegra-kbc keyboard input driver
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef ASMARM_ARCH_TEGRA_KBC_H
+#define ASMARM_ARCH_TEGRA_KBC_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define KBC_MAX_GPIO   24
+#define KBC_MAX_KPENT  8
+#else
+#define KBC_MAX_GPIO   20
+#define KBC_MAX_KPENT  7
+#endif
+
+#define KBC_MAX_ROW    16
+#define KBC_MAX_COL    8
+#define KBC_MAX_KEY    (KBC_MAX_ROW * KBC_MAX_COL)
+
+struct tegra_kbc_pin_cfg {
+       bool is_row;
+       unsigned char num;
+};
+
+struct tegra_kbc_wake_key {
+       u8 row:4;
+       u8 col:4;
+};
+
+struct tegra_kbc_platform_data {
+       unsigned int debounce_cnt;
+       unsigned int repeat_cnt;
+
+       unsigned int wake_cnt; /* 0:wake on any key >1:wake on wake_cfg */
+       const struct tegra_kbc_wake_key *wake_cfg;
+
+       struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+       const struct matrix_keymap_data *keymap_data;
+
+       bool wakeup;
+       bool use_fn_map;
+};
+#endif
index de7dfad..17c74d2 100644 (file)
 #define ICTLR_COP_IER_CLR      0x38
 #define ICTLR_COP_IEP_CLASS    0x3c
 
-static void (*gic_mask_irq)(struct irq_data *d);
-static void (*gic_unmask_irq)(struct irq_data *d);
+static void (*tegra_gic_mask_irq)(struct irq_data *d);
+static void (*tegra_gic_unmask_irq)(struct irq_data *d);
 
-#define irq_to_ictlr(irq) (((irq)-32) >> 5)
+#define irq_to_ictlr(irq) (((irq) - 32) >> 5)
 static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE);
-#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr)*0x100)
+#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr) * 0x100)
 
 static void tegra_mask(struct irq_data *d)
 {
        void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
-       gic_mask_irq(d);
-       writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_CLR);
+       tegra_gic_mask_irq(d);
+       writel(1 << (d->irq & 31), addr+ICTLR_CPU_IER_CLR);
 }
 
 static void tegra_unmask(struct irq_data *d)
 {
        void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
-       gic_unmask_irq(d);
+       tegra_gic_unmask_irq(d);
        writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_SET);
 }
 
@@ -98,8 +98,8 @@ void __init tegra_init_irq(void)
                 IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
 
        gic = get_irq_chip(29);
-       gic_unmask_irq = gic->irq_unmask;
-       gic_mask_irq = gic->irq_mask;
+       tegra_gic_unmask_irq = gic->irq_unmask;
+       tegra_gic_mask_irq = gic->irq_mask;
        tegra_irq.irq_ack = gic->irq_ack;
 #ifdef CONFIG_SMP
        tegra_irq.irq_set_affinity = gic->irq_set_affinity;
index 3f7b5e9..9cdec5a 100644 (file)
@@ -2,17 +2,19 @@ menu "Versatile platform type"
        depends on ARCH_VERSATILE
 
 config ARCH_VERSATILE_PB
-       bool "Support Versatile/PB platform"
+       bool "Support Versatile Platform Baseboard for ARM926EJ-S"
        select CPU_ARM926T
        select MIGHT_HAVE_PCI
        default y
        help
-         Include support for the ARM(R) Versatile/PB platform.
+         Include support for the ARM(R) Versatile Platform Baseboard
+         for the ARM926EJ-S.
 
 config MACH_VERSATILE_AB
-       bool "Support Versatile/AB platform"
+       bool "Support Versatile Application Baseboard for ARM926EJ-S"
        select CPU_ARM926T
        help
-         Include support for the ARM(R) Versatile/AP platform.
+         Include support for the ARM(R) Versatile Application Baseboard
+         for the ARM926EJ-S.
 
 endmenu
index b1687b6..634bf1d 100644 (file)
@@ -39,7 +39,7 @@ volatile int __cpuinitdata pen_release = -1;
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void __cpuinit write_pen_release(int val)
 {
        pen_release = val;
        smp_wmb();
index a9ed342..1edae65 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mach/time.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
+#include <asm/hardware/sp810.h>
 
 #include <mach/motherboard.h>
 
@@ -50,8 +51,16 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
 
 static void __init v2m_timer_init(void)
 {
+       u32 scctrl;
+
        versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
 
+       /* Select 1MHz TIMCLK as the reference clock for SP804 timers */
+       scctrl = readl(MMIO_P2V(V2M_SYSCTL + SCCTRL));
+       scctrl |= SCCTRL_TIMEREN0SEL_TIMCLK;
+       scctrl |= SCCTRL_TIMEREN1SEL_TIMCLK;
+       writel(scctrl, MMIO_P2V(V2M_SYSCTL + SCCTRL));
+
        writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
        writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
 
index 9d30c6f..e4509ba 100644 (file)
@@ -405,7 +405,7 @@ config CPU_V6
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
        depends on CPU_V6 || CPU_V7
-       default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
+       default y if SMP
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
          This enables the kernel to use some instructions not present
@@ -416,7 +416,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
        bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-       select CPU_32v6K if !ARCH_OMAP2
+       select CPU_32v6K
        select CPU_32v7
        select CPU_ABRT_EV7
        select CPU_PABRT_V7
@@ -644,7 +644,7 @@ config ARM_THUMBEE
 
 config SWP_EMULATE
        bool "Emulate SWP/SWPB instructions"
-       depends on CPU_V7 && !CPU_V6
+       depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
        select HAVE_PROC_CPU if PROC_FS
        default y if SMP
        help
index 170c9bb..f2ce38e 100644 (file)
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
 static inline void cache_sync(void)
 {
        void __iomem *base = l2x0_base;
+
+#ifdef CONFIG_ARM_ERRATA_753970
+       /* write to an unmapped register */
+       writel_relaxed(0, base + L2X0_DUMMY_REG);
+#else
        writel_relaxed(0, base + L2X0_CACHE_SYNC);
+#endif
        cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
index 5164069..cddd684 100644 (file)
@@ -297,6 +297,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
        memblock_reserve(__pa(_stext), _end - _stext);
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
+       if (phys_initrd_size &&
+           memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+               pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+                      phys_initrd_start, phys_initrd_size);
+               phys_initrd_start = phys_initrd_size = 0;
+       }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);
 
index 0c1172b..8e33562 100644 (file)
@@ -264,6 +264,12 @@ __v7_setup:
        orreq   r10, r10, #1 << 6               @ set bit #6
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
+#ifdef CONFIG_ARM_ERRATA_751472
+       cmp     r6, #0x30                       @ present prior to r3p0
+       mrclt   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orrlt   r10, r10, #1 << 11              @ set bit #11
+       mcrlt   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
 
 3:     mov     r10, #0
 #ifdef HARVARD_CACHE
index 8aa9744..c074e66 100644 (file)
@@ -10,8 +10,6 @@
  */
 
 #include <linux/cpumask.h>
-#include <linux/err.h>
-#include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/oprofile.h>
@@ -46,6 +44,7 @@ char *op_name_from_perf_id(void)
                return NULL;
        }
 }
+#endif
 
 static int report_trace(struct stackframe *frame, void *d)
 {
@@ -85,7 +84,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail)
 
        /* frame pointers should strictly progress back up the stack
         * (towards higher addresses) */
-       if (tail >= buftail[0].fp)
+       if (tail + 1 >= buftail[0].fp)
                return NULL;
 
        return buftail[0].fp-1;
@@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
+       /* provide backtrace support also in timer mode: */
        ops->backtrace          = arm_backtrace;
 
        return oprofile_perf_init(ops);
@@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
 }
-#else
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-       pr_info("oprofile: hardware counters not available\n");
-       return -ENODEV;
-}
-void __exit oprofile_arch_exit(void) {}
-#endif /* CONFIG_HW_PERF_EVENTS */
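The user_backtrace change above tightens the sanity check on saved frame pointers: after reading a two-word frame record at tail, the next frame pointer must lie beyond the whole record (tail + 1), not merely beyond its first word, or the walk could revisit the same frame. A self-contained sketch with a simplified frame layout; the field names are illustrative only.

#include <stdio.h>

struct frame_tail { struct frame_tail *fp; unsigned long lr; };

static struct frame_tail *next_frame(struct frame_tail *tail)
{
	struct frame_tail copy = *tail;        /* the kernel copies this from userspace */

	if (tail + 1 >= copy.fp)               /* must strictly move up the stack */
		return NULL;
	return copy.fp - 1;                    /* fp points just above its record */
}

int main(void)
{
	struct frame_tail stack[4] = { { 0, 0 } };

	stack[0].fp = &stack[3];               /* next record therefore lives at stack[2] */
	stack[0].lr = 0x1234;
	printf("next record at offset %ld\n",
	       (long)(next_frame(&stack[0]) - stack));
	return 0;
}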
index 3a70ebf..ff469c4 100644 (file)
@@ -95,6 +95,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
        case MACH_TYPE_MX35_3DS:
        case MACH_TYPE_PCM043:
        case MACH_TYPE_LILLY1131:
+       case MACH_TYPE_VPR200:
                uart_base = MX3X_UART1_BASE_ADDR;
                break;
        case MACH_TYPE_MAGX_ZN5:
@@ -102,6 +103,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
                break;
        case MACH_TYPE_MX51_BABBAGE:
        case MACH_TYPE_EUKREA_CPUIMX51SD:
+       case MACH_TYPE_MX51_3DS:
                uart_base = MX51_UART1_BASE_ADDR;
                break;
        case MACH_TYPE_MX50_RDP:
index 18fe3cb..b6333ae 100644 (file)
@@ -144,12 +144,9 @@ config OMAP_IOMMU_DEBUG
 config OMAP_IOMMU_IVA2
        bool
 
-choice
-       prompt "System timer"
-       default OMAP_32K_TIMER if !ARCH_OMAP15XX
-
 config OMAP_MPU_TIMER
        bool "Use mpu timer"
+       depends on ARCH_OMAP1
        help
          Select this option if you want to use the OMAP mpu timer. This
          timer provides more intra-tick resolution than the 32KHz timer,
@@ -158,6 +155,7 @@ config OMAP_MPU_TIMER
 config OMAP_32K_TIMER
        bool "Use 32KHz timer"
        depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+       default y if (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
        help
          Select this option if you want to enable the OMAP 32KHz timer.
          This timer saves power compared to the OMAP_MPU_TIMER, and has
@@ -165,8 +163,6 @@ config OMAP_32K_TIMER
          intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is
          currently only available for OMAP16XX, 24XX, 34XX and OMAP4.
 
-endchoice
-
 config OMAP3_L2_AUX_SECURE_SAVE_RESTORE
        bool "OMAP3 HS/EMU save and restore for L2 AUX control register"
        depends on ARCH_OMAP3 && PM
index ea46440..862dda9 100644 (file)
@@ -36,8 +36,6 @@
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED                0xfffbc410
 
-#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX))
-
 #include <linux/clocksource.h>
 
 /*
@@ -122,12 +120,24 @@ static DEFINE_CLOCK_DATA(cd);
 #define SC_MULT                4000000000u
 #define SC_SHIFT       17
 
-unsigned long long notrace sched_clock(void)
+static inline unsigned long long notrace _omap_32k_sched_clock(void)
 {
        u32 cyc = clocksource_32k.read(&clocksource_32k);
        return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
 }
 
+#ifndef CONFIG_OMAP_MPU_TIMER
+unsigned long long notrace sched_clock(void)
+{
+       return _omap_32k_sched_clock();
+}
+#else
+unsigned long long notrace omap_32k_sched_clock(void)
+{
+       return _omap_32k_sched_clock();
+}
+#endif
+
 static void notrace omap_update_sched_clock(void)
 {
        u32 cyc = clocksource_32k.read(&clocksource_32k);
@@ -160,7 +170,7 @@ void read_persistent_clock(struct timespec *ts)
        *ts = *tsp;
 }
 
-static int __init omap_init_clocksource_32k(void)
+int __init omap_init_clocksource_32k(void)
 {
        static char err[] __initdata = KERN_ERR
                        "%s: can't register clocksource!\n";
@@ -195,7 +205,3 @@ static int __init omap_init_clocksource_32k(void)
        }
        return 0;
 }
-arch_initcall(omap_init_clocksource_32k);
-
-#endif /* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */
-
index c4b2b47..8536308 100644 (file)
@@ -53,7 +53,7 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
 #endif
 
 #define OMAP_DMA_ACTIVE                        0x01
-#define OMAP2_DMA_CSR_CLEAR_MASK       0xffe
+#define OMAP2_DMA_CSR_CLEAR_MASK       0xffffffff
 
 #define OMAP_FUNC_MUX_ARM_BASE         (0xfffe1000 + 0xec)
 
@@ -1873,7 +1873,7 @@ static int omap2_dma_handle_ch(int ch)
                printk(KERN_INFO "DMA misaligned error with device %d\n",
                       dma_chan[ch].dev_id);
 
-       p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
+       p->dma_write(status, CSR, ch);
        p->dma_write(1 << ch, IRQSTATUS_L0, ch);
        /* read back the register to flush the write */
        p->dma_read(IRQSTATUS_L0, ch);
@@ -1893,10 +1893,9 @@ static int omap2_dma_handle_ch(int ch)
                        OMAP_DMA_CHAIN_INCQHEAD(chain_id);
 
                status = p->dma_read(CSR, ch);
+               p->dma_write(status, CSR, ch);
        }
 
-       p->dma_write(status, CSR, ch);
-
        if (likely(dma_chan[ch].callback != NULL))
                dma_chan[ch].callback(ch, status, dma_chan[ch].data);
 
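The DMA hunk above stops acknowledging the channel interrupt with a blanket clear mask and instead writes back only the status bits that were actually read, so events that race in between the read and the acknowledge are not lost. A toy write-1-to-clear model of that idea; the register behaviour is simulated and not OMAP-specific.

#include <stdint.h>
#include <stdio.h>

static uint32_t csr = 0x22;                  /* two pending event bits */

static uint32_t csr_read(void)               { return csr; }
static void csr_write_1_to_clear(uint32_t v) { csr &= ~v; }

int main(void)
{
	uint32_t status = csr_read();        /* snapshot what we will handle */

	csr |= 0x4;                          /* a new event races in ... */
	csr_write_1_to_clear(status);        /* ... and survives the ack */
	printf("handled 0x%02x, still pending 0x%02x\n",
	       (unsigned)status, (unsigned)csr);
	return 0;
}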
index 6b8088e..29b2afb 100644 (file)
@@ -35,6 +35,9 @@ struct sys_timer;
 
 extern void omap_map_common_io(void);
 extern struct sys_timer omap_timer;
+extern bool omap_32k_timer_init(void);
+extern int __init omap_init_clocksource_32k(void);
+extern unsigned long long notrace omap_32k_sched_clock(void);
 
 extern void omap_reserve(void);
 
index 459b319..49d3208 100644 (file)
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
 
 struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
 {
-       struct omap_mbox *mbox;
-       int ret;
+       struct omap_mbox *_mbox, *mbox = NULL;
+       int i, ret;
 
        if (!mboxes)
                return ERR_PTR(-EINVAL);
 
-       for (mbox = *mboxes; mbox; mbox++)
-               if (!strcmp(mbox->name, name))
+       for (i = 0; (_mbox = mboxes[i]); i++) {
+               if (!strcmp(_mbox->name, name)) {
+                       mbox = _mbox;
                        break;
+               }
+       }
 
        if (!mbox)
                return ERR_PTR(-ENOENT);
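The omap_mbox_get hunk above switches the lookup to treat mboxes as a NULL-terminated array of pointers, walking an index and dereferencing each slot instead of stepping a struct pointer with mbox++. A stand-alone sketch of that lookup; the names and entries are made up.

#include <stdio.h>
#include <string.h>

struct mbox { const char *name; };

static struct mbox dsp = { "dsp" }, iva = { "iva" };
static struct mbox *mboxes[] = { &dsp, &iva, NULL };   /* NULL-terminated */

static struct mbox *mbox_get(const char *name)
{
	struct mbox *m;
	int i;

	for (i = 0; (m = mboxes[i]); i++)    /* stop at the NULL sentinel */
		if (!strcmp(m->name, name))
			return m;
	return NULL;
}

int main(void)
{
	printf("found: %s\n", mbox_get("iva") ? "iva" : "(none)");
	return 0;
}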
index b77e018..a9aa5ad 100644 (file)
@@ -139,10 +139,11 @@ static const unsigned long mfpr_edge[] = {
 #define mfp_configured(p)      ((p)->config != -1)
 
 /*
- * perform a read-back of any MFPR register to make sure the
+ * perform a read-back of any valid MFPR register to make sure the
  * previous writings are finished
  */
-#define mfpr_sync()    (void)__raw_readl(mfpr_mmio_base + 0)
+static unsigned long mfpr_off_readback;
+#define mfpr_sync()    (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
 
 static inline void __mfp_config_run(struct mfp_pin *p)
 {
@@ -248,6 +249,9 @@ void __init mfp_init_addr(struct mfp_addr_map *map)
 
        spin_lock_irqsave(&mfp_spin_lock, flags);
 
+       /* mfp offset for readback */
+       mfpr_off_readback = map[0].offset;
+
        for (p = map; p->start != MFP_PIN_INVALID; p++) {
                offset = p->offset;
                i = p->start;
index deb3995..557f8c5 100644 (file)
@@ -37,6 +37,14 @@ config S5P_GPIO_INT
        help
          Common code for the GPIO interrupts (other than external interrupts.)
 
+comment "System MMU"
+
+config S5P_SYSTEM_MMU
+       bool "S5P SYSTEM MMU"
+       depends on ARCH_S5PV310
+       help
+         Say Y here if you want to enable System MMU
+
 config S5P_DEV_FIMC0
        bool
        help
@@ -66,19 +74,3 @@ config S5P_DEV_CSIS1
        bool
        help
          Compile in platform device definitions for MIPI-CSIS channel 1
-
-menuconfig S5P_SYSMMU
-       bool "SYSMMU support"
-       depends on ARCH_S5PV310
-       help
-         This is a System MMU driver for Samsung ARM based Soc.
-
-if S5P_SYSMMU
-
-config S5P_SYSMMU_DEBUG
-       bool "Enables debug messages"
-       depends on S5P_SYSMMU
-       help
-         This enables SYSMMU driver debug massages.
-
-endif
index 92efe1a..4bd5cf9 100644 (file)
@@ -19,6 +19,7 @@ obj-y                         += clock.o
 obj-y                          += irq.o
 obj-$(CONFIG_S5P_EXT_INT)      += irq-eint.o
 obj-$(CONFIG_S5P_GPIO_INT)     += irq-gpioint.o
+obj-$(CONFIG_S5P_SYSTEM_MMU)   += sysmmu.o
 obj-$(CONFIG_PM)               += pm.o
 obj-$(CONFIG_PM)               += irq-pm.o
 
@@ -30,4 +31,3 @@ obj-$(CONFIG_S5P_DEV_FIMC2)   += dev-fimc2.o
 obj-$(CONFIG_S5P_DEV_ONENAND)  += dev-onenand.o
 obj-$(CONFIG_S5P_DEV_CSIS0)    += dev-csis0.o
 obj-$(CONFIG_S5P_DEV_CSIS1)    += dev-csis1.o
-obj-$(CONFIG_S5P_SYSMMU)       += sysmmu.o
index 6a73428..afaf87f 100644 (file)
@@ -28,7 +28,7 @@
 static struct resource s5p_uart0_resource[] = {
        [0] = {
                .start  = S5P_PA_UART0,
-               .end    = S5P_PA_UART0 + S5P_SZ_UART,
+               .end    = S5P_PA_UART0 + S5P_SZ_UART - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = {
 static struct resource s5p_uart1_resource[] = {
        [0] = {
                .start  = S5P_PA_UART1,
-               .end    = S5P_PA_UART1 + S5P_SZ_UART,
+               .end    = S5P_PA_UART1 + S5P_SZ_UART - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = {
 static struct resource s5p_uart2_resource[] = {
        [0] = {
                .start  = S5P_PA_UART2,
-               .end    = S5P_PA_UART2 + S5P_SZ_UART,
+               .end    = S5P_PA_UART2 + S5P_SZ_UART - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = {
 #if CONFIG_SERIAL_SAMSUNG_UARTS > 3
        [0] = {
                .start  = S5P_PA_UART3,
-               .end    = S5P_PA_UART3 + S5P_SZ_UART,
+               .end    = S5P_PA_UART3 + S5P_SZ_UART - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = {
 #if CONFIG_SERIAL_SAMSUNG_UARTS > 4
        [0] = {
                .start  = S5P_PA_UART4,
-               .end    = S5P_PA_UART4 + S5P_SZ_UART,
+               .end    = S5P_PA_UART4 + S5P_SZ_UART - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = {
 #if CONFIG_SERIAL_SAMSUNG_UARTS > 5
        [0] = {
                .start  = S5P_PA_UART5,
-               .end    = S5P_PA_UART5 + S5P_SZ_UART,
+               .end    = S5P_PA_UART5 + S5P_SZ_UART - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
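The UART resource hunks above all make the same correction: a resource's .end field is inclusive, so a region of S5P_SZ_UART bytes starting at the base address must end at base + size - 1, otherwise the region claims one byte too many. A small sketch with placeholder base and size values:

#include <stdio.h>

struct resource { unsigned long start, end; };

#define UART_BASE  0x13800000UL   /* hypothetical base address */
#define UART_SZ    0x100UL        /* hypothetical region size */

int main(void)
{
	struct resource r = { UART_BASE, UART_BASE + UART_SZ - 1 };
	unsigned long size = r.end - r.start + 1;   /* recover the length */

	printf("resource spans 0x%lx-0x%lx (%lu bytes)\n", r.start, r.end, size);
	return 0;
}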
diff --git a/arch/arm/plat-s5p/include/plat/sysmmu.h b/arch/arm/plat-s5p/include/plat/sysmmu.h
deleted file mode 100644 (file)
index db298fc..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* linux/arch/arm/plat-s5p/include/plat/sysmmu.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * Samsung sysmmu driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_PLAT_S5P_SYSMMU_H
-#define __ASM_PLAT_S5P_SYSMMU_H __FILE__
-
-/* debug macro */
-#ifdef CONFIG_S5P_SYSMMU_DEBUG
-#define sysmmu_debug(fmt, arg...)      printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
-#else
-#define sysmmu_debug(fmt, arg...)      do { } while (0)
-#endif
-
-#endif /* __ASM_PLAT_S5P_SYSMMU_H */
index d804914..ffe8a48 100644 (file)
@@ -16,8 +16,6 @@
 #include <mach/regs-sysmmu.h>
 #include <mach/sysmmu.h>
 
-#include <plat/sysmmu.h>
-
 struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM];
 
 void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp)
@@ -123,7 +121,7 @@ static int s5p_sysmmu_set_tablebase(sysmmu_ips ips)
                : "=r" (pg) : : "cc");          \
                pg &= ~0x3fff;
 
-       sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg);
+       printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg);
 
        /* Set sysmmu page table base address */
        __raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR);
index 236ef84..3e4bd81 100644 (file)
@@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
 
        s3c_device_ts.dev.platform_data = npd;
 }
-EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
index 3776cd9..5928105 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 
+#include <plat/devs.h>
+
 /* uart devices */
 
 static struct platform_device s3c24xx_uart_device0 = {
index d9025e3..30518cc 100644 (file)
@@ -17,6 +17,8 @@
 
 #include <linux/irq.h>
 
+struct sys_device;
+
 #ifdef CONFIG_PM
 
 extern __init int s3c_pm_init(void);
index 99ba678..6dd455b 100644 (file)
@@ -24,10 +24,10 @@ static inline void putc(int c)
 {
        void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
 
-       while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
+       while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
                barrier();
 
-       writel(c, base + UART01x_DR);
+       writel_relaxed(c, base + UART01x_DR);
 }
 
 static inline void flush(void)
index 09e9372..8c8b24d 100644 (file)
@@ -14,6 +14,6 @@
 #ifndef __PLAT_VMALLOC_H
 #define __PLAT_VMALLOC_H
 
-#define VMALLOC_END            0xF0000000
+#define VMALLOC_END            0xF0000000UL
 
 #endif /* __PLAT_VMALLOC_H */
index 2fea897..9d6feaa 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Sun Dec 12 23:24:27 2010
+# Last update: Mon Feb 7 08:59:27 2011
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2240,7 +2240,7 @@ arm_ultimator2            MACH_ARM_ULTIMATOR2     ARM_ULTIMATOR2          2250
 vs_v210                        MACH_VS_V210            VS_V210                 2252
 vs_v212                        MACH_VS_V212            VS_V212                 2253
 hmt                    MACH_HMT                HMT                     2254
-suen3                  MACH_SUEN3              SUEN3                   2255
+km_kirkwood            MACH_KM_KIRKWOOD        KM_KIRKWOOD             2255
 vesper                 MACH_VESPER             VESPER                  2256
 str9                   MACH_STR9               STR9                    2257
 omap3_wl_ff            MACH_OMAP3_WL_FF        OMAP3_WL_FF             2258
@@ -2987,7 +2987,7 @@ pxwnas_500_1000           MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
 ea20                   MACH_EA20               EA20                    3002
 awm2                   MACH_AWM2               AWM2                    3003
 ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
-tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+seaboard               MACH_SEABOARD           SEABOARD                3005
 linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
 tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
 rubys                  MACH_RUBYS              RUBYS                   3008
@@ -3190,7 +3190,7 @@ synergy                   MACH_SYNERGY            SYNERGY                 3205
 ics_if_voip            MACH_ICS_IF_VOIP        ICS_IF_VOIP             3206
 wlf_cragg_6410         MACH_WLF_CRAGG_6410     WLF_CRAGG_6410          3207
 punica                 MACH_PUNICA             PUNICA                  3208
-sbc_nt250              MACH_SBC_NT250          SBC_NT250               3209
+trimslice              MACH_TRIMSLICE          TRIMSLICE               3209
 mx27_wmultra           MACH_MX27_WMULTRA       MX27_WMULTRA            3210
 mackerel               MACH_MACKEREL           MACKEREL                3211
 fa9x27                 MACH_FA9X27             FA9X27                  3213
@@ -3219,3 +3219,100 @@ pivicc                  MACH_PIVICC             PIVICC                  3235
 pcm048                 MACH_PCM048             PCM048                  3236
 dds                    MACH_DDS                DDS                     3237
 chalten_xa1            MACH_CHALTEN_XA1        CHALTEN_XA1             3238
+ts48xx                 MACH_TS48XX             TS48XX                  3239
+tonga2_tfttimer                MACH_TONGA2_TFTTIMER    TONGA2_TFTTIMER         3240
+whistler               MACH_WHISTLER           WHISTLER                3241
+asl_phoenix            MACH_ASL_PHOENIX        ASL_PHOENIX             3242
+at91sam9263otlite      MACH_AT91SAM9263OTLITE  AT91SAM9263OTLITE       3243
+ddplug                 MACH_DDPLUG             DDPLUG                  3244
+d2plug                 MACH_D2PLUG             D2PLUG                  3245
+kzm9d                  MACH_KZM9D              KZM9D                   3246
+verdi_lte              MACH_VERDI_LTE          VERDI_LTE               3247
+nanozoom               MACH_NANOZOOM           NANOZOOM                3248
+dm3730_som_lv          MACH_DM3730_SOM_LV      DM3730_SOM_LV           3249
+dm3730_torpedo         MACH_DM3730_TORPEDO     DM3730_TORPEDO          3250
+anchovy                        MACH_ANCHOVY            ANCHOVY                 3251
+re2rev20               MACH_RE2REV20           RE2REV20                3253
+re2rev21               MACH_RE2REV21           RE2REV21                3254
+cns21xx                        MACH_CNS21XX            CNS21XX                 3255
+rider                  MACH_RIDER              RIDER                   3257
+nsk330                 MACH_NSK330             NSK330                  3258
+cns2133evb             MACH_CNS2133EVB         CNS2133EVB              3259
+z3_816x_mod            MACH_Z3_816X_MOD        Z3_816X_MOD             3260
+z3_814x_mod            MACH_Z3_814X_MOD        Z3_814X_MOD             3261
+beect                  MACH_BEECT              BEECT                   3262
+dma_thunderbug         MACH_DMA_THUNDERBUG     DMA_THUNDERBUG          3263
+omn_at91sam9g20                MACH_OMN_AT91SAM9G20    OMN_AT91SAM9G20         3264
+mx25_e2s_uc            MACH_MX25_E2S_UC        MX25_E2S_UC             3265
+mione                  MACH_MIONE              MIONE                   3266
+top9000_tcu            MACH_TOP9000_TCU        TOP9000_TCU             3267
+top9000_bsl            MACH_TOP9000_BSL        TOP9000_BSL             3268
+kingdom                        MACH_KINGDOM            KINGDOM                 3269
+armadillo460           MACH_ARMADILLO460       ARMADILLO460            3270
+lq2                    MACH_LQ2                LQ2                     3271
+sweda_tms2             MACH_SWEDA_TMS2         SWEDA_TMS2              3272
+mx53_loco              MACH_MX53_LOCO          MX53_LOCO               3273
+acer_a8                        MACH_ACER_A8            ACER_A8                 3275
+acer_gauguin           MACH_ACER_GAUGUIN       ACER_GAUGUIN            3276
+guppy                  MACH_GUPPY              GUPPY                   3277
+mx61_ard               MACH_MX61_ARD           MX61_ARD                3278
+tx53                   MACH_TX53               TX53                    3279
+omapl138_case_a3       MACH_OMAPL138_CASE_A3   OMAPL138_CASE_A3        3280
+uemd                   MACH_UEMD               UEMD                    3281
+ccwmx51mut             MACH_CCWMX51MUT         CCWMX51MUT              3282
+rockhopper             MACH_ROCKHOPPER         ROCKHOPPER              3283
+nookcolor              MACH_NOOKCOLOR          NOOKCOLOR               3284
+hkdkc100               MACH_HKDKC100           HKDKC100                3285
+ts42xx                 MACH_TS42XX             TS42XX                  3286
+aebl                   MACH_AEBL               AEBL                    3287
+wario                  MACH_WARIO              WARIO                   3288
+gfs_spm                        MACH_GFS_SPM            GFS_SPM                 3289
+cm_t3730               MACH_CM_T3730           CM_T3730                3290
+isc3                   MACH_ISC3               ISC3                    3291
+rascal                 MACH_RASCAL             RASCAL                  3292
+hrefv60                        MACH_HREFV60            HREFV60                 3293
+tpt_2_0                        MACH_TPT_2_0            TPT_2_0                 3294
+pyramid_td             MACH_PYRAMID_TD         PYRAMID_TD              3295
+splendor               MACH_SPLENDOR           SPLENDOR                3296
+guf_planet             MACH_GUF_PLANET         GUF_PLANET              3297
+msm8x60_qt             MACH_MSM8X60_QT         MSM8X60_QT              3298
+htc_hd_mini            MACH_HTC_HD_MINI        HTC_HD_MINI             3299
+athene                 MACH_ATHENE             ATHENE                  3300
+deep_r_ek_1            MACH_DEEP_R_EK_1        DEEP_R_EK_1             3301
+vivow_ct               MACH_VIVOW_CT           VIVOW_CT                3302
+nery_1000              MACH_NERY_1000          NERY_1000               3303
+rfl109145_ssrv         MACH_RFL109145_SSRV     RFL109145_SSRV          3304
+nmh                    MACH_NMH                NMH                     3305
+wn802t                 MACH_WN802T             WN802T                  3306
+dragonet               MACH_DRAGONET           DRAGONET                3307
+geneva_b               MACH_GENEVA_B           GENEVA_B                3308
+at91sam9263desk16l     MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L      3309
+bcmhana_sv             MACH_BCMHANA_SV         BCMHANA_SV              3310
+bcmhana_tablet         MACH_BCMHANA_TABLET     BCMHANA_TABLET          3311
+koi                    MACH_KOI                KOI                     3312
+ts4800                 MACH_TS4800             TS4800                  3313
+tqma9263               MACH_TQMA9263           TQMA9263                3314
+holiday                        MACH_HOLIDAY            HOLIDAY                 3315
+dma_6410               MACH_DMA6410            DMA6410                 3316
+pcats_overlay          MACH_PCATS_OVERLAY      PCATS_OVERLAY           3317
+hwgw6410               MACH_HWGW6410           HWGW6410                3318
+shenzhou               MACH_SHENZHOU           SHENZHOU                3319
+cwme9210               MACH_CWME9210           CWME9210                3320
+cwme9210js             MACH_CWME9210JS         CWME9210JS              3321
+pgs_v1                 MACH_PGS_SITARA         PGS_SITARA              3322
+colibri_tegra2         MACH_COLIBRI_TEGRA2     COLIBRI_TEGRA2          3323
+w21                    MACH_W21                W21                     3324
+polysat1               MACH_POLYSAT1           POLYSAT1                3325
+dataway                        MACH_DATAWAY            DATAWAY                 3326
+cobral138              MACH_COBRAL138          COBRAL138               3327
+roverpcs8              MACH_ROVERPCS8          ROVERPCS8               3328
+marvelc                        MACH_MARVELC            MARVELC                 3329
+navefihid              MACH_NAVEFIHID          NAVEFIHID               3330
+dm365_cv100            MACH_DM365_CV100        DM365_CV100             3331
+able                   MACH_ABLE               ABLE                    3332
+legacy                 MACH_LEGACY             LEGACY                  3333
+icong                  MACH_ICONG              ICONG                   3334
+rover_g8               MACH_ROVER_G8           ROVER_G8                3335
+t5388p                 MACH_T5388P             T5388P                  3336
+dingo                  MACH_DINGO              DINGO                   3337
+goflexhome             MACH_GOFLEXHOME         GOFLEXHOME              3338
index 92ecd84..bc7e8ae 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __ASM_AVR32_PGALLOC_H
 #define __ASM_AVR32_PGALLOC_H
 
+#include <linux/mm.h>
 #include <linux/quicklist.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
index 1ff9f14..7dbc664 100644 (file)
@@ -10,6 +10,7 @@
 #define __BFIN_ASM_SERIAL_H__
 
 #include <linux/serial_core.h>
+#include <linux/spinlock.h>
 #include <mach/anomaly.h>
 #include <mach/bfin_serial.h>
 
@@ -41,6 +42,7 @@ struct bfin_serial_port {
        struct circ_buf rx_dma_buf;
        struct timer_list rx_dma_timer;
        int rx_dma_nrows;
+       spinlock_t rx_lock;
        unsigned int tx_dma_channel;
        unsigned int rx_dma_channel;
        struct work_struct tx_dma_workqueue;
index 250f4d4..06a5e67 100644 (file)
@@ -13,6 +13,8 @@
 .align 2
 
 ENTRY(_outsl)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -20,10 +22,12 @@ ENTRY(_outsl)
        LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
 .Llong_loop_s: R0 = [P1++];
 .Llong_loop_e: [P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsl)
 
 ENTRY(_outsw)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -31,10 +35,12 @@ ENTRY(_outsw)
        LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
 .Lword_loop_s: R0 = W[P1++];
 .Lword_loop_e: W[P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsw)
 
 ENTRY(_outsb)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -42,10 +48,12 @@ ENTRY(_outsb)
        LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
 .Lbyte_loop_s: R0 = B[P1++];
 .Lbyte_loop_e: B[P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsb)
 
 ENTRY(_outsw_8)
+       CC = R2 == 0;
+       IF CC JUMP 1f;
        P0 = R0;        /* P0 = port */
        P1 = R1;        /* P1 = address */
        P2 = R2;        /* P2 = count */
@@ -56,5 +64,5 @@ ENTRY(_outsw_8)
                R0 = R0 << 8;
                R0 = R0 + R1;
 .Lword8_loop_e: W[P0] = R0;
-       RTS;
+1:     RTS;
 ENDPROC(_outsw_8)
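The Blackfin hunks above add an early exit when the transfer count is zero, before the LSETUP hardware loop is set up. The C analogue below shows why such a guard matters when the loop body runs before the count is tested; the real fix is in assembly, and this function is only an illustration.

#include <stdint.h>
#include <stdio.h>

static void outs_words(volatile uint16_t *port, const uint16_t *buf,
		       unsigned int count)
{
	if (!count)                      /* mirror the "IF CC JUMP 1f" bail-out */
		return;
	do {
		*port = *buf++;          /* one store per element */
	} while (--count);               /* with count == 0 this would wrap around */
}

int main(void)
{
	uint16_t fake_port = 0, data[2] = { 0xBEEF, 0xCAFE };

	outs_words(&fake_port, data, 0); /* safely does nothing */
	outs_words(&fake_port, data, 2);
	printf("last word written: 0x%04X\n", (unsigned)fake_port);
	return 0;
}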
index 790c767..ab4a925 100644 (file)
@@ -58,6 +58,8 @@
 1:
 .ifeqs "\flushins", BROK_FLUSH_INST
        \flushins [P0++];
+       nop;
+       nop;
 2:     nop;
 .else
 2:     \flushins [P0++];
index 4422189..c49be84 100644 (file)
@@ -72,11 +72,6 @@ SECTIONS
        INIT_TEXT_SECTION(PAGE_SIZE)
        .init.data : { INIT_DATA }
        .init.setup : { INIT_SETUP(16) }
-#ifdef CONFIG_ETRAX_ARCH_V32
-       __start___param = .;
-       __param : { *(__param) }
-       __stop___param = .;
-#endif
        .initcall.init : {
                INIT_CALLS
        }
index f745c12..76eaf38 100644 (file)
@@ -80,7 +80,7 @@ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* FIXME M32R */
 #endif
-       __do_IRQ(irq);
+       generic_handle_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
 
index b1577f7..82a4bb5 100644 (file)
@@ -610,17 +610,17 @@ static void amiga_mem_console_write(struct console *co, const char *s,
 
 static int __init amiga_savekmsg_setup(char *arg)
 {
-       static struct resource debug_res = { .name = "Debug" };
-
        if (!MACH_IS_AMIGA || strcmp(arg, "mem"))
-               goto done;
+               return 0;
 
-       if (!AMIGAHW_PRESENT(CHIP_RAM)) {
-               printk("Warning: no chipram present for debugging\n");
-               goto done;
+       if (amiga_chip_size < SAVEKMSG_MAXMEM) {
+               pr_err("Not enough chipram for debugging\n");
+               return -ENOMEM;
        }
 
-       savekmsg = amiga_chip_alloc_res(SAVEKMSG_MAXMEM, &debug_res);
+       /* Just steal the block, the chipram allocator isn't functional yet */
+       amiga_chip_size -= SAVEKMSG_MAXMEM;
+       savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size);
        savekmsg->magic1 = SAVEKMSG_MAGIC1;
        savekmsg->magic2 = SAVEKMSG_MAGIC2;
        savekmsg->magicptr = ZTWO_PADDR(savekmsg);
@@ -628,8 +628,6 @@ static int __init amiga_savekmsg_setup(char *arg)
 
        amiga_console_driver.write = amiga_mem_console_write;
        register_console(&amiga_console_driver);
-
-done:
        return 0;
 }
 
index 39478dd..26a804e 100644 (file)
@@ -388,9 +388,9 @@ void __init atari_init_IRQ(void)
        }
 
        if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) {
-               scc.cha_a_ctrl = 9;
+               atari_scc.cha_a_ctrl = 9;
                MFPDELAY();
-               scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
+               atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
        }
 
        if (ATARIHW_PRESENT(SCU)) {
index ae2d96e..4203d10 100644 (file)
@@ -315,7 +315,7 @@ void __init config_atari(void)
                ATARIHW_SET(SCC_DMA);
                printk("SCC_DMA ");
        }
-       if (scc_test(&scc.cha_a_ctrl)) {
+       if (scc_test(&atari_scc.cha_a_ctrl)) {
                ATARIHW_SET(SCC);
                printk("SCC ");
        }
index 28efdc3..5a48424 100644 (file)
@@ -53,9 +53,9 @@ static inline void ata_scc_out(char c)
 {
        do {
                MFPDELAY();
-       } while (!(scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
+       } while (!(atari_scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
        MFPDELAY();
-       scc.cha_b_data = c;
+       atari_scc.cha_b_data = c;
 }
 
 static void atari_scc_console_write(struct console *co, const char *str,
@@ -140,9 +140,9 @@ int atari_scc_console_wait_key(struct console *co)
 {
        do {
                MFPDELAY();
-       } while (!(scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */
+       } while (!(atari_scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */
        MFPDELAY();
-       return scc.cha_b_data;
+       return atari_scc.cha_b_data;
 }
 
 int atari_midi_console_wait_key(struct console *co)
@@ -185,9 +185,9 @@ static void __init atari_init_mfp_port(int cflag)
 
 #define SCC_WRITE(reg, val)                            \
        do {                                            \
-               scc.cha_b_ctrl = (reg);                 \
+               atari_scc.cha_b_ctrl = (reg);           \
                MFPDELAY();                             \
-               scc.cha_b_ctrl = (val);                 \
+               atari_scc.cha_b_ctrl = (val);           \
                MFPDELAY();                             \
        } while (0)
 
@@ -240,7 +240,7 @@ static void __init atari_init_scc_port(int cflag)
        reg3 = (cflag & CSIZE) == CS8 ? 0xc0 : 0x40;
        reg5 = (cflag & CSIZE) == CS8 ? 0x60 : 0x20 | 0x82 /* assert DTR/RTS */;
 
-       (void)scc.cha_b_ctrl;           /* reset reg pointer */
+       (void)atari_scc.cha_b_ctrl;     /* reset reg pointer */
        SCC_WRITE(9, 0xc0);             /* reset */
        LONG_DELAY();                   /* extra delay after WR9 access */
        SCC_WRITE(4, (cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03)
index a714e1a..f51f709 100644 (file)
@@ -449,7 +449,7 @@ struct SCC
   u_char char_dummy3;
   u_char cha_b_data;
  };
-# define scc ((*(volatile struct SCC*)SCC_BAS))
+# define atari_scc ((*(volatile struct SCC*)SCC_BAS))
 
 /* The ESCC (Z85230) in an Atari ST. The channels are reversed! */
 # define st_escc ((*(volatile struct SCC*)0xfffffa31))
index 2936dda..3219845 100644 (file)
@@ -81,18 +81,6 @@ static inline char *strncpy(char *dest, const char *src, size_t n)
        strcpy(__d + strlen(__d), (s));         \
 })
 
-#define __HAVE_ARCH_STRCHR
-static inline char *strchr(const char *s, int c)
-{
-       char sc, ch = c;
-
-       for (; (sc = *s++) != ch; ) {
-               if (!sc)
-                       return NULL;
-       }
-       return (char *)s - 1;
-}
-
 #ifndef CONFIG_COLDFIRE
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char *cs, const char *ct)
@@ -111,14 +99,12 @@ static inline int strcmp(const char *cs, const char *ct)
                : "+a" (cs), "+a" (ct), "=d" (res));
        return res;
 }
+#endif /* CONFIG_COLDFIRE */
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *, const void *, __kernel_size_t);
 
-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, __kernel_size_t);
 #define memcmp(d, s, n) __builtin_memcmp(d, s, n)
-#endif /* CONFIG_COLDFIRE */
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
index 4253f87..d399c5f 100644 (file)
@@ -243,14 +243,3 @@ void *memmove(void *dest, const void *src, size_t n)
        return xdest;
 }
 EXPORT_SYMBOL(memmove);
-
-int memcmp(const void *cs, const void *ct, size_t count)
-{
-       const unsigned char *su1, *su2;
-
-       for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--)
-               if (*su1 != *su2)
-                       return *su1 < *su2 ? -1 : +1;
-       return 0;
-}
-EXPORT_SYMBOL(memcmp);
index ef33213..47e15eb 100644 (file)
@@ -141,6 +141,12 @@ SECTIONS {
                *(__param)
                __stop___param = .;
 
+               /* Built-in module versions */
+               . = ALIGN(4) ;
+               __start___modver = .;
+               *(__modver)
+               __stop___modver = .;
+
                . = ALIGN(4) ;
                _etext = . ;
        } > TEXT
index d94d709..32d852e 100644 (file)
@@ -4,4 +4,4 @@
 
 lib-y  := ashldi3.o ashrdi3.o lshrdi3.o \
           muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-          checksum.o memcpy.o memset.o delay.o
+          checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68knommu/lib/memmove.c b/arch/m68knommu/lib/memmove.c
new file mode 100644 (file)
index 0000000..b3dcfe9
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#define __IN_STRING_C
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+       void *xdest = dest;
+       size_t temp;
+
+       if (!n)
+               return xdest;
+
+       if (dest < src) {
+               if ((long)dest & 1) {
+                       char *cdest = dest;
+                       const char *csrc = src;
+                       *cdest++ = *csrc++;
+                       dest = cdest;
+                       src = csrc;
+                       n--;
+               }
+               if (n > 2 && (long)dest & 2) {
+                       short *sdest = dest;
+                       const short *ssrc = src;
+                       *sdest++ = *ssrc++;
+                       dest = sdest;
+                       src = ssrc;
+                       n -= 2;
+               }
+               temp = n >> 2;
+               if (temp) {
+                       long *ldest = dest;
+                       const long *lsrc = src;
+                       temp--;
+                       do
+                               *ldest++ = *lsrc++;
+                       while (temp--);
+                       dest = ldest;
+                       src = lsrc;
+               }
+               if (n & 2) {
+                       short *sdest = dest;
+                       const short *ssrc = src;
+                       *sdest++ = *ssrc++;
+                       dest = sdest;
+                       src = ssrc;
+               }
+               if (n & 1) {
+                       char *cdest = dest;
+                       const char *csrc = src;
+                       *cdest = *csrc;
+               }
+       } else {
+               dest = (char *)dest + n;
+               src = (const char *)src + n;
+               if ((long)dest & 1) {
+                       char *cdest = dest;
+                       const char *csrc = src;
+                       *--cdest = *--csrc;
+                       dest = cdest;
+                       src = csrc;
+                       n--;
+               }
+               if (n > 2 && (long)dest & 2) {
+                       short *sdest = dest;
+                       const short *ssrc = src;
+                       *--sdest = *--ssrc;
+                       dest = sdest;
+                       src = ssrc;
+                       n -= 2;
+               }
+               temp = n >> 2;
+               if (temp) {
+                       long *ldest = dest;
+                       const long *lsrc = src;
+                       temp--;
+                       do
+                               *--ldest = *--lsrc;
+                       while (temp--);
+                       dest = ldest;
+                       src = lsrc;
+               }
+               if (n & 2) {
+                       short *sdest = dest;
+                       const short *ssrc = src;
+                       *--sdest = *--ssrc;
+                       dest = sdest;
+                       src = ssrc;
+               }
+               if (n & 1) {
+                       char *cdest = dest;
+                       const char *csrc = src;
+                       *--cdest = *--csrc;
+               }
+       }
+       return xdest;
+}
+EXPORT_SYMBOL(memmove);
index d09d9da..c5151f8 100644 (file)
@@ -50,8 +50,10 @@ static int __init mcf_intc2_init(void)
        int irq;
 
        /* GPIO interrupt sources */
-       for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++)
+       for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
                irq_desc[irq].chip = &intc2_irq_gpio_chip;
+               set_irq_handler(irq, handle_edge_irq);
+       }
 
        return 0;
 }
index 240a7a6..676960c 100644 (file)
@@ -108,7 +108,6 @@ Luser_return:
        movel   %d1,%a2
 1:
        move    %a2@(TI_FLAGS),%d1      /* thread_info->flags */
-       andl    #_TIF_WORK_MASK,%d1
        jne     Lwork_to_do
        RESTORE_ALL
 
index f27e688..8e4e10c 100644 (file)
@@ -210,7 +210,7 @@ void
 cpm_install_handler(int vec, void (*handler)(), void *dev_id)
 {
 
-       request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id);
+       request_irq(vec, handler, 0, "timer", dev_id);
 
 /*     if (cpm_vecs[vec].handler != 0) */
 /*             printk(KERN_INFO "CPM interrupt %x replacing %x\n", */
index ac629fa..9dd5bca 100644 (file)
@@ -75,7 +75,7 @@ void hw_timer_init(void)
   /* Set compare register  32Khz / 32 / 10 = 100 */
   TCMP = 10;                                                              
 
-  request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
+  request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL);
 #endif
 
   /* General purpose quicc timers: MC68360UM p7-20 */
index 8a28788..46c1b18 100644 (file)
@@ -104,7 +104,6 @@ Luser_return:
        movel   %d1,%a2
 1:
        move    %a2@(TI_FLAGS),%d1      /* thread_info->flags */
-       andl    #_TIF_WORK_MASK,%d1
        jne     Lwork_to_do
        RESTORE_ALL
 
index ad96ab1..a29041c 100644 (file)
@@ -132,8 +132,8 @@ void init_IRQ(void)
        pquicc->intr_cimr = 0x00000000;
 
        for (i = 0; (i < NR_IRQS); i++) {
-               set_irq_chip(irq, &intc_irq_chip);
-               set_irq_handler(irq, handle_level_irq);
+               set_irq_chip(i, &intc_irq_chip);
+               set_irq_handler(i, handle_level_irq);
        }
 }
 
index 4ddfc3d..5837cf0 100644 (file)
@@ -138,7 +138,6 @@ Luser_return:
        andl    #-THREAD_SIZE,%d1       /* at base of kernel stack */
        movel   %d1,%a0
        movel   %a0@(TI_FLAGS),%d1      /* get thread_info->flags */
-       andl    #0xefff,%d1
        jne     Lwork_to_do             /* still work to do */
 
 Lreturn:
index 5fd3190..c4532f0 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/types.h>
 #include <asm/registers.h>
 
-#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 
 static inline unsigned long arch_local_irq_save(void)
 {
index b23f680..885574a 100644 (file)
@@ -411,20 +411,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 static inline unsigned long pte_update(pte_t *p, unsigned long clr,
                                unsigned long set)
 {
-       unsigned long old, tmp, msr;
-
-       __asm__ __volatile__("\
-       msrclr  %2, 0x2\n\
-       nop\n\
-       lw      %0, %4, r0\n\
-       andn    %1, %0, %5\n\
-       or      %1, %1, %6\n\
-       sw      %1, %4, r0\n\
-       mts     rmsr, %2\n\
-       nop"
-       : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
-       : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
-       : "cc");
+       unsigned long flags, old, tmp;
+
+       raw_local_irq_save(flags);
+
+       __asm__ __volatile__(   "lw     %0, %2, r0      \n"
+                               "andn   %1, %0, %3      \n"
+                               "or     %1, %1, %4      \n"
+                               "sw     %1, %2, r0      \n"
+                       : "=&r" (old), "=&r" (tmp)
+                       : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
+                       : "cc");
+
+       raw_local_irq_restore(flags);
 
        return old;
 }
index e01afa6..488c1ed 100644 (file)
@@ -27,7 +27,7 @@
        register unsigned tmp __asm__("r3");                    \
        tmp = 0x0;      /* Prevent warning about unused */      \
        __asm__ __volatile__ (                                  \
-                       "mfs    %0, rpvr" #pvrid ";"    \
+                       "mfs    %0, rpvr" #pvrid ";"            \
                        : "=r" (tmp) : : "memory");             \
        val = tmp;                                              \
 }
@@ -54,7 +54,7 @@ int cpu_has_pvr(void)
        if (!(flags & PVR_MSR_BIT))
                return 0;
 
-       get_single_pvr(0x00, pvr0);
+       get_single_pvr(0, pvr0);
        pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
 
        if (pvr0 & PVR0_PVR_FULL_MASK)
index 4243400..778a5ce 100644 (file)
@@ -62,23 +62,32 @@ real_start:
        andi    r1, r1, ~2
        mts     rmsr, r1
 /*
- * Here is checking mechanism which check if Microblaze has msr instructions
- * We load msr and compare it with previous r1 value - if is the same,
- * msr instructions works if not - cpu don't have them.
+ * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
+ * if the msrclr instruction is not enabled. We use this to detect
+ * if the opcode is available, by issuing msrclr and then testing the result.
+ * r8 == 0 - msr instructions are implemented
+ * r8 != 0 - msr instructions are not implemented
  */
-       /* r8=0 - I have msr instr, 1 - I don't have them */
-       rsubi   r0, r0, 1       /* set the carry bit */
-       msrclr  r0, 0x4         /* try to clear it */
-       /* read the carry bit, r8 will be '0' if msrclr exists */
-       addik   r8, r0, 0
+       msrclr  r8, 0 /* clear nothing - just read msr for test */
+       cmpu    r8, r8, r1 /* r1 must contain msr reg content */
 
 /* r7 may point to an FDT, or there may be one linked in.
    if it's in r7, we've got to save it away ASAP.
    We ensure r7 points to a valid FDT, just in case the bootloader
    is broken or non-existent */
        beqi    r7, no_fdt_arg                  /* NULL pointer?  don't copy */
-       lw      r11, r0, r7                     /* Does r7 point to a */
-       rsubi   r11, r11, OF_DT_HEADER          /* valid FDT? */
+/* Does r7 point to a valid FDT? Load HEADER magic number */
+       /* Run time Big/Little endian platform */
+       /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
+       addik   r11, r0, 0x1 /* BIG/LITTLE checking value */
+       /* __bss_start will be zeroed later - it is just temp location */
+       swi     r11, r0, TOPHYS(__bss_start)
+       lbui    r11, r0, TOPHYS(__bss_start)
+       beqid   r11, big_endian /* DO NOT break delay stop dependency */
+       lw      r11, r0, r7 /* Big endian load in delay slot */
+       lwr     r11, r0, r7 /* Little endian load */
+big_endian:
+       rsubi   r11, r11, OF_DT_HEADER  /* Check FDT header */
        beqi    r11, _prepare_copy_fdt
        or      r7, r0, r0              /* clear R7 when not valid DTB */
        bnei    r11, no_fdt_arg                 /* No - get out of here */
index 25f6e07..782680d 100644 (file)
        #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
        #define BSRLI(rD, rA, imm)      \
                bsrli rD, rA, imm
-       #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
-       #define BSRLI(rD, rA, imm)      \
-               ori rD, r0, (1 << imm); \
-               idivu rD, rD, rA
        #else
        #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
        /* Only the used shift constants defined here - add more if needed */
index bb1558e..9312fbb 100644 (file)
@@ -161,11 +161,11 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
        if (msr)
                eprintk("!!!Your kernel has setup MSR instruction but "
-                               "CPU don't have it %d\n", msr);
+                               "CPU don't have it %x\n", msr);
 #else
        if (!msr)
                eprintk("!!!Your kernel not setup MSR instruction but "
-                               "CPU have it %d\n", msr);
+                               "CPU have it %x\n", msr);
 #endif
 
        for (src = __ivt_start; src < __ivt_end; src++, dst++)
index fdc48bb..62021d7 100644 (file)
  *     between mem locations with size of xfer spec'd in bytes
  */
 
+#ifdef __MICROBLAZEEL__
+#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
+#endif
+
 #include <linux/linkage.h>
        .text
        .globl  memcpy
index 11bdd68..fc770be 100644 (file)
@@ -169,11 +169,11 @@ static int __init pdc_console_tty_driver_init(void)
 
        struct console *tmp;
 
-       acquire_console_sem();
+       console_lock();
        for_each_console(tmp)
                if (tmp == &pdc_cons)
                        break;
-       release_console_sem();
+       console_unlock();
 
        if (!tmp) {
                printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
index 380d48b..26b8c80 100644 (file)
 //
 //----------------------------------------------------------------------------
 #include <linux/cache.h>
+#include <linux/threads.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS     NR_CPUS
+#else
+#define NR_LPPACAS     64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS     1
+#endif
+
+
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
 struct lppaca {
index 991d599..fe56a23 100644 (file)
@@ -240,6 +240,12 @@ struct machdep_calls {
         * claims to support kexec.
         */
        int (*machine_kexec_prepare)(struct kimage *image);
+
+       /* Called to perform the _real_ kexec.
+        * Do NOT allocate memory or fail here. We are past the point of
+        * no return.
+        */
+       void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
index 8eaed81..17194fc 100644 (file)
@@ -40,8 +40,8 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x)         ((x << 28) & 0x30000000)
-#define MAS0_ESEL(x)           ((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x)         (((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x)           (((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)             ((x) & 0x00000FFF)
 #define MAS0_HES               0x00004000
 #define MAS0_WQ_ALLWAYS                0x00000000
 
 #define MAS1_VALID             0x80000000
 #define MAS1_IPROT             0x40000000
-#define MAS1_TID(x)            ((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x)            (((x) << 16) & 0x3FFF0000)
 #define MAS1_IND               0x00002000
 #define MAS1_TS                        0x00001000
 #define MAS1_TSIZE_MASK                0x00000f80
 #define MAS1_TSIZE_SHIFT       7
-#define MAS1_TSIZE(x)          ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x)          (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
 
 #define MAS2_EPN               0xFFFFF000
 #define MAS2_X0                        0x00000040
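The MAS register hunk above parenthesizes the macro arguments; without that, passing an expression such as a sum binds to the shift operator incorrectly. A compilable illustration of the difference; the macro names below are stand-ins for the pattern, not the kernel definitions.

#include <stdio.h>

#define TLBSEL_BAD(x)   ((x << 28) & 0x30000000)     /* argument unprotected */
#define TLBSEL_GOOD(x)  (((x) << 28) & 0x30000000)   /* argument parenthesized */

int main(void)
{
	unsigned int sel = 1, extra = 2;

	/* sel + extra << 28 parses as sel + (extra << 28), not (sel + extra) << 28 */
	printf("bad: 0x%08x  good: 0x%08x\n",
	       TLBSEL_BAD(sel + extra), TLBSEL_GOOD(sel + extra));
	return 0;
}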
index 53b64be..da4b200 100644 (file)
@@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr;
 
 #ifdef CONFIG_FLATMEM
 #define ARCH_PFN_OFFSET                (MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn)         ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn)         ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif
 
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
index 55cba4a..f8cd9fb 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-       mflr    r4
+       mflr    r5
 BEGIN_MMU_FTR_SECTION
        li      r10,0
        mtspr   SPRN_SPRG_603_LRU,r10           /* init SW LRU tracking */
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
        bl      __init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
        bl      setup_common_caches
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_604)
-       mflr    r4
+       mflr    r5
        bl      setup_common_caches
        bl      setup_604_hid0
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_750)
-       mflr    r4
+       mflr    r5
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_750cx)
-       mflr    r4
+       mflr    r5
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        bl      setup_750cx
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_750fx)
-       mflr    r4
+       mflr    r5
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        bl      setup_750fx
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_7400)
-       mflr    r4
+       mflr    r5
        bl      __init_fpu_registers
        bl      setup_7400_workarounds
        bl      setup_common_caches
        bl      setup_750_7400_hid0
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_7410)
-       mflr    r4
+       mflr    r5
        bl      __init_fpu_registers
        bl      setup_7410_workarounds
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        li      r3,0
        mtspr   SPRN_L2CR2,r3
-       mtlr    r4
+       mtlr    r5
        blr
 _GLOBAL(__setup_cpu_745x)
-       mflr    r4
+       mflr    r5
        bl      setup_common_caches
        bl      setup_745x_specifics
-       mtlr    r4
+       mtlr    r5
        blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr2+eq
        bnelr
-       lwz     r6,CPU_SPEC_FEATURES(r5)
+       lwz     r6,CPU_SPEC_FEATURES(r4)
        li      r7,CPU_FTR_CAN_NAP
        andc    r6,r6,r7
-       stw     r6,CPU_SPEC_FEATURES(r5)
+       stw     r6,CPU_SPEC_FEATURES(r4)
        blr
 
 /* 750fx specific
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
        andis.  r11,r11,L3CR_L3E@h
        beq     1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-       lwz     r6,CPU_SPEC_FEATURES(r5)
+       lwz     r6,CPU_SPEC_FEATURES(r4)
        andi.   r0,r6,CPU_FTR_L3_DISABLE_NAP
        beq     1f
        li      r7,CPU_FTR_CAN_NAP
        andc    r6,r6,r7
-       stw     r6,CPU_SPEC_FEATURES(r5)
+       stw     r6,CPU_SPEC_FEATURES(r4)
 1:
        mfspr   r11,SPRN_HID0
 
index 8d74a24..e8e915c 100644 (file)
@@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
         * pointer on ppc64 and booke as we are running at 0 in real mode
         * on ppc64 and reloc_offset is always 0 on booke.
         */
-       if (s->cpu_setup) {
-               s->cpu_setup(offset, s);
+       if (t->cpu_setup) {
+               t->cpu_setup(offset, t);
        }
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
index 49a170a..a5f8672 100644 (file)
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
        save_ftrace_enabled = __ftrace_enabled_save();
 
-       default_machine_kexec(image);
+       if (ppc_md.machine_kexec)
+               ppc_md.machine_kexec(image);
+       else
+               default_machine_kexec(image);
 
        __ftrace_enabled_restore(save_ftrace_enabled);
 
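The machine_kexec hunk above lets a platform install its own kexec entry point through ppc_md and falls back to the generic path otherwise. A minimal sketch of that optional-hook-with-fallback pattern; the structure and function names here are illustrative only.

#include <stdio.h>

struct machdep_calls { void (*machine_kexec)(void *image); };

static struct machdep_calls ppc_md;        /* hook left NULL by default */

static void default_machine_kexec(void *image)
{
	printf("generic kexec path, image=%p\n", image);
}

static void machine_kexec(void *image)
{
	if (ppc_md.machine_kexec)
		ppc_md.machine_kexec(image);   /* platform override, if registered */
	else
		default_machine_kexec(image);  /* common fallback */
}

int main(void)
{
	machine_kexec((void *)0x1000);
	return 0;
}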
index ebf9846..f4adf89 100644 (file)
@@ -26,20 +26,6 @@ extern unsigned long __toc_start;
 
 #ifdef CONFIG_PPC_BOOK3S
 
-/*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS     NR_CPUS
-#else
-#define NR_LPPACAS     64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS     1
-#endif
-
 /*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
index 4dcf5f8..b0dc8f7 100644 (file)
@@ -596,6 +596,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        if (left <= 0)
                                left = period;
                        record = 1;
+                       event->hw.last_period = event->hw.sample_period;
                }
                if (left < 0x80000000LL)
                        val = 0x80000000LL - left;
index 7a1d5cb..8303a6c 100644 (file)
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
                        prime_debug_regs(new_thread);
 }
 #else  /* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
        if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
                set_dabr(0);
        }
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
        discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
index bf5cb91..0dc95c0 100644 (file)
@@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
        dbg("removing cpu %lu from node %d\n", cpu, node);
 
        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
-               cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+               cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
@@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/* Vrtual Processor Home Node (VPHN) support */
+/* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
-#define VPHN_NR_CHANGE_CTRS (8)
-static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
 static void set_topology_timer(void);
@@ -1303,16 +1302,18 @@ static void set_topology_timer(void);
  */
 static void setup_cpu_associativity_change_counters(void)
 {
-       int cpu = 0;
+       int cpu;
+
+       /* The VPHN feature supports a maximum of 8 reference points */
+       BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
 
        for_each_possible_cpu(cpu) {
-               int i = 0;
+               int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-               for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+               for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
-               }
        }
 }
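[Annotation] The hunk above sizes the per-CPU change-counter array by the firmware-reported distance_ref_points_depth and adds a BUILD_BUG_ON for the architectural limit of eight VPHN reference points. A standalone sketch of that style of compile-time assertion; COMPILE_ASSERT and MAX_REF_POINTS are illustrative stand-ins, not the kernel definitions:

#define MAX_REF_POINTS 8

/* Evaluates to a negative array size, and therefore a compile error,
 * whenever the condition is true. */
#define COMPILE_ASSERT(cond)  ((void)sizeof(char[1 - 2 * !!(cond)]))

static void check_limits(void)
{
        COMPILE_ASSERT(MAX_REF_POINTS > 8);     /* fine: 8 is not > 8 */
}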
 
@@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void)
  */
 static int update_cpu_associativity_changes_mask(void)
 {
-       int cpu = 0, nr_cpus = 0;
+       int cpu, nr_cpus = 0;
        cpumask_t *changes = &cpu_associativity_changes_mask;
 
        cpumask_clear(changes);
@@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void)
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-               for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
-                       if (hypervisor_counts[i] > counts[i]) {
+               for (i = 0; i < distance_ref_points_depth; i++) {
+                       if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
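[Annotation] Above, the per-counter comparison moves from '>' to '!='. One plausible reason (an assumption on my part, not stated in the hunk) is that the counters are u8 values that wrap, so a greater-than test can miss an update that crosses the wraparound, while inequality catches any change:

#include <stdio.h>

int main(void)
{
        unsigned char old = 255, cur = 0;           /* counter has wrapped */

        printf("cur > old  : %d\n", cur > old);     /* 0 - change missed   */
        printf("cur != old : %d\n", cur != old);    /* 1 - change detected */
        return 0;
}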
@@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void)
        return nr_cpus;
 }
 
-/* 6 64-bit registers unpacked into 12 32-bit associativity values */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
 
 /*
  * Convert the associativity domain numbers returned from the hypervisor
@@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void)
  */
 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 {
-       int i = 0;
-       int nr_assoc_doms = 0;
+       int i, nr_assoc_doms = 0;
        const u16 *field = (const u16*) packed;
 
 #define VPHN_FIELD_UNUSED      (0xffff)
 #define VPHN_FIELD_MSB         (0x8000)
 #define VPHN_FIELD_MASK                (~VPHN_FIELD_MSB)
 
-       for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+       for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
                if (*field == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
@@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
                         */
                        unpacked[i] = *((u32*)field);
                        field += 2;
-               }
-               else if (*field & VPHN_FIELD_MSB) {
+               } else if (*field & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
                        unpacked[i] = *field & VPHN_FIELD_MASK;
                        field++;
                        nr_assoc_doms++;
-               }
-               else {
+               } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
@@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
                }
        }
 
+       /* The first cell contains the length of the property */
+       unpacked[0] = nr_assoc_doms;
+
        return nr_assoc_doms;
 }
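[Annotation] vphn_unpack_associativity() above decodes the hypervisor's packed buffer: each 16-bit field is either the all-ones terminator, a 15-bit value flagged by the MSB, or the upper half of a 32-bit value spanning two fields, and the count of decoded domains is now stored in cell 0 so the result looks like a device-tree associativity property. A simplified standalone sketch of the same decode (the names and the early stop at the terminator are illustrative, not the kernel code):

#include <stdint.h>

#define FIELD_UNUSED  0xffff
#define FIELD_MSB     0x8000
#define FIELD_MASK    0x7fff

/* 'field' is assumed well formed: 'nfields' 16-bit entries, terminator last. */
static int unpack_sketch(const uint16_t *field, int nfields, uint32_t *out)
{
        int i, ndoms = 0;

        for (i = 1; i < nfields; i++) {
                if (*field == FIELD_UNUSED) {
                        break;                          /* no more domains */
                } else if (*field & FIELD_MSB) {
                        out[i] = *field & FIELD_MASK;   /* single 15-bit value */
                        field++;
                        ndoms++;
                } else {
                        /* 15 bits concatenated with the following 16-bit field */
                        out[i] = ((uint32_t)(*field & FIELD_MASK) << 16) | field[1];
                        field += 2;
                        ndoms++;
                }
        }

        out[0] = ndoms;         /* cell 0 carries the number of domains */
        return ndoms;
}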
 
@@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
  */
 static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 {
-       long rc = 0;
+       long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);
@@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 static long vphn_get_associativity(unsigned long cpu,
                                        unsigned int *associativity)
 {
-       long rc = 0;
+       long rc;
 
        rc = hcall_vphn(cpu, associativity);
 
@@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu,
  */
 int arch_update_cpu_topology(void)
 {
-       int cpu = 0, nid = 0, old_nid = 0;
+       int cpu, nid, old_nid;
        unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
-       struct sys_device *sysdev = NULL;
+       struct sys_device *sysdev;
 
        for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
                vphn_get_associativity(cpu, associativity);
@@ -1512,7 +1516,9 @@ int start_topology_update(void)
 {
        int rc = 0;
 
-       if (firmware_has_feature(FW_FEATURE_VPHN)) {
+       /* Disabled until races with load balancing are fixed */
+       if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
+           get_lppaca()->shared_proc) {
                vphn_enabled = 1;
                setup_cpu_associativity_change_counters();
                init_timer_deferrable(&topology_timer);
index 1ec0657..c14d09f 100644 (file)
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * needs to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
 {
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, ssize, 0);
+               put_cpu_var(ppc64_tlb_batch);
                return;
        }
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
+       put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
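[Annotation] The hunk above replaces __get_cpu_var() with the get_cpu_var()/put_cpu_var() pair, which keeps preemption disabled while the per-CPU batch is in use; that is why the early-return path for the non-batched flush now needs its own put_cpu_var(). A schematic of the pairing only; do_immediate_flush() and add_to_batch() are hypothetical helpers and the snippet is not compilable outside the kernel:

static void flush_sketch(struct mm_struct *mm, unsigned long addr)
{
        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

        if (!batch->active) {
                do_immediate_flush(addr);       /* hypothetical helper */
                put_cpu_var(ppc64_tlb_batch);   /* matches the early return */
                return;
        }

        add_to_batch(batch, mm, addr);          /* hypothetical helper */
        put_cpu_var(ppc64_tlb_batch);           /* normal exit */
}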
index fdb7384..f0491cc 100644 (file)
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
        pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA  */
        pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (lppaca_of(i).dyn_proc_status >= 2)
+       for (i = 0; i < NR_LPPACAS; i++) {
+               if (lppaca[i].dyn_proc_status >= 2)
                        continue;
 
                snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 
                dt_prop_str(dt, "device_type", device_type_cpu);
 
-               index = lppaca_of(i).dyn_hv_phys_proc_index;
+               index = lppaca[i].dyn_hv_phys_proc_index;
                d = &xIoHriProcessorVpd[index];
 
                dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
index b086341..2946ae1 100644 (file)
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
         * on but calling this function multiple times is fine.
         */
        identify_cpu(0, mfspr(SPRN_PVR));
+       initialise_paca(&boot_paca, 0);
 
        powerpc_firmware_features |= FW_FEATURE_ISERIES;
        powerpc_firmware_features |= FW_FEATURE_LPAR;
index 5d3ea9f..ca5d589 100644 (file)
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/* 
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
        hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+       unsigned long flags;
+       unsigned int *depth;
+
+       local_irq_save(flags);
+
+       depth = &__get_cpu_var(hcall_trace_depth);
+
+       if (*depth)
+               goto out;
+
+       (*depth)++;
        trace_hcall_entry(opcode, args);
+       (*depth)--;
+
+out:
+       local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
                        unsigned long *retbuf)
 {
+       unsigned long flags;
+       unsigned int *depth;
+
+       local_irq_save(flags);
+
+       depth = &__get_cpu_var(hcall_trace_depth);
+
+       if (*depth)
+               goto out;
+
+       (*depth)++;
        trace_hcall_exit(opcode, retval, retbuf);
+       (*depth)--;
+
+out:
+       local_irq_restore(flags);
 }
 #endif
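[Annotation] The guard added above keeps the hcall tracepoints from recursing when the tracing code itself issues hcalls (the comment cites spinlocks calling H_YIELD): a per-CPU depth counter is checked with interrupts off, and nested entries are simply dropped. A standalone, userspace flavour of the same idea, using a thread-local depth instead of a per-CPU one and no IRQ masking:

#include <stdio.h>

static __thread unsigned int trace_depth;

static void emit_record(const char *what);      /* may call trace_event() again */

void trace_event(const char *what)
{
        if (trace_depth)
                return;                         /* already tracing: drop it */

        trace_depth++;
        emit_record(what);
        trace_depth--;
}

static void emit_record(const char *what)
{
        printf("trace: %s\n", what);
}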
index ff19efd..636bcb8 100644 (file)
@@ -406,7 +406,7 @@ config QDIO
          If unsure, say Y.
 
 config CHSC_SCH
-       def_tristate y
+       def_tristate m
        prompt "Support for CHSC subchannels"
        help
          This driver allows usage of CHSC subchannels. A CHSC subchannel
index 0851eb1..2751b3a 100644 (file)
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void)
        unsigned long output_addr;
        unsigned char *output;
 
-       check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
+       output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+       check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
        memset(&_bss, 0, &_ebss - &_bss);
        free_mem_ptr = (unsigned long)&_end;
        free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-       output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
+       output = (unsigned char *) output_addr;
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /*
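[Annotation] The s390 decompressor change above computes the output address once, rounding &_end plus the heap up to a 4 KiB boundary before it is used for the parameter-block check. The rounding idiom, shown standalone:

#include <stdio.h>

/* Round 'addr' up to the next multiple of the power-of-two 'align';
 * "& ~(align - 1)" is the same mask the kernel writes as "& -align". */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
        return (addr + align - 1UL) & ~(align - 1UL);
}

int main(void)
{
        printf("%#lx\n", align_up(0x12345UL, 4096UL));  /* prints 0x13000 */
        return 0;
}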
index f42dbab..48884f8 100644 (file)
@@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
                BUG_ON(ret != bsize);
                data += bsize - index;
                len -= bsize - index;
+               index = 0;
        }
 
        /* process as many blocks as possible */
index 76daea1..5c5ba10 100644 (file)
 
 static inline int atomic_read(const atomic_t *v)
 {
-       barrier();
-       return v->counter;
+       int c;
+
+       asm volatile(
+               "       l       %0,%1\n"
+               : "=d" (c) : "Q" (v->counter));
+       return c;
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
-       v->counter = i;
-       barrier();
+       asm volatile(
+               "       st      %1,%0\n"
+               : "=Q" (v->counter) : "d" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 static inline long long atomic64_read(const atomic64_t *v)
 {
-       barrier();
-       return v->counter;
+       long long c;
+
+       asm volatile(
+               "       lg      %0,%1\n"
+               : "=d" (c) : "Q" (v->counter));
+       return c;
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-       v->counter = i;
-       barrier();
+       asm volatile(
+               "       stg     %1,%0\n"
+               : "=Q" (v->counter) : "d" (i));
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
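[Annotation] The s390 hunk above stops relying on barrier() plus a plain access for atomic_read()/atomic_set() and issues explicit load/store instructions instead, so the access can be neither optimised away nor reordered by the compiler. On architectures where an ordinary aligned load/store is already single-copy atomic, much the same effect is obtained with a volatile access, roughly what ACCESS_ONCE()-style helpers do; a generic sketch with an illustrative type:

typedef struct { int counter; } my_atomic_t;

static inline int my_atomic_read(const my_atomic_t *v)
{
        /* Forces exactly one load that the compiler may not cache or elide. */
        return *(const volatile int *)&v->counter;
}

static inline void my_atomic_set(my_atomic_t *v, int i)
{
        *(volatile int *)&v->counter = i;       /* exactly one store */
}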
index 24aafa6..2a30d5a 100644 (file)
@@ -13,6 +13,7 @@
 
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
+#define NET_SKB_PAD       32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
index 405cc97..7e1f776 100644 (file)
@@ -1,29 +1,8 @@
 #ifndef _S390_CACHEFLUSH_H
 #define _S390_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the s390. */
-#define flush_cache_all()                      do { } while (0)
-#define flush_cache_mm(mm)                     do { } while (0)
-#define flush_cache_dup_mm(mm)                 do { } while (0)
-#define flush_cache_range(vma, start, end)     do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                        do { } while (0)
-#define flush_dcache_mmap_lock(mapping)                do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
-#define flush_icache_range(start, end)         do { } while (0)
-#define flush_icache_page(vma,pg)              do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)        do { } while (0)
-#define flush_cache_vmap(start, end)           do { } while (0)
-#define flush_cache_vunmap(start, end)         do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable);
index bf3de04..2c79b64 100644 (file)
@@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
-/*
- * Print register of task into buffer. Used in fs/proc/array.c.
- */
-extern void task_show_regs(struct seq_file *m, struct task_struct *task);
-
 extern void show_code(struct pt_regs *regs);
 
 unsigned long get_wchan(struct task_struct *p);
index f1f644f..9074a54 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
index 5eb78dd..b5a4a73 100644 (file)
@@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs)
        show_last_breaking_event(regs);
 }
 
-/* This is called from fs/proc/array.c */
-void task_show_regs(struct seq_file *m, struct task_struct *task)
-{
-       struct pt_regs *regs;
-
-       regs = task_pt_regs(task);
-       seq_printf(m, "task: %p, ksp: %p\n",
-                      task, (void *)task->thread.ksp);
-       seq_printf(m, "User PSW : %p %p\n",
-                      (void *) regs->psw.mask, (void *)regs->psw.addr);
-
-       seq_printf(m, "User GPRS: " FOURLONG,
-                         regs->gprs[0], regs->gprs[1],
-                         regs->gprs[2], regs->gprs[3]);
-       seq_printf(m, "           " FOURLONG,
-                         regs->gprs[4], regs->gprs[5],
-                         regs->gprs[6], regs->gprs[7]);
-       seq_printf(m, "           " FOURLONG,
-                         regs->gprs[8], regs->gprs[9],
-                         regs->gprs[10], regs->gprs[11]);
-       seq_printf(m, "           " FOURLONG,
-                         regs->gprs[12], regs->gprs[13],
-                         regs->gprs[14], regs->gprs[15]);
-       seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
-                         task->thread.acrs[0], task->thread.acrs[1],
-                         task->thread.acrs[2], task->thread.acrs[3]);
-       seq_printf(m, "           %08x %08x %08x %08x\n",
-                         task->thread.acrs[4], task->thread.acrs[5],
-                         task->thread.acrs[6], task->thread.acrs[7]);
-       seq_printf(m, "           %08x %08x %08x %08x\n",
-                         task->thread.acrs[8], task->thread.acrs[9],
-                         task->thread.acrs[10], task->thread.acrs[11]);
-       seq_printf(m, "           %08x %08x %08x %08x\n",
-                         task->thread.acrs[12], task->thread.acrs[13],
-                         task->thread.acrs[14], task->thread.acrs[15]);
-}
-
 static DEFINE_SPINLOCK(die_lock);
 
 void die(const char * str, struct pt_regs * regs, long err)
index 07deaee..a6c4f7e 100644 (file)
@@ -125,9 +125,9 @@ static size_t copy_in_user_std(size_t size, void __user *to,
        unsigned long tmp1;
 
        asm volatile(
+               "   sacf  256\n"
                "  "AHI"  %0,-1\n"
                "   jo    5f\n"
-               "   sacf  256\n"
                "   bras  %3,3f\n"
                "0:"AHI"  %0,257\n"
                "1: mvc   0(1,%1),0(%2)\n"
@@ -142,9 +142,8 @@ static size_t copy_in_user_std(size_t size, void __user *to,
                "3:"AHI"  %0,-256\n"
                "   jnm   2b\n"
                "4: ex    %0,1b-0b(%3)\n"
-               "   sacf  0\n"
                "5: "SLR"  %0,%0\n"
-               "6:\n"
+               "6: sacf  0\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
                : : "cc", "memory");
@@ -156,9 +155,9 @@ static size_t clear_user_std(size_t size, void __user *to)
        unsigned long tmp1, tmp2;
 
        asm volatile(
+               "   sacf  256\n"
                "  "AHI"  %0,-1\n"
                "   jo    5f\n"
-               "   sacf  256\n"
                "   bras  %3,3f\n"
                "   xc    0(1,%1),0(%1)\n"
                "0:"AHI"  %0,257\n"
@@ -178,9 +177,8 @@ static size_t clear_user_std(size_t size, void __user *to)
                "3:"AHI"  %0,-256\n"
                "   jnm   2b\n"
                "4: ex    %0,0(%3)\n"
-               "   sacf  0\n"
                "5: "SLR"  %0,%0\n"
-               "6:\n"
+               "6: sacf  0\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
                : : "cc", "memory");
index 0c719c6..e1850c2 100644 (file)
@@ -336,7 +336,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
        page->flags ^= bits;
        if (page->flags & FRAG_MASK) {
                /* Page now has some free pgtable fragments. */
-               list_move(&page->lru, &mm->context.pgtable_list);
+               if (!list_empty(&page->lru))
+                       list_move(&page->lru, &mm->context.pgtable_list);
                page = NULL;
        } else
                /* All fragments of the 4K page have been freed. */
index ae55556..8a9011d 100644 (file)
@@ -15,6 +15,7 @@ config SUPERH
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_LZMA
+       select HAVE_KERNEL_XZ
        select HAVE_KERNEL_LZO
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_REGS_AND_STACK_ACCESS_API
index 9c8c6e1..e3d8170 100644 (file)
@@ -200,7 +200,7 @@ endif
 libs-$(CONFIG_SUPERH32)                := arch/sh/lib/ $(libs-y)
 libs-$(CONFIG_SUPERH64)                := arch/sh/lib64/ $(libs-y)
 
-BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.lzo \
+BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
               uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
               romImage
 PHONY += $(BOOT_TARGETS)
@@ -230,5 +230,6 @@ define archhelp
        @echo '* uImage.gz                 - Kernel-only image for U-Boot (gzip)'
        @echo '  uImage.bz2                - Kernel-only image for U-Boot (bzip2)'
        @echo '  uImage.lzma               - Kernel-only image for U-Boot (lzma)'
+       @echo '  uImage.xz                 - Kernel-only image for U-Boot (xz)'
        @echo '  uImage.lzo                - Kernel-only image for U-Boot (lzo)'
 endef
index 33b6629..701667a 100644 (file)
@@ -1294,6 +1294,7 @@ static int __init arch_setup(void)
        i2c_register_board_info(1, i2c1_devices,
                                ARRAY_SIZE(i2c1_devices));
 
+#if defined(CONFIG_VIDEO_SH_VOU) || defined(CONFIG_VIDEO_SH_VOU_MODULE)
        /* VOU */
        gpio_request(GPIO_FN_DV_D15, NULL);
        gpio_request(GPIO_FN_DV_D14, NULL);
@@ -1325,6 +1326,7 @@ static int __init arch_setup(void)
 
        /* Remove reset */
        gpio_set_value(GPIO_PTG4, 1);
+#endif
 
        return platform_add_devices(ecovec_devices,
                                    ARRAY_SIZE(ecovec_devices));
index 1ce6362..ba515d8 100644 (file)
@@ -24,12 +24,13 @@ suffix-y := bin
 suffix-$(CONFIG_KERNEL_GZIP)   := gz
 suffix-$(CONFIG_KERNEL_BZIP2)  := bz2
 suffix-$(CONFIG_KERNEL_LZMA)   := lzma
+suffix-$(CONFIG_KERNEL_XZ)     := xz
 suffix-$(CONFIG_KERNEL_LZO)    := lzo
 
 targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \
-          uImage.bz2 uImage.lzma uImage.lzo uImage.bin
+          uImage.bz2 uImage.lzma uImage.xz uImage.lzo uImage.bin
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
-          vmlinux.bin.lzo
+          vmlinux.bin.xz vmlinux.bin.lzo
 subdir- := compressed romimage
 
 $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
@@ -76,6 +77,9 @@ $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
        $(call if_changed,lzma)
 
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,xzkern)
+
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
        $(call if_changed,lzo)
 
@@ -88,6 +92,9 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
        $(call if_changed,uimage,lzma)
 
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+       $(call if_changed,uimage,xz)
+
 $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
        $(call if_changed,uimage,lzo)
 
index cfa5a08..e0b0293 100644 (file)
@@ -6,7 +6,7 @@
 
 targets                := vmlinux vmlinux.bin vmlinux.bin.gz \
                   vmlinux.bin.bz2 vmlinux.bin.lzma \
-                  vmlinux.bin.lzo \
+                  vmlinux.bin.xz vmlinux.bin.lzo \
                   head_$(BITS).o misc.o piggy.o
 
 OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
@@ -50,6 +50,8 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lzma)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
+       $(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lzo)
 
index 27140a6..95470a4 100644 (file)
@@ -61,6 +61,10 @@ static unsigned long free_mem_end_ptr;
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZO
 #include "../../../../lib/decompress_unlzo.c"
 #endif
index 083ea06..db85916 100644 (file)
@@ -134,6 +134,7 @@ typedef pte_t *pte_addr_t;
 extern void pgtable_cache_init(void);
 
 struct vm_area_struct;
+struct mm_struct;
 
 extern void __update_cache(struct vm_area_struct *vma,
                           unsigned long address, pte_t pte);
index a78701d..4a53500 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <asm-generic/sections.h>
 
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
index f739061..0f325da 100644 (file)
@@ -1,11 +1,21 @@
 #ifndef __ASM_SH_ETH_H__
 #define __ASM_SH_ETH_H__
 
+#include <linux/phy.h>
+
 enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+enum {
+       SH_ETH_REG_GIGABIT,
+       SH_ETH_REG_FAST_SH4,
+       SH_ETH_REG_FAST_SH3_SH2
+};
 
 struct sh_eth_plat_data {
        int phy;
        int edmac_endian;
+       int register_type;
+       phy_interface_t phy_interface;
+       void (*set_mdio_gate)(unsigned long addr);
 
        unsigned char mac_addr[6];
        unsigned no_ether_link:1;
index c2b0aaa..e53b4b3 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/io.h>
 #include <linux/sh_timer.h>
 #include <linux/serial_sci.h>
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
 
 static struct resource rtc_resources[] = {
        [0] = {
@@ -230,10 +230,10 @@ static struct platform_device *sh7750_devices[] __initdata = {
 static int __init sh7750_devices_setup(void)
 {
        if (mach_is_rts7751r2d()) {
-               platform_register_device(&scif_device);
+               platform_device_register(&scif_device);
        } else {
-               platform_register_device(&sci_device);
-               platform_register_device(&scif_device);
+               platform_device_register(&sci_device);
+               platform_device_register(&scif_device);
        }
 
        return platform_add_devices(sh7750_devices,
@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
 
 void __init plat_early_device_setup(void)
 {
+       struct platform_device *dev[1];
+
        if (mach_is_rts7751r2d()) {
                scif_platform_data.scscr |= SCSCR_CKE1;
-               early_platform_add_devices(&scif_device, 1);
+               dev[0] = &scif_device;
+               early_platform_add_devices(dev, 1);
        } else {
-               early_platform_add_devices(&sci_device, 1);
-               early_platform_add_devices(&scif_device, 1);
+               dev[0] = &sci_device;
+               early_platform_add_devices(dev, 1);
+               dev[0] = &scif_device;
+               early_platform_add_devices(dev, 1);
        }
 
        early_platform_add_devices(sh7750_early_devices,
index 948fdb6..38e8628 100644 (file)
@@ -17,6 +17,7 @@
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
index faa8f86..0901b2f 100644 (file)
 void __delay(unsigned long loops)
 {
        __asm__ __volatile__(
+               /*
+                * ST40-300 appears to have an issue with this code,
+                * normally taking two cycles each loop, as with all
+                * other SH variants. If however the branch and the
+                * delay slot straddle an 8 byte boundary, this increases
+                * to 3 cycles.
+                * This align directive ensures this doesn't occur.
+                */
+               ".balign 8\n\t"
+
                "tst    %0, %0\n\t"
                "1:\t"
                "bf/s   1b\n\t"
index 88d3dc3..5a580ea 100644 (file)
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
                kunmap_atomic(vfrom, KM_USER0);
        }
 
-       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+           (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);
 
        kunmap_atomic(vto, KM_USER1);
index a2f5c61..843e4fa 100644 (file)
@@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz)
 
 extern u64 pcr_enable;
 
+extern int pcr_arch_init(void);
+
 #endif /* __PCR_H */
index 47977a7..72509d0 100644 (file)
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
        int lowest = iommu->ctx_lowest_free;
-       int sz = IOMMU_NUM_CTXS - lowest;
-       int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+       int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
 
-       if (unlikely(n == sz)) {
+       if (unlikely(n == IOMMU_NUM_CTXS)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
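[Annotation] The fix above corrects a misuse of find_next_zero_bit(): its second argument is the total size of the bitmap, not the number of bits remaining after the offset, and that size is also the value returned when no zero bit is found. A standalone re-implementation of those semantics for illustration (not the kernel's optimised word-at-a-time version):

#include <stddef.h>

static size_t find_next_zero_bit_sketch(const unsigned long *map,
                                        size_t size, size_t offset)
{
        size_t i, bits_per_long = 8 * sizeof(unsigned long);

        for (i = offset; i < size; i++)
                if (!(map[i / bits_per_long] & (1UL << (i % bits_per_long))))
                        return i;

        return size;            /* "not found" is signalled by returning size */
}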
index ae96cf5..7c2ced6 100644 (file)
@@ -167,5 +167,3 @@ out_unregister:
        unregister_perf_hsvc();
        return err;
 }
-
-early_initcall(pcr_arch_init);
index b6a2b8f..555a76d 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
+#include <asm/pcr.h>
 
 #include "cpumap.h"
 
@@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+       pcr_arch_init();
 }
 
 void smp_send_reschedule(int cpu)
index 8cc0345..8f096e8 100644 (file)
@@ -24,9 +24,9 @@ retl_efault:
        .globl  __do_int_store
 __do_int_store:
        ld      [%o2], %g1
-       cmp     %1, 2
+       cmp     %o1, 2
        be      2f
-        cmp    %1, 4
+        cmp    %o1, 4
        be      1f
         srl    %g1, 24, %g2
        srl     %g1, 16, %g7
index 764b3eb..48d00e7 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include <linux/string.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 
 #include <asm/bitext.h>
 
@@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
                while (test_bit(offset + i, t->map) == 0) {
                        i++;
                        if (i == len) {
-                               for (i = 0; i < len; i++)
-                                       __set_bit(offset + i, t->map);
+                               bitmap_set(t->map, offset, len);
                                if (offset == t->first_free)
                                        t->first_free = find_next_zero_bit
                                                        (t->map, t->size,
index 646aa78..46a8238 100644 (file)
@@ -62,7 +62,12 @@ int main(int argc, char *argv[])
        if (fseek(f, -4L, SEEK_END)) {
                perror(argv[1]);
        }
-       fread(&olen, sizeof olen, 1, f);
+
+       if (fread(&olen, sizeof(olen), 1, f) != 1) {
+               perror(argv[1]);
+               return 1;
+       }
+
        ilen = ftell(f);
        olen = getle32(&olen);
        fclose(f);
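[Annotation] The build-tool fix above checks the fread() return value instead of silently continuing with an uninitialised length. The minimal pattern, standalone:

#include <stdio.h>

static int read_trailer(FILE *f, unsigned int *olen)
{
        if (fread(olen, sizeof(*olen), 1, f) != 1) {
                perror("fread");
                return -1;      /* short read or error: do not use *olen */
        }
        return 0;
}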
index 211ca3f..4ea15ca 100644 (file)
@@ -88,6 +88,7 @@ extern int acpi_disabled;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
index 5e3969c..3c89694 100644 (file)
@@ -233,6 +233,7 @@ extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
 extern void end_local_APIC_setup(void);
+extern void bsp_end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
index 63e35ec..62f0844 100644 (file)
@@ -1,48 +1,8 @@
 #ifndef _ASM_X86_CACHEFLUSH_H
 #define _ASM_X86_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
-                                    unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-                                   unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
-                                     unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
-                                    struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
-                                          struct page *page,
-                                          unsigned long addr,
-                                          unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
-                                     unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-                                    struct page *page, unsigned long vaddr,
-                                    void *dst, const void *src,
-                                    unsigned long len)
-{
-       memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-                                      struct page *page, unsigned long vaddr,
-                                      void *dst, const void *src,
-                                      unsigned long len)
-{
-       memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_X86_PAT
 /*
index 4fab24d..4564c8e 100644 (file)
@@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
+int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
index f52d42e..574dbc2 100644 (file)
@@ -14,7 +14,7 @@
        do {                                                    \
                asm goto("1:"                                   \
                        JUMP_LABEL_INITIAL_NOP                  \
-                       ".pushsection __jump_table,  \"a\" \n\t"\
+                       ".pushsection __jump_table,  \"aw\" \n\t"\
                        _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
                        ".popsection \n\t"                      \
                        : :  "i" (key) :  : label);             \
index 4a2d4e0..8b5393e 100644 (file)
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        unsigned cpu = smp_processor_id();
 
        if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
                percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                /* Re-load page tables */
                load_cr3(next->pgd);
 
+               /* stop flush ipis for the previous mm */
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
                /*
                 * load the LDT, if the LDT is different:
                 */
index 4d0dfa0..43a18c7 100644 (file)
 #define MSR_IA32_PERFCTR1              0x000000c2
 #define MSR_FSB_FREQ                   0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
+#define NHM_C3_AUTO_DEMOTE             (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE             (1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE         (1UL << 25)
+
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 
index 2071a8b..ebbc4d8 100644 (file)
@@ -558,13 +558,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
 {
-#if PAGETABLE_LEVELS >= 3
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
-               PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
-#endif
+               PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+                           native_pmd_val(pmd));
 }
 #endif
 
index 3788f46..7e17295 100644 (file)
@@ -273,34 +273,34 @@ do {                                                                      \
        typeof(var) pxo_new__ = (nval);                                 \
        switch (sizeof(var)) {                                          \
        case 1:                                                         \
-               asm("\n1:mov "__percpu_arg(1)",%%al"                    \
-                   "\n\tcmpxchgb %2, "__percpu_arg(1)                  \
+               asm("\n\tmov "__percpu_arg(1)",%%al"                    \
+                   "\n1:\tcmpxchgb %2, "__percpu_arg(1)                \
                    "\n\tjnz 1b"                                        \
-                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "=&a" (pxo_ret__), "+m" (var)             \
                            : "q" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        case 2:                                                         \
-               asm("\n1:mov "__percpu_arg(1)",%%ax"                    \
-                   "\n\tcmpxchgw %2, "__percpu_arg(1)                  \
+               asm("\n\tmov "__percpu_arg(1)",%%ax"                    \
+                   "\n1:\tcmpxchgw %2, "__percpu_arg(1)                \
                    "\n\tjnz 1b"                                        \
-                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "=&a" (pxo_ret__), "+m" (var)             \
                            : "r" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        case 4:                                                         \
-               asm("\n1:mov "__percpu_arg(1)",%%eax"                   \
-                   "\n\tcmpxchgl %2, "__percpu_arg(1)                  \
+               asm("\n\tmov "__percpu_arg(1)",%%eax"                   \
+                   "\n1:\tcmpxchgl %2, "__percpu_arg(1)                \
                    "\n\tjnz 1b"                                        \
-                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "=&a" (pxo_ret__), "+m" (var)             \
                            : "r" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        case 8:                                                         \
-               asm("\n1:mov "__percpu_arg(1)",%%rax"                   \
-                   "\n\tcmpxchgq %2, "__percpu_arg(1)                  \
+               asm("\n\tmov "__percpu_arg(1)",%%rax"                   \
+                   "\n1:\tcmpxchgq %2, "__percpu_arg(1)                \
                    "\n\tjnz 1b"                                        \
-                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "=&a" (pxo_ret__), "+m" (var)             \
                            : "r" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
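[Annotation] The percpu xchg fix above moves the "1:" retry label so that only the cmpxchg is repeated; on failure cmpxchg already leaves the current memory value in the accumulator, and the new "=&a" early-clobber keeps the compiler from also placing an input in that register. A portable sketch of the same retry logic using compiler builtins, which likewise refresh 'old' on a failed compare-exchange:

static unsigned long xchg_sketch(unsigned long *ptr, unsigned long newval)
{
        unsigned long old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

        /* On failure the builtin stores the value it found back into 'old',
         * so the loop retries the exchange against that fresh value. */
        while (!__atomic_compare_exchange_n(ptr, &old, newval, 1,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                ;

        return old;
}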
index e2f6a99..cc29086 100644 (file)
@@ -22,6 +22,7 @@
 
 #define ARCH_P4_CNTRVAL_BITS   (40)
 #define ARCH_P4_CNTRVAL_MASK   ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT  ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
 
 #define P4_ESCR_EVENT_MASK     0x7e000000U
 #define P4_ESCR_EVENT_SHIFT    25
index 4c2f63c..1f46951 100644 (file)
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-       void *sp;
-       unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
        void (*smp_prepare_boot_cpu)(void);
index 6c22bf3..725b778 100644 (file)
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
         */
        CMOS_WRITE(0, 0xf);
 
-       *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+       *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644 (file)
index 1159e09..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-       unsigned long cr8;
-       asm volatile("movq %%cr8,%0" : "=r" (cr8));
-       return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-       asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
index ce1d54c..3e094af 100644 (file)
@@ -176,7 +176,7 @@ struct bau_msg_payload {
 struct bau_msg_header {
        unsigned int dest_subnodeid:6;  /* must be 0x10, for the LB */
        /* bits 5:0 */
-       unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+       unsigned int base_dest_nodeid:15; /* nasid of the */
        /* bits 20:6 */                   /* first bit in uvhub map */
        unsigned int command:8; /* message type */
        /* bits 28:21 */
index b3a7113..3e6e2d6 100644 (file)
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                return 0;
        }
 
-       if (acpi_skip_timer_override &&
-           intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-               printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-               return 0;
+       if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+               if (acpi_skip_timer_override) {
+                       printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                       return 0;
+               }
+               if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+                       intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+                       printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+               }
        }
 
        mp_override_legacy_irq(intsrc->source_irq,
index 69fd72a..68d1537 100644 (file)
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
        header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-       stack_start.sp = temp_stack + sizeof(temp_stack);
+       stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
        early_gdt_descr.address =
                        (unsigned long)get_cpu_gdt_table(smp_processor_id());
        initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
        memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+       if (acpi_realmode)
+               set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+       return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
index 1236085..7038b95 100644 (file)
@@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
 
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
-       stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+       __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
index 51ef31a..51d4e16 100644 (file)
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
        memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
        if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-               apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+               adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
                global_clock_event = &adev->evt;
                printk(KERN_DEBUG "%s clockevent registered as global\n",
                       global_clock_event->name);
index 06c196d..76b96d7 100644 (file)
@@ -1381,12 +1381,17 @@ void __cpuinit end_local_APIC_setup(void)
 #endif
 
        apic_pm_activate();
+}
+
+void __init bsp_end_local_APIC_setup(void)
+{
+       end_local_APIC_setup();
 
        /*
         * Now that local APIC setup is completed for BP, configure the fault
         * handling for interrupt remapping.
         */
-       if (!smp_processor_id() && intr_remapping_enabled)
+       if (intr_remapping_enabled)
                enable_drhd_fault_handling();
 
 }
@@ -1756,7 +1761,7 @@ int __init APIC_init_uniprocessor(void)
                enable_IO_APIC();
 #endif
 
-       end_local_APIC_setup();
+       bsp_end_local_APIC_setup();
 
 #ifdef CONFIG_X86_IO_APIC
        if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
index 697dc34..ca9e2a3 100644 (file)
@@ -4002,6 +4002,9 @@ int mp_find_ioapic(u32 gsi)
 {
        int i = 0;
 
+       if (nr_ioapics == 0)
+               return -1;
+
        /* Find the IOAPIC that manages this GSI. */
        for (i = 0; i < nr_ioapics; i++) {
                if ((gsi >= mp_gsi_routing[i].gsi_base)
index bd1cac7..52c9364 100644 (file)
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
        if (c->x86 == 0x06) {
                if (cpu_has(c, X86_FEATURE_EST))
-                       printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-                              "detected. The acpi-cpufreq module offers "
-                              "voltage scaling in addition of frequency "
+                       printk_once(KERN_WARNING PFX "Warning: EST-capable "
+                              "CPU detected. The acpi-cpufreq module offers "
+                              "voltage scaling in addition to frequency "
                               "scaling. You should use that instead of "
                               "p4-clockmod, if possible.\n");
                switch (c->x86_model) {
index 4f6f679..4a5a42b 100644 (file)
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu)
 cmd_incomplete:
        iowrite16(0, &pcch_hdr->status);
        spin_unlock(&pcc_lock);
-       return -EINVAL;
+       return 0;
 }
 
 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
index 35c7e65..c567dec 100644 (file)
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
 static int __cpuinit powernowk8_init(void)
 {
        unsigned int i, supported_cpus = 0, cpu;
+       int rv;
 
        for_each_online_cpu(i) {
                int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
 
                cpb_capable = true;
 
-               register_cpu_notifier(&cpb_nb);
-
                msrs = msrs_alloc();
                if (!msrs) {
                        printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
                        return -ENOMEM;
                }
 
+               register_cpu_notifier(&cpb_nb);
+
                rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
                for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
                        (cpb_enabled ? "on" : "off"));
        }
 
-       return cpufreq_register_driver(&cpufreq_amd64_driver);
+       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+       if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+               unregister_cpu_notifier(&cpb_nb);
+               msrs_free(msrs);
+               msrs = NULL;
+       }
+       return rv;
 }
 
 /* driver entry point for term */
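[Annotation] The powernow-k8 hunk above registers the CPU notifier only after the MSR buffer it depends on has been allocated, and unwinds both when driver registration fails. A standalone sketch of that ordering rule, with made-up stand-ins for the real notifier/MSR/driver calls:

static int  alloc_buffers(void)       { return 0; }
static void free_buffers(void)        { }
static void register_notifier(void)   { }
static void unregister_notifier(void) { }
static int  register_driver(void)     { return -1; }   /* simulate failure */

int example_init(void)
{
        int err;

        if (alloc_buffers())                    /* 1. allocate first ...      */
                return -1;

        register_notifier();                    /* 2. ... then expose hooks   */

        err = register_driver();                /* 3. finally register driver */
        if (err < 0) {
                unregister_notifier();          /* undo in reverse order      */
                free_buffers();
        }
        return err;
}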
index 7283e98..ec2c19a 100644 (file)
@@ -45,6 +45,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
        { 0x0a, LVL_1_DATA, 8 },        /* 2 way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x0d, LVL_1_DATA, 16 },       /* 4-way set assoc, 64 byte line size */
+       { 0x0e, LVL_1_DATA, 24 },       /* 6-way set assoc, 64 byte line size */
        { 0x21, LVL_2,      256 },      /* 8-way set assoc, 64 byte line size */
        { 0x22, LVL_3,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3,      MB(1) },    /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
        { 0x45, LVL_2,      MB(2) },    /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3,      MB(4) },    /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3,      MB(8) },    /* 8-way set assoc, 64 byte line size */
+       { 0x48, LVL_2,      MB(3) },    /* 12-way set assoc, 64 byte line size */
        { 0x49, LVL_3,      MB(4) },    /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3,      MB(6) },    /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3,      MB(8) },    /* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
        { 0x7c, LVL_2,      MB(1) },    /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2,      MB(2) },    /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2,      512 },      /* 2-way set assoc, 64 byte line size */
+       { 0x80, LVL_2,      512 },      /* 8-way set assoc, 64 byte line size */
        { 0x82, LVL_2,      256 },      /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2,      512 },      /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2,      MB(1) },    /* 8-way set assoc, 32 byte line size */
index e12246f..6f8c5e9 100644 (file)
@@ -59,6 +59,7 @@ struct thermal_state {
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
index 01c0f3e..bebabec 100644 (file)
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
        if (!use_intel())
                return;
 
+       /*
+        * Check if someone has requested the delay of AP MTRR initialization,
+        * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+        * then we are done.
+        */
+       if (!mtrr_aps_delayed_init)
+               return;
+
        set_mtrr(~0U, 0, 0, 0);
        mtrr_aps_delayed_init = false;
 }
index e56b9bf..ff751a9 100644 (file)
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
         * if an event is shared accross the logical threads
         * the user needs special permissions to be able to use it
         */
-       if (p4_event_bind_map[v].shared) {
+       if (p4_ht_active() && p4_event_bind_map[v].shared) {
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
        }
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
                event->hw.config = p4_set_ht_bit(event->hw.config);
 
        if (event->attr.type == PERF_TYPE_RAW) {
-
+               struct p4_event_bind *bind;
+               unsigned int esel;
                /*
                 * Clear bits we reserve to be managed by kernel itself
                 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
                 * bits since we keep additional info here (for cache events and etc)
                 */
                event->hw.config |= event->attr.config;
+               bind = p4_config_get_bind(event->attr.config);
+               if (!bind) {
+                       rc = -EINVAL;
+                       goto out;
+               }
+               esel = P4_OPCODE_ESEL(bind->opcode);
+               event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
        }
 
        rc = x86_setup_perfctr(event);
@@ -762,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
                return 1;
        }
 
-       /* it might be unflagged overflow */
-       rdmsrl(hwc->event_base + hwc->idx, v);
-       if (!(v & ARCH_P4_CNTRVAL_MASK))
+       /*
+        * In some circumstances the overflow might issue an NMI but did
+        * not set P4_CCCR_OVF bit. Because a counter holds a negative value
+        * we simply check for high bit being set, if it's cleared it means
+        * the counter has reached zero value and continued counting before
+        * real NMI signal was received:
+        */
+       if (!(v & ARCH_P4_UNFLAGGED_BIT))
                return 1;
 
        return 0;
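[Annotation] The replacement test above relies on how P4 counters are programmed: they start at a negative (two's-complement) value and count up, so a cleared top bit in the 40-bit counter means it has crossed zero, i.e. overflowed, even if the CCCR overflow flag never got set. A small worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define CNTRVAL_BITS   40
#define CNTRVAL_MASK   ((1ULL << CNTRVAL_BITS) - 1)
#define UNFLAGGED_BIT  (1ULL << (CNTRVAL_BITS - 1))

int main(void)
{
        uint64_t period = 100000;
        uint64_t start = (0 - period) & CNTRVAL_MASK;   /* negative start value */
        uint64_t after = (start + period + 5) & CNTRVAL_MASK;

        printf("start overflowed? %d\n", !(start & UNFLAGGED_BIT)); /* prints 0 */
        printf("after overflowed? %d\n", !(after & UNFLAGGED_BIT)); /* prints 1 */
        return 0;
}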
index 6410133..a6b6fcf 100644 (file)
@@ -149,13 +149,13 @@ void dump_trace(struct task_struct *task,
        unsigned used = 0;
        struct thread_info *tinfo;
        int graph = 0;
+       unsigned long dummy;
        unsigned long bp;
 
        if (!task)
                task = current;
 
        if (!stack) {
-               unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.sp;
index 76b8cd9..9efbdcc 100644 (file)
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
 
 static u32 __init ati_sbx00_rev(int num, int slot, int func)
 {
-       u32 old, d;
+       u32 d;
 
-       d = read_pci_config(num, slot, func, 0x70);
-       old = d;
-       d &= ~(1<<8);
-       write_pci_config(num, slot, func, 0x70, d);
        d = read_pci_config(num, slot, func, 0x8);
        d &= 0xff;
-       write_pci_config(num, slot, func, 0x70, old);
 
        return d;
 }
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 {
        u32 d, rev;
 
-       if (acpi_use_timer_override)
-               return;
-
        rev = ati_sbx00_rev(num, slot, func);
+       if (rev >= 0x40)
+               acpi_fix_pin2_polarity = 1;
+
        if (rev > 0x13)
                return;
 
+       if (acpi_use_timer_override)
+               return;
+
        /* check for IRQ0 interrupt swap */
        d = read_pci_config(num, slot, func, 0x64);
        if (!(d & (1<<14)))
index fc293dc..767d6c4 100644 (file)
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+       movl pa(stack_start),%ecx
+       
        /* test KEEP_SEGMENTS flag to see if the bootloader is asking
                us to not reload segments */
        testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
        movl %eax,%es
        movl %eax,%fs
        movl %eax,%gs
+       movl %eax,%ss
 2:
+       leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
        movl %eax,%es
        movl %eax,%fs
        movl %eax,%gs
+       movl pa(stack_start),%ecx
+       movl %eax,%ss
+       leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
        movl %eax,%cr0          /* ..and set paging (PG) bit */
        ljmp $__BOOT_CS,$1f     /* Clear prefetch and normalize %eip */
 1:
-       /* Set up the stack pointer */
-       lss stack_start,%esp
+       /* Shift the stack pointer to a virtual address */
+       addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
        cmpb $0, ready
-       jz  1f                          /* Initial CPU cleans BSS */
-       jmp checkCPUtype
-1:
+       jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386:     movl $2,%ecx            # set MP
 
        cld                     # gcc2 wants the direction flag cleared at all times
        pushl $0                # fake return address for unwinder
-#ifdef CONFIG_SMP
-       movb ready, %cl
        movb $1, ready
-       cmpb $0,%cl             # the first CPU calls start_kernel
-       je   1f
-       movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
        jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
        .long init_thread_union+THREAD_SIZE
-       .long __BOOT_DS
-
-ready: .byte 0
 
 early_recursion_flag:
        .long 0
 
+ready: .byte 0
+
 int_msg:
        .asciz "Unknown interrupt or fault at: %p %p %p\n"
 
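
The head_32.S changes above load the virtual stack top stored at stack_start, switch to it as a physical address with leal -__PAGE_OFFSET(%ecx),%esp while paging is still off, and add __PAGE_OFFSET back once paging is enabled. A tiny C sketch of that virtual/physical round trip, assuming the usual 3G/1G split (the PAGE_OFFSET value here is illustrative):

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET 0xC0000000u	/* typical 32-bit x86 split; illustrative */

static uint32_t virt_to_phys(uint32_t vaddr) { return vaddr - PAGE_OFFSET; }
static uint32_t phys_to_virt(uint32_t paddr) { return paddr + PAGE_OFFSET; }

int main(void)
{
	uint32_t stack_top_virt = 0xC1234000u;	/* like init_thread_union+THREAD_SIZE */
	uint32_t esp_before_paging = virt_to_phys(stack_top_virt);
	uint32_t esp_after_paging  = phys_to_virt(esp_before_paging);

	printf("physical %%esp: %#x, virtual %%esp: %#x\n",
	       esp_before_paging, esp_after_paging);
	return 0;
}
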
index 52945da..387b6a0 100644 (file)
@@ -367,7 +367,8 @@ void fixup_irqs(void)
                if (irr  & (1 << (vector % 32))) {
                        irq = __this_cpu_read(vector_irq[vector]);
 
-                       data = irq_get_irq_data(irq);
+                       desc = irq_to_desc(irq);
+                       data = &desc->irq_data;
                        raw_spin_lock(&desc->lock);
                        if (data->chip->irq_retrigger)
                                data->chip->irq_retrigger(data);
index d8286ed..ff45541 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -91,21 +92,31 @@ void show_regs(struct pt_regs *regs)
 
 void show_regs_common(void)
 {
-       const char *board, *product;
+       const char *vendor, *product, *board;
 
-       board = dmi_get_system_info(DMI_BOARD_NAME);
-       if (!board)
-               board = "";
+       vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+       if (!vendor)
+               vendor = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";
 
+       /* Board Name is optional */
+       board = dmi_get_system_info(DMI_BOARD_NAME);
+
        printk(KERN_CONT "\n");
-       printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+       printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version, board, product);
+               init_utsname()->version);
+       printk(KERN_CONT " ");
+       printk(KERN_CONT "%s %s", vendor, product);
+       if (board) {
+               printk(KERN_CONT "/");
+               printk(KERN_CONT "%s", board);
+       }
+       printk(KERN_CONT "\n");
 }
 
 void flush_thread(void)
@@ -505,7 +516,7 @@ static void poll_idle(void)
 #define MWAIT_ECX_EXTENDED_INFO                0x01
 #define MWAIT_EDX_C1                   0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int mwait_usable(const struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
 
index fc7aae1..715037c 100644 (file)
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
                },
        },
+       {       /* Handle problems with rebooting on VersaLogic Menlow boards */
+               .callback = set_bios_reboot,
+               .ident = "VersaLogic Menlow based board",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+               },
+       },
        { }
 };
 
index 763df77..08776a9 100644 (file)
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-                        (unsigned long)stack_start.sp);
+                        stack_start);
 
        /*
         * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
 #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
-       stack_start.sp = (void *) c_idle.idle->thread.sp;
+       stack_start  = c_idle.idle->thread.sp;
 
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
@@ -1060,7 +1060,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
 
                connect_bsp_APIC();
                setup_local_APIC();
-               end_local_APIC_setup();
+               bsp_end_local_APIC_setup();
                return -1;
        }
 
@@ -1137,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();
 
-       end_local_APIC_setup();
+       bsp_end_local_APIC_setup();
 
        map_cpu_to_logical_apicid();
 
@@ -1402,8 +1402,9 @@ static inline void mwait_play_dead(void)
        unsigned int highest_subcstate = 0;
        int i;
        void *mwait_ptr;
+       struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-       if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+       if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
                return;
        if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
                return;
index 25bd1bc..63fec15 100644 (file)
@@ -1150,8 +1150,8 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
        kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
        loadsegment(fs, svm->host.fs);
-       load_gs_index(svm->host.gs);
        wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+       load_gs_index(svm->host.gs);
 #else
        loadsegment(gs, svm->host.gs);
 #endif
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
                        kvm_register_write(&svm->vcpu, reg, val);
        }
 
+       skip_emulated_instruction(&svm->vcpu);
+
        return 1;
 }
 
index 95ea155..1337c51 100644 (file)
@@ -780,11 +780,7 @@ void __cpuinit numa_add_cpu(int cpu)
        int physnid;
        int nid = NUMA_NO_NODE;
 
-       apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
-       if (apicid != BAD_APICID)
-               nid = apicid_to_node[apicid];
-       if (nid == NUMA_NO_NODE)
-               nid = early_cpu_to_node(cpu);
+       nid = early_cpu_to_node(cpu);
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
 
        /*
index 8b830ca..d343b3c 100644 (file)
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                   unsigned long pfn)
 {
        pgprot_t forbidden = __pgprot(0);
-       pgprot_t required = __pgprot(0);
 
        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;
-       /*
-        * .data and .bss should always be writable.
-        */
-       if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-           within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-               pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
        /*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-       prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
        return prot;
 }
index dab8746..044bda5 100644 (file)
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
                 * wasted bootmem) and hand off chunks of it to callers.
                 */
                res = alloc_bootmem(chunk_size);
-               if (!res)
-                       return NULL;
+               BUG_ON(!res);
                prom_early_allocated += chunk_size;
                memset(res, 0, chunk_size);
                free_mem = chunk_size;
index df58e9c..a7b38d3 100644 (file)
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
                memset(bd2, 0, sizeof(struct bau_desc));
                bd2->header.sw_ack_flag = 1;
                /*
-                * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+                * base_dest_nodeid is the nasid of the first uvhub
                 * in the partition. The bit map will indicate uvhub numbers,
                 * which are 0-N in a partition. Pnodes are unique system-wide.
                 */
-               bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+               bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
                bd2->header.dest_subnodeid = 0x10; /* the LB */
                bd2->header.command = UV_NET_ENDPOINT_INTD;
                bd2->header.int_both = 1;
index ddc81a0..fd12d7c 100644 (file)
@@ -241,21 +241,15 @@ void __init xen_build_dynamic_phys_to_machine(void)
                 * As long as the mfn_list has enough entries to completely
                 * fill a p2m page, pointing into the array is ok. But if
                 * not the entries beyond the last pfn will be undefined.
-                * And guessing that the 'what-ever-there-is' does not take it
-                * too kindly when changing it to invalid markers, a new page
-                * is allocated, initialized and filled with the valid part.
                 */
                if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
                        unsigned long p2midx;
-                       unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-                       p2m_init(p2m);
-
-                       for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
-                               p2m[p2midx] = mfn_list[pfn + p2midx];
-                       }
-                       p2m_top[topidx][mididx] = p2m;
-               } else
-                       p2m_top[topidx][mididx] = &mfn_list[pfn];
+
+                       p2midx = max_pfn % P2M_PER_PAGE;
+                       for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+                               mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+               }
+               p2m_top[topidx][mididx] = &mfn_list[pfn];
        }
 
        m2p_override_init();
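
Rather than allocating a fresh p2m page for a partially filled tail, the rewritten loop above pads the unused entries of the last mfn_list page with INVALID_P2M_ENTRY and points the tree at the list directly. A plain-array sketch of that padding step (the per-page entry count here is shrunk for illustration):

#include <stdio.h>

#define P2M_PER_PAGE      8		/* entries per p2m page; illustrative */
#define INVALID_P2M_ENTRY (~0UL)

int main(void)
{
	unsigned long mfn_list[2 * P2M_PER_PAGE];
	unsigned long max_pfn = 11;	/* last page only partially populated */
	unsigned long pfn, i;

	for (i = 0; i < max_pfn; i++)
		mfn_list[i] = 1000 + i;	/* pretend MFNs */

	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		if (pfn + P2M_PER_PAGE > max_pfn) {
			/* pad the tail of the last page instead of copying */
			for (i = max_pfn % P2M_PER_PAGE; i < P2M_PER_PAGE; i++)
				mfn_list[pfn + i] = INVALID_P2M_ENTRY;
		}
		/* p2m_top[topidx][mididx] would now point at &mfn_list[pfn] */
	}

	for (i = 0; i < 2 * P2M_PER_PAGE; i++)
		printf("pfn %lu -> %lx\n", i, mfn_list[i]);
	return 0;
}
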
index b5a7f92..a8a66a5 100644 (file)
@@ -179,8 +179,13 @@ char * __init xen_memory_setup(void)
        e820.nr_map = 0;
        xen_extra_mem_start = mem_end;
        for (i = 0; i < memmap.nr_entries; i++) {
-               unsigned long long end = map[i].addr + map[i].size;
+               unsigned long long end;
 
+               /* Guard against non-page aligned E820 entries. */
+               if (map[i].type == E820_RAM)
+                       map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+               end = map[i].addr + map[i].size;
                if (map[i].type == E820_RAM && end > mem_end) {
                        /* RAM off the end - may be partially included */
                        u64 delta = min(map[i].size, end - mem_end);
@@ -350,6 +355,7 @@ void __init xen_arch_setup(void)
        boot_cpu_data.hlt_works_ok = 1;
 #endif
        pm_idle = default_idle;
+       boot_option_idle_override = IDLE_HALT;
 
        fiddle_vdso();
 }
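
The E820 guard above trims a RAM entry so that its end lands on a page boundary: size -= (size + addr) % PAGE_SIZE. A quick arithmetic sketch of the trim, with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long addr = 0x100000ULL;	/* page-aligned start */
	unsigned long long size = 0x7FF800ULL;	/* end not page-aligned */

	size -= (size + addr) % PAGE_SIZE;	/* round the end down */

	printf("trimmed end: %#llx (multiple of PAGE_SIZE: %s)\n",
	       addr + size, ((addr + size) % PAGE_SIZE == 0) ? "yes" : "no");
	return 0;
}
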
index 2f4002f..518dd42 100644 (file)
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
        blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                       struct delayed_work *dwork, unsigned long delay)
-{
-       return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
index 54b123d..b27d020 100644 (file)
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 
        /*
         * Moving a request silently to empty queue_head may stall the
-        * queue.  Kick the queue in those cases.
+        * queue.  Kick the queue in those cases.  This function is called
+        * from request completion path and calling directly into
+        * request_fn may confuse the driver.  Always use kblockd.
         */
        if (was_empty && next_rq)
-               __blk_run_queue(q);
+               __blk_run_queue(q, true);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
                BUG();
        }
 
-       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
        return rq;
 }
 
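
The comment above spells out the rule behind the new force_kblockd flag: a queue run triggered from the request-completion path must not call the driver's request_fn directly and is deferred to kblockd instead. A simplified, single-threaded sketch of that choice (the kernel's atomic REENTER test is reduced to a plain flag here):

#include <stdio.h>
#include <stdbool.h>

static bool reenter;

static void request_fn(void)      { printf("request_fn runs inline\n"); }
static void schedule_worker(void) { printf("deferred to worker\n"); }

/* Models __blk_run_queue(q, force_worker). */
static void run_queue(bool force_worker)
{
	if (!force_worker && !reenter) {
		reenter = true;
		request_fn();		/* safe: not on a completion path */
		reenter = false;
	} else {
		schedule_worker();	/* avoid recursing into the driver */
	}
}

int main(void)
{
	run_queue(false);		/* e.g. blk_start_queue() */
	run_queue(true);		/* e.g. flush sequencing on completion */
	return 0;
}
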
index 1a320d2..eec78be 100644 (file)
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout generate number of zero filed write bios
  * @bdev:      blockdev to issue
  * @sector:    start sector
  * @nr_sects:  number of sectors to write
index 381b09b..e36cc10 100644 (file)
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;     /* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+                               unsigned long delay);
+
 struct throtl_rb_root {
        struct rb_root rb;
        struct rb_node *left;
@@ -168,7 +173,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
         * tree of blkg (instead of traversing through hash list all
         * the time.
         */
-       tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+       /*
+        * This is the common case when there are no blkio cgroups.
+        * Avoid lookup in this case
+        */
+       if (blkcg == &blkio_root_cgroup)
+               tg = &td->root_tg;
+       else
+               tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
        /* Fill in device details for root group */
        if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
@@ -337,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
        update_min_dispatch_time(st);
 
        if (time_before_eq(st->min_disptime, jiffies))
-               throtl_schedule_delayed_work(td->queue, 0);
+               throtl_schedule_delayed_work(td, 0);
        else
-               throtl_schedule_delayed_work(td->queue,
-                               (st->min_disptime - jiffies));
+               throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -807,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
 
-       struct throtl_data *td = q->td;
        struct delayed_work *dwork = &td->throtl_work;
 
        if (total_nr_queued(td) > 0) {
@@ -819,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
                 * Cancel that and schedule a new one.
                 */
                __cancel_delayed_work(dwork);
-               kblockd_schedule_delayed_work(q, dwork, delay);
+               queue_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                delay, jiffies);
        }
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -912,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
        smp_mb__after_atomic_inc();
 
        /* Schedule a work now to process the limit change */
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -926,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -940,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -954,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1127,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+       kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+       if (!kthrotld_workqueue)
+               panic("Failed to create kthrotld\n");
+
        blkio_policy_register(&blkio_policy_throtl);
        return 0;
 }
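
blk-throttle above stops piggybacking on kblockd (whose delayed-work helper is removed in blk-core.c) and allocates its own WQ_MEM_RECLAIM workqueue at init. A minimal module-style sketch of that pattern; the module and work names here are hypothetical:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_work;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}

static int __init example_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure,
	 * which matters for anything sitting on the block I/O path.
	 */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_work, example_work_fn);
	queue_delayed_work(example_wq, &example_work, HZ / 10);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_work);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
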
index 501ffdf..ea83a4f 100644 (file)
@@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 }
 
 static inline unsigned
-cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
        if (cfqd->cfq_latency) {
@@ -631,7 +631,7 @@ cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
+       unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
 
        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
@@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         */
        if (timed_out) {
                if (cfq_cfqq_slice_new(cfqq))
-                       cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
+                       cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
                else
                        cfqq->slice_resid = cfqq->slice_end - jiffies;
                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue);
+                               __blk_run_queue(cfqd->queue, false);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue);
+               __blk_run_queue(cfqd->queue, false);
        }
 }
 
@@ -3432,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        struct cfq_io_context *cic = cfqd->active_cic;
 
+       /* If the queue already has requests, don't wait */
+       if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+               return false;
+
        /* If there are other queues in the group, don't wait */
        if (cfqq->cfqg->nr_cfqq > 1)
                return false;
@@ -3727,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue);
+       __blk_run_queue(cfqd->queue, false);
        spin_unlock_irq(q->queue_lock);
 }
 
index 2569512..236e93c 100644 (file)
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q);
+               __blk_run_queue(q, false);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q);
+               __blk_run_queue(q, false);
                break;
 
        case ELEVATOR_INSERT_SORT:
index 6a5b772..cbf1112 100644 (file)
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
        struct block_device *bdev = bdget_disk(disk, partno);
        if (bdev) {
                fsync_bdev(bdev);
-               res = __invalidate_device(bdev);
+               res = __invalidate_device(bdev, true);
                bdput(bdev);
        }
        return res;
index 9049d46..1124cd2 100644 (file)
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                        return -EINVAL;
                if (get_user(n, (int __user *) arg))
                        return -EFAULT;
-               if (!(mode & FMODE_EXCL) &&
-                   blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-                       return -EBUSY;
+               if (!(mode & FMODE_EXCL)) {
+                       bdgrab(bdev);
+                       if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+                               return -EBUSY;
+               }
                ret = set_blocksize(bdev, n);
                if (!(mode & FMODE_EXCL))
                        blkdev_put(bdev, mode | FMODE_EXCL);
index e9a399c..ce5a813 100644 (file)
@@ -78,7 +78,7 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
 obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
-obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
+obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_RNG2) += krng.o
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
new file mode 100644 (file)
index 0000000..136b68b
--- /dev/null
@@ -0,0 +1,835 @@
+/*
+ * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers,
+ *                 derived from authenc.c
+ *
+ * Copyright (C) 2010 secunet Security Networks AG
+ * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct authenc_esn_instance_ctx {
+       struct crypto_ahash_spawn auth;
+       struct crypto_skcipher_spawn enc;
+};
+
+struct crypto_authenc_esn_ctx {
+       unsigned int reqoff;
+       struct crypto_ahash *auth;
+       struct crypto_ablkcipher *enc;
+};
+
+struct authenc_esn_request_ctx {
+       unsigned int cryptlen;
+       unsigned int headlen;
+       unsigned int trailen;
+       struct scatterlist *sg;
+       struct scatterlist hsg[2];
+       struct scatterlist tsg[1];
+       struct scatterlist cipher[2];
+       crypto_completion_t complete;
+       crypto_completion_t update_complete;
+       crypto_completion_t update_complete2;
+       char tail[];
+};
+
+static void authenc_esn_request_complete(struct aead_request *req, int err)
+{
+       if (err != -EINPROGRESS)
+               aead_request_complete(req, err);
+}
+
+static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
+                                    unsigned int keylen)
+{
+       unsigned int authkeylen;
+       unsigned int enckeylen;
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct crypto_ahash *auth = ctx->auth;
+       struct crypto_ablkcipher *enc = ctx->enc;
+       struct rtattr *rta = (void *)key;
+       struct crypto_authenc_key_param *param;
+       int err = -EINVAL;
+
+       if (!RTA_OK(rta, keylen))
+               goto badkey;
+       if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+               goto badkey;
+       if (RTA_PAYLOAD(rta) < sizeof(*param))
+               goto badkey;
+
+       param = RTA_DATA(rta);
+       enckeylen = be32_to_cpu(param->enckeylen);
+
+       key += RTA_ALIGN(rta->rta_len);
+       keylen -= RTA_ALIGN(rta->rta_len);
+
+       if (keylen < enckeylen)
+               goto badkey;
+
+       authkeylen = keylen - enckeylen;
+
+       crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+       crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
+                                    CRYPTO_TFM_REQ_MASK);
+       err = crypto_ahash_setkey(auth, key, authkeylen);
+       crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
+                                          CRYPTO_TFM_RES_MASK);
+
+       if (err)
+               goto out;
+
+       crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+       crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
+                                        CRYPTO_TFM_REQ_MASK);
+       err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+       crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
+                                          CRYPTO_TFM_RES_MASK);
+
+out:
+       return err;
+
+badkey:
+       crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       goto out;
+}
+
+static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
+                                               int err)
+{
+       struct aead_request *req = areq->data;
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+       if (err)
+               goto out;
+
+       ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+                               areq_ctx->cryptlen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) &
+                                         CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  areq_ctx->update_complete2, req);
+
+       err = crypto_ahash_update(ahreq);
+       if (err)
+               goto out;
+
+       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+                               areq_ctx->trailen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) &
+                                         CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  areq_ctx->complete, req);
+
+       err = crypto_ahash_finup(ahreq);
+       if (err)
+               goto out;
+
+       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+                                areq_ctx->cryptlen,
+                                crypto_aead_authsize(authenc_esn), 1);
+
+out:
+       authenc_esn_request_complete(req, err);
+}
+
+static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
+                                                int err)
+{
+       struct aead_request *req = areq->data;
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+       if (err)
+               goto out;
+
+       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+                               areq_ctx->trailen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) &
+                                         CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  areq_ctx->complete, req);
+
+       err = crypto_ahash_finup(ahreq);
+       if (err)
+               goto out;
+
+       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+                                areq_ctx->cryptlen,
+                                crypto_aead_authsize(authenc_esn), 1);
+
+out:
+       authenc_esn_request_complete(req, err);
+}
+
+
+static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
+                                        int err)
+{
+       struct aead_request *req = areq->data;
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+       if (err)
+               goto out;
+
+       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+                                areq_ctx->cryptlen,
+                                crypto_aead_authsize(authenc_esn), 1);
+
+out:
+       aead_request_complete(req, err);
+}
+
+
+static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
+                                                int err)
+{
+       u8 *ihash;
+       unsigned int authsize;
+       struct ablkcipher_request *abreq;
+       struct aead_request *req = areq->data;
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int cryptlen = req->cryptlen;
+
+       if (err)
+               goto out;
+
+       ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+                               areq_ctx->cryptlen);
+
+       ahash_request_set_callback(ahreq,
+                                  aead_request_flags(req) &
+                                  CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  areq_ctx->update_complete2, req);
+
+       err = crypto_ahash_update(ahreq);
+       if (err)
+               goto out;
+
+       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+                               areq_ctx->trailen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) &
+                                         CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  areq_ctx->complete, req);
+
+       err = crypto_ahash_finup(ahreq);
+       if (err)
+               goto out;
+
+       authsize = crypto_aead_authsize(authenc_esn);
+       cryptlen -= authsize;
+       ihash = ahreq->result + authsize;
+       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+                                authsize, 0);
+
+       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       if (err)
+               goto out;
+
+       abreq = aead_request_ctx(req);
+       ablkcipher_request_set_tfm(abreq, ctx->enc);
+       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+                                    cryptlen, req->iv);
+
+       err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+       authenc_esn_request_complete(req, err);
+}
+
+static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
+                                                 int err)
+{
+       u8 *ihash;
+       unsigned int authsize;
+       struct ablkcipher_request *abreq;
+       struct aead_request *req = areq->data;
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int cryptlen = req->cryptlen;
+
+       if (err)
+               goto out;
+
+       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+                               areq_ctx->trailen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) &
+                                         CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  areq_ctx->complete, req);
+
+       err = crypto_ahash_finup(ahreq);
+       if (err)
+               goto out;
+
+       authsize = crypto_aead_authsize(authenc_esn);
+       cryptlen -= authsize;
+       ihash = ahreq->result + authsize;
+       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+                                authsize, 0);
+
+       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       if (err)
+               goto out;
+
+       abreq = aead_request_ctx(req);
+       ablkcipher_request_set_tfm(abreq, ctx->enc);
+       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+                                    cryptlen, req->iv);
+
+       err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+       authenc_esn_request_complete(req, err);
+}
+
+
+static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+                                         int err)
+{
+       u8 *ihash;
+       unsigned int authsize;
+       struct ablkcipher_request *abreq;
+       struct aead_request *req = areq->data;
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int cryptlen = req->cryptlen;
+
+       if (err)
+               goto out;
+
+       authsize = crypto_aead_authsize(authenc_esn);
+       cryptlen -= authsize;
+       ihash = ahreq->result + authsize;
+       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+                                authsize, 0);
+
+       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       if (err)
+               goto out;
+
+       abreq = aead_request_ctx(req);
+       ablkcipher_request_set_tfm(abreq, ctx->enc);
+       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+                                    cryptlen, req->iv);
+
+       err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+       authenc_esn_request_complete(req, err);
+}
+
+static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
+                                   unsigned int flags)
+{
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct crypto_ahash *auth = ctx->auth;
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       u8 *hash = areq_ctx->tail;
+       int err;
+
+       hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+                           crypto_ahash_alignmask(auth) + 1);
+
+       ahash_request_set_tfm(ahreq, auth);
+
+       err = crypto_ahash_init(ahreq);
+       if (err)
+               return ERR_PTR(err);
+
+       ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+                                  areq_ctx->update_complete, req);
+
+       err = crypto_ahash_update(ahreq);
+       if (err)
+               return ERR_PTR(err);
+
+       ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+                                  areq_ctx->update_complete2, req);
+
+       err = crypto_ahash_update(ahreq);
+       if (err)
+               return ERR_PTR(err);
+
+       ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
+                               areq_ctx->trailen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+                                  areq_ctx->complete, req);
+
+       err = crypto_ahash_finup(ahreq);
+       if (err)
+               return ERR_PTR(err);
+
+       return hash;
+}
+
+static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
+                                    unsigned int flags)
+{
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct scatterlist *dst = req->dst;
+       struct scatterlist *assoc = req->assoc;
+       struct scatterlist *cipher = areq_ctx->cipher;
+       struct scatterlist *hsg = areq_ctx->hsg;
+       struct scatterlist *tsg = areq_ctx->tsg;
+       struct scatterlist *assoc1;
+       struct scatterlist *assoc2;
+       unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+       unsigned int cryptlen = req->cryptlen;
+       struct page *dstp;
+       u8 *vdst;
+       u8 *hash;
+
+       dstp = sg_page(dst);
+       vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+
+       if (ivsize) {
+               sg_init_table(cipher, 2);
+               sg_set_buf(cipher, iv, ivsize);
+               scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
+               dst = cipher;
+               cryptlen += ivsize;
+       }
+
+       if (sg_is_last(assoc))
+               return -EINVAL;
+
+       assoc1 = assoc + 1;
+       if (sg_is_last(assoc1))
+               return -EINVAL;
+
+       assoc2 = assoc + 2;
+       if (!sg_is_last(assoc2))
+               return -EINVAL;
+
+       sg_init_table(hsg, 2);
+       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+
+       sg_init_table(tsg, 1);
+       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+
+       areq_ctx->cryptlen = cryptlen;
+       areq_ctx->headlen = assoc->length + assoc2->length;
+       areq_ctx->trailen = assoc1->length;
+       areq_ctx->sg = dst;
+
+       areq_ctx->complete = authenc_esn_geniv_ahash_done;
+       areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
+       areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
+
+       hash = crypto_authenc_esn_ahash(req, flags);
+       if (IS_ERR(hash))
+               return PTR_ERR(hash);
+
+       scatterwalk_map_and_copy(hash, dst, cryptlen,
+                                crypto_aead_authsize(authenc_esn), 1);
+       return 0;
+}
+
+
+static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
+                                           int err)
+{
+       struct aead_request *areq = req->data;
+
+       if (!err) {
+               struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
+               struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+               struct ablkcipher_request *abreq = aead_request_ctx(areq);
+               u8 *iv = (u8 *)(abreq + 1) +
+                        crypto_ablkcipher_reqsize(ctx->enc);
+
+               err = crypto_authenc_esn_genicv(areq, iv, 0);
+       }
+
+       authenc_esn_request_complete(areq, err);
+}
+
+static int crypto_authenc_esn_encrypt(struct aead_request *req)
+{
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct crypto_ablkcipher *enc = ctx->enc;
+       struct scatterlist *dst = req->dst;
+       unsigned int cryptlen = req->cryptlen;
+       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+                                                   + ctx->reqoff);
+       u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+       int err;
+
+       ablkcipher_request_set_tfm(abreq, enc);
+       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+                                       crypto_authenc_esn_encrypt_done, req);
+       ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
+
+       memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
+
+       err = crypto_ablkcipher_encrypt(abreq);
+       if (err)
+               return err;
+
+       return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+}
+
+static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
+                                              int err)
+{
+       struct aead_request *areq = req->data;
+
+       if (!err) {
+               struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+
+               err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
+       }
+
+       authenc_esn_request_complete(areq, err);
+}
+
+static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
+{
+       struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct aead_request *areq = &req->areq;
+       struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+       u8 *iv = req->giv;
+       int err;
+
+       skcipher_givcrypt_set_tfm(greq, ctx->enc);
+       skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
+                                      crypto_authenc_esn_givencrypt_done, areq);
+       skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
+                                   areq->iv);
+       skcipher_givcrypt_set_giv(greq, iv, req->seq);
+
+       err = crypto_skcipher_givencrypt(greq);
+       if (err)
+               return err;
+
+       return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+}
+
+static int crypto_authenc_esn_verify(struct aead_request *req)
+{
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       u8 *ohash;
+       u8 *ihash;
+       unsigned int authsize;
+
+       areq_ctx->complete = authenc_esn_verify_ahash_done;
+       areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
+
+       ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
+       if (IS_ERR(ohash))
+               return PTR_ERR(ohash);
+
+       authsize = crypto_aead_authsize(authenc_esn);
+       ihash = ohash + authsize;
+       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+                                authsize, 0);
+       return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+}
+
+static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
+                                     unsigned int cryptlen)
+{
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct scatterlist *src = req->src;
+       struct scatterlist *assoc = req->assoc;
+       struct scatterlist *cipher = areq_ctx->cipher;
+       struct scatterlist *hsg = areq_ctx->hsg;
+       struct scatterlist *tsg = areq_ctx->tsg;
+       struct scatterlist *assoc1;
+       struct scatterlist *assoc2;
+       unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+       struct page *srcp;
+       u8 *vsrc;
+
+       srcp = sg_page(src);
+       vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
+
+       if (ivsize) {
+               sg_init_table(cipher, 2);
+               sg_set_buf(cipher, iv, ivsize);
+               scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
+               src = cipher;
+               cryptlen += ivsize;
+       }
+
+       if (sg_is_last(assoc))
+               return -EINVAL;
+
+       assoc1 = assoc + 1;
+       if (sg_is_last(assoc1))
+               return -EINVAL;
+
+       assoc2 = assoc + 2;
+       if (!sg_is_last(assoc2))
+               return -EINVAL;
+
+       sg_init_table(hsg, 2);
+       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+
+       sg_init_table(tsg, 1);
+       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+
+       areq_ctx->cryptlen = cryptlen;
+       areq_ctx->headlen = assoc->length + assoc2->length;
+       areq_ctx->trailen = assoc1->length;
+       areq_ctx->sg = src;
+
+       areq_ctx->complete = authenc_esn_verify_ahash_done;
+       areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
+       areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
+
+       return crypto_authenc_esn_verify(req);
+}
+
+static int crypto_authenc_esn_decrypt(struct aead_request *req)
+{
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct ablkcipher_request *abreq = aead_request_ctx(req);
+       unsigned int cryptlen = req->cryptlen;
+       unsigned int authsize = crypto_aead_authsize(authenc_esn);
+       u8 *iv = req->iv;
+       int err;
+
+       if (cryptlen < authsize)
+               return -EINVAL;
+       cryptlen -= authsize;
+
+       err = crypto_authenc_esn_iverify(req, iv, cryptlen);
+       if (err)
+               return err;
+
+       ablkcipher_request_set_tfm(abreq, ctx->enc);
+       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
+
+       return crypto_ablkcipher_decrypt(abreq);
+}
+
+static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+       struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
+       struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_ahash *auth;
+       struct crypto_ablkcipher *enc;
+       int err;
+
+       auth = crypto_spawn_ahash(&ictx->auth);
+       if (IS_ERR(auth))
+               return PTR_ERR(auth);
+
+       enc = crypto_spawn_skcipher(&ictx->enc);
+       err = PTR_ERR(enc);
+       if (IS_ERR(enc))
+               goto err_free_ahash;
+
+       ctx->auth = auth;
+       ctx->enc = enc;
+
+       ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+                           crypto_ahash_alignmask(auth),
+                           crypto_ahash_alignmask(auth) + 1) +
+                     crypto_ablkcipher_ivsize(enc);
+
+       tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) +
+                               ctx->reqoff +
+                               max_t(unsigned int,
+                               crypto_ahash_reqsize(auth) +
+                               sizeof(struct ahash_request),
+                               sizeof(struct skcipher_givcrypt_request) +
+                               crypto_ablkcipher_reqsize(enc));
+
+       return 0;
+
+err_free_ahash:
+       crypto_free_ahash(auth);
+       return err;
+}
+
+static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_ahash(ctx->auth);
+       crypto_free_ablkcipher(ctx->enc);
+}
+
+static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
+{
+       struct crypto_attr_type *algt;
+       struct crypto_instance *inst;
+       struct hash_alg_common *auth;
+       struct crypto_alg *auth_base;
+       struct crypto_alg *enc;
+       struct authenc_esn_instance_ctx *ctx;
+       const char *enc_name;
+       int err;
+
+       algt = crypto_get_attr_type(tb);
+       err = PTR_ERR(algt);
+       if (IS_ERR(algt))
+               return ERR_PTR(err);
+
+       if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+               return ERR_PTR(-EINVAL);
+
+       auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+                              CRYPTO_ALG_TYPE_AHASH_MASK);
+       if (IS_ERR(auth))
+               return ERR_CAST(auth);
+
+       auth_base = &auth->base;
+
+       enc_name = crypto_attr_alg_name(tb[2]);
+       err = PTR_ERR(enc_name);
+       if (IS_ERR(enc_name))
+               goto out_put_auth;
+
+       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!inst)
+               goto out_put_auth;
+
+       ctx = crypto_instance_ctx(inst);
+
+       err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+       if (err)
+               goto err_free_inst;
+
+       crypto_set_skcipher_spawn(&ctx->enc, inst);
+       err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+                                  crypto_requires_sync(algt->type,
+                                                       algt->mask));
+       if (err)
+               goto err_drop_auth;
+
+       enc = crypto_skcipher_spawn_alg(&ctx->enc);
+
+       err = -ENAMETOOLONG;
+       if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+                    "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
+           CRYPTO_MAX_ALG_NAME)
+               goto err_drop_enc;
+
+       if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "authencesn(%s,%s)", auth_base->cra_driver_name,
+                    enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+               goto err_drop_enc;
+
+       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+       inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.cra_priority = enc->cra_priority *
+                                10 + auth_base->cra_priority;
+       inst->alg.cra_blocksize = enc->cra_blocksize;
+       inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
+       inst->alg.cra_type = &crypto_aead_type;
+
+       inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
+       inst->alg.cra_aead.maxauthsize = auth->digestsize;
+
+       inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+
+       inst->alg.cra_init = crypto_authenc_esn_init_tfm;
+       inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
+
+       inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
+       inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
+       inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
+       inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
+
+out:
+       crypto_mod_put(auth_base);
+       return inst;
+
+err_drop_enc:
+       crypto_drop_skcipher(&ctx->enc);
+err_drop_auth:
+       crypto_drop_ahash(&ctx->auth);
+err_free_inst:
+       kfree(inst);
+out_put_auth:
+       inst = ERR_PTR(err);
+       goto out;
+}
+
+static void crypto_authenc_esn_free(struct crypto_instance *inst)
+{
+       struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+       crypto_drop_skcipher(&ctx->enc);
+       crypto_drop_ahash(&ctx->auth);
+       kfree(inst);
+}
+
+static struct crypto_template crypto_authenc_esn_tmpl = {
+       .name = "authencesn",
+       .alloc = crypto_authenc_esn_alloc,
+       .free = crypto_authenc_esn_free,
+       .module = THIS_MODULE,
+};
+
+static int __init crypto_authenc_esn_module_init(void)
+{
+       return crypto_register_template(&crypto_authenc_esn_tmpl);
+}
+
+static void __exit crypto_authenc_esn_module_exit(void)
+{
+       crypto_unregister_template(&crypto_authenc_esn_tmpl);
+}
+
+module_init(crypto_authenc_esn_module_init);
+module_exit(crypto_authenc_esn_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
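
Like plain "authenc", crypto_authenc_esn_setkey() above expects the combined key wrapped in an rtattr: a CRYPTO_AUTHENC_KEYA_PARAM header carrying the big-endian encryption-key length, followed by the authentication key and then the encryption key. A userspace sketch that builds such a blob; the param struct and attribute value are local stand-ins mirroring crypto/authenc.h, which is not exported to userspace:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() */
#include <linux/rtnetlink.h>	/* struct rtattr, RTA_* macros */

#define CRYPTO_AUTHENC_KEYA_PARAM 1		/* assumed, per crypto/authenc.h */
struct authenc_key_param { uint32_t enckeylen; };	/* stored big-endian */

static size_t build_authenc_key(uint8_t *buf, size_t buflen,
				const uint8_t *authkey, uint32_t authkeylen,
				const uint8_t *enckey, uint32_t enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct authenc_key_param *param;
	size_t need = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;

	if (buflen < need)
		return 0;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = htonl(enckeylen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);
	return need;
}

int main(void)
{
	uint8_t authkey[20] = { 0 }, enckey[16] = { 0 }, blob[128];
	size_t len = build_authenc_key(blob, sizeof(blob),
				       authkey, sizeof(authkey),
				       enckey, sizeof(enckey));

	printf("combined key blob: %zu bytes\n", len);
	return 0;
}
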
index 54784bb..edc2586 100644 (file)
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
        u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
+struct acpi_gpe_notify_object {
+       struct acpi_namespace_node *node;
+       struct acpi_gpe_notify_object *next;
+};
+
 union acpi_gpe_dispatch_info {
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level */
        struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
-       struct acpi_namespace_node *device_node;        /* Parent _PRW device for implicit notify */
+       struct acpi_gpe_notify_object device;   /* List of _PRW devices for implicit notify */
 };
 
 /*
index 14988a8..f472521 100644 (file)
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
        acpi_status status;
        struct acpi_gpe_event_info *local_gpe_event_info;
        struct acpi_evaluate_info *info;
+       struct acpi_gpe_notify_object *notify_object;
 
        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                 * from this thread -- because handlers may in turn run other
                 * control methods.
                 */
-               status =
-                   acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
-                                                device_node,
-                                                ACPI_NOTIFY_DEVICE_WAKE);
+               status = acpi_ev_queue_notify_request(
+                               local_gpe_event_info->dispatch.device.node,
+                               ACPI_NOTIFY_DEVICE_WAKE);
+
+               notify_object = local_gpe_event_info->dispatch.device.next;
+               while (ACPI_SUCCESS(status) && notify_object) {
+                       status = acpi_ev_queue_notify_request(
+                                       notify_object->node,
+                                       ACPI_NOTIFY_DEVICE_WAKE);
+                       notify_object = notify_object->next;
+               }
+
                break;
 
        case ACPI_GPE_DISPATCH_METHOD:
index e9562a7..52aaff3 100644 (file)
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
        acpi_status status = AE_BAD_PARAMETER;
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_namespace_node *device_node;
+       struct acpi_gpe_notify_object *notify_object;
        acpi_cpu_flags flags;
+       u8 gpe_dispatch_mask;
 
        ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
 
@@ -212,37 +214,62 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (!gpe_event_info) {
+               goto unlock_and_exit;
+       }
+
+       if (wake_device == ACPI_ROOT_OBJECT) {
+               goto out;
+       }
+
+       /*
+        * If there is no method or handler for this GPE, then the
+        * wake_device will be notified whenever this GPE fires (aka
+        * "implicit notify"). Note: The GPE is assumed to be
+        * level-triggered (for Windows compatibility).
+        */
+       gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+       if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+           && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+               goto out;
+       }
+
        /* Validate wake_device is of type Device */
 
        device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
        if (device_node->type != ACPI_TYPE_DEVICE) {
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
+               goto unlock_and_exit;
        }
 
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+       if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
+               gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+                                        ACPI_GPE_LEVEL_TRIGGERED);
+               gpe_event_info->dispatch.device.node = device_node;
+               gpe_event_info->dispatch.device.next = NULL;
+       } else {
+               /* There are multiple devices to notify implicitly. */
 
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (gpe_event_info) {
-               /*
-                * If there is no method or handler for this GPE, then the
-                * wake_device will be notified whenever this GPE fires (aka
-                * "implicit notify") Note: The GPE is assumed to be
-                * level-triggered (for windows compatibility).
-                */
-               if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-                   ACPI_GPE_DISPATCH_NONE) {
-                       gpe_event_info->flags =
-                           (ACPI_GPE_DISPATCH_NOTIFY |
-                            ACPI_GPE_LEVEL_TRIGGERED);
-                       gpe_event_info->dispatch.device_node = device_node;
+               notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+               if (!notify_object) {
+                       status = AE_NO_MEMORY;
+                       goto unlock_and_exit;
                }
 
-               gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-               status = AE_OK;
+               notify_object->node = device_node;
+               notify_object->next = gpe_event_info->dispatch.device.next;
+               gpe_event_info->dispatch.device.next = notify_object;
        }
 
+ out:
+       gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+       status = AE_OK;
+
+ unlock_and_exit:
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
 }
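
For illustration, not part of the patch: a driver-side sketch of how a wakeup GPE is typically wired up through this interface, showing why the implicit-notify list matters once two _PRW devices declare the same GPE. The helper name is hypothetical and error handling is minimal.

/* Illustrative only; not part of the patch above. */
#include <linux/acpi.h>

/*
 * Hypothetical helper: mark 'wake_dev' for implicit notify on the given
 * GPE and enable it.  With the list introduced above, several _PRW
 * devices may share one GPE; each extra call adds another node to the
 * implicit-notify list instead of overwriting the previous device.
 */
static acpi_status demo_setup_wake_gpe(acpi_handle wake_dev,
				       acpi_handle gpe_dev, u32 gpe_number)
{
	acpi_status status;

	status = acpi_setup_gpe_for_wake(wake_dev, gpe_dev, gpe_number);
	if (ACPI_FAILURE(status))
		return status;

	return acpi_enable_gpe(gpe_dev, gpe_number);
}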
index 5df67f1..384f7ab 100644 (file)
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                        size_t count, loff_t *ppos)
 {
        static char *buf;
-       static int uncopied_bytes;
+       static u32 max_size;
+       static u32 uncopied_bytes;
+
        struct acpi_table_header table;
        acpi_status status;
 
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                if (copy_from_user(&table, user_buf,
                                   sizeof(struct acpi_table_header)))
                        return -EFAULT;
-               uncopied_bytes = table.length;
-               buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+               uncopied_bytes = max_size = table.length;
+               buf = kzalloc(max_size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        }
 
-       if (uncopied_bytes < count) {
-               kfree(buf);
+       if (buf == NULL)
+               return -EINVAL;
+
+       if ((*ppos > max_size) ||
+           (*ppos + count > max_size) ||
+           (*ppos + count < count) ||
+           (count > uncopied_bytes))
                return -EINVAL;
-       }
 
        if (copy_from_user(buf + (*ppos), user_buf, count)) {
                kfree(buf);
+               buf = NULL;
                return -EFAULT;
        }
 
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
        if (!uncopied_bytes) {
                status = acpi_install_method(buf);
                kfree(buf);
+               buf = NULL;
                if (ACPI_FAILURE(status))
                        return -EINVAL;
                add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
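
Context, not part of the patch: the handler being hardened here backs the debugfs file that accepts a raw AML method image. A minimal user-space sketch of feeding it in chunks, assuming debugfs is mounted at /sys/kernel/debug and CONFIG_ACPI_CUSTOM_METHOD is enabled; the bounds checks added above are what keep such partial writes within the table length announced in the header.

/* Illustrative only; not part of the patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <method.aml>\n", argv[0]);
		return 1;
	}

	in = open(argv[1], O_RDONLY);
	out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}

	/* The kernel side accumulates chunks until table.length bytes arrive. */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	}

	close(in);
	close(out);
	return 0;
}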
index b093181..c90c76a 100644 (file)
@@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port);
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 {
-       u32 dummy;
        void __iomem *virt_addr;
-       int size = width / 8, unmap = 0;
+       unsigned int size = width / 8;
+       bool unmap = false;
+       u32 dummy;
 
        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-       rcu_read_unlock();
        if (!virt_addr) {
+               rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
-               unmap = 1;
+               if (!virt_addr)
+                       return AE_BAD_ADDRESS;
+               unmap = true;
        }
+
        if (!value)
                value = &dummy;
 
@@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 
        if (unmap)
                iounmap(virt_addr);
+       else
+               rcu_read_unlock();
 
        return AE_OK;
 }
@@ -674,14 +680,17 @@ acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
        void __iomem *virt_addr;
-       int size = width / 8, unmap = 0;
+       unsigned int size = width / 8;
+       bool unmap = false;
 
        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-       rcu_read_unlock();
        if (!virt_addr) {
+               rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
-               unmap = 1;
+               if (!virt_addr)
+                       return AE_BAD_ADDRESS;
+               unmap = true;
        }
 
        switch (width) {
@@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 
        if (unmap)
                iounmap(virt_addr);
+       else
+               rcu_read_unlock();
 
        return AE_OK;
 }
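
Not part of the patch, just a condensed view of the control flow the fix above restores in both accessors: the RCU read lock is now held for as long as the looked-up cached mapping is in use, and is dropped early only when falling back to a temporary ioremap. demo_read_phys() is a made-up name, acpi_map_vaddr_lookup() is the lookup used in this file, and width handling plus the AE_* return codes are elided.

/* Illustrative sketch; see the note above. */
static u32 demo_read_phys(acpi_physical_address phys_addr, u32 width)
{
	unsigned int size = width / 8;
	void __iomem *virt_addr;
	bool unmap = false;
	u32 val;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No cached mapping: drop RCU early, map temporarily. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return 0;
		unmap = true;
	}

	val = readl(virt_addr);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();	/* cached mapping was used under RCU */

	return val;
}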
index 42d3d72..5af3479 100644 (file)
@@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device)
        if (!device)
                return 0;
 
+       /* Is this device able to support video switching ? */
+       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
+           ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+               video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
+
        /* Is this device able to retrieve a video ROM ? */
        if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
                video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
index ed65014..7bfbe40 100644 (file)
@@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void)
                struct acpi_device *dev = container_of(node,
                                                       struct acpi_device,
                                                       wakeup_list);
-               if (device_can_wakeup(&dev->dev))
+               if (device_can_wakeup(&dev->dev)) {
+                       /* Button GPEs are supposed to be always enabled. */
+                       acpi_enable_gpe(dev->wakeup.gpe_device,
+                                       dev->wakeup.gpe_number);
                        device_set_wakeup_enable(&dev->dev, true);
+               }
        }
        mutex_unlock(&acpi_device_lock);
        return 0;
index 3288263..b8d96ce 100644 (file)
@@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
        { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
        { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -379,6 +380,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },        /* 6145 */
        { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },        /* 6121 */
        { PCI_DEVICE(0x1b4b, 0x9123),
+         .class = PCI_CLASS_STORAGE_SATA_AHCI,
+         .class_mask = 0xffffff,
          .driver_data = board_ahci_yes_fbs },                  /* 88se9128 */
 
        /* Promise */
index a31fe96..d4e52e2 100644 (file)
@@ -4138,6 +4138,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * device and controller are SATA.
         */
        { "PIONEER DVD-RW  DVRTD08",    "1.00", ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-212D",   "1.28", ATA_HORKAGE_NOSETXFER },
 
        /* End Marker */
        { }
index 5defc74..600f635 100644 (file)
@@ -1099,9 +1099,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
                struct request_queue *q = sdev->request_queue;
                void *buf;
 
-               /* set the min alignment and padding */
-               blk_queue_update_dma_alignment(sdev->request_queue,
-                                              ATA_DMA_PAD_SZ - 1);
+               sdev->sector_size = ATA_SECT_SIZE;
+
+               /* set DMA padding */
                blk_queue_update_dma_pad(sdev->request_queue,
                                         ATA_DMA_PAD_SZ - 1);
 
@@ -1115,13 +1115,25 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 
                blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
        } else {
-               /* ATA devices must be sector aligned */
                sdev->sector_size = ata_id_logical_sector_size(dev->id);
-               blk_queue_update_dma_alignment(sdev->request_queue,
-                                              sdev->sector_size - 1);
                sdev->manage_start_stop = 1;
        }
 
+       /*
+        * ata_pio_sectors() expects buffer for each sector to not cross
+        * page boundary.  Enforce it by requiring buffers to be sector
+        * aligned, which works iff sector_size is not larger than
+        * PAGE_SIZE.  ATAPI devices also need the alignment as
+        * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
+        */
+       if (sdev->sector_size > PAGE_SIZE)
+               ata_dev_printk(dev, KERN_WARNING,
+                       "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+                       sdev->sector_size);
+
+       blk_queue_update_dma_alignment(sdev->request_queue,
+                                      sdev->sector_size - 1);
+
        if (dev->flags & ATA_DFLAG_AN)
                set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
 
index d7e57db..538ec38 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_hpt366"
-#define DRV_VERSION    "0.6.9"
+#define DRV_VERSION    "0.6.10"
 
 struct hpt_clock {
        u8      xfer_mode;
@@ -160,8 +160,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
 
        while (list[i] != NULL) {
                if (!strcmp(list[i], model_num)) {
-                       printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
-                               modestr, list[i]);
+                       pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+                                  modestr, list[i]);
                        return 1;
                }
                i++;
index efdd18b..4c5b518 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_hpt37x"
-#define DRV_VERSION    "0.6.18"
+#define DRV_VERSION    "0.6.22"
 
 struct hpt_clock {
        u8      xfer_speed;
@@ -229,8 +229,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
 
        while (list[i] != NULL) {
                if (!strcmp(list[i], model_num)) {
-                       printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
-                               modestr, list[i]);
+                       pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+                                  modestr, list[i]);
                        return 1;
                }
                i++;
@@ -642,7 +642,6 @@ static struct ata_port_operations hpt372_port_ops = {
 static struct ata_port_operations hpt374_fn1_port_ops = {
        .inherits       = &hpt372_port_ops,
        .cable_detect   = hpt374_fn1_cable_detect,
-       .prereset       = hpt37x_pre_reset,
 };
 
 /**
@@ -803,7 +802,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                .udma_mask = ATA_UDMA6,
                .port_ops = &hpt302_port_ops
        };
-       /* HPT374 - UDMA100, function 1 uses different prereset method */
+       /* HPT374 - UDMA100, function 1 uses different cable_detect method */
        static const struct ata_port_info info_hpt374_fn0 = {
                .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = ATA_PIO4,
@@ -838,7 +837,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
        if (rc)
                return rc;
 
-       if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+       switch (dev->device) {
+       case PCI_DEVICE_ID_TTI_HPT366:
                /* May be a later chip in disguise. Check */
                /* Older chips are in the HPT366 driver. Ignore them */
                if (rev < 3)
@@ -863,54 +863,50 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                        chip_table = &hpt372;
                        break;
                default:
-                       printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, "
+                       pr_err(DRV_NAME ": Unknown HPT366 subtype, "
                               "please report (%d).\n", rev);
                        return -ENODEV;
                }
-       } else {
-               switch (dev->device) {
-               case PCI_DEVICE_ID_TTI_HPT372:
-                       /* 372N if rev >= 2 */
-                       if (rev >= 2)
-                               return -ENODEV;
-                       ppi[0] = &info_hpt372;
-                       chip_table = &hpt372a;
-                       break;
-               case PCI_DEVICE_ID_TTI_HPT302:
-                       /* 302N if rev > 1 */
-                       if (rev > 1)
-                               return -ENODEV;
-                       ppi[0] = &info_hpt302;
-                       /* Check this */
-                       chip_table = &hpt302;
-                       break;
-               case PCI_DEVICE_ID_TTI_HPT371:
-                       if (rev > 1)
-                               return -ENODEV;
-                       ppi[0] = &info_hpt302;
-                       chip_table = &hpt371;
-                       /*
-                        * Single channel device, master is not present
-                        * but the BIOS (or us for non x86) must mark it
-                        * absent
-                        */
-                       pci_read_config_byte(dev, 0x50, &mcr1);
-                       mcr1 &= ~0x04;
-                       pci_write_config_byte(dev, 0x50, mcr1);
-                       break;
-               case PCI_DEVICE_ID_TTI_HPT374:
-                       chip_table = &hpt374;
-                       if (!(PCI_FUNC(dev->devfn) & 1))
-                               *ppi = &info_hpt374_fn0;
-                       else
-                               *ppi = &info_hpt374_fn1;
-                       break;
-               default:
-                       printk(KERN_ERR
-                              "pata_hpt37x: PCI table is bogus, please report (%d).\n",
-                              dev->device);
-                               return -ENODEV;
-               }
+               break;
+       case PCI_DEVICE_ID_TTI_HPT372:
+               /* 372N if rev >= 2 */
+               if (rev >= 2)
+                       return -ENODEV;
+               ppi[0] = &info_hpt372;
+               chip_table = &hpt372a;
+               break;
+       case PCI_DEVICE_ID_TTI_HPT302:
+               /* 302N if rev > 1 */
+               if (rev > 1)
+                       return -ENODEV;
+               ppi[0] = &info_hpt302;
+               /* Check this */
+               chip_table = &hpt302;
+               break;
+       case PCI_DEVICE_ID_TTI_HPT371:
+               if (rev > 1)
+                       return -ENODEV;
+               ppi[0] = &info_hpt302;
+               chip_table = &hpt371;
+               /*
+                * Single channel device, master is not present but the BIOS
+                * (or us for non x86) must mark it absent
+                */
+               pci_read_config_byte(dev, 0x50, &mcr1);
+               mcr1 &= ~0x04;
+               pci_write_config_byte(dev, 0x50, mcr1);
+               break;
+       case PCI_DEVICE_ID_TTI_HPT374:
+               chip_table = &hpt374;
+               if (!(PCI_FUNC(dev->devfn) & 1))
+                       *ppi = &info_hpt374_fn0;
+               else
+                       *ppi = &info_hpt374_fn1;
+               break;
+       default:
+               pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
+                      dev->device);
+               return -ENODEV;
        }
        /* Ok so this is a chip we support */
 
@@ -957,8 +953,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                u8 sr;
                u32 total = 0;
 
-               printk(KERN_WARNING
-                      "pata_hpt37x: BIOS has not set timing clocks.\n");
+               pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n");
 
                /* This is the process the HPT371 BIOS is reported to use */
                for (i = 0; i < 128; i++) {
@@ -1014,7 +1009,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                                               (f_high << 16) | f_low | 0x100);
                }
                if (adjust == 8) {
-                       printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
+                       pr_err(DRV_NAME ": DPLL did not stabilize!\n");
                        return -ENODEV;
                }
                if (dpll == 3)
@@ -1022,8 +1017,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                else
                        private_data = (void *)hpt37x_timings_50;
 
-               printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n",
-                      MHz[clock_slot], MHz[dpll]);
+               pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n",
+                       MHz[clock_slot], MHz[dpll]);
        } else {
                private_data = (void *)chip_table->clocks[clock_slot];
                /*
@@ -1036,8 +1031,9 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                        ppi[0] = &info_hpt370_33;
                if (clock_slot < 2 && ppi[0] == &info_hpt370a)
                        ppi[0] = &info_hpt370a_33;
-               printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
-                      chip_table->name, MHz[clock_slot]);
+
+               pr_info(DRV_NAME ": %s using %dMHz bus clock.\n",
+                       chip_table->name, MHz[clock_slot]);
        }
 
        /* Now kick off ATA set up */
index d2239bb..eca68ca 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_hpt3x2n"
-#define DRV_VERSION    "0.3.13"
+#define DRV_VERSION    "0.3.14"
 
 enum {
        HPT_PCI_FAST    =       (1 << 31),
@@ -418,7 +418,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
                u16 sr;
                u32 total = 0;
 
-               printk(KERN_WARNING "pata_hpt3x2n: BIOS clock data not set.\n");
+               pr_warning(DRV_NAME ": BIOS clock data not set.\n");
 
                /* This is the process the HPT371 BIOS is reported to use */
                for (i = 0; i < 128; i++) {
@@ -528,8 +528,7 @@ hpt372n:
                ppi[0] = &info_hpt372n;
                break;
        default:
-               printk(KERN_ERR
-                      "pata_hpt3x2n: PCI table is bogus please report (%d).\n",
+               pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
                       dev->device);
                return -ENODEV;
        }
@@ -579,12 +578,11 @@ hpt372n:
                pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
        }
        if (adjust == 8) {
-               printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n");
+               pr_err(DRV_NAME ": DPLL did not stabilize!\n");
                return -ENODEV;
        }
 
-       printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
-              pci_mhz);
+       pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz);
 
        /*
         * Set our private data up. We only need a few flags
index 8cc536e..d7d8026 100644 (file)
@@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
 };
 
 static struct ata_port_operations mpc52xx_ata_port_ops = {
-       .inherits               = &ata_sff_port_ops,
+       .inherits               = &ata_bmdma_port_ops,
        .sff_dev_select         = mpc52xx_ata_dev_select,
        .set_piomode            = mpc52xx_ata_set_piomode,
        .set_dmamode            = mpc52xx_ata_set_dmamode,
index bca9cb8..487a547 100644 (file)
@@ -151,7 +151,7 @@ static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int
        spin_unlock_irqrestore(&idt77105_priv_lock, flags);
        if (arg == NULL)
                return 0;
-       return copy_to_user(arg, &PRIV(dev)->stats,
+       return copy_to_user(arg, &stats,
                    sizeof(struct idt77105_stats)) ? -EFAULT : 0;
 }
 
index 73fb1c4..25ef1a4 100644 (file)
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
        }
 
        skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-       if (!skb && net_ratelimit()) {
-               dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+       if (!skb) {
+               if (net_ratelimit())
+                       dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
                return -ENOMEM;
        }
        header = (void *)skb_put(skb, sizeof(*header));
index 656493a..42615b4 100644 (file)
@@ -407,12 +407,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                goto out;
        }
 
+       /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
-               spin_unlock_irq(&dev->power.lock);
+               spin_unlock(&dev->power.lock);
 
-               pm_request_idle(parent);
+               spin_lock(&parent->power.lock);
+               rpm_idle(parent, RPM_ASYNC);
+               spin_unlock(&parent->power.lock);
 
-               spin_lock_irq(&dev->power.lock);
+               spin_lock(&dev->power.lock);
        }
 
  out:
index d7f463d..40528ba 100644 (file)
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND)     += xen-blkfront.o
 obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
 obj-$(CONFIG_BLK_DEV_RBD)     += rbd.o
 
-swim_mod-objs  := swim.o swim_asm.o
+swim_mod-y     := swim.o swim_asm.o
index e76d997..06ea82c 100644 (file)
@@ -3,4 +3,4 @@
 #
 
 obj-$(CONFIG_ATA_OVER_ETH)     += aoe.o
-aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
+aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
index 516d5bb..9279272 100644 (file)
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk)
        sector_t total_size;
        InquiryData_struct *inq_buff = NULL;
 
-       for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+       for (logvol = 0; logvol <= h->highest_lun; logvol++) {
                if (!h->drv[logvol])
                        continue;
                if (memcmp(h->drv[logvol]->LunID, drv->LunID,
index 8cbfaa6..fe81c85 100644 (file)
@@ -2177,7 +2177,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
                return;
        }
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
                retcode = ERR_PERM;
                goto fail;
        }
index b9ba04f..77fc76f 100644 (file)
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
                        struct block_device *bdev = opened_bdev[cnt];
                        if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
                                continue;
-                       __invalidate_device(bdev);
+                       __invalidate_device(bdev, true);
                }
                mutex_unlock(&open_lock);
        } else {
index 44e18c0..dbf31ec 100644 (file)
@@ -78,7 +78,6 @@
 
 #include <asm/uaccess.h>
 
-static DEFINE_MUTEX(loop_mutex);
 static LIST_HEAD(loop_devices);
 static DEFINE_MUTEX(loop_devices_mutex);
 
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 {
        struct loop_device *lo = bdev->bd_disk->private_data;
 
-       mutex_lock(&loop_mutex);
        mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt++;
        mutex_unlock(&lo->lo_ctl_mutex);
-       mutex_unlock(&loop_mutex);
 
        return 0;
 }
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
        struct loop_device *lo = disk->private_data;
        int err;
 
-       mutex_lock(&loop_mutex);
        mutex_lock(&lo->lo_ctl_mutex);
 
        if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 out:
        mutex_unlock(&lo->lo_ctl_mutex);
 out_unlocked:
-       mutex_unlock(&loop_mutex);
        return 0;
 }
 
@@ -1641,6 +1636,9 @@ out:
 
 static void loop_free(struct loop_device *lo)
 {
+       if (!lo->lo_queue->queue_lock)
+               lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
+
        blk_cleanup_queue(lo->lo_queue);
        put_disk(lo->lo_disk);
        list_del(&lo->lo_list);
index a32fb41..e6fc716 100644 (file)
@@ -53,7 +53,6 @@
 #define DBG_BLKDEV      0x0100
 #define DBG_RX          0x0200
 #define DBG_TX          0x0400
-static DEFINE_MUTEX(nbd_mutex);
 static unsigned int debugflags;
 #endif /* NDEBUG */
 
@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
        dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
                        lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
-       mutex_lock(&nbd_mutex);
        mutex_lock(&lo->tx_lock);
        error = __nbd_ioctl(bdev, lo, cmd, arg);
        mutex_unlock(&lo->tx_lock);
-       mutex_unlock(&nbd_mutex);
 
        return error;
 }
index 949ed09..5577ed6 100644 (file)
 
 #define VERSION "1.0"
 
+#define ATH3K_DNLOAD                           0x01
+#define ATH3K_GETSTATE                         0x05
+#define ATH3K_SET_NORMAL_MODE                  0x07
+#define ATH3K_GETVERSION                       0x09
+#define USB_REG_SWITCH_VID_PID                 0x0a
+
+#define ATH3K_MODE_MASK                                0x3F
+#define ATH3K_NORMAL_MODE                      0x0E
+
+#define ATH3K_PATCH_UPDATE                     0x80
+#define ATH3K_SYSCFG_UPDATE                    0x40
+
+#define ATH3K_XTAL_FREQ_26M                    0x00
+#define ATH3K_XTAL_FREQ_40M                    0x01
+#define ATH3K_XTAL_FREQ_19P2                   0x02
+#define ATH3K_NAME_LEN                         0xFF
+
+struct ath3k_version {
+       unsigned int    rom_version;
+       unsigned int    build_version;
+       unsigned int    ram_version;
+       unsigned char   ref_clock;
+       unsigned char   reserved[0x07];
+};
 
 static struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 */
@@ -39,54 +63,69 @@ static struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3002) },
 
+       /* Atheros AR9285 Malbec with sflash firmware */
+       { USB_DEVICE(0x03F0, 0x311D) },
+
+       /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0CF3, 0x3004) },
+
+       /* Atheros AR5BBU12 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xE02C) },
+
        { }     /* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(usb, ath3k_table);
 
-#define USB_REQ_DFU_DNLOAD     1
-#define BULK_SIZE              4096
+#define BTUSB_ATH3012          0x80
+/* This table is used to load patch and sysconfig files
+ * for AR3012 */
+static struct usb_device_id ath3k_blist_tbl[] = {
+
+       /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 
-struct ath3k_data {
-       struct usb_device *udev;
-       u8 *fw_data;
-       u32 fw_size;
-       u32 fw_sent;
+       { }     /* Terminating entry */
 };
 
-static int ath3k_load_firmware(struct ath3k_data *data,
-                               unsigned char *firmware,
-                               int count)
+#define USB_REQ_DFU_DNLOAD     1
+#define BULK_SIZE              4096
+#define FW_HDR_SIZE            20
+
+static int ath3k_load_firmware(struct usb_device *udev,
+                               const struct firmware *firmware)
 {
        u8 *send_buf;
        int err, pipe, len, size, sent = 0;
+       int count = firmware->size;
 
-       BT_DBG("ath3k %p udev %p", data, data->udev);
+       BT_DBG("udev %p", udev);
 
-       pipe = usb_sndctrlpipe(data->udev, 0);
+       pipe = usb_sndctrlpipe(udev, 0);
+
+       send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+       if (!send_buf) {
+               BT_ERR("Can't allocate memory chunk for firmware");
+               return -ENOMEM;
+       }
 
-       if ((usb_control_msg(data->udev, pipe,
+       memcpy(send_buf, firmware->data, 20);
+       if ((err = usb_control_msg(udev, pipe,
                                USB_REQ_DFU_DNLOAD,
                                USB_TYPE_VENDOR, 0, 0,
-                               firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+                               send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
                BT_ERR("Can't change to loading configuration err");
-               return -EBUSY;
+               goto error;
        }
        sent += 20;
        count -= 20;
 
-       send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
-       if (!send_buf) {
-               BT_ERR("Can't allocate memory chunk for firmware");
-               return -ENOMEM;
-       }
-
        while (count) {
                size = min_t(uint, count, BULK_SIZE);
-               pipe = usb_sndbulkpipe(data->udev, 0x02);
-               memcpy(send_buf, firmware + sent, size);
+               pipe = usb_sndbulkpipe(udev, 0x02);
+               memcpy(send_buf, firmware->data + sent, size);
 
-               err = usb_bulk_msg(data->udev, pipe, send_buf, size,
+               err = usb_bulk_msg(udev, pipe, send_buf, size,
                                        &len, 3000);
 
                if (err || (len != size)) {
@@ -107,62 +146,270 @@ error:
        return err;
 }
 
+static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
+{
+       int pipe = 0;
+
+       pipe = usb_rcvctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+                       USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+                       state, 0x01, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_get_version(struct usb_device *udev,
+                       struct ath3k_version *version)
+{
+       int pipe = 0;
+
+       pipe = usb_rcvctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+                       USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
+                       sizeof(struct ath3k_version),
+                       USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_load_fwfile(struct usb_device *udev,
+               const struct firmware *firmware)
+{
+       u8 *send_buf;
+       int err, pipe, len, size, count, sent = 0;
+       int ret;
+
+       count = firmware->size;
+
+       send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+       if (!send_buf) {
+               BT_ERR("Can't allocate memory chunk for firmware");
+               return -ENOMEM;
+       }
+
+       size = min_t(uint, count, FW_HDR_SIZE);
+       memcpy(send_buf, firmware->data, size);
+
+       pipe = usb_sndctrlpipe(udev, 0);
+       ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD,
+                       USB_TYPE_VENDOR, 0, 0, send_buf,
+                       size, USB_CTRL_SET_TIMEOUT);
+       if (ret < 0) {
+               BT_ERR("Can't change to loading configuration err");
+               kfree(send_buf);
+               return ret;
+       }
+
+       sent += size;
+       count -= size;
+
+       while (count) {
+               size = min_t(uint, count, BULK_SIZE);
+               pipe = usb_sndbulkpipe(udev, 0x02);
+
+               memcpy(send_buf, firmware->data + sent, size);
+
+               err = usb_bulk_msg(udev, pipe, send_buf, size,
+                                       &len, 3000);
+               if (err || (len != size)) {
+                       BT_ERR("Error in firmware loading err = %d, "
+                               "len = %d, size = %d", err, len, size);
+                       kfree(send_buf);
+                       return err;
+               }
+               sent  += size;
+               count -= size;
+       }
+
+       kfree(send_buf);
+       return 0;
+}
+
+static int ath3k_switch_pid(struct usb_device *udev)
+{
+       int pipe = 0;
+
+       pipe = usb_sndctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID,
+                       USB_TYPE_VENDOR, 0, 0,
+                       NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_set_normal_mode(struct usb_device *udev)
+{
+       unsigned char fw_state;
+       int pipe = 0, ret;
+
+       ret = ath3k_get_state(udev, &fw_state);
+       if (ret < 0) {
+               BT_ERR("Can't get state to change to normal mode err");
+               return ret;
+       }
+
+       if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) {
+               BT_DBG("firmware was already in normal mode");
+               return 0;
+       }
+
+       pipe = usb_sndctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE,
+                       USB_TYPE_VENDOR, 0, 0,
+                       NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_load_patch(struct usb_device *udev)
+{
+       unsigned char fw_state;
+       char filename[ATH3K_NAME_LEN] = {0};
+       const struct firmware *firmware;
+       struct ath3k_version fw_version, pt_version;
+       int ret;
+
+       ret = ath3k_get_state(udev, &fw_state);
+       if (ret < 0) {
+               BT_ERR("Can't get state to change to load ram patch err");
+               return ret;
+       }
+
+       if (fw_state & ATH3K_PATCH_UPDATE) {
+               BT_DBG("Patch was already downloaded");
+               return 0;
+       }
+
+       ret = ath3k_get_version(udev, &fw_version);
+       if (ret < 0) {
+               BT_ERR("Can't get version to change to load ram patch err");
+               return ret;
+       }
+
+       snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
+               fw_version.rom_version);
+
+       ret = request_firmware(&firmware, filename, &udev->dev);
+       if (ret < 0) {
+               BT_ERR("Patch file not found %s", filename);
+               return ret;
+       }
+
+       pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
+       pt_version.build_version = *(int *)
+               (firmware->data + firmware->size - 4);
+
+       if ((pt_version.rom_version != fw_version.rom_version) ||
+               (pt_version.build_version <= fw_version.build_version)) {
+               BT_ERR("Patch file version did not match with firmware");
+               release_firmware(firmware);
+               return -EINVAL;
+       }
+
+       ret = ath3k_load_fwfile(udev, firmware);
+       release_firmware(firmware);
+
+       return ret;
+}
+
+static int ath3k_load_syscfg(struct usb_device *udev)
+{
+       unsigned char fw_state;
+       char filename[ATH3K_NAME_LEN] = {0};
+       const struct firmware *firmware;
+       struct ath3k_version fw_version;
+       int clk_value, ret;
+
+       ret = ath3k_get_state(udev, &fw_state);
+       if (ret < 0) {
+               BT_ERR("Can't get state to change to load configuration err");
+               return -EBUSY;
+       }
+
+       ret = ath3k_get_version(udev, &fw_version);
+       if (ret < 0) {
+               BT_ERR("Can't get version to change to load ram patch err");
+               return ret;
+       }
+
+       switch (fw_version.ref_clock) {
+
+       case ATH3K_XTAL_FREQ_26M:
+               clk_value = 26;
+               break;
+       case ATH3K_XTAL_FREQ_40M:
+               clk_value = 40;
+               break;
+       case ATH3K_XTAL_FREQ_19P2:
+               clk_value = 19;
+               break;
+       default:
+               clk_value = 0;
+               break;
+       }
+
+       snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+               fw_version.rom_version, clk_value, ".dfu");
+
+       ret = request_firmware(&firmware, filename, &udev->dev);
+       if (ret < 0) {
+               BT_ERR("Configuration file not found %s", filename);
+               return ret;
+       }
+
+       ret = ath3k_load_fwfile(udev, firmware);
+       release_firmware(firmware);
+
+       return ret;
+}
+
 static int ath3k_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
 {
        const struct firmware *firmware;
        struct usb_device *udev = interface_to_usbdev(intf);
-       struct ath3k_data *data;
-       int size;
+       int ret;
 
        BT_DBG("intf %p id %p", intf, id);
 
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
+       /* match device ID in ath3k blacklist table */
+       if (!id->driver_info) {
+               const struct usb_device_id *match;
+               match = usb_match_id(intf, ath3k_blist_tbl);
+               if (match)
+                       id = match;
+       }
 
-       data->udev = udev;
+       /* load patch and sysconfig files for AR3012 */
+       if (id->driver_info & BTUSB_ATH3012) {
+               ret = ath3k_load_patch(udev);
+               if (ret < 0) {
+                       BT_ERR("Loading patch file failed");
+                       return ret;
+               }
+               ret = ath3k_load_syscfg(udev);
+               if (ret < 0) {
+                       BT_ERR("Loading sysconfig file failed");
+                       return ret;
+               }
+               ret = ath3k_set_normal_mode(udev);
+               if (ret < 0) {
+                       BT_ERR("Set normal mode failed");
+                       return ret;
+               }
+               ath3k_switch_pid(udev);
+               return 0;
+       }
 
        if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
-               kfree(data);
+               BT_ERR("Error loading firmware");
                return -EIO;
        }
 
-       size = max_t(uint, firmware->size, 4096);
-       data->fw_data = kmalloc(size, GFP_KERNEL);
-       if (!data->fw_data) {
-               release_firmware(firmware);
-               kfree(data);
-               return -ENOMEM;
-       }
-
-       memcpy(data->fw_data, firmware->data, firmware->size);
-       data->fw_size = firmware->size;
-       data->fw_sent = 0;
+       ret = ath3k_load_firmware(udev, firmware);
        release_firmware(firmware);
 
-       usb_set_intfdata(intf, data);
-       if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
-               usb_set_intfdata(intf, NULL);
-               kfree(data->fw_data);
-               kfree(data);
-               return -EIO;
-       }
-
-       return 0;
+       return ret;
 }
 
 static void ath3k_disconnect(struct usb_interface *intf)
 {
-       struct ath3k_data *data = usb_get_intfdata(intf);
-
        BT_DBG("ath3k_disconnect intf %p", intf);
-
-       kfree(data->fw_data);
-       kfree(data);
 }
 
 static struct usb_driver ath3k_driver = {
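
Not part of the patch: ath3k_load_patch() above assumes the AR3012 patch file carries its ROM and build versions in the last eight bytes of the image (read at firmware->size - 8 and - 4, simply cast in place on x86, so effectively little-endian). A small host-side sketch that prints those trailer fields from a .dfu file under that assumed layout:

/* Illustrative only; not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	uint8_t t[8];
	uint32_t rom, build;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <AthrBT_xxxxxxxx.dfu>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f || fseek(f, -8L, SEEK_END) != 0 || fread(t, 1, 8, f) != 8) {
		perror(argv[1]);
		return 1;
	}
	fclose(f);

	/* Same field order the driver assumes: rom_version, then build_version. */
	rom   = t[0] | t[1] << 8 | t[2] << 16 | (uint32_t)t[3] << 24;
	build = t[4] | t[5] << 8 | t[6] << 16 | (uint32_t)t[7] << 24;

	printf("rom_version=0x%08x build_version=0x%08x\n",
	       (unsigned)rom, (unsigned)build);
	return 0;
}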
index 1da773f..7e0ebd4 100644 (file)
@@ -102,6 +102,15 @@ static struct usb_device_id blacklist_table[] = {
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 
+       /* Atheros AR9285 Malbec with sflash firmware */
+       { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+       /* Atheros 3012 with sflash firmware */
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
+
+       /* Atheros AR5BBU12 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
        { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -708,15 +717,11 @@ static int btusb_send_frame(struct sk_buff *skb)
                pipe = usb_sndisocpipe(data->udev,
                                        data->isoc_tx_ep->bEndpointAddress);
 
-               urb->dev      = data->udev;
-               urb->pipe     = pipe;
-               urb->context  = skb;
-               urb->complete = btusb_isoc_tx_complete;
-               urb->interval = data->isoc_tx_ep->bInterval;
+               usb_fill_int_urb(urb, data->udev, pipe,
+                               skb->data, skb->len, btusb_isoc_tx_complete,
+                               skb, data->isoc_tx_ep->bInterval);
 
                urb->transfer_flags  = URB_ISO_ASAP;
-               urb->transfer_buffer = skb->data;
-               urb->transfer_buffer_length = skb->len;
 
                __fill_isoc_descriptor(urb, skb->len,
                                le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
@@ -826,7 +831,7 @@ static void btusb_work(struct work_struct *work)
 
        if (hdev->conn_hash.sco_num > 0) {
                if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
-                       err = usb_autopm_get_interface(data->isoc);
+                       err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
                        if (err < 0) {
                                clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
                                usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +860,7 @@ static void btusb_work(struct work_struct *work)
 
                __set_isoc_interface(hdev, 0);
                if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
-                       usb_autopm_put_interface(data->isoc);
+                       usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
        }
 }
 
@@ -1038,8 +1043,6 @@ static int btusb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, data);
 
-       usb_enable_autosuspend(interface_to_usbdev(intf));
-
        return 0;
 }
 
index 3c6cabc..48ad2a7 100644 (file)
@@ -398,6 +398,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
        hdev->flush = hci_uart_flush;
        hdev->send  = hci_uart_send_frame;
        hdev->destruct = hci_uart_destruct;
+       hdev->parent = hu->tty->dev;
 
        hdev->owner = THIS_MODULE;
 
index 14033a3..e2c48a7 100644 (file)
@@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi)
        }
 
        ENSURE(drive_status, CDC_DRIVE_STATUS );
-       ENSURE(media_changed, CDC_MEDIA_CHANGED);
+       if (cdo->check_events == NULL && cdo->media_changed == NULL)
+               *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
        ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
        ENSURE(lock_door, CDC_LOCK);
        ENSURE(select_speed, CDC_SELECT_SPEED);
index 5bc765d..8238f89 100644 (file)
@@ -30,6 +30,7 @@ obj-$(CONFIG_SYNCLINK_GT)     += synclink_gt.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)               += sx.o generic_serial.o
 obj-$(CONFIG_RIO)              += rio/ generic_serial.o
+obj-$(CONFIG_VIRTIO_CONSOLE)   += virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)       += raw.o
 obj-$(CONFIG_SGI_SNSC)         += snsc.o snsc_event.o
 obj-$(CONFIG_MSPEC)            += mspec.o
index fcd867d..d8b1b57 100644 (file)
@@ -50,7 +50,7 @@ config AGP_ATI
 
 config AGP_AMD
        tristate "AMD Irongate, 761, and 762 chipset support"
-       depends on AGP && (X86_32 || ALPHA)
+       depends on AGP && X86_32
        help
          This option gives you AGP support for the GLX component of
          X on AMD Irongate, 761, and 762 chipsets.
index b1b4362..45681c0 100644 (file)
@@ -41,22 +41,8 @@ static int amd_create_page_map(struct amd_page_map *page_map)
        if (page_map->real == NULL)
                return -ENOMEM;
 
-#ifndef CONFIG_X86
-       SetPageReserved(virt_to_page(page_map->real));
-       global_cache_flush();
-       page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
-                                           PAGE_SIZE);
-       if (page_map->remapped == NULL) {
-               ClearPageReserved(virt_to_page(page_map->real));
-               free_page((unsigned long) page_map->real);
-               page_map->real = NULL;
-               return -ENOMEM;
-       }
-       global_cache_flush();
-#else
        set_memory_uc((unsigned long)page_map->real, 1);
        page_map->remapped = page_map->real;
-#endif
 
        for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
                writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -68,12 +54,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
 
 static void amd_free_page_map(struct amd_page_map *page_map)
 {
-#ifndef CONFIG_X86
-       iounmap(page_map->remapped);
-       ClearPageReserved(virt_to_page(page_map->real));
-#else
        set_memory_wb((unsigned long)page_map->real, 1);
-#endif
        free_page((unsigned long) page_map->real);
 }
 
index 9252e85..780498d 100644 (file)
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
 #else
                        printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
 #endif
+                       pci_unregister_driver(&agp_amd64_pci_driver);
                        return -ENODEV;
                }
 
                /* First check that we have at least one AMD64 NB */
-               if (!pci_dev_present(amd_nb_misc_ids))
+               if (!pci_dev_present(amd_nb_misc_ids)) {
+                       pci_unregister_driver(&agp_amd64_pci_driver);
                        return -ENODEV;
+               }
 
                /* Look for any AGP bridge */
                agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
                err = driver_attach(&agp_amd64_pci_driver.driver);
-               if (err == 0 && agp_bridges_found == 0)
+               if (err == 0 && agp_bridges_found == 0) {
+                       pci_unregister_driver(&agp_amd64_pci_driver);
                        err = -ENODEV;
+               }
        }
        return err;
 }
index 857df10..b0a0dcc 100644 (file)
@@ -773,21 +773,15 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 
        dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
-       /*
-       * If the device has not been properly setup, the following will catch
-       * the problem and should stop the system from crashing.
-       * 20030610 - hamish@zot.org
-       */
-       if (pci_enable_device(pdev)) {
-               dev_err(&pdev->dev, "can't enable PCI device\n");
-               agp_put_bridge(bridge);
-               return -ENODEV;
-       }
-
        /*
        * The following fixes the case where the BIOS has "forgotten" to
        * provide an address range for the GART.
        * 20030610 - hamish@zot.org
+       * This happens before pci_enable_device() intentionally;
+       * calling pci_enable_device() before assigning the resource
+       * will result in the GART being disabled on machines with such
+       * BIOSes (the GART ends up with a BAR starting at 0, which
+       * conflicts with a lot of other devices).
        */
        r = &pdev->resource[0];
        if (!r->start && r->end) {
@@ -798,6 +792,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                }
        }
 
+       /*
+       * If the device has not been properly setup, the following will catch
+       * the problem and should stop the system from crashing.
+       * 20030610 - hamish@zot.org
+       */
+       if (pci_enable_device(pdev)) {
+               dev_err(&pdev->dev, "can't enable PCI device\n");
+               agp_put_bridge(bridge);
+               return -ENODEV;
+       }
+
        /* Fill in the mode register */
        if (cap_ptr) {
                pci_read_config_dword(pdev,
index c195bfe..5feebe2 100644 (file)
 #define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
index 826ab09..0d09b53 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -68,13 +69,10 @@ static struct _intel_private {
        phys_addr_t gma_bus_addr;
        u32 PGETBL_save;
        u32 __iomem *gtt;               /* I915G */
+       bool clear_fake_agp; /* on first access via agp, fill with scratch */
        int num_dcache_entries;
-       union {
-               void __iomem *i9xx_flush_page;
-               void *i8xx_flush_page;
-       };
+       void __iomem *i9xx_flush_page;
        char *i81x_gtt_table;
-       struct page *i8xx_page;
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
@@ -721,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-       if (intel_private.i8xx_flush_page) {
-               kunmap(intel_private.i8xx_flush_page);
-               intel_private.i8xx_flush_page = NULL;
-       }
-
-       __free_page(intel_private.i8xx_page);
-       intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-       /* return if we've already set the flush mechanism up */
-       if (intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-       if (!intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-       if (!intel_private.i8xx_flush_page)
-               i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -757,14 +733,27 @@ static void intel_i830_setup_flush(void)
  */
 static void i830_chipset_flush(void)
 {
-       unsigned int *pg = intel_private.i8xx_flush_page;
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+       /* Forcibly evict everything from the CPU write buffers.
+        * clflush appears to be insufficient.
+        */
+       wbinvd_on_all_cpus();
 
-       memset(pg, 0, 1024);
+       /* So far we've only seen documentation for this magic bit on
+        * 855GM; we hope it exists for the other gen2 chipsets...
+        *
+        * Also works as advertised on my 845G.
+        */
+       writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+              intel_private.registers+I830_HIC);
 
-       if (cpu_has_clflush)
-               clflush_cache_range(pg, 1024);
-       else if (wbinvd_on_all_cpus() != 0)
-               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+       while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+               if (time_after(jiffies, timeout))
+                       break;
+
+               udelay(50);
+       }
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -848,8 +837,6 @@ static int i830_setup(void)
 
        intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-       intel_i830_setup_flush();
-
        return 0;
 }
 
@@ -869,21 +856,12 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
 
 static int intel_fake_agp_configure(void)
 {
-       int i;
-
        if (!intel_enable_gtt())
            return -EIO;
 
+       intel_private.clear_fake_agp = true;
        agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-       for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
-               intel_private.driver->write_entry(intel_private.scratch_page_dma,
-                                                 i, 0);
-       }
-       readl(intel_private.gtt+i-1);   /* PCI Posting. */
-
-       global_cache_flush();
-
        return 0;
 }
 
@@ -945,6 +923,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
        int ret = -EINVAL;
 
+       if (intel_private.clear_fake_agp) {
+               int start = intel_private.base.stolen_size / PAGE_SIZE;
+               int end = intel_private.base.gtt_mappable_entries;
+               intel_gtt_clear_range(start, end - start);
+               intel_private.clear_fake_agp = false;
+       }
+
        if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
                return i810_insert_dcache_entries(mem, pg_start, type);
 
index e397df3..1640244 100644 (file)
@@ -183,16 +183,16 @@ bfin_jc_circ_write(const unsigned char *buf, int count)
 }
 
 #ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
-# define acquire_console_sem()
-# define release_console_sem()
+# define console_lock()
+# define console_unlock()
 #endif
 static int
 bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
        int i;
-       acquire_console_sem();
+       console_lock();
        i = bfin_jc_circ_write(buf, count);
-       release_console_sem();
+       console_unlock();
        wake_up_process(bfin_jc_kthread);
        return i;
 }
index b6ae6e9..62787e3 100644 (file)
@@ -320,6 +320,7 @@ static int unload_when_empty = 1;
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
@@ -899,6 +900,14 @@ static void sender(void                *send_info,
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
+       /*
+        * last_timeout_jiffies is updated here to avoid the
+        * smi_timeout() handler passing a very large time_diff
+        * value to smi_event_handler(), which would cause the
+        * send command to abort.
+        */
+       smi_info->last_timeout_jiffies = jiffies;
+
        mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 
        if (smi_info->thread)
@@ -3450,16 +3459,7 @@ static int __devinit init_ipmi_si(void)
        mutex_lock(&smi_infos_lock);
        if (unload_when_empty && list_empty(&smi_infos)) {
                mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_PCI
-               if (pci_registered)
-                       pci_unregister_driver(&ipmi_pci_driver);
-#endif
-
-#ifdef CONFIG_PPC_OF
-               if (of_registered)
-                       of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
-               driver_unregister(&ipmi_driver.driver);
+               cleanup_ipmi_si();
                printk(KERN_WARNING PFX
                       "Unable to find any System Interface(s)\n");
                return -ENODEV;
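
The ipmi_si hunk above refreshes last_timeout_jiffies immediately before re-arming the timer, so the elapsed-time calculation in the timer handler stays small. As a hedged illustration of the kind of calculation being protected (the helper name is made up and nothing here comes from the driver):

#include <linux/jiffies.h>

/* Illustrative only: how a timer handler typically derives elapsed time
 * since the last recorded event.  If 'last' is stale, the result becomes
 * huge -- which is exactly what refreshing the baseline in the send path
 * avoids.
 */
static unsigned int example_elapsed_usecs(unsigned long last)
{
	return jiffies_to_usecs(jiffies - last);
}
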
index 777181a..bcbbc71 100644 (file)
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
                            test_bit(IS_ANY_T1, &dev->flags))) {
                                DEBUGP(4, dev, "Perform AUTOPPS\n");
                                set_bit(IS_AUTOPPS_ACT, &dev->flags);
-                               ptsreq.protocol = ptsreq.protocol =
-                                   (0x01 << dev->proto);
+                               ptsreq.protocol = (0x01 << dev->proto);
                                ptsreq.flags = 0x01;
                                ptsreq.pts1 = 0x00;
                                ptsreq.pts2 = 0x00;
index 94b8eb4..444155a 100644 (file)
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
 static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 {
        struct ipw_dev *ipw = priv_data;
-       struct resource *io_resource;
        int ret;
 
        p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
        if (ret)
                return ret;
 
-       io_resource = request_region(p_dev->resource[0]->start,
-                               resource_size(p_dev->resource[0]),
-                               IPWIRELESS_PCCARD_NAME);
+       if (!request_region(p_dev->resource[0]->start,
+                           resource_size(p_dev->resource[0]),
+                           IPWIRELESS_PCCARD_NAME)) {
+               ret = -EBUSY;
+               goto exit;
+       }
 
        p_dev->resource[2]->flags |=
                WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
        ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
        if (ret != 0)
-               goto exit2;
+               goto exit1;
 
        ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
 
-       ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+       ipw->common_memory = ioremap(p_dev->resource[2]->start,
                                resource_size(p_dev->resource[2]));
-       request_mem_region(p_dev->resource[2]->start,
-                       resource_size(p_dev->resource[2]),
-                       IPWIRELESS_PCCARD_NAME);
+       if (!request_mem_region(p_dev->resource[2]->start,
+                               resource_size(p_dev->resource[2]),
+                               IPWIRELESS_PCCARD_NAME)) {
+               ret = -EBUSY;
+               goto exit2;
+       }
 
        p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
                                        WIN_ENABLE;
        p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
        ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
        if (ret != 0)
-               goto exit2;
+               goto exit3;
 
        ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
        if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
        ipw->attr_memory = ioremap(p_dev->resource[3]->start,
                                resource_size(p_dev->resource[3]));
-       request_mem_region(p_dev->resource[3]->start,
-                       resource_size(p_dev->resource[3]),
-                       IPWIRELESS_PCCARD_NAME);
+       if (!request_mem_region(p_dev->resource[3]->start,
+                               resource_size(p_dev->resource[3]),
+                               IPWIRELESS_PCCARD_NAME)) {
+               ret = -EBUSY;
+               goto exit4;
+       }
 
        return 0;
 
+exit4:
+       iounmap(ipw->attr_memory);
 exit3:
+       release_mem_region(p_dev->resource[2]->start,
+                       resource_size(p_dev->resource[2]));
 exit2:
-       if (ipw->common_memory) {
-               release_mem_region(p_dev->resource[2]->start,
-                               resource_size(p_dev->resource[2]));
-               iounmap(ipw->common_memory);
-       }
+       iounmap(ipw->common_memory);
 exit1:
-       release_resource(io_resource);
+       release_region(p_dev->resource[0]->start,
+                      resource_size(p_dev->resource[0]));
+exit:
        pcmcia_disable_device(p_dev);
-       return -1;
+       return ret;
 }
 
 static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
 
 static void release_ipwireless(struct ipw_dev *ipw)
 {
+       release_region(ipw->link->resource[0]->start,
+                      resource_size(ipw->link->resource[0]));
        if (ipw->common_memory) {
                release_mem_region(ipw->link->resource[2]->start,
                                resource_size(ipw->link->resource[2]));
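
The ipwireless rework above replaces a single catch-all error path with staged labels, each one releasing exactly the resources acquired before the failing step, in reverse order. A hedged, self-contained sketch of the same unwind shape, reusing the request_mem_region()/ioremap() calls seen in the hunk (the function itself is invented):

/* Illustrative sketch only: acquire two resources and, on failure of the
 * second, release only what was already taken.  Not driver code.
 */
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map(resource_size_t start, resource_size_t len,
				 const char *name)
{
	void __iomem *base;

	if (!request_mem_region(start, len, name))
		return NULL;		/* nothing acquired yet */

	base = ioremap(start, len);
	if (!base)
		goto err_region;	/* undo only the region reservation */

	return base;

err_region:
	release_mem_region(start, len);
	return NULL;
}
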
index c17a305..dd21df5 100644 (file)
@@ -493,9 +493,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 
-       if (is_itpm(to_pnp_dev(dev)))
-               itpm = 1;
-
        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");
 
@@ -637,6 +634,9 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
        else
                interrupts = 0;
 
+       if (is_itpm(pnp_dev))
+               itpm = 1;
+
        return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }
 
similarity index 98%
rename from drivers/tty/hvc/virtio_console.c
rename to drivers/char/virtio_console.c
index 896a2ce..84b164d 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
- * Copyright (C) 2009, 2010 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
 #include <linux/virtio_console.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include "hvc_console.h"
+#include "../tty/hvc/hvc_console.h"
 
 /*
  * This is a global struct for storing common data for all the devices
@@ -387,6 +388,10 @@ static void discard_port_data(struct port *port)
        unsigned int len;
        int ret;
 
+       if (!port->portdev) {
+               /* Device has been unplugged.  vqs are already gone. */
+               return;
+       }
        vq = port->in_vq;
        if (port->inbuf)
                buf = port->inbuf;
@@ -469,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
        void *buf;
        unsigned int len;
 
+       if (!port->portdev) {
+               /* Device has been unplugged.  vqs are already gone. */
+               return;
+       }
        while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
                kfree(buf);
                port->outvq_full = false;
@@ -1462,6 +1471,17 @@ static void control_work_handler(struct work_struct *work)
        spin_unlock(&portdev->cvq_lock);
 }
 
+static void out_intr(struct virtqueue *vq)
+{
+       struct port *port;
+
+       port = find_port_by_vq(vq->vdev->priv, vq);
+       if (!port)
+               return;
+
+       wake_up_interruptible(&port->waitqueue);
+}
+
 static void in_intr(struct virtqueue *vq)
 {
        struct port *port;
@@ -1566,7 +1586,7 @@ static int init_vqs(struct ports_device *portdev)
         */
        j = 0;
        io_callbacks[j] = in_intr;
-       io_callbacks[j + 1] = NULL;
+       io_callbacks[j + 1] = out_intr;
        io_names[j] = "input";
        io_names[j + 1] = "output";
        j += 2;
@@ -1580,7 +1600,7 @@ static int init_vqs(struct ports_device *portdev)
                for (i = 1; i < nr_ports; i++) {
                        j += 2;
                        io_callbacks[j] = in_intr;
-                       io_callbacks[j + 1] = NULL;
+                       io_callbacks[j + 1] = out_intr;
                        io_names[j] = "input";
                        io_names[j + 1] = "output";
                }
index cfb0f52..effe797 100644 (file)
@@ -202,17 +202,21 @@ static int __init init_acpi_pm_clocksource(void)
                        printk(KERN_INFO "PM-Timer had inconsistent results:"
                               " 0x%#llx, 0x%#llx - aborting.\n",
                               value1, value2);
+                       pmtmr_ioport = 0;
                        return -EINVAL;
                }
                if (i == ACPI_PM_READ_CHECKS) {
                        printk(KERN_INFO "PM-Timer failed consistency check "
                               " (0x%#llx) - aborting.\n", value1);
+                       pmtmr_ioport = 0;
                        return -ENODEV;
                }
        }
 
-       if (verify_pmtmr_rate() != 0)
+       if (verify_pmtmr_rate() != 0) {
+               pmtmr_ioport = 0;
                return -ENODEV;
+       }
 
        return clocksource_register_hz(&clocksource_acpi_pm,
                                                PMTMR_TICKS_PER_SEC);
index 01b886e..79c47e8 100644 (file)
@@ -196,9 +196,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
        clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
        clkevt.clkevt.cpumask = cpumask_of(0);
 
-       setup_irq(irq, &tc_irqaction);
-
        clockevents_register_device(&clkevt.clkevt);
+
+       setup_irq(irq, &tc_irqaction);
 }
 
 #else /* !CONFIG_GENERIC_CLOCKEVENTS */
index 1109f68..5cb4d09 100644 (file)
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        ret = sysdev_driver_register(&cpu_sysdev_class,
                                        &cpufreq_sysdev_driver);
+       if (ret)
+               goto err_null_driver;
 
-       if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+       if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
                int i;
                ret = -ENODEV;
 
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                if (ret) {
                        dprintk("no CPU initialized for driver %s\n",
                                                        driver_data->name);
-                       sysdev_driver_unregister(&cpu_sysdev_class,
-                                               &cpufreq_sysdev_driver);
-
-                       spin_lock_irqsave(&cpufreq_driver_lock, flags);
-                       cpufreq_driver = NULL;
-                       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+                       goto err_sysdev_unreg;
                }
        }
 
-       if (!ret) {
-               register_hotcpu_notifier(&cpufreq_cpu_notifier);
-               dprintk("driver %s up and running\n", driver_data->name);
-               cpufreq_debug_enable_ratelimit();
-       }
+       register_hotcpu_notifier(&cpufreq_cpu_notifier);
+       dprintk("driver %s up and running\n", driver_data->name);
+       cpufreq_debug_enable_ratelimit();
 
+       return 0;
+err_sysdev_unreg:
+       sysdev_driver_unregister(&cpu_sysdev_class,
+                       &cpufreq_sysdev_driver);
+err_null_driver:
+       spin_lock_irqsave(&cpufreq_driver_lock, flags);
+       cpufreq_driver = NULL;
+       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
index 297f48b..07bca49 100644 (file)
@@ -79,6 +79,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still time out if the DMAC FIFO never drains of data.)
  *
- * Disable the peripheral DMA after disabling the DMAC in order to allow
- * the DMAC FIFO to drain, and hence allow the channel to show inactive
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
        u32 val;
+       int timeout;
 
        /* Set the HALT bit and wait for the FIFO to drain */
        val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
        writel(val, ch->base + PL080_CH_CONFIG);
 
        /* Wait for channel inactive */
-       while (pl08x_phy_channel_busy(ch))
-               cpu_relax();
+       for (timeout = 1000; timeout; timeout--) {
+               if (!pl08x_phy_channel_busy(ch))
+                       break;
+               udelay(1);
+       }
+       if (pl08x_phy_channel_busy(ch))
+               pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+       struct pl08x_phy_chan *ch)
 {
-       u32 val;
+       u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-       pl08x_pause_phy_chan(ch);
+       val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+                PL080_CONFIG_TC_IRQ_MASK);
 
-       /* Disable channel */
-       val = readl(ch->base + PL080_CH_CONFIG);
-       val &= ~PL080_CONFIG_ENABLE;
-       val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-       val &= ~PL080_CONFIG_TC_IRQ_MASK;
        writel(val, ch->base + PL080_CH_CONFIG);
+
+       writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+       writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
        unsigned long flags;
 
+       spin_lock_irqsave(&ch->lock, flags);
+
        /* Stop the channel and clear its interrupts */
-       pl08x_stop_phy_chan(ch);
-       writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-       writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+       pl08x_terminate_phy_chan(pl08x, ch);
 
        /* Mark it as free */
-       spin_lock_irqsave(&ch->lock, flags);
        ch->serving = NULL;
        spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                plchan->state = PL08X_CHAN_IDLE;
 
                if (plchan->phychan) {
-                       pl08x_stop_phy_chan(plchan->phychan);
+                       pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
                        /*
                         * Mark physical channel as free and free any slave
index e53d438..e18eaab 100644 (file)
@@ -49,6 +49,7 @@ struct imxdma_channel {
 
 struct imxdma_engine {
        struct device                   *dev;
+       struct device_dma_parameters    dma_parms;
        struct dma_device               dma_device;
        struct imxdma_channel           channel[MAX_DMA_CHANNELS];
 };
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
        else
                dmamode = DMA_MODE_WRITE;
 
+       switch (imxdmac->word_size) {
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               if (sgl->length & 3 || sgl->dma_address & 3)
+                       return NULL;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               if (sgl->length & 1 || sgl->dma_address & 1)
+                       return NULL;
+               break;
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               break;
+       default:
+               return NULL;
+       }
+
        ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
                 dma_length, imxdmac->per_address, dmamode);
        if (ret)
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&imxdma->dma_device.channels);
 
+       dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+       dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
        /* Initialize channel parameters */
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
                imxdmac->imxdma = imxdma;
                spin_lock_init(&imxdmac->lock);
 
-               dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
-               dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
                imxdmac->chan.device = &imxdma->dma_device;
-               imxdmac->chan.chan_id = i;
                imxdmac->channel = i;
 
                /* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, imxdma);
 
+       imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+       dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
        ret = dma_async_device_register(&imxdma->dma_device);
        if (ret) {
                dev_err(&pdev->dev, "unable to register\n");
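
The imx-dma hunk above rejects scatterlist entries whose DMA address or length is not aligned to the configured slave bus width, masking the low bits (x & 3 for 32-bit, x & 1 for 16-bit accesses). A generic, hedged sketch of the same check (the helper is hypothetical, not part of the driver):

#include <linux/dmaengine.h>
#include <linux/types.h>

/* Illustrative only: true when both the DMA address and the length are
 * aligned to the slave bus width, mirroring the switch in the hunk above.
 */
static bool example_sg_aligned(dma_addr_t addr, size_t len,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return !((addr | len) & 3);
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return !((addr | len) & 1);
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return true;
	default:
		return false;	/* unsupported width: refuse the transfer */
	}
}
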
index d5a5d4d..b6d1455 100644 (file)
@@ -230,7 +230,7 @@ struct sdma_engine;
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma               pointer to the SDMA engine for this channel
- * @channel            the channel number, matches dmaengine chan_id
+ * @channel            the channel number, matches dmaengine chan_id + 1
  * @direction          transfer type. Needed for setting SDMA script
  * @peripheral_type    Peripheral type. Needed for setting SDMA script
  * @event_id0          aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
 
 struct sdma_engine {
        struct device                   *dev;
+       struct device_dma_parameters    dma_parms;
        struct sdma_channel             channel[MAX_DMA_CHANNELS];
        struct sdma_channel_control     *channel_control;
        void __iomem                    *regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
                if (bd->mode.status & BD_RROR)
                        sdmac->status = DMA_ERROR;
                else
-                       sdmac->status = DMA_SUCCESS;
+                       sdmac->status = DMA_IN_PROGRESS;
 
                bd->mode.status |= BD_DONE;
                sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
        __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 {
-       dma_cookie_t cookie = sdma->chan.cookie;
+       dma_cookie_t cookie = sdmac->chan.cookie;
 
        if (++cookie < 0)
                cookie = 1;
 
-       sdma->chan.cookie = cookie;
-       sdma->desc.cookie = cookie;
+       sdmac->chan.cookie = cookie;
+       sdmac->desc.cookie = cookie;
 
        return cookie;
 }
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
        cookie = sdma_assign_cookie(sdmac);
 
-       sdma_enable_channel(sdma, tx->chan->chan_id);
+       sdma_enable_channel(sdma, sdmac->channel);
 
        spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
        struct imx_dma_data *data = chan->private;
        int prio, ret;
 
-       /* No need to execute this for internal channel 0 */
-       if (chan->chan_id == 0)
-               return 0;
-
        if (!data)
                return -EINVAL;
 
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
        int ret, i, count;
-       int channel = chan->chan_id;
+       int channel = sdmac->channel;
        struct scatterlist *sg;
 
        if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                        ret =  -EINVAL;
                        goto err_out;
                }
-               if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+               switch (sdmac->word_size) {
+               case DMA_SLAVE_BUSWIDTH_4_BYTES:
                        bd->mode.command = 0;
-               else
-                       bd->mode.command = sdmac->word_size;
+                       if (count & 3 || sg->dma_address & 3)
+                               return NULL;
+                       break;
+               case DMA_SLAVE_BUSWIDTH_2_BYTES:
+                       bd->mode.command = 2;
+                       if (count & 1 || sg->dma_address & 1)
+                               return NULL;
+                       break;
+               case DMA_SLAVE_BUSWIDTH_1_BYTE:
+                       bd->mode.command = 1;
+                       break;
+               default:
+                       return NULL;
+               }
 
                param = BD_DONE | BD_EXTD | BD_CONT;
 
-               if (sdmac->flags & IMX_DMA_SG_LOOP) {
+               if (i + 1 == sg_len) {
                        param |= BD_INTR;
-                       if (i + 1 == sg_len)
-                               param |= BD_WRAP;
+                       param |= BD_LAST;
+                       param &= ~BD_CONT;
                }
 
-               if (i + 1 == sg_len)
-                       param |= BD_INTR;
-
                dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
                                i, count, sg->dma_address,
                                param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
        return &sdmac->desc;
 err_out:
+       sdmac->status = DMA_ERROR;
        return NULL;
 }
 
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
        int num_periods = buf_len / period_len;
-       int channel = chan->chan_id;
+       int channel = sdmac->channel;
        int ret, i = 0, buf = 0;
 
        dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        dma_cookie_t last_used;
-       enum dma_status ret;
 
        last_used = chan->cookie;
 
-       ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
        dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-       return ret;
+       return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
        /* download the RAM image for SDMA */
        sdma_load_script(sdma, ram_code,
                        header->ram_code_size,
-                       sdma->script_addrs->ram_code_start_addr);
+                       addr->ram_code_start_addr);
        clk_disable(sdma->clk);
 
        sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
        struct resource *iores;
        struct sdma_platform_data *pdata = pdev->dev.platform_data;
        int i;
-       dma_cap_mask_t mask;
        struct sdma_engine *sdma;
 
        sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
 
        sdma->version = pdata->sdma_version;
 
+       dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+       dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
        INIT_LIST_HEAD(&sdma->dma_device.channels);
        /* Initialize channel parameters */
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
                sdmac->sdma = sdma;
                spin_lock_init(&sdmac->lock);
 
-               dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-               dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
                sdmac->chan.device = &sdma->dma_device;
-               sdmac->chan.chan_id = i;
                sdmac->channel = i;
 
-               /* Add the channel to the DMAC list */
-               list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+               /*
+                * Add the channel to the DMAC list. Do not add channel 0 though
+                * because we need it internally in the SDMA driver. This also means
+                * that channel 0 in dmaengine counting matches sdma channel 1.
+                */
+               if (i)
+                       list_add_tail(&sdmac->chan.device_node,
+                                       &sdma->dma_device.channels);
        }
 
        ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
        sdma->dma_device.device_control = sdma_control;
        sdma->dma_device.device_issue_pending = sdma_issue_pending;
+       sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+       dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
        ret = dma_async_device_register(&sdma->dma_device);
        if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
                goto err_init;
        }
 
-       /* request channel 0. This is an internal control channel
-        * to the SDMA engine and not available to clients.
-        */
-       dma_cap_zero(mask);
-       dma_cap_set(DMA_SLAVE, mask);
-       dma_request_channel(mask, NULL, NULL);
-
        dev_info(sdma->dev, "initialized\n");
 
        return 0;
@@ -1348,7 +1354,7 @@ err_clk:
 err_request_region:
 err_irq:
        kfree(sdma);
-       return 0;
+       return ret;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
index cb26ee9..c1a125e 100644 (file)
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
        reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
        idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
-       /*
-        * Problem (observed with channel DMAIC_7): after enabling the channel
-        * and initialising buffers, there comes an interrupt with current still
-        * pointing at buffer 0, whereas it should use buffer 0 first and only
-        * generate an interrupt when it is done, then current should already
-        * point to buffer 1. This spurious interrupt also comes on channel
-        * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
-        * first interrupt, there comes the second with current correctly
-        * pointing to buffer 1 this time. But sometimes this second interrupt
-        * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
-        * the channel seems to prevent the channel from hanging, but it doesn't
-        * prevent the spurious interrupt. This might also be unsafe. Think
-        * about the IDMAC controller trying to switch to a buffer, when we
-        * clear the ready bit, and re-enable it a moment later.
-        */
-       reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
-       idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
-       idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
-       reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
-       idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
-       idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
        spin_unlock_irqrestore(&ipu->lock, flags);
 
        return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
        /* Other interrupts do not interfere with this channel */
        spin_lock(&ichan->lock);
-       if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-                    ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
-                    !list_is_last(ichan->queue.next, &ichan->queue))) {
-               int i = 100;
-
-               /* This doesn't help. See comment in ipu_disable_channel() */
-               while (--i) {
-                       curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
-                       if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
-                               break;
-                       cpu_relax();
-               }
-
-               if (!i) {
-                       spin_unlock(&ichan->lock);
-                       dev_dbg(dev,
-                               "IRQ on active buffer on channel %x, active "
-                               "%d, ready %x, %x, current %x!\n", chan_id,
-                               ichan->active_buffer, ready0, ready1, curbuf);
-                       return IRQ_NONE;
-               } else
-                       dev_dbg(dev,
-                               "Buffer deactivated on channel %x, active "
-                               "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
-                               ichan->active_buffer, ready0, ready1, curbuf, i);
-       }
-
        if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
                     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
                     )) {
index 4a5ecc5..23e0355 100644 (file)
@@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
-       int ganged;
-
        debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
        debugf1("  NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
        debugf1("  DramHoleValid: %s\n",
                (pvt->dhar & DHAR_VALID) ? "yes" : "no");
 
+       amd64_debug_display_dimm_sizes(0, pvt);
+
        /* everything below this point is Fam10h and above */
-       if (boot_cpu_data.x86 == 0xf) {
-               amd64_debug_display_dimm_sizes(0, pvt);
+       if (boot_cpu_data.x86 == 0xf)
                return;
-       }
+
+       amd64_debug_display_dimm_sizes(1, pvt);
 
        amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
 
        /* Only if NOT ganged does dclr1 have valid info */
        if (!dct_ganging_enabled(pvt))
                amd64_dump_dramcfg_low(pvt->dclr1, 1);
-
-       /*
-        * Determine if ganged and then dump memory sizes for first controller,
-        * and if NOT ganged dump info for 2nd controller.
-        */
-       ganged = dct_ganging_enabled(pvt);
-
-       amd64_debug_display_dimm_sizes(0, pvt);
-
-       if (!ganged)
-               amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
                       WARN_ON(ctrl != 0);
        }
 
-       debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
-               ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
+       dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
+       dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
 
-       dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-       dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+       debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
 
        edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
 
index e28e416..bcb1126 100644 (file)
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info)
 
 static void __init dmi_dump_ids(void)
 {
+       const char *board;      /* Board Name is optional */
+
        printk(KERN_DEBUG "DMI: ");
-       print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
-       printk(KERN_CONT "/");
+       print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
+       printk(KERN_CONT " ");
        print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+       board = dmi_get_system_info(DMI_BOARD_NAME);
+       if (board) {
+               printk(KERN_CONT "/");
+               print_filtered(board);
+       }
        printk(KERN_CONT ", BIOS ");
        print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
        printk(KERN_CONT " ");
index d81cc74..54d70a4 100644 (file)
@@ -187,7 +187,7 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
 
 static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-       struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
+       struct lnw_gpio *lnw = get_irq_data(irq);
        u32 base, gpio;
        void __iomem *gedr;
        u32 gedr_v;
@@ -206,7 +206,12 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
                /* clear the edge detect status bit */
                writel(gedr_v, gedr);
        }
-       desc->chip->eoi(irq);
+
+       if (desc->chip->irq_eoi)
+               desc->chip->irq_eoi(irq_get_irq_data(irq));
+       else
+               dev_warn(lnw->chip.dev, "missing EOI handler for irq %d\n", irq);
+
 }
 
 static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
index a261972..b473429 100644 (file)
@@ -60,6 +60,7 @@ struct pca953x_chip {
        unsigned gpio_start;
        uint16_t reg_output;
        uint16_t reg_direction;
+       struct mutex i2c_lock;
 
 #ifdef CONFIG_GPIO_PCA953X_IRQ
        struct mutex irq_lock;
@@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
 
        chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+       mutex_lock(&chip->i2c_lock);
        reg_val = chip->reg_direction | (1u << off);
        ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
        if (ret)
-               return ret;
+               goto exit;
 
        chip->reg_direction = reg_val;
-       return 0;
+       ret = 0;
+exit:
+       mutex_unlock(&chip->i2c_lock);
+       return ret;
 }
 
 static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 
        chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+       mutex_lock(&chip->i2c_lock);
        /* set output level */
        if (val)
                reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 
        ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
        if (ret)
-               return ret;
+               goto exit;
 
        chip->reg_output = reg_val;
 
@@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
        reg_val = chip->reg_direction & ~(1u << off);
        ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
        if (ret)
-               return ret;
+               goto exit;
 
        chip->reg_direction = reg_val;
-       return 0;
+       ret = 0;
+exit:
+       mutex_unlock(&chip->i2c_lock);
+       return ret;
 }
 
 static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 
        chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+       mutex_lock(&chip->i2c_lock);
        ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+       mutex_unlock(&chip->i2c_lock);
        if (ret < 0) {
                /* NOTE:  diagnostic already emitted; that's all we should
                 * do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
 
        chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+       mutex_lock(&chip->i2c_lock);
        if (val)
                reg_val = chip->reg_output | (1u << off);
        else
@@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
 
        ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
        if (ret)
-               return;
+               goto exit;
 
        chip->reg_output = reg_val;
+exit:
+       mutex_unlock(&chip->i2c_lock);
 }
 
 static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 
        chip->names = pdata->names;
 
+       mutex_init(&chip->i2c_lock);
+
        /* initialize cached registers from their original values.
         * we can't share this chip with another i2c master.
         */
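
The pca953x change above introduces an i2c_lock mutex so that the read-modify-write of the cached output/direction registers and the following I2C write cannot interleave between callers. A small, hedged sketch of that shadow-register pattern (the structure and the bus_write callback are invented):

#include <linux/mutex.h>
#include <linux/types.h>

/* Illustrative sketch only: serialize updates to a cached shadow of a
 * device register so concurrent callers cannot clobber each other.
 */
struct example_chip {
	struct mutex lock;	/* guards shadow + bus access */
	u16 shadow;		/* last value written to the device */
};

static int example_set_bit(struct example_chip *chip, unsigned int off,
			   int val, int (*bus_write)(u16))
{
	u16 reg_val;
	int ret;

	mutex_lock(&chip->lock);
	if (val)
		reg_val = chip->shadow | (1u << off);
	else
		reg_val = chip->shadow & ~(1u << off);

	ret = bus_write(reg_val);
	if (!ret)
		chip->shadow = reg_val;	/* keep the old shadow on failure */
	mutex_unlock(&chip->lock);
	return ret;
}
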
index bea966f..0902d44 100644 (file)
@@ -100,7 +100,10 @@ config DRM_I830
 config DRM_I915
        tristate "i915 driver"
        depends on AGP_INTEL
+       # we need shmfs for the swappable backing store, and in particular
+       # the shmem_readpage() which depends upon tmpfs
        select SHMEM
+       select TMPFS
        select DRM_KMS_HELPER
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
index 2baa670..654faa8 100644 (file)
@@ -2674,3 +2674,23 @@ out:
        mutex_unlock(&dev->mode_config.mutex);
        return ret;
 }
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               if (crtc->funcs->reset)
+                       crtc->funcs->reset(crtc);
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               if (encoder->funcs->reset)
+                       encoder->funcs->reset(encoder);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->funcs->reset)
+                       connector->funcs->reset(connector);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
index 952b3d4..9236965 100644 (file)
@@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        struct drm_encoder *encoder;
        bool ret = true;
 
-       adjusted_mode = drm_mode_duplicate(dev, mode);
-
        crtc->enabled = drm_helper_crtc_in_use(crtc);
-
        if (!crtc->enabled)
                return true;
 
+       adjusted_mode = drm_mode_duplicate(dev, mode);
+
        saved_hwmode = crtc->hwmode;
        saved_mode = crtc->mode;
        saved_x = crtc->x;
@@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
         */
        drm_calc_timestamping_constants(crtc);
 
-       /* XXX free adjustedmode */
-       drm_mode_destroy(dev, adjusted_mode);
        /* FIXME: add subpixel order */
 done:
+       drm_mode_destroy(dev, adjusted_mode);
        if (!ret) {
                crtc->hwmode = saved_hwmode;
                crtc->mode = saved_mode;
@@ -497,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
        crtc_funcs = set->crtc->helper_private;
 
+       if (!set->mode)
+               set->fb = NULL;
+
        if (set->fb) {
                DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
                                set->crtc->base.id, set->fb->base.id,
                                (int)set->num_connectors, set->x, set->y);
        } else {
-               DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
-                               set->crtc->base.id, (int)set->num_connectors,
-                               set->x, set->y);
+               DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+               set->mode = NULL;
+               set->num_connectors = 0;
        }
 
        dev = set->crtc->dev;
@@ -649,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
 
        if (mode_changed) {
-               set->crtc->enabled = (set->mode != NULL);
-               if (set->mode != NULL) {
+               set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+               if (set->crtc->enabled) {
                        DRM_DEBUG_KMS("attempting to set mode from"
                                        " userspace\n");
                        drm_mode_debug_printmodeline(set->mode);
@@ -665,6 +666,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                ret = -EINVAL;
                                goto fail;
                        }
+                       DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+                       for (i = 0; i < set->num_connectors; i++) {
+                               DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+                                             drm_get_connector_name(set->connectors[i]));
+                               set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+                       }
                }
                drm_helper_disable_unused_functions(dev);
        } else if (fb_changed) {
@@ -681,12 +688,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                        goto fail;
                }
        }
-       DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-       for (i = 0; i < set->num_connectors; i++) {
-               DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-                             drm_get_connector_name(set->connectors[i]));
-               set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
-       }
 
        kfree(save_connectors);
        kfree(save_encoders);
index 6977a1c..f73ef43 100644 (file)
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
        struct drm_crtc_helper_funcs *crtc_funcs;
        u16 *red, *green, *blue, *transp;
        struct drm_crtc *crtc;
-       int i, rc = 0;
+       int i, j, rc = 0;
        int start;
 
        for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
                transp = cmap->transp;
                start = cmap->start;
 
-               for (i = 0; i < cmap->len; i++) {
+               for (j = 0; j < cmap->len; j++) {
                        u16 hred, hgreen, hblue, htransp = 0xffff;
 
                        hred = *red++;
index 3cdbaf3..be9a9c0 100644 (file)
@@ -283,17 +283,18 @@ int drm_vma_info(struct seq_file *m, void *data)
 #endif
 
        mutex_lock(&dev->struct_mutex);
-       seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+       seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
                   atomic_read(&dev->vma_count),
-                  high_memory, (u64)virt_to_phys(high_memory));
+                  high_memory, (void *)virt_to_phys(high_memory));
 
        list_for_each_entry(pt, &dev->vmalist, head) {
                vma = pt->vma;
                if (!vma)
                        continue;
                seq_printf(m,
-                          "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-                          pt->pid, vma->vm_start, vma->vm_end,
+                          "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+                          pt->pid,
+                          (void *)vma->vm_start, (void *)vma->vm_end,
                           vma->vm_flags & VM_READ ? 'r' : '-',
                           vma->vm_flags & VM_WRITE ? 'w' : '-',
                           vma->vm_flags & VM_EXEC ? 'x' : '-',
index 0054e95..28d1d3c 100644 (file)
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         * available. In that case we can't account for this and just
         * hope for the best.
         */
-       if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+       if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
                atomic_inc(&dev->_vblank_count[crtc]);
+               smp_mb__after_atomic_inc();
+       }
 
        /* Invalidate all timestamps while vblank irq's are off. */
        clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
        /* Dot clock in Hz: */
        dotclock = (u64) crtc->hwmode.clock * 1000;
 
+       /* Fields of interlaced scanout modes are only half a frame duration.
+        * Double the dotclock to get half the frame-/line-/pixel duration.
+        */
+       if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+               dotclock *= 2;
+
        /* Valid dotclock? */
        if (dotclock > 0) {
                /* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                return -EAGAIN;
        }
 
-       /* Don't know yet how to handle interlaced or
-        * double scan modes. Just no-op for now.
-        */
-       if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-               DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-               return -ENOTSUPP;
-       }
-
        /* Get current scanout position with system timestamp.
         * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
         * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
        if (rc) {
                tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
                vblanktimestamp(dev, crtc, tslot) = t_vblank;
-               smp_wmb();
        }
 
+       smp_mb__before_atomic_inc();
        atomic_add(diff, &dev->_vblank_count[crtc]);
+       smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
        struct drm_modeset_ctl *modeset = data;
-       int crtc, ret = 0;
+       int ret = 0;
+       unsigned int crtc;
 
        /* If drm_vblank_init() hasn't been called yet, just no-op */
        if (!dev->num_crtcs)
@@ -1250,7 +1252,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
        u32 vblcount;
        s64 diff_ns;
@@ -1258,7 +1260,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
        unsigned long irqflags;
 
        if (!dev->num_crtcs)
-               return;
+               return false;
 
        /* Need timestamp lock to prevent concurrent execution with
         * vblank enable/disable, as this would cause inconsistent
@@ -1269,7 +1271,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
        /* Vblank irq handling disabled. Nothing to do. */
        if (!dev->vblank_enabled[crtc]) {
                spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-               return;
+               return false;
        }
 
        /* Fetch corresponding timestamp for this vblank interval from
@@ -1293,15 +1295,16 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
         * e.g., due to spurious vblank interrupts. We need to
         * ignore those for accounting.
         */
-       if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+       if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
                /* Store new timestamp in ringbuffer. */
                vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-               smp_wmb();
 
                /* Increment cooked vblank count. This also atomically commits
                 * the timestamp computed above.
                 */
+               smp_mb__before_atomic_inc();
                atomic_inc(&dev->_vblank_count[crtc]);
+               smp_mb__after_atomic_inc();
        } else {
                DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
                          crtc, (int) diff_ns);
@@ -1311,5 +1314,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
        drm_handle_vblank_events(dev, crtc);
 
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+       return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
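
The drm_irq hunks above drop the lone smp_wmb() in favour of smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc() around the vblank count increment, so a reader that observes the new count is also guaranteed to observe the timestamp stored just before it. A hedged, stand-alone sketch of that publish-then-commit pattern (the structure is invented for illustration):

#include <asm/atomic.h>
#include <linux/types.h>

/* Illustrative only: store a timestamp, then commit it by bumping an
 * atomic counter.  The barriers order the timestamp store before the
 * increment, and the increment before whatever the caller does next.
 */
struct example_vbl {
	s64 stamp_ns;
	atomic_t count;
};

static void example_publish(struct example_vbl *v, s64 now_ns)
{
	v->stamp_ns = now_ns;		/* publish the new timestamp ... */

	smp_mb__before_atomic_inc();	/* ... make it visible first ... */
	atomic_inc(&v->count);		/* ... then commit it via the count */
	smp_mb__after_atomic_inc();
}
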
index 3601466..4ff9b6c 100644 (file)
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                int max_freq;
 
                /* RPSTAT1 is in the GT power well */
-               __gen6_force_wake_get(dev_priv);
+               __gen6_gt_force_wake_get(dev_priv);
 
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           max_freq * 100);
 
-               __gen6_force_wake_put(dev_priv);
+               __gen6_gt_force_wake_put(dev_priv);
        } else {
                seq_printf(m, "no P-state info available\n");
        }
index 844f3c9..e33d9be 100644 (file)
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+       int ret;
 
        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        }
 
        if (init->ring_size != 0) {
-               if (ring->obj != NULL) {
+               if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }
 
-               ring->size = init->ring_size;
-
-               ring->map.offset = init->ring_start;
-               ring->map.size = init->ring_size;
-               ring->map.type = 0;
-               ring->map.flags = 0;
-               ring->map.mtrr = 0;
-
-               drm_core_ioremap_wc(&ring->map, dev);
-
-               if (ring->map.handle == NULL) {
+               ret = intel_render_ring_init_dri(dev,
+                                                init->ring_start,
+                                                init->ring_size);
+               if (ret) {
                        i915_dma_cleanup(dev);
-                       DRM_ERROR("can not ioremap virtual address for"
-                                 " ring buffer\n");
-                       return -ENOMEM;
+                       return ret;
                }
        }
 
-       ring->virtual_start = ring->map.handle;
-
        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");
 
-       /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+       /* If we have > 1 VGA cards, then we need to arbitrate access
+        * to the common VGA resources.
+        *
+        * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+        * then we do not take part in VGA arbitration and the
+        * vga_client_register() fails with -ENODEV.
+        */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-       if (ret)
+       if (ret && ret != -ENODEV)
                goto cleanup_ringbuffer;
 
        intel_register_dsm_handler();
@@ -1900,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+        * using 32bit addressing, overwriting memory if HWS is located
+        * above 4GB.
+        *
+        * The documentation also mentions an issue with undefined
+        * behaviour if any general state is accessed within a page above 4GB,
+        * which also needs to be handled carefully.
+        */
+       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
        if (!dev_priv->regs) {
index 72fea2b..22ec066 100644 (file)
@@ -46,6 +46,12 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
@@ -60,7 +66,7 @@ extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {           \
        .class = PCI_CLASS_DISPLAY_VGA << 8,    \
-       .class_mask = 0xffff00,                 \
+       .class_mask = 0xff0000,                 \
        .vendor = 0x8086,                       \
        .device = id,                           \
        .subvendor = PCI_ANY_ID,                \
@@ -251,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
        }
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
@@ -267,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
                udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
        POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+       int loop = 500;
+       u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+       while (fifo < 20 && loop--) {
+               udelay(10);
+               fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+       }
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -354,12 +370,13 @@ static int i915_drm_thaw(struct drm_device *dev)
                error = i915_gem_init_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
 
+               drm_mode_config_reset(dev);
                drm_irq_install(dev);
 
                /* Resume the modeset for every activated CRTC */
                drm_helper_resume_force_mode(dev);
 
-               if (dev_priv->renderctx && dev_priv->pwrctx)
+               if (IS_IRONLAKE_M(dev))
                        ironlake_enable_rc6(dev);
        }
 
@@ -542,6 +559,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 
                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
+               drm_mode_config_reset(dev);
                drm_irq_install(dev);
                mutex_lock(&dev->struct_mutex);
        }
@@ -566,6 +584,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       /* Only bind to function 0 of the device. Early generations
+        * used function 1 as a placeholder for multi-head. Today it
+        * only causes confusion, especially on systems where both
+        * functions have the same PCI ID!
+        */
+       if (PCI_FUNC(pdev->devfn))
+               return -ENODEV;
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
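For context on the PCI_FUNC() test above: a devfn value packs the device (slot) number in bits 7:3 and the function number in bits 2:0, so any non-zero low three bits means a function other than 0. A standalone sketch of that decoding; the macros below are local illustrations, not the kernel's headers.

#include <stdio.h>

#define SLOT_OF(devfn) (((devfn) >> 3) & 0x1f)  /* bits 7:3 */
#define FUNC_OF(devfn) ((devfn) & 0x07)         /* bits 2:0 */

int main(void)
{
        unsigned int devfn = (2 << 3) | 1;      /* device 2, function 1 */

        printf("slot %u function %u -> %s\n", SLOT_OF(devfn), FUNC_OF(devfn),
               FUNC_OF(devfn) ? "skip, not function 0" : "bind");
        return 0;
}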
 
@@ -752,6 +778,9 @@ static int __init i915_init(void)
                driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+       if (!(driver.driver_features & DRIVER_MODESET))
+               driver.get_vblank_timestamp = NULL;
+
        return drm_init(&driver);
 }
 
index 5969f46..456f404 100644 (file)
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
                /** List of all objects in gtt_space. Used to restore gtt
                 * mappings on resume */
                struct list_head gtt_list;
-               /** End of mappable part of GTT */
+
+               /** Usable portion of the GTT for GEM */
+               unsigned long gtt_start;
                unsigned long gtt_mappable_end;
+               unsigned long gtt_end;
 
                struct io_mapping *gtt_mapping;
                int gtt_mtrr;
@@ -953,8 +956,10 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1173,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1349,22 +1357,32 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val;
 
        if (dev_priv->info->gen >= 6) {
-               __gen6_force_wake_get(dev_priv);
+               __gen6_gt_force_wake_get(dev_priv);
                val = I915_READ(reg);
-               __gen6_force_wake_put(dev_priv);
+               __gen6_gt_force_wake_put(dev_priv);
        } else
                val = I915_READ(reg);
 
        return val;
 }
 
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+                               u32 reg, u32 val)
+{
+       if (dev_priv->info->gen >= 6)
+               __gen6_gt_wait_for_fifo(dev_priv);
+       I915_WRITE(reg, val);
+}
+
 static inline void
 i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
index 3dfc848..36e66cc 100644 (file)
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       drm_mm_init(&dev_priv->mm.gtt_space, start,
-                   end - start);
+       drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+       dev_priv->mm.gtt_start = start;
+       dev_priv->mm.gtt_mappable_end = mappable_end;
+       dev_priv->mm.gtt_end = end;
        dev_priv->mm.gtt_total = end - start;
        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-       dev_priv->mm.gtt_mappable_end = mappable_end;
+
+       /* Take over this portion of the GTT */
+       intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
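The intel_gtt_clear_range() call above takes its range in pages, so the byte bounds recorded just before it are converted with start / PAGE_SIZE and (end - start) / PAGE_SIZE. A small worked example, assuming a 4 KiB page size and arbitrary bounds:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long start = 1UL << 20;        /* GEM-managed range: 1 MiB ... */
        unsigned long end   = 256UL << 20;      /* ... up to 256 MiB            */

        printf("first page %lu, page count %lu\n",
               start / PAGE_SIZE, (end - start) / PAGE_SIZE);
        return 0;
}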
@@ -1394,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
        seqno = ring->get_seqno(ring);
 
-       for (i = 0; i < I915_NUM_RINGS; i++)
+       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
                if (seqno >= ring->sync_seqno[i])
                        ring->sync_seqno[i] = 0;
 
index dcfdf41..50ab161 100644 (file)
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
        if (from == NULL || to == from)
                return 0;
 
-       /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-       if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+       /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+       if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
                return i915_gem_object_wait_rendering(obj, true);
 
        idx = intel_ring_sync_index(from, to);
@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
 
        seqno = i915_gem_next_request_seqno(dev, ring);
-       for (i = 0; i < I915_NUM_RINGS-1; i++) {
+       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
                if (seqno < ring->sync_seqno[i]) {
                        /* The GPU can not handle its semaphore value wrapping,
                         * so every billion or so execbuffers, we need to stall
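The stall described in the comment above exists because ring seqnos are 32-bit counters that eventually wrap. The standalone sketch below (hypothetical helpers, not driver code) shows why a plain unsigned comparison misbehaves across a wrap while comparing the signed difference tolerates it, which is what makes an occasional pre-wrap stall a simple way out.

#include <stdint.h>
#include <stdio.h>

static int passed_naive(uint32_t current, uint32_t target)
{
        return current >= target;                /* breaks once the counter wraps */
}

static int passed_wrap_safe(uint32_t current, uint32_t target)
{
        return (int32_t)(current - target) >= 0; /* ok while values are < 2^31 apart */
}

int main(void)
{
        uint32_t target = 0xfffffff0u;          /* issued just before the wrap */
        uint32_t current = 0x00000010u;         /* completed just after it     */

        printf("naive: %d, wrap-safe: %d\n",
               passed_naive(current, target),
               passed_wrap_safe(current, target));
        return 0;
}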
index 70433ae..b0abdc6 100644 (file)
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
+       /* First fill our portion of the GTT with scratch pages */
+       intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+                             (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                i915_gem_clflush_object(obj);
 
index 22a32b9..d64843e 100644 (file)
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                        (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
                         i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-               obj->tiling_changed = true;
-               obj->tiling_mode = args->tiling_mode;
-               obj->stride = args->stride;
+               /* Rebind if we need a change of alignment */
+               if (!obj->map_and_fenceable) {
+                       u32 unfenced_alignment =
+                               i915_gem_get_unfenced_gtt_alignment(obj);
+                       if (obj->gtt_offset & (unfenced_alignment - 1))
+                               ret = i915_gem_object_unbind(obj);
+               }
+
+               if (ret == 0) {
+                       obj->tiling_changed = true;
+                       obj->tiling_mode = args->tiling_mode;
+                       obj->stride = args->stride;
+               }
        }
+       /* we have to maintain this existing ABI... */
+       args->stride = obj->stride;
+       args->tiling_mode = obj->tiling_mode;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 
-       return 0;
+       return ret;
 }
 
 /**
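The rebind test above relies on the usual power-of-two alignment trick: when the alignment is a power of two, offset & (alignment - 1) is the remainder, so any non-zero result means the current GTT offset no longer satisfies the required alignment. A tiny standalone illustration:

#include <stdio.h>

static int misaligned(unsigned long offset, unsigned long alignment)
{
        /* alignment must be a power of two for this to be the remainder */
        return (offset & (alignment - 1)) != 0;
}

int main(void)
{
        printf("0x21000 vs 64KiB: %d\n", misaligned(0x21000, 0x10000)); /* 1 */
        printf("0x40000 vs 64KiB: %d\n", misaligned(0x40000, 0x10000)); /* 0 */
        return 0;
}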
index b8e509a..8a9e08b 100644 (file)
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
 {
-       struct drm_crtc *drmcrtc;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
 
-       if (crtc < 0 || crtc >= dev->num_crtcs) {
-               DRM_ERROR("Invalid crtc %d\n", crtc);
+       if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+               DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }
 
        /* Get drm_crtc to timestamp: */
-       drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+       crtc = intel_get_crtc_for_pipe(dev, pipe);
+       if (crtc == NULL) {
+               DRM_ERROR("Invalid crtc %d\n", pipe);
+               return -EINVAL;
+       }
+
+       if (!crtc->enabled) {
+               DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+               return -EBUSY;
+       }
 
        /* Helper routine in DRM core does all the work: */
-       return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-                                                    vblank_time, flags, drmcrtc);
+       return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+                                                    vblank_time, flags,
+                                                    crtc);
 }
 
 /*
@@ -305,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
 
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);
@@ -348,8 +361,12 @@ static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 seqno = ring->get_seqno(ring);
+       u32 seqno;
 
+       if (ring->obj == NULL)
+               return;
+
+       seqno = ring->get_seqno(ring);
        trace_i915_gem_request_complete(dev, seqno);
 
        ring->irq_seqno = seqno;
@@ -831,6 +848,8 @@ static void i915_capture_error_state(struct drm_device *dev)
                i++;
        error->pinned_bo_count = i - error->active_bo_count;
 
+       error->active_bo = NULL;
+       error->pinned_bo = NULL;
        if (i) {
                error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
                                           GFP_ATOMIC);
@@ -1179,18 +1198,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                                intel_finish_page_flip_plane(dev, 1);
                }
 
-               if (pipea_stats & vblank_status) {
+               if (pipea_stats & vblank_status &&
+                   drm_handle_vblank(dev, 0)) {
                        vblank++;
-                       drm_handle_vblank(dev, 0);
                        if (!dev_priv->flip_pending_is_done) {
                                i915_pageflip_stall_check(dev, 0);
                                intel_finish_page_flip(dev, 0);
                        }
                }
 
-               if (pipeb_stats & vblank_status) {
+               if (pipeb_stats & vblank_status &&
+                   drm_handle_vblank(dev, 1)) {
                        vblank++;
-                       drm_handle_vblank(dev, 1);
                        if (!dev_priv->flip_pending_is_done) {
                                i915_pageflip_stall_check(dev, 1);
                                intel_finish_page_flip(dev, 1);
@@ -1278,12 +1297,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-       ret = -ENODEV;
        if (ring->irq_get(ring)) {
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
-       }
+       } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+               ret = -EBUSY;
 
        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1632,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        } else {
                hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                               SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-               hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-               I915_WRITE(FDI_RXA_IMR, 0);
-               I915_WRITE(FDI_RXB_IMR, 0);
+               hotplug_mask |= SDE_AUX_MASK;
        }
 
        dev_priv->pch_irq_mask = ~hotplug_mask;
index 40a407f..2abe240 100644 (file)
  *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)        MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW            MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW            MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_INVALIDATE_TLB    (1<<18)
+#define   MI_INVALIDATE_BSD    (1<<7)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE  (1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define   GEN6_BLITTER_SYNC_STATUS                     (1 << 24)
 #define   GEN6_BLITTER_USER_INTERRUPT                  (1 << 22)
 
+#define GEN6_BLITTER_ECOSKPD   0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT                      16
+#define   GEN6_BLITTER_FBC_NOTIFY                      (1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL    0x12050
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK      (1 << 16)
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE          (1 << 0)
 #define DISPLAY_PORT_PLL_BIOS_2         0x46014
 
 #define PCH_DSPCLK_GATE_D      0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE           (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE          (1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE           (1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE          (1 << 5)
 
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_ACK                         0x130090
 
+#define  GT_FIFO_FREE_ENTRIES                  0x120008
+
 #define GEN6_RPNSWREQ                          0xA008
 #define   GEN6_TURBO_DISABLE                   (1<<31)
 #define   GEN6_FREQUENCY(x)                    ((x)<<25)
index 17035b8..8a77ff4 100644 (file)
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
        return 0;
 }
 
+static void intel_crt_reset(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct intel_crt *crt = intel_attached_crt(connector);
+
+       if (HAS_PCH_SPLIT(dev))
+               crt->force_hotplug_required = 1;
+}
+
 /*
  * Routines for controlling stuff on the analog port
  */
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
+       .reset = intel_crt_reset,
        .dpms = drm_helper_connector_dpms,
        .detect = intel_crt_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
index 98967f3..49fb54f 100644 (file)
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 blt_ecoskpd;
+
+       /* Make sure blitter notifies FBC of writes */
+       __gen6_gt_force_wake_get(dev_priv);
+       blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+       blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+               GEN6_BLITTER_LOCK_SHIFT;
+       I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+       blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+       I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+       blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+                        GEN6_BLITTER_LOCK_SHIFT);
+       I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+       POSTING_READ(GEN6_BLITTER_ECOSKPD);
+       __gen6_gt_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
        struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+               sandybridge_blit_fbc_update(dev);
        }
 
        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -1609,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
                wait_event(dev_priv->pending_flip_queue,
+                          atomic_read(&dev_priv->mm.wedged) ||
                           atomic_read(&obj->pending_flip) == 0);
 
                /* Big Hammer, we also need to ensure that any pending
                 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                 * current scanout is retired before unpinning the old
                 * framebuffer.
+                *
+                * This should only fail upon a hung GPU, in which case we
+                * can safely continue.
                 */
                ret = i915_gem_object_flush_gpu(obj, false);
-               if (ret) {
-                       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               (void) ret;
        }
 
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2024,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+
+       /*
+        * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+        * must be driven by its own crtc; no sharing is possible.
+        */
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               if (encoder->base.crtc != crtc)
+                       continue;
+
+               switch (encoder->type) {
+               case INTEL_OUTPUT_EDP:
+                       if (!intel_encoder_is_pch_edp(&encoder->base))
+                               return false;
+                       continue;
+               }
+       }
+
+       return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -2032,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 reg, temp;
+       bool is_pch_port = false;
 
        if (intel_crtc->active)
                return;
@@ -2045,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                        I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }
 
-       ironlake_fdi_enable(crtc);
+       is_pch_port = intel_crtc_driving_pch(crtc);
+
+       if (is_pch_port)
+               ironlake_fdi_enable(crtc);
+       else {
+               /* disable CPU FDI tx and PCH FDI rx */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+               POSTING_READ(reg);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(0x7 << 16);
+               temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+               I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+               POSTING_READ(reg);
+               udelay(100);
+
+               /* Ironlake workaround, disable clock pointer after downing FDI */
+               if (HAS_PCH_IBX(dev))
+                       I915_WRITE(FDI_RX_CHICKEN(pipe),
+                                  I915_READ(FDI_RX_CHICKEN(pipe) &
+                                            ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+               /* still set train pattern 1 */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+               I915_WRITE(reg, temp);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               } else {
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1;
+               }
+               /* BPC in FDI rx is consistent with that in PIPECONF */
+               temp &= ~(0x07 << 16);
+               temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
+               udelay(100);
+       }
 
        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
@@ -2079,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                intel_flush_display_plane(dev, plane);
        }
 
+       /* Skip the PCH stuff if possible */
+       if (!is_pch_port)
+               goto done;
+
        /* For PCH output, training FDI link */
        if (IS_GEN6(dev))
                gen6_fdi_link_train(crtc);
@@ -2163,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        I915_WRITE(reg, temp | TRANS_ENABLE);
        if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
                DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
        intel_crtc_load_lut(crtc);
        intel_update_fbc(dev);
        intel_crtc_update_cursor(crtc, true);
@@ -5530,6 +5630,16 @@ cleanup_work:
        return ret;
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       /* Reset flags back to the 'unknown' status so that they
+        * will be correctly set on the initial modeset.
+        */
+       intel_crtc->dpms_mode = -1;
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .dpms = intel_crtc_dpms,
        .mode_fixup = intel_crtc_mode_fixup,
@@ -5541,6 +5651,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
+       .reset = intel_crtc_reset,
        .cursor_set = intel_crtc_cursor_set,
        .cursor_move = intel_crtc_cursor_move,
        .gamma_set = intel_crtc_gamma_set,
@@ -5631,8 +5742,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-       intel_crtc->cursor_addr = 0;
-       intel_crtc->dpms_mode = -1;
+       intel_crtc_reset(&intel_crtc->base);
        intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -6172,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
         * userspace...
         */
        I915_WRITE(GEN6_RC_STATE, 0);
-       __gen6_force_wake_get(dev_priv);
+       __gen6_gt_force_wake_get(dev_priv);
 
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6270,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        /* enable all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
 
-       __gen6_force_wake_put(dev_priv);
+       __gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
@@ -6286,7 +6396,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
 
                if (IS_GEN5(dev)) {
                        /* Required for FBC */
-                       dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+                       dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+                               DPFCRUNIT_CLOCK_GATE_DISABLE |
+                               DPFDUNIT_CLOCK_GATE_DISABLE;
                        /* Required for CxSR */
                        dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
@@ -6429,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
        }
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (dev_priv->renderctx) {
-               struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-               I915_WRITE(CCID, 0);
-               POSTING_READ(CCID);
-
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(&obj->base);
+               i915_gem_object_unpin(dev_priv->renderctx);
+               drm_gem_object_unreference(&dev_priv->renderctx->base);
                dev_priv->renderctx = NULL;
        }
 
        if (dev_priv->pwrctx) {
-               struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+               i915_gem_object_unpin(dev_priv->pwrctx);
+               drm_gem_object_unreference(&dev_priv->pwrctx->base);
+               dev_priv->pwrctx = NULL;
+       }
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (I915_READ(PWRCTXA)) {
+               /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+               I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+               wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+                        50);
 
                I915_WRITE(PWRCTXA, 0);
                POSTING_READ(PWRCTXA);
 
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(&obj->base);
-               dev_priv->pwrctx = NULL;
+               I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+               POSTING_READ(RSTDBYCTL);
        }
+
+       ironlake_teardown_rc6(dev);
 }
 
-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-       I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-       wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-                10);
-       POSTING_READ(CCID);
-       I915_WRITE(PWRCTXA, 0);
-       POSTING_READ(PWRCTXA);
-       I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-       POSTING_READ(RSTDBYCTL);
-       i915_gem_object_unpin(dev_priv->renderctx);
-       drm_gem_object_unreference(&dev_priv->renderctx->base);
-       dev_priv->renderctx = NULL;
-       i915_gem_object_unpin(dev_priv->pwrctx);
-       drm_gem_object_unreference(&dev_priv->pwrctx->base);
-       dev_priv->pwrctx = NULL;
+       if (dev_priv->renderctx == NULL)
+               dev_priv->renderctx = intel_alloc_context_page(dev);
+       if (!dev_priv->renderctx)
+               return -ENOMEM;
+
+       if (dev_priv->pwrctx == NULL)
+               dev_priv->pwrctx = intel_alloc_context_page(dev);
+       if (!dev_priv->pwrctx) {
+               ironlake_teardown_rc6(dev);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 void ironlake_enable_rc6(struct drm_device *dev)
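The ironlake_setup_rc6()/ironlake_teardown_rc6() split above follows a common pattern: allocate each context page only if it is still missing, and on a partial failure release whatever was already allocated so the caller can simply bail out. A generic standalone sketch of that shape; malloc() and the struct name stand in for the context-page allocations.

#include <stdio.h>
#include <stdlib.h>

struct rc6_ctx {
        void *render;
        void *power;
};

static void teardown(struct rc6_ctx *c)
{
        free(c->render); c->render = NULL;
        free(c->power);  c->power  = NULL;
}

static int setup(struct rc6_ctx *c)
{
        if (c->render == NULL)
                c->render = malloc(4096);
        if (c->render == NULL)
                return -1;

        if (c->power == NULL)
                c->power = malloc(4096);
        if (c->power == NULL) {
                teardown(c);                    /* undo the first allocation */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct rc6_ctx c = { 0 };

        printf("setup: %d\n", setup(&c));
        teardown(&c);
        return 0;
}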
@@ -6482,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       /* RC6 is disabled by default due to repeated reports of hangs
+        * during boot and resume.
+        */
+       if (!i915_enable_rc6)
+               return;
+
+       ret = ironlake_setup_rc6(dev);
+       if (ret)
+               return;
+
        /*
         * GPU can automatically power down the render unit if given a page
         * to save state.
         */
        ret = BEGIN_LP_RING(6);
        if (ret) {
-               ironlake_disable_rc6(dev);
+               ironlake_teardown_rc6(dev);
                return;
        }
+
        OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
        OUT_RING(MI_SET_CONTEXT);
        OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6507,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6749,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev)
        if (IS_GEN6(dev))
                gen6_enable_rps(dev_priv);
 
-       if (IS_IRONLAKE_M(dev)) {
-               dev_priv->renderctx = intel_alloc_context_page(dev);
-               if (!dev_priv->renderctx)
-                       goto skip_rc6;
-               dev_priv->pwrctx = intel_alloc_context_page(dev);
-               if (!dev_priv->pwrctx) {
-                       i915_gem_object_unpin(dev_priv->renderctx);
-                       drm_gem_object_unreference(&dev_priv->renderctx->base);
-                       dev_priv->renderctx = NULL;
-                       goto skip_rc6;
-               }
+       if (IS_IRONLAKE_M(dev))
                ironlake_enable_rc6(dev);
-       }
 
-skip_rc6:
        INIT_WORK(&dev_priv->idle_work, intel_idle_update);
        setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
                    (unsigned long)dev);
index 1f4242b..51cb4e3 100644 (file)
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        return 0;
 }
 
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct edid *edid;
+       bool has_audio = false;
+
+       edid = drm_get_edid(connector, &intel_dp->adapter);
+       if (edid) {
+               has_audio = drm_detect_monitor_audio(edid);
+
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+
+       return has_audio;
+}
+
 static int
 intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
                return ret;
 
        if (property == intel_dp->force_audio_property) {
-               if (val == intel_dp->force_audio)
+               int i = val;
+               bool has_audio;
+
+               if (i == intel_dp->force_audio)
                        return 0;
 
-               intel_dp->force_audio = val;
+               intel_dp->force_audio = i;
 
-               if (val > 0 && intel_dp->has_audio)
-                       return 0;
-               if (val < 0 && !intel_dp->has_audio)
+               if (i == 0)
+                       has_audio = intel_dp_detect_audio(connector);
+               else
+                       has_audio = i > 0;
+
+               if (has_audio == intel_dp->has_audio)
                        return 0;
 
-               intel_dp->has_audio = val > 0;
+               intel_dp->has_audio = has_audio;
                goto done;
        }
 
index 74db255..2c43104 100644 (file)
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
                                    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
index 0d0273e..c635c9e 100644 (file)
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
                                   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 }
 
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct edid *edid;
+       bool has_audio = false;
+
+       edid = drm_get_edid(connector,
+                           &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+       if (edid) {
+               if (edid->input & DRM_EDID_INPUT_DIGITAL)
+                       has_audio = drm_detect_monitor_audio(edid);
+
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+
+       return has_audio;
+}
+
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
                      struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
                return ret;
 
        if (property == intel_hdmi->force_audio_property) {
-               if (val == intel_hdmi->force_audio)
+               int i = val;
+               bool has_audio;
+
+               if (i == intel_hdmi->force_audio)
                        return 0;
 
-               intel_hdmi->force_audio = val;
+               intel_hdmi->force_audio = i;
 
-               if (val > 0 && intel_hdmi->has_audio)
-                       return 0;
-               if (val < 0 && !intel_hdmi->has_audio)
+               if (i == 0)
+                       has_audio = intel_hdmi_detect_audio(connector);
+               else
+                       has_audio = i > 0;
+
+               if (has_audio == intel_hdmi->has_audio)
                        return 0;
 
-               intel_hdmi->has_audio = val > 0;
+               intel_hdmi->has_audio = has_audio;
                goto done;
        }
 
index ace8d5d..bcdba7b 100644 (file)
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                return true;
        }
 
-       /* Make sure pre-965s set dither correctly */
-       if (INTEL_INFO(dev)->gen < 4) {
-               if (dev_priv->lvds_dither)
-                       pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-       }
-
        /* Native modes don't need fitting */
        if (adjusted_mode->hdisplay == mode->hdisplay &&
            adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
        }
 
 out:
+       /* If not enabling scaling, be consistent and always use 0. */
        if ((pfit_control & PFIT_ENABLE) == 0) {
                pfit_control = 0;
                pfit_pgm_ratios = 0;
        }
+
+       /* Make sure pre-965 set dither correctly */
+       if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
        if (pfit_control != intel_lvds->pfit_control ||
            pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
                intel_lvds->pfit_control = pfit_control;
index f295a7a..64fd644 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>
 
 #include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
                return -ENOTSUPP;
        }
 
-       base = ioremap(asls, OPREGION_SIZE);
+       base = acpi_os_ioremap(asls, OPREGION_SIZE);
        if (!base)
                return -ENOMEM;
 
index c65992d..f8f86e5 100644 (file)
@@ -208,7 +208,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
                        val &= ~1;
                        pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
                        val *= lbpc;
-                       val >>= 1;
                }
        }
 
@@ -235,11 +234,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 
        if (is_backlight_combination_mode(dev)){
                u32 max = intel_panel_get_max_backlight(dev);
-               u8 lpbc;
+               u8 lbpc;
 
-               lpbc = level * 0xfe / max + 1;
-               level /= lpbc;
-               pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+               lbpc = level * 0xfe / max + 1;
+               level /= lbpc;
+               pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
        }
 
        tmp = I915_READ(BLC_PWM_CTL);
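In combination mode the requested level is split into a coarse legacy backlight byte (lbpc) and a finer PWM value, so that pwm * lbpc lands close to the requested level. A quick worked example with an arbitrary maximum:

#include <stdio.h>

int main(void)
{
        unsigned int max = 10000, level = 6250;

        unsigned int lbpc = level * 0xfe / max + 1; /* coarse factor, 1..0xff */
        unsigned int pwm  = level / lbpc;           /* fine PWM duty cycle    */

        printf("level=%u -> lbpc=%u pwm=%u (pwm*lbpc=%u)\n",
               level, lbpc, pwm, pwm * lbpc);
        return 0;
}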
index f6b9baa..445f27e 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+       int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+       if (space < 0)
+               space += ring->size;
+       return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
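The ring_space() helper added above computes free space in a circular buffer: head minus (tail plus a small reserve), wrapped back into range when the result goes negative. Keeping a few bytes in reserve is a common way to keep a full ring from looking identical to an empty one, though that rationale is an editorial note rather than something stated in the patch. A standalone version of the arithmetic:

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
        int space = head - (tail + 8);          /* 8-byte reserve, as above */

        if (space < 0)
                space += size;
        return space;
}

int main(void)
{
        printf("%d\n", ring_space(1024, 256, 4096));  /* head ahead of tail: 760  */
        printf("%d\n", ring_space(256, 1024, 4096));  /* tail wrapped ahead: 3320 */
        return 0;
}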
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
-               ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+               ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->size;
+               ring->space = ring_space(ring);
        }
 
        return 0;
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
        }
 
        ring->tail = 0;
-       ring->space = ring->head - 8;
+       ring->space = ring_space(ring);
 
        return 0;
 }
 
 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
-       int reread = 0;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        u32 head;
 
+       /* If the reported head position has wrapped or hasn't advanced,
+        * fall back to the slow and accurate path.
+        */
+       head = intel_read_status_page(ring, 4);
+       if (head > ring->head) {
+               ring->head = head;
+               ring->space = ring_space(ring);
+               if (ring->space >= n)
+                       return 0;
+       }
+
        trace_i915_ring_wait_begin (dev);
        end = jiffies + 3 * HZ;
        do {
-               /* If the reported head position has wrapped or hasn't advanced,
-                * fallback to the slow and accurate path.
-                */
-               head = intel_read_status_page(ring, 4);
-               if (reread)
-                       head = I915_READ_HEAD(ring);
-               ring->head = head & HEAD_ADDR;
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->size;
+               ring->head = I915_READ_HEAD(ring);
+               ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(dev);
                        return 0;
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
                msleep(1);
                if (atomic_read(&dev_priv->mm.wedged))
                        return -EAGAIN;
-               reread = 1;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end (dev);
        return -EBUSY;
@@ -1052,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-                          u32 invalidate_domains,
-                          u32 flush_domains)
+                          u32 invalidate, u32 flush)
 {
+       uint32_t cmd;
        int ret;
 
-       if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+       if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_FLUSH_DW);
-       intel_ring_emit(ring, 0);
+       cmd = MI_FLUSH_DW;
+       if (invalidate & I915_GEM_GPU_DOMAINS)
+               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+       intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
 }
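gen6_ring_flush() now builds the flush command by OR-ing the invalidate bits into the base opcode only when an invalidate was actually requested, and fills the fourth reserved dword with MI_NOOP. A standalone sketch of the conditional flag composition; the bit positions below are illustrative constants, not the hardware encoding.

#include <stdint.h>
#include <stdio.h>

#define CMD_FLUSH       (1u << 26)              /* illustrative opcode bit */
#define FLAG_INVAL_TLB  (1u << 18)              /* illustrative flag bits  */
#define FLAG_INVAL_BSD  (1u << 7)

static uint32_t build_flush(int invalidate)
{
        uint32_t cmd = CMD_FLUSH;

        if (invalidate)
                cmd |= FLAG_INVAL_TLB | FLAG_INVAL_BSD;
        return cmd;
}

int main(void)
{
        printf("flush only : 0x%08x\n", build_flush(0));
        printf("+invalidate: 0x%08x\n", build_flush(1));
        return 0;
}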
@@ -1223,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-                          u32 invalidate_domains,
-                          u32 flush_domains)
+                         u32 invalidate, u32 flush)
 {
+       uint32_t cmd;
        int ret;
 
-       if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+       if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
                return 0;
 
        ret = blt_ring_begin(ring, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_FLUSH_DW);
-       intel_ring_emit(ring, 0);
+       cmd = MI_FLUSH_DW;
+       if (invalidate & I915_GEM_DOMAIN_RENDER)
+               cmd |= MI_INVALIDATE_TLB;
+       intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
 }
@@ -1292,6 +1305,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+       *ring = render_ring;
+       if (INTEL_INFO(dev)->gen >= 6) {
+               ring->add_request = gen6_add_request;
+               ring->irq_get = gen6_render_ring_get_irq;
+               ring->irq_put = gen6_render_ring_put_irq;
+       } else if (IS_GEN5(dev)) {
+               ring->add_request = pc_render_add_request;
+               ring->get_seqno = pc_render_get_seqno;
+       }
+
+       ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+
+       ring->size = size;
+       ring->effective_size = ring->size;
+       if (IS_I830(ring->dev))
+               ring->effective_size -= 128;
+
+       ring->map.offset = start;
+       ring->map.size = size;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
+
+       drm_core_ioremap_wc(&ring->map, dev);
+       if (ring->map.handle == NULL) {
+               DRM_ERROR("can not ioremap virtual address for"
+                         " ring buffer\n");
+               return -ENOMEM;
+       }
+
+       ring->virtual_start = (void __force __iomem *)ring->map.handle;
+       return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
index 5b0abfa..3430686 100644 (file)
@@ -14,22 +14,23 @@ struct  intel_hw_status_page {
        struct          drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
 #define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -166,4 +167,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
index 45cd376..7c50cdc 100644 (file)
@@ -46,6 +46,7 @@
                          SDVO_TV_MASK)
 
 #define IS_TV(c)       (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)     (c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)     (c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
 
@@ -473,20 +474,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
                return false;
        }
 
-       i = 3;
-       while (status == SDVO_CMD_STATUS_PENDING && i--) {
-               if (!intel_sdvo_read_byte(intel_sdvo,
-                                         SDVO_I2C_CMD_STATUS,
-                                         &status))
-                       return false;
-       }
-       if (status != SDVO_CMD_STATUS_SUCCESS) {
-               DRM_DEBUG_KMS("command returns response %s [%d]\n",
-                             status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
-                             status);
-               return false;
-       }
-
        return true;
 }
 
@@ -497,6 +484,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
        u8 status;
        int i;
 
+       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
        /*
         * The documentation states that all commands will be
         * processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
         *
         * Check 5 times in case the hardware failed to read the docs.
         */
-       do {
+       if (!intel_sdvo_read_byte(intel_sdvo,
+                                 SDVO_I2C_CMD_STATUS,
+                                 &status))
+               goto log_fail;
+
+       while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+               udelay(15);
                if (!intel_sdvo_read_byte(intel_sdvo,
                                          SDVO_I2C_CMD_STATUS,
                                          &status))
-                       return false;
-       } while (status == SDVO_CMD_STATUS_PENDING && --retry);
+                       goto log_fail;
+       }
 
-       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
        if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
                DRM_LOG_KMS("(%s)", cmd_status_names[status]);
        else
@@ -533,7 +527,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
        return true;
 
 log_fail:
-       DRM_LOG_KMS("\n");
+       DRM_LOG_KMS("... failed\n");
        return false;
 }
 
@@ -550,6 +544,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
 static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
                                              u8 ddc_bus)
 {
+       /* This must be the immediately preceding write before the i2c xfer */
        return intel_sdvo_write_cmd(intel_sdvo,
                                    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
                                    &ddc_bus, 1);
@@ -557,7 +552,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
 
 static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
 {
-       return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+       if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+               return false;
+
+       return intel_sdvo_read_response(intel_sdvo, NULL, 0);
 }
 
 static bool
@@ -859,18 +857,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
 
        intel_dip_infoframe_csum(&avi_if);
 
-       if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+       if (!intel_sdvo_set_value(intel_sdvo,
+                                 SDVO_CMD_SET_HBUF_INDEX,
                                  set_buf_index, 2))
                return false;
 
        for (i = 0; i < sizeof(avi_if); i += 8) {
-               if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+               if (!intel_sdvo_set_value(intel_sdvo,
+                                         SDVO_CMD_SET_HBUF_DATA,
                                          data, 8))
                        return false;
                data++;
        }
 
-       return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+       return intel_sdvo_set_value(intel_sdvo,
+                                   SDVO_CMD_SET_HBUF_TXRATE,
                                    &tx_rate, 1);
 }
 
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
                                intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
                                intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
                        }
-               }
+               } else
+                       status = connector_status_disconnected;
                connector->display_info.raw_edid = NULL;
                kfree(edid);
        }
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
        if ((intel_sdvo_connector->output_flag & response) == 0)
                ret = connector_status_disconnected;
-       else if (response & SDVO_TMDS_MASK)
+       else if (IS_TMDS(intel_sdvo_connector))
                ret = intel_sdvo_hdmi_sink_detect(connector);
-       else
-               ret = connector_status_connected;
+       else {
+               struct edid *edid;
+
+               /* if we have an edid check it matches the connection */
+               edid = intel_sdvo_get_edid(connector);
+               if (edid == NULL)
+                       edid = intel_sdvo_get_analog_edid(connector);
+               if (edid != NULL) {
+                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
+                               ret = connector_status_disconnected;
+                       else
+                               ret = connector_status_connected;
+                       connector->display_info.raw_edid = NULL;
+                       kfree(edid);
+               } else
+                       ret = connector_status_connected;
+       }
 
        /* May update encoder flag for like clock for SDVO TV, etc.*/
        if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
                edid = intel_sdvo_get_analog_edid(connector);
 
        if (edid != NULL) {
-               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+               struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+               bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+               bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+               if (connector_is_digital == monitor_is_digital) {
                        drm_mode_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
+
                connector->display_info.raw_edid = NULL;
                kfree(edid);
        }
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+       struct edid *edid;
+       bool has_audio = false;
+
+       if (!intel_sdvo->is_hdmi)
+               return false;
+
+       edid = intel_sdvo_get_edid(connector);
+       if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+               has_audio = drm_detect_monitor_audio(edid);
+
+       return has_audio;
+}
+
 static int
 intel_sdvo_set_property(struct drm_connector *connector,
                        struct drm_property *property,
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
                return ret;
 
        if (property == intel_sdvo_connector->force_audio_property) {
-               if (val == intel_sdvo_connector->force_audio)
+               int i = val;
+               bool has_audio;
+
+               if (i == intel_sdvo_connector->force_audio)
                        return 0;
 
-               intel_sdvo_connector->force_audio = val;
+               intel_sdvo_connector->force_audio = i;
 
-               if (val > 0 && intel_sdvo->has_hdmi_audio)
-                       return 0;
-               if (val < 0 && !intel_sdvo->has_hdmi_audio)
+               if (i == 0)
+                       has_audio = intel_sdvo_detect_hdmi_audio(connector);
+               else
+                       has_audio = i > 0;
+
+               if (has_audio == intel_sdvo->has_hdmi_audio)
                        return 0;
 
-               intel_sdvo->has_hdmi_audio = val > 0;
+               intel_sdvo->has_hdmi_audio = has_audio;
                goto done;
        }
 
index 93206e4..fe4a53a 100644 (file)
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+                     struct drm_connector *connector)
 {
        struct drm_encoder *encoder = &intel_tv->base.base;
        struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
        int type;
 
        /* Disable TV interrupts around load detect or we'll recurse */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_disable_pipestat(dev_priv, 0,
-                             PIPE_HOTPLUG_INTERRUPT_ENABLE |
-                             PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               i915_disable_pipestat(dev_priv, 0,
+                                     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+                                     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       }
 
        save_tv_dac = tv_dac = I915_READ(TV_DAC);
        save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
        I915_WRITE(TV_CTL, save_tv_ctl);
 
        /* Restore interrupt config */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       i915_enable_pipestat(dev_priv, 0,
-                            PIPE_HOTPLUG_INTERRUPT_ENABLE |
-                            PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               i915_enable_pipestat(dev_priv, 0,
+                                    PIPE_HOTPLUG_INTERRUPT_ENABLE |
+                                    PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       }
 
        return type;
 }
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
        drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
        if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-               type = intel_tv_detect_type(intel_tv);
+               type = intel_tv_detect_type(intel_tv, connector);
        } else if (force) {
                struct drm_crtc *crtc;
                int dpms_mode;
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
                crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
                                                  &mode, &dpms_mode);
                if (crtc) {
-                       type = intel_tv_detect_type(intel_tv);
+                       type = intel_tv_detect_type(intel_tv, connector);
                        intel_release_load_detect_pipe(&intel_tv->base, connector,
                                                       dpms_mode);
                } else
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
        intel_encoder = &intel_tv->base;
        connector = &intel_connector->base;
 
+       /* The documentation, for the older chipsets at least, recommends
+        * using a polling method rather than hotplug detection for TVs.
+        * This is because, in order to perform hotplug detection, the PLLs
+        * for the TV must be kept alive, increasing power drain and starving
+        * bandwidth from other encoders. Notably, it causes pipe underruns
+        * on Crestline, for instance, when this encoder is supposedly idle.
+        *
+        * More recent chipsets favour HDMI rather than integrated S-Video.
+        */
+       connector->polled =
+               DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
        drm_connector_init(dev, connector, &intel_tv_connector_funcs,
                           DRM_MODE_CONNECTOR_SVIDEO);
 
index 2aef5cd..6bdab89 100644 (file)
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
                entry->tvconf.has_component_output = false;
                break;
        case OUTPUT_LVDS:
-               if ((conn & 0x00003f00) != 0x10)
+               if ((conn & 0x00003f00) >> 8 != 0x10)
                        entry->lvdsconf.use_straps_for_mode = true;
                entry->lvdsconf.use_power_scripts = true;
                break;
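The one-line change above matters because the strap field occupies bits 8..13 of conn, so the unshifted mask can never equal 0x10. A small worked illustration with a made-up conn value:

u32 conn  = 0x00001014;
u32 bits  = conn & 0x00003f00;          /* 0x1000: the old compare against 0x10 never matched */
u32 strap = (conn & 0x00003f00) >> 8;   /* 0x10: the value the check was meant to look at     */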
@@ -6310,6 +6310,9 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 static bool
 apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct dcb_table *dcb = &dev_priv->vbios.dcb;
+
        /* Dell Precision M6300
         *   DCB entry 2: 02025312 00000010
         *   DCB entry 3: 02026312 00000020
@@ -6327,6 +6330,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
                        return false;
        }
 
+       /* GeForce3 Ti 200
+        *
+        * DCB reports an LVDS output that should be TMDS:
+        *   DCB entry 1: f2005014 ffffffff
+        */
+       if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+               if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+                       fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
+                       return false;
+               }
+       }
+
        return true;
 }
 
index a7fae26..a521840 100644 (file)
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-       nouveau_vm_put(&nvbo->vma);
+       if (nvbo->vma.node) {
+               nouveau_vm_unmap(&nvbo->vma);
+               nouveau_vm_put(&nvbo->vma);
+       }
        kfree(nvbo);
 }
 
@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
                }
        }
 
+       nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
 
        nvbo->channel = chan;
@@ -166,17 +170,17 @@ static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+       int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
 
        if (dev_priv->card_type == NV_10 &&
-           nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+           nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+           nvbo->bo.mem.num_pages < vram_pages / 2) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
-               int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;
 
index a21e000..390d82c 100644 (file)
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
        int high_w = 0, high_h = 0, high_v = 0;
 
        list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+               mode->vrefresh = drm_mode_vrefresh(mode);
                if (helper->mode_valid(connector, mode) != MODE_OK ||
                    (mode->flags & DRM_MODE_FLAG_INTERLACE))
                        continue;
index 65699bf..b368ed7 100644 (file)
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
                return ret;
 
        /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-       ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+       ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+                                    &chan->m2mf_ntfy);
        if (ret)
                return ret;
 
index 13bb672..f658a04 100644 (file)
@@ -234,9 +234,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
                pci_set_power_state(pdev, PCI_D3hot);
        }
 
-       acquire_console_sem();
+       console_lock();
        nouveau_fbcon_set_suspend(dev, 1);
-       release_console_sem();
+       console_unlock();
        nouveau_fbcon_restore_accel(dev);
        return 0;
 
@@ -359,9 +359,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
                nv_crtc->lut.depth = 0;
        }
 
-       acquire_console_sem();
+       console_lock();
        nouveau_fbcon_set_suspend(dev, 0);
-       release_console_sem();
+       console_unlock();
 
        nouveau_fbcon_zfill_all(dev);
 
index 01bffc4..982d70b 100644 (file)
@@ -848,14 +848,12 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
                                     struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 
-/* nvc0_vram.c */
-extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
-
 /* nouveau_notifier.c */
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
 extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
-                                  int cout, uint32_t *offset);
+                                  int cout, uint32_t start, uint32_t end,
+                                  uint32_t *offset);
 extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
 extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
                                         struct drm_file *);
index 26347b7..b0fb9bd 100644 (file)
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
        ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
                        mem->page_alignment << PAGE_SHIFT, size_nc,
                        (nvbo->tile_flags >> 8) & 0xff, &node);
-       if (ret)
-               return ret;
+       if (ret) {
+               mem->mm_node = NULL;
+               return (ret == -ENOSPC) ? 0 : ret;
+       }
 
        node->page_shift = 12;
        if (nvbo->vma.node)
index 8844b50..7609756 100644 (file)
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
                return 0;
        }
 
-       return -ENOMEM;
+       return -ENOSPC;
 }
 
 int
index fe29d60..5ea1676 100644 (file)
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
 
 int
 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
-                      int size, uint32_t *b_offset)
+                      int size, uint32_t start, uint32_t end,
+                      uint32_t *b_offset)
 {
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
        uint32_t offset;
        int target, ret;
 
-       mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+       mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+                                         start, end, 0);
        if (mem)
-               mem = drm_mm_get_block(mem, size, 0);
+               mem = drm_mm_get_block_range(mem, size, 0, start, end);
        if (!mem) {
                NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
                return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
        if (IS_ERR(chan))
                return PTR_ERR(chan);
 
-       ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+       ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+                                    &na->offset);
        nouveau_channel_put(&chan);
        return ret;
 }
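Together with the nouveau_dma.c hunk earlier, the new start/end arguments carve the per-channel notifier heap into windows: ioctl-requested notifiers are placed anywhere in [0x000, 0x1000), while the kernel's M2MF notifier is pinned near the top of the heap. The two call sites, with the window values copied from the hunks above:

/* user-requested notifier: anywhere in the 4 KiB heap */
ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, &na->offset);

/* kernel M2MF notifier: reserved slot starting at 0xfd0 */
ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, &chan->m2mf_ntfy);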
index fb846a3..4399e2f 100644 (file)
@@ -443,7 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 
        if (pm->hwmon) {
-               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+               sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
                hwmon_device_unregister(pm->hwmon);
        }
 #endif
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_level *perflvl;
 
-       if (pm->cur == &pm->boot)
+       if (!pm->cur || pm->cur == &pm->boot)
                return;
 
        perflvl = pm->cur;
index 7ecc4ad..8d9968e 100644 (file)
@@ -265,8 +265,8 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
        struct i2c_board_info info[] = {
                { I2C_BOARD_INFO("w83l785ts", 0x2d) },
                { I2C_BOARD_INFO("w83781d", 0x2d) },
-               { I2C_BOARD_INFO("f75375", 0x2e) },
                { I2C_BOARD_INFO("adt7473", 0x2e) },
+               { I2C_BOARD_INFO("f75375", 0x2e) },
                { I2C_BOARD_INFO("lm99", 0x4c) },
                { }
        };
index ef23550..c82db37 100644 (file)
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
        if (nv_encoder->dcb->type == OUTPUT_LVDS) {
                bool duallink, dummy;
 
-               nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
-                                             clock, &duallink, &dummy);
+               nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+                                             &duallink, &dummy);
                if (duallink)
                        regp->fp_control |= (8 << 28);
        } else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
                return;
 
        if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
-               struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
                /* when removing an output, crtc may not be set, but PANEL_OFF
                 * must still be run
                 */
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
                           nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
 
                if (mode == DRM_MODE_DPMS_ON) {
-                       if (!nv_connector->native_mode) {
-                               NV_ERROR(dev, "Not turning on LVDS without native mode\n");
-                               return;
-                       }
                        call_lvds_script(dev, nv_encoder->dcb, head,
-                                        LVDS_PANEL_ON, nv_connector->native_mode->clock);
+                                        LVDS_PANEL_ON, nv_encoder->mode.clock);
                } else
                        /* pxclk of 0 is fine for PANEL_OFF, and for a
                         * disconnected LVDS encoder there is no native_mode
index 8870d72..18d30c2 100644 (file)
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
        switch (dev_priv->chipset) {
+       case 0x40:
+       case 0x41: /* guess */
+       case 0x42:
+       case 0x43:
+       case 0x45: /* guess */
+       case 0x4e:
+               nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+               nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+               nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+               nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+               nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+               nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+               break;
        case 0x44:
        case 0x4a:
-       case 0x4e:
                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
                break;
-
        case 0x46:
        case 0x47:
        case 0x49:
        case 0x4b:
+       case 0x4c:
+       case 0x67:
+       default:
                nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
                break;
-
-       default:
-               nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-               nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-               nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-               nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-               nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-               nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
-               break;
        }
 }
 
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
                break;
        default:
                switch (dev_priv->chipset) {
-               case 0x46:
-               case 0x47:
-               case 0x49:
-               case 0x4b:
-                       nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-                       nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-                       break;
-               default:
+               case 0x41:
+               case 0x42:
+               case 0x43:
+               case 0x45:
+               case 0x4e:
+               case 0x44:
+               case 0x4a:
                        nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
                        nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
                        break;
+               default:
+                       nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+                       nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+                       break;
                }
                nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
                nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
index 14e24e9..0ea090f 100644 (file)
@@ -283,8 +283,7 @@ nv50_evo_create(struct drm_device *dev)
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }
-       } else
-       if (dev_priv->chipset != 0x50) {
+       } else {
                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
                                          0, 0xffffffff, 0x00010000);
                if (ret) {
index 2d7ea75..37e21d2 100644 (file)
@@ -256,6 +256,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+       struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
        unsigned long flags;
 
@@ -265,6 +266,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
                return;
 
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+       pfifo->reassign(dev, false);
        pgraph->fifo_access(dev, false);
 
        if (pgraph->channel(dev) == chan)
@@ -275,6 +277,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
        dev_priv->engine.instmem.flush(dev);
 
        pgraph->fifo_access(dev, true);
+       pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
index ea00418..e57caa2 100644 (file)
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       spin_lock(&dev_priv->ramin_lock);
        nv_wr32(dev, 0x00330c, 0x00000001);
        if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
+       spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       spin_lock(&dev_priv->ramin_lock);
        nv_wr32(dev, 0x070000, 0x00000001);
        if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
+       spin_unlock(&dev_priv->ramin_lock);
 }
 
index 38e523e..6144156 100644 (file)
@@ -45,11 +45,6 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
        }
 
        if (phys & 1) {
-               if (dev_priv->vram_sys_base) {
-                       phys += dev_priv->vram_sys_base;
-                       phys |= 0x30;
-               }
-
                if (coverage <= 32 * 1024 * 1024)
                        phys |= 0x60;
                else if (coverage <= 64 * 1024 * 1024)
@@ -174,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       spin_lock(&dev_priv->ramin_lock);
        nv_wr32(dev, 0x100c80, (engine << 16) | 1);
        if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
                NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+       spin_unlock(&dev_priv->ramin_lock);
 }
index e6ea7d8..eb18a7e 100644 (file)
@@ -31,6 +31,7 @@
 #include "nvc0_graph.h"
 
 static void nvc0_graph_isr(struct drm_device *);
+static void nvc0_runk140_isr(struct drm_device *);
 static int  nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
 
 void
@@ -281,6 +282,7 @@ nvc0_graph_destroy(struct drm_device *dev)
                return;
 
        nouveau_irq_unregister(dev, 12);
+       nouveau_irq_unregister(dev, 25);
 
        nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
        nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -390,6 +392,7 @@ nvc0_graph_create(struct drm_device *dev)
        }
 
        nouveau_irq_register(dev, 12, nvc0_graph_isr);
+       nouveau_irq_register(dev, 25, nvc0_runk140_isr);
        NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
        NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
        NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
@@ -512,8 +515,8 @@ nvc0_graph_init_gpc_1(struct drm_device *dev)
                        nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
                        nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
                        nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
-                       nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
-                       nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+                       nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
+                       nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
                }
                nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
                nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -777,3 +780,19 @@ nvc0_graph_isr(struct drm_device *dev)
 
        nv_wr32(dev, 0x400500, 0x00010001);
 }
+
+static void
+nvc0_runk140_isr(struct drm_device *dev)
+{
+       u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
+
+       while (units) {
+               u32 unit = ffs(units) - 1;
+               u32 reg = 0x140000 + unit * 0x2000;
+               u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
+               u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
+
+               NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+               units &= ~(1 << unit);
+       }
+}
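The new ISR walks a bitmask of signalling units with ffs(). A small worked illustration of that loop shape, independent of the hardware registers:

unsigned int units = 0x15;                      /* bits 0, 2 and 4 set        */
while (units) {
        unsigned int unit = ffs(units) - 1;     /* visits 0, then 2, then 4   */
        /* ... read and report this unit's status ... */
        units &= ~(1u << unit);                 /* clear the bit just handled */
}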
index b9e68b2..f880ff7 100644 (file)
@@ -1830,7 +1830,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
 
        for (tp = 0, id = 0; tp < 4; tp++) {
                for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-                       if (tp <= priv->tp_nr[gpc]) {
+                       if (tp < priv->tp_nr[gpc]) {
                                nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
                                nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
                                nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
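The '<=' to '<' change above is a plain off-by-one fix: TPs on a GPC are numbered 0 .. tp_nr[gpc] - 1, so with tp_nr[gpc] == 2 the old inclusive test also matched the non-existent TP 2. As a bare illustration (tp_nr here stands in for priv->tp_nr[gpc]):

bool tp_present = (tp < tp_nr);   /* true for 0 .. tp_nr - 1; "tp <= tp_nr" also accepted tp == tp_nr */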
index b0ab185..a4e5e53 100644 (file)
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
 
        switch (radeon_crtc->rmx_type) {
        case RMX_CENTER:
-               args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-               args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-               args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
-               args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+               args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+               args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+               args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+               args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
                break;
        case RMX_ASPECT:
                a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
                a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
 
                if (a1 > a2) {
-                       args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
-                       args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+                       args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+                       args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
                } else if (a2 > a1) {
-                       args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
-                       args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+                       args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+                       args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
                }
                break;
        case RMX_FULL:
        default:
-               args.usOverscanRight = radeon_crtc->h_border;
-               args.usOverscanLeft = radeon_crtc->h_border;
-               args.usOverscanBottom = radeon_crtc->v_border;
-               args.usOverscanTop = radeon_crtc->v_border;
+               args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+               args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+               args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+               args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
                break;
        }
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
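The conversions above exist because the ATOM argument tables use a little-endian byte layout shared with the AtomBIOS interpreter, so 16-bit fields must be stored with cpu_to_le16() rather than assigned raw. A minimal illustration of the idiom (kernel byteorder helpers assumed):

u16 border = 16;                        /* value in CPU byte order            */
__le16 wire = cpu_to_le16(border);      /* identity on LE hosts, swap on BE,  */
                                        /* so memory always holds 0x10 0x00   */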
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
        memset(&args, 0, sizeof(args));
 
        if (ASIC_IS_DCE5(rdev)) {
-               args.v3.usSpreadSpectrumAmountFrac = 0;
+               args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
                args.v3.ucSpreadSpectrumType = ss->type;
                switch (pll_id) {
                case ATOM_PPLL1:
                        args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
-                       args.v3.usSpreadSpectrumAmount = ss->amount;
-                       args.v3.usSpreadSpectrumStep = ss->step;
+                       args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+                       args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
                        break;
                case ATOM_PPLL2:
                        args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
-                       args.v3.usSpreadSpectrumAmount = ss->amount;
-                       args.v3.usSpreadSpectrumStep = ss->step;
+                       args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+                       args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
                        break;
                case ATOM_DCPLL:
                        args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
-                       args.v3.usSpreadSpectrumAmount = 0;
-                       args.v3.usSpreadSpectrumStep = 0;
+                       args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+                       args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
                        break;
                case ATOM_PPLL_INVALID:
                        return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
                switch (pll_id) {
                case ATOM_PPLL1:
                        args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
-                       args.v2.usSpreadSpectrumAmount = ss->amount;
-                       args.v2.usSpreadSpectrumStep = ss->step;
+                       args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+                       args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
                        break;
                case ATOM_PPLL2:
                        args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
-                       args.v2.usSpreadSpectrumAmount = ss->amount;
-                       args.v2.usSpreadSpectrumStep = ss->step;
+                       args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+                       args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
                        break;
                case ATOM_DCPLL:
                        args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
-                       args.v2.usSpreadSpectrumAmount = 0;
-                       args.v2.usSpreadSpectrumStep = 0;
+                       args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+                       args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
                        break;
                case ATOM_PPLL_INVALID:
                        return;
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
                else
                        pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
        }
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,23 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        dp_clock = dig_connector->dp_clock;
                                }
                        }
-#if 0 /* doesn't work properly on some laptops */
+
                        /* use recommended ref_div for ss */
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                if (ss_enabled) {
                                        if (ss->refdiv) {
+                                               pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                                                pll->flags |= RADEON_PLL_USE_REF_DIV;
                                                pll->reference_div = ss->refdiv;
+                                               if (ASIC_IS_AVIVO(rdev))
+                                                       pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                                        }
                                }
                        }
-#endif
+
                        if (ASIC_IS_AVIVO(rdev)) {
                                /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
                                if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
                                        adjusted_clock = mode->clock * 2;
                                if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
                                        pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+                               if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                                       pll->flags |= RADEON_PLL_IS_LCD;
                        } else {
                                if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
                                        pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -606,14 +610,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
                                args.v1.ucTransmitterID = radeon_encoder->encoder_id;
                                args.v1.ucEncodeMode = encoder_mode;
-                               if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                       if (ss_enabled)
-                                               args.v1.ucConfig |=
-                                                       ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-                               } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+                               if (ss_enabled)
                                        args.v1.ucConfig |=
                                                ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-                               }
 
                                atom_execute_table(rdev->mode_info.atom_context,
                                                   index, (uint32_t *)&args);
@@ -624,12 +623,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
                                args.v3.sInput.ucEncodeMode = encoder_mode;
                                args.v3.sInput.ucDispPllConfig = 0;
+                               if (ss_enabled)
+                                       args.v3.sInput.ucDispPllConfig |=
+                                               DISPPLL_CONFIG_SS_ENABLE;
                                if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
                                        if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                               if (ss_enabled)
-                                                       args.v3.sInput.ucDispPllConfig |=
-                                                               DISPPLL_CONFIG_SS_ENABLE;
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
                                                /* 16200 or 27000 */
@@ -649,18 +648,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        }
                                } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                        if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                               if (ss_enabled)
-                                                       args.v3.sInput.ucDispPllConfig |=
-                                                               DISPPLL_CONFIG_SS_ENABLE;
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
                                                /* 16200 or 27000 */
                                                args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-                                       } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
-                                               if (ss_enabled)
-                                                       args.v3.sInput.ucDispPllConfig |=
-                                                               DISPPLL_CONFIG_SS_ENABLE;
-                                       } else {
+                                       } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
                                                if (mode->clock > 165000)
                                                        args.v3.sInput.ucDispPllConfig |=
                                                                DISPPLL_CONFIG_DUAL_LINK;
@@ -670,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                                   index, (uint32_t *)&args);
                                adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
                                if (args.v3.sOutput.ucRefDiv) {
+                                       pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                                        pll->flags |= RADEON_PLL_USE_REF_DIV;
                                        pll->reference_div = args.v3.sOutput.ucRefDiv;
                                }
                                if (args.v3.sOutput.ucPostDiv) {
+                                       pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                                        pll->flags |= RADEON_PLL_USE_POST_DIV;
                                        pll->post_div = args.v3.sOutput.ucPostDiv;
                                }
@@ -727,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
                         * SetPixelClock provides the dividers
                         */
                        args.v5.ucCRTC = ATOM_CRTC_INVALID;
-                       args.v5.usPixelClock = dispclk;
+                       args.v5.usPixelClock = cpu_to_le16(dispclk);
                        args.v5.ucPpll = ATOM_DCPLL;
                        break;
                case 6:
                        /* if the default dcpll clock is specified,
                         * SetPixelClock provides the dividers
                         */
-                       args.v6.ulDispEngClkFreq = dispclk;
+                       args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
                        args.v6.ucPpll = ATOM_DCPLL;
                        break;
                default:
@@ -963,8 +957,12 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
        /* adjust pixel clock as needed */
        adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
-       radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-                          &ref_div, &post_div);
+       if (ASIC_IS_AVIVO(rdev))
+               radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+                                        &ref_div, &post_div);
+       else
+               radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+                                         &ref_div, &post_div);
 
        atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
 
@@ -993,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
        }
 }
 
-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
-                                     struct drm_framebuffer *fb,
-                                     int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                int x, int y, int atomic)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
@@ -1006,6 +1004,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
        struct radeon_bo *rbo;
        uint64_t fb_location;
        uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+       u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
        int r;
 
        /* no fb bound */
@@ -1057,11 +1056,17 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
        case 16:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
                break;
        case 24:
        case 32:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
                break;
        default:
                DRM_ERROR("Unsupported screen depth %d\n",
@@ -1106,6 +1111,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
               (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
        WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+       WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1127,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
               (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-       if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-               WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-                      EVERGREEN_INTERLEAVE_EN);
-       else
-               WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
        if (!atomic && fb && fb != crtc->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
                rbo = radeon_fb->obj->driver_private;
@@ -1162,6 +1162,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        struct drm_framebuffer *target_fb;
        uint64_t fb_location;
        uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+       u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
        int r;
 
        /* no fb bound */
@@ -1215,12 +1216,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
                    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
                break;
        case 24:
        case 32:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
                    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
                break;
        default:
                DRM_ERROR("Unsupported screen depth %d\n",
@@ -1260,6 +1267,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
               radeon_crtc->crtc_offset, (u32) fb_location);
        WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+       if (rdev->family >= CHIP_R600)
+               WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1281,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
               (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-       if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-                      AVIVO_D1MODE_INTERLEAVE_EN);
-       else
-               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
        if (!atomic && fb && fb != crtc->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
                rbo = radeon_fb->obj->driver_private;
@@ -1310,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        struct radeon_device *rdev = dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev))
-               return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+               return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
        else if (ASIC_IS_AVIVO(rdev))
                return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
        else
@@ -1325,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
        struct radeon_device *rdev = dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev))
-               return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+               return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
        else if (ASIC_IS_AVIVO(rdev))
                return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
        else
index 4e7778d..695de9a 100644 (file)
@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
 int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
 {
        int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
-       int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
+       int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
 
-       if ((lanes == 0) || (bw == 0))
+       if ((lanes == 0) || (dp_clock == 0))
                return MODE_CLOCK_HIGH;
 
        return MODE_OK;
index a8973ac..d270b3f 100644 (file)
@@ -97,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 }
 
 /* get temperature in millidegrees */
-u32 evergreen_get_temp(struct radeon_device *rdev)
+int evergreen_get_temp(struct radeon_device *rdev)
 {
        u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
                ASIC_T_SHIFT;
        u32 actual_temp = 0;
 
-       if ((temp >> 10) & 1)
-               actual_temp = 0;
-       else if ((temp >> 9) & 1)
+       if (temp & 0x400)
+               actual_temp = -256;
+       else if (temp & 0x200)
                actual_temp = 255;
-       else
-               actual_temp = (temp >> 1) & 0xff;
+       else if (temp & 0x100) {
+               actual_temp = temp & 0x1ff;
+               actual_temp |= ~0x1ff;
+       } else
+               actual_temp = temp & 0xff;
 
-       return actual_temp * 1000;
+       return (actual_temp * 1000) / 2;
 }
 
-u32 sumo_get_temp(struct radeon_device *rdev)
+int sumo_get_temp(struct radeon_device *rdev)
 {
        u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
-       u32 actual_temp = (temp >> 1) & 0xff;
+       int actual_temp = temp - 49;
 
        return actual_temp * 1000;
 }
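The rewritten evergreen_get_temp() treats the ASIC_T field as a signed reading in half-degree steps instead of an unsigned byte. A standalone sketch of the same decode with two worked raw values (bit layout taken from the hunk; the 0.5 degC step size is implied by the final division):

static int decode_asic_t(unsigned int temp)
{
        int steps;

        if (temp & 0x400)                       /* same clamp as the driver    */
                steps = -256;
        else if (temp & 0x200)                  /* same clamp as the driver    */
                steps = 255;
        else if (temp & 0x100)                  /* negative: sign-extend bit 8 */
                steps = (int)(temp & 0x1ff) | ~0x1ff;
        else                                    /* positive reading            */
                steps = temp & 0xff;

        return (steps * 1000) / 2;              /* millidegrees Celsius        */
}

/* decode_asic_t(0x050) ==  40000 ( 40.0 degC)
 * decode_asic_t(0x1f6) ==  -5000 ( -5.0 degC) */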
@@ -1182,6 +1185,22 @@ static void evergreen_mc_program(struct radeon_device *rdev)
 /*
  * CP.
  */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       /* set to DX10/11 mode */
+       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(rdev, 1);
+       /* FIXME: implement */
+       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
+       radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(rdev, ib->length_dw);
+}
+
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
@@ -1192,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
                return -EINVAL;
 
        r700_cp_stop(rdev);
-       WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+       WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+              BUF_SWAP_32BIT |
+#endif
+              RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
        fw_data = (const __be32 *)rdev->pfp_fw->data;
        WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1233,7 +1256,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
 
-       r = radeon_ring_lock(rdev, evergreen_default_size + 15);
+       r = radeon_ring_lock(rdev, evergreen_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
@@ -1266,6 +1289,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        radeon_ring_write(rdev, 0xffffffff);
        radeon_ring_write(rdev, 0xffffffff);
 
+       radeon_ring_write(rdev, 0xc0026900);
+       radeon_ring_write(rdev, 0x00000316);
+       radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       radeon_ring_write(rdev, 0x00000010); /*  */
+
        radeon_ring_unlock_commit(rdev);
 
        return 0;
@@ -1306,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB_WPTR, 0);
 
        /* set the wb address whether it's enabled or not */
-       WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+       WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+              RB_RPTR_SWAP(2) |
+#endif
+              ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
        WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
        WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2072,6 +2104,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 
        WREG32(VGT_GS_VERTEX_REUSE, 16);
+       WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -2201,6 +2234,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
        struct evergreen_mc_save save;
        u32 grbm_reset = 0;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               return 0;
+
        dev_info(rdev->dev, "GPU softreset \n");
        dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(GRBM_STATUS));
@@ -2603,8 +2639,8 @@ restart_ih:
        while (rptr != wptr) {
                /* wptr/rptr are in bytes! */
                ring_index = rptr / 4;
-               src_id =  rdev->ih.ring[ring_index] & 0xff;
-               src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+               src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+               src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
                switch (src_id) {
                case 1: /* D1 vblank/vline */
index b758dc7..2adfb03 100644 (file)
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
        if (h < 8)
                h = 8;
 
-       cb_color_info = ((format << 2) | (1 << 24));
+       cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;
 
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 
        /* high addr, stride */
        sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+       sq_vtx_constant_word2 |= (2 << 30);
+#endif
        /* xyzw swizzles */
        sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
 
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
        sq_tex_resource_word0 = (1 << 0); /* 2D */
        sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
                                  ((w - 1) << 18));
-       sq_tex_resource_word1 = ((h - 1) << 0);
+       sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
        /* xyzw swizzles */
        sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
 
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
        radeon_ring_write(rdev, DI_PT_RECTLIST);
 
        radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-       radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+       radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+                         (2 << 2) |
+#endif
+                         DI_INDEX_SIZE_16_BIT);
 
        radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
        radeon_ring_write(rdev, 1);
@@ -232,7 +239,7 @@ draw_auto(struct radeon_device *rdev)
 
 }
 
-/* emits 30 */
+/* emits 36 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -245,6 +252,8 @@ set_default_state(struct radeon_device *rdev)
        int num_hs_threads, num_ls_threads;
        int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
        int num_hs_stack_entries, num_ls_stack_entries;
+       u64 gpu_addr;
+       int dwords;
 
        switch (rdev->family) {
        case CHIP_CEDAR:
@@ -497,6 +506,18 @@ set_default_state(struct radeon_device *rdev)
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);
 
+       /* set to DX10/11 mode */
+       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(rdev, 1);
+
+       /* emit an IB pointing at default state */
+       dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+       gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+       radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+       radeon_ring_write(rdev, dwords);
+
 }
 
 static inline uint32_t i2f(uint32_t input)
@@ -527,8 +548,10 @@ static inline uint32_t i2f(uint32_t input)
 int evergreen_blit_init(struct radeon_device *rdev)
 {
        u32 obj_size;
-       int r;
+       int i, r, dwords;
        void *ptr;
+       u32 packet2s[16];
+       int num_packet2s = 0;
 
        /* pin copy shader into vram if already initialized */
        if (rdev->r600_blit.shader_obj)
@@ -536,8 +559,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
 
        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;
-       rdev->r600_blit.state_len = 0;
-       obj_size = 0;
+
+       rdev->r600_blit.state_len = evergreen_default_size;
+
+       dwords = rdev->r600_blit.state_len;
+       while (dwords & 0xf) {
+               packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+               dwords++;
+       }
+
+       obj_size = dwords * 4;
+       obj_size = ALIGN(obj_size, 256);
 
        rdev->r600_blit.vs_offset = obj_size;
        obj_size += evergreen_vs_size * 4;
@@ -567,8 +599,16 @@ int evergreen_blit_init(struct radeon_device *rdev)
                return r;
        }
 
-       memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
-       memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+       memcpy_toio(ptr + rdev->r600_blit.state_offset,
+                   evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+       if (num_packet2s)
+               memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+                           packet2s, num_packet2s * 4);
+       for (i = 0; i < evergreen_vs_size; i++)
+               *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+       for (i = 0; i < evergreen_ps_size; i++)
+               *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
@@ -652,7 +692,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default  + shaders */
-       ring_size += 46; /* shaders + def state */
+       ring_size += 52; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
@@ -660,7 +700,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
        if (r)
                return r;
 
-       set_default_state(rdev); /* 30 */
+       set_default_state(rdev); /* 36 */
        set_shaders(rdev); /* 16 */
        return 0;
 }
index ef1d28c..3a10399 100644 (file)
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
        0x00000000,
        0x3c000000,
        0x67961001,
+#ifdef __BIG_ENDIAN
+       0x000a0000,
+#else
        0x00080000,
+#endif
        0x00000000,
        0x1c000000,
        0x67961000,
+#ifdef __BIG_ENDIAN
+       0x00020008,
+#else
        0x00000008,
+#endif
        0x00000000,
 };
 
index 36d32d8..eb4acf4 100644 (file)
@@ -98,6 +98,7 @@
 #define                BUF_SWAP_32BIT                                  (2 << 16)
 #define        CP_RB_RPTR                                      0x8700
 #define        CP_RB_RPTR_ADDR                                 0xC10C
+#define                RB_RPTR_SWAP(x)                                 ((x) << 0)
 #define        CP_RB_RPTR_ADDR_HI                              0xC110
 #define        CP_RB_RPTR_WR                                   0xC108
 #define        CP_RB_WPTR                                      0xC114
 #define                FORCE_EOV_MAX_CLK_CNT(x)                        ((x) << 0)
 #define                FORCE_EOV_MAX_REZ_CNT(x)                        ((x) << 16)
 #define PA_SC_LINE_STIPPLE                             0x28A0C
+#define        PA_SU_LINE_STIPPLE_VALUE                        0x8A60
 #define        PA_SC_LINE_STIPPLE_STATE                        0x8B10
 
 #define        SCRATCH_REG0                                    0x8500
 #define        PACKET3_DISPATCH_DIRECT                         0x15
 #define        PACKET3_DISPATCH_INDIRECT                       0x16
 #define        PACKET3_INDIRECT_BUFFER_END                     0x17
+#define        PACKET3_MODE_CONTROL                            0x18
 #define        PACKET3_SET_PREDICATION                         0x20
 #define        PACKET3_REG_RMW                                 0x21
 #define        PACKET3_COND_EXEC                               0x22
index 607241c..5a82b6b 100644 (file)
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
        last_reg = strtol(last_reg_s, NULL, 16);
 
        do {
-               if (fgets(buf, 1024, file) == NULL)
+               if (fgets(buf, 1024, file) == NULL) {
+                       fclose(file);
                        return -1;
+               }
                len = strlen(buf);
                if (ftell(file) == end)
                        done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
                                fprintf(stderr,
                                        "Error matching regular expression %d in %s\n",
                                        r, filename);
+                               fclose(file);
                                return -1;
                        } else {
                                buf[match[0].rm_eo] = 0;
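
The parser_auth() hunk above is purely about releasing the FILE handle on every early error return. A small self-contained sketch of the same concern using a single cleanup label; the file name and the "invalid line" rule below are made up for illustration:

#include <stdio.h>

static int parse_file(const char *filename)
{
    char buf[1024];
    int ret = -1;
    FILE *file = fopen(filename, "r");

    if (file == NULL)
        return -1;

    while (fgets(buf, sizeof(buf), file) != NULL) {
        /* ... match and record registers here ... */
        if (buf[0] == '#')          /* pretend '#' lines are invalid */
            goto out;               /* error path still closes the file */
    }
    ret = 0;                        /* reached EOF without errors */

out:
    fclose(file);
    return ret;
}

int main(void)
{
    return parse_file("reg_table.txt") == 0 ? 0 : 1;
}
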
index 46da514..93fa735 100644 (file)
@@ -1031,8 +1031,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
               REG_SET(RADEON_INDIRECT1_START, indirect1_start));
-       WREG32(0x718, 0);
-       WREG32(0x744, 0x00004D4D);
+       WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+       WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
        WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
        radeon_ring_start(rdev);
        r = radeon_ring_test(rdev);
@@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
+               track->zb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                }
                track->cb[0].robj = reloc->robj;
                track->cb[0].offset = idx_value;
+               track->cb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                }
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        case RADEON_PP_CUBIC_OFFSET_T0_0:
        case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                track->textures[0].cube_info[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[0].cube_info[i].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        case RADEON_PP_CUBIC_OFFSET_T1_0:
        case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                track->textures[1].cube_info[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[1].cube_info[i].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        case RADEON_PP_CUBIC_OFFSET_T2_0:
        case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                track->textures[2].cube_info[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[2].cube_info[i].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        case RADEON_RE_WIDTH_HEIGHT:
                track->maxy = ((idx_value >> 16) & 0x7FF);
+               track->cb_dirty = true;
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                ib[idx] = tmp;
 
                track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+               track->cb_dirty = true;
                break;
        case RADEON_RB3D_DEPTHPITCH:
                track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_CNTL:
                switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+               track->cb_dirty = true;
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_ZSTENCILCNTL:
                switch (idx_value & 0xf) {
@@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                default:
                        break;
                }
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        uint32_t temp = idx_value >> 4;
                        for (i = 0; i < track->num_texture; i++)
                                track->textures[i].enabled = !!(temp & (1 << i));
+                       track->tex_dirty = true;
                }
                break;
        case RADEON_SE_VF_CNTL:
@@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
                track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
                track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+               track->tex_dirty = true;
                break;
        case RADEON_PP_TEX_PITCH_0:
        case RADEON_PP_TEX_PITCH_1:
        case RADEON_PP_TEX_PITCH_2:
                i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
                track->textures[i].pitch = idx_value + 32;
+               track->tex_dirty = true;
                break;
        case RADEON_PP_TXFILTER_0:
        case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                tmp = (idx_value >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
+               track->tex_dirty = true;
                break;
        case RADEON_PP_TXFORMAT_0:
        case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                }
                track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
                track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+               track->tex_dirty = true;
                break;
        case RADEON_PP_CUBIC_FACES_0:
        case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
                        track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
                }
+               track->tex_dirty = true;
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2347,10 +2366,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
 
        temp = RREG32(RADEON_CONFIG_CNTL);
        if (state == false) {
-               temp &= ~(1<<8);
-               temp |= (1<<9);
+               temp &= ~RADEON_CFG_VGA_RAM_EN;
+               temp |= RADEON_CFG_VGA_IO_DIS;
        } else {
-               temp &= ~(1<<9);
+               temp &= ~RADEON_CFG_VGA_IO_DIS;
        }
        WREG32(RADEON_CONFIG_CNTL, temp);
 }
@@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
        unsigned long size;
        unsigned prim_walk;
        unsigned nverts;
-       unsigned num_cb = track->num_cb;
+       unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
-       if (!track->zb_cb_clear && !track->color_channel_mask &&
+       if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
            !track->blend_read_enable)
                num_cb = 0;
 
@@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                        return -EINVAL;
                }
        }
-       if (track->z_enabled) {
+       track->cb_dirty = false;
+
+       if (track->zb_dirty && track->z_enabled) {
                if (track->zb.robj == NULL) {
                        DRM_ERROR("[drm] No buffer for z buffer !\n");
                        return -EINVAL;
@@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                        return -EINVAL;
                }
        }
+       track->zb_dirty = false;
+
+       if (track->aa_dirty && track->aaresolve) {
+               if (track->aa.robj == NULL) {
+                       DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+                       return -EINVAL;
+               }
+               /* I believe the format comes from colorbuffer0. */
+               size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+               size += track->aa.offset;
+               if (size > radeon_bo_size(track->aa.robj)) {
+                       DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+                                 "(need %lu have %lu) !\n", i, size,
+                                 radeon_bo_size(track->aa.robj));
+                       DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+                                 i, track->aa.pitch, track->cb[0].cpp,
+                                 track->aa.offset, track->maxy);
+                       return -EINVAL;
+               }
+       }
+       track->aa_dirty = false;
+
        prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
        if (track->vap_vf_cntl & (1 << 14)) {
                nverts = track->vap_alt_nverts;
@@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                          prim_walk);
                return -EINVAL;
        }
-       return r100_cs_track_texture_check(rdev, track);
+
+       if (track->tex_dirty) {
+               track->tex_dirty = false;
+               return r100_cs_track_texture_check(rdev, track);
+       }
+       return 0;
 }
 
 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 {
        unsigned i, face;
 
+       track->cb_dirty = true;
+       track->zb_dirty = true;
+       track->tex_dirty = true;
+       track->aa_dirty = true;
+
        if (rdev->family < CHIP_R300) {
                track->num_cb = 1;
                if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
                track->num_texture = 16;
                track->maxy = 4096;
                track->separate_cube = 0;
+               track->aaresolve = false;
+               track->aa.robj = NULL;
        }
 
        for (i = 0; i < track->num_cb; i++) {
@@ -3522,7 +3577,7 @@ int r100_ring_test(struct radeon_device *rdev)
        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test succeeded in %d usecs\n", i);
        } else {
-               DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+               DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
@@ -3746,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
        r100_mc_program(rdev);
        /* Resume clock */
        r100_clock_startup(rdev);
-       /* Initialize GPU configuration (# pipes, ...) */
-//     r100_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r100_enable_bm(rdev);
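
Most of the r100.c changes above introduce per-state dirty flags so the command-stream checker only re-validates buffers whose registers were actually written since the last check. A simplified sketch of that pattern; the struct, fields and checks are stand-ins, not the driver's real tracker:

#include <stdbool.h>
#include <stdio.h>

struct track_state {
    unsigned cb_pitch, zb_pitch;
    bool cb_dirty, zb_dirty;
};

/* A register write only marks the state it affects as dirty. */
static void track_write_cb_pitch(struct track_state *t, unsigned pitch)
{
    t->cb_pitch = pitch;
    t->cb_dirty = true;
}

/* The check step validates and clears only the dirty pieces. */
static int track_check(struct track_state *t)
{
    if (t->cb_dirty) {
        if (t->cb_pitch == 0)
            return -1;          /* -EINVAL in the driver */
        t->cb_dirty = false;
    }
    if (t->zb_dirty) {
        if (t->zb_pitch == 0)
            return -1;
        t->zb_dirty = false;
    }
    return 0;
}

int main(void)
{
    struct track_state t = { .cb_pitch = 256, .zb_pitch = 256,
                             .cb_dirty = true, .zb_dirty = true };

    track_write_cb_pitch(&t, 512);
    printf("first check: %d, cb_dirty now %d\n", track_check(&t), t.cb_dirty);
    printf("second check (nothing dirty): %d\n", track_check(&t));
    return 0;
}
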
index af65600..2fef9de 100644 (file)
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
        unsigned                compress_format;
 };
 
-struct r100_cs_track_limits {
-       unsigned num_cb;
-       unsigned num_texture;
-       unsigned max_levels;
-};
-
 struct r100_cs_track {
-       struct radeon_device *rdev;
        unsigned                        num_cb;
        unsigned                        num_texture;
        unsigned                        maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
        struct r100_cs_track_array      arrays[11];
        struct r100_cs_track_cb         cb[R300_MAX_CB];
        struct r100_cs_track_cb         zb;
+       struct r100_cs_track_cb         aa;
        struct r100_cs_track_texture    textures[R300_TRACK_MAX_TEXTURE];
        bool                            z_enabled;
        bool                            separate_cube;
        bool                            zb_cb_clear;
        bool                            blend_read_enable;
+       bool                            cb_dirty;
+       bool                            zb_dirty;
+       bool                            tex_dirty;
+       bool                            aa_dirty;
+       bool                            aaresolve;
 };
 
 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
index d2408c3..f240583 100644 (file)
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
+               track->zb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                }
                track->cb[0].robj = reloc->robj;
                track->cb[0].offset = idx_value;
+               track->cb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                }
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        case R200_PP_CUBIC_OFFSET_F1_0:
        case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                track->textures[i].cube_info[face - 1].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].cube_info[face - 1].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        case RADEON_RE_WIDTH_HEIGHT:
                track->maxy = ((idx_value >> 16) & 0x7FF);
+               track->cb_dirty = true;
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                ib[idx] = tmp;
 
                track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+               track->cb_dirty = true;
                break;
        case RADEON_RB3D_DEPTHPITCH:
                track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_CNTL:
                switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                }
 
                track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+               track->cb_dirty = true;
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_ZSTENCILCNTL:
                switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                default:
                        break;
                }
+               track->zb_dirty = true;
                break;
        case RADEON_RB3D_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        uint32_t temp = idx_value >> 4;
                        for (i = 0; i < track->num_texture; i++)
                                track->textures[i].enabled = !!(temp & (1 << i));
+                       track->tex_dirty = true;
                }
                break;
        case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                i = (reg - R200_PP_TXSIZE_0) / 32;
                track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
                track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+               track->tex_dirty = true;
                break;
        case R200_PP_TXPITCH_0:
        case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_TXPITCH_5:
                i = (reg - R200_PP_TXPITCH_0) / 32;
                track->textures[i].pitch = idx_value + 32;
+               track->tex_dirty = true;
                break;
        case R200_PP_TXFILTER_0:
        case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                tmp = (idx_value >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
+               track->tex_dirty = true;
                break;
        case R200_PP_TXMULTI_CTL_0:
        case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].tex_coord_type = 1;
                        break;
                }
+               track->tex_dirty = true;
                break;
        case R200_PP_TXFORMAT_0:
        case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                }
                track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
                track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+               track->tex_dirty = true;
                break;
        case R200_PP_CUBIC_FACES_0:
        case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
                        track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
                }
+               track->tex_dirty = true;
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
index cf862ca..069efa8 100644 (file)
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
        mb();
 }
 
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE  (1 << 3)
+
 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
        }
        addr = (lower_32_bits(addr) >> 8) |
               ((upper_32_bits(addr) & 0xff) << 24) |
-              0xc;
+              R300_PTE_WRITEABLE | R300_PTE_READABLE;
        /* on x86 we want this to be CPU endian, on powerpc
         * on powerpc without HW swappers, it'll get swapped on way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        /* Clear error */
-       WREG32_PCIE(0x18, 0);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@@ -664,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = idx_value;
+               track->cb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
@@ -676,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
+               track->zb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
@@ -714,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                tmp |= tile_flags;
                ib[idx] = tmp;
                track->textures[i].robj = reloc->robj;
+               track->tex_dirty = true;
                break;
        /* Tracked registers */
        case 0x2084:
@@ -740,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
+               track->cb_dirty = true;
+               track->zb_dirty = true;
                break;
        case 0x4E00:
                /* RB3D_CCTL */
@@ -749,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+               track->cb_dirty = true;
                break;
        case 0x4E38:
        case 0x4E3C:
@@ -811,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                                  ((idx_value >> 21) & 0xF));
                        return -EINVAL;
                }
+               track->cb_dirty = true;
                break;
        case 0x4F00:
                /* ZB_CNTL */
@@ -819,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                } else {
                        track->z_enabled = false;
                }
+               track->zb_dirty = true;
                break;
        case 0x4F10:
                /* ZB_FORMAT */
@@ -835,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                                  (idx_value & 0xF));
                        return -EINVAL;
                }
+               track->zb_dirty = true;
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
@@ -858,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                ib[idx] = tmp;
 
                track->zb.pitch = idx_value & 0x3FFC;
+               track->zb_dirty = true;
                break;
        case 0x4104:
+               /* TX_ENABLE */
                for (i = 0; i < 16; i++) {
                        bool enabled;
 
                        enabled = !!(idx_value & (1 << i));
                        track->textures[i].enabled = enabled;
                }
+               track->tex_dirty = true;
                break;
        case 0x44C0:
        case 0x44C4:
@@ -895,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_X16:
+               case R300_TX_FORMAT_FL_I16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
@@ -907,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_Y16X16:
+               case R300_TX_FORMAT_FL_I16A16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
@@ -948,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        DRM_ERROR("Invalid texture format %u\n",
                                  (idx_value & 0x1F));
                        return -EINVAL;
-                       break;
                }
+               track->tex_dirty = true;
                break;
        case 0x4400:
        case 0x4404:
@@ -977,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
+               track->tex_dirty = true;
                break;
        case 0x4500:
        case 0x4504:
@@ -1014,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
                        return -EINVAL;
                }
+               track->tex_dirty = true;
                break;
        case 0x4480:
        case 0x4484:
@@ -1043,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                track->textures[i].use_pitch = !!tmp;
                tmp = (idx_value >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
+               track->tex_dirty = true;
                break;
        case R300_ZB_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1057,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x4e0c:
                /* RB3D_COLOR_CHANNEL_MASK */
                track->color_channel_mask = idx_value;
+               track->cb_dirty = true;
                break;
        case 0x43a4:
                /* SC_HYPERZ_EN */
@@ -1070,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x4f1c:
                /* ZB_BW_CNTL */
                track->zb_cb_clear = !!(idx_value & (1 << 5));
+               track->cb_dirty = true;
+               track->zb_dirty = true;
                if (p->rdev->hyperz_filp != p->filp) {
                        if (idx_value & (R300_HIZ_ENABLE |
                                         R300_RD_COMP_ENABLE |
@@ -1081,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x4e04:
                /* RB3D_BLENDCNTL */
                track->blend_read_enable = !!(idx_value & (1 << 2));
+               track->cb_dirty = true;
+               break;
+       case R300_RB3D_AARESOLVE_OFFSET:
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                 idx, reg);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               track->aa.robj = reloc->robj;
+               track->aa.offset = idx_value;
+               track->aa_dirty = true;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+               break;
+       case R300_RB3D_AARESOLVE_PITCH:
+               track->aa.pitch = idx_value & 0x3FFE;
+               track->aa_dirty = true;
                break;
-       case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+       case R300_RB3D_AARESOLVE_CTL:
+               track->aaresolve = idx_value & 0x1;
+               track->aa_dirty = true;
                break;
        case 0x4f30: /* ZB_MASK_OFFSET */
        case 0x4f34: /* ZB_ZMASK_PITCH */
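
The rv370_pcie_gart_set_page() hunk above replaces the bare 0xc with named permission bits. A sketch of the same PTE assembly using a page-aligned example address; the address packing follows the hunk, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

static uint32_t make_pte(uint64_t addr)
{
    /* Low address bits shifted down, high bits packed into 31:24,
     * then the permission flags (previously the bare 0xc). */
    return (uint32_t)((addr & 0xffffffffull) >> 8) |
           ((uint32_t)((addr >> 32) & 0xff) << 24) |
           R300_PTE_WRITEABLE | R300_PTE_READABLE;
}

int main(void)
{
    printf("pte = 0x%08x\n", make_pte(0x12345000ull));
    return 0;
}
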
index 1a0d536..f0bce39 100644 (file)
 #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
 #define R300_RB3D_AARESOLVE_CTL             0x4E88
 /* gap */
 
index c387346..0b59ed7 100644 (file)
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev)
                       "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes */
-       gb_pipe_select = RREG32(0x402C);
+       gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
 
        /* SE chips have 1 pipe */
index 3c8677f..2ce80d9 100644 (file)
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev)
                WREG32(0x4128, 0xFF);
        }
        r420_pipes_init(rdev);
-       gb_pipe_select = RREG32(0x402C);
-       tmp = RREG32(0x170C);
+       gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+       tmp = RREG32(R300_DST_PIPE_CONFIG);
        pipe_select_current = (tmp >> 2) & 3;
        tmp = (1 << pipe_select_current) |
              (((gb_pipe_select >> 8) & 0xF) << 4);
index aca2236..de88624 100644 (file)
@@ -97,12 +97,16 @@ void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 
 /* get temperature in millidegrees */
-u32 rv6xx_get_temp(struct radeon_device *rdev)
+int rv6xx_get_temp(struct radeon_device *rdev)
 {
        u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
                ASIC_T_SHIFT;
+       int actual_temp = temp & 0xff;
 
-       return temp * 1000;
+       if (temp & 0x100)
+               actual_temp -= 256;
+
+       return actual_temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -1287,6 +1291,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                        S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
        u32 tmp;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               return 0;
+
        dev_info(rdev->dev, "GPU softreset \n");
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
@@ -2098,7 +2105,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
        r600_cp_stop(rdev);
 
-       WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+       WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+              BUF_SWAP_32BIT |
+#endif
+              RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
        /* Reset cp */
        WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2185,7 +2196,11 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB_WPTR, 0);
 
        /* set the wb address whether it's enabled or not */
-       WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+       WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+              RB_RPTR_SWAP(2) |
+#endif
+              ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
        WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
        WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2621,7 +2636,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
        /* FIXME: implement */
        radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+       radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
        radeon_ring_write(rdev, ib->length_dw);
 }
@@ -3290,8 +3309,8 @@ restart_ih:
        while (rptr != wptr) {
                /* wptr/rptr are in bytes! */
                ring_index = rptr / 4;
-               src_id =  rdev->ih.ring[ring_index] & 0xff;
-               src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+               src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+               src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
                switch (src_id) {
                case 1: /* D1 vblank/vline */
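
Among the r600.c changes above, the rv6xx_get_temp() fix treats the raw thermal field as a 9-bit two's-complement value and widens the return type to int so negative readings survive. A standalone sketch of just that decode, with the register read replaced by constants:

#include <stdint.h>
#include <stdio.h>

static int decode_temp_millideg(uint32_t raw9)
{
    int actual_temp = raw9 & 0xff;

    if (raw9 & 0x100)           /* bit 8 is the sign bit: sign-extend */
        actual_temp -= 256;

    return actual_temp * 1000;
}

int main(void)
{
    printf("%d\n", decode_temp_millideg(0x03f));  /*  63 C ->  63000 */
    printf("%d\n", decode_temp_millideg(0x1fb));  /*  -5 C ->  -5000 */
    return 0;
}
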
index ca5c29f..7f10434 100644 (file)
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
        ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
 
        for (i = 0; i < r6xx_vs_size; i++)
-               vs[i] = r6xx_vs[i];
+               vs[i] = cpu_to_le32(r6xx_vs[i]);
        for (i = 0; i < r6xx_ps_size; i++)
-               ps[i] = r6xx_ps[i];
+               ps[i] = cpu_to_le32(r6xx_ps[i]);
 
        dev_priv->blit_vb->used = 512;
 
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
        DRM_DEBUG("\n");
 
        sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+       sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
        BEGIN_RING(9);
        OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
        OUT_RING(DI_PT_RECTLIST);
 
        OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+       OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
        OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
 
        OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
        OUT_RING(1);
index 86e5aa0..41f7aaf 100644 (file)
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
        if (h < 8)
                h = 8;
 
-       cb_color_info = ((format << 2) | (1 << 27));
+       cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;
 
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
        u32 sq_vtx_constant_word2;
 
        sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+       sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
        radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
        if (h < 1)
                h = 1;
 
-       sq_tex_resource_word0 = (1 << 0);
+       sq_tex_resource_word0 = (1 << 0) | (1 << 3);
        sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
                                  ((w - 1) << 19));
 
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
        radeon_ring_write(rdev, DI_PT_RECTLIST);
 
        radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-       radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+       radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+                         (2 << 2) |
+#endif
+                         DI_INDEX_SIZE_16_BIT);
 
        radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
        radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+       radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
        radeon_ring_write(rdev, dwords);
 
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
 int r600_blit_init(struct radeon_device *rdev)
 {
        u32 obj_size;
-       int r, dwords;
+       int i, r, dwords;
        void *ptr;
        u32 packet2s[16];
        int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
 
        dwords = rdev->r600_blit.state_len;
        while (dwords & 0xf) {
-               packet2s[num_packet2s++] = PACKET2(0);
+               packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
                dwords++;
        }
 
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
        if (num_packet2s)
                memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
                            packet2s, num_packet2s * 4);
-       memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
-       memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+       for (i = 0; i < r6xx_vs_size; i++)
+               *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+       for (i = 0; i < r6xx_ps_size; i++)
+               *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
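
The r600_blit_init() hunk above stops memcpy()ing the shader arrays and instead converts each word, so the buffer object always holds little-endian data even on big-endian hosts. A user-space sketch of the same idea; glibc's htole32() from <endian.h> stands in for the kernel's cpu_to_le32(), and the shader words are arbitrary:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t shader[] = { 0x00000004, 0x81000000, 0x0000005c };
    uint32_t bo_image[3];                    /* stand-in for the mapped BO */
    size_t i;

    /* Convert per word instead of memcpy(), so the image is LE on any host. */
    for (i = 0; i < 3; i++)
        bo_image[i] = htole32(shader[i]);

    for (i = 0; i < 3; i++)
        printf("host 0x%08x -> stored 0x%08x\n", shader[i], bo_image[i]);
    /* On a little-endian host the columns match; on big-endian they differ. */
    return 0;
}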
 
index e8151c1..2d1f6c5 100644 (file)
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
        0x00000000,
        0x3c000000,
        0x68cd1000,
+#ifdef __BIG_ENDIAN
+       0x000a0000,
+#else
        0x00080000,
+#endif
        0x00000000,
 };
 
index 4f4cd8b..c3ab959 100644 (file)
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
        r600_do_cp_stop(dev_priv);
 
        RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+                    R600_BUF_SWAP_32BIT |
+#endif
                     R600_RB_NO_UPDATE |
                     R600_RB_BLKSZ(15) |
                     R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
        r600_do_cp_stop(dev_priv);
 
        RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+                    R600_BUF_SWAP_32BIT |
+#endif
                     R600_RB_NO_UPDATE |
-                    (15 << 8) |
-                    (3 << 0));
+                    R600_RB_BLKSZ(15) |
+                    R600_RB_BUFSZ(3));
 
        RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
        RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
 
        if (!dev_priv->writeback_works) {
                /* Disable writeback to avoid unnecessary bus master transfer */
-               RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
-                            RADEON_RB_NO_UPDATE);
+               RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+                            R600_BUF_SWAP_32BIT |
+#endif
+                            RADEON_READ(R600_CP_RB_CNTL) |
+                            R600_RB_NO_UPDATE);
                RADEON_WRITE(R600_SCRATCH_UMSK, 0);
        }
 }
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
 
        RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
        cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
-       RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
+       RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+                    R600_BUF_SWAP_32BIT |
+#endif
+                    R600_RB_RPTR_WR_ENA);
 
        RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
        RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
                        + dev_priv->gart_vm_start;
        }
        RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-                    rptr_addr & 0xffffffff);
+#ifdef __BIG_ENDIAN
+                    (2 << 0) |
+#endif
+                    (rptr_addr & 0xfffffffc));
        RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
                     upper_32_bits(rptr_addr));
 
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
        {
                u64 scratch_addr;
 
-               scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
+               scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
                scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
                scratch_addr += R600_SCRATCH_REG_OFFSET;
                scratch_addr >>= 8;
index 7831e08..153095f 100644 (file)
@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        }
 
        if (!IS_ALIGNED(pitch, pitch_align)) {
-               dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-                        __func__, __LINE__, pitch);
+               dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+                        __func__, __LINE__, pitch, pitch_align, array_mode);
                return -EINVAL;
        }
        if (!IS_ALIGNED(height, height_align)) {
-               dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-                        __func__, __LINE__, height);
+               dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+                        __func__, __LINE__, height, height_align, array_mode);
                return -EINVAL;
        }
        if (!IS_ALIGNED(base_offset, base_align)) {
-               dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+               dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+                        base_offset, base_align, array_mode);
                return -EINVAL;
        }
 
@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
                         * broken userspace.
                         */
                } else {
-                       dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+                       dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+                                array_mode,
+                                track->cb_color_bo_offset[i], tmp,
+                                radeon_bo_size(track->cb_color_bo[i]));
                        return -EINVAL;
                }
        }
@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
                        }
 
                        if (!IS_ALIGNED(pitch, pitch_align)) {
-                               dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-                                        __func__, __LINE__, pitch);
+                               dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+                                        __func__, __LINE__, pitch, pitch_align, array_mode);
                                return -EINVAL;
                        }
                        if (!IS_ALIGNED(height, height_align)) {
-                               dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-                                        __func__, __LINE__, height);
+                               dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+                                        __func__, __LINE__, height, height_align, array_mode);
                                return -EINVAL;
                        }
                        if (!IS_ALIGNED(base_offset, base_align)) {
-                               dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+                               dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+                                        base_offset, base_align, array_mode);
                                return -EINVAL;
                        }
 
@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
                        nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
                        tmp = ntiles * bpe * 64 * nviews;
                        if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-                               dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
-                                               track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
-                                               radeon_bo_size(track->db_bo));
+                               dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+                                        array_mode,
+                                        track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+                                        radeon_bo_size(track->db_bo));
                                return -EINVAL;
                        }
                }
@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
        /* XXX check height as well... */
 
        if (!IS_ALIGNED(pitch, pitch_align)) {
-               dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-                        __func__, __LINE__, pitch);
+               dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+                        __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }
        if (!IS_ALIGNED(base_offset, base_align)) {
-               dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
-                        __func__, __LINE__, base_offset);
+               dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+                        __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }
        if (!IS_ALIGNED(mip_offset, base_align)) {
-               dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
-                        __func__, __LINE__, mip_offset);
+               dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+                        __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }
 
index 33cda01..f869897 100644 (file)
 #define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
 #define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
 
-
+#define R600_D1GRPH_SWAP_CONTROL                               0x610C
+#       define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
 
 #define R600_HDP_NONSURFACE_BASE                                0x2c04
 
index a5d898b..04bac0b 100644 (file)
 #define                ROQ_IB2_START(x)                                ((x) << 8)
 #define        CP_RB_BASE                                      0xC100
 #define        CP_RB_CNTL                                      0xC104
-#define                RB_BUFSZ(x)                                     ((x)<<0)
-#define                RB_BLKSZ(x)                                     ((x)<<8)
-#define                RB_NO_UPDATE                                    (1<<27)
-#define                RB_RPTR_WR_ENA                                  (1<<31)
+#define                RB_BUFSZ(x)                                     ((x) << 0)
+#define                RB_BLKSZ(x)                                     ((x) << 8)
+#define                RB_NO_UPDATE                                    (1 << 27)
+#define                RB_RPTR_WR_ENA                                  (1 << 31)
 #define                BUF_SWAP_32BIT                                  (2 << 16)
 #define        CP_RB_RPTR                                      0x8700
 #define        CP_RB_RPTR_ADDR                                 0xC10C
+#define                RB_RPTR_SWAP(x)                                 ((x) << 0)
 #define        CP_RB_RPTR_ADDR_HI                              0xC110
 #define        CP_RB_RPTR_WR                                   0xC108
 #define        CP_RB_WPTR                                      0xC114
index 71d2a55..56c48b6 100644 (file)
@@ -179,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
 void rs690_pm_info(struct radeon_device *rdev);
-extern u32 rv6xx_get_temp(struct radeon_device *rdev);
-extern u32 rv770_get_temp(struct radeon_device *rdev);
-extern u32 evergreen_get_temp(struct radeon_device *rdev);
-extern u32 sumo_get_temp(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
 
 /*
  * Fences.
@@ -812,8 +812,7 @@ struct radeon_pm {
        fixed20_12              sclk;
        fixed20_12              mclk;
        fixed20_12              needed_bandwidth;
-       /* XXX: use a define for num power modes */
-       struct radeon_power_state power_state[8];
+       struct radeon_power_state *power_state;
        /* number of valid power states */
        int                     num_power_states;
        int                     current_power_state_index;
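
The radeon.h hunk above drops the hard-coded power_state[8] array in favour of a pointer sized at runtime. A small sketch of that fixed-array-to-allocated-array change; plain calloc() stands in for the kernel allocator and the fields are simplified:

#include <stdio.h>
#include <stdlib.h>

struct power_state { int sclk, mclk; };

struct pm_info {
    struct power_state *power_state;   /* was: struct power_state power_state[8]; */
    int num_power_states;
};

static int pm_init(struct pm_info *pm, int count)
{
    /* Allocate exactly as many states as the BIOS tables report. */
    pm->power_state = calloc(count, sizeof(*pm->power_state));
    if (pm->power_state == NULL)
        return -1;                     /* -ENOMEM in the driver */
    pm->num_power_states = count;
    return 0;
}

int main(void)
{
    struct pm_info pm;

    if (pm_init(&pm, 12))              /* no longer capped at 8 entries */
        return 1;
    printf("allocated %d power states\n", pm.num_power_states);
    free(pm.power_state);
    return 0;
}
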
index 3a1b161..e75d63b 100644 (file)
@@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = {
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring_ib_execute = &evergreen_ring_ib_execute,
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = {
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring_ib_execute = &evergreen_ring_ib_execute,
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -848,7 +848,7 @@ static struct radeon_asic btc_asic = {
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring_ib_execute = &evergreen_ring_ib_execute,
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
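
The radeon_asic.c hunks above only repoint the .ring_ib_execute member of the per-ASIC ops tables. A reduced sketch of that function-pointer-table pattern; the ops struct and both callbacks are invented for illustration:

#include <stdio.h>

struct asic_ops {
    void (*ring_ib_execute)(const char *ib_name);
};

static void r600_ring_ib_execute(const char *ib_name)
{
    printf("r600-style IB submit: %s\n", ib_name);
}

static void evergreen_ring_ib_execute(const char *ib_name)
{
    printf("evergreen-style IB submit: %s\n", ib_name);
}

/* Evergreen-class chips now get their own callback instead of the r600 one. */
static const struct asic_ops evergreen_ops = {
    .ring_ib_execute = evergreen_ring_ib_execute,
};

int main(void)
{
    const struct asic_ops *ops = &evergreen_ops;

    r600_ring_ib_execute("old wiring");    /* what the table used to call */
    ops->ring_ib_execute("blit IB");       /* what it calls after the patch */
    return 0;
}
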
index e01f077..c59bd98 100644 (file)
@@ -355,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev);
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
                        uint64_t src_offset, uint64_t dst_offset,
                        unsigned num_pages, struct radeon_fence *fence);
index 1573202..02d5c41 100644 (file)
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
                        /* some evergreen boards have bad data for this entry */
                        if (ASIC_IS_DCE4(rdev)) {
                                if ((i == 7) &&
-                                   (gpio->usClkMaskRegisterIndex == 0x1936) &&
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
                                    (gpio->sucI2cId.ucAccess == 0)) {
                                        gpio->sucI2cId.ucAccess = 0x97;
                                        gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
                        /* some DCE3 boards have bad data for this entry */
                        if (ASIC_IS_DCE3(rdev)) {
                                if ((i == 4) &&
-                                   (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
                                    (gpio->sucI2cId.ucAccess == 0x94))
                                        gpio->sucI2cId.ucAccess = 0x14;
                        }
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
                        /* some evergreen boards have bad data for this entry */
                        if (ASIC_IS_DCE4(rdev)) {
                                if ((i == 7) &&
-                                   (gpio->usClkMaskRegisterIndex == 0x1936) &&
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
                                    (gpio->sucI2cId.ucAccess == 0)) {
                                        gpio->sucI2cId.ucAccess = 0x97;
                                        gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
                        /* some DCE3 boards have bad data for this entry */
                        if (ASIC_IS_DCE3(rdev)) {
                                if ((i == 4) &&
-                                   (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
                                    (gpio->sucI2cId.ucAccess == 0x94))
                                        gpio->sucI2cId.ucAccess = 0x14;
                        }
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
                        pin = &gpio_info->asGPIO_Pin[i];
                        if (id == pin->ucGPIO_ID) {
                                gpio.id = pin->ucGPIO_ID;
-                               gpio.reg = pin->usGpioPin_AIndex * 4;
+                               gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
                                gpio.mask = (1 << pin->ucGpioPinBitShift);
                                gpio.valid = true;
                                break;
@@ -387,15 +387,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *line_mux = 0x90;
        }
 
-       /* mac rv630 */
-       if ((dev->pdev->device == 0x9588) &&
-           (dev->pdev->subsystem_vendor == 0x106b) &&
-           (dev->pdev->subsystem_device == 0x00a6)) {
-               if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
-                   (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
-                       *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
-                       *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
-               }
+       /* mac rv630, rv730, others */
+       if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+           (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+               *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+               *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
        }
 
        /* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -1167,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                                p1pll->pll_out_min = 64800;
                        else
                                p1pll->pll_out_min = 20000;
-               } else if (p1pll->pll_out_min > 64800) {
-                       /* Limiting the pll output range is a good thing generally as
-                        * it limits the number of possible pll combinations for a given
-                        * frequency presumably to the ones that work best on each card.
-                        * However, certain duallink DVI monitors seem to like
-                        * pll combinations that would be limited by this at least on
-                        * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
-                        * family.
-                        */
-                       p1pll->pll_out_min = 64800;
                }
 
                p1pll->pll_in_min =
@@ -1288,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
                                      data_offset);
                switch (crev) {
                case 1:
-                       if (igp_info->info.ulBootUpMemoryClock)
+                       if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
                                return true;
                        break;
                case 2:
-                       if (igp_info->info_2.ulBootUpSidePortClock)
+                       if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
                                return true;
                        break;
                default:
@@ -1456,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 
                        for (i = 0; i < num_indices; i++) {
                                if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+                                   (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
                                        ss->percentage =
                                                le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
                                        ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
                        for (i = 0; i < num_indices; i++) {
                                if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+                                   (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
                                        ss->percentage =
                                                le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
                                        ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1484,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
                        for (i = 0; i < num_indices; i++) {
                                if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+                                   (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
                                        ss->percentage =
                                                le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
                                        ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1567,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                if (misc & ATOM_DOUBLE_CLOCK_MODE)
                        lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
-               lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
-               lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+               lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+               lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
 
                /* set crtc values */
                drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1583,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                        lvds->linkb = false;
 
                /* parse the lcd record table */
-               if (lvds_info->info.usModePatchTableOffset) {
+               if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
                        ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
                        ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
                        bool bad_record = false;
                        u8 *record = (u8 *)(mode_info->atom_context->bios +
                                            data_offset +
-                                           lvds_info->info.usModePatchTableOffset);
+                                           le16_to_cpu(lvds_info->info.usModePatchTableOffset));
                        while (*record != ATOM_RECORD_END_TYPE) {
                                switch (*record) {
                                case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -1991,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
        num_modes = power_info->info.ucNumOfPowerModeEntries;
        if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
                num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+       if (!rdev->pm.power_state)
+               return state_index;
        /* last mode is usually default, array is low to high */
        for (i = 0; i < num_modes; i++) {
                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
@@ -2200,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
                firmware_info =
                        (union firmware_info *)(mode_info->atom_context->bios +
                                                data_offset);
-               vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+               vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
        }
 
        return vddc;
@@ -2295,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
                        VOLTAGE_SW;
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-                       clock_info->evergreen.usVDDC;
+                       le16_to_cpu(clock_info->evergreen.usVDDC);
        } else {
                sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
                sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2306,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
                        VOLTAGE_SW;
                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-                       clock_info->r600.usVDDC;
+                       le16_to_cpu(clock_info->r600.usVDDC);
        }
 
        if (rdev->flags & RADEON_IS_IGP) {
@@ -2342,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
        radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+                                      power_info->pplib.ucNumStates, GFP_KERNEL);
+       if (!rdev->pm.power_state)
+               return state_index;
        /* first mode is usually default, followed by low to high */
        for (i = 0; i < power_info->pplib.ucNumStates; i++) {
                mode_index = 0;
@@ -2415,13 +2408,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
        radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
        state_array = (struct StateArray *)
                (mode_info->atom_context->bios + data_offset +
-                power_info->pplib.usStateArrayOffset);
+                le16_to_cpu(power_info->pplib.usStateArrayOffset));
        clock_info_array = (struct ClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
-                power_info->pplib.usClockInfoArrayOffset);
+                le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
        non_clock_info_array = (struct NonClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
-                power_info->pplib.usNonClockInfoArrayOffset);
+                le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+                                      state_array->ucNumEntries, GFP_KERNEL);
+       if (!rdev->pm.power_state)
+               return state_index;
        for (i = 0; i < state_array->ucNumEntries; i++) {
                mode_index = 0;
                power_state = (union pplib_power_state *)&state_array->states[i];
@@ -2495,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                        break;
                }
        } else {
-               /* add the default mode */
-               rdev->pm.power_state[state_index].type =
-                       POWER_STATE_TYPE_DEFAULT;
-               rdev->pm.power_state[state_index].num_clock_modes = 1;
-               rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
-               rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
-               rdev->pm.power_state[state_index].default_clock_mode =
-                       &rdev->pm.power_state[state_index].clock_info[0];
-               rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-               rdev->pm.power_state[state_index].pcie_lanes = 16;
-               rdev->pm.default_power_state_index = state_index;
-               rdev->pm.power_state[state_index].flags = 0;
-               state_index++;
+               rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+               if (rdev->pm.power_state) {
+                       /* add the default mode */
+                       rdev->pm.power_state[state_index].type =
+                               POWER_STATE_TYPE_DEFAULT;
+                       rdev->pm.power_state[state_index].num_clock_modes = 1;
+                       rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+                       rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+                       rdev->pm.power_state[state_index].default_clock_mode =
+                               &rdev->pm.power_state[state_index].clock_info[0];
+                       rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+                       rdev->pm.power_state[state_index].pcie_lanes = 16;
+                       rdev->pm.default_power_state_index = state_index;
+                       rdev->pm.power_state[state_index].flags = 0;
+                       state_index++;
+               }
        }
 
        rdev->pm.num_power_states = state_index;
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
        int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-       return args.ulReturnEngineClock;
+       return le32_to_cpu(args.ulReturnEngineClock);
 }
 
 uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
        int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-       return args.ulReturnMemoryClock;
+       return le32_to_cpu(args.ulReturnMemoryClock);
 }
 
 void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
        SET_ENGINE_CLOCK_PS_ALLOCATION args;
        int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 
-       args.ulTargetEngineClock = eng_clock;   /* 10 khz */
+       args.ulTargetEngineClock = cpu_to_le32(eng_clock);      /* 10 khz */
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
        if (rdev->flags & RADEON_IS_IGP)
                return;
 
-       args.ulTargetMemoryClock = mem_clock;   /* 10 khz */
+       args.ulTargetMemoryClock = cpu_to_le32(mem_clock);      /* 10 khz */
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2623,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
        bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
 
        /* tell the bios not to handle mode switching */
-       bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+       bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
        if (rdev->family >= CHIP_R600) {
                WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2674,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
        else
                bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
 
-       if (lock)
+       if (lock) {
                bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
-       else
+               bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+       } else {
                bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+               bios_6_scratch |= ATOM_S6_ACC_MODE;
+       }
 
        if (rdev->family >= CHIP_R600)
                WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
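
Most of the atombios hunks above apply one pattern: ATOM BIOS tables store multi-byte fields little-endian, so each us*/ul* field is now read through le16_to_cpu()/le32_to_cpu() and written back through cpu_to_le16()/cpu_to_le32(), a no-op on x86 that fixes byte-swapped values on big-endian hosts. A minimal sketch of that pattern, using a hypothetical table entry rather than a real ATOM struct:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical little-endian table entry, for illustration only. */
struct example_atom_entry {
        __le16 usClockLow;      /* multi-byte: stored little-endian in the BIOS image */
        u8     ucClockHigh;     /* single byte: no conversion needed */
} __attribute__((packed));

static u32 example_entry_clock(const struct example_atom_entry *e)
{
        u32 clk = le16_to_cpu(e->usClockLow);   /* byte-swap only on big-endian hosts */

        clk |= e->ucClockHigh << 16;
        return clk;
}
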
index 591fcae..cf7c8d5 100644 (file)
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                           (rdev->pdev->subsystem_device == 0x4a48)) {
                        /* Mac X800 */
                        rdev->mode_info.connector_table = CT_MAC_X800;
+               } else if ((rdev->pdev->device == 0x4150) &&
+                          (rdev->pdev->subsystem_vendor == 0x1002) &&
+                          (rdev->pdev->subsystem_device == 0x4150)) {
+                       /* Mac G5 9600 */
+                       rdev->mode_info.connector_table = CT_MAC_G5_9600;
                } else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                                            CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
                                            &hpd);
                break;
+       case CT_MAC_G5_9600:
+               DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+                        rdev->mode_info.connector_table);
+               /* DVI - tv dac, dvo */
+               ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+               hpd.hpd = RADEON_HPD_1; /* ??? */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_DFP2_SUPPORT,
+                                                                 0),
+                                         ATOM_DEVICE_DFP2_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_CRT2_SUPPORT,
+                                                                 2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 0,
+                                           ATOM_DEVICE_DFP2_SUPPORT |
+                                           ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                                           CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+                                           &hpd);
+               /* ADC - primary dac, internal tmds */
+               ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+               hpd.hpd = RADEON_HPD_2; /* ??? */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_DFP1_SUPPORT,
+                                                                 0),
+                                         ATOM_DEVICE_DFP1_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_CRT1_SUPPORT,
+                                                                 1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 1,
+                                           ATOM_DEVICE_DFP1_SUPPORT |
+                                           ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                                           CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+                                           &hpd);
+               break;
        default:
                DRM_INFO("Connector table: %d (invalid)\n",
                         rdev->mode_info.connector_table);
@@ -2442,6 +2489,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 
        rdev->pm.default_power_state_index = -1;
 
+       /* allocate 2 power states */
+       rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+       if (!rdev->pm.power_state) {
+               rdev->pm.default_power_state_index = state_index;
+               rdev->pm.num_power_states = 0;
+
+               rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+               rdev->pm.current_clock_mode_index = 0;
+               return;
+       }
+
        if (rdev->flags & RADEON_IS_MOBILITY) {
                offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
                if (offset) {
index 26091d6..4954e2d 100644 (file)
@@ -891,9 +891,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
-       acquire_console_sem();
+       console_lock();
        radeon_fbdev_set_suspend(rdev, 1);
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 
@@ -905,11 +905,11 @@ int radeon_resume_kms(struct drm_device *dev)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       acquire_console_sem();
+       console_lock();
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev)) {
-               release_console_sem();
+               console_unlock();
                return -1;
        }
        pci_set_master(dev->pdev);
@@ -920,7 +920,7 @@ int radeon_resume_kms(struct drm_device *dev)
        radeon_restore_bios_scratch_regs(rdev);
 
        radeon_fbdev_set_suspend(rdev, 0);
-       release_console_sem();
+       console_unlock();
 
        /* reset hpd state */
        radeon_hpd_init(rdev);
@@ -936,8 +936,11 @@ int radeon_resume_kms(struct drm_device *dev)
 int radeon_gpu_reset(struct radeon_device *rdev)
 {
        int r;
+       int resched;
 
        radeon_save_bios_scratch_regs(rdev);
+       /* block TTM */
+       resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
        radeon_suspend(rdev);
 
        r = radeon_asic_reset(rdev);
@@ -946,6 +949,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
                radeon_resume(rdev);
                radeon_restore_bios_scratch_regs(rdev);
                drm_helper_resume_force_mode(rdev->ddev);
+               ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
                return 0;
        }
        /* bad news, how to tell it to userspace ? */
index d26dabf..3e7e7f9 100644 (file)
@@ -780,6 +780,125 @@ static int radeon_ddc_dump(struct drm_connector *connector)
        return ret;
 }
 
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+                            u32 target_clock,
+                            u32 post_div,
+                            u32 ref_div,
+                            u32 *fb_div,
+                            u32 *frac_fb_div)
+{
+       u32 tmp = post_div * ref_div;
+
+       tmp *= target_clock;
+       *fb_div = tmp / pll->reference_freq;
+       *frac_fb_div = tmp % pll->reference_freq;
+
+       if (*fb_div > pll->max_feedback_div)
+               *fb_div = pll->max_feedback_div;
+       else if (*fb_div < pll->min_feedback_div)
+               *fb_div = pll->min_feedback_div;
+}
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+                             u32 target_clock)
+{
+       u32 vco, post_div, tmp;
+
+       if (pll->flags & RADEON_PLL_USE_POST_DIV)
+               return pll->post_div;
+
+       if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+               if (pll->flags & RADEON_PLL_IS_LCD)
+                       vco = pll->lcd_pll_out_min;
+               else
+                       vco = pll->pll_out_min;
+       } else {
+               if (pll->flags & RADEON_PLL_IS_LCD)
+                       vco = pll->lcd_pll_out_max;
+               else
+                       vco = pll->pll_out_max;
+       }
+
+       post_div = vco / target_clock;
+       tmp = vco % target_clock;
+
+       if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+               if (tmp)
+                       post_div++;
+       } else {
+               if (!tmp)
+                       post_div--;
+       }
+
+       if (post_div > pll->max_post_div)
+               post_div = pll->max_post_div;
+       else if (post_div < pll->min_post_div)
+               post_div = pll->min_post_div;
+
+       return post_div;
+}
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+                             u32 freq,
+                             u32 *dot_clock_p,
+                             u32 *fb_div_p,
+                             u32 *frac_fb_div_p,
+                             u32 *ref_div_p,
+                             u32 *post_div_p)
+{
+       u32 target_clock = freq / 10;
+       u32 post_div = avivo_get_post_div(pll, target_clock);
+       u32 ref_div = pll->min_ref_div;
+       u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+       if (pll->flags & RADEON_PLL_USE_REF_DIV)
+               ref_div = pll->reference_div;
+
+       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+               avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+               frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+               if (frac_fb_div >= 5) {
+                       frac_fb_div -= 5;
+                       frac_fb_div = frac_fb_div / 10;
+                       frac_fb_div++;
+               }
+               if (frac_fb_div >= 10) {
+                       fb_div++;
+                       frac_fb_div = 0;
+               }
+       } else {
+               while (ref_div <= pll->max_ref_div) {
+                       avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+                                        &fb_div, &frac_fb_div);
+                       if (frac_fb_div >= (pll->reference_freq / 2))
+                               fb_div++;
+                       frac_fb_div = 0;
+                       tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+                       tmp = (tmp * 10000) / target_clock;
+
+                       if (tmp > (10000 + MAX_TOLERANCE))
+                               ref_div++;
+                       else if (tmp >= (10000 - MAX_TOLERANCE))
+                               break;
+                       else
+                               ref_div++;
+               }
+       }
+
+       *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+               (ref_div * post_div * 10);
+       *fb_div_p = fb_div;
+       *frac_fb_div_p = frac_fb_div;
+       *ref_div_p = ref_div;
+       *post_div_p = post_div;
+       DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+                     *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
+
+/* pre-avivo */
 static inline uint32_t radeon_div(uint64_t n, uint32_t d)
 {
        uint64_t mod;
@@ -790,13 +909,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
        return n;
 }
 
-void radeon_compute_pll(struct radeon_pll *pll,
-                       uint64_t freq,
-                       uint32_t *dot_clock_p,
-                       uint32_t *fb_div_p,
-                       uint32_t *frac_fb_div_p,
-                       uint32_t *ref_div_p,
-                       uint32_t *post_div_p)
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+                              uint64_t freq,
+                              uint32_t *dot_clock_p,
+                              uint32_t *fb_div_p,
+                              uint32_t *frac_fb_div_p,
+                              uint32_t *ref_div_p,
+                              uint32_t *post_div_p)
 {
        uint32_t min_ref_div = pll->min_ref_div;
        uint32_t max_ref_div = pll->max_ref_div;
@@ -826,6 +945,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
                pll_out_max = pll->pll_out_max;
        }
 
+       if (pll_out_min > 64800)
+               pll_out_min = 64800;
+
        if (pll->flags & RADEON_PLL_USE_REF_DIV)
                min_ref_div = max_ref_div = pll->reference_div;
        else {
@@ -965,6 +1087,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
        *frac_fb_div_p = best_frac_feedback_div;
        *ref_div_p = best_ref_div;
        *post_div_p = best_post_div;
+       DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+                     freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+                     best_ref_div, best_post_div);
+
 }
 
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
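
The new radeon_compute_pll_avivo() path above boils down to a handful of integer divisions. As a quick sanity check of that arithmetic, here is a small standalone sketch with made-up inputs (the values are illustrative, not taken from any real board; reference_freq is assumed to be in 10 kHz units like the other clocks):

#include <stdio.h>

int main(void)
{
        unsigned int reference_freq = 2700;     /* assumed 27 MHz reference, 10 kHz units */
        unsigned int target_clock = 10800;      /* 108 MHz target, 10 kHz units */
        unsigned int post_div = 4, ref_div = 1;

        /* same math as avivo_get_fb_div() */
        unsigned int tmp = post_div * ref_div * target_clock;  /* 43200 */
        unsigned int fb_div = tmp / reference_freq;            /* 16 */
        unsigned int frac_fb_div = tmp % reference_freq;       /* 0 */

        /* same expression as the dot_clock_p computation above */
        unsigned int dot_clock =
                (reference_freq * fb_div * 10 + reference_freq * frac_fb_div) /
                (ref_div * post_div * 10);                     /* 10800, back at the target */

        printf("fb_div=%u.%u dot_clock=%u (10 kHz units)\n",
               fb_div, frac_fb_div, dot_clock);
        return 0;
}
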
index d5680a0..275b26a 100644 (file)
@@ -48,7 +48,7 @@
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
  *   2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
- *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
+ *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
  */
 #define KMS_DRIVER_MAJOR       2
 #define KMS_DRIVER_MINOR       8
index 448eba8..5cba46b 100644 (file)
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
 #define R600_CP_RB_CNTL                                        0xc104
 #       define R600_RB_BUFSZ(x)                                ((x) << 0)
 #       define R600_RB_BLKSZ(x)                                ((x) << 8)
+#      define R600_BUF_SWAP_32BIT                             (2 << 16)
 #       define R600_RB_NO_UPDATE                               (1 << 27)
 #       define R600_RB_RPTR_WR_ENA                             (1 << 31)
 #define R600_CP_RB_RPTR_WR                                     0xc108
index 8fd1842..b427488 100644 (file)
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if (drm_detect_monitor_audio(radeon_connector->edid)) {
+               if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
                        /* fix me */
                        if (ASIC_IS_DCE4(rdev))
                                return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if (drm_detect_monitor_audio(radeon_connector->edid)) {
+               if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
                        /* fix me */
                        if (ASIC_IS_DCE4(rdev))
                                return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
-               else if (drm_detect_monitor_audio(radeon_connector->edid)) {
+               else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
                        /* fix me */
                        if (ASIC_IS_DCE4(rdev))
                                return ATOM_ENCODER_MODE_DVI;
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
        args.v1.ucAction = action;
        if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-               args.v1.usInitInfo = connector_object_id;
+               args.v1.usInitInfo = cpu_to_le16(connector_object_id);
        } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
                args.v1.asMode.ucLaneSel = lane_num;
                args.v1.asMode.ucLaneSet = lane_set;
@@ -1063,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
        if (!ASIC_IS_DCE4(rdev))
                return;
 
-       if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) ||
+       if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
            (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
                return;
 
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
                case 3:
                        args.v3.sExtEncoder.ucAction = action;
                        if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-                               args.v3.sExtEncoder.usConnectorId = connector_object_id;
+                               args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
                        else
                                args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
                        args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
        }
 
        /* set scaler clears this on some chips */
-       /* XXX check DCE4 */
-       if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
-               if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
-                       WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-                              AVIVO_D1MODE_INTERLEAVE_EN);
+       if (ASIC_IS_AVIVO(rdev) &&
+           (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+               if (ASIC_IS_DCE4(rdev)) {
+                       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+                                      EVERGREEN_INTERLEAVE_EN);
+                       else
+                               WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+               } else {
+                       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+                                      AVIVO_D1MODE_INTERLEAVE_EN);
+                       else
+                               WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+               }
        }
 }
 
index 66324b5..cc44bdf 100644 (file)
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        u32 tiling_flags = 0;
        int ret;
        int aligned_size, size;
+       int height = mode_cmd->height;
 
        /* need to align pitch with crtc limits */
        mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-       size = mode_cmd->pitch * mode_cmd->height;
+       if (rdev->family >= CHIP_R600)
+               height = ALIGN(mode_cmd->height, 8);
+       size = mode_cmd->pitch * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                       RADEON_GEM_DOMAIN_VRAM,
index a289646..9ec830c 100644 (file)
@@ -110,11 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
+       int i;
        int r = 0;
 
        INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 
        spin_lock_init(&rdev->irq.sw_lock);
+       for (i = 0; i < rdev->num_crtc; i++)
+               spin_lock_init(&rdev->irq.pflip_lock[i]);
        r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
        if (r) {
                return r;
index 28a53e4..8387d32 100644 (file)
@@ -201,6 +201,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                }
                radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
                break;
+       case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+               /* return clock value in KHz */
+               value = rdev->clock.spll.reference_freq * 10;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
@@ -243,6 +247,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
        struct radeon_device *rdev = dev->dev_private;
        if (rdev->hyperz_filp == file_priv)
                rdev->hyperz_filp = NULL;
+       if (rdev->cmask_filp == file_priv)
+               rdev->cmask_filp = NULL;
 }
 
 /*
index ace2e63..cf0638c 100644 (file)
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
        DRM_DEBUG_KMS("\n");
 
        if (!use_bios_divs) {
-               radeon_compute_pll(pll, mode->clock,
-                                  &freq, &feedback_div, &frac_fb_div,
-                                  &reference_div, &post_divider);
+               radeon_compute_pll_legacy(pll, mode->clock,
+                                         &freq, &feedback_div, &frac_fb_div,
+                                         &reference_div, &post_divider);
 
                for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
                        if (post_div->divider == post_divider)
index 12bdeab..a670caa 100644 (file)
@@ -149,6 +149,7 @@ struct radeon_tmds_pll {
 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
 #define RADEON_PLL_USE_POST_DIV         (1 << 12)
 #define RADEON_PLL_IS_LCD               (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
 
 struct radeon_pll {
        /* reference frequency */
@@ -208,6 +209,7 @@ enum radeon_connector_table {
        CT_EMAC,
        CT_RN50_POWER,
        CT_MAC_X800,
+       CT_MAC_G5_9600,
 };
 
 enum radeon_dvo_chip {
@@ -510,13 +512,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                             struct radeon_atom_ss *ss,
                                             int id, u32 clock);
 
-extern void radeon_compute_pll(struct radeon_pll *pll,
-                              uint64_t freq,
-                              uint32_t *dot_clock_p,
-                              uint32_t *fb_div_p,
-                              uint32_t *frac_fb_div_p,
-                              uint32_t *ref_div_p,
-                              uint32_t *post_div_p);
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+                                     uint64_t freq,
+                                     uint32_t *dot_clock_p,
+                                     uint32_t *fb_div_p,
+                                     uint32_t *frac_fb_div_p,
+                                     uint32_t *ref_div_p,
+                                     uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+                                    u32 freq,
+                                    u32 *dot_clock_p,
+                                    u32 *fb_div_p,
+                                    u32 *frac_fb_div_p,
+                                    u32 *ref_div_p,
+                                    u32 *post_div_p);
 
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
index 3b1b2bf..2aed03b 100644 (file)
@@ -430,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
 {
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;
-       u32 temp;
+       int temp;
 
        switch (rdev->pm.int_thermal_type) {
        case THERMAL_TYPE_RV6XX:
@@ -646,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev)
 #endif
        }
 
+       if (rdev->pm.power_state)
+               kfree(rdev->pm.power_state);
+
        radeon_hwmon_fini(rdev);
 }
 
index 3cd4dac..ec93a75 100644 (file)
 #define RADEON_CONFIG_APER_SIZE             0x0108
 #define RADEON_CONFIG_BONDS                 0x00e8
 #define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_VGA_RAM_EN        (1 << 8)
+#       define RADEON_CFG_VGA_IO_DIS        (1 << 9)
 #       define RADEON_CFG_ATI_REV_A11       (0   << 16)
 #       define RADEON_CFG_ATI_REV_A12       (1   << 16)
 #       define RADEON_CFG_ATI_REV_A13       (2   << 16)
index 1272e4b..e5b2cf1 100644 (file)
@@ -787,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
                radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                radeon_mem_types_list[i].driver_features = 0;
                if (i == 0)
-                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+                       radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
-                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
+                       radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
 
        }
        /* Add ttm page pool to debugfs */
index b506ec1..e8a1786 100644 (file)
@@ -683,9 +683,7 @@ r300 0x4f60
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
index 8c1214c..722074e 100644 (file)
@@ -130,7 +130,6 @@ r420 0x4f60
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
index 0828d80..d9f6286 100644 (file)
@@ -749,9 +749,7 @@ rs600 0x6d40
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
index ef422bb..911a8fb 100644 (file)
@@ -164,7 +164,6 @@ rv515 0x6d40
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4114 SU_TEX_WRAP_PS3
 0x4118 PS3_ENABLE
@@ -461,9 +460,7 @@ rv515 0x6d40
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@ rv515 0x6d40
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@ rv515 0x6d40
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
 0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4FD4 ZB_STENCILREFMASK_BF
index 5512e4e..c76283d 100644 (file)
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
        radeon_gart_table_ram_free(rdev);
 }
 
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE  (1 << 3)
+
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
        uint32_t entry;
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4) |
-               0xc;
+               RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
        entry = cpu_to_le32(entry);
        rdev->gart.table.ram.ptr[i] = entry;
        return 0;
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
-               tmp = RREG32(0x0150);
-               if (tmp & (1 << 2)) {
+               tmp = RREG32(RADEON_MC_STATUS);
+               if (tmp & RADEON_MC_IDLE) {
                        return 0;
                }
                DRM_UDELAY(1);
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
        r420_pipes_init(rdev);
        if (rs400_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "rs400: Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
+                      "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
        }
 }
 
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
                seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
                seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
-               tmp = RREG32_MC(0x100);
+               tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
                seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
-               tmp = RREG32(0x134);
+               tmp = RREG32(RS690_HDP_FB_LOCATION);
                seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
        } else {
                tmp = RREG32(RADEON_AGP_BASE);
index 0137d3e..6638c8e 100644 (file)
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
                switch (crev) {
                case 1:
                        tmp.full = dfixed_const(100);
-                       rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
                        rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-                       if (info->info.usK8MemoryClock)
+                       if (le16_to_cpu(info->info.usK8MemoryClock))
                                rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
                        else if (rdev->clock.default_mclk) {
                                rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
                        break;
                case 2:
                        tmp.full = dfixed_const(100);
-                       rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
                        rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-                       if (info->info_v2.ulBootUpUMAClock)
-                               rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+                       if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+                               rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
                        else if (rdev->clock.default_mclk)
                                rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
                        else
                                rdev->pm.igp_system_mclk.full = dfixed_const(66700);
                        rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-                       rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+                       rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
                        rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
                        rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
                        break;
index 5d569f4..64b57af 100644 (file)
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev)
                          ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
        radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(0x170C, 0));
-       radeon_ring_write(rdev, 1 << 31);
+       radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+       radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
        radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+       radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
        radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
        radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
        radeon_ring_write(rdev, 0);
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev)
        }
        rv515_vga_render_disable(rdev);
        r420_pipes_init(rdev);
-       gb_pipe_select = RREG32(0x402C);
-       tmp = RREG32(0x170C);
+       gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+       tmp = RREG32(R300_DST_PIPE_CONFIG);
        pipe_select_current = (tmp >> 2) & 3;
        tmp = (1 << pipe_select_current) |
              (((gb_pipe_select >> 8) & 0xF) << 4);
index 491dc90..d8ba676 100644 (file)
@@ -78,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 }
 
 /* get temperature in millidegrees */
-u32 rv770_get_temp(struct radeon_device *rdev)
+int rv770_get_temp(struct radeon_device *rdev)
 {
        u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
                ASIC_T_SHIFT;
-       u32 actual_temp = 0;
-
-       if ((temp >> 9) & 1)
-               actual_temp = 0;
-       else
-               actual_temp = (temp >> 1) & 0xff;
-
-       return actual_temp * 1000;
+       int actual_temp;
+
+       if (temp & 0x400)
+               actual_temp = -256;
+       else if (temp & 0x200)
+               actual_temp = 255;
+       else if (temp & 0x100) {
+               actual_temp = temp & 0x1ff;
+               actual_temp |= ~0x1ff;
+       } else
+               actual_temp = temp & 0xff;
+
+       return (actual_temp * 1000) / 2;
 }
 
 void rv770_pm_misc(struct radeon_device *rdev)
@@ -316,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
                return -EINVAL;
 
        r700_cp_stop(rdev);
-       WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+       WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+              BUF_SWAP_32BIT |
+#endif
+              RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
        /* Reset cp */
        WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
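
The reworked rv770_get_temp() above reinterprets the raw ASIC_T field as a signed reading in half-degree steps. A standalone sketch of the same decode with made-up raw values (not captured from real hardware):

#include <stdio.h>

/* Mirrors the decode logic added above; illustrative only. */
static int decode_temp_millic(unsigned int temp)
{
        int actual_temp;

        if (temp & 0x400)
                actual_temp = -256;             /* clamp low */
        else if (temp & 0x200)
                actual_temp = 255;              /* clamp high */
        else if (temp & 0x100) {
                actual_temp = temp & 0x1ff;     /* keep 9 bits ... */
                actual_temp |= ~0x1ff;          /* ... and sign-extend them */
        } else
                actual_temp = temp & 0xff;      /* positive: low 8 bits */

        return (actual_temp * 1000) / 2;        /* half-degree units -> millidegrees */
}

int main(void)
{
        /* 0x50 -> 40000 (40.0 C), 0x1f6 -> -5000 (-5.0 C) */
        printf("%d %d\n", decode_temp_millic(0x50), decode_temp_millic(0x1f6));
        return 0;
}
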
index abc8cf5..79fa588 100644 (file)
 #define                ROQ_IB1_START(x)                                ((x) << 0)
 #define                ROQ_IB2_START(x)                                ((x) << 8)
 #define        CP_RB_CNTL                                      0xC104
-#define                RB_BUFSZ(x)                                     ((x)<<0)
-#define                RB_BLKSZ(x)                                     ((x)<<8)
-#define                RB_NO_UPDATE                                    (1<<27)
-#define                RB_RPTR_WR_ENA                                  (1<<31)
+#define                RB_BUFSZ(x)                                     ((x) << 0)
+#define                RB_BLKSZ(x)                                     ((x) << 8)
+#define                RB_NO_UPDATE                                    (1 << 27)
+#define                RB_RPTR_WR_ENA                                  (1 << 31)
 #define                BUF_SWAP_32BIT                                  (2 << 16)
 #define        CP_RB_RPTR                                      0x8700
 #define        CP_RB_RPTR_ADDR                                 0xC10C
index 09aea5f..70e60a4 100644 (file)
@@ -1,11 +1,13 @@
 config STUB_POULSBO
        tristate "Intel GMA500 Stub Driver"
        depends on PCI
+       depends on NET # for THERMAL
        # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_CLASS_DEVICE if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
+       select THERMAL if ACPI
        help
          Choose this option if you have a system that has Intel GMA500
          (Poulsbo) integrated graphics. If M is selected, the module will
index c380c65..ace2b16 100644 (file)
@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
                        void (*irq_set_state)(void *cookie, bool state),
                        unsigned int (*set_vga_decode)(void *cookie, bool decode))
 {
-       int ret = -1;
+       int ret = -ENODEV;
        struct vga_device *vgadev;
        unsigned long flags;
 
index 773e484..297bc9a 100644 (file)
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
          will be called k8temp.
 
 config SENSORS_K10TEMP
-       tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+       tristate "AMD Family 10h/11h/12h/14h temperature sensor"
        depends on X86 && PCI
        help
          If you say yes here you get support for the temperature
          sensor(s) inside your CPU. Supported are later revisions of
-         the AMD Family 10h and all revisions of the AMD Family 11h
-         microarchitectures.
+         the AMD Family 10h and all revisions of the AMD Family 11h,
+         12h (Llano), and 14h (Brazos) microarchitectures.
 
          This driver can also be built as a module.  If so, the module
          will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
          called jz4740-hwmon.
 
 config SENSORS_JC42
-       tristate "JEDEC JC42.4 compliant temperature sensors"
+       tristate "JEDEC JC42.4 compliant memory module temperature sensors"
        depends on I2C
        help
-         If you say yes here you get support for Jedec JC42.4 compliant
-         temperature sensors. Support will include, but not be limited to,
-         ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
-         MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+         If you say yes here, you get support for JEDEC JC42.4 compliant
+         temperature sensors, which are used on many DDR3 memory modules for
+         mobile devices and servers.  Support will include, but not be limited
+         to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+         MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
 
          This driver can also be built as a module.  If so, the module
          will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
        help
          If you say yes here you get support for National Semiconductor LM85
          sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
-         EMC6D101 and EMC6D102.
+         EMC6D101, EMC6D102, and EMC6D103.
 
          This driver can also be built as a module.  If so, the module
          will be called lm85.
index 86d822a..d46c0c7 100644 (file)
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
        { "ad7414", 0 },
        {}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
 
 static struct i2c_driver ad7414_driver = {
        .driver = {
index f13c843..5cc3e37 100644 (file)
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
        { "adt7411", 0 },
        { }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
 
 static struct i2c_driver adt7411_driver = {
        .driver         = {
index ce0372f..4c07436 100644 (file)
@@ -1072,6 +1072,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
                        node->sda.dev_attr.show = grp->show;
                        node->sda.dev_attr.store = grp->store;
                        attr = &node->sda.dev_attr.attr;
+                       sysfs_attr_init(attr);
                        attr->name = node->name;
                        attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0);
                        ret = sysfs_create_file(&pdev->dev.kobj, attr);
index 2d68cf3..b5e8920 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/dmi.h>
 
 #include <acpi/acpi.h>
 #include <acpi/acpixf.h>
 
 #define ATK_HID "ATK0110"
 
+static bool new_if;
+module_param(new_if, bool, 0);
+MODULE_PARM_DESC(new_if, "Override detection heuristic and force the use of the new ATK0110 interface");
+
+static const struct dmi_system_id __initconst atk_force_new_if[] = {
+       {
+               /* Old interface has broken MCH temp monitoring */
+               .ident = "Asus Sabertooth X58",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
+               }
+       },
+       { }
+};
+
 /* Minimum time between readings, enforced in order to avoid
  * hogging the CPU.
  */
@@ -1302,7 +1318,9 @@ static int atk_probe_if(struct atk_data *data)
         * analysis of multiple DSDTs indicates that when both interfaces
         * are present the new one (GGRP/GITM) is not functional.
         */
-       if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
+       if (new_if)
+               dev_info(dev, "Overriding interface detection\n");
+       if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle && !new_if)
                data->old_interface = true;
        else if (data->enumerate_handle && data->read_handle &&
                        data->write_handle)
@@ -1420,6 +1438,9 @@ static int __init atk0110_init(void)
                return -EBUSY;
        }
 
+       if (dmi_check_system(atk_force_new_if))
+               new_if = true;
+
        ret = acpi_bus_register_driver(&atk_driver);
        if (ret)
                pr_info("acpi_bus_register_driver failed: %d\n", ret);
index 5dea9fa..cd2a6e4 100644 (file)
@@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client)
 }
 
 static const unsigned short emc1403_address_list[] = {
-       0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+       0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
 };
 
 static const struct i2c_device_id emc1403_idtable[] = {
index 340fc78..9349912 100644 (file)
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
 
 /* Configuration register defines */
 #define JC42_CFG_CRIT_ONLY     (1 << 2)
+#define JC42_CFG_TCRIT_LOCK    (1 << 6)
+#define JC42_CFG_EVENT_LOCK    (1 << 7)
 #define JC42_CFG_SHUTDOWN      (1 << 8)
 #define JC42_CFG_HYST_SHIFT    9
 #define JC42_CFG_HYST_MASK     0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct jc42_data *data = i2c_get_clientdata(client);
-       long val;
+       unsigned long val;
        int diff, hyst;
        int err;
        int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
 
 static DEVICE_ATTR(temp1_input, S_IRUGO,
                   show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
                   show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
                   show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
                   show_temp_max, set_temp_max);
 
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
                   show_temp_crit_hyst, set_temp_crit_hyst);
 static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
                   show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
        NULL
 };
 
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+                                 struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct i2c_client *client = to_i2c_client(dev);
+       struct jc42_data *data = i2c_get_clientdata(client);
+       unsigned int config = data->config;
+       bool readonly;
+
+       if (attr == &dev_attr_temp1_crit.attr)
+               readonly = config & JC42_CFG_TCRIT_LOCK;
+       else if (attr == &dev_attr_temp1_min.attr ||
+                attr == &dev_attr_temp1_max.attr)
+               readonly = config & JC42_CFG_EVENT_LOCK;
+       else if (attr == &dev_attr_temp1_crit_hyst.attr)
+               readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+       else
+               readonly = true;
+
+       return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
 static const struct attribute_group jc42_group = {
        .attrs = jc42_attributes,
+       .is_visible = jc42_attribute_mode,
 };
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
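
Context for the jc42 change above: when the attribute group is registered with sysfs_create_group(), the .is_visible hook is called once per attribute and the file is created with the mode it returns (returning 0 hides the attribute entirely), which is how write permission can be dropped at probe time when the chip reports its TCRIT/EVENT lock bits set. A generic sketch of the pattern under the 2.6.38-era mode_t prototype; the mydev_* names are placeholders, not part of this patch:

static mode_t mydev_attr_mode(struct kobject *kobj,
                              struct attribute *attr, int index)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        /* Return 0 to hide the attribute, or the mode to create it with. */
        if (attr == &dev_attr_limit.attr && mydev_is_locked(dev))
                return S_IRUGO;                 /* read-only when locked */
        return S_IRUGO | S_IWUSR;
}

static const struct attribute_group mydev_group = {
        .attrs      = mydev_attributes,         /* NULL-terminated array */
        .is_visible = mydev_attr_mode,
};
/* registered with sysfs_create_group(&dev->kobj, &mydev_group) in probe() */
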
index da5a240..82bf65a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  *
@@ -25,7 +25,7 @@
 #include <linux/pci.h>
 #include <asm/processor.h>
 
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL");
 
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
 static const struct pci_device_id k10temp_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
index 1b674b7..d805e8e 100644 (file)
@@ -957,7 +957,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
 
        /* bail if we did not get an IRQ from the bus layer */
        if (!dev->irq) {
-               pr_err("No IRQ. Disabling /dev/freefall\n");
+               pr_debug("No IRQ. Disabling /dev/freefall\n");
                goto out;
        }
 
index 776aeb3..508cb29 100644 (file)
@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
  * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
  * For remote temperature, low and high limits, it uses signed 11-bit values
  * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
+ * For LM64 the actual remote diode temperature is 16 degrees Celsius higher
+ * than the register reading. Remote temperature setpoints have to be
+ * adapted accordingly.
  */
 
 #define FAN_FROM_REG(reg)      ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@ struct lm63_data {
        struct mutex update_lock;
        char valid; /* zero until following fields are valid */
        unsigned long last_updated; /* in jiffies */
+       int kind;
+       int temp2_offset;
 
        /* registers values */
        u8 config, config_fan;
@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
        return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
 }
 
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
-                         char *buf)
+/*
+ * There are 8-bit registers for both the local (temp1) and the remote (temp2)
+ * sensor. The remote sensor registers have to take temp2_offset into account,
+ * while the local sensor registers must not.
+ * So we need separate 8-bit accessors for the local and remote sensors.
+ */
+static ssize_t show_local_temp8(struct device *dev,
+                               struct device_attribute *devattr,
+                               char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct lm63_data *data = lm63_update_device(dev);
        return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
 }
 
-static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
-                        const char *buf, size_t count)
+static ssize_t show_remote_temp8(struct device *dev,
+                                struct device_attribute *devattr,
+                                char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct lm63_data *data = lm63_update_device(dev);
+       return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+                      + data->temp2_offset);
+}
+
+static ssize_t set_local_temp8(struct device *dev,
+                              struct device_attribute *dummy,
+                              const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct lm63_data *data = lm63_update_device(dev);
-       return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
+       return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
+                      + data->temp2_offset);
 }
 
 static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
        int nr = attr->index;
 
        mutex_lock(&data->update_lock);
-       data->temp11[nr] = TEMP11_TO_REG(val);
+       data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
        i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
                                  data->temp11[nr] >> 8);
        i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
 {
        struct lm63_data *data = lm63_update_device(dev);
        return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+                      + data->temp2_offset
                       - TEMP8_FROM_REG(data->temp2_crit_hyst));
 }
 
@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
        long hyst;
 
        mutex_lock(&data->update_lock);
-       hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
+       hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
        i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
                                  HYST_TO_REG(hyst));
        mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
 static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
-       set_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
+       set_local_temp8, 1);
 
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
        set_temp11, 1);
 static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
        set_temp11, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
+/*
+ * On LM63, temp2_crit can be set only once, which should be the job
+ * of the bootloader.
+ */
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
+       NULL, 2);
 static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
        set_temp2_crit_hyst);
 
@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
        data->valid = 0;
        mutex_init(&data->update_lock);
 
-       /* Initialize the LM63 chip */
+       /* Set the device type */
+       data->kind = id->driver_data;
+       if (data->kind == lm64)
+               data->temp2_offset = 16000;
+
+       /* Initialize chip */
        lm63_init_client(new_client);
 
        /* Register sysfs hooks */
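
The LM64 handling above is plain fixed-point arithmetic: register values are converted to millidegrees Celsius, readings have temp2_offset (16000, i.e. 16 degrees) added on the way out, and user-supplied limits have it subtracted before being encoded back into register form. A small self-contained illustration of that round trip; temp8_from_reg()/temp8_to_reg() are simplified stand-ins, not the driver's exact macros:

#include <stdio.h>

/* Simplified 8-bit conversions: 1 LSB = 1 degree C, values in millidegrees. */
static int temp8_from_reg(signed char reg) { return reg * 1000; }
static signed char temp8_to_reg(int mc)    { return mc / 1000; }

int main(void)
{
        const int temp2_offset = 16000; /* LM64: register reads 16 C below the diode */
        signed char reg = 70;           /* raw remote-temperature register */

        /* Reported value adds the offset... */
        int shown = temp8_from_reg(reg) + temp2_offset;
        /* ...and a limit written by the user subtracts it before encoding. */
        signed char limit = temp8_to_reg(86000 - temp2_offset);

        printf("reported %d mC, limit register %d\n", shown, limit);
        return 0;
}
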
index 1e22984..d2cc286 100644 (file)
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
 enum chips {
        any_chip, lm85b, lm85c,
        adm1027, adt7463, adt7468,
-       emc6d100, emc6d102
+       emc6d100, emc6d102, emc6d103
 };
 
 /* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
 #define        LM85_VERSTEP_EMC6D100_A0        0x60
 #define        LM85_VERSTEP_EMC6D100_A1        0x61
 #define        LM85_VERSTEP_EMC6D102           0x65
+#define        LM85_VERSTEP_EMC6D103_A0        0x68
+#define        LM85_VERSTEP_EMC6D103_A1        0x69
+#define        LM85_VERSTEP_EMC6D103S          0x6A    /* Also known as EMC6D103:A2 */
 
 #define        LM85_REG_CONFIG                 0x40
 
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
        { "emc6d100", emc6d100 },
        { "emc6d101", emc6d100 },
        { "emc6d102", emc6d102 },
+       { "emc6d103", emc6d103 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
                case LM85_VERSTEP_EMC6D102:
                        type_name = "emc6d102";
                        break;
+               case LM85_VERSTEP_EMC6D103_A0:
+               case LM85_VERSTEP_EMC6D103_A1:
+                       type_name = "emc6d103";
+                       break;
+               /*
+                * Registers apparently missing in EMC6D103S/EMC6D103:A2
+                * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+                * (according to the data sheets), but used unconditionally
+                * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+                * So skip EMC6D103S for now.
+               case LM85_VERSTEP_EMC6D103S:
+                       type_name = "emc6d103s";
+                       break;
+                */
                }
        } else {
                dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
        case adt7468:
        case emc6d100:
        case emc6d102:
+       case emc6d103:
                data->freq_map = adm1027_freq_map;
                break;
        default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
                        /* More alarm bits */
                        data->alarms |= lm85_read_value(client,
                                                EMC6D100_REG_ALARM3) << 16;
-               } else if (data->type == emc6d102) {
+               } else if (data->type == emc6d102 || data->type == emc6d103) {
                        /* Have to read LSB bits after the MSB ones because
                           the reading of the MSB bits has frozen the
                           LSBs (backward from the ADM1027).
index 2e067dd..50ea1f4 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/ktime.h>
+#include <linux/slab.h>
 
 #define PCH_EVENT_SET  0       /* I2C Interrupt Event Set Status */
 #define PCH_EVENT_NONE 1       /* I2C Interrupt Event Clear Status */
index ef3bcb1..61653f0 100644 (file)
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = {
 static int ocores_i2c_of_probe(struct platform_device* pdev,
                                struct ocores_i2c* i2c)
 {
-       __be32* val;
+       const __be32* val;
 
        val = of_get_property(pdev->dev.of_node, "regstep", NULL);
        if (!val) {
index b605ff3..58a58c7 100644 (file)
@@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
                         * REVISIT: Some wkup sources might not be needed.
                         */
                        dev->westate = OMAP_I2C_WE_ALL;
-                       if (dev->rev < OMAP_I2C_REV_ON_4430)
-                               omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
-                                                               dev->westate);
+                       omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
                }
        }
        omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -847,11 +845,15 @@ complete:
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
                }
+               /*
+                * ProDB0017052: Clear ARDY bit twice
+                */
                if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
                                        OMAP_I2C_STAT_AL)) {
                        omap_i2c_ack_stat(dev, stat &
                                (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
-                               OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+                               OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+                               OMAP_I2C_STAT_ARDY));
                        omap_i2c_complete_cmd(dev, err);
                        return IRQ_HANDLED;
                }
@@ -1137,12 +1139,41 @@ omap_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+       if (!pm_runtime_suspended(dev))
+               if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+                       dev->bus->pm->runtime_suspend(dev);
+
+       return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+       if (!pm_runtime_suspended(dev))
+               if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+                       dev->bus->pm->runtime_resume(dev);
+
+       return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+       .suspend = omap_i2c_suspend,
+       .resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
 static struct platform_driver omap_i2c_driver = {
        .probe          = omap_i2c_probe,
        .remove         = omap_i2c_remove,
        .driver         = {
                .name   = "omap_i2c",
                .owner  = THIS_MODULE,
+               .pm     = OMAP_I2C_PM_OPS,
        },
 };
 
index 495be45..266135d 100644 (file)
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
        adap->owner = THIS_MODULE;
        /* DDC class but actually often used for more generic I2C */
        adap->class = I2C_CLASS_DDC;
-       strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+       strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
                sizeof(adap->name));
        adap->nr = bus_nr;
        adap->algo = &stu300_algo;
index 7acb32e..4a5c4a4 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
@@ -84,6 +85,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
 
+/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
 /*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
@@ -263,7 +270,7 @@ static void __setup_broadcast_timer(void *arg)
        clockevents_notify(reason, &cpu);
 }
 
-static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
                unsigned long action, void *hcpu)
 {
        int hotcpu = (unsigned long)hcpu;
@@ -273,18 +280,23 @@ static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
                smp_call_function_single(hotcpu, __setup_broadcast_timer,
                        (void *)true, 1);
                break;
-       case CPU_DOWN_PREPARE:
-               smp_call_function_single(hotcpu, __setup_broadcast_timer,
-                       (void *)false, 1);
-               break;
        }
        return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata setup_broadcast_notifier = {
+static struct notifier_block setup_broadcast_notifier = {
        .notifier_call = setup_broadcast_cpuhp_notify,
 };
 
+static void auto_demotion_disable(void *dummy)
+{
+       unsigned long long msr_bits;
+
+       rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+       msr_bits &= ~auto_demotion_disable_flags;
+       wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
  * intel_idle_probe()
  */
@@ -328,11 +340,17 @@ static int intel_idle_probe(void)
        case 0x25:      /* Westmere */
        case 0x2C:      /* Westmere */
                cpuidle_state_table = nehalem_cstates;
+               auto_demotion_disable_flags =
+                       (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
                break;
 
        case 0x1C:      /* 28 - Atom Processor */
+               cpuidle_state_table = atom_cstates;
+               break;
+
        case 0x26:      /* 38 - Lincroft Atom Processor */
                cpuidle_state_table = atom_cstates;
+               auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
                break;
 
        case 0x2A:      /* SNB */
@@ -440,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
                        return -EIO;
                }
        }
+       if (auto_demotion_disable_flags)
+               smp_call_function(auto_demotion_disable, NULL, 1);
 
        return 0;
 }
index 8aba0ba..e0ef5fd 100644 (file)
@@ -183,20 +183,15 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 {
        __be32 src_ip = src_in->sin_addr.s_addr;
        __be32 dst_ip = dst_in->sin_addr.s_addr;
-       struct flowi fl;
        struct rtable *rt;
        struct neighbour *neigh;
        int ret;
 
-       memset(&fl, 0, sizeof fl);
-       fl.nl_u.ip4_u.daddr = dst_ip;
-       fl.nl_u.ip4_u.saddr = src_ip;
-       fl.oif = addr->bound_dev_if;
-
-       ret = ip_route_output_key(&init_net, &rt, &fl);
-       if (ret)
+       rt = ip_route_output(&init_net, dst_ip, src_ip, 0, addr->bound_dev_if);
+       if (IS_ERR(rt)) {
+               ret = PTR_ERR(rt);
                goto out;
-
+       }
        src_in->sin_family = AF_INET;
        src_in->sin_addr.s_addr = rt->rt_src;
 
@@ -236,28 +231,28 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                         struct sockaddr_in6 *dst_in,
                         struct rdma_dev_addr *addr)
 {
-       struct flowi fl;
+       struct flowi6 fl6;
        struct neighbour *neigh;
        struct dst_entry *dst;
        int ret;
 
-       memset(&fl, 0, sizeof fl);
-       ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
-       ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
-       fl.oif = addr->bound_dev_if;
+       memset(&fl6, 0, sizeof fl6);
+       ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
+       ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+       fl6.flowi6_oif = addr->bound_dev_if;
 
-       dst = ip6_route_output(&init_net, NULL, &fl);
+       dst = ip6_route_output(&init_net, NULL, &fl6);
        if ((ret = dst->error))
                goto put;
 
-       if (ipv6_addr_any(&fl.fl6_src)) {
+       if (ipv6_addr_any(&fl6.saddr)) {
                ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
-                                        &fl.fl6_dst, 0, &fl.fl6_src);
+                                        &fl6.daddr, 0, &fl6.saddr);
                if (ret)
                        goto put;
 
                src_in->sin6_family = AF_INET6;
-               ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
+               ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
        }
 
        if (dst->dev->flags & IFF_LOOPBACK) {
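
The routing-lookup conversions in this series (here and in the cxgb3, cxgb4, and nes hunks further down) all rely on the same kernel idiom: ip_route_output() and ip_route_output_ports() return either a valid struct rtable pointer or an errno encoded in the pointer value, so callers test with IS_ERR() and decode with PTR_ERR() instead of checking an int return code. A self-contained userspace sketch of how that encoding works; the kernel's real helpers live in <linux/err.h>:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Userspace mock of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_route(int fail)
{
        static int route = 42;          /* stand-in for struct rtable */
        return fail ? ERR_PTR(-ENETUNREACH) : (void *)&route;
}

int main(void)
{
        void *rt = lookup_route(1);

        if (IS_ERR(rt))
                printf("lookup failed: %ld\n", PTR_ERR(rt)); /* prints -101 */
        else
                printf("route found\n");
        return 0;
}
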
index e38be1b..fbbfa24 100644 (file)
@@ -1079,7 +1079,7 @@ static void ib_sa_remove_one(struct ib_device *device)
 
        ib_unregister_event_handler(&sa_dev->event_handler);
 
-       flush_scheduled_work();
+       flush_workqueue(ib_wq);
 
        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
                if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
index ca12acf..ec1e9da 100644 (file)
@@ -636,6 +636,16 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
        }
 }
 
+static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
+                              struct rdma_route *route)
+{
+       struct rdma_dev_addr *dev_addr;
+
+       dev_addr = &route->addr.dev_addr;
+       rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
+       rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
@@ -670,8 +680,10 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
-       if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
-               switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+       switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+       case RDMA_TRANSPORT_IB:
+               switch (rdma_port_get_link_layer(ctx->cm_id->device,
+                       ctx->cm_id->port_num)) {
                case IB_LINK_LAYER_INFINIBAND:
                        ucma_copy_ib_route(&resp, &ctx->cm_id->route);
                        break;
@@ -681,6 +693,12 @@ static ssize_t ucma_query_route(struct ucma_file *file,
                default:
                        break;
                }
+               break;
+       case RDMA_TRANSPORT_IWARP:
+               ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+               break;
+       default:
+               break;
        }
 
 out:
index 9ce7819..2ec716f 100644 (file)
@@ -107,7 +107,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
        r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
        if (r) {
                init_waitqueue_head(&r->wait_object);
-               r->reply_msg = (u64) NULL;
+               r->reply_msg = 0;
                r->event = 0;
                r->cm_id = NULL;
                r->qp = NULL;
@@ -123,7 +123,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
  */
 void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
-       r->reply_msg = (u64) NULL;
+       r->reply_msg = 0;
        if (atomic_dec_and_test(&r->refcnt)) {
                kfree(r);
        }
@@ -151,7 +151,7 @@ void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
 void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
        if (atomic_dec_and_test(&r->refcnt)) {
-               if (r->reply_msg != (u64) NULL)
+               if (r->reply_msg != 0)
                        vq_repbuf_free(c2dev,
                                       (void *) (unsigned long) r->reply_msg);
                kfree(r);
index d02dcc6..3216bca 100644 (file)
@@ -338,23 +338,11 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
                                 __be16 peer_port, u8 tos)
 {
        struct rtable *rt;
-       struct flowi fl = {
-               .oif = 0,
-               .nl_u = {
-                        .ip4_u = {
-                                  .daddr = peer_ip,
-                                  .saddr = local_ip,
-                                  .tos = tos}
-                        },
-               .proto = IPPROTO_TCP,
-               .uli_u = {
-                         .ports = {
-                                   .sport = local_port,
-                                   .dport = peer_port}
-                         }
-       };
-
-       if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+
+       rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
+                                  peer_port, local_port, IPPROTO_TCP,
+                                  tos, 0);
+       if (IS_ERR(rt))
                return NULL;
        return rt;
 }
index 0dc62b1..97a876a 100644 (file)
@@ -315,23 +315,11 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
                                 __be16 peer_port, u8 tos)
 {
        struct rtable *rt;
-       struct flowi fl = {
-               .oif = 0,
-               .nl_u = {
-                        .ip4_u = {
-                                  .daddr = peer_ip,
-                                  .saddr = local_ip,
-                                  .tos = tos}
-                        },
-               .proto = IPPROTO_TCP,
-               .uli_u = {
-                         .ports = {
-                                   .sport = local_port,
-                                   .dport = peer_port}
-                         }
-       };
-
-       if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+
+       rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
+                                  peer_port, local_port, IPPROTO_TCP,
+                                  tos, 0);
+       if (IS_ERR(rt))
                return NULL;
        return rt;
 }
@@ -380,7 +368,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
                                          16)) | FW_WR_FLOWID(ep->hwtid));
 
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-       flowc->mnemval[0].val = cpu_to_be32(0);
+       flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
index 2080090..4f0be25 100644 (file)
@@ -220,7 +220,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(2) |
-               V_FW_RI_RES_WR_FBMAX(3) |
+               V_FW_RI_RES_WR_FBMAX(2) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(2) |
-               V_FW_RI_RES_WR_FBMAX(3) |
+               V_FW_RI_RES_WR_FBMAX(2) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
index 3b4ec32..3d7f366 100644 (file)
@@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
                                nesdev, nesdev->netdev[0]->name);
                netdev = nesdev->netdev[0];
                nesvnic = netdev_priv(netdev);
-               is_bonded = (netdev->master == event_netdev);
+               is_bonded = netif_is_bond_slave(netdev) &&
+                           (netdev->master == event_netdev);
                if ((netdev == event_netdev) || is_bonded) {
                        if (nesvnic->rdma_enabled == 0) {
                                nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
index 009ec81..ef32915 100644 (file)
@@ -1104,21 +1104,19 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
 static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
 {
        struct rtable *rt;
-       struct flowi fl;
        struct neighbour *neigh;
        int rc = arpindex;
        struct net_device *netdev;
        struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
 
-       memset(&fl, 0, sizeof fl);
-       fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-       if (ip_route_output_key(&init_net, &rt, &fl)) {
+       rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+       if (IS_ERR(rt)) {
                printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
                                __func__, dst_ip);
                return rc;
        }
 
-       if (nesvnic->netdev->master)
+       if (netif_is_bond_slave(netdev))
                netdev = nesvnic->netdev->master;
        else
                netdev = nesvnic->netdev;
index 8b606fd..08c1948 100644 (file)
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                        netif_carrier_on(nesvnic->netdev);
 
                                        spin_lock(&nesvnic->port_ibevent_lock);
-                                       if (nesdev->iw_status == 0) {
-                                               nesdev->iw_status = 1;
-                                               nes_port_ibevent(nesvnic);
+                                       if (nesvnic->of_device_registered) {
+                                               if (nesdev->iw_status == 0) {
+                                                       nesdev->iw_status = 1;
+                                                       nes_port_ibevent(nesvnic);
+                                               }
                                        }
                                        spin_unlock(&nesvnic->port_ibevent_lock);
                                }
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                        netif_carrier_off(nesvnic->netdev);
 
                                        spin_lock(&nesvnic->port_ibevent_lock);
-                                       if (nesdev->iw_status == 1) {
-                                               nesdev->iw_status = 0;
-                                               nes_port_ibevent(nesvnic);
+                                       if (nesvnic->of_device_registered) {
+                                               if (nesdev->iw_status == 1) {
+                                                       nesdev->iw_status = 0;
+                                                       nes_port_ibevent(nesvnic);
+                                               }
                                        }
                                        spin_unlock(&nesvnic->port_ibevent_lock);
                                }
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
                                netif_carrier_on(nesvnic->netdev);
 
                                spin_lock(&nesvnic->port_ibevent_lock);
-                               if (nesdev->iw_status == 0) {
-                                       nesdev->iw_status = 1;
-                                       nes_port_ibevent(nesvnic);
+                               if (nesvnic->of_device_registered) {
+                                       if (nesdev->iw_status == 0) {
+                                               nesdev->iw_status = 1;
+                                               nes_port_ibevent(nesvnic);
+                                       }
                                }
                                spin_unlock(&nesvnic->port_ibevent_lock);
                        }
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
                                netif_carrier_off(nesvnic->netdev);
 
                                spin_lock(&nesvnic->port_ibevent_lock);
-                               if (nesdev->iw_status == 1) {
-                                       nesdev->iw_status = 0;
-                                       nes_port_ibevent(nesvnic);
+                               if (nesvnic->of_device_registered) {
+                                       if (nesdev->iw_status == 1) {
+                                               nesdev->iw_status = 0;
+                                               nes_port_ibevent(nesvnic);
+                                       }
                                }
                                spin_unlock(&nesvnic->port_ibevent_lock);
                        }
index 50cceb3..b01809a 100644 (file)
@@ -623,7 +623,6 @@ struct qib_chippport_specific {
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
-       u8 bounced;
 };
 
 static struct {
@@ -1881,23 +1880,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                    IB_PHYSPORTSTATE_DISABLED)
                        qib_set_ib_7322_lstate(ppd, 0,
                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-               else {
-                       u32 lstate;
-                       /*
-                        * We need the current logical link state before
-                        * lflags are set in handle_e_ibstatuschanged.
-                        */
-                       lstate = qib_7322_iblink_state(ibcs);
-
-                       if (IS_QMH(dd) && !ppd->cpspec->bounced &&
-                           ltstate == IB_PHYSPORTSTATE_LINKUP &&
-                           (lstate >= IB_PORT_INIT &&
-                               lstate <= IB_PORT_ACTIVE)) {
-                               ppd->cpspec->bounced = 1;
-                               qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
-                                       IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
-                       }
-
+               else
                        /*
                         * Since going into a recovery state causes the link
                         * state to go down and since recovery is transitory,
@@ -1911,7 +1894,6 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
                                qib_handle_e_ibstatuschanged(ppd, ibcs);
-               }
        }
        if (*msg && iserr)
                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2381,6 +2363,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 
+       /* Hold the link state machine for mezz boards */
+       if (IS_QMH(dd) || IS_QME(dd))
+               qib_set_ib_7322_lstate(ppd, 0,
+                                      QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
        /* Also enable IBSTATUSCHG interrupt.  */
        val = qib_read_kreg_port(ppd, krp_errmask);
        qib_write_kreg_port(ppd, krp_errmask,
@@ -5702,6 +5689,11 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
                                ppd->cpspec->h1_val = h1;
                        /* now change the IBC and serdes, overriding generic */
                        init_txdds_table(ppd, 1);
+                       /* Re-enable the physical state machine on mezz boards
+                        * now that the correct settings have been set. */
+                       if (IS_QMH(dd) || IS_QME(dd))
+                               qib_set_ib_7322_lstate(ppd, 0,
+                                           QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
                        any++;
                }
                if (*nxt == '\n')
index 8245237..eca0c41 100644 (file)
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
         * there are still requests that haven't been acked.
         */
        if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-           !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+           !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+           (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
                start_timer(qp);
 
        while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
        }
 
        spin_lock_irqsave(&qp->s_lock, flags);
+       if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+               goto ack_done;
 
        /* Ignore invalid responses. */
        if (qib_cmp24(psn, qp->s_next_psn) >= 0)
index 23cf8fc..5b8f59d 100644 (file)
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
        event->owner = owner;
 
        list_add_tail(&event->node, &gameport_event_list);
-       schedule_work(&gameport_event_work);
+       queue_work(system_long_wq, &gameport_event_work);
 
 out:
        spin_unlock_irqrestore(&gameport_event_lock, flags);
index 7985114..11905b6 100644 (file)
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
-                            struct input_handler *src_handler,
                             unsigned int type, unsigned int code, int value)
 {
        struct input_handler *handler;
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev,
                                continue;
 
                        handler = handle->handler;
-
-                       /*
-                        * If this is the handler that injected this
-                        * particular event we want to skip it to avoid
-                        * filters firing again and again.
-                        */
-                       if (handler == src_handler)
-                               continue;
-
                        if (!handler->filter) {
                                if (filtered)
                                        break;
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data)
        if (test_bit(dev->repeat_key, dev->key) &&
            is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-               input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
+               input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
 
                if (dev->sync) {
                        /*
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data)
                         * Otherwise assume that the driver will send
                         * SYN_REPORT once it's done.
                         */
-                       input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+                       input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
                }
 
                if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev)
 #define INPUT_PASS_TO_ALL      (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
-                                 struct input_handler *src_handler,
                                  unsigned int code, int *pval)
 {
        bool is_mt_event;
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev,
        /* Flush pending "slot" event */
        if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
                input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-               input_pass_event(dev, src_handler,
-                                EV_ABS, ABS_MT_SLOT, dev->slot);
+               input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
        }
 
        return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
-                              struct input_handler *src_handler,
                               unsigned int type, unsigned int code, int value)
 {
        int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev,
 
        case EV_ABS:
                if (is_event_supported(code, dev->absbit, ABS_MAX))
-                       disposition = input_handle_abs_event(dev, src_handler,
-                                                            code, &value);
+                       disposition = input_handle_abs_event(dev, code, &value);
 
                break;
 
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev,
                dev->event(dev, type, code, value);
 
        if (disposition & INPUT_PASS_TO_HANDLERS)
-               input_pass_event(dev, src_handler, type, code, value);
+               input_pass_event(dev, type, code, value);
 }
 
 /**
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev,
 
                spin_lock_irqsave(&dev->event_lock, flags);
                add_input_randomness(type, code, value);
-               input_handle_event(dev, NULL, type, code, value);
+               input_handle_event(dev, type, code, value);
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 }
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle,
                rcu_read_lock();
                grab = rcu_dereference(dev->grab);
                if (!grab || grab == handle)
-                       input_handle_event(dev, handle->handler,
-                                          type, code, value);
+                       input_handle_event(dev, type, code, value);
                rcu_read_unlock();
 
                spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev)
                for (code = 0; code <= KEY_MAX; code++) {
                        if (is_event_supported(code, dev->keybit, KEY_MAX) &&
                            __test_and_clear_bit(code, dev->key)) {
-                               input_pass_event(dev, NULL, EV_KEY, code, 0);
+                               input_pass_event(dev, EV_KEY, code, 0);
                        }
                }
-               input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+               input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
        }
 }
 
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev,
            !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
            __test_and_clear_bit(old_keycode, dev->key)) {
 
-               input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
+               input_pass_event(dev, EV_KEY, old_keycode, 0);
                if (dev->sync)
-                       input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+                       input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
        }
 
  out:
index 4175073..c7a9202 100644 (file)
@@ -343,6 +343,16 @@ config KEYBOARD_NOMADIK
          To compile this driver as a module, choose M here: the
          module will be called nmk-ske-keypad.
 
+config KEYBOARD_TEGRA
+       tristate "NVIDIA Tegra internal matrix keyboard controller support"
+       depends on ARCH_TEGRA
+       help
+         Say Y here if you want to use a matrix keyboard connected directly
+         to the internal keyboard controller on Tegra SoCs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called tegra-kbc.
+
 config KEYBOARD_OPENCORES
        tristate "OpenCores Keyboard Controller"
        help
index 4e5571b..468c627 100644 (file)
@@ -42,6 +42,7 @@ obj-$(CONFIG_KEYBOARD_STMPE)          += stmpe-keypad.o
 obj-$(CONFIG_KEYBOARD_STOWAWAY)                += stowaway.o
 obj-$(CONFIG_KEYBOARD_SUNKBD)          += sunkbd.o
 obj-$(CONFIG_KEYBOARD_TC3589X)         += tc3589x-keypad.o
+obj-$(CONFIG_KEYBOARD_TEGRA)           += tegra-kbc.o
 obj-$(CONFIG_KEYBOARD_TNETV107X)       += tnetv107x-keypad.o
 obj-$(CONFIG_KEYBOARD_TWL4030)         += twl4030_keypad.o
 obj-$(CONFIG_KEYBOARD_XTKBD)           += xtkbd.o
index 6069abe..eb30063 100644 (file)
@@ -322,7 +322,7 @@ static void gpio_keys_report_event(struct gpio_button_data *bdata)
        struct gpio_keys_button *button = bdata->button;
        struct input_dev *input = bdata->input;
        unsigned int type = button->type ?: EV_KEY;
-       int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
+       int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
 
        input_event(input, type, button->code, !!state);
        input_sync(input);
@@ -410,8 +410,8 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
        if (!button->can_disable)
                irqflags |= IRQF_SHARED;
 
-       error = request_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
-       if (error) {
+       error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
+       if (error < 0) {
                dev_err(dev, "Unable to claim irq %d; error %d\n",
                        irq, error);
                goto fail3;
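
The error check above changes from 'if (error)' to 'if (error < 0)' because request_any_context_irq() does not return 0 on success as request_irq() does: it returns a negative errno on failure and otherwise a positive value, IRQC_IS_HARDIRQ or IRQC_IS_NESTED, indicating whether the handler will run in hard-IRQ or threaded context. A fragment sketching the calling convention (not a complete driver; gpio_keys_isr, desc and bdata stand for the driver's own handler, name and cookie):

        ret = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
        if (ret < 0) {                  /* only negative values are errors */
                dev_err(dev, "Unable to claim irq %d; error %d\n", irq, ret);
                goto fail;
        }
        if (ret == IRQC_IS_NESTED)      /* handler runs from an irq thread */
                dev_dbg(dev, "irq %d uses a nested (threaded) handler\n", irq);
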
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
new file mode 100644 (file)
index 0000000..99ce903
--- /dev/null
@@ -0,0 +1,783 @@
+/*
+ * Keyboard class input driver for the NVIDIA Tegra SoC internal matrix
+ * keyboard controller
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <mach/clk.h>
+#include <mach/kbc.h>
+
+#define KBC_MAX_DEBOUNCE_CNT   0x3ffu
+
+/* KBC row scan time and delay for beginning the row scan. */
+#define KBC_ROW_SCAN_TIME      16
+#define KBC_ROW_SCAN_DLY       5
+
+/* KBC uses a 32 kHz clock, so one cycle = 1/32 kHz */
+#define KBC_CYCLE_USEC 32
+
+/* KBC Registers */
+
+/* KBC Control Register */
+#define KBC_CONTROL_0  0x0
+#define KBC_FIFO_TH_CNT_SHIFT(cnt)     (cnt << 14)
+#define KBC_DEBOUNCE_CNT_SHIFT(cnt)    (cnt << 4)
+#define KBC_CONTROL_FIFO_CNT_INT_EN    (1 << 3)
+#define KBC_CONTROL_KBC_EN             (1 << 0)
+
+/* KBC Interrupt Register */
+#define KBC_INT_0      0x4
+#define KBC_INT_FIFO_CNT_INT_STATUS    (1 << 2)
+
+#define KBC_ROW_CFG0_0 0x8
+#define KBC_COL_CFG0_0 0x18
+#define KBC_INIT_DLY_0 0x28
+#define KBC_RPT_DLY_0  0x2c
+#define KBC_KP_ENT0_0  0x30
+#define KBC_KP_ENT1_0  0x34
+#define KBC_ROW0_MASK_0        0x38
+
+#define KBC_ROW_SHIFT  3
+
+struct tegra_kbc {
+       void __iomem *mmio;
+       struct input_dev *idev;
+       unsigned int irq;
+       unsigned int wake_enable_rows;
+       unsigned int wake_enable_cols;
+       spinlock_t lock;
+       unsigned int repoll_dly;
+       unsigned long cp_dly_jiffies;
+       bool use_fn_map;
+       const struct tegra_kbc_platform_data *pdata;
+       unsigned short keycode[KBC_MAX_KEY * 2];
+       unsigned short current_keys[KBC_MAX_KPENT];
+       unsigned int num_pressed_keys;
+       struct timer_list timer;
+       struct clk *clk;
+};
+
+static const u32 tegra_kbc_default_keymap[] = {
+       KEY(0, 2, KEY_W),
+       KEY(0, 3, KEY_S),
+       KEY(0, 4, KEY_A),
+       KEY(0, 5, KEY_Z),
+       KEY(0, 7, KEY_FN),
+
+       KEY(1, 7, KEY_LEFTMETA),
+
+       KEY(2, 6, KEY_RIGHTALT),
+       KEY(2, 7, KEY_LEFTALT),
+
+       KEY(3, 0, KEY_5),
+       KEY(3, 1, KEY_4),
+       KEY(3, 2, KEY_R),
+       KEY(3, 3, KEY_E),
+       KEY(3, 4, KEY_F),
+       KEY(3, 5, KEY_D),
+       KEY(3, 6, KEY_X),
+
+       KEY(4, 0, KEY_7),
+       KEY(4, 1, KEY_6),
+       KEY(4, 2, KEY_T),
+       KEY(4, 3, KEY_H),
+       KEY(4, 4, KEY_G),
+       KEY(4, 5, KEY_V),
+       KEY(4, 6, KEY_C),
+       KEY(4, 7, KEY_SPACE),
+
+       KEY(5, 0, KEY_9),
+       KEY(5, 1, KEY_8),
+       KEY(5, 2, KEY_U),
+       KEY(5, 3, KEY_Y),
+       KEY(5, 4, KEY_J),
+       KEY(5, 5, KEY_N),
+       KEY(5, 6, KEY_B),
+       KEY(5, 7, KEY_BACKSLASH),
+
+       KEY(6, 0, KEY_MINUS),
+       KEY(6, 1, KEY_0),
+       KEY(6, 2, KEY_O),
+       KEY(6, 3, KEY_I),
+       KEY(6, 4, KEY_L),
+       KEY(6, 5, KEY_K),
+       KEY(6, 6, KEY_COMMA),
+       KEY(6, 7, KEY_M),
+
+       KEY(7, 1, KEY_EQUAL),
+       KEY(7, 2, KEY_RIGHTBRACE),
+       KEY(7, 3, KEY_ENTER),
+       KEY(7, 7, KEY_MENU),
+
+       KEY(8, 4, KEY_RIGHTSHIFT),
+       KEY(8, 5, KEY_LEFTSHIFT),
+
+       KEY(9, 5, KEY_RIGHTCTRL),
+       KEY(9, 7, KEY_LEFTCTRL),
+
+       KEY(11, 0, KEY_LEFTBRACE),
+       KEY(11, 1, KEY_P),
+       KEY(11, 2, KEY_APOSTROPHE),
+       KEY(11, 3, KEY_SEMICOLON),
+       KEY(11, 4, KEY_SLASH),
+       KEY(11, 5, KEY_DOT),
+
+       KEY(12, 0, KEY_F10),
+       KEY(12, 1, KEY_F9),
+       KEY(12, 2, KEY_BACKSPACE),
+       KEY(12, 3, KEY_3),
+       KEY(12, 4, KEY_2),
+       KEY(12, 5, KEY_UP),
+       KEY(12, 6, KEY_PRINT),
+       KEY(12, 7, KEY_PAUSE),
+
+       KEY(13, 0, KEY_INSERT),
+       KEY(13, 1, KEY_DELETE),
+       KEY(13, 3, KEY_PAGEUP),
+       KEY(13, 4, KEY_PAGEDOWN),
+       KEY(13, 5, KEY_RIGHT),
+       KEY(13, 6, KEY_DOWN),
+       KEY(13, 7, KEY_LEFT),
+
+       KEY(14, 0, KEY_F11),
+       KEY(14, 1, KEY_F12),
+       KEY(14, 2, KEY_F8),
+       KEY(14, 3, KEY_Q),
+       KEY(14, 4, KEY_F4),
+       KEY(14, 5, KEY_F3),
+       KEY(14, 6, KEY_1),
+       KEY(14, 7, KEY_F7),
+
+       KEY(15, 0, KEY_ESC),
+       KEY(15, 1, KEY_GRAVE),
+       KEY(15, 2, KEY_F5),
+       KEY(15, 3, KEY_TAB),
+       KEY(15, 4, KEY_F1),
+       KEY(15, 5, KEY_F2),
+       KEY(15, 6, KEY_CAPSLOCK),
+       KEY(15, 7, KEY_F6),
+
+       /* Software Handled Function Keys */
+       KEY(20, 0, KEY_KP7),
+
+       KEY(21, 0, KEY_KP9),
+       KEY(21, 1, KEY_KP8),
+       KEY(21, 2, KEY_KP4),
+       KEY(21, 4, KEY_KP1),
+
+       KEY(22, 1, KEY_KPSLASH),
+       KEY(22, 2, KEY_KP6),
+       KEY(22, 3, KEY_KP5),
+       KEY(22, 4, KEY_KP3),
+       KEY(22, 5, KEY_KP2),
+       KEY(22, 7, KEY_KP0),
+
+       KEY(27, 1, KEY_KPASTERISK),
+       KEY(27, 3, KEY_KPMINUS),
+       KEY(27, 4, KEY_KPPLUS),
+       KEY(27, 5, KEY_KPDOT),
+
+       KEY(28, 5, KEY_VOLUMEUP),
+
+       KEY(29, 3, KEY_HOME),
+       KEY(29, 4, KEY_END),
+       KEY(29, 5, KEY_BRIGHTNESSDOWN),
+       KEY(29, 6, KEY_VOLUMEDOWN),
+       KEY(29, 7, KEY_BRIGHTNESSUP),
+
+       KEY(30, 0, KEY_NUMLOCK),
+       KEY(30, 1, KEY_SCROLLLOCK),
+       KEY(30, 2, KEY_MUTE),
+
+       KEY(31, 4, KEY_HELP),
+};
+
+static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+       .keymap         = tegra_kbc_default_keymap,
+       .keymap_size    = ARRAY_SIZE(tegra_kbc_default_keymap),
+};
+
+static void tegra_kbc_report_released_keys(struct input_dev *input,
+                                          unsigned short old_keycodes[],
+                                          unsigned int old_num_keys,
+                                          unsigned short new_keycodes[],
+                                          unsigned int new_num_keys)
+{
+       unsigned int i, j;
+
+       for (i = 0; i < old_num_keys; i++) {
+               for (j = 0; j < new_num_keys; j++)
+                       if (old_keycodes[i] == new_keycodes[j])
+                               break;
+
+               if (j == new_num_keys)
+                       input_report_key(input, old_keycodes[i], 0);
+       }
+}
+
+static void tegra_kbc_report_pressed_keys(struct input_dev *input,
+                                         unsigned char scancodes[],
+                                         unsigned short keycodes[],
+                                         unsigned int num_pressed_keys)
+{
+       unsigned int i;
+
+       for (i = 0; i < num_pressed_keys; i++) {
+               input_event(input, EV_MSC, MSC_SCAN, scancodes[i]);
+               input_report_key(input, keycodes[i], 1);
+       }
+}
+
+static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
+{
+       unsigned char scancodes[KBC_MAX_KPENT];
+       unsigned short keycodes[KBC_MAX_KPENT];
+       u32 val = 0;
+       unsigned int i;
+       unsigned int num_down = 0;
+       unsigned long flags;
+       bool fn_keypress = false;
+
+       spin_lock_irqsave(&kbc->lock, flags);
+       for (i = 0; i < KBC_MAX_KPENT; i++) {
+               if ((i % 4) == 0)
+                       val = readl(kbc->mmio + KBC_KP_ENT0_0 + i);
+
+               if (val & 0x80) {
+                       unsigned int col = val & 0x07;
+                       unsigned int row = (val >> 3) & 0x0f;
+                       unsigned char scancode =
+                               MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
+
+                       scancodes[num_down] = scancode;
+                       keycodes[num_down] = kbc->keycode[scancode];
+                       /* If the driver uses an Fn map, do not report the Fn key. */
+                       if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+                               fn_keypress = true;
+                       else
+                               num_down++;
+               }
+
+               val >>= 8;
+       }
+
+       /*
+        * If the platform uses Fn keymaps, translate keys on an Fn keypress.
+        * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+        */
+       if (fn_keypress) {
+               for (i = 0; i < num_down; i++) {
+                       scancodes[i] += KBC_MAX_KEY;
+                       keycodes[i] = kbc->keycode[scancodes[i]];
+               }
+       }
+
+       spin_unlock_irqrestore(&kbc->lock, flags);
+
+       tegra_kbc_report_released_keys(kbc->idev,
+                                      kbc->current_keys, kbc->num_pressed_keys,
+                                      keycodes, num_down);
+       tegra_kbc_report_pressed_keys(kbc->idev, scancodes, keycodes, num_down);
+       input_sync(kbc->idev);
+
+       memcpy(kbc->current_keys, keycodes, sizeof(kbc->current_keys));
+       kbc->num_pressed_keys = num_down;
+}
+
+static void tegra_kbc_keypress_timer(unsigned long data)
+{
+       struct tegra_kbc *kbc = (struct tegra_kbc *)data;
+       unsigned long flags;
+       u32 val;
+       unsigned int i;
+
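+       /* Bits 7:4 of KBC_INT_0 report how many entries are in the key FIFO. */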
+       val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
+       if (val) {
+               unsigned long dly;
+
+               tegra_kbc_report_keys(kbc);
+
+               /*
+        * If more than one key is pressed we need not wait
+        * for the repoll delay.
+                */
+               dly = (val == 1) ? kbc->repoll_dly : 1;
+               mod_timer(&kbc->timer, jiffies + msecs_to_jiffies(dly));
+       } else {
+               /* Release any pressed keys and exit the polling loop */
+               for (i = 0; i < kbc->num_pressed_keys; i++)
+                       input_report_key(kbc->idev, kbc->current_keys[i], 0);
+               input_sync(kbc->idev);
+
+               kbc->num_pressed_keys = 0;
+
+               /* All keys are released so enable the keypress interrupt */
+               spin_lock_irqsave(&kbc->lock, flags);
+               val = readl(kbc->mmio + KBC_CONTROL_0);
+               val |= KBC_CONTROL_FIFO_CNT_INT_EN;
+               writel(val, kbc->mmio + KBC_CONTROL_0);
+               spin_unlock_irqrestore(&kbc->lock, flags);
+       }
+}
+
+static irqreturn_t tegra_kbc_isr(int irq, void *args)
+{
+       struct tegra_kbc *kbc = args;
+       u32 val, ctl;
+
+       /*
+        * Until all keys are released, defer further processing to
+        * the polling loop in tegra_kbc_keypress_timer
+        */
+       ctl = readl(kbc->mmio + KBC_CONTROL_0);
+       ctl &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
+       writel(ctl, kbc->mmio + KBC_CONTROL_0);
+
+       /*
+        * Quickly bail out & reenable interrupts if the fifo threshold
+        * count interrupt wasn't the interrupt source
+        */
+       val = readl(kbc->mmio + KBC_INT_0);
+       writel(val, kbc->mmio + KBC_INT_0);
+
+       if (val & KBC_INT_FIFO_CNT_INT_STATUS) {
+               /*
+                * Schedule timer to run when hardware is in continuous
+                * polling mode.
+                */
+               mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+       } else {
+               ctl |= KBC_CONTROL_FIFO_CNT_INT_EN;
+               writel(ctl, kbc->mmio + KBC_CONTROL_0);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
+{
+       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+       int i;
+       unsigned int rst_val;
+
+       BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
+       rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
+
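+       /*
+        * When filtering, every row mask register is written with all ones
+        * first; the bits for the configured wake keys are cleared again in
+        * the loop below.
+        */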
+       for (i = 0; i < KBC_MAX_ROW; i++)
+               writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
+
+       if (filter) {
+               for (i = 0; i < pdata->wake_cnt; i++) {
+                       u32 val, addr;
+                       addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
+                       val = readl(kbc->mmio + addr);
+                       val &= ~(1 << pdata->wake_cfg[i].col);
+                       writel(val, kbc->mmio + addr);
+               }
+       }
+}
+
+static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
+{
+       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+       int i;
+
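+       /*
+        * Row configuration fields are 5 bits wide and packed six per
+        * register; column fields are 4 bits wide and packed eight per
+        * register. Bit 0 of each field enables the pin, the upper bits
+        * select the row or column number.
+        */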
+       for (i = 0; i < KBC_MAX_GPIO; i++) {
+               u32 r_shft = 5 * (i % 6);
+               u32 c_shft = 4 * (i % 8);
+               u32 r_mask = 0x1f << r_shft;
+               u32 c_mask = 0x0f << c_shft;
+               u32 r_offs = (i / 6) * 4 + KBC_ROW_CFG0_0;
+               u32 c_offs = (i / 8) * 4 + KBC_COL_CFG0_0;
+               u32 row_cfg = readl(kbc->mmio + r_offs);
+               u32 col_cfg = readl(kbc->mmio + c_offs);
+
+               row_cfg &= ~r_mask;
+               col_cfg &= ~c_mask;
+
+               if (pdata->pin_cfg[i].is_row)
+                       row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+               else
+                       col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+
+               writel(row_cfg, kbc->mmio + r_offs);
+               writel(col_cfg, kbc->mmio + c_offs);
+       }
+}
+
+static int tegra_kbc_start(struct tegra_kbc *kbc)
+{
+       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+       unsigned long flags;
+       unsigned int debounce_cnt;
+       u32 val = 0;
+
+       clk_enable(kbc->clk);
+
+       /* Reset the KBC controller to clear all previous status. */
+       tegra_periph_reset_assert(kbc->clk);
+       udelay(100);
+       tegra_periph_reset_deassert(kbc->clk);
+       udelay(100);
+
+       tegra_kbc_config_pins(kbc);
+       tegra_kbc_setup_wakekeys(kbc, false);
+
+       writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+
+       /* The keyboard debounce count is a maximum of 12 bits. */
+       debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+       val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
+       val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
+       val |= KBC_CONTROL_FIFO_CNT_INT_EN;  /* interrupt on FIFO threshold */
+       val |= KBC_CONTROL_KBC_EN;     /* enable */
+       writel(val, kbc->mmio + KBC_CONTROL_0);
+
+       /*
+        * Compute the delay (in microseconds) from interrupt mode to
+        * continuous polling mode so the timer routine is scheduled
+        * appropriately.
+        */
+       val = readl(kbc->mmio + KBC_INIT_DLY_0);
+       kbc->cp_dly_jiffies = usecs_to_jiffies((val & 0xfffff) * 32);
+
+       kbc->num_pressed_keys = 0;
+
+       /*
+        * Atomically clear out any remaining entries in the key FIFO
+        * and enable keyboard interrupts.
+        */
+       spin_lock_irqsave(&kbc->lock, flags);
+       while (1) {
+               val = readl(kbc->mmio + KBC_INT_0);
+               val >>= 4;
+               if (!val)
+                       break;
+
+               val = readl(kbc->mmio + KBC_KP_ENT0_0);
+               val = readl(kbc->mmio + KBC_KP_ENT1_0);
+       }
+       writel(0x7, kbc->mmio + KBC_INT_0);
+       spin_unlock_irqrestore(&kbc->lock, flags);
+
+       enable_irq(kbc->irq);
+
+       return 0;
+}
+
+static void tegra_kbc_stop(struct tegra_kbc *kbc)
+{
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&kbc->lock, flags);
+       val = readl(kbc->mmio + KBC_CONTROL_0);
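+       /* Clear the KBC enable bit (bit 0) to stop the controller. */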
+       val &= ~1;
+       writel(val, kbc->mmio + KBC_CONTROL_0);
+       spin_unlock_irqrestore(&kbc->lock, flags);
+
+       disable_irq(kbc->irq);
+       del_timer_sync(&kbc->timer);
+
+       clk_disable(kbc->clk);
+}
+
+static int tegra_kbc_open(struct input_dev *dev)
+{
+       struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+       return tegra_kbc_start(kbc);
+}
+
+static void tegra_kbc_close(struct input_dev *dev)
+{
+       struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+       return tegra_kbc_stop(kbc);
+}
+
+static bool __devinit
+tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
+                       struct device *dev, unsigned int *num_rows)
+{
+       int i;
+
+       *num_rows = 0;
+
+       for (i = 0; i < KBC_MAX_GPIO; i++) {
+               const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+
+               if (pin_cfg->is_row) {
+                       if (pin_cfg->num >= KBC_MAX_ROW) {
+                               dev_err(dev,
+                                       "pin_cfg[%d]: invalid row number %d\n",
+                                       i, pin_cfg->num);
+                               return false;
+                       }
+                       (*num_rows)++;
+               } else {
+                       if (pin_cfg->num >= KBC_MAX_COL) {
+                               dev_err(dev,
+                                       "pin_cfg[%d]: invalid column number %d\n",
+                                       i, pin_cfg->num);
+                               return false;
+                       }
+               }
+       }
+
+       return true;
+}
+
+static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+{
+       const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
+       const struct matrix_keymap_data *keymap_data;
+       struct tegra_kbc *kbc;
+       struct input_dev *input_dev;
+       struct resource *res;
+       int irq;
+       int err;
+       int i;
+       int num_rows = 0;
+       unsigned int debounce_cnt;
+       unsigned int scan_time_rows;
+
+       if (!pdata)
+               return -EINVAL;
+
+       if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               return -ENXIO;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
+               return -ENXIO;
+       }
+
+       kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
+       input_dev = input_allocate_device();
+       if (!kbc || !input_dev) {
+               err = -ENOMEM;
+               goto err_free_mem;
+       }
+
+       kbc->pdata = pdata;
+       kbc->idev = input_dev;
+       kbc->irq = irq;
+       spin_lock_init(&kbc->lock);
+       setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
+
+       res = request_mem_region(res->start, resource_size(res), pdev->name);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to request I/O memory\n");
+               err = -EBUSY;
+               goto err_free_mem;
+       }
+
+       kbc->mmio = ioremap(res->start, resource_size(res));
+       if (!kbc->mmio) {
+               dev_err(&pdev->dev, "failed to remap I/O memory\n");
+               err = -ENXIO;
+               goto err_free_mem_region;
+       }
+
+       kbc->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(kbc->clk)) {
+               dev_err(&pdev->dev, "failed to get keyboard clock\n");
+               err = PTR_ERR(kbc->clk);
+               goto err_iounmap;
+       }
+
+       kbc->wake_enable_rows = 0;
+       kbc->wake_enable_cols = 0;
+       for (i = 0; i < pdata->wake_cnt; i++) {
+               kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
+               kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
+       }
+
+       /*
+        * The time delay between two consecutive reads of the FIFO is
+        * the sum of the repeat time and the time taken for scanning
+        * the rows. There is an additional delay before the row scanning
+        * starts. The repoll delay is computed in milliseconds.
+        */
+       debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+       scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
+       kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+       kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
+
+       input_dev->name = pdev->name;
+       input_dev->id.bustype = BUS_HOST;
+       input_dev->dev.parent = &pdev->dev;
+       input_dev->open = tegra_kbc_open;
+       input_dev->close = tegra_kbc_close;
+
+       input_set_drvdata(input_dev, kbc);
+
+       input_dev->evbit[0] = BIT_MASK(EV_KEY);
+       input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+
+       input_dev->keycode = kbc->keycode;
+       input_dev->keycodesize = sizeof(kbc->keycode[0]);
+       input_dev->keycodemax = KBC_MAX_KEY;
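+       /* Fn keycodes occupy a second bank of KBC_MAX_KEY entries. */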
+       if (pdata->use_fn_map)
+               input_dev->keycodemax *= 2;
+
+       kbc->use_fn_map = pdata->use_fn_map;
+       keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
+       matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
+                                  input_dev->keycode, input_dev->keybit);
+
+       err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
+                         pdev->name, kbc);
+       if (err) {
+               dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
+               goto err_put_clk;
+       }
+
+       disable_irq(kbc->irq);
+
+       err = input_register_device(kbc->idev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to register input device\n");
+               goto err_free_irq;
+       }
+
+       platform_set_drvdata(pdev, kbc);
+       device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+       return 0;
+
+err_free_irq:
+       free_irq(kbc->irq, kbc);
+err_put_clk:
+       clk_put(kbc->clk);
+err_iounmap:
+       iounmap(kbc->mmio);
+err_free_mem_region:
+       release_mem_region(res->start, resource_size(res));
+err_free_mem:
+       input_free_device(input_dev);
+       kfree(kbc);
+
+       return err;
+}
+
+static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+{
+       struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+       struct resource *res;
+
+       free_irq(kbc->irq, kbc);
+       clk_put(kbc->clk);
+
+       input_unregister_device(kbc->idev);
+       iounmap(kbc->mmio);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+
+       kfree(kbc);
+
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_kbc_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+
+       if (device_may_wakeup(&pdev->dev)) {
+               tegra_kbc_setup_wakekeys(kbc, true);
+               enable_irq_wake(kbc->irq);
+               /* Forcefully clear the interrupt status */
+               writel(0x7, kbc->mmio + KBC_INT_0);
+               msleep(30);
+       } else {
+               mutex_lock(&kbc->idev->mutex);
+               if (kbc->idev->users)
+                       tegra_kbc_stop(kbc);
+               mutex_unlock(&kbc->idev->mutex);
+       }
+
+       return 0;
+}
+
+static int tegra_kbc_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+       int err = 0;
+
+       if (device_may_wakeup(&pdev->dev)) {
+               disable_irq_wake(kbc->irq);
+               tegra_kbc_setup_wakekeys(kbc, false);
+       } else {
+               mutex_lock(&kbc->idev->mutex);
+               if (kbc->idev->users)
+                       err = tegra_kbc_start(kbc);
+               mutex_unlock(&kbc->idev->mutex);
+       }
+
+       return err;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+
+static struct platform_driver tegra_kbc_driver = {
+       .probe          = tegra_kbc_probe,
+       .remove         = __devexit_p(tegra_kbc_remove),
+       .driver = {
+               .name   = "tegra-kbc",
+               .owner  = THIS_MODULE,
+               .pm     = &tegra_kbc_pm_ops,
+       },
+};
+
+static void __exit tegra_kbc_exit(void)
+{
+       platform_driver_unregister(&tegra_kbc_driver);
+}
+module_exit(tegra_kbc_exit);
+
+static int __init tegra_kbc_init(void)
+{
+       return platform_driver_register(&tegra_kbc_driver);
+}
+module_init(tegra_kbc_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
+MODULE_DESCRIPTION("Tegra matrix keyboard controller driver");
+MODULE_ALIAS("platform:tegra-kbc");
index b4a81eb..c8f097a 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
@@ -219,9 +220,9 @@ static int __devinit keypad_probe(struct platform_device *pdev)
        }
 
        kp->clk = clk_get(dev, NULL);
-       if (!kp->clk) {
+       if (IS_ERR(kp->clk)) {
                dev_err(dev, "cannot claim device clock\n");
-               error = -EINVAL;
+               error = PTR_ERR(kp->clk);
                goto error_clk;
        }
 
index 9dfd6e5..1f38302 100644 (file)
@@ -69,11 +69,7 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
        }
 
        if (value > 20 && value < 32767)
-#ifndef FREQ
-               count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
-#else
-               count = (FREQ / (value * 4)) - 1;
-#endif
+               count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
 
        ixp4xx_spkr_control(pin, count);
 
index 1f8e010..7e64d01 100644 (file)
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 
        /* request the IRQs */
        err = request_irq(encoder->irq_a, &rotary_encoder_irq,
-                         IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+                         IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                          DRV_NAME, encoder);
        if (err) {
                dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
        }
 
        err = request_irq(encoder->irq_b, &rotary_encoder_irq,
-                         IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+                         IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                          DRV_NAME, encoder);
        if (err) {
                dev_err(&pdev->dev, "unable to request IRQ %d\n",
index da392c2..aa186cf 100644 (file)
@@ -755,23 +755,26 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        struct synaptics_data old_priv = *priv;
+       int retry = 0;
+       int error;
 
-       psmouse_reset(psmouse);
+       do {
+               psmouse_reset(psmouse);
+               error = synaptics_detect(psmouse, 0);
+       } while (error && ++retry < 3);
 
-       if (synaptics_detect(psmouse, 0))
+       if (error)
                return -1;
 
+       if (retry > 1)
+               printk(KERN_DEBUG "Synaptics reconnected after %d tries\n",
+                       retry);
+
        if (synaptics_query_hardware(psmouse)) {
                printk(KERN_ERR "Unable to query Synaptics hardware.\n");
                return -1;
        }
 
-       if (old_priv.identity != priv->identity ||
-           old_priv.model_id != priv->model_id ||
-           old_priv.capabilities != priv->capabilities ||
-           old_priv.ext_cap != priv->ext_cap)
-               return -1;
-
        if (synaptics_set_absolute_mode(psmouse)) {
                printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
                return -1;
@@ -782,6 +785,19 @@ static int synaptics_reconnect(struct psmouse *psmouse)
                return -1;
        }
 
+       if (old_priv.identity != priv->identity ||
+           old_priv.model_id != priv->model_id ||
+           old_priv.capabilities != priv->capabilities ||
+           old_priv.ext_cap != priv->ext_cap) {
+               printk(KERN_ERR "Synaptics hardware appears to be different: "
+                       "id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
+                       old_priv.identity, priv->identity,
+                       old_priv.model_id, priv->model_id,
+                       old_priv.capabilities, priv->capabilities,
+                       old_priv.ext_cap, priv->ext_cap);
+               return -1;
+       }
+
        return 0;
 }
 
index 25e5d04..7453938 100644 (file)
 #define SYN_EXT_CAP_REQUESTS(c)                (((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)    (((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)         (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte        mask    name                    meaning
+ * ----        ----    -------                 ------------
+ * 1   0x01    adjustable threshold    capacitive button sensitivity
+ *                                     can be adjusted
+ * 1   0x02    report max              query 0x0d gives max coord reported
+ * 1   0x04    clearpad                sensor is ClearPad product
+ * 1   0x08    advanced gesture        not particularly meaningful
+ * 1   0x10    clickpad bit 0          1-button ClickPad
+ * 1   0x60    multifinger mode        identifies firmware finger counting
+ *                                     (not reporting!) algorithm.
+ *                                     Not particularly meaningful
+ * 1   0x80    covered pad             W clipped to 14, 15 == pad mostly covered
+ * 2   0x01    clickpad bit 1          2-button ClickPad
+ * 2   0x02    deluxe LED controls     touchpad supports LED commands
+ *                                     a la the multimedia control bar
+ * 2   0x04    reduced filtering       firmware does less filtering on
+ *                                     position data, driver should watch
+ *                                     for noise.
+ */
 #define SYN_CAP_CLICKPAD(ex0c)         ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)     ((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c)   ((ex0c) & 0x020000)
index 448c772..8528165 100644 (file)
@@ -111,9 +111,11 @@ static void ct82c710_close(struct serio *serio)
 static int ct82c710_open(struct serio *serio)
 {
        unsigned char status;
+       int err;
 
-       if (request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL))
-               return -1;
+       err = request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL);
+       if (err)
+               return err;
 
        status = inb_p(CT82C710_STATUS);
 
@@ -131,7 +133,7 @@ static int ct82c710_open(struct serio *serio)
                status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON);
                outb_p(status, CT82C710_STATUS);
                free_irq(CT82C710_IRQ, NULL);
-               return -1;
+               return -EBUSY;
        }
 
        return 0;
index db5b0bc..ba70058 100644 (file)
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event)
        kfree(event);
 }
 
-static void serio_remove_duplicate_events(struct serio_event *event)
+static void serio_remove_duplicate_events(void *object,
+                                         enum serio_event_type type)
 {
        struct serio_event *e, *next;
        unsigned long flags;
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event)
        spin_lock_irqsave(&serio_event_lock, flags);
 
        list_for_each_entry_safe(e, next, &serio_event_list, node) {
-               if (event->object == e->object) {
+               if (object == e->object) {
                        /*
                         * If this event is of different type we should not
                         * look further - we only suppress duplicate events
                         * that were sent back-to-back.
                         */
-                       if (event->type != e->type)
+                       if (type != e->type)
                                break;
 
                        list_del_init(&e->node);
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work)
                        break;
                }
 
-               serio_remove_duplicate_events(event);
+               serio_remove_duplicate_events(event->object, event->type);
                serio_free_event(event);
        }
 
@@ -298,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
        event->owner = owner;
 
        list_add_tail(&event->node, &serio_event_list);
-       schedule_work(&serio_event_work);
+       queue_work(system_long_wq, &serio_event_work);
 
 out:
        spin_unlock_irqrestore(&serio_event_lock, flags);
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
        } else if (!strncmp(buf, "rescan", count)) {
                serio_disconnect_port(serio);
                serio_find_driver(serio);
+               serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
        } else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
                serio_disconnect_port(serio);
                error = serio_bind_driver(serio, to_serio_driver(drv));
                put_driver(drv);
+               serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
        } else {
                error = -EINVAL;
        }
index 6e362de..8755f5f 100644 (file)
@@ -116,14 +116,15 @@ static void serport_ldisc_close(struct tty_struct *tty)
 
 /*
  * serport_ldisc_receive() is called by the low level tty driver when characters
- * are ready for us. We forward the characters, one by one to the 'interrupt'
- * routine.
+ * are ready for us. We forward the characters and flags, one by one to the
+ * 'interrupt' routine.
  */
 
 static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
        struct serport *serport = (struct serport*) tty->disc_data;
        unsigned long flags;
+       unsigned int ch_flags;
        int i;
 
        spin_lock_irqsave(&serport->lock, flags);
@@ -131,8 +132,23 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
        if (!test_bit(SERPORT_ACTIVE, &serport->flags))
                goto out;
 
-       for (i = 0; i < count; i++)
-               serio_interrupt(serport->serio, cp[i], 0);
+       for (i = 0; i < count; i++) {
+               switch (fp[i]) {
+               case TTY_FRAME:
+                       ch_flags = SERIO_FRAME;
+                       break;
+
+               case TTY_PARITY:
+                       ch_flags = SERIO_PARITY;
+                       break;
+
+               default:
+                       ch_flags = 0;
+                       break;
+               }
+
+               serio_interrupt(serport->serio, cp[i], ch_flags);
+       }
 
 out:
        spin_unlock_irqrestore(&serport->lock, flags);
index a29a781..7729e54 100644 (file)
@@ -201,6 +201,7 @@ int sparse_keymap_setup(struct input_dev *dev,
                        break;
 
                case KE_SW:
+               case KE_VSW:
                        __set_bit(EV_SW, dev->evbit);
                        __set_bit(entry->sw.code, dev->swbit);
                        break;
index fc38149..cf8fb9f 100644 (file)
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
        /* Retrieve the physical and logical size for OEM devices */
        error = wacom_retrieve_hid_descriptor(intf, features);
        if (error)
-               goto fail2;
+               goto fail3;
 
        wacom_setup_device_quirks(features);
 
index 5187829..367fa82 100644 (file)
@@ -1101,6 +1101,13 @@ void wacom_setup_device_quirks(struct wacom_features *features)
        }
 }
 
+static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
+                                             unsigned int physical_max)
+{
+       /* Touch physical dimensions are in 100ths of a mm */
+       return (logical_max * 100) / physical_max;
+}
+
 void wacom_setup_input_capabilities(struct input_dev *input_dev,
                                    struct wacom_wac *wacom_wac)
 {
@@ -1228,8 +1235,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
        case TABLETPC:
                if (features->device_type == BTN_TOOL_DOUBLETAP ||
                    features->device_type == BTN_TOOL_TRIPLETAP) {
-                       input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
-                       input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+                       input_abs_set_res(input_dev, ABS_X,
+                               wacom_calculate_touch_res(features->x_max,
+                                                       features->x_phy));
+                       input_abs_set_res(input_dev, ABS_Y,
+                               wacom_calculate_touch_res(features->y_max,
+                                                       features->y_phy));
                        __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
                }
 
@@ -1272,6 +1283,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                        input_set_abs_params(input_dev, ABS_MT_PRESSURE,
                                             0, features->pressure_max,
                                             features->pressure_fuzz, 0);
+                       input_abs_set_res(input_dev, ABS_X,
+                               wacom_calculate_touch_res(features->x_max,
+                                                       features->x_phy));
+                       input_abs_set_res(input_dev, ABS_Y,
+                               wacom_calculate_touch_res(features->y_max,
+                                                       features->y_phy));
                } else if (features->device_type == BTN_TOOL_PEN) {
                        __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
                        __set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1426,6 +1443,10 @@ static struct wacom_features wacom_features_0xD3 =
        { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN,     21648, 13530, 1023, 63, BAMBOO_PT };
 static const struct wacom_features wacom_features_0xD4 =
        { "Wacom Bamboo Pen",     WACOM_PKGLEN_BBFUN,     14720,  9200,  255, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD6 =
+       { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN,   14720,  9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD7 =
+       { "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720,  9200, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xD8 =
        { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN,   21648, 13530, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xDA =
@@ -1507,6 +1528,8 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xD2) },
        { USB_DEVICE_WACOM(0xD3) },
        { USB_DEVICE_WACOM(0xD4) },
+       { USB_DEVICE_WACOM(0xD6) },
+       { USB_DEVICE_WACOM(0xD7) },
        { USB_DEVICE_WACOM(0xD8) },
        { USB_DEVICE_WACOM(0xDA) },
        { USB_DEVICE_WACOM(0xDB) },
index 14ea54b..4bf2316 100644 (file)
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
        struct ads7846_platform_data *pdata = spi->dev.platform_data;
        int err;
 
-       /* REVISIT when the irq can be triggered active-low, or if for some
+       /*
+        * REVISIT when the irq can be triggered active-low, or if for some
         * reason the touchscreen isn't hooked up, we don't need to access
         * the pendown state.
         */
-       if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
-               dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
-               return -EINVAL;
-       }
 
        if (pdata->get_pendown_state) {
                ts->get_pendown_state = pdata->get_pendown_state;
-               return 0;
-       }
+       } else if (gpio_is_valid(pdata->gpio_pendown)) {
 
-       err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
-       if (err) {
-               dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
-                       pdata->gpio_pendown);
-               return err;
-       }
+               err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+               if (err) {
+                       dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+                               pdata->gpio_pendown);
+                       return err;
+               }
 
-       ts->gpio_pendown = pdata->gpio_pendown;
+               ts->gpio_pendown = pdata->gpio_pendown;
+
+       } else {
+               dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+               return -EINVAL;
+       }
 
        return 0;
 }
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
  err_put_regulator:
        regulator_put(ts->reg);
  err_free_gpio:
-       if (ts->gpio_pendown != -1)
+       if (!ts->get_pendown_state)
                gpio_free(ts->gpio_pendown);
  err_cleanup_filter:
        if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi)
        regulator_disable(ts->reg);
        regulator_put(ts->reg);
 
-       if (ts->gpio_pendown != -1)
+       if (!ts->get_pendown_state) {
+               /*
+                * If we are not using a specialized pendown method, we must
+                * have been relying on the GPIO we set up ourselves.
+                */
                gpio_free(ts->gpio_pendown);
+       }
 
        if (ts->filter_cleanup)
                ts->filter_cleanup(ts->filter_data);
index f7fa9ef..1507ce1 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input.h>
 #include <linux/input/bu21013.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
 #define PEN_DOWN_INTR  0
 #define MAX_FINGERS    2
  * @chip: pointer to the touch panel controller
  * @in_dev: pointer to the input device structure
  * @intr_pin: interrupt pin value
+ * @regulator: pointer to the Regulator used for touch screen
  *
  * Touch panel device data structure
  */
@@ -149,6 +151,7 @@ struct bu21013_ts_data {
        const struct bu21013_platform_device *chip;
        struct input_dev *in_dev;
        unsigned int intr_pin;
+       struct regulator *regulator;
 };
 
 /**
@@ -456,6 +459,20 @@ static int __devinit bu21013_probe(struct i2c_client *client,
        bu21013_data->in_dev = in_dev;
        bu21013_data->chip = pdata;
        bu21013_data->client = client;
+
+       bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+       if (IS_ERR(bu21013_data->regulator)) {
+               dev_err(&client->dev, "regulator_get failed\n");
+               error = PTR_ERR(bu21013_data->regulator);
+               goto err_free_mem;
+       }
+
+       error = regulator_enable(bu21013_data->regulator);
+       if (error < 0) {
+               dev_err(&client->dev, "regulator enable failed\n");
+               goto err_put_regulator;
+       }
+
        bu21013_data->touch_stopped = false;
        init_waitqueue_head(&bu21013_data->wait);
 
@@ -464,7 +481,7 @@ static int __devinit bu21013_probe(struct i2c_client *client,
                error = pdata->cs_en(pdata->cs_pin);
                if (error < 0) {
                        dev_err(&client->dev, "chip init failed\n");
-                       goto err_free_mem;
+                       goto err_disable_regulator;
                }
        }
 
@@ -485,9 +502,9 @@ static int __devinit bu21013_probe(struct i2c_client *client,
        __set_bit(EV_ABS, in_dev->evbit);
 
        input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
-                                               pdata->x_max_res, 0, 0);
+                                               pdata->touch_x_max, 0, 0);
        input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
-                                               pdata->y_max_res, 0, 0);
+                                               pdata->touch_y_max, 0, 0);
        input_set_drvdata(in_dev, bu21013_data);
 
        error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
@@ -513,6 +530,10 @@ err_free_irq:
        bu21013_free_irq(bu21013_data);
 err_cs_disable:
        pdata->cs_dis(pdata->cs_pin);
+err_disable_regulator:
+       regulator_disable(bu21013_data->regulator);
+err_put_regulator:
+       regulator_put(bu21013_data->regulator);
 err_free_mem:
        input_free_device(in_dev);
        kfree(bu21013_data);
@@ -535,6 +556,10 @@ static int __devexit bu21013_remove(struct i2c_client *client)
        bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
 
        input_unregister_device(bu21013_data->in_dev);
+
+       regulator_disable(bu21013_data->regulator);
+       regulator_put(bu21013_data->regulator);
+
        kfree(bu21013_data);
 
        device_init_wakeup(&client->dev, false);
@@ -561,6 +586,8 @@ static int bu21013_suspend(struct device *dev)
        else
                disable_irq(bu21013_data->chip->irq);
 
+       regulator_disable(bu21013_data->regulator);
+
        return 0;
 }
 
@@ -577,6 +604,12 @@ static int bu21013_resume(struct device *dev)
        struct i2c_client *client = bu21013_data->client;
        int retval;
 
+       retval = regulator_enable(bu21013_data->regulator);
+       if (retval < 0) {
+               dev_err(&client->dev, "bu21013 regulator enable failed\n");
+               return retval;
+       }
+
        retval = bu21013_init_chip(bu21013_data);
        if (retval < 0) {
                dev_err(&client->dev, "bu21013 controller config failed\n");
index cf1dba2..22a3411 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
@@ -289,9 +290,9 @@ static int __devinit tsc_probe(struct platform_device *pdev)
        }
 
        ts->clk = clk_get(dev, NULL);
-       if (!ts->clk) {
+       if (IS_ERR(ts->clk)) {
                dev_err(dev, "cannot claim device clock\n");
-               error = -EINVAL;
+               error = PTR_ERR(ts->clk);
                goto error_clk;
        }
 
index 5cb8449..c14412e 100644 (file)
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
 #define W8001_PKTLEN_TPCCTL    11      /* control packet */
 #define W8001_PKTLEN_TOUCH2FG  13
 
+/* resolution in points/mm */
+#define W8001_PEN_RESOLUTION    100
+#define W8001_TOUCH_RESOLUTION  10
+
 struct w8001_coord {
        u8 rdy;
        u8 tsw;
@@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
                query->y = 1024;
                if (query->panel_res)
                        query->x = query->y = (1 << query->panel_res);
-               query->panel_res = 10;
+               query->panel_res = W8001_TOUCH_RESOLUTION;
        }
 }
 
@@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001)
 
                input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
                input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+               input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
+               input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
                input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
                if (coord.tilt_x && coord.tilt_y) {
                        input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
@@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001)
                w8001->max_touch_x = touch.x;
                w8001->max_touch_y = touch.y;
 
-               /* scale to pen maximum */
                if (w8001->max_pen_x && w8001->max_pen_y) {
+                       /* if pen is supported scale to pen maximum */
                        touch.x = w8001->max_pen_x;
                        touch.y = w8001->max_pen_y;
+                       touch.panel_res = W8001_PEN_RESOLUTION;
                }
 
                input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
                input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
+               input_abs_set_res(dev, ABS_X, touch.panel_res);
+               input_abs_set_res(dev, ABS_Y, touch.panel_res);
 
                switch (touch.sensor_id) {
                case 0:
index 18f8798..7bd5baa 100644 (file)
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
   stream interface.
   If synchronous service was requested, then function
   does return amount of data written to stream.
-  'final' does indicate that pice of data to be written is
+  'final' does indicate that piece of data to be written is
   final part of frame (necessary only by structured datatransfer)
   return  0 if zero length packet was written
   return -1 if stream is full
index 0858791..cfff0c4 100644 (file)
@@ -1247,10 +1247,10 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
        struct PStack *st = fi->userdata;
-       struct sk_buff *skb, *oskb;
+       struct sk_buff *skb;
        struct Layer2 *l2 = &st->l2;
        u_char header[MAX_HEADER_LEN];
-       int i;
+       int i, hdr_space_needed;
        int unsigned p1;
        u_long flags;
 
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
        if (!skb)
                return;
 
+       hdr_space_needed = l2headersize(l2, 0);
+       if (hdr_space_needed > skb_headroom(skb)) {
+               struct sk_buff *orig_skb = skb;
+
+               skb = skb_realloc_headroom(skb, hdr_space_needed);
+               if (!skb) {
+                       dev_kfree_skb(orig_skb);
+                       return;
+               }
+       }
        spin_lock_irqsave(&l2->lock, flags);
        if(test_bit(FLG_MOD128, &l2->flag))
                p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                l2->vs = (l2->vs + 1) % 8;
        }
        spin_unlock_irqrestore(&l2->lock, flags);
-       p1 = skb->data - skb->head;
-       if (p1 >= i)
-               memcpy(skb_push(skb, i), header, i);
-       else {
-               printk(KERN_WARNING
-               "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-               oskb = skb;
-               skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-               memcpy(skb_put(skb, i), header, i);
-               skb_copy_from_linear_data(oskb,
-                                         skb_put(skb, oskb->len), oskb->len);
-               dev_kfree_skb(oskb);
-       }
+       memcpy(skb_push(skb, i), header, i);
        st->l2.l2l1(st, PH_PULL | INDICATION, skb);
        test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
        if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
index 729df40..18b801a 100644 (file)
@@ -227,7 +227,6 @@ extern hysdn_card *card_root;       /* pointer to first card */
 /*************************/
 /* im/exported functions */
 /*************************/
-extern char *hysdn_getrev(const char *);
 
 /* hysdn_procconf.c */
 extern int hysdn_procconf_init(void);  /* init proc config filesys */
@@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
 
 /* hysdn_net.c */
 extern unsigned int hynet_enable; 
-extern char *hysdn_net_revision;
 extern int hysdn_net_create(hysdn_card *);     /* create a new net device */
 extern int hysdn_net_release(hysdn_card *);    /* delete the device */
 extern char *hysdn_net_getname(hysdn_card *);  /* get name of net interface */
index b7cc5c2..0ab42ac 100644 (file)
@@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
 MODULE_AUTHOR("Werner Cornelius");
 MODULE_LICENSE("GPL");
 
-static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
 static int cardmax;            /* number of found cards */
 hysdn_card *card_root = NULL;  /* pointer to first card */
 static hysdn_card *card_last = NULL;   /* pointer to first card */
@@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
 /* Additionally newer versions may be activated without rebooting.          */
 /****************************************************************************/
 
-/******************************************************/
-/* extract revision number from string for log output */
-/******************************************************/
-char *
-hysdn_getrev(const char *revision)
-{
-       char *rev;
-       char *p;
-
-       if ((p = strchr(revision, ':'))) {
-               rev = p + 2;
-               p = strchr(rev, '$');
-               *--p = 0;
-       } else
-               rev = "???";
-       return rev;
-}
-
-
 /****************************************************************************/
 /* init_module is called once when the module is loaded to do all necessary */
 /* things like autodetect...                                                */
@@ -175,13 +155,9 @@ static int hysdn_have_procfs;
 static int __init
 hysdn_init(void)
 {
-       char tmp[50];
        int rc;
 
-       strcpy(tmp, hysdn_init_revision);
-       printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
-       strcpy(tmp, hysdn_net_revision);
-       printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
+       printk(KERN_NOTICE "HYSDN: module loaded\n");
 
        rc = pci_register_driver(&hysdn_pci_driver);
        if (rc)
index feec8d8..11f2cce 100644 (file)
@@ -26,9 +26,6 @@
 unsigned int hynet_enable = 0xffffffff; 
 module_param(hynet_enable, uint, 0);
 
-/* store the actual version for log reporting */
-char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
-
 #define MAX_SKB_BUFFERS 20     /* number of buffers for keeping TX-data */
 
 /****************************************************************************/
index 96b3e39..5fe83bd 100644 (file)
@@ -23,7 +23,6 @@
 #include "hysdn_defs.h"
 
 static DEFINE_MUTEX(hysdn_conf_mutex);
-static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
 
 #define INFO_OUT_LEN 80                /* length of info line including lf */
 
@@ -404,7 +403,7 @@ hysdn_procconf_init(void)
                card = card->next;      /* next entry */
        }
 
-       printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision));
+       printk(KERN_NOTICE "HYSDN: procfs initialised\n");
        return (0);
 }                              /* hysdn_procconf_init */
 
index f2b5bab..1f355bb 100644 (file)
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
 static int __init icn_init(void)
 {
        char *p;
-       char rev[20];
+       char rev[21];
 
        memset(&dev, 0, sizeof(icn_dev));
        dev.memaddr = (membase & 0x0ffc000);
@@ -1638,6 +1638,7 @@ static int __init icn_init(void)
 
        if ((p = strchr(revision, ':'))) {
                strncpy(rev, p + 1, 20);
+               rev[20] = '\0';
                p = strchr(rev, '$');
                if (p)
                        *p = 0;
index da3fa8d..666daf7 100644 (file)
@@ -69,6 +69,7 @@ static int led_pwm_probe(struct platform_device *pdev)
                led_dat->pwm = pwm_request(cur_led->pwm_id,
                                cur_led->name);
                if (IS_ERR(led_dat->pwm)) {
+                       ret = PTR_ERR(led_dat->pwm);
                        dev_err(&pdev->dev, "unable to request PWM %d\n",
                                        cur_led->pwm_id);
                        goto err;
index 049eaf1..1f23e04 100644 (file)
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
                return;
 
        spin_lock(&receiving_list_lock);
index 8a2f767..0ed7f6b 100644 (file)
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
 
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
-       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
        conf = linear_conf(mddev, mddev->raid_disks);
 
        if (!conf)
index b76cfc8..818313e 100644 (file)
@@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
        mddev_t *mddev = q->queuedata;
        int rv;
        int cpu;
+       unsigned int sectors;
 
        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
@@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();
 
+       /*
+        * save the sectors now since our bio can
+        * go away inside make_request
+        */
+       sectors = bio_sectors(bio);
        rv = mddev->pers->make_request(mddev, bio);
 
        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-                     bio_sectors(bio));
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
        part_stat_unlock();
 
        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -548,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
        mddev_t *mddev, *new = NULL;
 
+       if (unit && MAJOR(unit) != MD_MAJOR)
+               unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
        spin_lock(&all_mddevs_lock);
 
@@ -1947,8 +1955,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
                        __bdevname(dev, b));
                return PTR_ERR(bdev);
        }
-       if (!shared)
-               set_bit(AllReserved, &rdev->flags);
        rdev->bdev = bdev;
        return err;
 }
@@ -2465,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                if (rdev->raid_disk != -1)
                        return -EBUSY;
 
+               if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+                       return -EBUSY;
+
                if (rdev->mddev->pers->hot_add_disk == NULL)
                        return -EINVAL;
 
@@ -2610,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
                        mddev_lock(mddev);
                        list_for_each_entry(rdev2, &mddev->disks, same_set)
-                               if (test_bit(AllReserved, &rdev2->flags) ||
-                                   (rdev->bdev == rdev2->bdev &&
-                                    rdev != rdev2 &&
-                                    overlaps(rdev->data_offset, rdev->sectors,
-                                             rdev2->data_offset,
-                                             rdev2->sectors))) {
+                               if (rdev->bdev == rdev2->bdev &&
+                                   rdev != rdev2 &&
+                                   overlaps(rdev->data_offset, rdev->sectors,
+                                            rdev2->data_offset,
+                                            rdev2->sectors)) {
                                        overlap = 1;
                                        break;
                                }
@@ -4133,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
        }
 
        mddev->array_sectors = sectors;
-       set_capacity(mddev->gendisk, mddev->array_sectors);
-       if (mddev->pers)
+       if (mddev->pers) {
+               set_capacity(mddev->gendisk, mddev->array_sectors);
                revalidate_disk(mddev->gendisk);
-
+       }
        return len;
 }
 
@@ -4619,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
        }
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
+       mddev->changed = 1;
        kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
        return err;
@@ -4707,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
        mddev->sync_speed_min = mddev->sync_speed_max = 0;
        mddev->recovery = 0;
        mddev->in_sync = 0;
+       mddev->changed = 0;
        mddev->degraded = 0;
        mddev->safemode = 0;
        mddev->bitmap_info.offset = 0;
@@ -4822,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
                set_capacity(disk, 0);
                mutex_unlock(&mddev->open_mutex);
+               mddev->changed = 1;
                revalidate_disk(disk);
 
                if (mddev->ro)
@@ -5578,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
        mddev->delta_disks = raid_disks - mddev->raid_disks;
 
        rv = mddev->pers->check_reshape(mddev);
+       if (rv < 0)
+               mddev->delta_disks = 0;
        return rv;
 }
 
@@ -6004,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
        atomic_inc(&mddev->openers);
        mutex_unlock(&mddev->open_mutex);
 
-       check_disk_size_change(mddev->gendisk, bdev);
+       check_disk_change(bdev);
  out:
        return err;
 }
@@ -6019,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 
        return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+       mddev_t *mddev = disk->private_data;
+
+       return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+       mddev_t *mddev = disk->private_data;
+
+       mddev->changed = 0;
+       return 0;
+}
 static const struct block_device_operations md_fops =
 {
        .owner          = THIS_MODULE,
@@ -6029,6 +6057,8 @@ static const struct block_device_operations md_fops =
        .compat_ioctl   = md_compat_ioctl,
 #endif
        .getgeo         = md_getgeo,
+       .media_changed  = md_media_changed,
+       .revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
@@ -6985,9 +7015,6 @@ void md_do_sync(mddev_t *mddev)
        } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                mddev->resync_min = mddev->curr_resync_completed;
        mddev->curr_resync = 0;
-       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-               mddev->curr_resync_completed = 0;
-       sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
@@ -7028,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                        }
                }
 
-       if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+       if (mddev->degraded && !mddev->recovery_disabled) {
                list_for_each_entry(rdev, &mddev->disks, same_set) {
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(In_sync, &rdev->flags) &&
@@ -7151,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev)
                        /* Only thing we do on a ro array is remove
                         * failed devices.
                         */
-                       remove_and_add_spares(mddev);
+                       mdk_rdev_t *rdev;
+                       list_for_each_entry(rdev, &mddev->disks, same_set)
+                               if (rdev->raid_disk >= 0 &&
+                                   !test_bit(Blocked, &rdev->flags) &&
+                                   test_bit(Faulty, &rdev->flags) &&
+                                   atomic_read(&rdev->nr_pending)==0) {
+                                       if (mddev->pers->hot_remove_disk(
+                                                   mddev, rdev->raid_disk)==0) {
+                                               char nm[20];
+                                               sprintf(nm,"rd%d", rdev->raid_disk);
+                                               sysfs_remove_link(&mddev->kobj, nm);
+                                               rdev->raid_disk = -1;
+                                       }
+                               }
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        goto unlock;
                }
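
The md_open()/md_fops hunks above wire md into the block layer's media-change path: md_media_changed() reports mddev->changed and md_revalidate() clears it, and md_open() now calls check_disk_change() so a re-assembled array gets its partition table re-read on the next open. A minimal sketch of the contract being relied on follows; it is an illustration only, not the real fs/block_dev.c helper, and it assumes mddev->changed is set elsewhere in this series when the array contents may have changed.

	/*
	 * Illustrative sketch only -- the real helper lives in fs/block_dev.c.
	 * It shows how the two operations added above cooperate.
	 */
	static int sketch_check_disk_change(struct block_device *bdev)
	{
		struct gendisk *disk = bdev->bd_disk;
		const struct block_device_operations *ops = disk->fops;

		if (!ops->media_changed || !ops->media_changed(disk))
			return 0;		/* md_media_changed(): mddev->changed was 0 */
		/* block core invalidates cached buffers/partitions here, then: */
		if (ops->revalidate_disk)
			ops->revalidate_disk(disk);	/* md_revalidate(): clears mddev->changed */
		return 1;
	}
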
index eec517c..12215d4 100644 (file)
@@ -93,8 +93,6 @@ struct mdk_rdev_s
 #define        Faulty          1               /* device is known to have a fault */
 #define        In_sync         2               /* device is in_sync with rest of array */
 #define        WriteMostly     4               /* Avoid reading if at all possible */
-#define        AllReserved     6               /* If whole device is reserved for
-                                        * one array */
 #define        AutoDetected    7               /* added by auto-detect */
 #define Blocked                8               /* An error occurred on an externally
                                         * managed array, don't allow writes
@@ -276,6 +274,8 @@ struct mddev_s
        atomic_t                        active;         /* general refcount */
        atomic_t                        openers;        /* number of active opens */
 
+       int                             changed;        /* True if we might need to
+                                                        * reread partition info */
        int                             degraded;       /* whether md should consider
                                                         * adding a spare
                                                         */
index 6d7ddf3..3a62d44 100644 (file)
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
         * bookkeeping area. [whatever we allocate in multipath_run(),
         * should be freed in multipath_stop()]
         */
-       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
        conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
        mddev->private = conf;
index a39f4c3..c0ac457 100644 (file)
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
                        rdev1->new_raid_disk = j;
                }
 
+               if (mddev->level == 1) {
+                       /* taking over a raid1 array -
+                        * we have only one active disk
+                        */
+                       j = 0;
+                       rdev1->new_raid_disk = j;
+               }
+
                if (j < 0 || j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
@@ -353,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
@@ -644,12 +651,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
        return priv_conf;
 }
 
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+       raid0_conf_t *priv_conf;
+
+       /* Check layout:
+        *  - (N - 1) mirror drives must already be faulty
+        */
+       if ((mddev->raid_disks - 1) != mddev->degraded) {
+               printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+                      mdname(mddev));
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* Set new parameters */
+       mddev->new_level = 0;
+       mddev->new_layout = 0;
+       mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+       mddev->delta_disks = 1 - mddev->raid_disks;
+       mddev->raid_disks = 1;
+       /* make sure it will be not marked as dirty */
+       mddev->recovery_cp = MaxSector;
+
+       create_strip_zones(mddev, &priv_conf);
+       return priv_conf;
+}
+
 static void *raid0_takeover(mddev_t *mddev)
 {
        /* raid0 can take over:
         *  raid4 - if all data disks are active.
         *  raid5 - providing it is Raid4 layout and one disk is faulty
         *  raid10 - assuming we have all necessary active disks
+        *  raid1 - with (N - 1) mirror drives faulty
         */
        if (mddev->level == 4)
                return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev)
        if (mddev->level == 10)
                return raid0_takeover_raid10(mddev);
 
+       if (mddev->level == 1)
+               return raid0_takeover_raid1(mddev);
+
+       printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+               mddev->level);
+
        return ERR_PTR(-EINVAL);
 }
 
index a23ffa3..06cd712 100644 (file)
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
+               /* Only take the spinlock to quiet a warning */
+               spin_lock(conf->mddev->queue->queue_lock);
                blk_remove_plug(conf->mddev->queue);
+               spin_unlock(conf->mddev->queue->queue_lock);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to
                 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                atomic_inc(&r1_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
-               blk_plug_device(mddev->queue);
+               blk_plug_device_unlocked(mddev->queue);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       mddev->queue->queue_lock = &conf->device_lock;
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
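
The plugging hunks above (repeated for raid10 just below) exist because md no longer points q->queue_lock at conf->device_lock: blk_plug_device() expects the queue lock to be held, so make_request() switches to the unlocked variant, and flush_pending_writes() now takes the queue lock briefly around blk_remove_plug() only to satisfy that expectation. A sketch of what blk_plug_device_unlocked() is assumed to do, based on the block core of this era rather than quoted from it:

	/* Assumed shape of the helper used above: it supplies the locking that
	 * blk_plug_device() expects, so callers that hold conf->device_lock
	 * instead of q->queue_lock can still plug the queue safely.
	 */
	void sketch_blk_plug_device_unlocked(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_plug_device(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
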
index 69b6595..747d061 100644 (file)
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
+               /* Spinlock only taken to quiet a warning */
+               spin_lock(conf->mddev->queue->queue_lock);
                blk_remove_plug(conf->mddev->queue);
+               spin_unlock(conf->mddev->queue->queue_lock);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                atomic_inc(&r10_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
-               blk_plug_device(mddev->queue);
+               blk_plug_device_unlocked(mddev->queue);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
        if (!conf)
                goto out;
 
-       mddev->queue->queue_lock = &conf->device_lock;
-
        mddev->thread = conf->thread;
        conf->thread = NULL;
 
@@ -2463,11 +2464,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
        mddev->recovery_cp = MaxSector;
 
        conf = setup_conf(mddev);
-       if (!IS_ERR(conf))
+       if (!IS_ERR(conf)) {
                list_for_each_entry(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0)
                                rdev->new_raid_disk = rdev->raid_disk * 2;
-               
+               conf->barrier = 1;
+       }
+
        return conf;
 }
 
index 5044bab..78536fd 100644 (file)
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
 
                mddev->queue->backing_dev_info.congested_data = mddev;
                mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-               mddev->queue->queue_lock = &conf->device_lock;
                mddev->queue->unplug_fn = raid5_unplug_queue;
 
                chunk_size = mddev->chunk_sectors << 9;
@@ -5517,7 +5516,6 @@ static int raid5_start_reshape(mddev_t *mddev)
        raid5_conf_t *conf = mddev->private;
        mdk_rdev_t *rdev;
        int spares = 0;
-       int added_devices = 0;
        unsigned long flags;
 
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5527,8 +5525,8 @@ static int raid5_start_reshape(mddev_t *mddev)
                return -ENOSPC;
 
        list_for_each_entry(rdev, &mddev->disks, same_set)
-               if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks)
-                    && !test_bit(Faulty, &rdev->flags))
+               if (!test_bit(In_sync, &rdev->flags)
+                   && !test_bit(Faulty, &rdev->flags))
                        spares++;
 
        if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5571,34 +5569,35 @@ static int raid5_start_reshape(mddev_t *mddev)
         * to correctly record the "partially reconstructed" state of
         * such devices during the reshape and confusion could result.
         */
-       if (mddev->delta_disks >= 0)
-           list_for_each_entry(rdev, &mddev->disks, same_set)
-               if (rdev->raid_disk < 0 &&
-                   !test_bit(Faulty, &rdev->flags)) {
-                       if (raid5_add_disk(mddev, rdev) == 0) {
-                               char nm[20];
-                               if (rdev->raid_disk >= conf->previous_raid_disks) {
-                                       set_bit(In_sync, &rdev->flags);
-                                       added_devices++;
-                               } else
-                                       rdev->recovery_offset = 0;
-                               sprintf(nm, "rd%d", rdev->raid_disk);
-                               if (sysfs_create_link(&mddev->kobj,
-                                                     &rdev->kobj, nm))
-                                       /* Failure here is OK */;
-                       } else
-                               break;
-               } else if (rdev->raid_disk >= conf->previous_raid_disks
-                          && !test_bit(Faulty, &rdev->flags)) {
-                       /* This is a spare that was manually added */
-                       set_bit(In_sync, &rdev->flags);
-                       added_devices++;
-               }
+       if (mddev->delta_disks >= 0) {
+               int added_devices = 0;
+               list_for_each_entry(rdev, &mddev->disks, same_set)
+                       if (rdev->raid_disk < 0 &&
+                           !test_bit(Faulty, &rdev->flags)) {
+                               if (raid5_add_disk(mddev, rdev) == 0) {
+                                       char nm[20];
+                                       if (rdev->raid_disk
+                                           >= conf->previous_raid_disks) {
+                                               set_bit(In_sync, &rdev->flags);
+                                               added_devices++;
+                                       } else
+                                               rdev->recovery_offset = 0;
+                                       sprintf(nm, "rd%d", rdev->raid_disk);
+                                       if (sysfs_create_link(&mddev->kobj,
+                                                             &rdev->kobj, nm))
+                                               /* Failure here is OK */;
+                               }
+                       } else if (rdev->raid_disk >= conf->previous_raid_disks
+                                  && !test_bit(Faulty, &rdev->flags)) {
+                               /* This is a spare that was manually added */
+                               set_bit(In_sync, &rdev->flags);
+                               added_devices++;
+                       }
 
-       /* When a reshape changes the number of devices, ->degraded
-        * is measured against the larger of the pre and post number of
-        * devices.*/
-       if (mddev->delta_disks > 0) {
+               /* When a reshape changes the number of devices,
+                * ->degraded is measured against the larger of the
+                * pre and post number of devices.
+                */
                spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
                        - added_devices;
index bc6a677..8c48521 100644 (file)
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
 #define TDA8290_ID 0x89
        u8 reg = 0x1f, id;
        struct i2c_msg msg_read[] = {
-               { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
-               { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+               { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+               { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
        };
 
        /* detect tda8290 */
        if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
-               printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+               printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
                               __func__, reg);
                return -ENODEV;
        }
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
 #define TDA8295C2_ID 0x8b
        u8 reg = 0x2f, id;
        struct i2c_msg msg_read[] = {
-               { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
-               { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+               { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+               { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
        };
 
-       /* detect tda8290 */
+       /* detect tda8295 */
        if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
-               printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+               printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
                               __func__, reg);
                return -ENODEV;
        }
index defd839..193cdb7 100644 (file)
@@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
        return 0;
 }
 
+static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
+               u16 pid, int onoff)
+{
+       struct dib0700_state *st = adapter->dev->priv;
+       if (st->is_dib7000pc)
+               return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
+       return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
+}
+
+static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
+{
+       struct dib0700_state *st = adapter->dev->priv;
+       if (st->is_dib7000pc)
+               return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
+       return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
+}
+
 static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
 {
     return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                        {
                                .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
                                .pid_filter_count = 32,
-                               .pid_filter       = stk70x0p_pid_filter,
-                               .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+                               .pid_filter       = stk7700p_pid_filter,
+                               .pid_filter_ctrl  = stk7700p_pid_filter_ctrl,
                                .frontend_attach  = stk7700p_frontend_attach,
                                .tuner_attach     = stk7700p_tuner_attach,
 
index 9eea418..46ccd01 100644 (file)
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
 }
 
 /* Default firmware for LME2510C */
-const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
 
 static void lme_coldreset(struct usb_device *dev)
 {
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
        .caps = DVB_USB_IS_AN_I2C_ADAPTER,
        .usb_ctrl = DEVICE_SPECIFIC,
        .download_firmware = lme2510_download_firmware,
-       .firmware = lme_firmware,
+       .firmware = (const char *)&lme_firmware,
        .size_of_priv = sizeof(struct lme2510_state),
        .num_adapters = 1,
        .adapter = {
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit);
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.74");
+MODULE_VERSION("1.75");
 MODULE_LICENSE("GPL");
index c7f5ccf..289a798 100644 (file)
@@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di
 }
 EXPORT_SYMBOL(dib7000m_get_i2c_master);
 
+int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+{
+       struct dib7000m_state *state = fe->demodulator_priv;
+       u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
+       val |= (onoff & 0x1) << 4;
+       dprintk("PID filter enabled %d", onoff);
+       return dib7000m_write_word(state, 294 + state->reg_offs, val);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
+
+int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+{
+       struct dib7000m_state *state = fe->demodulator_priv;
+       dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+       return dib7000m_write_word(state, 300 + state->reg_offs + id,
+                       onoff ? (1 << 13) | pid : 0);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter);
+
 #if 0
 /* used with some prototype boards */
 int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
index 113819c..81fcf22 100644 (file)
@@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
 extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
                                                   enum dibx000_i2c_interface,
                                                   int);
+extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
+extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
 #else
 static inline
 struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod,
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
+static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
+                                               u16 pid, u8 onoff)
+{
+       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+       return -ENODEV;
+}
+
+static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
+                                               uint8_t onoff)
+{
+       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+       return -ENODEV;
+}
 #endif
 
 /* TODO
index 59feeb8..10a432a 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <linux/kmod.h>
 #include <linux/vmalloc.h>
index f011c5d..1c5cc65 100644 (file)
@@ -1,4 +1,4 @@
-/* ir-lirc-codec.c - ir-core to classic lirc interface bridge
+/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
  *
  * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
  *
@@ -47,6 +47,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
        /* Carrier reports */
        if (ev.carrier_report) {
                sample = LIRC_FREQUENCY(ev.carrier);
+               IR_dprintk(2, "carrier report (freq: %d)\n", sample);
 
        /* Packet end */
        } else if (ev.timeout) {
@@ -62,6 +63,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
                        return 0;
 
                sample = LIRC_TIMEOUT(ev.duration / 1000);
+               IR_dprintk(2, "timeout report (duration: %d)\n", sample);
 
        /* Normal sample */
        } else {
@@ -85,6 +87,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
 
                sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
                                        LIRC_SPACE(ev.duration / 1000);
+               IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
+                          TO_US(ev.duration), TO_STR(ev.pulse));
        }
 
        lirc_buffer_write(dev->raw->lirc.drv->rbuf,
index 73230ff..01f258a 100644 (file)
@@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
 {
        ktime_t                 now;
        s64                     delta; /* ns */
-       struct ir_raw_event     ev;
+       DEFINE_IR_RAW_EVENT(ev);
        int                     rc = 0;
 
        if (!dev->raw)
@@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
         * being called for the first time, note that delta can't
         * possibly be negative.
         */
-       ev.duration = 0;
        if (delta > IR_MAX_DURATION || !dev->raw->last_type)
                type |= IR_START_EVENT;
        else
index 3bf3337..2f5dc06 100644 (file)
@@ -3,6 +3,9 @@
  *
  * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
  *
+ * See http://mediacenterguides.com/book/export/html/31 for details on
+ * key mappings.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@ static struct rc_map_table rc6_mce[] = {
        { 0x800f0426, KEY_EPG },                /* Guide */
        { 0x800f0427, KEY_ZOOM },               /* Aspect */
 
+       { 0x800f0432, KEY_MODE },               /* Visualization */
+       { 0x800f0433, KEY_PRESENTATION },       /* Slide Show */
+       { 0x800f0434, KEY_EJECTCD },
        { 0x800f043a, KEY_BRIGHTNESSUP },
 
        { 0x800f0446, KEY_TV },
index 079353e..e4f8eac 100644 (file)
@@ -148,6 +148,7 @@ enum mceusb_model_type {
        MCE_GEN2_TX_INV,
        POLARIS_EVK,
        CX_HYBRID_TV,
+       MULTIFUNCTION,
 };
 
 struct mceusb_model {
@@ -155,9 +156,10 @@ struct mceusb_model {
        u32 mce_gen2:1;
        u32 mce_gen3:1;
        u32 tx_mask_normal:1;
-       u32 is_polaris:1;
        u32 no_tx:1;
 
+       int ir_intfnum;
+
        const char *rc_map;     /* Allow specify a per-board map */
        const char *name;       /* per-board name */
 };
@@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = {
                .tx_mask_normal = 1,
        },
        [POLARIS_EVK] = {
-               .is_polaris = 1,
                /*
                 * In fact, the EVK is shipped without
                 * remotes, but we should have something handy,
@@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = {
                .name = "Conexant Hybrid TV (cx231xx) MCE IR",
        },
        [CX_HYBRID_TV] = {
-               .is_polaris = 1,
                .no_tx = 1, /* tx isn't wired up at all */
                .name = "Conexant Hybrid TV (cx231xx) MCE IR",
        },
+       [MULTIFUNCTION] = {
+               .mce_gen2 = 1,
+               .ir_intfnum = 2,
+       },
 };
 
 static struct usb_device_id mceusb_dev_table[] = {
@@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = {
        { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
        /* Philips/Spinel plus IR transceiver for ASUS */
        { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
-       /* Realtek MCE IR Receiver */
-       { USB_DEVICE(VENDOR_REALTEK, 0x0161) },
+       /* Realtek MCE IR Receiver and card reader */
+       { USB_DEVICE(VENDOR_REALTEK, 0x0161),
+         .driver_info = MULTIFUNCTION },
        /* SMK/Toshiba G83C0004D410 */
        { USB_DEVICE(VENDOR_SMK, 0x031d),
          .driver_info = MCE_GEN2_TX_INV },
@@ -816,7 +821,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
        switch (ir->buf_in[index]) {
        /* 2-byte return value commands */
        case MCE_CMD_S_TIMEOUT:
-               ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2);
+               ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
                break;
 
        /* 1-byte return value commands */
@@ -855,9 +860,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
                        break;
                case PARSE_IRDATA:
                        ir->rem--;
+                       init_ir_raw_event(&rawir);
                        rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
                        rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
-                                        * MS_TO_US(MCE_TIME_UNIT);
+                                        * US_TO_NS(MCE_TIME_UNIT);
 
                        dev_dbg(ir->dev, "Storing %s with duration %d\n",
                                rawir.pulse ? "pulse" : "space",
@@ -883,6 +889,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
                                             i, ir->rem + 1, false);
                        if (ir->rem)
                                ir->parser_state = PARSE_IRDATA;
+                       else
+                               ir_raw_event_reset(ir->rc);
                        break;
                }
 
@@ -1060,7 +1068,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
        rc->priv = ir;
        rc->driver_type = RC_DRIVER_IR_RAW;
        rc->allowed_protos = RC_TYPE_ALL;
-       rc->timeout = MS_TO_NS(1000);
+       rc->timeout = US_TO_NS(1000);
        if (!ir->flags.no_tx) {
                rc->s_tx_mask = mceusb_set_tx_mask;
                rc->s_tx_carrier = mceusb_set_tx_carrier;
@@ -1098,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
        bool is_gen3;
        bool is_microsoft_gen1;
        bool tx_mask_normal;
-       bool is_polaris;
+       int ir_intfnum;
 
        dev_dbg(&intf->dev, "%s called\n", __func__);
 
@@ -1107,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
        is_gen3 = mceusb_model[model].mce_gen3;
        is_microsoft_gen1 = mceusb_model[model].mce_gen1;
        tx_mask_normal = mceusb_model[model].tx_mask_normal;
-       is_polaris = mceusb_model[model].is_polaris;
+       ir_intfnum = mceusb_model[model].ir_intfnum;
 
-       if (is_polaris) {
-               /* Interface 0 is IR */
-               if (idesc->desc.bInterfaceNumber)
-                       return -ENODEV;
-       }
+       /* There are multi-function devices with non-IR interfaces */
+       if (idesc->desc.bInterfaceNumber != ir_intfnum)
+               return -ENODEV;
 
        /* step through the endpoints to find first bulk in and out endpoint */
        for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
index dd4caf8..d4d6449 100644 (file)
@@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
 
 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 {
-       /* set number of bytes needed for wake key comparison (default 67) */
-       nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);
+       /* set number of bytes needed for wake from S3 (default 65) */
+       nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
+                              CIR_WAKE_FIFO_CMP_DEEP);
 
        /* set tolerance/variance allowed per byte during wake compare */
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
@@ -460,7 +461,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
                return 0;
        }
 
-       carrier = (count * 1000000) / duration;
+       carrier = MS_TO_NS(count) / duration;
 
        if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
                nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +613,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
                sample = nvt->buf[i];
 
                rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
-               rawir.duration = (sample & BUF_LEN_MASK)
-                                       * SAMPLE_PERIOD * 1000;
+               rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
+                                         * SAMPLE_PERIOD);
 
                if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
                        if (nvt->rawir.pulse == rawir.pulse)
index 1df8235..048135e 100644 (file)
@@ -305,8 +305,11 @@ struct nvt_dev {
 #define CIR_WAKE_IRFIFOSTS_RX_EMPTY    0x20
 #define CIR_WAKE_IRFIFOSTS_RX_FULL     0x10
 
-/* CIR Wake FIFO buffer is 67 bytes long */
-#define CIR_WAKE_FIFO_LEN              67
+/*
+ * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes
+ * the system comparing only 65 bytes (fails with this set to 67)
+ */
+#define CIR_WAKE_FIFO_CMP_BYTES                65
 /* CIR Wake byte comparison tolerance */
 #define CIR_WAKE_CMP_TOLERANCE         5
 
index 72be8a0..5b4422e 100644 (file)
@@ -458,21 +458,27 @@ static int ir_getkeycode(struct input_dev *idev,
                index = ir_lookup_by_scancode(rc_map, scancode);
        }
 
-       if (index >= rc_map->len) {
-               if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
-                       IR_dprintk(1, "unknown key for scancode 0x%04x\n",
-                                  scancode);
+       if (index < rc_map->len) {
+               entry = &rc_map->scan[index];
+
+               ke->index = index;
+               ke->keycode = entry->keycode;
+               ke->len = sizeof(entry->scancode);
+               memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
+
+       } else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
+               /*
+                * We do not really know the valid range of scancodes
+                * so let's respond with KEY_RESERVED to anything we
+                * do not have a mapping for [yet].
+                */
+               ke->index = index;
+               ke->keycode = KEY_RESERVED;
+       } else {
                retval = -EINVAL;
                goto out;
        }
 
-       entry = &rc_map->scan[index];
-
-       ke->index = index;
-       ke->keycode = entry->keycode;
-       ke->len = sizeof(entry->scancode);
-       memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
-
        retval = 0;
 
 out:
@@ -844,7 +850,7 @@ static ssize_t store_protocols(struct device *device,
                        count++;
                } else {
                        for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
-                               if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) {
+                               if (!strcasecmp(tmp, proto_names[i].name)) {
                                        tmp += strlen(proto_names[i].name);
                                        mask = proto_names[i].type;
                                        break;
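
The ir_getkeycode() rework above changes the user-visible behaviour of the keycode ioctls: a lookup by scancode that finds no entry now returns KEY_RESERVED instead of failing, while out-of-range lookups by index still return -EINVAL. A hedged userspace sketch of exercising that path is below; the event node is hypothetical, and the scancode is simply the RC-6 MCE eject code added in the keymap hunk earlier, used only for illustration.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_keymap_entry ke;
		unsigned int scancode = 0x800f0434;	/* RC-6 MCE eject, for illustration */
		int fd = open("/dev/input/event0", O_RDONLY);	/* hypothetical node */

		if (fd < 0)
			return 1;
		memset(&ke, 0, sizeof(ke));
		ke.len = sizeof(scancode);
		memcpy(ke.scancode, &scancode, sizeof(scancode));
		if (ioctl(fd, EVIOCGKEYCODE_V2, &ke) == 0)
			printf("scancode 0x%x -> keycode %u%s\n", scancode, ke.keycode,
			       ke.keycode == KEY_RESERVED ? " (unmapped)" : "");
		return 0;
	}
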
index 6e2911c..e435d94 100644 (file)
@@ -164,7 +164,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
                                sz->signal_start.tv_usec -
                                sz->signal_last.tv_usec);
                        rawir.duration -= sz->sum;
-                       rawir.duration *= 1000;
+                       rawir.duration = US_TO_NS(rawir.duration);
                        rawir.duration &= IR_MAX_DURATION;
                }
                sz_push(sz, rawir);
@@ -177,7 +177,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
        rawir.duration = ((int) value) * SZ_RESOLUTION;
        rawir.duration += SZ_RESOLUTION / 2;
        sz->sum += rawir.duration;
-       rawir.duration *= 1000;
+       rawir.duration = US_TO_NS(rawir.duration);
        rawir.duration &= IR_MAX_DURATION;
        sz_push(sz, rawir);
 }
@@ -197,7 +197,7 @@ static void sz_push_full_space(struct streamzap_ir *sz,
        rawir.duration = ((int) value) * SZ_RESOLUTION;
        rawir.duration += SZ_RESOLUTION / 2;
        sz->sum += rawir.duration;
-       rawir.duration *= 1000;
+       rawir.duration = US_TO_NS(rawir.duration);
        sz_push(sz, rawir);
 }
 
@@ -273,6 +273,7 @@ static void streamzap_callback(struct urb *urb)
                                if (sz->timeout_enabled)
                                        sz_push(sz, rawir);
                                ir_raw_event_handle(sz->rdev);
+                               ir_raw_event_reset(sz->rdev);
                        } else {
                                sz_push_full_space(sz, sz->buf_in[i]);
                        }
@@ -290,6 +291,7 @@ static void streamzap_callback(struct urb *urb)
                }
        }
 
+       ir_raw_event_handle(sz->rdev);
        usb_submit_urb(urb, GFP_ATOMIC);
 
        return;
@@ -430,13 +432,13 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
        sz->decoder_state = PulseSpace;
        /* FIXME: don't yet have a way to set this */
        sz->timeout_enabled = true;
-       sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
+       sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
                                IR_MAX_DURATION) | 0x03000000);
        #if 0
        /* not yet supported, depends on patches from maxim */
        /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
-       sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
-       sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+       sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+       sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
        #endif
 
        do_gettimeofday(&sz->signal_start);
index e41e4ad..9c475c6 100644 (file)
@@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv,
        if (rc < 0)
                return rc;
 
-       return videobuf_reqbufs(&fh->vb_vidq, rb);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_reqbufs(&fh->vb_vidq, rb);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
+
+       return rc;
 }
 
 static int vidioc_querybuf(struct file *file, void *priv,
@@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv,
        if (rc < 0)
                return rc;
 
-       return videobuf_querybuf(&fh->vb_vidq, b);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_querybuf(&fh->vb_vidq, b);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_querybuf(&fh->vb_vbiq, b);
+
+       return rc;
 }
 
 static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
        if (rc < 0)
                return rc;
 
-       return videobuf_qbuf(&fh->vb_vidq, b);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_qbuf(&fh->vb_vidq, b);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_qbuf(&fh->vb_vbiq, b);
+
+       return rc;
 }
 
 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
                dev->greenscreen_detected = 0;
        }
 
-       return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+       if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+       else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+               rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
+
+       return rc;
 }
 
 static struct v4l2_file_operations au0828_v4l_fops = {
index 8717773..68ad196 100644 (file)
@@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
        .i2c = &cx18_i2c_std,
 };
 
+static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
+       .type = CX18_CARD_HVR_1600_S5H1411,
+       .name = "Hauppauge HVR-1600",
+       .comment = "Simultaneous Digital and Analog TV capture supported\n",
+       .v4l2_capabilities = CX18_CAP_ENCODER,
+       .hw_audio_ctrl = CX18_HW_418_AV,
+       .hw_muxer = CX18_HW_CS5345,
+       .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
+                 CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
+                 CX18_HW_Z8F0811_IR_HAUP,
+       .video_inputs = {
+               { CX18_CARD_INPUT_VID_TUNER,  0, CX18_AV_COMPOSITE7 },
+               { CX18_CARD_INPUT_SVIDEO1,    1, CX18_AV_SVIDEO1    },
+               { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
+               { CX18_CARD_INPUT_SVIDEO2,    2, CX18_AV_SVIDEO2    },
+               { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
+       },
+       .audio_inputs = {
+               { CX18_CARD_INPUT_AUD_TUNER,
+                 CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
+               { CX18_CARD_INPUT_LINE_IN1,
+                 CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
+               { CX18_CARD_INPUT_LINE_IN2,
+                 CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
+       },
+       .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+                        CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
+       .ddr = {
+               /* ESMT M13S128324A-5B memory */
+               .chip_config = 0x003,
+               .refresh = 0x30c,
+               .timing1 = 0x44220e82,
+               .timing2 = 0x08,
+               .tune_lane = 0,
+               .initial_emrs = 0,
+       },
+       .gpio_init.initial_value = 0x3001,
+       .gpio_init.direction = 0x3001,
+       .gpio_i2c_slave_reset = {
+               .active_lo_mask = 0x3001,
+               .msecs_asserted = 10,
+               .msecs_recovery = 40,
+               .ir_reset_mask  = 0x0001,
+       },
+       .i2c = &cx18_i2c_std,
+};
+
 static const struct cx18_card cx18_card_hvr1600_samsung = {
        .type = CX18_CARD_HVR_1600_SAMSUNG,
        .name = "Hauppauge HVR-1600 (Preproduction)",
@@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = {
        &cx18_card_toshiba_qosmio_dvbt,
        &cx18_card_leadtek_pvr2100,
        &cx18_card_leadtek_dvr3100h,
-       &cx18_card_gotview_dvd3
+       &cx18_card_gotview_dvd3,
+       &cx18_card_hvr1600_s5h1411
 };
 
 const struct cx18_card *cx18_get_card(u16 index)
index 944af8a..b1c3cbd 100644 (file)
@@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype,
                 "\t\t\t 7 = Leadtek WinFast PVR2100\n"
                 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
                 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n"
+                "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n"
                 "\t\t\t 0 = Autodetect (default)\n"
                 "\t\t\t-1 = Ignore this card\n\t\t");
 MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
        switch (cx->card->type) {
        case CX18_CARD_HVR_1600_ESMT:
        case CX18_CARD_HVR_1600_SAMSUNG:
+       case CX18_CARD_HVR_1600_S5H1411:
                tveeprom_hauppauge_analog(&c, tv, eedata);
                break;
        case CX18_CARD_YUAN_MPC718:
@@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx)
           from the model number. Use the cardtype module option if you
           have one of these preproduction models. */
        switch (tv.model) {
-       case 74000 ... 74999:
+       case 74301: /* Retail models */
+       case 74321:
+       case 74351: /* OEM models */
+       case 74361:
+               /* Digital side is s5h1411/tda18271 */
+               cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411);
+               break;
+       case 74021: /* Retail models */
+       case 74031:
+       case 74041:
+       case 74141:
+       case 74541: /* OEM models */
+       case 74551:
+       case 74591:
+       case 74651:
+       case 74691:
+       case 74751:
+       case 74891:
+               /* Digital side is s5h1409/mxl5005s */
                cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
                break;
        case 0x718:
@@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
                CX18_ERR("Invalid EEPROM\n");
                return;
        default:
-               CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model);
+               CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
+                        "(cardtype=1)\n", tv.model);
                cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
                break;
        }
index 306caac..f736679 100644 (file)
@@ -85,7 +85,8 @@
 #define CX18_CARD_LEADTEK_PVR2100     6 /* Leadtek WinFast PVR2100 */
 #define CX18_CARD_LEADTEK_DVR3100H    7 /* Leadtek WinFast DVR3100 H */
 #define CX18_CARD_GOTVIEW_PCI_DVD3    8 /* GoTView PCI DVD3 Hybrid */
-#define CX18_CARD_LAST               8
+#define CX18_CARD_HVR_1600_S5H1411    9 /* Hauppauge HVR 1600 s5h1411/tda18271*/
+#define CX18_CARD_LAST               9
 
 #define CX18_ENC_STREAM_TYPE_MPG  0
 #define CX18_ENC_STREAM_TYPE_TS   1
index f0381d6..f41922b 100644 (file)
@@ -29,6 +29,8 @@
 #include "cx18-gpio.h"
 #include "s5h1409.h"
 #include "mxl5005s.h"
+#include "s5h1411.h"
+#include "tda18271.h"
 #include "zl10353.h"
 
 #include <linux/firmware.h>
@@ -76,6 +78,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
        .hvr1600_opt   = S5H1409_HVR1600_OPTIMIZE
 };
 
+/*
+ * CX18_CARD_HVR_1600_S5H1411
+ */
+static struct s5h1411_config hcw_s5h1411_config = {
+       .output_mode   = S5H1411_SERIAL_OUTPUT,
+       .gpio          = S5H1411_GPIO_OFF,
+       .vsb_if        = S5H1411_IF_44000,
+       .qam_if        = S5H1411_IF_4000,
+       .inversion     = S5H1411_INVERSION_ON,
+       .status_mode   = S5H1411_DEMODLOCKING,
+       .mpeg_timing   = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct tda18271_std_map hauppauge_tda18271_std_map = {
+       .atsc_6   = { .if_freq = 5380, .agc_mode = 3, .std = 3,
+                     .if_lvl = 6, .rfagc_top = 0x37 },
+       .qam_6    = { .if_freq = 4000, .agc_mode = 3, .std = 0,
+                     .if_lvl = 6, .rfagc_top = 0x37 },
+};
+
+static struct tda18271_config hauppauge_tda18271_config = {
+       .std_map = &hauppauge_tda18271_std_map,
+       .gate    = TDA18271_GATE_DIGITAL,
+       .output_opt = TDA18271_OUTPUT_LT_OFF,
+};
+
 /*
  * CX18_CARD_LEADTEK_DVR3100H
  */
@@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
        switch (cx->card->type) {
        case CX18_CARD_HVR_1600_ESMT:
        case CX18_CARD_HVR_1600_SAMSUNG:
+       case CX18_CARD_HVR_1600_S5H1411:
                v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
                v |= 0x00400000; /* Serial Mode */
                v |= 0x00002000; /* Data Length - Byte */
@@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream)
                        ret = 0;
                }
                break;
+       case CX18_CARD_HVR_1600_S5H1411:
+               dvb->fe = dvb_attach(s5h1411_attach,
+                                    &hcw_s5h1411_config,
+                                    &cx->i2c_adap[0]);
+               if (dvb->fe != NULL)
+                       dvb_attach(tda18271_attach, dvb->fe,
+                                  0x60, &cx->i2c_adap[0],
+                                  &hauppauge_tda18271_config);
+               break;
        case CX18_CARD_LEADTEK_DVR3100H:
                dvb->fe = dvb_attach(zl10353_attach,
                                     &leadtek_dvr3100h_demod,
index ed3d8f5..307ff54 100644 (file)
@@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
 
        if (!i2c_wait_done(i2c_adap))
                goto eio;
-       if (!i2c_slave_did_ack(i2c_adap)) {
-               retval = -ENXIO;
-               goto err;
-       }
        if (i2c_debug) {
                printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
                if (!(ctrl & I2C_NOSTOP))
@@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
 
  eio:
        retval = -EIO;
- err:
        if (i2c_debug)
                printk(KERN_ERR " ERR: %d\n", retval);
        return retval;
@@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
 
                if (!i2c_wait_done(i2c_adap))
                        goto eio;
-               if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
-                       retval = -ENXIO;
-                       goto err;
-               }
                msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
                if (i2c_debug) {
                        dprintk(1, " %02x", msg->buf[cnt]);
@@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
 
  eio:
        retval = -EIO;
- err:
        if (i2c_debug)
                printk(KERN_ERR " ERR: %d\n", retval);
        return retval;
index 6fc09dd..35796e0 100644 (file)
@@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client,
                kfree(state);
                return err;
        }
-       v4l2_ctrl_cluster(2, &state->volume);
+       if (!is_cx2583x(state))
+               v4l2_ctrl_cluster(2, &state->volume);
        v4l2_ctrl_handler_setup(&state->hdl);
 
        if (client->dev.platform_data) {
index 865216e..47236a5 100644 (file)
@@ -5793,7 +5793,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev,
                        break;
                default:
 /*             case 0xdd:       * delay */
-                       msleep(action->val / 64 + 10);
+                       msleep(action->idx);
                        break;
                }
                action++;
@@ -5830,7 +5830,7 @@ static void setmatrix(struct gspca_dev *gspca_dev)
                [SENSOR_GC0305] =       gc0305_matrix,
                [SENSOR_HDCS2020b] =    NULL,
                [SENSOR_HV7131B] =      NULL,
-               [SENSOR_HV7131R] =      NULL,
+               [SENSOR_HV7131R] =      po2030_matrix,
                [SENSOR_ICM105A] =      po2030_matrix,
                [SENSOR_MC501CB] =      NULL,
                [SENSOR_MT9V111_1] =    gc0305_matrix,
@@ -5936,6 +5936,7 @@ static void setquality(struct gspca_dev *gspca_dev)
        case SENSOR_ADCM2700:
        case SENSOR_GC0305:
        case SENSOR_HV7131B:
+       case SENSOR_HV7131R:
        case SENSOR_OV7620:
        case SENSOR_PAS202B:
        case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@ static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
                reg_w(gspca_dev, 0x02, 0x003b);
                reg_w(gspca_dev, 0x00, 0x0038);
                break;
+       case SENSOR_HV7131R:
        case SENSOR_PAS202B:
                reg_w(gspca_dev, 0x03, 0x003b);
                reg_w(gspca_dev, 0x0c, 0x003a);
                reg_w(gspca_dev, 0x0b, 0x0039);
-               reg_w(gspca_dev, 0x0b, 0x0038);
+               if (sensor == SENSOR_PAS202B)
+                       reg_w(gspca_dev, 0x0b, 0x0038);
                break;
        }
 }
@@ -6704,10 +6707,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
                reg_w(gspca_dev, 0x02, 0x003b);
                reg_w(gspca_dev, 0x00, 0x0038);
                break;
+       case SENSOR_HV7131R:
        case SENSOR_PAS202B:
                reg_w(gspca_dev, 0x03, 0x003b);
                reg_w(gspca_dev, 0x0c, 0x003a);
                reg_w(gspca_dev, 0x0b, 0x0039);
+               if (sd->sensor == SENSOR_HV7131R)
+                       reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
                break;
        }
 
@@ -6720,6 +6726,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
                break;
        case SENSOR_PAS202B:
        case SENSOR_GC0305:
+       case SENSOR_HV7131R:
        case SENSOR_TAS5130C:
                reg_r(gspca_dev, 0x0008);
                /* fall thru */
@@ -6760,6 +6767,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
                                                /* ms-win + */
                reg_w(gspca_dev, 0x40, 0x0117);
                break;
+       case SENSOR_HV7131R:
+               i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */
+               i2c_write(gspca_dev, 0x26, 0x93, 0x00);
+               i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
+               reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
+               break;
        case SENSOR_GC0305:
        case SENSOR_TAS5130C:
                reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
@@ -6808,9 +6821,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
 {
        struct sd *sd = (struct sd *) gspca_dev;
 
-       if (data[0] == 0xff && data[1] == 0xd8) {       /* start of frame */
+       /* check the JPEG end of frame */
+       if (len >= 3
+        && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
+/*fixme: what does the last byte mean?*/
                gspca_frame_add(gspca_dev, LAST_PACKET,
-                                       NULL, 0);
+                                       data, len - 1);
+               return;
+       }
+
+       /* check the JPEG start of a frame */
+       if (data[0] == 0xff && data[1] == 0xd8) {
                /* put the JPEG header in the new frame */
                gspca_frame_add(gspca_dev, FIRST_PACKET,
                        sd->jpeg_hdr, JPEG_HDR_SZ);
index a6572e5..a27d93b 100644 (file)
@@ -283,6 +283,7 @@ static int hdpvr_probe(struct usb_interface *interface,
        struct hdpvr_device *dev;
        struct usb_host_interface *iface_desc;
        struct usb_endpoint_descriptor *endpoint;
+       struct i2c_client *client;
        size_t buffer_size;
        int i;
        int retval = -ENOMEM;
@@ -381,13 +382,21 @@ static int hdpvr_probe(struct usb_interface *interface,
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        retval = hdpvr_register_i2c_adapter(dev);
        if (retval < 0) {
-               v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+               v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
                goto error;
        }
 
-       retval = hdpvr_register_i2c_ir(dev);
-       if (retval < 0)
-               v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
+       client = hdpvr_register_ir_rx_i2c(dev);
+       if (!client) {
+               v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+               goto reg_fail;
+       }
+
+       client = hdpvr_register_ir_tx_i2c(dev);
+       if (!client) {
+               v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+               goto reg_fail;
+       }
 #endif
 
        /* let the user know what node this device is now attached to */
@@ -395,6 +404,10 @@ static int hdpvr_probe(struct usb_interface *interface,
                  video_device_node_name(dev->video_dev));
        return 0;
 
+reg_fail:
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+       i2c_del_adapter(&dev->i2c_adapter);
+#endif
 error:
        if (dev) {
                /* Destroy single thread */
@@ -424,6 +437,9 @@ static void hdpvr_disconnect(struct usb_interface *interface)
        mutex_lock(&dev->io_mutex);
        hdpvr_cancel_queue(dev);
        mutex_unlock(&dev->io_mutex);
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+       i2c_del_adapter(&dev->i2c_adapter);
+#endif
        video_unregister_device(dev->video_dev);
        atomic_dec(&dev_nr);
 }
index 89b71fa..e53fa55 100644 (file)
 #define Z8F0811_IR_RX_I2C_ADDR 0x71
 
 
-static struct i2c_board_info hdpvr_i2c_board_info = {
-       I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
-       I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
-};
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
+{
+       struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+       struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
+               I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+       };
+
+       init_data->name = "HD-PVR";
+       hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
 
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
+       return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
+}
+
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
 {
-       struct i2c_client *c;
        struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+       struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
+               I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
+       };
 
        /* Our default information for ir-kbd-i2c.c to use */
        init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
        init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
        init_data->type = RC_TYPE_RC5;
-       init_data->name = "HD PVR";
-       hdpvr_i2c_board_info.platform_data = init_data;
-
-       c = i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info);
+       init_data->name = "HD-PVR";
+       hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
 
-       return (c == NULL) ? -ENODEV : 0;
+       return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
 }
 
 static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
index ee74e3b..072f23c 100644 (file)
@@ -313,7 +313,8 @@ int hdpvr_cancel_queue(struct hdpvr_device *dev);
 /* i2c adapter registration */
 int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
 
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
 
 /*========================================================================*/
 /* buffer management */
index d2b20ad..a221ad6 100644 (file)
@@ -128,6 +128,19 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
 
 static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
 {
+       int ret;
+       unsigned char buf[1] = { 0 };
+
+       /*
+        * This is the same apparent "are you ready?" poll command observed
+        * watching Windows driver traffic and implemented in lirc_zilog. With
+        * this added, we get far saner remote behavior with z8 chips on usb
+        * connected devices, even with the default polling interval of 100ms.
+        */
+       ret = i2c_master_send(ir->c, buf, 1);
+       if (ret != 1)
+               return (ret < 0) ? ret : -EINVAL;
+
        return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
 }
 
index 9b4faf0..9c29e96 100644 (file)
@@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
 static void ivtv_irq_dma_err(struct ivtv *itv)
 {
        u32 data[CX2341X_MBOX_MAX_DATA];
+       u32 status;
 
        del_timer(&itv->dma_timer);
+
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
+       status = read_reg(IVTV_REG_DMASTATUS);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
-                               read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
-       write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+                               status, itv->cur_dma_stream);
+       /*
+        * We do *not* write back to the IVTV_REG_DMASTATUS register to
+        * clear the error status, if either the encoder write (0x02) or
+        * decoder read (0x01) bus master DMA operation does not indicate
+        * completion.  We can race with the DMA engine, which may have
+        * transitioned to completed status *after* we read the register.
+        * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the
+        * DMA engine has completed, will cause the DMA engine to stop working.
+        */
+       status &= 0x3;
+       if (status == 0x3)
+               write_reg(status, IVTV_REG_DMASTATUS);
+
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
 
-               /* retry */
-               if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
+               if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+                       /* retry */
+                       /*
+                        * FIXME - handle cases of DMA error similar to
+                        * encoder below, except conditioned on status & 0x1
+                        */
                        ivtv_dma_dec_start(s);
-               else
-                       ivtv_dma_enc_start(s);
-               return;
+                       return;
+               } else {
+                       if ((status & 0x2) == 0) {
+                               /*
+                                * CX2341x Bus Master DMA write is ongoing.
+                                * Reset the timer and let it complete.
+                                */
+                               itv->dma_timer.expires =
+                                               jiffies + msecs_to_jiffies(600);
+                               add_timer(&itv->dma_timer);
+                               return;
+                       }
+
+                       if (itv->dma_retries < 3) {
+                               /*
+                                * CX2341x Bus Master DMA write has ended.
+                                * Retry the write, starting with the first
+                                * xfer segment. Just retrying the current
+                                * segment is not sufficient.
+                                */
+                               s->sg_processed = 0;
+                               itv->dma_retries++;
+                               ivtv_dma_enc_start_xfer(s);
+                               return;
+                       }
+                       /* Too many retries, give up on this one */
+               }
+
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
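
The long comment in the hunk above boils down to a small test on the two DMASTATUS bus-master bits. A condensed sketch of that check, with hypothetical accessor names standing in for the driver's read_reg()/write_reg() on IVTV_REG_DMASTATUS (the real logic lives in ivtv_irq_dma_err()):

	#include <linux/types.h>

	/* Hypothetical stand-ins for the driver's register accessors. */
	u32 read_dmastatus(void);
	void write_dmastatus(u32 val);

	/* Returns true when the error bits could be acknowledged safely. */
	static bool ack_dma_error_if_idle(void)
	{
		/* Bit 0: decoder read master done, bit 1: encoder write master done. */
		u32 status = read_dmastatus() & 0x3;

		if (status != 0x3)
			return false;	/* a master may still be running: leave the register alone */

		write_dmastatus(status);	/* both done: safe to clear the error state */
		return true;
	}
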
index c179041..e7e7178 100644 (file)
@@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev)
        v4l2_m2m_release(dev->m2m_dev);
        del_timer_sync(&dev->timer);
        video_unregister_device(dev->vfd);
-       video_device_release(dev->vfd);
        v4l2_device_unregister(&dev->v4l2_dev);
        kfree(dev);
 
index ccc8849..451ecd4 100644 (file)
@@ -597,7 +597,6 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
                init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
                init_data->type                  = RC_TYPE_RC5;
                init_data->name                  = hdw->hdw_desc->description;
-               init_data->polling_interval      = 260; /* ms From lirc_zilog */
                /* IR Receiver */
                info.addr          = 0x71;
                info.platform_data = init_data;
index b63f8ca..561909b 100644 (file)
@@ -57,7 +57,7 @@
 #include <linux/usb.h>
 
 #define S2255_MAJOR_VERSION    1
-#define S2255_MINOR_VERSION    20
+#define S2255_MINOR_VERSION    21
 #define S2255_RELEASE          0
 #define S2255_VERSION          KERNEL_VERSION(S2255_MAJOR_VERSION, \
                                               S2255_MINOR_VERSION, \
@@ -312,9 +312,9 @@ struct s2255_fh {
 };
 
 /* current cypress EEPROM firmware version */
-#define S2255_CUR_USB_FWVER    ((3 << 8) | 6)
+#define S2255_CUR_USB_FWVER    ((3 << 8) | 11)
 /* current DSP FW version */
-#define S2255_CUR_DSP_FWVER     8
+#define S2255_CUR_DSP_FWVER     10102
 /* Need DSP version 5+ for video status feature */
 #define S2255_MIN_DSP_STATUS      5
 #define S2255_MIN_DSP_COLORFILTER 8
@@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
 
 static void s2255_reset_dsppower(struct s2255_dev *dev)
 {
-       s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1);
+       s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
        msleep(10);
        s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
+       msleep(600);
+       s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
        return;
 }
 
index f35459d..0db9092 100644 (file)
@@ -1565,7 +1565,7 @@ static int saa711x_probe(struct i2c_client *client,
        chip_id = name[5];
 
        /* Check whether this chip is part of the saa711x series */
-       if (memcmp(name, "1f711", 5)) {
+       if (memcmp(name + 1, "f711", 4)) {
                v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
                        client->addr << 1, name);
                return -ENODEV;
index e9a3eab..8c1d85e 100644 (file)
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
 {
        int rc;
 
-       workqueue = create_freezeable_workqueue("kmemstick");
+       workqueue = create_freezable_workqueue("kmemstick");
        if (!workqueue)
                return -ENOMEM;
 
index f71f229..1735c84 100644 (file)
@@ -76,8 +76,8 @@
 #define COPYRIGHT      "Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON       "3.04.17"
-#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON       "3.04.18"
+#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.18"
 #define WHAT_MAGIC_STRING              "@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
index a3856ed..e8deb8e 100644 (file)
@@ -596,6 +596,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
        return 1;
 }
 
+static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+       fasync_helper(-1, filep, 0, &async_queue);
+       return 0;
+}
+
 static int
 mptctl_fasync(int fd, struct file *filep, int mode)
 {
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
        .llseek =       no_llseek,
        .fasync =       mptctl_fasync,
        .unlocked_ioctl = mptctl_ioctl,
+       .release =      mptctl_release,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = compat_mpctl_ioctl,
 #endif
index 59b8f53..0d9b82a 100644 (file)
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
        }
 
  out:
-       printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-           ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+       printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+           ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+           SCpnt, SCpnt->serial_number);
 
        return retval;
 }
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 
        vdevice = SCpnt->device->hostdata;
        if (!vdevice || !vdevice->vtarget) {
-               retval = SUCCESS;
+               retval = 0;
                goto out;
        }
 
index 6a1f940..c45e630 100644 (file)
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
        unsigned long flags;
        struct asic3 *asic;
 
-       desc->chip->ack(irq);
+       desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-       asic = desc->handler_data;
+       asic = get_irq_data(irq);
 
        for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
                u32 status;
index 33c923d..fdd8a1b 100644 (file)
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
 
        /* Voice codec interface client */
        cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
-       cell->name = "davinci_vcif";
+       cell->name = "davinci-vcif";
        cell->driver_data = davinci_vc;
 
        /* Voice codec CQ93VC client */
        cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
-       cell->name = "cq93vc";
+       cell->name = "cq93vc-codec";
        cell->driver_data = davinci_vc;
 
        ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
index 627cf57..e9018d1 100644 (file)
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
 static inline int __tps6586x_writes(struct i2c_client *client, int reg,
                                  int len, uint8_t *val)
 {
-       int ret;
+       int ret, i;
 
-       ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
-       if (ret < 0) {
-               dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
-               return ret;
+       for (i = 0; i < len; i++) {
+               ret = __tps6586x_write(client, reg + i, *(val + i));
+               if (ret < 0)
+                       return ret;
        }
 
        return 0;
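
The rewrite above swaps a single SMBus block transfer for a loop of one-register writes, the usual fallback when a chip or its bus does not accept i2c_smbus_write_i2c_block_data(). A generic sketch of that fallback, with chip_write_reg() standing in for the driver's single-register helper:

	#include <linux/i2c.h>
	#include <linux/types.h>

	/* Hypothetical single-register write helper (mirrors __tps6586x_write). */
	int chip_write_reg(struct i2c_client *client, int reg, uint8_t val);

	/* Write "len" consecutive registers one at a time, stopping on the first error. */
	static int chip_write_block(struct i2c_client *client, int reg,
				    int len, const uint8_t *val)
	{
		int i, ret;

		for (i = 0; i < len; i++) {
			ret = chip_write_reg(client, reg + i, val[i]);
			if (ret < 0)
				return ret;
		}
		return 0;
	}
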
index 000cb41..92b85e2 100644 (file)
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
        idev->close      = ucb1x00_ts_close;
 
        __set_bit(EV_ABS, idev->evbit);
-       __set_bit(ABS_X, idev->absbit);
-       __set_bit(ABS_Y, idev->absbit);
-       __set_bit(ABS_PRESSURE, idev->absbit);
 
        input_set_drvdata(idev, ts);
 
+       ucb1x00_adc_enable(ts->ucb);
+       ts->x_res = ucb1x00_ts_read_xres(ts);
+       ts->y_res = ucb1x00_ts_read_yres(ts);
+       ucb1x00_adc_disable(ts->ucb);
+
+       input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+       input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+       input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
        err = input_register_device(idev);
        if (err)
                goto fail;
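
The hunk above replaces the bare __set_bit() axis flags with input_set_abs_params(), sizing the X/Y ranges from a one-off ADC read. A sketch of that registration pattern with the resolutions passed in as plain parameters (the driver measures them via the UCB1x00 ADC):

	#include <linux/input.h>

	/* Sketch: advertise absolute axes with device-measured ranges. */
	static void ts_setup_abs(struct input_dev *idev, int x_res, int y_res)
	{
		__set_bit(EV_ABS, idev->evbit);

		/* Arguments are min, max, fuzz, flat for each axis. */
		input_set_abs_params(idev, ABS_X, 0, x_res, 0, 0);
		input_set_abs_params(idev, ABS_Y, 0, y_res, 0, 0);
		input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
	}
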
index 41233c7..f4016a0 100644 (file)
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;
 
+       /* Don't actually go through with the suspend if the CODEC is
+        * still active (e.g., for audio passthrough from the CP). */
+       ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read power status: %d\n", ret);
+       } else if (ret & WM8994_VMID_SEL_MASK) {
+               dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+               return 0;
+       }
+
        /* GPIO configuration state is saved here since we may be configuring
         * the GPIO alternate functions even if we're not using the gpiolib
         * driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
        if (ret < 0)
                dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 
+       wm8994->suspended = true;
+
        ret = regulator_bulk_disable(wm8994->num_supplies,
                                     wm8994->supplies);
        if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;
 
+       /* We may have lied to the PM core about suspending */
+       if (!wm8994->suspended)
+               return 0;
+
        ret = regulator_bulk_enable(wm8994->num_supplies,
                                    wm8994->supplies);
        if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
        if (ret < 0)
                dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
 
+       wm8994->suspended = false;
+
        return 0;
 }
 #endif
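
The suspend/resume changes above implement a "pretend to suspend" pattern: suspend is skipped while the CODEC is still active, and a driver-private flag keeps resume from undoing work that was never done. A stripped-down sketch, assuming a bool suspended field in the driver data (as the hunk adds to struct wm8994):

	/* Sketch of the skip-suspend bookkeeping used above. */
	struct chip {
		bool suspended;		/* set only when we really powered down */
	};

	static int chip_suspend(struct chip *c, bool still_active)
	{
		if (still_active)
			return 0;	/* report success to the PM core, but do nothing */

		/* ... save registers, disable regulators ... */
		c->suspended = true;
		return 0;
	}

	static int chip_resume(struct chip *c)
	{
		if (!c->suspended)
			return 0;	/* we never actually went down, nothing to restore */

		/* ... re-enable regulators, restore registers ... */
		c->suspended = false;
		return 0;
	}
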
index 63ee4c1..b6e1c9a 100644 (file)
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
        { "bmp085", 0 },
        { }
 };
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
 
 static struct i2c_driver bmp085_driver = {
        .driver = {
index 5f6852d..44d4475 100644 (file)
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
 {
        int rc;
 
-       workqueue = create_freezeable_workqueue("tifm");
+       workqueue = create_freezable_workqueue("tifm");
        if (!workqueue)
                return -ENOMEM;
 
index 4d2ea8e..6df5a55 100644 (file)
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
        if (x86_hyper != &x86_hyper_vmware)
                return -ENODEV;
 
-       vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+       vmballoon_wq = create_freezable_workqueue("vmmemctl");
        if (!vmballoon_wq) {
                pr_err("failed to create workqueue\n");
                return -ENOMEM;
index 6625c05..150b5f3 100644 (file)
@@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work)
         * still present
         */
        if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
-           && mmc_card_is_removable(host))
+           && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);
 
        /*
index 5c4a54d..ebc62ad 100644 (file)
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host)
         */
        mmc_release_host(host);
        err = mmc_add_card(host->card);
-       mmc_claim_host(host);
        if (err)
                goto remove_added;
 
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host)
                        goto remove_added;
        }
 
+       mmc_claim_host(host);
        return 0;
 
 
 remove_added:
        /* Remove without lock if the device has been added. */
-       mmc_release_host(host);
        mmc_sdio_remove(host);
        mmc_claim_host(host);
 remove:
index bac7d62..0371bf5 100644 (file)
@@ -462,7 +462,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
                goto out;
        }
 
-       mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
+       mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto out;
index b3a0ab0..74218ad 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/mmc/host.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
@@ -827,8 +828,8 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
        }
 
        host->clk = clk_get(&pdev->dev, "mmc");
-       if (!host->clk) {
-               ret = -ENOENT;
+       if (IS_ERR(host->clk)) {
+               ret = PTR_ERR(host->clk);
                dev_err(&pdev->dev, "Failed to get mmc clock\n");
                goto err_free_host;
        }
index 5630228..2d6de3e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
@@ -46,10 +47,6 @@ static unsigned int fmax = 515633;
  *           is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  *               is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- *             and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- *             using DMA.
  * @sdio: variant supports SDIO
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
@@ -59,8 +56,6 @@ struct variant_data {
        unsigned int            datalength_bits;
        unsigned int            fifosize;
        unsigned int            fifohalfsize;
-       bool                    broken_blockend;
-       bool                    broken_blockend_dma;
        bool                    sdio;
        bool                    st_clkdiv;
 };
@@ -76,7 +71,6 @@ static struct variant_data variant_u300 = {
        .fifohalfsize           = 8 * 4,
        .clkreg_enable          = 1 << 13, /* HWFCEN */
        .datalength_bits        = 16,
-       .broken_blockend_dma    = true,
        .sdio                   = true,
 };
 
@@ -86,7 +80,6 @@ static struct variant_data variant_ux500 = {
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = 1 << 14, /* HWFCEN */
        .datalength_bits        = 24,
-       .broken_blockend        = true,
        .sdio                   = true,
        .st_clkdiv              = true,
 };
@@ -210,8 +203,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
        host->data = data;
        host->size = data->blksz * data->blocks;
        host->data_xfered = 0;
-       host->blockend = false;
-       host->dataend = false;
 
        mmci_init_sg(host, data);
 
@@ -288,21 +279,26 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
 {
-       struct variant_data *variant = host->variant;
-
        /* First check for errors */
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+               u32 remain, success;
+
+               /* Calculate how far we are into the transfer */
+               remain = readl(host->base + MMCIDATACNT);
+               success = data->blksz * data->blocks - remain;
+
                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-               if (status & MCI_DATACRCFAIL)
+               if (status & MCI_DATACRCFAIL) {
+                       /* Last block was not successful */
+                       host->data_xfered = round_down(success - 1, data->blksz);
                        data->error = -EILSEQ;
-               else if (status & MCI_DATATIMEOUT)
+               } else if (status & MCI_DATATIMEOUT) {
+                       host->data_xfered = round_down(success, data->blksz);
                        data->error = -ETIMEDOUT;
-               else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
+               } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+                       host->data_xfered = round_down(success, data->blksz);
                        data->error = -EIO;
-
-               /* Force-complete the transaction */
-               host->blockend = true;
-               host->dataend = true;
+               }
 
                /*
                 * We hit an error condition.  Ensure that any data
@@ -321,61 +317,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                }
        }
 
-       /*
-        * On ARM variants in PIO mode, MCI_DATABLOCKEND
-        * is always sent first, and we increase the
-        * transfered number of bytes for that IRQ. Then
-        * MCI_DATAEND follows and we conclude the transaction.
-        *
-        * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-        * doesn't seem to immediately clear from the status,
-        * so we can't use it keep count when only one irq is
-        * used because the irq will hit for other reasons, and
-        * then the flag is still up. So we use the MCI_DATAEND
-        * IRQ at the end of the entire transfer because
-        * MCI_DATABLOCKEND is broken.
-        *
-        * In the U300, the IRQs can arrive out-of-order,
-        * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-        * so for this case we use the flags "blockend" and
-        * "dataend" to make sure both IRQs have arrived before
-        * concluding the transaction. (This does not apply
-        * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-        * at all.) In DMA mode it suffers from the same problem
-        * as the Ux500.
-        */
-       if (status & MCI_DATABLOCKEND) {
-               /*
-                * Just being a little over-cautious, we do not
-                * use this progressive update if the hardware blockend
-                * flag is unreliable: since it can stay high between
-                * IRQs it will corrupt the transfer counter.
-                */
-               if (!variant->broken_blockend)
-                       host->data_xfered += data->blksz;
-               host->blockend = true;
-       }
-
-       if (status & MCI_DATAEND)
-               host->dataend = true;
+       if (status & MCI_DATABLOCKEND)
+               dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
-       /*
-        * On variants with broken blockend we shall only wait for dataend,
-        * on others we must sync with the blockend signal since they can
-        * appear out-of-order.
-        */
-       if (host->dataend && (host->blockend || variant->broken_blockend)) {
+       if (status & MCI_DATAEND || data->error) {
                mmci_stop_data(host);
 
-               /* Reset these flags */
-               host->blockend = false;
-               host->dataend = false;
-
-               /*
-                * Variants with broken blockend flags need to handle the
-                * end of the entire transfer here.
-                */
-               if (variant->broken_blockend && !data->error)
+               if (!data->error)
+                       /* The error clause is handled above, success! */
                        host->data_xfered += data->blksz * data->blocks;
 
                if (!data->stop) {
@@ -394,15 +343,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 
        host->cmd = NULL;
 
-       cmd->resp[0] = readl(base + MMCIRESPONSE0);
-       cmd->resp[1] = readl(base + MMCIRESPONSE1);
-       cmd->resp[2] = readl(base + MMCIRESPONSE2);
-       cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
+       } else {
+               cmd->resp[0] = readl(base + MMCIRESPONSE0);
+               cmd->resp[1] = readl(base + MMCIRESPONSE1);
+               cmd->resp[2] = readl(base + MMCIRESPONSE2);
+               cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }
 
        if (!cmd->data || cmd->error) {
@@ -770,7 +719,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
-       unsigned int mask;
        int ret;
 
        /* must have platform data */
@@ -951,12 +899,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
                        goto irq0_free;
        }
 
-       mask = MCI_IRQENABLE;
-       /* Don't use the datablockend flag if it's broken */
-       if (variant->broken_blockend)
-               mask &= ~MCI_DATABLOCKEND;
-
-       writel(mask, host->base + MMCIMASK0);
+       writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
        amba_set_drvdata(dev, mmc);
 
index df06f01..c1df7b8 100644 (file)
 #define MCI_IRQENABLE  \
        (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|     \
        MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|       \
-       MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+       MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@ struct mmci_host {
        struct timer_list       timer;
        unsigned int            oldstat;
 
-       bool                    blockend;
-       bool                    dataend;
-
        /* pio stuff */
        struct sg_mapping_iter  sg_miter;
        unsigned int            size;
index 5decfd0..153ab97 100644 (file)
@@ -383,14 +383,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
        host->curr.user_pages = 0;
 
        box = &nc->cmd[0];
-       for (i = 0; i < host->dma.num_ents; i++) {
-               box->cmd = CMD_MODE_BOX;
 
-       /* Initialize sg dma address */
-       sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
-                               + sg->offset;
+       /* location of command block must be 64 bit aligned */
+       BUG_ON(host->dma.cmd_busaddr & 0x07);
 
-       if (i == (host->dma.num_ents - 1))
+       nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+       host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+                              DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+       host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+       n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+                       host->dma.num_ents, host->dma.dir);
+       if (n == 0) {
+               printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+                       mmc_hostname(host->mmc));
+               host->dma.sg = NULL;
+               host->dma.num_ents = 0;
+               return -ENOMEM;
+       }
+
+       for_each_sg(host->dma.sg, sg, n, i) {
+
+               box->cmd = CMD_MODE_BOX;
+
+               if (i == n - 1)
                        box->cmd |= CMD_LC;
                rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
                        (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -418,27 +434,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
                        box->cmd |= CMD_DST_CRCI(crci);
                }
                box++;
-               sg++;
-       }
-
-       /* location of command block must be 64 bit aligned */
-       BUG_ON(host->dma.cmd_busaddr & 0x07);
-
-       nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
-       host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
-                              DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
-       host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
-       n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-                       host->dma.num_ents, host->dma.dir);
-/* dsb inside dma_map_sg will write nc out to mem as well */
-
-       if (n != host->dma.num_ents) {
-               printk(KERN_ERR "%s: Unable to map in all sg elements\n",
-                       mmc_hostname(host->mmc));
-               host->dma.sg = NULL;
-               host->dma.num_ents = 0;
-               return -ENOMEM;
        }
 
        return 0;
@@ -1331,9 +1326,6 @@ msmsdcc_probe(struct platform_device *pdev)
        if (host->timer.function)
                pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
-#if BUSCLK_PWRSAVE
-       msmsdcc_disable_clocks(host, 1);
-#endif
        return 0;
  cmd_irq_free:
        free_irq(cmd_irqres->start, host);
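
The msmsdcc_config_dma() rework above maps the scatterlist first and then walks the mapped entries with for_each_sg(), the canonical replacement for manual sg++ iteration. A sketch of that map-then-iterate shape, with the box-descriptor details elided:

	#include <linux/scatterlist.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Sketch: map a scatterlist, then walk only the entries that were mapped. */
	static int build_dma_boxes(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir)
	{
		struct scatterlist *sg;
		int i, n;

		n = dma_map_sg(dev, sgl, nents, dir);
		if (n == 0)
			return -ENOMEM;		/* nothing could be mapped */

		for_each_sg(sgl, sg, n, i) {
			/*
			 * sg_dma_address(sg) / sg_dma_len(sg) feed one box command;
			 * the last entry (i == n - 1) gets the "last command" flag.
			 */
		}

		return 0;
	}
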
index 1720358..5309ab9 100644 (file)
@@ -277,10 +277,43 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
        host->clock = clock;
 }
 
+/**
+ * sdhci_s3c_platform_8bit_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * The controller supports an 8-bit bus width but is not a v3 controller,
+ * so we add platform_8bit_width() to provide 8-bit support.
+ */
+static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
+{
+       u8 ctrl;
+
+       ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+       switch (width) {
+       case MMC_BUS_WIDTH_8:
+               ctrl |= SDHCI_CTRL_8BITBUS;
+               ctrl &= ~SDHCI_CTRL_4BITBUS;
+               break;
+       case MMC_BUS_WIDTH_4:
+               ctrl |= SDHCI_CTRL_4BITBUS;
+               ctrl &= ~SDHCI_CTRL_8BITBUS;
+               break;
+       default:
+               break;
+       }
+
+       sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+       return 0;
+}
+
 static struct sdhci_ops sdhci_s3c_ops = {
        .get_max_clock          = sdhci_s3c_get_max_clk,
        .set_clock              = sdhci_s3c_set_clock,
        .get_min_clock          = sdhci_s3c_get_min_clock,
+       .platform_8bit_width    = sdhci_s3c_platform_8bit_width,
 };
 
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -473,6 +506,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
        if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
                host->mmc->caps = MMC_CAP_NONREMOVABLE;
 
+       if (pdata->host_caps)
+               host->mmc->caps |= pdata->host_caps;
+
        host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
                         SDHCI_QUIRK_32BIT_DMA_SIZE);
 
index f8f65df..f08f944 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/kernel.h>
-#include <linux/usb.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/mmc/host.h>
index d9d7efb..6322d1f 100644 (file)
@@ -930,7 +930,7 @@ int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 
        init_completion(&dev->dma_done);
 
-       dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+       dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
        if (!dev->card_workqueue)
                goto error9;
index 67822cf..ac0d6a8 100644 (file)
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
        int error = 0;
-       cache_flush_workqueue = create_freezeable_workqueue("smflush");
+       cache_flush_workqueue = create_freezable_workqueue("smflush");
 
        if (IS_ERR(cache_flush_workqueue))
                return PTR_ERR(cache_flush_workqueue);
index f49e49d..5ebe280 100644 (file)
@@ -672,33 +672,7 @@ static int io_init(struct ubi_device *ubi)
                ubi->nor_flash = 1;
        }
 
-       /*
-        * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
-        * for these purposes, not @mtd->writesize. At the moment this does not
-        * matter for NAND, because currently @mtd->writebufsize is equivalent to
-        * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
-        * have @mtd->writebufsize which is multiple of @mtd->writesize.
-        *
-        * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
-        * UBI and UBIFS recovery algorithms rely on the fact that if there was
-        * an unclean power cut, then we can find offset of the last corrupted
-        * node, align the offset to @ubi->min_io_size, read the rest of the
-        * eraseblock starting from this offset, and check whether there are
-        * only 0xFF bytes. If yes, then we are probably dealing with a
-        * corruption caused by a power cut, if not, then this is probably some
-        * severe corruption.
-        *
-        * Thus, we have to use the maximum write unit size of the flash, which
-        * is @mtd->writebufsize, because @mtd->writesize is the minimum write
-        * size, not the maximum.
-        */
-       if (ubi->mtd->type == MTD_NANDFLASH)
-               ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
-       else if (ubi->mtd->type == MTD_NORFLASH)
-               ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
-
-       ubi->min_io_size = ubi->mtd->writebufsize;
-
+       ubi->min_io_size = ubi->mtd->writesize;
        ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
 
        /*
index 0382332..46e1b1a 100644 (file)
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
 config AX88796
        tristate "ASIX AX88796 NE2000 clone support"
        depends on ARM || MIPS || SUPERH
-       select CRC32
-       select MII
+       select PHYLIB
+       select MDIO_BITBANG
        help
          AX88796 driver, using platform bus to provide
          chip detection and resources
@@ -1498,7 +1498,7 @@ config FORCEDETH
 config CS89x0
        tristate "CS89x0 support"
        depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
-               || ARCH_IXDP2X01 || MACH_MX31ADS)
+               || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
        ---help---
          Support for CS89x0 chipset based Ethernet cards. If you have a
          network (Ethernet) card of this type, say Y and read the
@@ -1512,7 +1512,7 @@ config CS89x0
 config CS89x0_NONISA_IRQ
        def_bool y
        depends on CS89x0 != n
-       depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS
+       depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
 
 config TC35815
        tristate "TOSHIBA TC35815 Ethernet support"
@@ -1944,7 +1944,8 @@ config 68360_ENET
 config FEC
        bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-               MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+               IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+       default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
        select PHYLIB
        help
          Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2007,6 +2008,15 @@ config BCM63XX_ENET
          This driver supports the ethernet MACs in the Broadcom 63xx
          MIPS chipset family (BCM63XX).
 
+config FTMAC100
+       tristate "Faraday FTMAC100 10/100 Ethernet support"
+       depends on ARM
+       select MII
+       help
+         This driver supports the FTMAC100 10/100 Ethernet controller
+         from Faraday. It is used on Faraday A320, Andes AG101 and some
+         other ARM/NDS32 SoCs.
+
 source "drivers/net/fs_enet/Kconfig"
 
 source "drivers/net/octeon/Kconfig"
@@ -2099,6 +2109,7 @@ config E1000
 config E1000E
        tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
        depends on PCI && (!SPARC32 || BROKEN)
+       select CRC32
        ---help---
          This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
          ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2235,15 +2246,6 @@ config R8169
          To compile this driver as a module, choose M here: the module
          will be called r8169.  This is recommended.
 
-config R8169_VLAN
-       bool "VLAN support"
-       depends on R8169 && VLAN_8021Q
-       ---help---
-         Say Y here for the r8169 driver to support the functions required
-         by the kernel 802.1Q code.
-
-         If in doubt, say Y.
-
 config SB1250_MAC
        tristate "SB1250 Gigabit Ethernet support"
        depends on SIBYTE_SB1xxx_SOC
@@ -2594,14 +2596,9 @@ config CHELSIO_T1_1G
          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
          are using only 10G cards say 'N' here.
 
-config CHELSIO_T3_DEPENDS
-       tristate
-       depends on PCI && INET
-       default y
-
 config CHELSIO_T3
        tristate "Chelsio Communications T3 10Gb Ethernet support"
-       depends on CHELSIO_T3_DEPENDS
+       depends on PCI && INET
        select FW_LOADER
        select MDIO
        help
@@ -2619,14 +2616,9 @@ config CHELSIO_T3
          To compile this driver as a module, choose M here: the module
          will be called cxgb3.
 
-config CHELSIO_T4_DEPENDS
-       tristate
-       depends on PCI && INET
-       default y
-
 config CHELSIO_T4
        tristate "Chelsio Communications T4 Ethernet support"
-       depends on CHELSIO_T4_DEPENDS
+       depends on PCI
        select FW_LOADER
        select MDIO
        help
@@ -2644,14 +2636,9 @@ config CHELSIO_T4
          To compile this driver as a module choose M here; the module
          will be called cxgb4.
 
-config CHELSIO_T4VF_DEPENDS
-       tristate
-       depends on PCI && INET
-       default y
-
 config CHELSIO_T4VF
        tristate "Chelsio Communications T4 Virtual Function Ethernet support"
-       depends on CHELSIO_T4VF_DEPENDS
+       depends on PCI
        help
          This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
          adapters with PCI-E SR-IOV Virtual Functions.
index b90738d..7c21711 100644 (file)
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
 
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
index 62d6f88..aa07657 100644 (file)
@@ -1644,7 +1644,7 @@ ks8695_cleanup(void)
 module_init(ks8695_init);
 module_exit(ks8695_cleanup);
 
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" MODULENAME);
index 1bf6720..23f2ab0 100644 (file)
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
  */
 static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 {
-       u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+       u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
        u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
                                ~GIGA_CR_1000T_SPEED_MASK;
 
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
        }
 
        if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
-           atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+           atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
                return -1;
        return 0;
 }
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
                                        "Error Setting up Auto-Negotiation\n");
                        return ret_val;
                }
-               mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+               mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
                break;
        case MEDIA_TYPE_100M_FULL:
-               mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+               mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
                break;
        case MEDIA_TYPE_100M_HALF:
-               mii_bmcr_data |= BMCR_SPEED_100;
+               mii_bmcr_data |= BMCR_SPEED100;
                break;
        case MEDIA_TYPE_10M_FULL:
-               mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+               mii_bmcr_data |= BMCR_FULLDPLX;
                break;
        case MEDIA_TYPE_10M_HALF:
-               mii_bmcr_data |= BMCR_SPEED_10;
                break;
        default:
                if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
        err = atl1c_phy_setup_adv(hw);
        if (err)
                return err;
-       mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+       mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 
        return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
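
The atl1c changes above (and the atl1e ones further down) drop the driver's private copies of the MII register bits in favour of the shared definitions in <linux/mii.h>. Composing the same advertisement and control values from the generic constants looks roughly like this (a sketch, not driver code):

	#include <linux/mii.h>

	/* Sketch: equivalent values built from the shared <linux/mii.h> constants. */
	static void mii_defaults(u16 *adv, u16 *ctrl1000, u16 *bmcr)
	{
		/* Advertise all 10/100 modes plus symmetric/asymmetric pause. */
		*adv = ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		/* 1000BASE-T full duplex is advertised via the CTRL1000 register. */
		*ctrl1000 = ADVERTISE_1000FULL;

		/* Enable and (re)start autonegotiation in the basic mode control register. */
		*bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
	}
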
index 3dd6759..655fc6c 100644 (file)
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
 #define REG_DEBUG_DATA0                0x1900
 #define REG_DEBUG_DATA1                0x1904
 
-/* PHY Control Register */
-#define MII_BMCR                       0x00
-#define BMCR_SPEED_SELECT_MSB          0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE          0x0080  /* Collision test enable */
-#define BMCR_FULL_DUPLEX               0x0100  /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG          0x0200  /* Restart auto negotiation */
-#define BMCR_ISOLATE                   0x0400  /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN                        0x0800  /* Power down */
-#define BMCR_AUTO_NEG_EN               0x1000  /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB          0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK                  0x4000  /* 0 = normal, 1 = loopback */
-#define BMCR_RESET                     0x8000  /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK                        0x2040
-#define BMCR_SPEED_1000                        0x0040
-#define BMCR_SPEED_100                 0x2000
-#define BMCR_SPEED_10                  0x0000
-
-/* PHY Status Register */
-#define MII_BMSR                       0x01
-#define BMMSR_EXTENDED_CAPS            0x0001  /* Extended register capabilities */
-#define BMSR_JABBER_DETECT             0x0002  /* Jabber Detected */
-#define BMSR_LINK_STATUS               0x0004  /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS              0x0008  /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT              0x0010  /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE          0x0020  /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS         0x0040  /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS           0x0100  /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS             0x0200  /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS             0x0400  /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS               0x0800  /* 10T   Half Duplex Capable */
-#define BMSR_10T_FD_CAPS               0x1000  /* 10T   Full Duplex Capable */
-#define BMSR_100X_HD_CAPS              0x2000  /* 100X  Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS          0x4000  /* 100X  Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS            0x8000  /* 100T4 Capable */
-
-#define MII_PHYSID1                    0x02
-#define MII_PHYSID2                    0x03
 #define L1D_MPW_PHYID1                 0xD01C  /* V7 */
 #define L1D_MPW_PHYID2                 0xD01D  /* V1-V6 */
 #define L1D_MPW_PHYID3                 0xD01E  /* V8 */
 
 
 /* Autoneg Advertisement Register */
-#define MII_ADVERTISE                  0x04
-#define ADVERTISE_SPEED_MASK           0x01E0
-#define ADVERTISE_DEFAULT_CAP          0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+       (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
 
 /* 1000BASE-T Control Register */
-#define MII_GIGA_CR                    0x09
 #define GIGA_CR_1000T_REPEATER_DTE     0x0400  /* 1=Repeater/switch device port 0=DTE device */
 
 #define GIGA_CR_1000T_MS_VALUE         0x0800  /* 1=Configure PHY as Master 0=Configure PHY as Slave */
index a699bbf..7d9d506 100644 (file)
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+       {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
        /* required last entry */
        { 0 }
 };
@@ -1101,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
        AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
        max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
                        DEVICE_CTRL_MAX_PAYLOAD_MASK;
-       hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+       hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
        max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
                        DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-       hw->dmar_block = min(max_pay_load, hw->dmar_block);
+       hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
        txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
                        TXQ_NUM_TPD_BURST_SHIFT;
@@ -2717,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
                goto err_reset;
        }
 
-       device_init_wakeup(&pdev->dev, 1);
        /* reset the controller to
         * put the device in a known good starting state */
        err = atl1c_phy_init(&adapter->hw);
index 6943a6c..1209297 100644 (file)
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
                ecmd->advertising = hw->autoneg_advertised |
                                    ADVERTISED_TP | ADVERTISED_Autoneg;
 
-               adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+               adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
                adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
                if (hw->autoneg_advertised & ADVERTISE_10_HALF)
-                       adv4 |= MII_AR_10T_HD_CAPS;
+                       adv4 |= ADVERTISE_10HALF;
                if (hw->autoneg_advertised & ADVERTISE_10_FULL)
-                       adv4 |= MII_AR_10T_FD_CAPS;
+                       adv4 |= ADVERTISE_10FULL;
                if (hw->autoneg_advertised & ADVERTISE_100_HALF)
-                       adv4 |= MII_AR_100TX_HD_CAPS;
+                       adv4 |= ADVERTISE_100HALF;
                if (hw->autoneg_advertised & ADVERTISE_100_FULL)
-                       adv4 |= MII_AR_100TX_FD_CAPS;
+                       adv4 |= ADVERTISE_100FULL;
                if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
-                       adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+                       adv9 |= ADVERTISE_1000FULL;
 
                if (adv4 != hw->mii_autoneg_adv_reg ||
                                adv9 != hw->mii_1000t_ctrl_reg) {
index 76cc043..923063d 100644 (file)
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
         * Advertisement Register (Address 4) and the 1000 mb speed bits in
         * the  1000Base-T control Register (Address 9).
         */
-       mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+       mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
        mii_1000t_ctrl_reg  &= ~MII_AT001_CR_1000T_SPEED_MASK;
 
        /*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
         */
        switch (hw->media_type) {
        case MEDIA_TYPE_AUTO_SENSOR:
-               mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS   |
-                                       MII_AR_10T_FD_CAPS   |
-                                       MII_AR_100TX_HD_CAPS |
-                                       MII_AR_100TX_FD_CAPS);
-               hw->autoneg_advertised = ADVERTISE_10_HALF  |
-                                        ADVERTISE_10_FULL  |
-                                        ADVERTISE_100_HALF |
-                                        ADVERTISE_100_FULL;
+               mii_autoneg_adv_reg |= ADVERTISE_ALL;
+               hw->autoneg_advertised = ADVERTISE_ALL;
                if (hw->nic_type == athr_l1e) {
-                       mii_1000t_ctrl_reg |=
-                               MII_AT001_CR_1000T_FD_CAPS;
+                       mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
                        hw->autoneg_advertised |= ADVERTISE_1000_FULL;
                }
                break;
 
        case MEDIA_TYPE_100M_FULL:
-               mii_autoneg_adv_reg   |= MII_AR_100TX_FD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_100FULL;
                hw->autoneg_advertised = ADVERTISE_100_FULL;
                break;
 
        case MEDIA_TYPE_100M_HALF:
-               mii_autoneg_adv_reg   |= MII_AR_100TX_HD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_100_HALF;
                hw->autoneg_advertised = ADVERTISE_100_HALF;
                break;
 
        case MEDIA_TYPE_10M_FULL:
-               mii_autoneg_adv_reg   |= MII_AR_10T_FD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_10_FULL;
                hw->autoneg_advertised = ADVERTISE_10_FULL;
                break;
 
        default:
-               mii_autoneg_adv_reg   |= MII_AR_10T_HD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_10_HALF;
                hw->autoneg_advertised = ADVERTISE_10_HALF;
                break;
        }
 
        /* flow control fixed to enable all */
-       mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+       mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 
        hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
        hw->mii_1000t_ctrl_reg  = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
                return ret_val;
 
        if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-               ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+               ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
                                           mii_1000t_ctrl_reg);
                if (ret_val)
                        return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
        int ret_val;
        u16 phy_data;
 
-       phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+       phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
 
        ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
        if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
                return err;
 
        if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-               err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+               err = atl1e_write_phy_reg(hw, MII_CTRL1000,
                                       hw->mii_1000t_ctrl_reg);
                if (err)
                        return err;
        }
 
        err = atl1e_write_phy_reg(hw, MII_BMCR,
-                       MII_CR_RESET | MII_CR_AUTO_NEG_EN |
-                       MII_CR_RESTART_AUTO_NEG);
+                       BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
        return err;
 }
 
index 5ea2f4d..74df16a 100644 (file)
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 
 /***************************** MII definition ***************************************/
 /* PHY Common Register */
-#define MII_BMCR                        0x00
-#define MII_BMSR                        0x01
-#define MII_PHYSID1                     0x02
-#define MII_PHYSID2                     0x03
-#define MII_ADVERTISE                   0x04
-#define MII_LPA                         0x05
-#define MII_EXPANSION                   0x06
-#define MII_AT001_CR                    0x09
-#define MII_AT001_SR                    0x0A
-#define MII_AT001_ESR                   0x0F
 #define MII_AT001_PSCR                  0x10
 #define MII_AT001_PSSR                  0x11
 #define MII_INT_CTRL                    0x12
 #define MII_INT_STATUS                  0x13
 #define MII_SMARTSPEED                  0x14
-#define MII_RERRCOUNTER                 0x15
-#define MII_SREVISION                   0x16
-#define MII_RESV1                       0x17
 #define MII_LBRERROR                    0x18
-#define MII_PHYADDR                     0x19
 #define MII_RESV2                       0x1a
-#define MII_TPISTATUS                   0x1b
-#define MII_NCONFIG                     0x1c
 
 #define MII_DBG_ADDR                   0x1D
 #define MII_DBG_DATA                   0x1E
 
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB                  0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE                  0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX                       0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG                  0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE                           0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN                        0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN                       0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB                  0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK                          0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET                             0x8000  /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK                        0x2040
-#define MII_CR_SPEED_1000                        0x0040
-#define MII_CR_SPEED_100                         0x2000
-#define MII_CR_SPEED_10                          0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS                     0x0001  /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT                     0x0002  /* Jabber Detected */
-#define MII_SR_LINK_STATUS                       0x0004  /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS                      0x0008  /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT                      0x0010  /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE                  0x0020  /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS                 0x0040  /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS                   0x0100  /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS                     0x0200  /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS                     0x0400  /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS                       0x0800  /* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS                       0x1000  /* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS                      0x2000  /* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS                      0x4000  /* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS                        0x8000  /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT                             0x001f  /* Same as advertise selector  */
-#define MII_LPA_10HALF                           0x0020  /* Can do 10mbps half-duplex   */
-#define MII_LPA_10FULL                           0x0040  /* Can do 10mbps full-duplex   */
-#define MII_LPA_100HALF                          0x0080  /* Can do 100mbps half-duplex  */
-#define MII_LPA_100FULL                          0x0100  /* Can do 100mbps full-duplex  */
-#define MII_LPA_100BASE4                         0x0200  /* 100BASE-T4  */
-#define MII_LPA_PAUSE                            0x0400  /* PAUSE */
-#define MII_LPA_ASYPAUSE                         0x0800  /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT                           0x2000  /* Link partner faulted        */
-#define MII_LPA_LPACK                            0x4000  /* Link partner acked us       */
-#define MII_LPA_NPAGE                            0x8000  /* Next page bit               */
-
 /* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD                   0x0001  /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS                      0x0020  /* 10T   Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS                      0x0040  /* 10T   Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS                    0x0080  /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS                    0x0100  /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS                       0x0200  /* 100T4 Capable */
-#define MII_AR_PAUSE                            0x0400  /* Pause operation desired */
-#define MII_AR_ASM_DIR                          0x0800  /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT                     0x2000  /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE                        0x8000  /* Next Page ability supported */
-#define MII_AR_SPEED_MASK                       0x01E0
-#define MII_AR_DEFAULT_CAP_MASK                 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK                 0
 
 /* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS              0x0100  /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS              0x0200  /* Advertise 1000T FD capability  */
-#define MII_AT001_CR_1000T_REPEATER_DTE         0x0400  /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE             0x0800  /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE            0x1000  /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL     0x0000  /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1          0x2000  /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2          0x4000  /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3          0x6000  /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4          0x8000  /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK           0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK     0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS           0x0400  /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS           0x0800  /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS     0x1000  /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS      0x2000  /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES        0x4000  /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT      0x8000  /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT   12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT    13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS             0x1000  /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS             0x2000  /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS             0x4000  /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS             0x8000  /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+       (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK    MII_AT001_CR_1000T_SPEED_MASK
 
 /* AT001 PHY Specific Control Register */
 #define MII_AT001_PSCR_JABBER_DISABLE           0x0001  /* 1=Jabber Function disabled */
index e28f8ba..1ff001a 100644 (file)
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
+       hw->revision_id  = pdev->revision;
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
        phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
        max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
                        DEVICE_CTRL_MAX_PAYLOAD_MASK;
 
-       hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+       hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
 
        max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
                        DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-       hw->dmar_block = min(max_pay_load, hw->dmar_block);
+       hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
        if (hw->nic_type != athr_l2e_revB)
                AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
                atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
                atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 
-               mii_advertise_data = MII_AR_10T_HD_CAPS;
+               mii_advertise_data = ADVERTISE_10HALF;
 
-               if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+               if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
                    (atl1e_write_phy_reg(hw,
                           MII_ADVERTISE, mii_advertise_data) != 0) ||
                    (atl1e_phy_commit(hw)) != 0) {
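
/*
 * For reference: the atl1e hunks above drop the driver-private MII_AR_* /
 * MII_AT001_* register definitions and use the generic constants from
 * <linux/mii.h> instead.  A minimal sketch of the same idea, assuming a
 * hypothetical example_write_phy_reg() helper (not part of the patch):
 */
#include <linux/mii.h>
#include <linux/types.h>

struct example_hw;                                      /* hypothetical */
int example_write_phy_reg(struct example_hw *hw, u32 reg, u16 val);

static int example_advertise_10half_only(struct example_hw *hw)
{
        int err;

        /* clear all 1000BASE-T advertisement */
        err = example_write_phy_reg(hw, MII_CTRL1000, 0);
        if (err)
                return err;

        /* advertise only 10BASE-T half duplex, as done here for WoL suspend */
        return example_write_phy_reg(hw, MII_ADVERTISE, ADVERTISE_10HALF);
}
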
index 3b52768..67f40b9 100644 (file)
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
        hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
        adapter->wol = 0;
+       device_set_wakeup_enable(&adapter->pdev->dev, false);
        adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
        adapter->ict = 50000;           /* 100ms */
        adapter->link_speed = SPEED_0;  /* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
 }
 
 #ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
        struct atl1_hw *hw = &adapter->hw;
        u32 ctrl = 0;
        u32 wufc = adapter->wol;
        u32 val;
-       int retval;
        u16 speed;
        u16 duplex;
 
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
        if (netif_running(netdev))
                atl1_down(adapter);
 
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        val = ctrl & BMSR_LSTATUS;
        if (val)
                wufc &= ~ATLX_WUFC_LNKC;
+       if (!wufc)
+               goto disable_wol;
 
-       if (val && wufc) {
+       if (val) {
                val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
                if (val) {
                        if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
                ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
                iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
                ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-               goto exit;
-       }
-
-       if (!val && wufc) {
+       } else {
                ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
                iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
                ioread32(hw->hw_addr + REG_WOL_CTRL);
                iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
                ioread32(hw->hw_addr + REG_MAC_CTRL);
                hw->phy_configured = false;
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-               goto exit;
        }
 
-disable_wol:
+       return 0;
+
+ disable_wol:
        iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
        ioread32(hw->hw_addr + REG_WOL_CTRL);
        ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
        iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
        ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
        hw->phy_configured = false;
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
-       if (netif_running(netdev))
-               pci_disable_msi(adapter->pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
 }
 
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
-       u32 err;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               if (netif_msg_ifup(adapter))
-                       dev_printk(KERN_DEBUG, &pdev->dev,
-                               "error enabling pci device\n");
-               return err;
-       }
-
-       pci_set_master(pdev);
        iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
 
        atl1_reset_hw(&adapter->hw);
 
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
 
        return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS    (&atl1_pm_ops)
+
 #else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS    NULL
 #endif
 
 static void atl1_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PM
-       atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct atl1_adapter *adapter = netdev_priv(netdev);
+
+       atl1_suspend(&pdev->dev);
+       pci_wake_from_d3(pdev, adapter->wol);
+       pci_set_power_state(pdev, PCI_D3hot);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
        .id_table = atl1_pci_tbl,
        .probe = atl1_probe,
        .remove = __devexit_p(atl1_remove),
-       .suspend = atl1_suspend,
-       .resume = atl1_resume,
-       .shutdown = atl1_shutdown
+       .shutdown = atl1_shutdown,
+       .driver.pm = ATL1_PM_OPS,
 };
 
 /*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
        adapter->wol = 0;
        if (wol->wolopts & WAKE_MAGIC)
                adapter->wol |= ATLX_WUFC_MAG;
+
+       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
        return 0;
 }
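
/*
 * For reference: the atl1 hunks above replace the legacy pci_driver
 * .suspend/.resume callbacks with dev_pm_ops.  The general shape of that
 * conversion, with hypothetical foo_* names (a sketch, not part of the
 * patch):
 */
#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        /* quiesce the device only; the PCI/PM core now saves config space
         * and chooses the low-power state, so no pci_save_state() or
         * pci_set_power_state() here */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* re-initialise the device; config space is already restored */
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
        .name      = "foo",
        /* .id_table, .probe, .remove, .shutdown elided */
        .driver.pm = &foo_pm_ops,
};
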
 
index 4e6f4e9..e637e9f 100644 (file)
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
+       hw->revision_id  = pdev->revision;
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
        adapter->wol = 0;
index 4bebff3..e7cb8c8 100644 (file)
@@ -9,7 +9,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/isapnp.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
 #include <linux/eeprom_93cx6.h>
 #include <linux/slab.h>
 
 #include <net/ax88796.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
-
-static int phy_debug = 0;
 
 /* Rename the lib8390.c functions to show that they are in this driver */
-#define __ei_open       ax_ei_open
-#define __ei_close      ax_ei_close
-#define __ei_poll      ax_ei_poll
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
 #define __ei_start_xmit ax_ei_start_xmit
 #define __ei_tx_timeout ax_ei_tx_timeout
-#define __ei_get_stats  ax_ei_get_stats
+#define __ei_get_stats ax_ei_get_stats
 #define __ei_set_multicast_list ax_ei_set_multicast_list
-#define __ei_interrupt  ax_ei_interrupt
+#define __ei_interrupt ax_ei_interrupt
 #define ____alloc_ei_netdev ax__alloc_ei_netdev
-#define __NS8390_init   ax_NS8390_init
+#define __NS8390_init ax_NS8390_init
 
 /* force unsigned long back to 'void __iomem *' */
 #define ax_convert_addr(_a) ((void __force __iomem *)(_a))
 
-#define ei_inb(_a)     readb(ax_convert_addr(_a))
+#define ei_inb(_a) readb(ax_convert_addr(_a))
 #define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
 
-#define ei_inb_p(_a)   ei_inb(_a)
+#define ei_inb_p(_a) ei_inb(_a)
 #define ei_outb_p(_v, _a) ei_outb(_v, _a)
 
 /* define EI_SHIFT() to take into account our register offsets */
-#define EI_SHIFT(x)     (ei_local->reg_offset[(x)])
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
 
 /* Ensure we have our RCR base value */
 #define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
 #define NE_DATAPORT    EI_SHIFT(0x10)
 
 #define NE1SM_START_PG 0x20    /* First page of TX buffer */
-#define NE1SM_STOP_PG  0x40    /* Last page +1 of RX ring */
+#define NE1SM_STOP_PG  0x40    /* Last page +1 of RX ring */
 #define NESM_START_PG  0x40    /* First page of TX buffer */
 #define NESM_STOP_PG   0x80    /* Last page +1 of RX ring */
 
+#define AX_GPOC_PPDSET BIT(6)
+
 /* device private data */
 
 struct ax_device {
-       struct timer_list        mii_timer;
-       spinlock_t               mii_lock;
-       struct mii_if_info       mii;
-
-       u32                      msg_enable;
-       void __iomem            *map2;
-       struct platform_device  *dev;
-       struct resource         *mem;
-       struct resource         *mem2;
-       struct ax_plat_data     *plat;
-
-       unsigned char            running;
-       unsigned char            resume_open;
-       unsigned int             irqflags;
-
-       u32                      reg_offsets[0x20];
+       struct mii_bus *mii_bus;
+       struct mdiobb_ctrl bb_ctrl;
+       struct phy_device *phy_dev;
+       void __iomem *addr_memr;
+       u8 reg_memr;
+       int link;
+       int speed;
+       int duplex;
+
+       void __iomem *map2;
+       const struct ax_plat_data *plat;
+
+       unsigned char running;
+       unsigned char resume_open;
+       unsigned int irqflags;
+
+       u32 reg_offsets[0x20];
 };
 
 static inline struct ax_device *to_ax_dev(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       return (struct ax_device *)(ei_local+1);
+       return (struct ax_device *)(ei_local + 1);
 }
 
-/* ax_initial_check
+/*
+ * ax_initial_check
  *
  * do an initial probe for the card to check whether it exists
  * and is functional
  */
-
 static int ax_initial_check(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
        if (reg0 == 0xFF)
                return -ENODEV;
 
-       ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+       ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
        regd = ei_inb(ioaddr + 0x0d);
        ei_outb(0xff, ioaddr + 0x0d);
-       ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+       ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
        ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
        if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
                ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
        return 0;
 }
 
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-
+/*
+ * Hard reset the card. This used to pause for the same period that an
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
 static void ax_reset_8390(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        unsigned long reset_start_time = jiffies;
        void __iomem *addr = (void __iomem *)dev->base_addr;
 
        if (ei_debug > 1)
-               dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
+               netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
 
        ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
 
-       ei_status.txing = 0;
-       ei_status.dmaing = 0;
+       ei_local->txing = 0;
+       ei_local->dmaing = 0;
 
        /* This check _should_not_ be necessary, omit eventually. */
        while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-               if (jiffies - reset_start_time > 2*HZ/100) {
-                       dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
-                              __func__, dev->name);
+               if (jiffies - reset_start_time > 2 * HZ / 100) {
+                       netdev_warn(dev, "%s: did not complete.\n", __func__);
                        break;
                }
        }
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
                            int ring_page)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        void __iomem *nic_base = ei_local->mem;
 
        /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing) {
-               dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+       if (ei_local->dmaing) {
+               netdev_err(dev, "DMAing conflict in %s "
                        "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, __func__,
-                       ei_status.dmaing, ei_status.irqlock);
+                       __func__,
+                       ei_local->dmaing, ei_local->irqlock);
                return;
        }
 
-       ei_status.dmaing |= 0x01;
-       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+       ei_local->dmaing |= 0x01;
+       ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
        ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
        ei_outb(0, nic_base + EN0_RCNTHI);
        ei_outb(0, nic_base + EN0_RSARLO);              /* On page boundary */
        ei_outb(ring_page, nic_base + EN0_RSARHI);
        ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
-       if (ei_status.word16)
-               readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+       if (ei_local->word16)
+               readsw(nic_base + NE_DATAPORT, hdr,
+                      sizeof(struct e8390_pkt_hdr) >> 1);
        else
-               readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+               readsb(nic_base + NE_DATAPORT, hdr,
+                      sizeof(struct e8390_pkt_hdr));
 
        ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
+       ei_local->dmaing &= ~0x01;
 
        le16_to_cpus(&hdr->count);
 }
 
 
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for hints.
-   The NEx000 doesn't share the on-board packet memory -- you have to put
-   the packet out through the "remote DMA" dataport using ei_outb. */
-
+/*
+ * Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
 static void ax_block_input(struct net_device *dev, int count,
                           struct sk_buff *skb, int ring_offset)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        void __iomem *nic_base = ei_local->mem;
        char *buf = skb->data;
 
-       if (ei_status.dmaing) {
-               dev_err(&ax->dev->dev,
-                       "%s: DMAing conflict in %s "
+       if (ei_local->dmaing) {
+               netdev_err(dev,
+                       "DMAing conflict in %s "
                        "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, __func__,
-                       ei_status.dmaing, ei_status.irqlock);
+                       __func__,
+                       ei_local->dmaing, ei_local->irqlock);
                return;
        }
 
-       ei_status.dmaing |= 0x01;
+       ei_local->dmaing |= 0x01;
 
-       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
        ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
        ei_outb(count >> 8, nic_base + EN0_RCNTHI);
        ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
        ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
        ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
-       if (ei_status.word16) {
+       if (ei_local->word16) {
                readsw(nic_base + NE_DATAPORT, buf, count >> 1);
                if (count & 0x01)
                        buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
                readsb(nic_base + NE_DATAPORT, buf, count);
        }
 
-       ei_status.dmaing &= ~1;
+       ei_local->dmaing &= ~1;
 }
 
 static void ax_block_output(struct net_device *dev, int count,
                            const unsigned char *buf, const int start_page)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        void __iomem *nic_base = ei_local->mem;
        unsigned long dma_start;
 
-       /* Round the count up for word writes.  Do we need to do this?
-          What effect will an odd byte count have on the 8390?
-          I should check someday. */
-
-       if (ei_status.word16 && (count & 0x01))
+       /*
+        * Round the count up for word writes. Do we need to do this?
+        * What effect will an odd byte count have on the 8390?  I
+        * should check someday.
+        */
+       if (ei_local->word16 && (count & 0x01))
                count++;
 
        /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing) {
-               dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
+       if (ei_local->dmaing) {
+               netdev_err(dev, "DMAing conflict in %s."
                        "[DMAstat:%d][irqlock:%d]\n",
-                       dev->name, __func__,
-                      ei_status.dmaing, ei_status.irqlock);
+                       __func__,
+                      ei_local->dmaing, ei_local->irqlock);
                return;
        }
 
-       ei_status.dmaing |= 0x01;
+       ei_local->dmaing |= 0x01;
        /* We should already be in page 0, but to be safe... */
        ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
 
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
 
        /* Now the normal output. */
        ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
-       ei_outb(count >> 8,   nic_base + EN0_RCNTHI);
+       ei_outb(count >> 8, nic_base + EN0_RCNTHI);
        ei_outb(0x00, nic_base + EN0_RSARLO);
        ei_outb(start_page, nic_base + EN0_RSARHI);
 
        ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
-       if (ei_status.word16) {
-               writesw(nic_base + NE_DATAPORT, buf, count>>1);
-       } else {
+       if (ei_local->word16)
+               writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+       else
                writesb(nic_base + NE_DATAPORT, buf, count);
-       }
 
        dma_start = jiffies;
 
        while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-               if (jiffies - dma_start > 2*HZ/100) {           /* 20ms */
-                       dev_warn(&ax->dev->dev,
-                                "%s: timeout waiting for Tx RDC.\n", dev->name);
+               if (jiffies - dma_start > 2 * HZ / 100) {               /* 20ms */
+                       netdev_warn(dev, "timeout waiting for Tx RDC.\n");
                        ax_reset_8390(dev);
-                       ax_NS8390_init(dev,1);
+                       ax_NS8390_init(dev, 1);
                        break;
                }
        }
 
        ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
+       ei_local->dmaing &= ~0x01;
 }
 
 /* definitions for accessing MII/EEPROM interface */
 
 #define AX_MEMR                        EI_SHIFT(0x14)
-#define AX_MEMR_MDC            (1<<0)
-#define AX_MEMR_MDIR           (1<<1)
-#define AX_MEMR_MDI            (1<<2)
-#define AX_MEMR_MDO            (1<<3)
-#define AX_MEMR_EECS           (1<<4)
-#define AX_MEMR_EEI            (1<<5)
-#define AX_MEMR_EEO            (1<<6)
-#define AX_MEMR_EECLK          (1<<7)
-
-/* ax_mii_ei_outbits
- *
- * write the specified set of bits to the phy
-*/
-
-static void
-ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
+#define AX_MEMR_MDC            BIT(0)
+#define AX_MEMR_MDIR           BIT(1)
+#define AX_MEMR_MDI            BIT(2)
+#define AX_MEMR_MDO            BIT(3)
+#define AX_MEMR_EECS           BIT(4)
+#define AX_MEMR_EEI            BIT(5)
+#define AX_MEMR_EEO            BIT(6)
+#define AX_MEMR_EECLK          BIT(7)
+
+static void ax_handle_link_change(struct net_device *dev)
 {
-       struct ei_device *ei_local = netdev_priv(dev);
-       void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-       unsigned int memr;
-
-       /* clock low, data to output mode */
-       memr = ei_inb(memr_addr);
-       memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
-       ei_outb(memr, memr_addr);
-
-       for (len--; len >= 0; len--) {
-               if (bits & (1 << len))
-                       memr |= AX_MEMR_MDO;
-               else
-                       memr &= ~AX_MEMR_MDO;
-
-               ei_outb(memr, memr_addr);
-
-               /* clock high */
+       struct ax_device  *ax = to_ax_dev(dev);
+       struct phy_device *phy_dev = ax->phy_dev;
+       int status_change = 0;
 
-               ei_outb(memr | AX_MEMR_MDC, memr_addr);
-               udelay(1);
+       if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+                            (ax->duplex != phy_dev->duplex))) {
 
-               /* clock low */
-               ei_outb(memr, memr_addr);
+               ax->speed = phy_dev->speed;
+               ax->duplex = phy_dev->duplex;
+               status_change = 1;
        }
 
-       /* leaves the clock line low, mdir input */
-       memr |= AX_MEMR_MDIR;
-       ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR);
-}
-
-/* ax_phy_ei_inbits
- *
- * read a specified number of bits from the phy
-*/
-
-static unsigned int
-ax_phy_ei_inbits(struct net_device *dev, int no)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-       unsigned int memr;
-       unsigned int result = 0;
-
-       /* clock low, data to input mode */
-       memr = ei_inb(memr_addr);
-       memr &= ~AX_MEMR_MDC;
-       memr |= AX_MEMR_MDIR;
-       ei_outb(memr, memr_addr);
-
-       for (no--; no >= 0; no--) {
-               ei_outb(memr | AX_MEMR_MDC, memr_addr);
-
-               udelay(1);
-
-               if (ei_inb(memr_addr) & AX_MEMR_MDI)
-                       result |= (1<<no);
+       if (phy_dev->link != ax->link) {
+               if (!phy_dev->link) {
+                       ax->speed = 0;
+                       ax->duplex = -1;
+               }
+               ax->link = phy_dev->link;
 
-               ei_outb(memr, memr_addr);
+               status_change = 1;
        }
 
-       return result;
-}
-
-/* ax_phy_issueaddr
- *
- * use the low level bit shifting routines to send the address
- * and command to the specified phy
-*/
-
-static void
-ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
-{
-       if (phy_debug)
-               pr_debug("%s: dev %p, %04x, %04x, %d\n",
-                       __func__, dev, phy_addr, reg, opc);
-
-       ax_mii_ei_outbits(dev, 0x3f, 6);        /* pre-amble */
-       ax_mii_ei_outbits(dev, 1, 2);           /* frame-start */
-       ax_mii_ei_outbits(dev, opc, 2);         /* op code */
-       ax_mii_ei_outbits(dev, phy_addr, 5);    /* phy address */
-       ax_mii_ei_outbits(dev, reg, 5);         /* reg address */
+       if (status_change)
+               phy_print_status(phy_dev);
 }
 
-static int
-ax_phy_read(struct net_device *dev, int phy_addr, int reg)
+static int ax_mii_probe(struct net_device *dev)
 {
-       struct ei_device *ei_local = netdev_priv(dev);
-       unsigned long flags;
-       unsigned int result;
+       struct ax_device  *ax = to_ax_dev(dev);
+       struct phy_device *phy_dev = NULL;
+       int ret;
 
-       spin_lock_irqsave(&ei_local->page_lock, flags);
+       /* find the first phy */
+       phy_dev = phy_find_first(ax->mii_bus);
+       if (!phy_dev) {
+               netdev_err(dev, "no PHY found\n");
+               return -ENODEV;
+       }
 
-       ax_phy_issueaddr(dev, phy_addr, reg, 2);
+       ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+                                PHY_INTERFACE_MODE_MII);
+       if (ret) {
+               netdev_err(dev, "Could not attach to PHY\n");
+               return ret;
+       }
 
-       result = ax_phy_ei_inbits(dev, 17);
-       result &= ~(3<<16);
+       /* mask with MAC supported features */
+       phy_dev->supported &= PHY_BASIC_FEATURES;
+       phy_dev->advertising = phy_dev->supported;
 
-       spin_unlock_irqrestore(&ei_local->page_lock, flags);
+       ax->phy_dev = phy_dev;
 
-       if (phy_debug)
-               pr_debug("%s: %04x.%04x => read %04x\n", __func__,
-                        phy_addr, reg, result);
+       netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+                   phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
 
-       return result;
+       return 0;
 }
 
-static void
-ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
+static void ax_phy_switch(struct net_device *dev, int on)
 {
-       struct ei_device *ei = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
-       unsigned long flags;
-
-       dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
-               __func__, dev, phy_addr, reg, value);
-
-       spin_lock_irqsave(&ei->page_lock, flags);
-
-       ax_phy_issueaddr(dev, phy_addr, reg, 1);
-       ax_mii_ei_outbits(dev, 2, 2);           /* send TA */
-       ax_mii_ei_outbits(dev, value, 16);
-
-       spin_unlock_irqrestore(&ei->page_lock, flags);
-}
+       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
 
-static void ax_mii_expiry(unsigned long data)
-{
-       struct net_device *dev = (struct net_device *)data;
-       struct ax_device  *ax = to_ax_dev(dev);
-       unsigned long flags;
+       u8 reg_gpoc =  ax->plat->gpoc_val;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       mii_check_media(&ax->mii, netif_msg_link(ax), 0);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
+       if (!!on)
+               reg_gpoc &= ~AX_GPOC_PPDSET;
+       else
+               reg_gpoc |= AX_GPOC_PPDSET;
 
-       if (ax->running) {
-               ax->mii_timer.expires = jiffies + HZ*2;
-               add_timer(&ax->mii_timer);
-       }
+       ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
 }
 
 static int ax_open(struct net_device *dev)
 {
-       struct ax_device  *ax = to_ax_dev(dev);
-       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
        int ret;
 
-       dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
+       netdev_dbg(dev, "open\n");
 
        ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
                          dev->name, dev);
        if (ret)
-               return ret;
-
-       ret = ax_ei_open(dev);
-       if (ret) {
-               free_irq(dev->irq, dev);
-               return ret;
-       }
+               goto failed_request_irq;
 
        /* turn the phy on (if turned off) */
+       ax_phy_switch(dev, 1);
 
-       ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17));
-       ax->running = 1;
-
-       /* start the MII timer */
-
-       init_timer(&ax->mii_timer);
+       ret = ax_mii_probe(dev);
+       if (ret)
+               goto failed_mii_probe;
+       phy_start(ax->phy_dev);
 
-       ax->mii_timer.expires  = jiffies+1;
-       ax->mii_timer.data     = (unsigned long) dev;
-       ax->mii_timer.function = ax_mii_expiry;
+       ret = ax_ei_open(dev);
+       if (ret)
+               goto failed_ax_ei_open;
 
-       add_timer(&ax->mii_timer);
+       ax->running = 1;
 
        return 0;
+
+ failed_ax_ei_open:
+       phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+       ax_phy_switch(dev, 0);
+       free_irq(dev->irq, dev);
+ failed_request_irq:
+       return ret;
 }
 
 static int ax_close(struct net_device *dev)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       struct ei_device *ei_local = netdev_priv(dev);
 
-       dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
-
-       /* turn the phy off */
-
-       ei_outb(ax->plat->gpoc_val | (1<<6),
-              ei_local->mem + EI_SHIFT(0x17));
+       netdev_dbg(dev, "close\n");
 
        ax->running = 0;
        wmb();
 
-       del_timer_sync(&ax->mii_timer);
        ax_ei_close(dev);
 
+       /* turn the phy off */
+       ax_phy_switch(dev, 0);
+       phy_disconnect(ax->phy_dev);
+
        free_irq(dev->irq, dev);
        return 0;
 }
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
 static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       unsigned long flags;
-       int rc;
+       struct phy_device *phy_dev = ax->phy_dev;
 
        if (!netif_running(dev))
                return -EINVAL;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
+       if (!phy_dev)
+               return -ENODEV;
 
-       return rc;
+       return phy_mii_ioctl(phy_dev, req, cmd);
 }
 
 /* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 static void ax_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
 {
-       struct ax_device *ax = to_ax_dev(dev);
+       struct platform_device *pdev = to_platform_device(dev->dev.parent);
 
        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, ax->dev->name);
+       strcpy(info->bus_info, pdev->name);
 }
 
 static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       unsigned long flags;
+       struct phy_device *phy_dev = ax->phy_dev;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       mii_ethtool_gset(&ax->mii, cmd);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
+       if (!phy_dev)
+               return -ENODEV;
 
-       return 0;
+       return phy_ethtool_gset(phy_dev, cmd);
 }
 
 static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       unsigned long flags;
-       int rc;
+       struct phy_device *phy_dev = ax->phy_dev;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       rc = mii_ethtool_sset(&ax->mii, cmd);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
-
-       return rc;
-}
-
-static int ax_nway_reset(struct net_device *dev)
-{
-       struct ax_device *ax = to_ax_dev(dev);
-       return mii_nway_restart(&ax->mii);
-}
+       if (!phy_dev)
+               return -ENODEV;
 
-static u32 ax_get_link(struct net_device *dev)
-{
-       struct ax_device *ax = to_ax_dev(dev);
-       return mii_link_ok(&ax->mii);
+       return phy_ethtool_sset(phy_dev, cmd);
 }
 
 static const struct ethtool_ops ax_ethtool_ops = {
        .get_drvinfo            = ax_get_drvinfo,
        .get_settings           = ax_get_settings,
        .set_settings           = ax_set_settings,
-       .nway_reset             = ax_nway_reset,
-       .get_link               = ax_get_link,
+       .get_link               = ethtool_op_get_link,
 };
 
 #ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
        .ndo_get_stats          = ax_ei_get_stats,
        .ndo_set_multicast_list = ax_ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ax_ei_poll,
 #endif
 };
 
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+       if (level)
+               ax->reg_memr |= AX_MEMR_MDC;
+       else
+               ax->reg_memr &= ~AX_MEMR_MDC;
+
+       ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+       if (output)
+               ax->reg_memr &= ~AX_MEMR_MDIR;
+       else
+               ax->reg_memr |= AX_MEMR_MDIR;
+
+       ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+       if (value)
+               ax->reg_memr |= AX_MEMR_MDO;
+       else
+               ax->reg_memr &= ~AX_MEMR_MDO;
+
+       ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+       int reg_memr = ei_inb(ax->addr_memr);
+
+       return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+       .owner = THIS_MODULE,
+       .set_mdc = ax_bb_mdc,
+       .set_mdio_dir = ax_bb_dir,
+       .set_mdio_data = ax_bb_set_data,
+       .get_mdio_data = ax_bb_get_data,
+};
+
 /* setup code */
 
+static int ax_mii_init(struct net_device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev->dev.parent);
+       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
+       int err, i;
+
+       ax->bb_ctrl.ops = &bb_ops;
+       ax->addr_memr = ei_local->mem + AX_MEMR;
+       ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+       if (!ax->mii_bus) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       ax->mii_bus->name = "ax88796_mii_bus";
+       ax->mii_bus->parent = dev->dev.parent;
+       snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+       ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       if (!ax->mii_bus->irq) {
+               err = -ENOMEM;
+               goto out_free_mdio_bitbang;
+       }
+
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               ax->mii_bus->irq[i] = PHY_POLL;
+
+       err = mdiobus_register(ax->mii_bus);
+       if (err)
+               goto out_free_irq;
+
+       return 0;
+
+ out_free_irq:
+       kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+       free_mdio_bitbang(ax->mii_bus);
+ out:
+       return err;
+}
+
 static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
 {
        void __iomem *ioaddr = ei_local->mem;
        struct ax_device *ax = to_ax_dev(dev);
 
-       /* Select page 0*/
-       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD);
+       /* Select page 0 */
+       ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
 
        /* set to byte access */
        ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
        ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
 }
 
-/* ax_init_dev
+/*
+ * ax_init_dev
  *
  * initialise the specified device, taking care to note the MAC
  * address it may already have (if configured), and ensure
  * the device is ready to be used by lib8390.c and registered with
  * the network layer.
  */
-
-static int ax_init_dev(struct net_device *dev, int first_init)
+static int ax_init_dev(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
        struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 
        /* read the mac from the card prom if we need it */
 
-       if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) {
+       if (ax->plat->flags & AXFLG_HAS_EEPROM) {
                unsigned char SA_prom[32];
 
-               for(i = 0; i < sizeof(SA_prom); i+=2) {
+               for (i = 0; i < sizeof(SA_prom); i += 2) {
                        SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
-                       SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT);
+                       SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
                }
 
                if (ax->plat->wordlength == 2)
                        for (i = 0; i < 16; i++)
                                SA_prom[i] = SA_prom[i+i];
 
-               memcpy(dev->dev_addr,  SA_prom, 6);
+               memcpy(dev->dev_addr, SA_prom, 6);
        }
 
 #ifdef CONFIG_AX88796_93CX6
-       if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) {
+       if (ax->plat->flags & AXFLG_HAS_93CX6) {
                unsigned char mac_addr[6];
                struct eeprom_93cx6 eeprom;
 
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
                                       (__le16 __force *)mac_addr,
                                       sizeof(mac_addr) >> 1);
 
-               memcpy(dev->dev_addr,  mac_addr, 6);
+               memcpy(dev->dev_addr, mac_addr, 6);
        }
 #endif
        if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
                stop_page = NE1SM_STOP_PG;
        }
 
-       /* load the mac-address from the device if this is the
-        * first time we've initialised */
-
-       if (first_init) {
-               if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
-                       ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
-                               ei_local->mem + E8390_CMD); /* 0x61 */
-                       for (i = 0; i < ETHER_ADDR_LEN; i++)
-                               dev->dev_addr[i] =
-                                       ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
-               }
-
-               if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
-                    ax->plat->mac_addr)
-                       memcpy(dev->dev_addr, ax->plat->mac_addr,
-                               ETHER_ADDR_LEN);
+       /* load the mac-address from the device */
+       if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+               ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+                       ei_local->mem + E8390_CMD); /* 0x61 */
+               for (i = 0; i < ETHER_ADDR_LEN; i++)
+                       dev->dev_addr[i] =
+                               ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
        }
 
+       if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+           ax->plat->mac_addr)
+               memcpy(dev->dev_addr, ax->plat->mac_addr,
+                      ETHER_ADDR_LEN);
+
        ax_reset_8390(dev);
 
-       ei_status.name = "AX88796";
-       ei_status.tx_start_page = start_page;
-       ei_status.stop_page = stop_page;
-       ei_status.word16 = (ax->plat->wordlength == 2);
-       ei_status.rx_start_page = start_page + TX_PAGES;
+       ei_local->name = "AX88796";
+       ei_local->tx_start_page = start_page;
+       ei_local->stop_page = stop_page;
+       ei_local->word16 = (ax->plat->wordlength == 2);
+       ei_local->rx_start_page = start_page + TX_PAGES;
 
 #ifdef PACKETBUF_MEMSIZE
-        /* Allow the packet buffer size to be overridden by know-it-alls. */
-       ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+       /* Allow the packet buffer size to be overridden by know-it-alls. */
+       ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
 #endif
 
-       ei_status.reset_8390    = &ax_reset_8390;
-       ei_status.block_input   = &ax_block_input;
-       ei_status.block_output  = &ax_block_output;
-       ei_status.get_8390_hdr  = &ax_get_8390_hdr;
-       ei_status.priv = 0;
-
-       dev->netdev_ops         = &ax_netdev_ops;
-       dev->ethtool_ops        = &ax_ethtool_ops;
-
-       ax->msg_enable          = NETIF_MSG_LINK;
-       ax->mii.phy_id_mask     = 0x1f;
-       ax->mii.reg_num_mask    = 0x1f;
-       ax->mii.phy_id          = 0x10;         /* onboard phy */
-       ax->mii.force_media     = 0;
-       ax->mii.full_duplex     = 0;
-       ax->mii.mdio_read       = ax_phy_read;
-       ax->mii.mdio_write      = ax_phy_write;
-       ax->mii.dev             = dev;
+       ei_local->reset_8390 = &ax_reset_8390;
+       ei_local->block_input = &ax_block_input;
+       ei_local->block_output = &ax_block_output;
+       ei_local->get_8390_hdr = &ax_get_8390_hdr;
+       ei_local->priv = 0;
 
-       ax_NS8390_init(dev, 0);
+       dev->netdev_ops = &ax_netdev_ops;
+       dev->ethtool_ops = &ax_ethtool_ops;
 
-       if (first_init)
-               dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
-                        ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
-                        dev->dev_addr);
+       ret = ax_mii_init(dev);
+       if (ret)
+               goto out_irq;
+
+       ax_NS8390_init(dev, 0);
 
        ret = register_netdev(dev);
        if (ret)
                goto out_irq;
 
+       netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+                   ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+                   dev->dev_addr);
+
        return 0;
 
  out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
        return ret;
 }
 
-static int ax_remove(struct platform_device *_dev)
+static int ax_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = platform_get_drvdata(_dev);
-       struct ax_device  *ax;
-
-       ax = to_ax_dev(dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
+       struct resource *mem;
 
        unregister_netdev(dev);
        free_irq(dev->irq, dev);
 
-       iounmap(ei_status.mem);
-       release_resource(ax->mem);
-       kfree(ax->mem);
+       iounmap(ei_local->mem);
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
 
        if (ax->map2) {
                iounmap(ax->map2);
-               release_resource(ax->mem2);
-               kfree(ax->mem2);
+               mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               release_mem_region(mem->start, resource_size(mem));
        }
 
        free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
        return 0;
 }
 
-/* ax_probe
+/*
+ * ax_probe
  *
  * This is the entry point that the platform device system uses to
- * notify us of a new device to attach to. Allocate memory, find
- * the resources and information passed, and map the necessary registers.
-*/
-
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
 static int ax_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
-       struct ax_device  *ax;
-       struct resource   *res;
-       size_t size;
+       struct ei_device *ei_local;
+       struct ax_device *ax;
+       struct resource *irq, *mem, *mem2;
+       resource_size_t mem_size, mem2_size = 0;
        int ret = 0;
 
        dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* ok, let's setup our device */
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       ei_local = netdev_priv(dev);
        ax = to_ax_dev(dev);
 
-       memset(ax, 0, sizeof(struct ax_device));
-
-       spin_lock_init(&ax->mii_lock);
-
-       ax->dev = pdev;
        ax->plat = pdev->dev.platform_data;
        platform_set_drvdata(pdev, dev);
 
-       ei_status.rxcr_base  = ax->plat->rcr_val;
+       ei_local->rxcr_base = ax->plat->rcr_val;
 
        /* find the platform resources */
-
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (res == NULL) {
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq) {
                dev_err(&pdev->dev, "no IRQ specified\n");
                ret = -ENXIO;
                goto exit_mem;
        }
 
-       dev->irq = res->start;
-       ax->irqflags = res->flags & IRQF_TRIGGER_MASK;
+       dev->irq = irq->start;
+       ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
                dev_err(&pdev->dev, "no MEM specified\n");
                ret = -ENXIO;
                goto exit_mem;
        }
 
-       size = (res->end - res->start) + 1;
-
-       /* setup the register offsets from either the platform data
-        * or by using the size of the resource provided */
+       mem_size = resource_size(mem);
 
+       /*
+        * setup the register offsets from either the platform data or
+        * by using the size of the resource provided
+        */
        if (ax->plat->reg_offsets)
-               ei_status.reg_offset = ax->plat->reg_offsets;
+               ei_local->reg_offset = ax->plat->reg_offsets;
        else {
-               ei_status.reg_offset = ax->reg_offsets;
+               ei_local->reg_offset = ax->reg_offsets;
                for (ret = 0; ret < 0x18; ret++)
-                       ax->reg_offsets[ret] = (size / 0x18) * ret;
+                       ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
        }
 
-       ax->mem = request_mem_region(res->start, size, pdev->name);
-       if (ax->mem == NULL) {
+       if (!request_mem_region(mem->start, mem_size, pdev->name)) {
                dev_err(&pdev->dev, "cannot reserve registers\n");
-               ret = -ENXIO;
+               ret = -ENXIO;
                goto exit_mem;
        }
 
-       ei_status.mem = ioremap(res->start, size);
-       dev->base_addr = (unsigned long)ei_status.mem;
+       ei_local->mem = ioremap(mem->start, mem_size);
+       dev->base_addr = (unsigned long)ei_local->mem;
 
-       if (ei_status.mem == NULL) {
-               dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n",
-                       (unsigned long long)res->start,
-                       (unsigned long long)res->end);
+       if (ei_local->mem == NULL) {
+               dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
 
-               ret = -ENXIO;
+               ret = -ENXIO;
                goto exit_req;
        }
 
        /* look for reset area */
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (res == NULL) {
+       mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!mem2) {
                if (!ax->plat->reg_offsets) {
                        for (ret = 0; ret < 0x20; ret++)
-                               ax->reg_offsets[ret] = (size / 0x20) * ret;
+                               ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
                }
-
-               ax->map2 = NULL;
        } else {
-               size = (res->end - res->start) + 1;
+               mem2_size = resource_size(mem2);
 
-               ax->mem2 = request_mem_region(res->start, size, pdev->name);
-               if (ax->mem2 == NULL) {
+               if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
                        dev_err(&pdev->dev, "cannot reserve registers\n");
                        ret = -ENXIO;
                        goto exit_mem1;
                }
 
-               ax->map2 = ioremap(res->start, size);
-               if (ax->map2 == NULL) {
+               ax->map2 = ioremap(mem2->start, mem2_size);
+               if (!ax->map2) {
                        dev_err(&pdev->dev, "cannot map reset register\n");
                        ret = -ENXIO;
                        goto exit_mem2;
                }
 
-               ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem;
+               ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
        }
 
        /* got resources, now initialise and register device */
-
-       ret = ax_init_dev(dev, 1);
+       ret = ax_init_dev(dev);
        if (!ret)
                return 0;
 
-       if (ax->map2 == NULL)
+       if (!ax->map2)
                goto exit_mem1;
 
        iounmap(ax->map2);
 
  exit_mem2:
-       release_resource(ax->mem2);
-       kfree(ax->mem2);
+       release_mem_region(mem2->start, mem2_size);
 
  exit_mem1:
-       iounmap(ei_status.mem);
+       iounmap(ei_local->mem);
 
  exit_req:
-       release_resource(ax->mem);
-       kfree(ax->mem);
+       release_mem_region(mem->start, mem_size);
 
  exit_mem:
        free_netdev(dev);
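
/*
 * For reference: ax_probe()/ax_remove() above stop caching the struct
 * resource returned by request_mem_region() and instead re-look it up with
 * platform_get_resource() and release it with release_mem_region().  A
 * minimal sketch of that pattern, with hypothetical foo_* names (not part
 * of the patch):
 */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static void __iomem *foo_map_regs(struct platform_device *pdev,
                                  resource_size_t *sizep)
{
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base;

        if (!mem)
                return NULL;

        *sizep = resource_size(mem);
        if (!request_mem_region(mem->start, *sizep, pdev->name))
                return NULL;

        base = ioremap(mem->start, *sizep);
        if (!base)
                release_mem_region(mem->start, *sizep);
        return base;                    /* NULL on failure, region released */
}

/* teardown mirrors it: iounmap(base); release_mem_region(mem->start, size); */
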
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
 static int ax_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct net_device *ndev = platform_get_drvdata(dev);
-       struct ax_device  *ax = to_ax_dev(ndev);
+       struct ax_device *ax = to_ax_dev(ndev);
 
        ax->resume_open = ax->running;
 
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
 static int ax_resume(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
-       struct ax_device  *ax = to_ax_dev(ndev);
+       struct ax_device *ax = to_ax_dev(ndev);
 
        ax_initial_setup(ndev, netdev_priv(ndev));
        ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
 
 #else
 #define ax_suspend NULL
-#define ax_resume  NULL
+#define ax_resume NULL
 #endif
 
 static struct platform_driver axdrv = {
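
/*
 * For reference: the ax88796 changes above replace the private MII
 * bit-shifting helpers with the generic mdio-bitbang + phylib stack.  The
 * overall flow, condensed from ax_mii_init()/ax_mii_probe()/ax_open() above
 * (hypothetical foo_* names; bus name/id/irq setup and full error unwinding
 * shortened - a sketch, not part of the patch):
 */
#include <linux/mdio-bitbang.h>
#include <linux/phy.h>
#include <linux/netdevice.h>

struct foo_priv {
        struct mdiobb_ctrl bb_ctrl;     /* .ops must supply set_mdc,
                                         * set_mdio_dir, set_mdio_data,
                                         * get_mdio_data */
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
};

static int foo_attach_phy(struct net_device *ndev, struct foo_priv *p,
                          void (*link_handler)(struct net_device *))
{
        p->mii_bus = alloc_mdio_bitbang(&p->bb_ctrl);
        if (!p->mii_bus)
                return -ENOMEM;

        if (mdiobus_register(p->mii_bus))
                goto err_free;

        p->phy_dev = phy_find_first(p->mii_bus);
        if (!p->phy_dev)
                goto err_unreg;

        if (phy_connect_direct(ndev, p->phy_dev, link_handler, 0,
                               PHY_INTERFACE_MODE_MII))
                goto err_unreg;

        phy_start(p->phy_dev);          /* phylib now polls the PHY for us */
        return 0;

 err_unreg:
        mdiobus_unregister(p->mii_bus);
 err_free:
        free_mdio_bitbang(p->mii_bus);
        return -ENODEV;
}
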
index add0b93..62af707 100644 (file)
@@ -67,7 +67,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 }
 
 /* Number of bytes of an RX frame that are copied to skb->data */
-#define BE_HDR_LEN             64
+#define BE_HDR_LEN             ((u16) 64)
 #define BE_MAX_JUMBO_FRAME_SIZE        9018
 #define BE_MIN_MTU             256
 
@@ -211,18 +211,40 @@ struct be_rx_stats {
        u32 rx_fps;             /* Rx frags per second */
 };
 
+struct be_rx_compl_info {
+       u32 rss_hash;
+       u16 vid;
+       u16 pkt_size;
+       u16 rxq_idx;
+       u16 mac_id;
+       u8 vlanf;
+       u8 num_rcvd;
+       u8 err;
+       u8 ipf;
+       u8 tcpf;
+       u8 udpf;
+       u8 ip_csum;
+       u8 l4_csum;
+       u8 ipv6;
+       u8 vtm;
+       u8 pkt_type;
+};
+
 struct be_rx_obj {
        struct be_adapter *adapter;
        struct be_queue_info q;
        struct be_queue_info cq;
+       struct be_rx_compl_info rxcp;
        struct be_rx_page_info page_info_tbl[RX_Q_LEN];
        struct be_eq_obj rx_eq;
        struct be_rx_stats stats;
        u8 rss_id;
        bool rx_post_starved;   /* Zero rx frags have been posted to BE */
-       u16 last_frag_index;
-       u16 rsvd;
-       u32 cache_line_barrier[15];
+       u32 cache_line_barrier[16];
+};
+
+struct be_drv_stats {
+       u8 be_on_die_temperature;
 };
 
 struct be_vf_cfg {
@@ -234,6 +256,7 @@ struct be_vf_cfg {
 };
 
 #define BE_INVALID_PMAC_ID             0xffffffff
+
 struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
@@ -269,6 +292,7 @@ struct be_adapter {
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
        u8 msix_vec_next_idx;
+       struct be_drv_stats drv_stats;
 
        struct vlan_group *vlan_grp;
        u16 vlans_added;
@@ -281,6 +305,7 @@ struct be_adapter {
        struct be_dma_mem stats_cmd;
        /* Work queue used to perform periodic tasks like getting statistics */
        struct delayed_work work;
+       u16 work_counter;
 
        /* Ethtool knobs and info */
        bool rx_csum;           /* BE card must perform rx-checksumming */
@@ -298,7 +323,7 @@ struct be_adapter {
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
        bool ue_detected;
-       bool stats_ioctl_sent;
+       bool stats_cmd_sent;
        int link_speed;
        u8 port_type;
        u8 transceiver;
@@ -307,10 +332,13 @@ struct be_adapter {
        u32 flash_status;
        struct completion flash_compl;
 
+       bool be3_native;
        bool sriov_enabled;
        struct be_vf_cfg vf_cfg[BE_MAX_VF];
        u8 is_virtfn;
        u32 sli_family;
+       u8 hba_port_num;
+       u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +478,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
        mac[5] = (u8)(addr & 0xFF);
        mac[4] = (u8)((addr >> 8) & 0xFF);
        mac[3] = (u8)((addr >> 16) & 0xFF);
-       mac[2] = 0xC9;
-       mac[1] = 0x00;
-       mac[0] = 0x00;
+       /* Use the OUI from the current MAC address */
+       memcpy(mac, adapter->netdev->dev_addr, 3);
 }
 
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
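
/*
 * For reference: with the change above, be_vf_eth_addr_generate() keeps the
 * adapter's own OUI instead of the hard-coded 00:00:C9 prefix; only the low
 * three bytes come from the generated address.  In isolation (a sketch, not
 * part of the patch):
 */
#include <linux/types.h>
#include <linux/string.h>

static void example_vf_mac(const u8 *pf_mac, u32 addr, u8 *mac)
{
        memcpy(mac, pf_mac, 3);         /* OUI from the PF's current MAC */
        mac[3] = (addr >> 16) & 0xFF;   /* NIC-specific part of 'addr' */
        mac[4] = (addr >> 8) & 0xFF;
        mac[5] = addr & 0xFF;
}
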
index 0c7811f..e1124c8 100644 (file)
 #include "be.h"
 #include "be_cmds.h"
 
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
 static void be_mcc_notify(struct be_adapter *adapter)
 {
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
+       if (adapter->eeh_err) {
+               dev_info(&adapter->pdev->dev,
+                       "Error detected in card! Cannot issue commands\n");
+               return;
+       }
+
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        be_dws_le_to_cpu(&resp->hw_stats,
                                                sizeof(resp->hw_stats));
                        netdev_stats_update(adapter);
-                       adapter->stats_ioctl_sent = false;
+                       adapter->stats_cmd_sent = false;
                }
        } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
                   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 {
        if (evt->valid) {
                adapter->vlan_prio_bmap = evt->available_priority_bmap;
+               adapter->recommended_prio &= ~VLAN_PRIO_MASK;
                adapter->recommended_prio =
                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
        }
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
        }
 }
 
+/*Grp5 PVID evt*/
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+               struct be_async_event_grp5_pvid_state *evt)
+{
+       if (evt->enabled)
+               adapter->pvid = evt->tag;
+       else
+               adapter->pvid = 0;
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
                u32 trailer, struct be_mcc_compl *evt)
 {
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
                be_async_grp5_qos_speed_process(adapter,
                (struct be_async_event_grp5_qos_link_speed *)evt);
        break;
+       case ASYNC_EVENT_PVID_STATE:
+               be_async_grp5_pvid_state_process(adapter,
+               (struct be_async_event_grp5_pvid_state *)evt);
+       break;
        default:
                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
                break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
+       if (adapter->eeh_err)
+               return -EIO;
+
        for (i = 0; i < mcc_timeout; i++) {
                num = be_process_mcc(adapter, &status);
                if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
        int msecs = 0;
        u32 ready;
 
+       if (adapter->eeh_err) {
+               dev_err(&adapter->pdev->dev,
+                       "Error detected in card. Cannot issue commands\n");
+               return -EIO;
+       }
+
        do {
                ready = ioread32(db);
                if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-               u32 if_id, u32 *pmac_id)
+               u32 if_id, u32 *pmac_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
 
+       req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);
 
@@ -634,7 +668,7 @@ err:
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
 
+       req->hdr.domain = dom;
        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);
 
@@ -691,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 
        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
-               req->hdr.version = 1;
+               req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
                                                                coalesce_wm);
@@ -827,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));
 
+       if (lancer_chip(adapter)) {
+               req->hdr.version = 1;
+               AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
+                                       adapter->if_handle);
+       }
+
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -995,7 +1036,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 }
 
 /* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1057,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
 
+       req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);
 
        status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        struct be_sge *sge;
        int status = 0;
 
+       if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+               be_cmd_get_die_temperature(adapter);
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        sge->len = cpu_to_le32(nonemb_cmd->size);
 
        be_mcc_notify(adapter);
-       adapter->stats_ioctl_sent = true;
+       adapter->stats_cmd_sent = true;
 
 err:
        spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1148,44 @@ err:
        return status;
 }
 
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_get_cntl_addnl_attribs *req;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+                       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+       status = be_mcc_notify_wait(adapter);
+       if (!status) {
+               struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+                                               embedded_payload(wrb);
+               adapter->drv_stats.be_on_die_temperature =
+                                               resp->on_die_temperature;
+       }
+       /* If the command fails once, do not bother issuing it again */
+       else
+               be_get_temp_freq = 0;
+
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
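
be_cmd_get_stats() above now piggy-backs a die-temperature query every be_get_temp_freq iterations of the worker, and the comment on be_get_temp_freq warns that the value must be a power of two for the driver's MODULO macro. A hedged userspace sketch of the usual mask-based modulo this implies (the kernel macro's definition is outside this diff, so this is only an assumed equivalent):

#include <assert.h>
#include <stdio.h>

/* Mask-based modulo: valid only when div is a power of 2, which is why
 * be_get_temp_freq in the hunk above must stay a power of 2. */
static unsigned int modulo_pow2(unsigned int x, unsigned int div)
{
        assert(div && (div & (div - 1)) == 0);
        return x & (div - 1);
}

int main(void)
{
        unsigned int work_counter = 96;

        if (modulo_pow2(work_counter, 32) == 0)
                printf("iteration %u: issue die-temperature query\n",
                       work_counter);
        return 0;
}
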
+
 /* Uses Mbox */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 {
@@ -1786,6 +1869,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);
 
@@ -1801,6 +1888,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 
        status = be_mcc_notify_wait(adapter);
 
+err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
@@ -1863,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
                        OPCODE_COMMON_SET_QOS, sizeof(*req));
 
        req->hdr.domain = domain;
-       req->valid_bits = BE_QOS_BITS_NIC;
-       req->max_bps_nic = bps;
+       req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+       req->max_bps_nic = cpu_to_le32(bps);
 
        status = be_mcc_notify_wait(adapter);
 
@@ -1872,3 +1960,96 @@ err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_cntl_attribs *req;
+       struct be_cmd_resp_cntl_attribs *resp;
+       struct be_sge *sge;
+       int status;
+       int payload_len = max(sizeof(*req), sizeof(*resp));
+       struct mgmt_controller_attrib *attribs;
+       struct be_dma_mem attribs_cmd;
+
+       memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+       attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+       attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+                                               &attribs_cmd.dma);
+       if (!attribs_cmd.va) {
+               dev_err(&adapter->pdev->dev,
+                               "Memory allocation failure\n");
+               return -ENOMEM;
+       }
+
+       if (mutex_lock_interruptible(&adapter->mbox_lock))
+               return -1;
+
+       wrb = wrb_from_mbox(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = attribs_cmd.va;
+       sge = nonembedded_sgl(wrb);
+
+       be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+                       OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+       sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+       sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+       sge->len = cpu_to_le32(attribs_cmd.size);
+
+       status = be_mbox_notify_wait(adapter);
+       if (!status) {
+               attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
+                                       sizeof(struct be_cmd_resp_hdr));
+               adapter->hba_port_num = attribs->hba_attribs.phy_port;
+       }
+
+err:
+       mutex_unlock(&adapter->mbox_lock);
+       pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+                                       attribs_cmd.dma);
+       return status;
+}
+
+/* Uses mbox */
+int be_cmd_check_native_mode(struct be_adapter *adapter)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_func_cap *req;
+       int status;
+
+       if (mutex_lock_interruptible(&adapter->mbox_lock))
+               return -1;
+
+       wrb = wrb_from_mbox(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+               OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
+
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+               OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
+
+       req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
+                               CAPABILITY_BE3_NATIVE_ERX_API);
+       req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
+
+       status = be_mbox_notify_wait(adapter);
+       if (!status) {
+               struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+               adapter->be3_native = le32_to_cpu(resp->cap_flags) &
+                                       CAPABILITY_BE3_NATIVE_ERX_API;
+       }
+err:
+       mutex_unlock(&adapter->mbox_lock);
+       return status;
+}
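
be_cmd_check_native_mode() advertises which capability bits the driver understands (valid_cap_flags), requests BE3 native ERX mode (cap_flags), and records whatever subset the firmware echoes back in adapter->be3_native. A small sketch of this request/grant handshake pattern; the firmware side below is purely hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_SW_TIMESTAMPS   0x2
#define CAP_NATIVE_ERX_API  0x4

/* Hypothetical firmware: grants only the requested caps it supports and
 * that the driver declared it can interpret. */
static uint32_t fw_negotiate(uint32_t valid_caps, uint32_t requested)
{
        const uint32_t fw_supported = CAP_NATIVE_ERX_API;

        return requested & valid_caps & fw_supported;
}

int main(void)
{
        uint32_t granted = fw_negotiate(CAP_SW_TIMESTAMPS | CAP_NATIVE_ERX_API,
                                        CAP_NATIVE_ERX_API);
        bool be3_native = (granted & CAP_NATIVE_ERX_API) != 0;

        printf("native ERX mode: %s\n", be3_native ? "on" : "off");
        return 0;
}
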
index 83d15c8..e41fcba 100644 (file)
@@ -88,6 +88,7 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_GRP_5         0x5
 #define ASYNC_EVENT_QOS_SPEED          0x1
 #define ASYNC_EVENT_COS_PRIORITY       0x2
+#define ASYNC_EVENT_PVID_STATE         0x3
 struct be_async_event_trailer {
        u32 code;
 };
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
        struct be_async_event_trailer trailer;
 } __packed;
 
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+       u8 enabled;
+       u8 rsvd0;
+       u16 tag;
+       u32 event_tag;
+       u32 rsvd1;
+       struct be_async_event_trailer trailer;
+} __packed;
+
 struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_SET_QOS                          28
 #define OPCODE_COMMON_MCC_CREATE_EXT                   90
 #define OPCODE_COMMON_SEEPROM_READ                     30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES               32
 #define OPCODE_COMMON_NTWK_RX_FILTER                   34
 #define OPCODE_COMMON_GET_FW_VERSION                   35
 #define OPCODE_COMMON_SET_FLOW_CONTROL                 36
@@ -176,6 +190,8 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_BEACON_STATE                 70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA              73
 #define OPCODE_COMMON_GET_PHY_DETAILS                  102
+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP          103
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES   121
 
 #define OPCODE_ETH_RSS_CONFIG                          1
 #define OPCODE_ETH_ACPI_CONFIG                         2
@@ -415,7 +431,7 @@ struct be_cmd_resp_mcc_create {
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
 struct amap_tx_context {
-       u8 rsvd0[16];           /* dword 0 */
+       u8 if_id[16];           /* dword 0 */
        u8 tx_ring_size[4];     /* dword 0 */
        u8 rsvd1[26];           /* dword 0 */
        u8 pci_func_id[8];      /* dword 1 */
@@ -503,7 +519,8 @@ enum be_if_flags {
        BE_IF_FLAGS_VLAN = 0x100,
        BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
        BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
-       BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
+       BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+       BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
 /* An RX interface is an object with one or more MAC addresses and
@@ -619,7 +636,10 @@ struct be_rxf_stats {
        u32 rx_drops_invalid_ring;      /* dword 145*/
        u32 forwarded_packets;  /* dword 146*/
        u32 rx_drops_mtu;       /* dword 147*/
-       u32 rsvd0[15];
+       u32 rsvd0[7];
+       u32 port0_jabber_events;
+       u32 port1_jabber_events;
+       u32 rsvd1[6];
 };
 
 struct be_erx_stats {
@@ -630,11 +650,16 @@ struct be_erx_stats {
        u32 debug_pmem_pbuf_dealloc;       /* dword 47*/
 };
 
+struct be_pmem_stats {
+       u32 eth_red_drops;
+       u32 rsvd[4];
+};
+
 struct be_hw_stats {
        struct be_rxf_stats rxf;
        u32 rsvd[48];
        struct be_erx_stats erx;
-       u32 rsvd1[6];
+       struct be_pmem_stats pmem;
 };
 
 struct be_cmd_req_get_stats {
@@ -647,6 +672,20 @@ struct be_cmd_resp_get_stats {
        struct be_hw_stats hw_stats;
 };
 
+struct be_cmd_req_get_cntl_addnl_attribs {
+       struct be_cmd_req_hdr hdr;
+       u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+       struct be_cmd_resp_hdr hdr;
+       u16 ipl_file_number;
+       u8 ipl_file_version;
+       u8 rsvd0;
+       u8 on_die_temperature; /* in degrees centigrade */
+       u8 rsvd1[3];
+};
+
 struct be_cmd_req_vlan_config {
        struct be_cmd_req_hdr hdr;
        u8 interface_id;
@@ -994,17 +1033,47 @@ struct be_cmd_resp_set_qos {
        u32 rsvd;
 };
 
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+       struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+       struct be_cmd_resp_hdr hdr;
+       struct mgmt_controller_attrib attribs;
+};
+
+/*********************** Set driver function ***********************/
+#define CAPABILITY_SW_TIMESTAMPS       2
+#define CAPABILITY_BE3_NATIVE_ERX_API  4
+
+struct be_cmd_req_set_func_cap {
+       struct be_cmd_req_hdr hdr;
+       u32 valid_cap_flags;
+       u32 cap_flags;
+       u8 rsvd[212];
+};
+
+struct be_cmd_resp_set_func_cap {
+       struct be_cmd_resp_hdr hdr;
+       u32 valid_cap_flags;
+       u32 cap_flags;
+       u8 rsvd[212];
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-                       u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+                       u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+                       u32 pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
                        u32 en_flags, u8 *mac, bool pmac_invalid,
                        u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+                       u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
                        struct be_queue_info *eq, int eq_delay);
 extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1145,7 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+extern int be_cmd_check_native_mode(struct be_adapter *adapter);
 
index b4be027..6e5e433 100644 (file)
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
        int offset;
 };
 
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+                       PMEMSTAT, DRVSTAT};
 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
                                        offsetof(_struct, field)
 #define NETSTAT_INFO(field)    #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
                                                field)
 #define ERXSTAT_INFO(field)    #field, ERXSTAT,\
                                        FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field)   #field, PMEMSTAT,\
+                                       FIELDINFO(struct be_pmem_stats, field)
+#define DRVSTAT_INFO(field)    #field, DRVSTAT,\
+                                       FIELDINFO(struct be_drv_stats, \
+                                               field)
 
 static const struct be_ethtool_stat et_stats[] = {
        {NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
        {MISCSTAT_INFO(rx_drops_too_many_frags)},
        {MISCSTAT_INFO(rx_drops_invalid_ring)},
        {MISCSTAT_INFO(forwarded_packets)},
-       {MISCSTAT_INFO(rx_drops_mtu)}
+       {MISCSTAT_INFO(rx_drops_mtu)},
+       {MISCSTAT_INFO(port0_jabber_events)},
+       {MISCSTAT_INFO(port1_jabber_events)},
+       {PMEMSTAT_INFO(eth_red_drops)},
+       {DRVSTAT_INFO(be_on_die_temperature)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
        "MAC Loopback test",
        "PHY Loopback test",
        "External Loopback test",
-       "DDR DMA test"
+       "DDR DMA test",
        "Link test"
 };
 
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
                case MISCSTAT:
                        p = &hw_stats->rxf;
                        break;
+               case PMEMSTAT:
+                       p = &hw_stats->pmem;
+                       break;
+               case DRVSTAT:
+                       p = &adapter->drv_stats;
+                       break;
                }
 
                p = (u8 *)p + et_stats[i].offset;
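
The ethtool stats stay table-driven: each et_stats[] entry records a name, a source type, and a byte offset, and be_get_ethtool_stats() picks the base structure from the type (hardware, PMEM, or driver stats) before adding the offset. A compact standalone sketch of that lookup pattern, using simplified stand-in structs rather than the driver's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct drv_stats  { uint32_t on_die_temperature; };
struct pmem_stats { uint32_t eth_red_drops; };

enum stat_type { DRVSTAT, PMEMSTAT };

struct stat_entry {
        const char *name;
        enum stat_type type;
        size_t offset;
};

static const struct stat_entry table[] = {
        { "eth_red_drops",      PMEMSTAT, offsetof(struct pmem_stats, eth_red_drops) },
        { "on_die_temperature", DRVSTAT,  offsetof(struct drv_stats, on_die_temperature) },
};

int main(void)
{
        struct drv_stats drv = { .on_die_temperature = 55 };
        struct pmem_stats pmem = { .eth_red_drops = 3 };

        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                /* Select the base container from the entry type, then index
                 * into it with the recorded byte offset. */
                const void *base = (table[i].type == DRVSTAT) ?
                                   (const void *)&drv : (const void *)&pmem;
                uint32_t val = *(const uint32_t *)((const uint8_t *)base +
                                                   table[i].offset);

                printf("%s = %u\n", table[i].name, val);
        }
        return 0;
}
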
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                }
 
                phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-               phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
-                                       &phy_cmd.dma);
+               phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+                                               phy_cmd.size, &phy_cmd.dma,
+                                               GFP_KERNEL);
                if (!phy_cmd.va) {
                        dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                        return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                adapter->port_type = ecmd->port;
                adapter->transceiver = ecmd->transceiver;
                adapter->autoneg = ecmd->autoneg;
-               pci_free_consistent(adapter->pdev, phy_cmd.size,
-                                       phy_cmd.va, phy_cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+                                 phy_cmd.dma);
        } else {
                ecmd->speed = adapter->link_speed;
                ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
        int status;
        u32 cur;
 
-       be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+       be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
 
        if (cur == BEACON_STATE_ENABLED)
                return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
        if (data < 2)
                data = 2;
 
-       status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+       status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                        BEACON_STATE_ENABLED);
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(data*HZ);
 
-       status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+       status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                        BEACON_STATE_DISABLED);
 
        return status;
 }
 
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+       if (!be_physfn(adapter))
+               return false;
+       else
+               return true;
+}
+
 static void
 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       wol->supported = WAKE_MAGIC;
+       if (be_is_wol_supported(adapter))
+               wol->supported = WAKE_MAGIC;
+
        if (adapter->wol)
                wol->wolopts = WAKE_MAGIC;
        else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
 
-       if (wol->wolopts & WAKE_MAGIC)
+       if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
                adapter->wol = true;
        else
                adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
        };
 
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-       ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
-                                       &ddrdma_cmd.dma);
+       ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+                                          &ddrdma_cmd.dma, GFP_KERNEL);
        if (!ddrdma_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
        }
 
 err:
-       pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
-                       ddrdma_cmd.va, ddrdma_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+                         ddrdma_cmd.dma);
        return ret;
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
                                u64 *status)
 {
-       be_cmd_set_loopback(adapter, adapter->port_num,
+       be_cmd_set_loopback(adapter, adapter->hba_port_num,
                                loopback_type, 1);
-       *status = be_cmd_loopback_test(adapter, adapter->port_num,
+       *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
                                loopback_type, 1500,
                                2, 0xabc);
-       be_cmd_set_loopback(adapter, adapter->port_num,
+       be_cmd_set_loopback(adapter, adapter->hba_port_num,
                                BE_NO_LOOPBACK, 1);
        return *status;
 }
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
                                &qos_link_speed) != 0) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = -1;
-       } else if (mac_speed) {
+       } else if (!mac_speed) {
+               test->flags |= ETH_TEST_FL_FAILED;
                data[4] = 1;
        }
 }
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
        eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-       eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
-                               &eeprom_cmd.dma);
+       eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+                                          &eeprom_cmd.dma, GFP_KERNEL);
 
        if (!eeprom_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
                resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
                memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
        }
-       pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
-                       eeprom_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+                         eeprom_cmd.dma);
 
        return status;
 }
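
The allocation hunks in this file move from the legacy pci_alloc_consistent()/pci_free_consistent() pair to dma_alloc_coherent()/dma_free_coherent() on &pdev->dev, which also makes the GFP flags explicit at each call site. A minimal kernel-style sketch of the new pattern (not a standalone program; the helper names are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate and free a coherent DMA buffer with the generic DMA API.
 * Callers that cannot sleep would pass GFP_ATOMIC instead of GFP_KERNEL. */
static void *example_dma_alloc(struct device *dev, size_t size,
                               dma_addr_t *dma)
{
        return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void example_dma_free(struct device *dev, size_t size, void *va,
                             dma_addr_t dma)
{
        dma_free_coherent(dev, size, va, dma);
}
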
index 4096d97..e06aa15 100644 (file)
 #define POST_STAGE_BE_RESET            0x3 /* Host wants to reset chip */
 #define POST_STAGE_ARMFW_RDY           0xc000  /* FW is done with POST */
 
+
+/* Lancer SLIPORT_CONTROL and SLIPORT_STATUS registers */
+#define SLIPORT_STATUS_OFFSET          0x404
+#define SLIPORT_CONTROL_OFFSET         0x408
+
+#define SLIPORT_STATUS_ERR_MASK                0x80000000
+#define SLIPORT_STATUS_RN_MASK         0x01000000
+#define SLIPORT_STATUS_RDY_MASK                0x00800000
+
+
+#define SLI_PORT_CONTROL_IP_MASK       0x08000000
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET     0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -289,10 +301,10 @@ struct be_eth_rx_d {
 
 /* RX Compl Queue Descriptor */
 
-/* Pseudo amap definition for eth_rx_compl in which each bit of the
- * actual structure is defined as a byte: used to calculate
+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
  * offset/shift/mask of each field */
-struct amap_eth_rx_compl {
+struct amap_eth_rx_compl_v0 {
        u8 vlan_tag[16];        /* dword 0 */
        u8 pktsize[14];         /* dword 0 */
        u8 port;                /* dword 0 */
@@ -323,10 +335,92 @@ struct amap_eth_rx_compl {
        u8 rsshash[32];         /* dword 3 */
 } __packed;
 
+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v1 {
+       u8 vlan_tag[16];        /* dword 0 */
+       u8 pktsize[14];         /* dword 0 */
+       u8 vtp;                 /* dword 0 */
+       u8 ip_opt;              /* dword 0 */
+       u8 err;                 /* dword 1 */
+       u8 rsshp;               /* dword 1 */
+       u8 ipf;                 /* dword 1 */
+       u8 tcpf;                /* dword 1 */
+       u8 udpf;                /* dword 1 */
+       u8 ipcksm;              /* dword 1 */
+       u8 l4_cksm;             /* dword 1 */
+       u8 ip_version;          /* dword 1 */
+       u8 macdst[7];           /* dword 1 */
+       u8 rsvd0;               /* dword 1 */
+       u8 fragndx[10];         /* dword 1 */
+       u8 ct[2];               /* dword 1 */
+       u8 sw;                  /* dword 1 */
+       u8 numfrags[3];         /* dword 1 */
+       u8 rss_flush;           /* dword 2 */
+       u8 cast_enc[2];         /* dword 2 */
+       u8 vtm;                 /* dword 2 */
+       u8 rss_bank;            /* dword 2 */
+       u8 port[2];             /* dword 2 */
+       u8 vntagp;              /* dword 2 */
+       u8 header_len[8];       /* dword 2 */
+       u8 header_split[2];     /* dword 2 */
+       u8 rsvd1[13];           /* dword 2 */
+       u8 valid;               /* dword 2 */
+       u8 rsshash[32];         /* dword 3 */
+} __packed;
+
 struct be_eth_rx_compl {
        u32 dw[4];
 };
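
The amap_eth_rx_compl_v0/v1 structs above use the driver's pseudo-bitmap convention: each bit of the hardware completion is declared as one byte, so offsetof/sizeof arithmetic yields the bit offset and width that AMAP_GET_BITS needs. A tiny userspace sketch of the extraction step itself, assuming the field does not straddle a 32-bit word (true for the dword 0 fields shown):

#include <stdint.h>
#include <stdio.h>

/* Pull a bit-field out of an array of 32-bit words, given its bit offset
 * and width; a simplified analogue of AMAP_GET_BITS. */
static uint32_t get_bits(const uint32_t *dw, unsigned int bit_off,
                         unsigned int width)
{
        return (dw[bit_off / 32] >> (bit_off % 32)) & ((1u << width) - 1u);
}

int main(void)
{
        /* dword 0 of the v1 layout: vlan_tag[16] then pktsize[14] ... */
        uint32_t compl[4] = { (1500u << 16) | 0x0123, 0, 0, 0 };

        printf("vlan_tag=0x%04x pktsize=%u\n",
               get_bits(compl, 0, 16), get_bits(compl, 16, 14));
        return 0;
}
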
 
+struct mgmt_hba_attribs {
+       u8 flashrom_version_string[32];
+       u8 manufacturer_name[32];
+       u32 supported_modes;
+       u32 rsvd0[3];
+       u8 ncsi_ver_string[12];
+       u32 default_extended_timeout;
+       u8 controller_model_number[32];
+       u8 controller_description[64];
+       u8 controller_serial_number[32];
+       u8 ip_version_string[32];
+       u8 firmware_version_string[32];
+       u8 bios_version_string[32];
+       u8 redboot_version_string[32];
+       u8 driver_version_string[32];
+       u8 fw_on_flash_version_string[32];
+       u32 functionalities_supported;
+       u16 max_cdblength;
+       u8 asic_revision;
+       u8 generational_guid[16];
+       u8 hba_port_count;
+       u16 default_link_down_timeout;
+       u8 iscsi_ver_min_max;
+       u8 multifunction_device;
+       u8 cache_valid;
+       u8 hba_status;
+       u8 max_domains_supported;
+       u8 phy_port;
+       u32 firmware_post_status;
+       u32 hba_mtu[8];
+       u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+       struct mgmt_hba_attribs hba_attribs;
+       u16 pci_vendor_id;
+       u16 pci_device_id;
+       u16 pci_sub_vendor_id;
+       u16 pci_sub_system_id;
+       u8 pci_bus_number;
+       u8 pci_device_number;
+       u8 pci_function_number;
+       u8 interface_type;
+       u64 unique_identifier;
+       u32 rsvd0[5];
+};
+
 struct controller_id {
        u32 vendor;
        u32 device;
index de40d3b..5e15006 100644 (file)
@@ -25,9 +25,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
 MODULE_AUTHOR("ServerEngines Corporation");
 MODULE_LICENSE("GPL");
 
-static unsigned int rx_frag_size = 2048;
+static ushort rx_frag_size = 2048;
 static unsigned int num_vfs;
-module_param(rx_frag_size, uint, S_IRUGO);
+module_param(rx_frag_size, ushort, S_IRUGO);
 module_param(num_vfs, uint, S_IRUGO);
 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
-       mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+       mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+                                    GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (!be_physfn(adapter))
                goto netdev_addr;
 
-       status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+       status = be_cmd_pmac_del(adapter, adapter->if_handle,
+                               adapter->pmac_id, 0);
        if (status)
                return status;
 
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-                       adapter->if_handle, &adapter->pmac_id);
+                               adapter->if_handle, &adapter->pmac_id, 0);
 netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -312,11 +314,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
-                       netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
-                       netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
@@ -486,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
 {
        dma_addr_t dma;
@@ -496,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
-                       pci_unmap_single(pdev, dma, wrb->frag_len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_single(dev, dma, wrb->frag_len,
+                                        DMA_TO_DEVICE);
                else
-                       pci_unmap_page(pdev, dma, wrb->frag_len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
 }
 
@@ -509,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 {
        dma_addr_t busaddr;
        int i, copied = 0;
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
@@ -523,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 
        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
-               busaddr = pci_map_single(pdev, skb->data, len,
-                                        PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(pdev, busaddr))
+               busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
@@ -538,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
-               busaddr = pci_map_page(pdev, frag->page,
-                                      frag->page_offset,
-                                      frag->size, PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(pdev, busaddr))
+               busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+                                      frag->size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
@@ -565,7 +562,7 @@ dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
-               unmap_tx_frag(pdev, wrb, map_single);
+               unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
@@ -745,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id);
+                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
-                               &adapter->vf_cfg[vf].vf_pmac_id);
+                               &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -824,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
                rate = 10000;
 
        adapter->vf_cfg[vf].vf_tx_rate = rate;
-       status = be_cmd_set_qos(adapter, rate / 10, vf);
+       status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
        if (status)
                dev_info(&adapter->pdev->dev,
@@ -854,28 +851,26 @@ static void be_rx_rate_update(struct be_rx_obj *rxo)
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
-               u32 pktsize, u16 numfrags, u8 pkt_type)
+               struct be_rx_compl_info *rxcp)
 {
        struct be_rx_stats *stats = &rxo->stats;
 
        stats->rx_compl++;
-       stats->rx_frags += numfrags;
-       stats->rx_bytes += pktsize;
+       stats->rx_frags += rxcp->num_rcvd;
+       stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
-       if (pkt_type == BE_MULTICAST_PACKET)
+       if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
+       if (rxcp->err)
+               stats->rxcp_err++;
 }
 
-static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
 {
-       u8 l4_cksm, ipv6, ipcksm;
-
-       l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
-       ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
-       ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
-
-       /* Ignore ipcksm for ipv6 pkts */
-       return l4_cksm && (ipcksm || ipv6);
+       /* The L4 checksum is not reliable for non-TCP/UDP packets.
+        * Also ignore ipcksm for IPv6 packets */
+       return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
+                               (rxcp->ip_csum || rxcp->ipv6);
 }
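
csum_passed() now trusts the hardware L4 checksum only for TCP/UDP completions and ignores the IP checksum bit for IPv6, which has no header checksum. A plain restatement of the rule as a standalone sketch, on a simplified flags struct rather than the parsed completion:

#include <stdbool.h>
#include <stdio.h>

struct compl_flags { bool tcpf, udpf, l4_csum, ip_csum, ipv6; };

/* Mirror of the checksum-trust rule in the hunk above. */
static bool csum_ok(const struct compl_flags *c)
{
        return (c->tcpf || c->udpf) && c->l4_csum && (c->ip_csum || c->ipv6);
}

int main(void)
{
        struct compl_flags v6_tcp  = { .tcpf = true, .l4_csum = true, .ipv6 = true };
        struct compl_flags v4_frag = { .l4_csum = true, .ip_csum = true };

        printf("ipv6 tcp: %d, ipv4 non-tcp/udp: %d\n",
               csum_ok(&v6_tcp), csum_ok(&v4_frag));
        return 0;
}
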
 
 static struct be_rx_page_info *
@@ -890,8 +885,9 @@ get_rx_page_info(struct be_adapter *adapter,
        BUG_ON(!rx_page_info->page);
 
        if (rx_page_info->last_page_user) {
-               pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
-                       adapter->big_page_size, PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&adapter->pdev->dev,
+                              dma_unmap_addr(rx_page_info, bus),
+                              adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }
 
@@ -902,26 +898,17 @@ get_rx_page_info(struct be_adapter *adapter,
 /* Throwaway the data in the Rx completion */
 static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
-               struct be_eth_rx_compl *rxcp)
+               struct be_rx_compl_info *rxcp)
 {
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
-       u16 rxq_idx, i, num_rcvd;
+       u16 i, num_rcvd = rxcp->num_rcvd;
 
-       rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
-       num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-
-        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
-       if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
-
-               rxo->last_frag_index = rxq_idx;
-
-               for (i = 0; i < num_rcvd; i++) {
-                       page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-                       put_page(page_info->page);
-                       memset(page_info, 0, sizeof(*page_info));
-                       index_inc(&rxq_idx, rxq->len);
-               }
+       for (i = 0; i < num_rcvd; i++) {
+               page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+               put_page(page_info->page);
+               memset(page_info, 0, sizeof(*page_info));
+               index_inc(&rxcp->rxq_idx, rxq->len);
        }
 }
 
@@ -930,30 +917,23 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
  * indicated by rxcp.
  */
 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
-                       struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
-                       u16 num_rcvd)
+                       struct sk_buff *skb, struct be_rx_compl_info *rxcp)
 {
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
-       u16 rxq_idx, i, j;
-       u32 pktsize, hdr_len, curr_frag_len, size;
+       u16 i, j;
+       u16 hdr_len, curr_frag_len, remaining;
        u8 *start;
-       u8 pkt_type;
-
-       rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
-       pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
-       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
-
-       page_info = get_rx_page_info(adapter, rxo, rxq_idx);
 
+       page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);
 
        /* Copy data in the first descriptor of this completion */
-       curr_frag_len = min(pktsize, rx_frag_size);
+       curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
 
        /* Copy the header portion into skb_data */
-       hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
+       hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
@@ -972,19 +952,17 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
        }
        page_info->page = NULL;
 
-       if (pktsize <= rx_frag_size) {
-               BUG_ON(num_rcvd != 1);
-               goto done;
+       if (rxcp->pkt_size <= rx_frag_size) {
+               BUG_ON(rxcp->num_rcvd != 1);
+               return;
        }
 
        /* More frags present for this completion */
-       size = pktsize;
-       for (i = 1, j = 0; i < num_rcvd; i++) {
-               size -= curr_frag_len;
-               index_inc(&rxq_idx, rxq->len);
-               page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-
-               curr_frag_len = min(size, rx_frag_size);
+       index_inc(&rxcp->rxq_idx, rxq->len);
+       remaining = rxcp->pkt_size - curr_frag_len;
+       for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
+               page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+               curr_frag_len = min(remaining, rx_frag_size);
 
                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
@@ -1003,25 +981,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
 
+               remaining -= curr_frag_len;
+               index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
-
-done:
-       be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
 }
 
 /* Process the RX completion indicated by rxcp when GRO is disabled */
 static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
-                       struct be_eth_rx_compl *rxcp)
+                       struct be_rx_compl_info *rxcp)
 {
        struct sk_buff *skb;
-       u32 vlanf, vid;
-       u16 num_rcvd;
-       u8 vtm;
-
-       num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 
        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
@@ -1031,7 +1003,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
                return;
        }
 
-       skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
+       skb_fill_rx_data(adapter, rxo, skb, rxcp);
 
        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1041,23 +1013,12 @@ static void be_rx_compl_process(struct be_adapter *adapter,
        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
 
-       vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
-       vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
-
-       /* vlanf could be wrongly set in some cards.
-        * ignore if vtm is not set */
-       if ((adapter->function_mode & 0x400) && !vtm)
-               vlanf = 0;
-
-       if (unlikely(vlanf)) {
+       if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
-               vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-               if (!lancer_chip(adapter))
-                       vid = swab16(vid);
-               vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
+               vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
        } else {
                netif_receive_skb(skb);
        }
@@ -1066,28 +1027,14 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 /* Process the RX completion indicated by rxcp when GRO is enabled */
 static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
-               struct be_eth_rx_compl *rxcp)
+               struct be_rx_compl_info *rxcp)
 {
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj =  &rxo->rx_eq;
-       u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
-       u16 i, rxq_idx = 0, vid, j;
-       u8 vtm;
-       u8 pkt_type;
-
-       num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-       pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
-       vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
-       rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
-       vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
-       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
-
-       /* vlanf could be wrongly set in some cards.
-        * ignore if vtm is not set */
-       if ((adapter->function_mode & 0x400) && !vtm)
-               vlanf = 0;
+       u16 remaining, curr_frag_len;
+       u16 i, j;
 
        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
@@ -1095,9 +1042,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
                return;
        }
 
-       remaining = pkt_size;
-       for (i = 0, j = -1; i < num_rcvd; i++) {
-               page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+       remaining = rxcp->pkt_size;
+       for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
+               page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
 
                curr_frag_len = min(remaining, rx_frag_size);
 
@@ -1115,70 +1062,125 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
                skb_shinfo(skb)->frags[j].size += curr_frag_len;
 
                remaining -= curr_frag_len;
-               index_inc(&rxq_idx, rxq->len);
+               index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);
 
        skb_shinfo(skb)->nr_frags = j + 1;
-       skb->len = pkt_size;
-       skb->data_len = pkt_size;
-       skb->truesize += pkt_size;
+       skb->len = rxcp->pkt_size;
+       skb->data_len = rxcp->pkt_size;
+       skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       if (likely(!vlanf)) {
+       if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
-       } else {
-               vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-               if (!lancer_chip(adapter))
-                       vid = swab16(vid);
+       else
+               vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
+}
+
+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
+                               struct be_eth_rx_compl *compl,
+                               struct be_rx_compl_info *rxcp)
+{
+       rxcp->pkt_size =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
+       rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
+       rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
+       rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
+       rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
+       rxcp->ip_csum =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
+       rxcp->l4_csum =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
+       rxcp->ipv6 =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
+       rxcp->rxq_idx =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
+       rxcp->num_rcvd =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
+       rxcp->pkt_type =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
+       rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
+       rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
+}
+
+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
+                               struct be_eth_rx_compl *compl,
+                               struct be_rx_compl_info *rxcp)
+{
+       rxcp->pkt_size =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
+       rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
+       rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
+       rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
+       rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
+       rxcp->ip_csum =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
+       rxcp->l4_csum =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
+       rxcp->ipv6 =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
+       rxcp->rxq_idx =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
+       rxcp->num_rcvd =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
+       rxcp->pkt_type =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
+       rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
+       rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
+}
+
+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
+{
+       struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
+       struct be_rx_compl_info *rxcp = &rxo->rxcp;
+       struct be_adapter *adapter = rxo->adapter;
 
-               if (!adapter->vlan_grp || adapter->vlans_added == 0)
-                       return;
+       /* For checking the valid bit it is OK to use either definition, as the
+        * valid bit is at the same position in both v0 and v1 Rx compl */
+       if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
+               return NULL;
 
-               vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
-       }
+       rmb();
+       be_dws_le_to_cpu(compl, sizeof(*compl));
 
-       be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
-}
+       if (adapter->be3_native)
+               be_parse_rx_compl_v1(adapter, compl, rxcp);
+       else
+               be_parse_rx_compl_v0(adapter, compl, rxcp);
 
-static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
-{
-       struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
+       /* vlanf could be wrongly set in some cards. Ignore it if vtm is not set */
+       if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+               rxcp->vlanf = 0;
 
-       if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
-               return NULL;
+       if (!lancer_chip(adapter))
+               rxcp->vid = swab16(rxcp->vid);
 
-       rmb();
-       be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
+       if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
+               rxcp->vlanf = 0;
+
+       /* As the compl has been parsed, reset it; we won't touch it again */
+       compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
 
        queue_tail_inc(&rxo->cq);
        return rxcp;
 }
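
The rewritten be_rx_compl_get() parses either the legacy (v0) or BE3-native (v1) completion layout into one driver-private be_rx_compl_info and then applies two VLAN fixups: drop vlanf when vtm is not set, and drop it when the tag equals the port PVID for a VLAN the host never configured. A simplified userspace sketch of just that fixup step; umc_mode stands in for the function_mode & 0x400 check, and the struct is a cut-down stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_compl_info {
        uint16_t vid;
        bool vlanf;     /* completion reports a VLAN-tagged frame */
        bool vtm;       /* VLAN tag metadata bit */
};

static void fixup_vlan_flags(struct rx_compl_info *rxcp, bool umc_mode,
                             uint16_t pvid, const bool *vlan_configured)
{
        /* vlanf can be wrongly set on some cards; trust it only if vtm is set. */
        if (umc_mode && !rxcp->vtm)
                rxcp->vlanf = false;

        /* A tag equal to the port PVID that the host never configured is
         * transparent priority tagging, not a real VLAN. */
        if (rxcp->vid == pvid && !vlan_configured[rxcp->vid])
                rxcp->vlanf = false;
}

int main(void)
{
        bool configured[4096] = { false };
        struct rx_compl_info rxcp = { .vid = 5, .vlanf = true, .vtm = true };

        fixup_vlan_flags(&rxcp, true, 5, configured);
        printf("vlanf after fixup: %d\n", rxcp.vlanf);
        return 0;
}
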
 
-/* To reset the valid bit, we need to reset the whole word as
- * when walking the queue the valid entries are little-endian
- * and invalid entries are host endian
- */
-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
 {
-       rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
-}
-
-static inline struct page *be_alloc_pages(u32 size)
-{
-       gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
+
        if (order > 0)
-               alloc_flags |= __GFP_COMP;
-       return  alloc_pages(alloc_flags, order);
+               gfp |= __GFP_COMP;
+       return  alloc_pages(gfp, order);
 }
 
 /*
  * Allocate a page, split it to fragments of size rx_frag_size and post as
  * receive buffers to BE
  */
-static void be_post_rx_frags(struct be_rx_obj *rxo)
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 {
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1192,14 +1194,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
-                       pagep = be_alloc_pages(adapter->big_page_size);
+                       pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
-                       page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
-                                               adapter->big_page_size,
-                                               PCI_DMA_FROMDEVICE);
+                       page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+                                                   0, adapter->big_page_size,
+                                                   DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
@@ -1272,8 +1274,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
-               unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
-                                       skb_headlen(sent_skb)));
+               unmap_tx_frag(&adapter->pdev->dev, wrb,
+                             (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;
 
                num_wrbs++;
@@ -1341,13 +1343,12 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
-       struct be_eth_rx_compl *rxcp;
+       struct be_rx_compl_info *rxcp;
        u16 tail;
 
        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
-               be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }
 
@@ -1575,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
-               /* Init last_frag_index so that the frag index in the first
-                * completion will never match */
-               rxo->last_frag_index = 0xffff;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;
 
@@ -1699,15 +1697,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static inline bool do_gro(struct be_rx_obj *rxo,
-                       struct be_eth_rx_compl *rxcp, u8 err)
+static inline bool do_gro(struct be_rx_compl_info *rxcp)
 {
-       int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
-
-       if (err)
-               rxo->stats.rxcp_err++;
-
-       return (tcp_frame && !err) ? true : false;
+       return (rxcp->tcpf && !rxcp->err) ? true : false;
 }
 
 static int be_poll_rx(struct napi_struct *napi, int budget)
@@ -1716,10 +1708,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
-       struct be_eth_rx_compl *rxcp;
+       struct be_rx_compl_info *rxcp;
        u32 work_done;
-       u16 frag_index, num_rcvd;
-       u8 err;
 
        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
@@ -1727,29 +1717,19 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
                if (!rxcp)
                        break;
 
-               err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
-               frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
-                                                               rxcp);
-               num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
-                                                               rxcp);
-
-               /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
-               if (likely(frag_index != rxo->last_frag_index &&
-                               num_rcvd != 0)) {
-                       rxo->last_frag_index = frag_index;
-
-                       if (do_gro(rxo, rxcp, err))
+               /* Ignore flush completions */
+               if (rxcp->num_rcvd) {
+                       if (do_gro(rxcp))
                                be_rx_compl_process_gro(adapter, rxo, rxcp);
                        else
                                be_rx_compl_process(adapter, rxo, rxcp);
                }
-
-               be_rx_compl_reset(rxcp);
+               be_rx_stats_update(rxo, rxcp);
        }
 
        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
-               be_post_rx_frags(rxo);
+               be_post_rx_frags(rxo, GFP_ATOMIC);
 
        /* All consumed */
        if (work_done < budget) {
@@ -1829,6 +1809,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
 
        if (ue_status_lo || ue_status_hi) {
                adapter->ue_detected = true;
+               adapter->eeh_err = true;
                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
        }
 
@@ -1867,10 +1848,14 @@ static void be_worker(struct work_struct *work)
                        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                        be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
                }
+
+               if (!adapter->ue_detected && !lancer_chip(adapter))
+                       be_detect_dump_ue(adapter);
+
                goto reschedule;
        }
 
-       if (!adapter->stats_ioctl_sent)
+       if (!adapter->stats_cmd_sent)
                be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
        be_tx_rate_update(adapter);
@@ -1881,7 +1866,7 @@ static void be_worker(struct work_struct *work)
 
                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
-                       be_post_rx_frags(rxo);
+                       be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }
        if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2085,13 +2070,24 @@ static int be_close(struct net_device *netdev)
 
        be_async_mcc_disable(adapter);
 
-       netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link_up = false;
 
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);
 
+       for_all_rx_queues(adapter, rxo, i)
+               napi_disable(&rxo->rx_eq.napi);
+
+       napi_disable(&tx_eq->napi);
+
+       if (lancer_chip(adapter)) {
+               be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
+               be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+               for_all_rx_queues(adapter, rxo, i)
+                       be_cq_notify(adapter, rxo->cq.id, false, 0);
+       }
+
        if (adapter->msix_enabled) {
                vec = be_msix_vec_get(adapter, tx_eq);
                synchronize_irq(vec);
@@ -2105,11 +2101,6 @@ static int be_close(struct net_device *netdev)
        }
        be_irq_unregister(adapter);
 
-       for_all_rx_queues(adapter, rxo, i)
-               napi_disable(&rxo->rx_eq.napi);
-
-       napi_disable(&tx_eq->napi);
-
        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
@@ -2129,7 +2120,7 @@ static int be_open(struct net_device *netdev)
        u16 link_speed;
 
        for_all_rx_queues(adapter, rxo, i) {
-               be_post_rx_frags(rxo);
+               be_post_rx_frags(rxo, GFP_KERNEL);
                napi_enable(&rxo->rx_eq.napi);
        }
        napi_enable(&tx_eq->napi);
@@ -2181,7 +2172,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        memset(mac, 0, ETH_ALEN);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                   GFP_KERNEL);
        if (cmd.va == NULL)
                return -1;
        memset(cmd.va, 0, cmd.size);
@@ -2192,8 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
-                       pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
-                                       cmd.dma);
+                       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                         cmd.dma);
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
@@ -2206,7 +2198,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }
 
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
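/*
 * The conversions in this file replace the legacy pci_alloc_consistent()/
 * pci_free_consistent() wrappers with the generic DMA API.  A minimal sketch
 * of the new allocate/free pairing, using a hypothetical command buffer
 * (illustration only, not part of the patch):
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

static void *example_cmd_buf_alloc(struct device *dev, size_t size,
				   dma_addr_t *dma)
{
	void *va = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	if (!va)
		return NULL;
	memset(va, 0, size);	/* firmware commands expect zeroed memory */
	return va;
}

static void example_cmd_buf_free(struct device *dev, size_t size, void *va,
				 dma_addr_t dma)
{
	dma_free_coherent(dev, size, va, dma);
}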
 
@@ -2227,7 +2219,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
        for (vf = 0; vf < num_vfs; vf++) {
                status = be_cmd_pmac_add(adapter, mac,
                                        adapter->vf_cfg[vf].vf_if_handle,
-                                       &adapter->vf_cfg[vf].vf_pmac_id);
+                                       &adapter->vf_cfg[vf].vf_pmac_id,
+                                       vf + 1);
                if (status)
                        dev_err(&adapter->pdev->dev,
                                "Mac address add failed for VF %d\n", vf);
@@ -2247,7 +2240,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
                if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                        be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id);
+                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }
 }
 
@@ -2258,7 +2251,9 @@ static int be_setup(struct be_adapter *adapter)
        int status;
        u8 mac[ETH_ALEN];
 
-       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+                               BE_IF_FLAGS_BROADCAST |
+                               BE_IF_FLAGS_MULTICAST;
 
        if (be_physfn(adapter)) {
                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2279,22 +2274,26 @@ static int be_setup(struct be_adapter *adapter)
                goto do_none;
 
        if (be_physfn(adapter)) {
-               while (vf < num_vfs) {
-                       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
-                                       | BE_IF_FLAGS_BROADCAST;
-                       status = be_cmd_if_create(adapter, cap_flags, en_flags,
-                                       mac, true,
+               if (adapter->sriov_enabled) {
+                       while (vf < num_vfs) {
+                               cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+                                                       BE_IF_FLAGS_BROADCAST;
+                               status = be_cmd_if_create(adapter, cap_flags,
+                                       en_flags, mac, true,
                                        &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf+1);
-                       if (status) {
-                               dev_err(&adapter->pdev->dev,
-                               "Interface Create failed for VF %d\n", vf);
-                               goto if_destroy;
+                               if (status) {
+                                       dev_err(&adapter->pdev->dev,
+                                       "Interface Create failed for VF %d\n",
+                                       vf);
+                                       goto if_destroy;
+                               }
+                               adapter->vf_cfg[vf].vf_pmac_id =
+                                                       BE_INVALID_PMAC_ID;
+                               vf++;
                        }
-                       adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
-                       vf++;
                }
-       } else if (!be_physfn(adapter)) {
+       } else {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
                if (!status) {
@@ -2315,44 +2314,46 @@ static int be_setup(struct be_adapter *adapter)
        if (status != 0)
                goto rx_qs_destroy;
 
-       if (be_physfn(adapter)) {
-               status = be_vf_eth_addr_config(adapter);
-               if (status)
-                       goto mcc_q_destroy;
-       }
-
        adapter->link_speed = -1;
 
        return 0;
 
-mcc_q_destroy:
-       if (be_physfn(adapter))
-               be_vf_eth_addr_rem(adapter);
        be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
        be_rx_queues_destroy(adapter);
 tx_qs_destroy:
        be_tx_queues_destroy(adapter);
 if_destroy:
-       for (vf = 0; vf < num_vfs; vf++)
-               if (adapter->vf_cfg[vf].vf_if_handle)
-                       be_cmd_if_destroy(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle);
-       be_cmd_if_destroy(adapter, adapter->if_handle);
+       if (be_physfn(adapter) && adapter->sriov_enabled)
+               for (vf = 0; vf < num_vfs; vf++)
+                       if (adapter->vf_cfg[vf].vf_if_handle)
+                               be_cmd_if_destroy(adapter,
+                                       adapter->vf_cfg[vf].vf_if_handle,
+                                       vf + 1);
+       be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 do_none:
        return status;
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-       if (be_physfn(adapter))
+       int vf;
+
+       if (be_physfn(adapter) && adapter->sriov_enabled)
                be_vf_eth_addr_rem(adapter);
 
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
 
-       be_cmd_if_destroy(adapter, adapter->if_handle);
+       if (be_physfn(adapter) && adapter->sriov_enabled)
+               for (vf = 0; vf < num_vfs; vf++)
+                       if (adapter->vf_cfg[vf].vf_if_handle)
+                               be_cmd_if_destroy(adapter,
+                                       adapter->vf_cfg[vf].vf_if_handle,
+                                       vf + 1);
+
+       be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 
        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
@@ -2455,8 +2456,8 @@ static int be_flash_data(struct be_adapter *adapter,
                        continue;
                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
                        (!be_flash_redboot(adapter, fw->data,
-                        pflashcomp[i].offset, pflashcomp[i].size,
-                        filehdr_size)))
+                       pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+                       (num_of_images * sizeof(struct image_hdr)))))
                        continue;
                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset
@@ -2530,8 +2531,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
 
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
-       flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
-                                       &flash_cmd.dma);
+       flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+                                         &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
@@ -2560,8 +2561,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
                status = -1;
        }
 
-       pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
-                               flash_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+                         flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto fw_exit;
@@ -2628,8 +2629,6 @@ static void be_netdev_init(struct net_device *netdev)
 
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
-
-       netif_stop_queue(netdev);
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -2704,13 +2703,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
        be_unmap_pci_bars(adapter);
 
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 
        mem = &adapter->mc_cmd_mem;
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -2725,8 +2724,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
                goto done;
 
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
-                               mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+       mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+                                               mbox_mem_alloc->size,
+                                               &mbox_mem_alloc->dma,
+                                               GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
@@ -2738,8 +2739,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
-       mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
-                       &mc_cmd_mem->dma);
+       mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+                                           mc_cmd_mem->size, &mc_cmd_mem->dma,
+                                           GFP_KERNEL);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
@@ -2755,8 +2757,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
        return 0;
 
 free_mbox:
-       pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
-               mbox_mem_alloc->va, mbox_mem_alloc->dma);
+       dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+                         mbox_mem_alloc->va, mbox_mem_alloc->dma);
 
 unmap_pci_bars:
        be_unmap_pci_bars(adapter);
@@ -2770,8 +2772,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
        struct be_dma_mem *cmd = &adapter->stats_cmd;
 
        if (cmd->va)
-               pci_free_consistent(adapter->pdev, cmd->size,
-                       cmd->va, cmd->dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd->size,
+                                 cmd->va, cmd->dma);
 }
 
 static int be_stats_init(struct be_adapter *adapter)
@@ -2779,7 +2781,8 @@ static int be_stats_init(struct be_adapter *adapter)
        struct be_dma_mem *cmd = &adapter->stats_cmd;
 
        cmd->size = sizeof(struct be_cmd_req_get_stats);
-       cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+       cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+                                    GFP_KERNEL);
        if (cmd->va == NULL)
                return -1;
        memset(cmd->va, 0, cmd->size);
@@ -2849,6 +2852,11 @@ static int be_get_config(struct be_adapter *adapter)
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
 
+       status = be_cmd_get_cntl_attributes(adapter);
+       if (status)
+               return status;
+
+       be_cmd_check_native_mode(adapter);
        return 0;
 }
 
@@ -2890,6 +2898,54 @@ static int be_dev_family_check(struct be_adapter *adapter)
        return 0;
 }
 
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 500
+       u32 sliport_status;
+       int status = 0, i;
+
+       for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+               sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+               if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+                       break;
+
+               msleep(20);
+       }
+
+       if (i == SLIPORT_READY_TIMEOUT)
+               status = -1;
+
+       return status;
+}
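/*
 * Illustrative timing note: the loop above polls SLIPORT_STATUS up to
 * SLIPORT_READY_TIMEOUT (500) times with msleep(20) between reads, i.e. it
 * waits roughly 500 * 20 ms = 10 seconds before giving up (msleep() may
 * sleep slightly longer than requested).
 */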
+
+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+       int status;
+       u32 sliport_status, err, reset_needed;
+       status = lancer_wait_ready(adapter);
+       if (!status) {
+               sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+               err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+               reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+               if (err && reset_needed) {
+                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
+                                       adapter->db + SLIPORT_CONTROL_OFFSET);
+
+                       /* check if the adapter has corrected the error */
+                       status = lancer_wait_ready(adapter);
+                       sliport_status = ioread32(adapter->db +
+                                                       SLIPORT_STATUS_OFFSET);
+                       sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+                                               SLIPORT_STATUS_RN_MASK);
+                       if (status || sliport_status)
+                               status = -1;
+               } else if (err || reset_needed) {
+                       status = -1;
+               }
+       }
+       return status;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -2922,11 +2978,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
-               status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
@@ -2939,6 +2995,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
        if (status)
                goto free_netdev;
 
+       if (lancer_chip(adapter)) {
+               status = lancer_test_and_set_rdy_state(adapter);
+               if (status) {
+                       dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
+                       goto free_netdev;
+               }
+       }
+
        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
@@ -2951,11 +3015,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
        if (status)
                goto ctrl_clean;
 
-       if (be_physfn(adapter)) {
-               status = be_cmd_reset_function(adapter);
-               if (status)
-                       goto ctrl_clean;
-       }
+       status = be_cmd_reset_function(adapter);
+       if (status)
+               goto ctrl_clean;
 
        status = be_stats_init(adapter);
        if (status)
@@ -2979,10 +3041,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
                goto unsetup;
        netif_carrier_off(netdev);
 
+       if (be_physfn(adapter) && adapter->sriov_enabled) {
+               status = be_vf_eth_addr_config(adapter);
+               if (status)
+                       goto unreg_netdev;
+       }
+
        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 
+unreg_netdev:
+       unregister_netdev(netdev);
 unsetup:
        be_clear(adapter);
 msix_disable:
@@ -3009,6 +3079,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
 
+       cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);
 
@@ -3021,6 +3092,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);
 
+       be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3042,6 +3114,7 @@ static int be_resume(struct pci_dev *pdev)
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);
 
+       be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
@@ -3057,6 +3130,8 @@ static int be_resume(struct pci_dev *pdev)
 
        if (adapter->wol)
                be_setup_wol(adapter, false);
+
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 }
 
@@ -3068,6 +3143,9 @@ static void be_shutdown(struct pci_dev *pdev)
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
 
+       if (netif_running(netdev))
+               cancel_delayed_work_sync(&adapter->work);
+
        netif_device_detach(netdev);
 
        be_cmd_reset_function(adapter);
index fad9126..9f356d5 100644 (file)
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
                }
                unmap_array[unmap_cons].skb = NULL;
 
-               pci_unmap_single(bnad->pcidev,
-                                pci_unmap_addr(&unmap_array[unmap_cons],
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr), skb_headlen(skb),
-                                               PCI_DMA_TODEVICE);
+                                               DMA_TO_DEVICE);
 
-               pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+               dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
                if (++unmap_cons >= unmap_q->q_depth)
                        break;
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       pci_unmap_page(bnad->pcidev,
-                                      pci_unmap_addr(&unmap_array[unmap_cons],
+                       dma_unmap_page(&bnad->pcidev->dev,
+                                      dma_unmap_addr(&unmap_array[unmap_cons],
                                                      dma_addr),
                                       skb_shinfo(skb)->frags[i].size,
-                                      PCI_DMA_TODEVICE);
-                       pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+                                      DMA_TO_DEVICE);
+                       dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
                                           0);
                        if (++unmap_cons >= unmap_q->q_depth)
                                break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
                sent_bytes += skb->len;
                wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-               pci_unmap_single(bnad->pcidev,
-                                pci_unmap_addr(&unmap_array[unmap_cons],
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr), skb_headlen(skb),
-                                PCI_DMA_TODEVICE);
-               pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+                                DMA_TO_DEVICE);
+               dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
                BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 
                prefetch(&unmap_array[unmap_cons + 1]);
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        prefetch(&unmap_array[unmap_cons + 1]);
 
-                       pci_unmap_page(bnad->pcidev,
-                                      pci_unmap_addr(&unmap_array[unmap_cons],
+                       dma_unmap_page(&bnad->pcidev->dev,
+                                      dma_unmap_addr(&unmap_array[unmap_cons],
                                                      dma_addr),
                                       skb_shinfo(skb)->frags[i].size,
-                                      PCI_DMA_TODEVICE);
-                       pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+                                      DMA_TO_DEVICE);
+                       dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
                                           0);
                        BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
                }
@@ -340,19 +340,22 @@ static void
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
        struct bnad_unmap_q *unmap_q;
+       struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        int unmap_cons;
 
        unmap_q = rcb->unmap_q;
+       unmap_array = unmap_q->unmap_array;
        for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-               skb = unmap_q->unmap_array[unmap_cons].skb;
+               skb = unmap_array[unmap_cons].skb;
                if (!skb)
                        continue;
-               unmap_q->unmap_array[unmap_cons].skb = NULL;
-               pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-                                       unmap_array[unmap_cons],
-                                       dma_addr), rcb->rxq->buffer_size,
-                                       PCI_DMA_FROMDEVICE);
+               unmap_array[unmap_cons].skb = NULL;
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
+                                               dma_addr),
+                                rcb->rxq->buffer_size,
+                                DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
        }
        bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
                skb->dev = bnad->netdev;
                skb_reserve(skb, NET_IP_ALIGN);
                unmap_array[unmap_prod].skb = skb;
-               dma_addr = pci_map_single(bnad->pcidev, skb->data,
-                       rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-               pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+               dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+                                         rcb->rxq->buffer_size,
+                                         DMA_FROM_DEVICE);
+               dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
                                   dma_addr);
                BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
        struct bna_rcb *rcb = NULL;
        unsigned int wi_range, packets = 0, wis = 0;
        struct bnad_unmap_q *unmap_q;
+       struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
-       u32 flags;
+       u32 flags, unmap_cons;
        u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                        rcb = ccb->rcb[1];
 
                unmap_q = rcb->unmap_q;
+               unmap_array = unmap_q->unmap_array;
+               unmap_cons = unmap_q->consumer_index;
 
-               skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+               skb = unmap_array[unmap_cons].skb;
                BUG_ON(!(skb));
-               unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-               pci_unmap_single(bnad->pcidev,
-                                pci_unmap_addr(&unmap_q->
-                                               unmap_array[unmap_q->
-                                                           consumer_index],
+               unmap_array[unmap_cons].skb = NULL;
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr),
-                                               rcb->rxq->buffer_size,
-                                               PCI_DMA_FROMDEVICE);
+                                rcb->rxq->buffer_size,
+                                DMA_FROM_DEVICE);
                BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 
                /* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
                        if (mem_info->mem_type == BNA_MEM_T_DMA) {
                                BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
                                                dma_pa);
-                               pci_free_consistent(bnad->pcidev,
-                                               mem_info->mdl[i].len,
-                                               mem_info->mdl[i].kva, dma_pa);
+                               dma_free_coherent(&bnad->pcidev->dev,
+                                                 mem_info->mdl[i].len,
+                                                 mem_info->mdl[i].kva, dma_pa);
                        } else
                                kfree(mem_info->mdl[i].kva);
                }
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
                for (i = 0; i < mem_info->num; i++) {
                        mem_info->mdl[i].len = mem_info->len;
                        mem_info->mdl[i].kva =
-                               pci_alloc_consistent(bnad->pcidev,
-                                               mem_info->len, &dma_pa);
+                               dma_alloc_coherent(&bnad->pcidev->dev,
+                                               mem_info->len, &dma_pa,
+                                               GFP_KERNEL);
 
                        if (mem_info->mdl[i].kva == NULL)
                                goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        unmap_q->unmap_array[unmap_prod].skb = skb;
        BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
        txqent->vector[vect_id].length = htons(skb_headlen(skb));
-       dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-               PCI_DMA_TODEVICE);
-       pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+       dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+                                 skb_headlen(skb), DMA_TO_DEVICE);
+       dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                           dma_addr);
 
        BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
                BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
                txqent->vector[vect_id].length = htons(size);
-               dma_addr =
-                       pci_map_page(bnad->pcidev, frag->page,
-                                    frag->page_offset, size,
-                                    PCI_DMA_TODEVICE);
-               pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+               dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+                                       frag->page_offset, size, DMA_TO_DEVICE);
+               dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                                   dma_addr);
                BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
        err = pci_request_regions(pdev, BNAD_NAME);
        if (err)
                goto disable_device;
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-           !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = pci_set_consistent_dma_mask(pdev,
-                                               DMA_BIT_MASK(32));
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   DMA_BIT_MASK(32));
                        if (err)
                                goto release_regions;
                }
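/*
 * Both be_probe() and bnad_pci_init() now use the generic DMA-mask API
 * instead of pci_set_dma_mask()/pci_set_consistent_dma_mask().  A minimal
 * sketch of the usual 64-bit-with-32-bit-fallback idiom (hypothetical helper,
 * not part of the patch):
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_set_dma_masks(struct device *dev, bool *using_dac)
{
	if (!dma_set_mask(dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	*using_dac = false;
	if (!dma_set_mask(dev, DMA_BIT_MASK(32)) &&
	    !dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return 0;

	return -EIO;	/* no usable DMA addressing */
}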
index 8b1d515..a89117f 100644 (file)
@@ -181,7 +181,7 @@ struct bnad_rx_info {
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
        struct sk_buff          *skb;
-       DECLARE_PCI_UNMAP_ADDR(dma_addr)
+       DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct bnad_unmap_q {
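/*
 * DECLARE_PCI_UNMAP_ADDR() above becomes DEFINE_DMA_UNMAP_ADDR(), with
 * dma_unmap_addr()/dma_unmap_addr_set() as the matching accessors.  A minimal
 * sketch of the pattern, using a hypothetical unmap entry (not part of the
 * patch):
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_skb_unmap {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

/* On map: remember the handle for the later unmap. */
static void example_save_mapping(struct example_skb_unmap *u, dma_addr_t addr)
{
	dma_unmap_addr_set(u, dma_addr, addr);
}

/* On completion: fetch the handle back and unmap. */
static void example_unmap(struct device *dev, struct example_skb_unmap *u,
			  size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(u, dma_addr), len, DMA_TO_DEVICE);
}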
index df99edf..d1865cc 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2010 Broadcom Corporation
+ * Copyright (c) 2004-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.0.21"
-#define DRV_MODULE_RELDATE     "Dec 23, 2010"
+#define DRV_MODULE_VERSION     "2.1.6"
+#define DRV_MODULE_RELDATE     "Mar 7, 2011"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.1.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1.fw"
+#define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1a.fw"
 #define FW_RV2P_FILE_09_Ax     "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
 #define FW_RV2P_FILE_09                "bnx2/bnx2-rv2p-09-6.0.17.fw"
 
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
        struct cnic_ctl_info info;
 
        mutex_lock(&bp->cnic_lock);
-       c_ops = bp->cnic_ops;
+       c_ops = rcu_dereference_protected(bp->cnic_ops,
+                                         lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                info.cmd = CNIC_CTL_STOP_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
        struct cnic_ctl_info info;
 
        mutex_lock(&bp->cnic_lock);
-       c_ops = bp->cnic_ops;
+       c_ops = rcu_dereference_protected(bp->cnic_ops,
+                                         lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -7553,6 +7555,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
            !(data & ETH_FLAG_RXVLAN))
                return -EINVAL;
 
+       /* TSO with VLAN tag won't work with current firmware */
+       if (!(data & ETH_FLAG_TXVLAN))
+               return -EINVAL;
+
        rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
                                  ETH_FLAG_TXVLAN);
        if (rc)
@@ -7962,11 +7968,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
                /* AER (Advanced Error Reporting) hooks */
                err = pci_enable_pcie_error_reporting(pdev);
-               if (err) {
-                       dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-                                           "failed 0x%x\n", err);
-                       /* non-fatal, continue */
-               }
+               if (!err)
+                       bp->flags |= BNX2_FLAG_AER_ENABLED;
 
        } else {
                bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8232,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        return 0;
 
 err_out_unmap:
-       if (bp->flags & BNX2_FLAG_PCIE)
+       if (bp->flags & BNX2_FLAG_AER_ENABLED) {
                pci_disable_pcie_error_reporting(pdev);
+               bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+       }
 
        if (bp->regview) {
                iounmap(bp->regview);
@@ -8312,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #endif
 };
 
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
 {
        dev->vlan_features |= flags;
 }
@@ -8418,8 +8423,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 
        kfree(bp->temp_stats_blk);
 
-       if (bp->flags & BNX2_FLAG_PCIE)
+       if (bp->flags & BNX2_FLAG_AER_ENABLED) {
                pci_disable_pcie_error_reporting(pdev);
+               bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+       }
 
        free_netdev(dev);
 
@@ -8535,7 +8542,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
        }
        rtnl_unlock();
 
-       if (!(bp->flags & BNX2_FLAG_PCIE))
+       if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
                return result;
 
        err = pci_cleanup_aer_uncorrect_error_status(pdev);
index 5488a2e..6802045 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2.h: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
 
 #define BNX2_CP_SCRATCH                                        0x001a0000
 
+#define BNX2_FW_MAX_ISCSI_CONN                          0x001a0080
+
 
 /*
  *  mcp_reg definition
@@ -6741,6 +6743,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN         0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN                0x00001000
 #define BNX2_FLAG_BROKEN_STATS         0x00002000
+#define BNX2_FLAG_AER_ENABLED          0x00004000
 
        struct bnx2_napi        bnx2_napi[BNX2_MAX_MSIX_VEC];
 
@@ -6758,7 +6761,7 @@ struct bnx2 {
        u32             tx_wake_thresh;
 
 #ifdef BCM_CNIC
-       struct cnic_ops         *cnic_ops;
+       struct cnic_ops __rcu   *cnic_ops;
        void                    *cnic_data;
 #endif
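/*
 * The cnic_ops pointer is now annotated __rcu; readers that hold cnic_lock
 * fetch it with rcu_dereference_protected(..., lockdep_is_held()), which keeps
 * sparse and lockdep happy without an rcu_read_lock() section.  A minimal
 * sketch of the pattern with hypothetical names (not part of the patch):
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct example_ops {
	void (*ctl)(void *data, int cmd);
};

static struct example_ops __rcu *registered_ops;
static DEFINE_MUTEX(example_lock);

/* Writer: publish or clear the ops pointer under the mutex. */
static void example_register(struct example_ops *ops)
{
	mutex_lock(&example_lock);
	rcu_assign_pointer(registered_ops, ops);
	mutex_unlock(&example_lock);
}

/* Reader that takes the same mutex: no RCU read-side section needed. */
static void example_ctl(void *data, int cmd)
{
	struct example_ops *ops;

	mutex_lock(&example_lock);
	ops = rcu_dereference_protected(registered_ops,
					lockdep_is_held(&example_lock));
	if (ops)
		ops->ctl(data, cmd);
	mutex_unlock(&example_lock);
}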
 
index 8e41837..b7ff87b 100644 (file)
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-4"
-#define DRV_MODULE_RELDATE      "2011/01/18"
+#define DRV_MODULE_VERSION      "1.62.11-0"
+#define DRV_MODULE_RELDATE      "2011/01/31"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -31,7 +31,7 @@
 #define BNX2X_NEW_NAPI
 
 #if defined(CONFIG_DCB)
-#define BCM_DCB
+#define BCM_DCBNL
 #endif
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #endif
 
 #define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
 
 #define U64_LO(x)                      (u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)                      (u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
        /* chip independed shortcut into rx_prods_offset memory */
        u32                     ustorm_rx_prods_offset;
 
+       u32                     rx_buf_size;
+
        dma_addr_t              status_blk_mapping;
 
        struct sw_tx_bd         *tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
 };
 
 #define bnx2x_fp(bp, nr, var)          (bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU      2500
+
 #ifdef BCM_CNIC
 /* FCoE L2 `fastpath' is right after the eth entries */
 #define FCOE_IDX                       BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
        struct eth_stats_query          fw_stats;
        struct mac_configuration_cmd    mac_config;
        struct mac_configuration_cmd    mcast_config;
+       struct mac_configuration_cmd    uc_mac_config;
        struct client_init_ramrod_data  client_init_data;
 
        /* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
        int                     tx_ring_size;
 
        u32                     rx_csum;
-       u32                     rx_buf_size;
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVREHEAD           (ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE            60
@@ -939,7 +946,7 @@ struct bnx2x {
        struct eth_spe          *spq_prod_bd;
        struct eth_spe          *spq_last_bd;
        __le16                  *dsb_sp_prod;
-       atomic_t                spq_left; /* serialize spq */
+       atomic_t                cq_spq_left; /* ETH_XXX ramrods credit */
        /* used to synchronize spq accesses */
        spinlock_t              spq_lock;
 
@@ -949,6 +956,7 @@ struct bnx2x {
        u16                     eq_prod;
        u16                     eq_cons;
        __le16                  *eq_cons_sb;
+       atomic_t                eq_spq_left; /* COMMON_XXX ramrods credit */
 
        /* Flags for marking that there is a STAT_QUERY or
           SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
 #define MF_FUNC_DIS                    0x1000
 #define FCOE_MACS_SET                  0x2000
 #define NO_FCOE_FLAG                   0x4000
+#define NO_ISCSI_OOO_FLAG              0x8000
+#define NO_ISCSI_FLAG                  0x10000
 
 #define NO_FCOE(bp)            ((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp)           ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)       ((bp)->flags & NO_ISCSI_OOO_FLAG)
 
        int                     pf_num; /* absolute PF number */
        int                     pfid;   /* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
        int                     num_queues;
        int                     disable_tpa;
        int                     int_mode;
+       u32                     *rx_indir_table;
 
        struct tstorm_eth_mac_filter_config     mac_filters;
 #define BNX2X_ACCEPT_NONE              0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
 #define BNX2X_CNIC_FLAG_MAC_SET                1
        void                    *t2;
        dma_addr_t              t2_mapping;
-       struct cnic_ops         *cnic_ops;
+       struct cnic_ops __rcu   *cnic_ops;
        void                    *cnic_data;
        u32                     cnic_tag;
        struct cnic_eth_dev     cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
        u16                     cnic_kwq_pending;
        u16                     cnic_spq_pending;
        struct mutex            cnic_mutex;
-       u8                      iscsi_mac[ETH_ALEN];
        u8                      fip_mac[ETH_ALEN];
 #endif
 
        int                     dmae_ready;
        /* used to synchronize dmae accesses */
-       struct mutex            dmae_mutex;
+       spinlock_t              dmae_lock;
 
        /* used to protect the FW mail box */
        struct mutex            fw_mb_mutex;
@@ -1211,6 +1223,7 @@ struct bnx2x {
        /* DCBX Negotation results */
        struct dcbx_features                    dcbx_local_feat;
        u32                                     dcbx_error;
+       u32                                     pending_max;
 };
 
 /**
@@ -1447,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                  u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
 
@@ -1613,19 +1632,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_BTR                      4
 #define MAX_SPQ_PENDING                        8
 
-
-/* CMNG constants
-   derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE                   100
-/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC       100
-/* resolution of fairness algorithm in usecs -
-   coefficient for calculating the actual t fair */
-#define T_FAIR_COEF                    10000000
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE                                   100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC                       400
 /* number of bytes in single QM arbitration cycle -
-   coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES                   40000
-#define FAIR_MEM                       2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES                                   160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES                                                100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH                               32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF    ((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM                                       2
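/*
 * Quick sanity check of the new coefficient (illustrative arithmetic only,
 * derived from the defines above):
 *
 *   T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES
 *               = (32768 + 160000) * 8 * 100
 *               = 154,214,400
 */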
 
 
 #define ATTN_NIG_FOR_FUNC              (1L << 8)
@@ -1782,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
 
 extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
 
 #endif /* bnx2x.h */
index 710ce5d..e83ac6d 100644 (file)
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-                                bp->rx_buf_size, DMA_FROM_DEVICE);
+                                fp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
        /* move partial skb from cons to pool (don't unmap yet) */
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 #endif
 }
 
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *             nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN     12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ *                  aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+                                   u16 len_on_bd)
+{
+       /* A TPA aggregation won't have IP options or TCP options
+        * other than timestamp.
+        */
+       u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+       /* Check if there was a TCP timestamp; if there is, it will
+        * always be 12 bytes long: nop nop kind length echo val.
+        *
+        * Otherwise FW would close the aggregation.
+        */
+       if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+               hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+       return len_on_bd - hdrs_len;
+}
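/*
 * Worked example for the helper above (illustrative numbers only): with
 * Ethernet (ETH_HLEN = 14), a plain IPv4 header (20) and a plain TCP header
 * (20), hdrs_len is 54; with the timestamp option it grows by
 * TPA_TSTAMP_OPT_LEN (12) to 66.  So for a first packet with
 * len_on_bd = 1514, the estimated MSS is 1514 - 66 = 1448 with timestamps,
 * or 1514 - 54 = 1460 without.
 */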
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
-                              u16 cqe_idx)
+                              u16 cqe_idx, u16 parsing_flags)
 {
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
        /* This is needed in order to enable forwarding support */
        if (frag_size)
-               skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-                                              max(frag_size, (u32)len_on_bd));
+               skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+                                                             len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -333,26 +367,28 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
-       struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-                        bp->rx_buf_size, DMA_FROM_DEVICE);
+                        fp->rx_buf_size, DMA_FROM_DEVICE);
 
        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
+               u16 parsing_flags =
+                       le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 
                prefetch(skb);
                prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
-               if (pad + len > bp->rx_buf_size) {
+               if (pad + len > fp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
-                                 pad, len, bp->rx_buf_size);
+                                 pad, len, fp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 
                if (!bnx2x_fill_frag_skb(bp, fp, skb,
-                                        &cqe->fast_path_cqe, cqe_idx)) {
-                       if ((le16_to_cpu(cqe->fast_path_cqe.
-                           pars_flags.flags) & PARSING_FLAGS_VLAN))
+                                        &cqe->fast_path_cqe, cqe_idx,
+                                        parsing_flags)) {
+                       if (parsing_flags & PARSING_FLAGS_VLAN)
                                __vlan_hwaccel_put_tag(skb,
                                                 le16_to_cpu(cqe->fast_path_cqe.
                                                             vlan_tag));
@@ -582,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
-                                                bp->rx_buf_size,
+                                                fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 {
        u16 line_speed = bp->link_vars.line_speed;
        if (IS_MF(bp)) {
-               u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
-                                               FUNC_MF_CFG_MAX_BW_MASK) >>
-                                               FUNC_MF_CFG_MAX_BW_SHIFT;
-               /* Calculate the current MAX line speed limit for the DCC
-                * capable devices
+               u16 maxCfg = bnx2x_extract_max_cfg(bp,
+                                                  bp->mf_config[BP_VN(bp)]);
+
+               /* Calculate the current MAX line speed limit for the MF
+                * devices
                 */
-               if (IS_MF_SD(bp)) {
+               if (IS_MF_SI(bp))
+                       line_speed = (line_speed * maxCfg) / 100;
+               else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;
 
                        if (vn_max_rate < line_speed)
                                line_speed = vn_max_rate;
-               } else /* IS_MF_SI(bp)) */
-                       line_speed = (line_speed * maxCfg) / 100;
+               }
        }
 
        return line_speed;
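
The hunk above reworks bnx2x_get_mf_speed() so that SI mode scales the line speed by a percentage while SD mode clamps it to maxCfg * 100 Mbps. A standalone sketch of that arithmetic (not part of the patch; the unit assumptions are spelled out in the comments):

#include <stdio.h>

/* Sketch of the MF speed calculation above.  Assumptions: in SI mode
 * max_cfg is a percentage of the line speed; in SD mode it is a rate in
 * units of 100 Mbps that caps the line speed. */
static unsigned short mf_line_speed(unsigned short line_speed,
                                    unsigned short max_cfg, int is_si)
{
        if (is_si)
                return (line_speed * max_cfg) / 100;
        else {
                unsigned short vn_max_rate = max_cfg * 100;

                return vn_max_rate < line_speed ? vn_max_rate : line_speed;
        }
}

int main(void)
{
        printf("SI, 10000 Mbps at 25%%     -> %u Mbps\n",
               (unsigned)mf_line_speed(10000, 25, 1));
        printf("SD, 10000 Mbps, max_cfg 40 -> %u Mbps\n",
               (unsigned)mf_line_speed(10000, 40, 0));
        return 0;
}
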
@@ -821,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
        u16 ring_prod;
        int i, j;
 
-       bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-               IP_HEADER_ALIGNMENT_PADDING;
-
-       DP(NETIF_MSG_IFUP,
-          "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
        for_each_rx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];
 
+               DP(NETIF_MSG_IFUP,
+                  "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
                if (!fp->disable_tpa) {
                        for (i = 0; i < max_agg_queues; i++) {
                                fp->tpa_pool[i].skb =
-                                  netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+                                  netdev_alloc_skb(bp->dev, fp->rx_buf_size);
                                if (!fp->tpa_pool[i].skb) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
@@ -941,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_size, DMA_FROM_DEVICE);
+                                        fp->rx_buf_size, DMA_FROM_DEVICE);
 
                        rx_buf->skb = NULL;
                        dev_kfree_skb(skb);
@@ -959,6 +993,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
        bnx2x_free_rx_skbs(bp);
 }
 
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+       /* load old values */
+       u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+       if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+               /* leave all but MAX value */
+               mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+               /* set new MAX value */
+               mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+                               & FUNC_MF_CFG_MAX_BW_MASK;
+
+               bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+       }
+}
+
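
bnx2x_update_max_mf_config() above only touches the MAX BW field of the per-VN config word and skips the firmware command when the value is unchanged. A minimal userspace model of that read-modify-write, with made-up mask/shift values standing in for FUNC_MF_CFG_MAX_BW_MASK/SHIFT (the real values live in the firmware headers and are not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; only the masking pattern mirrors the patch. */
#define MAX_BW_MASK   0x00ff0000u
#define MAX_BW_SHIFT  16

static uint32_t update_max_bw(uint32_t mf_cfg, uint32_t value)
{
        /* leave all but the MAX value */
        mf_cfg &= ~MAX_BW_MASK;
        /* set the new MAX value */
        mf_cfg |= (value << MAX_BW_SHIFT) & MAX_BW_MASK;
        return mf_cfg;
}

int main(void)
{
        printf("new mf_cfg = 0x%08x\n", (unsigned)update_max_bw(0x12345678u, 50));
        return 0;
}
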
 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 {
        int i, offset = 1;
@@ -1249,6 +1300,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
        return rc;
 }
 
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+       int i;
+
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+
+               /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+               if (IS_FCOE_IDX(i))
+                       /*
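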
+                        * Although no IP frames are expected to arrive on
+                        * this ring, we still want to add an
+                        * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+                        * overrun attack.
+                        */
+                       fp->rx_buf_size =
+                               BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+                               BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+               else
+                       fp->rx_buf_size =
+                               bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+                               IP_HEADER_ALIGNMENT_PADDING;
+       }
+}
+
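
With bnx2x_set_rx_buf_size() the buffer size becomes a per-fastpath property, so the FCoE ring can use a mini-jumbo MTU while the Ethernet rings follow the netdev MTU. A rough model of the sizing formula; ETH_OVREHEAD works out to 30 bytes per the link header further down, while the alignment and padding constants here are placeholders, not the driver's real values:

#include <stdio.h>

#define ETH_OVERHEAD     (14 + 8 + 8)  /* L2 header + 2 VLANs + LLC/SNAP */
#define RX_ALIGN         64            /* placeholder for BNX2X_RX_ALIGN */
#define IP_ALIGN_PAD     2             /* placeholder for the IP padding */

static int rx_buf_size(int mtu)
{
        return mtu + ETH_OVERHEAD + RX_ALIGN + IP_ALIGN_PAD;
}

int main(void)
{
        printf("mtu 1500 -> rx_buf_size %d\n", rx_buf_size(1500));
        printf("mtu 9000 -> rx_buf_size %d\n", rx_buf_size(9000));
        return 0;
}
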
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1272,6 +1348,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* must be called before memory allocation and HW init */
        bnx2x_ilt_set_info(bp);
 
+       /* Set the receive queues buffer size */
+       bnx2x_set_rx_buf_size(bp);
+
        if (bnx2x_alloc_mem(bp))
                return -ENOMEM;
 
@@ -1427,28 +1506,40 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        bnx2x_set_eth_mac(bp, 1);
 
+       /* Clear MC configuration */
+       if (CHIP_IS_E1(bp))
+               bnx2x_invalidate_e1_mc_list(bp);
+       else
+               bnx2x_invalidate_e1h_mc_list(bp);
+
+       /* Clear UC lists configuration */
+       bnx2x_invalidate_uc_list(bp);
+
+       if (bp->pending_max) {
+               bnx2x_update_max_mf_config(bp, bp->pending_max);
+               bp->pending_max = 0;
+       }
+
        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp, load_mode);
 
+       /* Initialize Rx filtering */
+       bnx2x_set_rx_mode(bp->dev);
+
        /* Start fast path */
        switch (load_mode) {
        case LOAD_NORMAL:
                /* Tx queue should be only reenabled */
                netif_tx_wake_all_queues(bp->dev);
                /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
                break;
 
        case LOAD_OPEN:
                netif_tx_start_all_queues(bp->dev);
                smp_mb__after_clear_bit();
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
                break;
 
        case LOAD_DIAG:
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
                bp->state = BNX2X_STATE_DIAG;
                break;
 
index 03eb4d6..ef37b98 100644 (file)
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp);
  */
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
 
+/**
+ * Updates MAX part of MF configuration in HW
+ * (if required)
+ *
+ * @param bp
+ * @param value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
 
@@ -822,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;
 
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+       mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
@@ -892,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                if (fp->tpa_state[i] == BNX2X_TPA_START)
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_size, DMA_FROM_DEVICE);
+                                        fp->rx_buf_size, DMA_FROM_DEVICE);
 
                dev_kfree_skb(skb);
                rx_buf->skb = NULL;
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+       u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+                             FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (!max_cfg) {
+               BNX2X_ERR("Illegal configuration detected for Max BW - "
+                         "using 100 instead\n");
+               max_cfg = 100;
+       }
+       return max_cfg;
+}
+
 #endif /* BNX2X_CMN_H */
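
bnx2x_extract_max_cfg() guards against a zero MAX BW field by falling back to 100. A short standalone check of that fallback path (mask/shift values are stand-ins, as above):

#include <stdint.h>
#include <stdio.h>

#define MAX_BW_MASK   0x00ff0000u   /* stand-in for FUNC_MF_CFG_MAX_BW_MASK  */
#define MAX_BW_SHIFT  16            /* stand-in for FUNC_MF_CFG_MAX_BW_SHIFT */

static uint16_t extract_max_cfg(uint32_t mf_cfg)
{
        uint16_t max_cfg = (mf_cfg & MAX_BW_MASK) >> MAX_BW_SHIFT;

        if (!max_cfg) {
                /* illegal configuration: fall back to 100 as in the patch */
                max_cfg = 100;
        }
        return max_cfg;
}

int main(void)
{
        printf("cfg 0x00320000 -> %u\n", (unsigned)extract_max_cfg(0x00320000u));
        printf("cfg 0x00000000 -> %u\n", (unsigned)extract_max_cfg(0x00000000u));
        return 0;
}
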
index fb60021..9a24d79 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#ifdef BCM_DCBNL
+#include <linux/dcbnl.h>
+#endif
 
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
        return 0;
 }
 
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+       u8 pri;
+
+       /* Choose the highest priority */
+       for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+               if (ent->pri_bitmap & (1 << pri))
+                       break;
+       return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+       return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+               DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+               DCB_APP_IDTYPE_ETHTYPE;
+}
+
+static inline
+void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
+{
+       int i;
+       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+               bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
+                                                       ~DCBX_APP_ENTRY_VALID;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+       int i, err = 0;
+
+       for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+               struct dcbx_app_priority_entry *ent =
+                       &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+               if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+                       u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+                       /* avoid invalid user-priority */
+                       if (up) {
+                               struct dcb_app app;
+                               app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+                               app.protocol = ent->app_id;
+                               app.priority = delall ? 0 : up;
+                               err = dcb_setapp(bp->dev, &app);
+                       }
+               }
+       }
+       return err;
+}
+#endif
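
bnx2x_dcbx_dcbnl_app_up() above simply walks the priority bitmap from the top and reports the highest set bit; bnx2x_dcbnl_update_applist() then feeds that priority to dcb_setapp() for every valid app entry (or zero when deleting). A standalone sketch of the bitmap scan, assuming the usual eight 802.1p priorities for MAX_PFC_PRIORITIES:

#include <stdio.h>

#define MAX_PFC_PRIORITIES 8   /* assumption for this sketch */

static unsigned char dcbnl_app_up(unsigned char pri_bitmap)
{
        unsigned char pri;

        /* choose the highest priority with a bit set; 0 if none above 0 */
        for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
                if (pri_bitmap & (1 << pri))
                        break;
        return pri;
}

int main(void)
{
        printf("bitmap 0x28 -> priority %u\n", (unsigned)dcbnl_app_up(0x28));
        printf("bitmap 0x01 -> priority %u\n", (unsigned)dcbnl_app_up(0x01));
        return 0;
}
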
+
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 {
        switch (state) {
        case BNX2X_DCBX_STATE_NEG_RECEIVED:
                {
                        DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
-
+#ifdef BCM_DCBNL
+                       /**
+                        * Delete app tlvs from dcbnl before reading new
+                        * negotiation results
+                        */
+                       bnx2x_dcbnl_update_applist(bp, true);
+#endif
                        /* Read neg results if dcbx is in the FW */
                        if (bnx2x_dcbx_read_shmem_neg_results(bp))
                                return;
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                                                 bp->dcbx_error);
 
                        if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+#ifdef BCM_DCBNL
+                               /**
+                                * Add new app tlvs to dcbnl
+                                */
+                               bnx2x_dcbnl_update_applist(bp, false);
+#endif
                                bnx2x_dcbx_stop_hw_tx(bp);
                                return;
                        }
                        /* fall through */
+#ifdef BCM_DCBNL
+                       /**
+                        * Invalidate the local app tlvs if they are not added
+                        * to the dcbnl app list to avoid deleting them from
+                        * the list later on
+                        */
+                       bnx2x_dcbx_invalidate_local_apps(bp);
+#endif
                }
        case BNX2X_DCBX_STATE_TX_PAUSED:
                DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
        bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
 }
 /* DCB netlink */
-#ifdef BCM_DCB
-#include <linux/dcbnl.h>
+#ifdef BCM_DCBNL
 
 #define BNX2X_DCBX_CAPS                (DCB_CAP_DCBX_LLD_MANAGED | \
                                DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
        bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
 }
 
-static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
-                              u8 idtype, u16 idval)
-{
-       if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
-               return false;
-
-       switch (idtype) {
-       case DCB_APP_IDTYPE_ETHTYPE:
-               if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
-                       DCBX_APP_SF_ETH_TYPE)
-                       return false;
-               break;
-       case DCB_APP_IDTYPE_PORTNUM:
-               if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
-                       DCBX_APP_SF_PORT)
-                       return false;
-               break;
-       default:
-               return false;
-       }
-       if (app_ent->app_id != idval)
-               return false;
-
-       return true;
-}
-
 static void bnx2x_admin_app_set_ent(
        struct bnx2x_admin_priority_app_table *app_ent,
        u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
        return bnx2x_set_admin_app_up(bp, idtype, idval, up);
 }
 
-static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
-                                u16 idval)
-{
-       int i;
-       u8 up = 0;
-
-       struct bnx2x *bp = netdev_priv(netdev);
-       DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
-
-       /* iterate over the app entries looking for idtype and idval */
-       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
-               if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
-                                      idtype, idval))
-                       break;
-
-       if (i < DCBX_MAX_APP_PROTOCOL)
-               /* if found return up */
-               up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
-       else
-               DP(NETIF_MSG_LINK, "app not found\n");
-
-       return up;
-}
-
 static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
 {
        struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
        .setnumtcs      = bnx2x_dcbnl_set_numtcs,
        .getpfcstate    = bnx2x_dcbnl_get_pfc_state,
        .setpfcstate    = bnx2x_dcbnl_set_pfc_state,
-       .getapp         = bnx2x_dcbnl_get_app_up,
        .setapp         = bnx2x_dcbnl_set_app_up,
        .getdcbx        = bnx2x_dcbnl_get_dcbx,
        .setdcbx        = bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
        .setfeatcfg     = bnx2x_dcbnl_set_featcfg,
 };
 
-#endif /* BCM_DCB */
+#endif /* BCM_DCBNL */
index f650f98..71b8eda 100644 (file)
@@ -189,8 +189,9 @@ enum {
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
 
 /* DCB netlink */
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
 extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
-#endif /* BCM_DCB */
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
 
 #endif /* BNX2X_DCB_H */
index 5b44a8b..f505015 100644 (file)
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        speed |= (cmd->speed_hi << 16);
 
        if (IS_MF_SI(bp)) {
-               u32 param = 0;
+               u32 part;
                u32 line_speed = bp->link_vars.line_speed;
 
                /* use 10G if no link detected */
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                       REQ_BC_VER_4_SET_MF_BW);
                        return -EINVAL;
                }
-               if (line_speed < speed) {
-                       BNX2X_DEV_INFO("New speed should be less or equal "
-                                      "to actual line speed\n");
+
+               part = (speed * 100) / line_speed;
+
+               if (line_speed < speed || !part) {
+                       BNX2X_DEV_INFO("Speed setting should be in a range "
+                                      "from 1%% to 100%% "
+                                      "of actual line speed\n");
                        return -EINVAL;
                }
-               /* load old values */
-               param = bp->mf_config[BP_VN(bp)];
-
-               /* leave only MIN value */
-               param &= FUNC_MF_CFG_MIN_BW_MASK;
 
-               /* set new MAX value */
-               param |= (((speed * 100) / line_speed)
-                                << FUNC_MF_CFG_MAX_BW_SHIFT)
-                                 & FUNC_MF_CFG_MAX_BW_MASK;
+               if (bp->state != BNX2X_STATE_OPEN)
+                       /* store value for following "load" */
+                       bp->pending_max = part;
+               else
+                       bnx2x_update_max_mf_config(bp, part);
 
-               bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
                return 0;
        }
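
The reworked bnx2x_set_settings() path converts the requested speed into a percentage of the actual line speed, rejects anything outside 1..100, and either applies it immediately or parks it in pending_max for the next load. The conversion and range check, modelled standalone:

#include <stdio.h>

/* Returns 0 and fills *part (1..100) on success, -1 on a bad request. */
static int speed_to_part(unsigned int speed, unsigned int line_speed,
                         unsigned int *part)
{
        *part = (speed * 100) / line_speed;
        if (line_speed < speed || *part == 0)
                return -1;
        return 0;
}

int main(void)
{
        unsigned int part;

        if (speed_to_part(2500, 10000, &part) == 0)
                printf("2500 of 10000 Mbps -> %u%%\n", part);
        if (speed_to_part(50, 10000, &part) != 0)
                printf("50 of 10000 Mbps rejected (rounds down to 0%%)\n");
        return 0;
}
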
 
@@ -1618,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
        /* prepare the loopback packet */
        pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
                     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
        if (!skb) {
                rc = -ENOMEM;
                goto test_loopback_exit;
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
                { 0x100, 0x350 }, /* manuf_info */
                { 0x450,  0xf0 }, /* feature_info */
                { 0x640,  0x64 }, /* upgrade_key_info */
-               { 0x6a4,  0x64 },
                { 0x708,  0x70 }, /* manuf_key_info */
-               { 0x778,  0x70 },
                {     0,     0 }
        };
        __be32 buf[0x350 / 4];
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev,
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
-       if (bp->port.pmf)
-               if (bnx2x_link_test(bp, is_serdes) != 0) {
-                       buf[5] = 1;
-                       etest->flags |= ETH_TEST_FL_FAILED;
-               }
+
+       if (bnx2x_link_test(bp, is_serdes) != 0) {
+               buf[5] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
 
 #ifdef BNX2X_EXTRA_DEBUG
        bnx2x_panic_dump(bp);
@@ -2134,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
        return 0;
 }
 
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                          void *rules __always_unused)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = BNX2X_NUM_ETH_QUEUES(bp);
+               return 0;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+                               struct ethtool_rxfh_indir *indir)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       size_t copy_size =
+               min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+               return -EOPNOTSUPP;
+
+       indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+       memcpy(indir->ring_index, bp->rx_indir_table,
+              copy_size * sizeof(bp->rx_indir_table[0]));
+       return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+                               const struct ethtool_rxfh_indir *indir)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       size_t i;
+
+       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+               return -EOPNOTSUPP;
+
+       /* Validate size and indices */
+       if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+               return -EINVAL;
+       for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+               if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+                       return -EINVAL;
+
+       memcpy(bp->rx_indir_table, indir->ring_index,
+              indir->size * sizeof(bp->rx_indir_table[0]));
+       bnx2x_push_indir_table(bp);
+       return 0;
+}
+
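
The new get/set_rxfh_indir hooks expose the RSS indirection table: get copies up to TSTORM_INDIRECTION_TABLE_SIZE entries out, and set insists on an exact-size table whose entries all reference existing Ethernet queues before pushing it to the chip. The validation step as a standalone sketch (table size and queue count below are illustrative stand-ins):

#include <stddef.h>
#include <stdio.h>

#define INDIR_TABLE_SIZE 128U   /* stand-in for TSTORM_INDIRECTION_TABLE_SIZE */

static int validate_indir(const unsigned int *ring_index, size_t size,
                          unsigned int num_eth_queues)
{
        size_t i;

        if (size != INDIR_TABLE_SIZE)
                return -1;
        for (i = 0; i < INDIR_TABLE_SIZE; i++)
                if (ring_index[i] >= num_eth_queues)
                        return -1;
        return 0;
}

int main(void)
{
        unsigned int tbl[INDIR_TABLE_SIZE] = { 0 }; /* every slot -> queue 0 */

        tbl[1] = 3;     /* still valid with 4 queues, invalid with 2 */
        printf("4 queues: %s\n",
               validate_indir(tbl, INDIR_TABLE_SIZE, 4) ? "rejected" : "ok");
        printf("2 queues: %s\n",
               validate_indir(tbl, INDIR_TABLE_SIZE, 2) ? "rejected" : "ok");
        return 0;
}
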
 static const struct ethtool_ops bnx2x_ethtool_ops = {
        .get_settings           = bnx2x_get_settings,
        .set_settings           = bnx2x_set_settings,
@@ -2170,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
        .get_strings            = bnx2x_get_strings,
        .phys_id                = bnx2x_phys_id,
        .get_ethtool_stats      = bnx2x_get_ethtool_stats,
+       .get_rxnfc              = bnx2x_get_rxnfc,
+       .get_rxfh_indir         = bnx2x_get_rxfh_indir,
+       .set_rxfh_indir         = bnx2x_set_rxfh_indir,
 };
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev)
index 548f563..be503cc 100644 (file)
 
 #include "bnx2x_fw_defs.h"
 
+#define FW_ENCODE_32BIT_PATTERN                0x1e1e1e1e
+
 struct license_key {
        u32 reserved[6];
 
-#if defined(__BIG_ENDIAN)
-       u16 max_iscsi_init_conn;
-       u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
-       u16 max_iscsi_trgt_conn;
-       u16 max_iscsi_init_conn;
-#endif
+       u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT        0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT        16
 
-       u32 reserved_a[6];
-};
+       u32 reserved_a;
+
+       u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK  0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK  0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
 
+       u32 reserved_b[4];
+};
 
 #define PORT_0                         0
 #define PORT_1                         1
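
The license_key change folds the old endian-dependent u16 pair into packed u32 fields decoded with the masks and shifts defined above; the FCoE limits get the same layout. Pulling the two iSCSI counts back out, using those exact mask/shift values on an example packed word:

#include <stdint.h>
#include <stdio.h>

#define MAX_ISCSI_TRGT_CONN_MASK   0xFFFFu
#define MAX_ISCSI_TRGT_CONN_SHIFT  0
#define MAX_ISCSI_INIT_CONN_MASK   0xFFFF0000u
#define MAX_ISCSI_INIT_CONN_SHIFT  16

int main(void)
{
        uint32_t max_iscsi_conn = 0x00400080u;  /* example packed value */

        printf("target connections:    %u\n",
               (unsigned)((max_iscsi_conn & MAX_ISCSI_TRGT_CONN_MASK) >>
                          MAX_ISCSI_TRGT_CONN_SHIFT));
        printf("initiator connections: %u\n",
               (unsigned)((max_iscsi_conn & MAX_ISCSI_INIT_CONN_MASK) >>
                          MAX_ISCSI_INIT_CONN_SHIFT));
        return 0;
}
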
@@ -237,8 +244,26 @@ struct port_hw_cfg {                           /* port 0: 0x12c  port 1: 0x2bc */
 #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT            16
 
 
-       u32 Reserved0[16];                                  /* 0x158 */
-
+       u32 Reserved0[3];                                   /* 0x158 */
+       /*      Controls the TX laser of the SFP+ module */
+       u32 sfp_ctrl;                                   /* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK                            0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT                           0
+#define PORT_HW_CFG_TX_LASER_MDIO                            0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0                           0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1                           0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2                           0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3                           0x00000004
+
+    /* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK                    0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT                   8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0                   0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1                   0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2                   0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3                   0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED                0x00000400
+       u32 Reserved01[12];                                 /* 0x158 */
        /*  for external PHY, or forced mode or during AN */
        u16 xgxs_config_rx[4];                              /* 0x198 */
 
@@ -246,12 +271,78 @@ struct port_hw_cfg {                          /* port 0: 0x12c  port 1: 0x2bc */
 
        u32 Reserved1[56];                                  /* 0x1A8 */
        u32 default_cfg;                                    /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK                        0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT                       0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW                         0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH                        0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT                       0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK                        0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT                       2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW                         0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH                        0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT                       0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK                        0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT                       4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW                         0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH                        0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT                       0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK                        0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT                       6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW                         0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH                        0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT                       0x000000c0
+
+       /*
+        * When the KR link must be forced to a mode that is not
+        * KR-compliant, this parameter determines the trigger for it.
+        * When GPIO is selected, a low input will force the speed. The
+        * current default speed is 1G. In the future this may be widened
+        * to select the forced speed with another parameter. Note that
+        * when force-1G is enabled, it overrides option 56: Link Speed.
+        */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK                    0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT                   8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED                      0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0                0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0                0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0                0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0                0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1                0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1                0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1                0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1                0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED                  0x00000900
+    /* Determines which GPIO is used to reset the external PHY */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK                    0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT                   16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE                0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0                0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0                0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0                0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0                0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1                0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1                0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1                0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1                0x00080000
        /*  Enable BAM on KR */
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK                    0x00100000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT                   20
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED                0x00000000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED                 0x00100000
 
+       /*  Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK                          0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT                         21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED                              0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED                       0x00200000
+
        u32 speed_capability_mask2;                         /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK                0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT               0
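
The new per-GPIO fields in default_cfg are two bits each and, like the rest of this header, the value macros are pre-shifted, so a field is tested by masking and comparing directly. For example, checking GPIO1 with the values above:

#include <stdint.h>
#include <stdio.h>

#define GPIO1_CONFIG_MASK   0x0000000Cu
#define GPIO1_CONFIG_NA     0x00000000u
#define GPIO1_CONFIG_LOW    0x00000004u
#define GPIO1_CONFIG_HIGH   0x00000008u
#define GPIO1_CONFIG_INPUT  0x0000000Cu

int main(void)
{
        uint32_t default_cfg = 0x00000008u;     /* example register image */

        if ((default_cfg & GPIO1_CONFIG_MASK) == GPIO1_CONFIG_HIGH)
                printf("GPIO1 configured high\n");
        else if ((default_cfg & GPIO1_CONFIG_MASK) == GPIO1_CONFIG_INPUT)
                printf("GPIO1 configured as input\n");
        else
                printf("GPIO1 low or not applicable\n");
        return 0;
}
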
@@ -381,6 +472,7 @@ struct port_hw_cfg {                            /* port 0: 0x12c  port 1: 0x2bc */
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727      0x00000900
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823     0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833     0x00000d00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE      0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN     0x0000ff00
 
index 5a268e9..fa6dbe3 100644 (file)
@@ -241,7 +241,7 @@ static const struct {
        /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
         * want to handle "system kill" flow at the moment.
         */
-       BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+       BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
        BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
        BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
        BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
index 7160ec5..f2f367d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
 
 /********************************************************/
 #define ETH_HLEN                       14
-#define ETH_OVREHEAD           (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD                   (ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE            60
 #define ETH_MAX_PACKET_SIZE            1500
 #define ETH_MAX_JUMBO_PACKET_SIZE      9600
 #define MDIO_ACCESS_TIMEOUT            1000
-#define BMAC_CONTROL_RX_ENABLE 2
+#define BMAC_CONTROL_RX_ENABLE         2
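
Worked out from the defines above: ETH_OVREHEAD = 14 + 8 + 8 = 30 bytes, which is what the BMAC paths below add on top of ETH_MAX_JUMBO_PACKET_SIZE when programming the RX/TX maximum sizes:

#include <stdio.h>

int main(void)
{
        int eth_overhead = 14 + 8 + 8;  /* ETH_HLEN + 2 VLANs + LLC/SNAP */
        int max_jumbo    = 9600;        /* ETH_MAX_JUMBO_PACKET_SIZE     */

        printf("ETH_OVREHEAD        = %d bytes\n", eth_overhead);
        printf("BMAC rx/tx max size = %d bytes\n", max_jumbo + eth_overhead);
        return 0;
}
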
 
 /***********************************************************/
 /*                     Shortcut definitions               */
@@ -79,7 +80,7 @@
 
 #define AUTONEG_CL37           SHARED_HW_CFG_AN_ENABLE_CL37
 #define AUTONEG_CL73           SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM            SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM            SHARED_HW_CFG_AN_ENABLE_BAM
 #define AUTONEG_PARALLEL \
                                SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
 #define AUTONEG_SGMII_FIBER_AUTODET \
 #define GP_STATUS_10G_KX4 \
                        MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
 
-#define LINK_10THD                     LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD                     LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD             LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD             LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 #define LINK_100TXHD           LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4                     LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4             LINK_STATUS_SPEED_AND_DUPLEX_100T4
 #define LINK_100TXFD           LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
 #define LINK_1000THD           LINK_STATUS_SPEED_AND_DUPLEX_1000THD
 #define LINK_1000TFD           LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
 #define LINK_2500THD           LINK_STATUS_SPEED_AND_DUPLEX_2500THD
 #define LINK_2500TFD           LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
 #define LINK_2500XFD           LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD            LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD            LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD            LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD            LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
 #define LINK_12_5GTFD          LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
 #define LINK_12_5GXFD          LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD            LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD            LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD            LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD            LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD            LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD            LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
 
 #define PHY_XGXS_FLAG                  0x1
 #define PHY_SGMII_FLAG                 0x2
 
 /* */
 #define SFP_EEPROM_CON_TYPE_ADDR               0x2
-       #define SFP_EEPROM_CON_TYPE_VAL_LC              0x7
+       #define SFP_EEPROM_CON_TYPE_VAL_LC      0x7
        #define SFP_EEPROM_CON_TYPE_VAL_COPPER  0x21
 
 
 
 #define SFP_EEPROM_FC_TX_TECH_ADDR             0x8
        #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
-       #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE      0x8
+       #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8
 
-#define SFP_EEPROM_OPTIONS_ADDR                0x40
+#define SFP_EEPROM_OPTIONS_ADDR                        0x40
        #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE                2
+#define SFP_EEPROM_OPTIONS_SIZE                        2
 
-#define EDC_MODE_LINEAR                                0x0022
-#define EDC_MODE_LIMITING                              0x0044
-#define EDC_MODE_PASSIVE_DAC                   0x0055
+#define EDC_MODE_LINEAR                                0x0022
+#define EDC_MODE_LIMITING                              0x0044
+#define EDC_MODE_PASSIVE_DAC                   0x0055
 
 
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND                (0x5000)
 /*                     INTERFACE                          */
 /**********************************************************/
 
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
        bnx2x_cl45_write(_bp, _phy, \
                (_phy)->def_md_devad, \
                (_bank + (_addr & 0xf)), \
                _val)
 
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
        bnx2x_cl45_read(_bp, _phy, \
                (_phy)->def_md_devad, \
                (_bank + (_addr & 0xf)), \
                _val)
 
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-                         u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-                          u8 devad, u16 reg, u16 val);
-
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
        u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
 
        DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
 
-       /**
+       /*
         * mapping between entry  priority to client number (0,1,2 -debug and
         * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
         * 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
         */
 
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-       /**
+       /*
         * Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries, 3 -
         * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
        /* defines which entries (clients) are subjected to WFQ arbitration */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-       /**
-       * For strict priority entries defines the number of consecutive
-       * slots for the highest priority.
-       */
+       /*
+        * For strict priority entries defines the number of consecutive
+        * slots for the highest priority.
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-       /**
+       /*
         * mapping between the CREDIT_WEIGHT registers and actual client
         * numbers
         */
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
        REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
        /* ETS mode disable */
        REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-       /**
+       /*
         * If ETS mode is enabled (there is no strict priority) defines a WFQ
         * weight for COS0/COS1.
         */
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
        REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 }
 
-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 {
        /* ETS disabled configuration */
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-       /**
-       * defines which entries (clients) are subjected to WFQ arbitration
-       * COS0 0x8
-       * COS1 0x10
-       */
+       /*
+        * defines which entries (clients) are subjected to WFQ arbitration
+        * COS0 0x8
+        * COS1 0x10
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-       /**
-       * mapping between the ARB_CREDIT_WEIGHT registers and actual
-       * client numbers (WEIGHT_0 does not actually have to represent
-       * client 0)
-       *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-       *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
-       */
+       /*
+        * mapping between the ARB_CREDIT_WEIGHT registers and actual
+        * client numbers (WEIGHT_0 does not actually have to represent
+        * client 0)
+        *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+        *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
 
        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
 
        /* Defines the number of consecutive slots for the strict priority */
        REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-       /**
-       * Bitmap of 5bits length. Each bit specifies whether the entry behaves
-       * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
-       * entry, 4 - COS1 entry.
-       * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
-       * bit4   bit3     bit2     bit1    bit0
-       * MCP and debug are strict
-       */
+       /*
+        * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+        * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
+        * entry, 4 - COS1 entry.
+        * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
+        * bit4   bit3    bit2     bit1    bit0
+        * MCP and debug are strict
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 
        /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
        if ((0 == total_bw) ||
            (0 == cos0_bw) ||
            (0 == cos1_bw)) {
-               DP(NETIF_MSG_LINK,
-                  "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+               DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
                return;
        }
 
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        u32 val = 0;
 
        DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-       /**
+       /*
         * Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries,
         * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
         * MCP and debug are strict
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-       /**
+       /*
         * For strict priority entries defines the number of consecutive slots
         * for the highest priority.
         */
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        /* Defines the number of consecutive slots for the strict priority */
        REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 
-       /**
-       * mapping between entry  priority to client number (0,1,2 -debug and
-       * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
-       * 3bits client num.
-       *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-       * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
-       * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
-       */
+       /*
+        * mapping between entry priority and client number (0,1,2 - debug and
+        * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+        * 3bits client num.
+        *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+        * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
+        * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
+        */
        val = (0 == strict_cos) ? 0x2318 : 0x22E0;
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
 
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 /*                     MAC/PBF section                           */
 /******************************************************************/
 static void bnx2x_emac_init(struct link_params *params,
-                          struct link_vars *vars)
+                           struct link_vars *vars)
 {
        /* reset and unreset the emac core */
        struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
        u16 timeout;
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-                  (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
        udelay(5);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-                  (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 
        /* init emac - use read-modify-write */
        /* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
 }
 
 static u8 bnx2x_emac_enable(struct link_params *params,
-                         struct link_vars *vars, u8 lb)
+                           struct link_vars *vars, u8 lb)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
        /* enable emac and not bmac */
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
 
-       /* for paladium */
-       if (CHIP_REV_IS_EMUL(bp)) {
-               /* Use lane 1 (of lanes 0-3) */
-               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-                           port*4, 1);
-       }
-       /* for fpga */
-       else
-
-       if (CHIP_REV_IS_FPGA(bp)) {
-               /* Use lane 1 (of lanes 0-3) */
-               DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
-               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
-                           0);
-       } else
        /* ASIC */
        if (vars->phy_flags & PHY_XGXS_FLAG) {
                u32 ser_lane = ((params->lane_config &
-                           PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                           PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+                                PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+                               PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
                DP(NETIF_MSG_LINK, "XGXS\n");
                /* select the master lanes (out of 0-3) */
-               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
-                          port*4, ser_lane);
+               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
                /* select XGXS */
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-                          port*4, 1);
+               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 
        } else { /* SerDes */
                DP(NETIF_MSG_LINK, "SerDes\n");
                /* select SerDes */
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-                          port*4, 0);
+               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
        }
 
        bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
-                   EMAC_RX_MODE_RESET);
+                     EMAC_RX_MODE_RESET);
        bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
-                   EMAC_TX_MODE_RESET);
+                     EMAC_TX_MODE_RESET);
 
        if (CHIP_REV_IS_SLOW(bp)) {
                /* config GMII mode */
                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-               EMAC_WR(bp, EMAC_REG_EMAC_MODE,
-                           (val | EMAC_MODE_PORT_GMII));
+               EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
        } else { /* ASIC */
                /* pause enable/disable */
                bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
        val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 
-       /**
-       * Setting this bit causes MAC control frames (except for pause
-       * frames) to be passed on for processing. This setting has no
-       * affect on the operation of the pause frames. This bit effects
-       * all packets regardless of RX Parser packet sorting logic.
-       * Turn the PFC off to make sure we are in Xon state before
-       * enabling it.
-       */
+       /*
+        * Setting this bit causes MAC control frames (except for pause
+        * frames) to be passed on for processing. This setting has no
+        * effect on the operation of the pause frames. This bit affects
+        * all packets regardless of RX Parser packet sorting logic.
+        * Turn the PFC off to make sure we are in Xon state before
+        * enabling it.
+        */
        EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
        if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
                DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
        REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
 
-       if (CHIP_REV_IS_EMUL(bp)) {
-               /* take the BigMac out of reset */
-               REG_WR(bp,
-                          GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-                          (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
-               /* enable access for bmac registers */
-               REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
-       } else
-               REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+       REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
 
        vars->mac_type = MAC_TYPE_EMAC;
        return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
                val |= (1<<5);
        wb_data[0] = val;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
        udelay(30);
 
        /* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 
-       /**
-       * Set Time (based unit is 512 bit time) between automatic
-       * re-sending of PP packets amd enable automatic re-send of
-       * Per-Priroity Packet as long as pp_gen is asserted and
-       * pp_disable is low.
-       */
+       /*
+        * Set the time (base unit is 512 bit times) between automatic
+        * re-sending of PP packets and enable automatic re-send of
+        * Per-Priority Packets as long as pp_gen is asserted and
+        * pp_disable is low.
+        */
        val = 0x8000;
        if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
                val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
        wb_data[0] = val;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
-                       wb_data, 2);
+                   wb_data, 2);
 
        /* mac control */
        val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 
        wb_data[0] = val;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
 static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
                        full_xon_th =
                          PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
                }
-       /* The number of free blocks below which the pause signal to class 0
-          of MAC #n is asserted. n=0,1 */
+       /*
+        * The number of free blocks below which the pause signal to class 0
+        * of MAC #n is asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
-       /* The number of free blocks above which the pause signal to class 0
-          of MAC #n is de-asserted. n=0,1 */
+       /*
+        * The number of free blocks above which the pause signal to class 0
+        * of MAC #n is de-asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
-       /* The number of free blocks below which the full signal to class 0
-          of MAC #n is asserted. n=0,1 */
+       /*
+        * The number of free blocks below which the full signal to class 0
+        * of MAC #n is asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
-       /* The number of free blocks above which the full signal to class 0
-          of MAC #n is de-asserted. n=0,1 */
+       /*
+        * The number of free blocks above which the full signal to class 0
+        * of MAC #n is de-asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
 
        if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
                        full_xon_th =
                          PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
                }
-               /**
+               /*
                 * The number of free blocks below which the pause signal to
                 * class 1 of MAC #n is asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
-               /**
+               /*
                 * The number of free blocks above which the pause signal to
                 * class 1 of MAC #n is de-asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
-               /**
+               /*
                 * The number of free blocks below which the full signal to
                 * class 1 of MAC #n is asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
-               /**
+               /*
                 * The number of free blocks above which the full signal to
                 * class 1 of MAC #n is de-asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
        }
 }
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                FEATURE_CONFIG_PFC_ENABLED;
        DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 
-       /**
+       /*
         * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
         * MAC control frames (that are not pause packets)
         * will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
        xcm_mask = REG_RD(bp,
                                port ? NIG_REG_LLH1_XCM_MASK :
                                NIG_REG_LLH0_XCM_MASK);
-       /**
+       /*
         * nig params will override non PFC params, since it's possible to
         * do transition from PFC to SAFC
         */
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
                      struct link_vars *vars,
                      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-       /**
+       /*
         * The PFC and pause are orthogonal to one another, meaning when
         * PFC is enabled, the pause are disabled, and when PFC is
         * disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
 
 static u8 bnx2x_bmac1_enable(struct link_params *params,
                             struct link_vars *vars,
-                         u8 is_lb)
+                            u8 is_lb)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
        /* XGXS control */
        wb_data[0] = 0x3c;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr +
-                     BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
-                     wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+                   wb_data, 2);
 
        /* tx MAC SA */
        wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
                        params->mac_addr[5]);
        wb_data[1] = ((params->mac_addr[0] << 8) |
                        params->mac_addr[1]);
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
-                   wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
 
        /* mac control */
        val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
        }
        wb_data[0] = val;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
-                   wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
 
        /* set rx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
 
        bnx2x_update_pfc_bmac1(params, vars);
 
        /* set tx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
 
        /* set cnt max size */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
-                   wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
 
        /* configure safc */
        wb_data[0] = 0x1000200;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
                    wb_data, 2);
-       /* fix for emulation */
-       if (CHIP_REV_IS_EMUL(bp)) {
-               wb_data[0] = 0xf000;
-               wb_data[1] = 0;
-               REG_WR_DMAE(bp,
-                           bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
-                           wb_data, 2);
-       }
-
 
        return 0;
 }
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
 
        wb_data[0] = 0;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
        udelay(30);
 
        /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
        wb_data[0] = 0x3c;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr +
-                       BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+                   wb_data, 2);
 
        udelay(30);
 
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
        wb_data[1] = ((params->mac_addr[0] << 8) |
                        params->mac_addr[1]);
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
-                       wb_data, 2);
+                   wb_data, 2);
 
        udelay(30);
 
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
        wb_data[0] = 0x1000200;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
-                       wb_data, 2);
+                   wb_data, 2);
        udelay(30);
 
        /* set rx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
        udelay(30);
 
        /* set tx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
        udelay(30);
        /* set cnt max size */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
        udelay(30);
        bnx2x_update_pfc_bmac2(params, vars, is_lb);
 
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
        u32 val;
        /* reset and unreset the BigMac */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-                    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
        msleep(1);
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-                    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 
        /* enable access for bmac registers */
        REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
        struct bnx2x *bp = params->bp;
 
        REG_WR(bp, params->shmem_base +
-                  offsetof(struct shmem_region,
-                           port_mb[params->port].link_status),
-                       link_status);
+              offsetof(struct shmem_region,
+                       port_mb[params->port].link_status), link_status);
 }
 
 static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
 {
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
-               NIG_REG_INGRESS_BMAC0_MEM;
+                       NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_data[2];
        u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
                if (CHIP_IS_E2(bp)) {
                        /* Clear Rx Enable bit in BMAC_CONTROL register */
                        REG_RD_DMAE(bp, bmac_addr +
-                                       BIGMAC2_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
+                                   BIGMAC2_REGISTER_BMAC_CONTROL,
+                                   wb_data, 2);
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
                        REG_WR_DMAE(bp, bmac_addr +
-                                       BIGMAC2_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
+                                   BIGMAC2_REGISTER_BMAC_CONTROL,
+                                   wb_data, 2);
                } else {
                        /* Clear Rx Enable bit in BMAC_CONTROL register */
                        REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
 }
 
 static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
-                        u32 line_speed)
+                          u32 line_speed)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
                /* update threshold */
                REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
                /* update init credit */
-               init_crd = 778;         /* (800-18-4) */
+               init_crd = 778;         /* (800-18-4) */
 
        } else {
                u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
        return 0;
 }
 
+/*
+ * bnx2x_get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, the port and the
+ * port-swap configuration. Each phy has a default access mode,
+ * which could also be overridden by nvram configuration. This
+ * parameter, whether it is the default phy configuration or the
+ * nvram override configuration, is passed here as mdc_mdio_access
+ * and selects the emac_base for the CL45 read/write operations.
+ */
 static u32 bnx2x_get_emac_base(struct bnx2x *bp,
                               u32 mdc_mdio_access, u8 port)
 {
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
 
 }
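
The body of bnx2x_get_emac_base() sits outside this hunk, so the following is
only a minimal sketch of the selection the comment above describes. All names
and values are assumed placeholders (SKETCH_*), not the driver's constants,
and the real function also consults the port-swap setting read from hardware,
which is omitted here:

#include <stdint.h>

/* illustrative base addresses only -- not the real register map */
#define SKETCH_EMAC0_BASE 0x1000u
#define SKETCH_EMAC1_BASE 0x2000u

enum sketch_mdio_access {
	SKETCH_MDIO_EMAC0,	/* always go through emac0 */
	SKETCH_MDIO_EMAC1,	/* always go through emac1 */
	SKETCH_MDIO_BOTH,	/* follow the port number */
	SKETCH_MDIO_SWAPPED	/* use the emac of the other port */
};

static uint32_t sketch_get_emac_base(enum sketch_mdio_access access, uint8_t port)
{
	switch (access) {
	case SKETCH_MDIO_EMAC0:
		return SKETCH_EMAC0_BASE;
	case SKETCH_MDIO_EMAC1:
		return SKETCH_EMAC1_BASE;
	case SKETCH_MDIO_SWAPPED:
		return port ? SKETCH_EMAC0_BASE : SKETCH_EMAC1_BASE;
	case SKETCH_MDIO_BOTH:
	default:
		return port ? SKETCH_EMAC1_BASE : SKETCH_EMAC0_BASE;
	}
}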
 
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-                   u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/*                     CL45 access functions                     */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+                          u8 devad, u16 reg, u16 val)
 {
        u32 tmp, saved_mode;
        u8 i, rc = 0;
-
-       /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+       /*
+        * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
 
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        for (i = 0; i < 50; i++) {
                udelay(10);
 
-               tmp = REG_RD(bp, phy->mdio_ctrl +
-                                  EMAC_REG_EMAC_MDIO_COMM);
+               tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                DP(NETIF_MSG_LINK, "write phy register failed\n");
+               netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                rc = -EFAULT;
        } else {
                /* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                        udelay(10);
 
                        tmp = REG_RD(bp, phy->mdio_ctrl +
-                                        EMAC_REG_EMAC_MDIO_COMM);
+                                    EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                }
                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        DP(NETIF_MSG_LINK, "write phy register failed\n");
+                       netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                        rc = -EFAULT;
                }
        }
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        return rc;
 }
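
Both CL45 helpers share the completion handshake visible above: kick off the
MDIO transaction, then poll the COMM register up to 50 times at 10us intervals
until the controller clears its busy flag, otherwise fail with -EFAULT. A
standalone sketch of just that polling step, with assumed callbacks and a
placeholder busy bit (not the real EMAC_MDIO_COMM_START_BUSY encoding):

#include <stdint.h>

#define SKETCH_MDIO_BUSY (1u << 29)	/* placeholder busy flag */

/*
 * Returns 0 once the busy flag clears, -1 on timeout. reg_read() and
 * delay_us() are caller-supplied stand-ins for REG_RD() and udelay().
 */
static int sketch_mdio_wait_idle(uint32_t (*reg_read)(void *ctx, uint32_t reg),
				 void *ctx, uint32_t comm_reg,
				 void (*delay_us)(unsigned int us))
{
	uint32_t val;
	int i;

	for (i = 0; i < 50; i++) {
		delay_us(10);			/* matches the driver's udelay(10) */
		val = reg_read(ctx, comm_reg);
		if (!(val & SKETCH_MDIO_BUSY)) {
			delay_us(5);		/* short settle, as in the driver */
			return 0;
		}
	}
	return -1;				/* caller logs the MDC/MDIO timeout */
}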
 
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-                  u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+                         u8 devad, u16 reg, u16 *ret_val)
 {
        u32 val, saved_mode;
        u16 i;
        u8 rc = 0;
-
-       /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+       /*
+        * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
 
        saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
-                            EMAC_MDIO_MODE_CLOCK_CNT));
+                             EMAC_MDIO_MODE_CLOCK_CNT));
        val |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+               netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                *ret_val = 0;
                rc = -EFAULT;
 
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
                        udelay(10);
 
                        val = REG_RD(bp, phy->mdio_ctrl +
-                                         EMAC_REG_EMAC_MDIO_COMM);
+                                    EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
                                break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
                }
                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+                       netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                        *ret_val = 0;
                        rc = -EFAULT;
                }
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
                  u8 devad, u16 reg, u16 *ret_val)
 {
        u8 phy_index;
-       /**
+       /*
         * Probe for the phy according to the given phy_addr, and execute
         * the read request on it
         */
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
                   u8 devad, u16 reg, u16 val)
 {
        u8 phy_index;
-       /**
+       /*
         * Probe for the phy according to the given phy_addr, and execute
         * the write request on it
         */
@@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
                aer_val = 0x3800 + offset - 1;
        else
                aer_val = 0x3800 + offset;
-       CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_AER_BLOCK,
-                               MDIO_AER_BLOCK_AER_REG, aer_val);
+       CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, aer_val);
 }
 static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
                                     struct bnx2x_phy *phy)
 {
-       CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_AER_BLOCK,
-                               MDIO_AER_BLOCK_AER_REG, 0x3800);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, 0x3800);
 }
 
 /******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
 
        bnx2x_set_serdes_access(bp, port);
 
-       REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
-                    port*0x10,
-                    DEFAULT_PHY_DEV_ADDR);
+       REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+              DEFAULT_PHY_DEV_ADDR);
 }
 
 static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
        udelay(500);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
 
-       REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
-                    port*0x18, 0);
+       REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
        REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-                    params->phy[INT_PHY].def_md_devad);
+              params->phy[INT_PHY].def_md_devad);
 }
 
 
 void bnx2x_link_status_update(struct link_params *params,
-                           struct link_vars   *vars)
+                             struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u8 link_10g;
        u8 port = params->port;
 
        vars->link_status = REG_RD(bp, params->shmem_base +
-                                         offsetof(struct shmem_region,
-                                          port_mb[port].link_status));
+                                  offsetof(struct shmem_region,
+                                           port_mb[port].link_status));
 
        vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
 
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
                vars->phy_link_up = 1;
                vars->duplex = DUPLEX_FULL;
                switch (vars->link_status &
-                                       LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+                       LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
                        case LINK_10THD:
                                vars->duplex = DUPLEX_HALF;
                                /* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
 {
        struct bnx2x *bp = params->bp;
        u16 new_master_ln, ser_lane;
-       ser_lane =  ((params->lane_config &
+       ser_lane = ((params->lane_config &
                     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
        /* set the master_ln for AN */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_XGXS_BLOCK2,
-                             MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
-                             &new_master_ln);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_XGXS_BLOCK2,
+                         MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+                         &new_master_ln);
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_XGXS_BLOCK2 ,
-                             MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
-                             (new_master_ln | ser_lane));
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_XGXS_BLOCK2,
+                         MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+                         (new_master_ln | ser_lane));
 }
 
 static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
        struct bnx2x *bp = params->bp;
        u16 mii_control;
        u16 i;
-
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
 
        /* reset the unicore */
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL,
-                             (mii_control |
-                              MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL,
+                         (mii_control |
+                          MDIO_COMBO_IEEO_MII_CONTROL_RESET));
        if (set_serdes)
                bnx2x_set_serdes_access(bp, params->port);
 
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
                udelay(5);
 
                /* the reset erased the previous bank value */
-               CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL,
-                             &mii_control);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 &mii_control);
 
                if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
                        udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
                }
        }
 
+       netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+                  params->port);
        DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
        return -EINVAL;
 
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
                                 struct bnx2x_phy *phy)
 {
        struct bnx2x *bp = params->bp;
-       /* Each two bits represents a lane number:
-          No swap is 0123 => 0x1b no need to enable the swap */
+       /*
+        * Each two bits represents a lane number:
+        * no swap is 0123 => 0x1b, so there is no need to enable the swap.
+        */
        u16 ser_lane, rx_lane_swap, tx_lane_swap;
 
        ser_lane = ((params->lane_config &
-                        PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                       PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
        rx_lane_swap = ((params->lane_config &
-                            PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
-                           PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+                        PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+                       PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
        tx_lane_swap = ((params->lane_config &
-                            PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
-                           PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+                        PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+                       PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
 
        if (rx_lane_swap != 0x1b) {
-               CL45_WR_OVER_CL22(bp, phy,
-                                   MDIO_REG_BANK_XGXS_BLOCK2,
-                                   MDIO_XGXS_BLOCK2_RX_LN_SWAP,
-                                   (rx_lane_swap |
-                                   MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
-                                   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+                                 (rx_lane_swap |
+                                  MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+                                  MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
        } else {
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_XGXS_BLOCK2,
-                                     MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
        }
 
        if (tx_lane_swap != 0x1b) {
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_XGXS_BLOCK2,
-                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP,
-                                     (tx_lane_swap |
-                                      MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+                                 (tx_lane_swap |
+                                  MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
        } else {
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_XGXS_BLOCK2,
-                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
        }
 }
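
A quick worked example of the encoding in the comment above: packing the
identity lane order 0,1,2,3 two bits per lane, most-significant pair first,
gives (0 << 6) | (1 << 4) | (2 << 2) | 3 = 0x1b, which is exactly the value
rx_lane_swap and tx_lane_swap are compared against before a swap is
programmed. A tiny standalone helper, for illustration only (the mapping of
bit pairs to physical lanes is assumed, not taken from the hardware manual):

#include <stdint.h>

/* pack four lane indices, two bits each, first argument in the top pair */
static uint16_t sketch_pack_lane_map(uint8_t l0, uint8_t l1,
				     uint8_t l2, uint8_t l3)
{
	return (uint16_t)(((l0 & 3) << 6) | ((l1 & 3) << 4) |
			  ((l2 & 3) << 2) | (l3 & 3));
}

/* sketch_pack_lane_map(0, 1, 2, 3) == 0x1b -> no swap needed */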
 
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u16 control2;
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
-                             &control2);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+                         &control2);
        if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
                control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
        else
                control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
        DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
                phy->speed_cap_mask, control2);
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
-                             control2);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+                         control2);
 
        if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
             (phy->speed_cap_mask &
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
                DP(NETIF_MSG_LINK, "XGXS\n");
 
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+               CL22_WR_OVER_CL45(bp, phy,
+                                MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
 
-               CL45_RD_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-                               &control2);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+                                 &control2);
 
 
                control2 |=
                    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
 
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-                               control2);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+                                 control2);
 
                /* Disable parallel detection of HiG */
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_XGXS_BLOCK2,
-                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
-                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
-                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
        }
 }
 
 static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                              struct link_params *params,
-                           struct link_vars *vars,
-                           u8 enable_cl73)
+                             struct link_vars *vars,
+                             u8 enable_cl73)
 {
        struct bnx2x *bp = params->bp;
        u16 reg_val;
 
        /* CL37 Autoneg */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 
        /* CL37 Autoneg Enabled */
        if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                             MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
        /* Enable/Disable Autodetection */
 
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
        reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
                    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
        reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
        else
                reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
 
        /* Enable TetonII and BAM autoneg */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_BAM_NEXT_PAGE,
-                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_BAM_NEXT_PAGE,
+                         MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
                          &reg_val);
        if (vars->line_speed == SPEED_AUTO_NEG) {
                /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
        }
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_BAM_NEXT_PAGE,
-                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
-                             reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_BAM_NEXT_PAGE,
+                         MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+                         reg_val);
 
        if (enable_cl73) {
                /* Enable Cl73 FSM status bits */
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_CL73_USERB0,
-                                   MDIO_CL73_USERB0_CL73_UCTRL,
-                                     0xe);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_USERB0,
+                                 MDIO_CL73_USERB0_CL73_UCTRL,
+                                 0xe);
 
                /* Enable BAM Station Manager*/
-               CL45_WR_OVER_CL22(bp, phy,
+               CL22_WR_OVER_CL45(bp, phy,
                        MDIO_REG_BANK_CL73_USERB0,
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1,
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
 
                /* Advertise CL73 link speeds */
-               CL45_RD_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_CL73_IEEEB1,
-                                             MDIO_CL73_IEEEB1_AN_ADV2,
-                                             &reg_val);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB1,
+                                 MDIO_CL73_IEEEB1_AN_ADV2,
+                                 &reg_val);
                if (phy->speed_cap_mask &
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
                        reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
                        reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
 
-               CL45_WR_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_CL73_IEEEB1,
-                                             MDIO_CL73_IEEEB1_AN_ADV2,
-                                     reg_val);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB1,
+                                 MDIO_CL73_IEEEB1_AN_ADV2,
+                                 reg_val);
 
                /* CL73 Autoneg Enabled */
                reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
        } else /* CL73 Autoneg Disabled */
                reg_val = 0;
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB0,
-                             MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB0,
+                         MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
 }
 
 /* program SerDes, forced speed */
 static void bnx2x_program_serdes(struct bnx2x_phy *phy,
                                 struct link_params *params,
-                              struct link_vars *vars)
+                                struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u16 reg_val;
 
         /* program duplex, disable autoneg and sgmii */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
        reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
                     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                     MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
        if (phy->req_duplex == DUPLEX_FULL)
                reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
-
-       /* program speed
-          - needed only if the speed is greater than 1G (2.5G or 10G) */
-       CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_SERDES_DIGITAL,
-                                     MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+       /*
+        * program speed
+        *  - needed only if the speed is greater than 1G (2.5G or 10G)
+        */
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_MISC1, &reg_val);
        /* clearing the speed value before setting the right speed */
        DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
 
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
                                MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
        }
 
-       CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_SERDES_DIGITAL,
-                                     MDIO_SERDES_DIGITAL_MISC1, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_MISC1, reg_val);
 
 }
 
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
                val |= MDIO_OVER_1G_UP1_2_5G;
        if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
                val |= MDIO_OVER_1G_UP1_10G;
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_OVER_1G,
-                             MDIO_OVER_1G_UP1, val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_OVER_1G,
+                         MDIO_OVER_1G_UP1, val);
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_OVER_1G,
-                             MDIO_OVER_1G_UP3, 0x400);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_OVER_1G,
+                         MDIO_OVER_1G_UP3, 0x400);
 }
 
 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-       /* resolve pause mode and advertisement
-        * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+       /*
+        * Resolve pause mode and advertisement.
+        * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+        */
 
        switch (phy->req_flow_ctrl) {
        case BNX2X_FLOW_CTRL_AUTO:
-               if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
-                       *ieee_fc |=
-                            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-               } else {
+               if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+                       *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+               else
                        *ieee_fc |=
-                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-               }
+                       MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
                break;
        case BNX2X_FLOW_CTRL_TX:
-               *ieee_fc |=
-                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+               *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
                break;
 
        case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 
 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
                                             struct link_params *params,
-                                          u16 ieee_fc)
+                                            u16 ieee_fc)
 {
        struct bnx2x *bp = params->bp;
        u16 val;
        /* for AN, we are always publishing full duplex */
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB1,
-                             MDIO_CL73_IEEEB1_AN_ADV1, &val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB1,
+                         MDIO_CL73_IEEEB1_AN_ADV1, &val);
        val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
        val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB1,
-                             MDIO_CL73_IEEEB1_AN_ADV1, val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB1,
+                         MDIO_CL73_IEEEB1_AN_ADV1, val);
 }
 
 static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
        /* Enable and restart BAM/CL37 aneg */
 
        if (enable_cl73) {
-               CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_CL73_IEEEB0,
-                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                                     &mii_control);
-
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_CL73_IEEEB0,
-                               MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                               (mii_control |
-                               MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
-                               MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB0,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                                 &mii_control);
+
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB0,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                                 (mii_control |
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
        } else {
 
-               CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     &mii_control);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 &mii_control);
                DP(NETIF_MSG_LINK,
                         "bnx2x_restart_autoneg mii_control before = 0x%x\n",
                         mii_control);
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     (mii_control |
-                                      MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
-                                      MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 (mii_control |
+                                  MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+                                  MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
        }
 }
 
 static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
                                           struct link_params *params,
-                                        struct link_vars *vars)
+                                          struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u16 control1;
 
        /* in SGMII mode, the unicore is always slave */
 
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
-                     &control1);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+                         &control1);
        control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
        /* set sgmii mode (and not fiber) */
        control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
                      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
                      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
-                             control1);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+                         control1);
 
        /* if forced speed */
        if (!(vars->line_speed == SPEED_AUTO_NEG)) {
                /* set speed, disable autoneg */
                u16 mii_control;
 
-               CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     &mii_control);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 &mii_control);
                mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                                 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
                                 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
                if (phy->req_duplex == DUPLEX_FULL)
                        mii_control |=
                                MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     mii_control);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 mii_control);
 
        } else { /* AN mode */
                /* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
 
 static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
 {                                              /*  LD      LP   */
-       switch (pause_result) {                 /* ASYM P ASYM P */
-       case 0xb:                               /*   1  0   1  1 */
+       switch (pause_result) {                 /* ASYM P ASYM P */
+       case 0xb:                               /*   1  0   1  1 */
                vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
                break;
 
-       case 0xe:                               /*   1  1   1  0 */
+       case 0xe:                               /*   1  1   1  0 */
                vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
                break;
 
-       case 0x5:                               /*   0  1   0  1 */
-       case 0x7:                               /*   0  1   1  1 */
-       case 0xd:                               /*   1  1   0  1 */
-       case 0xf:                               /*   1  1   1  1 */
+       case 0x5:                               /*   0  1   0  1 */
+       case 0x7:                               /*   0  1   1  1 */
+       case 0xd:                               /*   1  1   0  1 */
+       case 0xf:                               /*   1  1   1  1 */
                vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
                break;
 
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
        u16 pd_10g, status2_1000x;
        if (phy->req_line_speed != SPEED_AUTO_NEG)
                return 0;
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
-                             &status2_1000x);
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
-                             &status2_1000x);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+                         &status2_1000x);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+                         &status2_1000x);
        if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
                DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
                         params->port);
                return 1;
        }
 
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                             MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
-                             &pd_10g);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                         MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+                         &pd_10g);
 
        if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
                DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
                    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
                     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
 
-                       CL45_RD_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_CL73_IEEEB1,
-                                             MDIO_CL73_IEEEB1_AN_ADV1,
-                                             &ld_pause);
-                       CL45_RD_OVER_CL22(bp, phy,
-                                            MDIO_REG_BANK_CL73_IEEEB1,
-                                            MDIO_CL73_IEEEB1_AN_LP_ADV1,
-                                            &lp_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                                         MDIO_REG_BANK_CL73_IEEEB1,
+                                         MDIO_CL73_IEEEB1_AN_ADV1,
+                                         &ld_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                                         MDIO_REG_BANK_CL73_IEEEB1,
+                                         MDIO_CL73_IEEEB1_AN_LP_ADV1,
+                                         &lp_pause);
                        pause_result = (ld_pause &
                                        MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
                                        >> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
                                 pause_result);
                } else {
-                       CL45_RD_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_COMBO_IEEE0,
-                                             MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
-                                             &ld_pause);
-                       CL45_RD_OVER_CL22(bp, phy,
-                              MDIO_REG_BANK_COMBO_IEEE0,
-                              MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
-                              &lp_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                                         MDIO_REG_BANK_COMBO_IEEE0,
+                                         MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+                                         &ld_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                               MDIO_REG_BANK_COMBO_IEEE0,
+                               MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+                               &lp_pause);
                        pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                        pause_result |= (lp_pause &
-                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+                               MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                        DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
                                 pause_result);
                }
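
Tying these hunks together: the CL37 branch above packs the local pause
advertisement into the upper two bits of pause_result and the link partner's
into the lower two, and bnx2x_pause_resolve() then maps that nibble through
its LD/LP table. A worked example straight from that table: if both ends
advertise symmetric PAUSE only, the nibble is 0b0101 = 0x5, which resolves to
BNX2X_FLOW_CTRL_BOTH. A standalone stand-in for the table lookup (sketch only,
not driver code):

/* nibble layout per the table: LD_ASYM LD_PAUSE LP_ASYM LP_PAUSE (bits 3..0) */
enum sketch_fc { SKETCH_FC_NONE, SKETCH_FC_TX, SKETCH_FC_RX, SKETCH_FC_BOTH };

static enum sketch_fc sketch_pause_resolve(unsigned int pause_result)
{
	switch (pause_result & 0xf) {
	case 0xb:			/* 1 0 1 1 */
		return SKETCH_FC_TX;
	case 0xe:			/* 1 1 1 0 */
		return SKETCH_FC_RX;
	case 0x5: case 0x7:		/* symmetric pause agreed on both ends */
	case 0xd: case 0xf:
		return SKETCH_FC_BOTH;
	default:
		return SKETCH_FC_NONE;
	}
}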
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
        u16 rx_status, ustat_val, cl37_fsm_recieved;
        DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
        /* Step 1: Make sure signal is detected */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_RX0,
-                             MDIO_RX0_RX_STATUS,
-                             &rx_status);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_RX0,
+                         MDIO_RX0_RX_STATUS,
+                         &rx_status);
        if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
            (MDIO_RX0_RX_STATUS_SIGDET)) {
                DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
                             "rx_status(0x80b0) = 0x%x\n", rx_status);
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_CL73_IEEEB0,
-                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB0,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
                return;
        }
        /* Step 2: Check CL73 state machine */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_USERB0,
-                             MDIO_CL73_USERB0_CL73_USTAT1,
-                             &ustat_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_USERB0,
+                         MDIO_CL73_USERB0_CL73_USTAT1,
+                         &ustat_val);
        if ((ustat_val &
             (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
              MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
                             "ustat_val(0x8371) = 0x%x\n", ustat_val);
                return;
        }
-       /* Step 3: Check CL37 Message Pages received to indicate LP
-       supports only CL37 */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_REMOTE_PHY,
-                             MDIO_REMOTE_PHY_MISC_RX_STATUS,
-                             &cl37_fsm_recieved);
+       /*
+        * Step 3: Check CL37 Message Pages received to indicate LP
+        * supports only CL37
+        */
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_REMOTE_PHY,
+                         MDIO_REMOTE_PHY_MISC_RX_STATUS,
+                         &cl37_fsm_recieved);
        if ((cl37_fsm_recieved &
             (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
             MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
                         cl37_fsm_recieved);
                return;
        }
-       /* The combined cl37/cl73 fsm state information indicating that we are
-       connected to a device which does not support cl73, but does support
-       cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+       /*
+        * The combined cl37/cl73 fsm state information indicates that we
+        * are connected to a device which does not support cl73, but does
+        * support cl37 BAM. In this case we disable cl73 and restart
+        * cl37 auto-neg.
+        */
+
        /* Disable CL73 */
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB0,
-                             MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                             0);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB0,
+                         MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                         0);
        /* Restart CL37 autoneg */
        bnx2x_restart_autoneg(phy, params, 0);
        DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
                                     struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 new_line_speed , gp_status;
+       u16 new_line_speed, gp_status;
        u8 rc = 0;
 
        /* Read gp_status */
-       CL45_RD_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_GP_STATUS,
-                               MDIO_GP_STATUS_TOP_AN_STATUS1,
-                               &gp_status);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_GP_STATUS,
+                         MDIO_GP_STATUS_TOP_AN_STATUS1,
+                         &gp_status);
 
        if (phy->req_line_speed == SPEED_AUTO_NEG)
                vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
        u16 bank;
 
        /* read precomp */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_OVER_1G,
-                             MDIO_OVER_1G_LP_UP2, &lp_up2);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_OVER_1G,
+                         MDIO_OVER_1G_LP_UP2, &lp_up2);
 
        /* bits [10:7] at lp_up2, positioned at [15:12] */
        lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
 
        for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
              bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
-               CL45_RD_OVER_CL22(bp, phy,
-                                     bank,
-                                     MDIO_TX0_TX_DRIVER, &tx_driver);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 bank,
+                                 MDIO_TX0_TX_DRIVER, &tx_driver);
 
                /* replace tx_driver bits [15:12] */
                if (lp_up2 !=
                    (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
                        tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
                        tx_driver |= lp_up2;
-                       CL45_WR_OVER_CL22(bp, phy,
-                                             bank,
-                                             MDIO_TX0_TX_DRIVER, tx_driver);
+                       CL22_WR_OVER_CL45(bp, phy,
+                                         bank,
+                                         MDIO_TX0_TX_DRIVER, tx_driver);
                }
        }
 }
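The loop above is a plain read-modify-write: the link partner's preemphasis value is moved from bits [10:7] of LP_UP2 into bits [15:12] of each TX driver register, and the register is rewritten only if the field actually changes. A small standalone sketch of that bit movement, with masks assumed from the comments rather than taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: LP_UP2 preemphasis in bits [10:7], TX driver field in [15:12]. */
#define LP_UP2_PREEMPHASIS_MASK    0x0780
#define LP_UP2_PREEMPHASIS_SHIFT   7
#define TX_DRIVER_PREEMPHASIS_MASK 0xf000

static uint16_t apply_lp_preemphasis(uint16_t tx_driver, uint16_t lp_up2)
{
        uint16_t pe = ((lp_up2 & LP_UP2_PREEMPHASIS_MASK) >>
                       LP_UP2_PREEMPHASIS_SHIFT) << 12;

        if (pe == (tx_driver & TX_DRIVER_PREEMPHASIS_MASK))
                return tx_driver;                       /* nothing to update */

        tx_driver &= ~TX_DRIVER_PREEMPHASIS_MASK;       /* clear the old field */
        return tx_driver | pe;                          /* insert the LP value */
}

int main(void)
{
        /* value 3 in bits [10:7] of lp_up2 lands in bits [15:12]: prints 0x3005 */
        printf("0x%04x\n", apply_lp_preemphasis(0x2005, 0x0180));
        return 0;
}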
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
 
        DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
        bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
-                    EMAC_REG_EMAC_MODE,
-                    (EMAC_MODE_25G_MODE |
-                    EMAC_MODE_PORT_MII_10M |
-                    EMAC_MODE_HALF_DUPLEX));
+                      EMAC_REG_EMAC_MODE,
+                      (EMAC_MODE_25G_MODE |
+                       EMAC_MODE_PORT_MII_10M |
+                       EMAC_MODE_HALF_DUPLEX));
        switch (vars->line_speed) {
        case SPEED_10:
                mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
        if (vars->duplex == DUPLEX_HALF)
                mode |= EMAC_MODE_HALF_DUPLEX;
        bnx2x_bits_en(bp,
-                   GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
-                   mode);
+                     GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+                     mode);
 
        bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
        return 0;
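The EMAC programming above first clears the speed and duplex related mode bits and then enables only the bits the requested speed and duplex need. A tiny standalone sketch of that clear-then-set idiom follows; the bit values are invented stand-ins for the EMAC_MODE_* constants and the helpers only mimic the assumed semantics of bnx2x_bits_dis()/bnx2x_bits_en():

#include <stdint.h>
#include <stdio.h>

static uint32_t emac_mode = 0xff;       /* pretend some stale bits are set */

/* Assumed semantics of the helpers used above: clear or set a bit mask. */
static void bits_dis(uint32_t *reg, uint32_t mask) { *reg &= ~mask; }
static void bits_en(uint32_t *reg, uint32_t mask)  { *reg |= mask; }

/* Illustrative bit values only, not the driver's definitions. */
#define MODE_25G         (1 << 5)
#define MODE_MII_10M     (1 << 3)
#define MODE_HALF_DUPLEX (1 << 1)

int main(void)
{
        uint32_t mode = 0;

        /* Start from a clean slate for the speed/duplex related bits ... */
        bits_dis(&emac_mode, MODE_25G | MODE_MII_10M | MODE_HALF_DUPLEX);
        /* ... then accumulate only what the requested speed/duplex needs. */
        mode |= MODE_MII_10M;           /* e.g. a 10M, half-duplex request */
        mode |= MODE_HALF_DUPLEX;
        bits_en(&emac_mode, mode);

        printf("EMAC mode bits: 0x%08x\n", emac_mode);
        return 0;
}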
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 
        for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
              bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
-                       CL45_WR_OVER_CL22(bp, phy,
+                       CL22_WR_OVER_CL45(bp, phy,
                                          bank,
                                          MDIO_RX0_RX_EQ_BOOST,
                                          phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 
        for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
                      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
-                       CL45_WR_OVER_CL22(bp, phy,
+                       CL22_WR_OVER_CL45(bp, phy,
                                          bank,
                                          MDIO_TX0_TX_DRIVER,
                                          phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
                /* forced speed requested? */
                if (vars->line_speed != SPEED_AUTO_NEG ||
                    (SINGLE_MEDIA_DIRECT(params) &&
-                         params->loopback_mode == LOOPBACK_EXT)) {
+                    params->loopback_mode == LOOPBACK_EXT)) {
                        DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 
                        /* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
 
                        /* program duplex & pause advertisement (for aneg) */
                        bnx2x_set_ieee_aneg_advertisment(phy, params,
-                                                      vars->ieee_fc);
+                                                        vars->ieee_fc);
 
                        /* enable autoneg */
                        bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
 }
 
 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
-                                    struct bnx2x_phy *phy)
+                                    struct bnx2x_phy *phy,
+                                    struct link_params *params)
 {
        u16 cnt, ctrl;
        /* Wait for soft reset to get cleared up to 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
                        break;
                msleep(1);
        }
+
+       if (cnt == 1000)
+               netdev_err(bp->dev, "Warning: PHY was not initialized,"
+                                   " Port %d\n",
+                          params->port);
        DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
        return cnt;
 }
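The new warning added here fires only when the loop runs out of its 1000 iterations without the soft-reset bit clearing. As a standalone illustration of the same poll-with-timeout idiom (the control register read is simulated and the reset bit position is an assumption):

#include <stdint.h>
#include <stdio.h>

#define CTRL_RESET (1 << 15)    /* assumed position of the soft-reset bit */

/* Stub: pretend the PHY drops its reset bit after a few polls. */
static uint16_t read_ctrl(void)
{
        static int polls;
        return (++polls < 5) ? CTRL_RESET : 0;
}

/* Poll up to 'limit' times (roughly 1 ms apart in the driver); return the
 * number of polls used so the caller can detect a timeout. */
static unsigned int wait_reset_complete(unsigned int limit)
{
        unsigned int cnt;
        uint16_t ctrl = 0;

        for (cnt = 0; cnt < limit; cnt++) {
                ctrl = read_ctrl();
                if (!(ctrl & CTRL_RESET))
                        break;
                /* msleep(1) in the driver; omitted here */
        }
        if (cnt == limit)
                fprintf(stderr, "Warning: PHY was not initialized\n");
        printf("control reg 0x%x (after %u polls)\n", ctrl, cnt);
        return cnt;
}

int main(void)
{
        wait_reset_complete(1000);
        return 0;
}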
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
        u32 mask;
        struct bnx2x *bp = params->bp;
 
-       /* setting the status to report on link up
-          for either XGXS or SerDes */
-
+       /* Setting the status to report on link up for either XGXS or SerDes */
        if (params->switch_cfg == SWITCH_CFG_10G) {
                mask = (NIG_MASK_XGXS0_LINK10G |
                        NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 {
        u32 latch_status = 0;
 
-       /**
+       /*
         * Disable the MI INT ( external phy int ) by writing 1 to the
         * status register. Link down indication is high-active-signal,
         * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 
                /* For all latched-signal=up : Re-Arm Latch signals */
                REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
-                            (latch_status & 0xfffe) | (latch_status & 1));
+                      (latch_status & 0xfffe) | (latch_status & 1));
        }
        /* For all latched-signal=up, write original_signal to status */
 }
 
 static void bnx2x_link_int_ack(struct link_params *params,
-                            struct link_vars *vars, u8 is_10g)
+                              struct link_vars *vars, u8 is_10g)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
 
-       /* first reset all status
-        * we assume only one line will be change at a time */
+       /*
+        * First reset all status; we assume only one line will
+        * change at a time
+        */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-                    (NIG_STATUS_XGXS0_LINK10G |
-                     NIG_STATUS_XGXS0_LINK_STATUS |
-                     NIG_STATUS_SERDES0_LINK_STATUS));
+                      (NIG_STATUS_XGXS0_LINK10G |
+                       NIG_STATUS_XGXS0_LINK_STATUS |
+                       NIG_STATUS_SERDES0_LINK_STATUS));
        if (vars->phy_link_up) {
                if (is_10g) {
-                       /* Disable the 10G link interrupt
-                        * by writing 1 to the status register
+                       /*
+                        * Disable the 10G link interrupt by writing 1 to the
+                        * status register
                         */
                        DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
                                      NIG_STATUS_XGXS0_LINK10G);
 
                } else if (params->switch_cfg == SWITCH_CFG_10G) {
-                       /* Disable the link interrupt
-                        * by writing 1 to the relevant lane
-                        * in the status register
+                       /*
+                        * Disable the link interrupt by writing 1 to the
+                        * relevant lane in the status register
                         */
                        u32 ser_lane = ((params->lane_config &
                                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
 
                } else { /* SerDes */
                        DP(NETIF_MSG_LINK, "SerDes phy link up\n");
-                       /* Disable the link interrupt
-                        * by writing 1 to the status register
+                       /*
+                        * Disable the link interrupt by writing 1 to the status
+                        * register
                         */
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
        }
        if ((params->num_phys == MAX_PHYS) &&
            (params->phy[EXT_PHY2].ver_addr != 0)) {
-               spirom_ver = REG_RD(bp,
-                                         params->phy[EXT_PHY2].ver_addr);
+               spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
                if (params->phy[EXT_PHY2].format_fw_ver) {
                        *ver_p = '/';
                        ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
 
                /* change the uni_phy_addr in the nig */
                md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
-                                         port*0x18));
+                                      port*0x18));
 
                REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
 
                bnx2x_cl45_write(bp, phy,
-                              5,
-                              (MDIO_REG_BANK_AER_BLOCK +
-                               (MDIO_AER_BLOCK_AER_REG & 0xf)),
-                              0x2800);
+                                5,
+                                (MDIO_REG_BANK_AER_BLOCK +
+                                 (MDIO_AER_BLOCK_AER_REG & 0xf)),
+                                0x2800);
 
                bnx2x_cl45_write(bp, phy,
-                              5,
-                              (MDIO_REG_BANK_CL73_IEEEB0 +
-                               (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
-                              0x6041);
+                                5,
+                                (MDIO_REG_BANK_CL73_IEEEB0 +
+                                 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+                                0x6041);
                msleep(200);
                /* set aer mmd back */
                bnx2x_set_aer_mmd_xgxs(params, phy);
 
                /* and md_devad */
-               REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-                           md_devad);
-
+               REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
        } else {
                u16 mii_ctrl;
                DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params,
        case LED_MODE_OFF:
                REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
                REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                          SHARED_HW_CFG_LED_MAC1);
+                      SHARED_HW_CFG_LED_MAC1);
 
                tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
                EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
                break;
 
        case LED_MODE_OPER:
-               /**
+               /*
                 * For all other phys, OPER mode is same as ON, so in case
                 * link is down, do nothing
-                **/
+                */
                if (!vars->link_up)
                        break;
        case LED_MODE_ON:
                if (params->phy[EXT_PHY1].type ==
                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
                    CHIP_IS_E2(bp) && params->num_phys == 2) {
-                       /**
-                       * This is a work-around for E2+8727 Configurations
-                       */
+                       /*
+                        * This is a work-around for E2+8727 Configurations
+                        */
                        if (mode == LED_MODE_ON ||
                                speed == SPEED_10000){
                                REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params,
                                return rc;
                        }
                } else if (SINGLE_MEDIA_DIRECT(params)) {
-                       /**
-                       * This is a work-around for HW issue found when link
-                       * is up in CL73
-                       */
+                       /*
+                        * This is a work-around for HW issue found when link
+                        * is up in CL73
+                        */
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
                        REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
                } else {
-                       REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                                  hw_led_mode);
+                       REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
                }
 
-               REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
-                          port*4, 0);
+               REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
                /* Set blinking rate to ~15.9Hz */
                REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
-                          LED_BLINK_RATE_VAL);
+                      LED_BLINK_RATE_VAL);
                REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
-                          port*4, 1);
+                      port*4, 1);
                tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-               EMAC_WR(bp, EMAC_REG_EMAC_LED,
-                           (tmp & (~EMAC_LED_OVERRIDE)));
+               EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
 
                if (CHIP_IS_E1(bp) &&
                    ((speed == SPEED_2500) ||
                     (speed == SPEED_1000) ||
                     (speed == SPEED_100) ||
                     (speed == SPEED_10))) {
-                       /* On Everest 1 Ax chip versions for speeds less than
-                       10G LED scheme is different */
+                       /*
+                        * On Everest 1 Ax chip versions, for speeds less than
+                        * 10G, the LED scheme is different
+                        */
                        REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-                                  + port*4, 1);
+                              + port*4, 1);
                        REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
-                                  port*4, 0);
+                              port*4, 0);
                        REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
-                                  port*4, 1);
+                              port*4, 1);
                }
                break;
 
@@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
 
 }
 
-/**
+/*
  * This function comes to reflect the actual link state read DIRECTLY from the
  * HW
  */
@@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
        u8 ext_phy_link_up = 0, serdes_phy_type;
        struct link_vars temp_vars;
 
-       CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
-                             MDIO_REG_BANK_GP_STATUS,
-                             MDIO_GP_STATUS_TOP_AN_STATUS1,
-                             &gp_status);
+       CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+                         MDIO_REG_BANK_GP_STATUS,
+                         MDIO_GP_STATUS_TOP_AN_STATUS1,
+                         &gp_status);
        /* link is up only if both local phy and external phy are up */
        if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
                return -ESRCH;
@@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
        u8 rc = 0;
        u8 phy_index, non_ext_phy;
        struct bnx2x *bp = params->bp;
-       /**
-       * In case of external phy existence, the line speed would be the
-       * line speed linked up by the external phy. In case it is direct
-       * only, then the line_speed during initialization will be
-       * equal to the req_line_speed
-       */
+       /*
+        * In case of external phy existence, the line speed would be the
+        * line speed linked up by the external phy. In case it is direct
+        * only, then the line_speed during initialization will be
+        * equal to the req_line_speed
+        */
        vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-       /**
+       /*
         * Initialize the internal phy in case this is a direct board
         * (no external phys), or this board has an external phy which
         * requires the internal phy to be initialized first.
@@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
        if (!non_ext_phy)
                for (phy_index = EXT_PHY1; phy_index < params->num_phys;
                      phy_index++) {
-                       /**
+                       /*
                         * No need to initialize second phy in case of first
                         * phy only selection. In case of second phy, we do
                         * need to initialize the first phy, since they are
                         * connected.
-                        **/
+                        */
                        if (phy_index == EXT_PHY2 &&
                            (bnx2x_phy_selection(params) ==
                             PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
-                               DP(NETIF_MSG_LINK, "Not initializing"
-                                                  "second phy\n");
+                               DP(NETIF_MSG_LINK, "Ignoring second phy\n");
                                continue;
                        }
                        params->phy[phy_index].config_init(
@@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
                                 struct link_params *params)
 {
        /* reset the SerDes/XGXS */
-       REG_WR(params->bp, GRCBASE_MISC +
-                    MISC_REGISTERS_RESET_REG_3_CLEAR,
-                    (0x1ff << (params->port*16)));
+       REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+              (0x1ff << (params->port*16)));
 }
 
 static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
        else
                gpio_port = params->port;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      gpio_port);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      gpio_port);
        DP(NETIF_MSG_LINK, "reset external PHY\n");
 }
 
@@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
 
        /* reset BigMac */
        bnx2x_bmac_rx_disable(bp, params->port);
-       REG_WR(bp, GRCBASE_MISC +
-                  MISC_REGISTERS_RESET_REG_2_CLEAR,
-                  (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
        return 0;
 }
 
@@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
        msleep(20);
        return rc;
 }
-/**
+/*
  * The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
@@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
 
        is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
-                                   port*0x18) > 0);
+                               port*0x18) > 0);
        DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
                 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
                 is_mi_int,
-                REG_RD(bp,
-                           NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+                REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
 
        DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
          REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        /* disable emac */
        REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-       /**
-       * Step 1:
-       * Check external link change only for external phys, and apply
-       * priority selection between them in case the link on both phys
-       * is up. Note that the instead of the common vars, a temporary
-       * vars argument is used since each phy may have different link/
-       * speed/duplex result
-       */
+       /*
+        * Step 1:
+        * Check external link change only for external phys, and apply
+        * priority selection between them in case the link on both phys
+        * is up. Note that instead of the common vars, a temporary
+        * vars argument is used since each phy may have different link/
+        * speed/duplex result
+        */
        for (phy_index = EXT_PHY1; phy_index < params->num_phys;
              phy_index++) {
                struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                        switch (bnx2x_phy_selection(params)) {
                        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
                        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-                       /**
+                       /*
                         * In this option, the first PHY makes sure to pass the
                         * traffic through itself only.
                         * It's not clear how to reset the link on the second phy
-                        **/
+                        */
                                active_external_phy = EXT_PHY1;
                                break;
                        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-                       /**
+                       /*
                         * In this option, the first PHY makes sure to pass the
                         * traffic through the second PHY.
-                        **/
+                        */
                                active_external_phy = EXT_PHY2;
                                break;
                        default:
-                       /**
+                       /*
                         * Link indication on both PHYs with the following cases
                         * is invalid:
                         * - FIRST_PHY means that second phy wasn't initialized,
@@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                         * - SECOND_PHY means that first phy should not be able
                         * to link up by itself (using configuration)
                         * - DEFAULT should be overridden during initialization
-                        **/
+                        */
                                DP(NETIF_MSG_LINK, "Invalid link indication"
                                           " mpc=0x%x. DISABLING LINK !!!\n",
                                           params->multi_phy_config);
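When both external PHYs report link, the switch above decides which one should drive traffic. A compact standalone sketch of that selection policy is shown below; the enum values are illustrative and do not correspond to the shmem PORT_HW_CFG_PHY_SELECTION constants.

#include <stdio.h>

enum phy_sel { SEL_HW_DEFAULT, SEL_FIRST_PRIORITY, SEL_SECOND_PRIORITY, SEL_INVALID };
enum ext_phy { EXT_PHY_NONE = 0, EXT_PHY1 = 1, EXT_PHY2 = 2 };

/* Both PHYs are up: choose the one that should carry traffic, or report
 * an invalid configuration (the driver disables the link in that case). */
static enum ext_phy pick_active_phy(enum phy_sel sel)
{
        switch (sel) {
        case SEL_HW_DEFAULT:
        case SEL_FIRST_PRIORITY:
                return EXT_PHY1;        /* first PHY passes traffic through itself */
        case SEL_SECOND_PRIORITY:
                return EXT_PHY2;        /* first PHY hands traffic to the second */
        default:
                return EXT_PHY_NONE;    /* link-up on both is not a valid state here */
        }
}

int main(void)
{
        printf("active phy: %d\n", pick_active_phy(SEL_SECOND_PRIORITY));
        return 0;
}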
@@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                }
        }
        prev_line_speed = vars->line_speed;
-       /**
-       * Step 2:
-       * Read the status of the internal phy. In case of
-       * DIRECT_SINGLE_MEDIA board, this link is the external link,
-       * otherwise this is the link between the 577xx and the first
-       * external phy
-       */
+       /*
+        * Step 2:
+        * Read the status of the internal phy. In case of
+        * DIRECT_SINGLE_MEDIA board, this link is the external link,
+        * otherwise this is the link between the 577xx and the first
+        * external phy
+        */
        if (params->phy[INT_PHY].read_status)
                params->phy[INT_PHY].read_status(
                        &params->phy[INT_PHY],
                        params, vars);
-       /**
+       /*
         * The INT_PHY flow control reside in the vars. This include the
         * case where the speed or flow control are not set to AUTO.
         * Otherwise, the active external phy flow control result is set
@@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
         */
        if (active_external_phy > INT_PHY) {
                vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-               /**
+               /*
                 * Link speed is taken from the XGXS. AN and FC result from
                 * the external phy.
                 */
                vars->link_status |= phy_vars[active_external_phy].link_status;
 
-               /**
+               /*
                 * if active_external_phy is first PHY and link is up - disable
                 * TX on second external PHY
                 */
@@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
                   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
                   vars->link_status, ext_phy_line_speed);
-       /**
+       /*
         * Upon link speed change set the NIG into drain mode. Comes to
         * deals with possible FIFO glitch due to clk change when speed
         * is decreased without link down indicator
@@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                                   ext_phy_line_speed);
                        vars->phy_link_up = 0;
                } else if (prev_line_speed != vars->line_speed) {
-                       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-                                    + params->port*4, 0);
+                       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+                              0);
                        msleep(1);
                }
        }
@@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 
        bnx2x_link_int_ack(params, vars, link_10g);
 
-       /**
-       * In case external phy link is up, and internal link is down
-       * (not initialized yet probably after link initialization, it
-       * needs to be initialized.
-       * Note that after link down-up as result of cable plug, the xgxs
-       * link would probably become up again without the need
-       * initialize it
-       */
+       /*
+        * In case the external phy link is up and the internal link is down
+        * (not initialized yet, probably right after link initialization), it
+        * needs to be initialized.
+        * Note that after a link down-up as a result of a cable plug, the xgxs
+        * link would probably become up again without the need to
+        * initialize it
+        */
        if (!(SINGLE_MEDIA_DIRECT(params))) {
                DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
                           " init_preceding = %d\n", ext_phy_link_up,
@@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                                                vars);
                }
        }
-       /**
-        *  Link is up only if both local phy and external phy (in case of
-        *  non-direct board) are up
+       /*
+        * Link is up only if both local phy and external phy (in case of
+        * non-direct board) are up
         */
        vars->link_up = (vars->phy_link_up &&
                         (ext_phy_link_up ||
@@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
 {
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
        msleep(1);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
 
 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
        u16 fw_ver1, fw_ver2;
 
        bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+                       MDIO_PMA_REG_ROM_VER1, &fw_ver1);
        bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+                       MDIO_PMA_REG_ROM_VER2, &fw_ver2);
        bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
                                  phy->ver_addr);
 }
@@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
        if ((vars->ieee_fc &
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
-               val |=  MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+               val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
        }
        if ((vars->ieee_fc &
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
        else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
                ret = 1;
                bnx2x_cl45_read(bp, phy,
-                             MDIO_AN_DEVAD,
-                             MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+                               MDIO_AN_DEVAD,
+                               MDIO_AN_REG_ADV_PAUSE, &ld_pause);
                bnx2x_cl45_read(bp, phy,
-                             MDIO_AN_DEVAD,
-                             MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+                               MDIO_AN_DEVAD,
+                               MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
                pause_result = (ld_pause &
                                MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
                pause_result |= (lp_pause &
@@ -3881,31 +3858,31 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
        /* Boot port from external ROM  */
        /* EDC grst */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      0x0001);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        0x0001);
 
        /* ucode reboot and rst */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      0x008c);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        0x008c);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
        /* Reset internal microprocessor */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
        /* Release srst bit */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
        /* Delay 100ms per the PHY specifications */
        msleep(100);
@@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 
        /* Clear ser_boot_ctl bit */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0000);
        bnx2x_save_bcm_spirom_ver(bp, phy, port);
 
        DP(NETIF_MSG_LINK,
@@ -3948,48 +3925,6 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
        return rc;
 }
 
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
-                                              struct bnx2x_phy *phy)
-{
-       u16 val;
-       bnx2x_cl45_read(bp, phy,
-                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
-
-       if (val == 0) {
-               /* Mustn't set low power mode in 8073 A0 */
-               return;
-       }
-
-       /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
-       bnx2x_cl45_read(bp, phy,
-                       MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-       val &= ~(1<<13);
-       bnx2x_cl45_write(bp, phy,
-                      MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
-       /* PLL controls */
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
-
-       /* Tx Controls */
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
-       /* Rx Controls */
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
-       /* Enable PLL sequencer  (use read-modify-write to set bit 13) */
-       bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-       val |= (1<<13);
-       bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-}
-
 /******************************************************************/
 /*                     BCM8073 PHY SECTION                       */
 /******************************************************************/
@@ -4000,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 
        /* Read 8073 HW revision*/
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_8073_CHIP_REV, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_8073_CHIP_REV, &val);
 
        if (val != 1) {
                /* No need to workaround in 8073 A1 */
@@ -4009,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
        }
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_ROM_VER2, &val);
 
        /* SNR should be applied only for version 0x102 */
        if (val != 0x102)
@@ -4024,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
        u16 val, cnt, cnt1;
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_8073_CHIP_REV, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_8073_CHIP_REV, &val);
 
        if (val > 0) {
                /* No need to workaround in 8073 A1 */
@@ -4033,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
        }
        /* XAUI workaround in 8073 A0: */
 
-       /* After loading the boot ROM and restarting Autoneg,
-       poll Dev1, Reg $C820: */
+       /*
+        * After loading the boot ROM and restarting Autoneg, poll
+        * Dev1, Reg $C820:
+        */
 
        for (cnt = 0; cnt < 1000; cnt++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
-                             &val);
-                 /* If bit [14] = 0 or bit [13] = 0, continue on with
-                  system initialization (XAUI work-around not required,
-                   as these bits indicate 2.5G or 1G link up). */
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+                               &val);
+                 /*
+                  * If bit [14] = 0 or bit [13] = 0, continue on with
+                  * system initialization (XAUI work-around not required, as
+                  * these bits indicate 2.5G or 1G link up).
+                  */
                if (!(val & (1<<14)) || !(val & (1<<13))) {
                        DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
                        return 0;
                } else if (!(val & (1<<15))) {
-                       DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
-                        /* If bit 15 is 0, then poll Dev1, Reg $C841 until
-                         it's MSB (bit 15) goes to 1 (indicating that the
-                         XAUI workaround has completed),
-                         then continue on with system initialization.*/
+                       DP(NETIF_MSG_LINK, "bit 15 went off\n");
+                       /*
+                        * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+                        * MSB (bit15) goes to 1 (indicating that the XAUI
+                        * workaround has completed), then continue on with
+                        * system initialization.
+                        */
                        for (cnt1 = 0; cnt1 < 1000; cnt1++) {
                                bnx2x_cl45_read(bp, phy,
                                        MDIO_PMA_DEVAD,
@@ -4135,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
                gpio_port = params->port;
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
        /* enable LASI */
        bnx2x_cl45_write(bp, phy,
@@ -4148,8 +4089,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 
        bnx2x_8073_set_pause_cl37(params, phy, vars);
 
-       bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 
@@ -4158,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
-       /**
-        * If this is forced speed, set to KR or KX (all other are not
-        * supported)
-        */
        /* Swap polarity if required - Must be done only in non-1G mode */
        if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
                /* Configure the 8073 to swap _P and _N of the KR lines */
@@ -4204,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
                        val = (1<<7);
                } else if (phy->req_line_speed ==  SPEED_2500) {
                        val = (1<<5);
-                       /* Note that 2.5G works only
-                       when used with 1G advertisment */
+                       /*
+                        * Note that 2.5G works only when used with 1G
+                        * advertisement
+                        */
                } else
                        val = (1<<5);
        } else {
@@ -4214,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
                        val |= (1<<7);
 
-               /* Note that 2.5G works only when
-               used with 1G advertisment */
+               /* Note that 2.5G works only when used with 1G advertisement */
                if (phy->speed_cap_mask &
                        (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4255,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
        /* Add support for CL37 (passive mode) III */
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-       /* The SNR will improve about 2db by changing
-       BW and FEE main tap. Rest commands are executed
-       after link is up*/
+       /*
+        * The SNR will improve about 2dB by changing the BW and FFE main
+        * tap. The rest of the commands are executed after link is up.
+        * Change FFE main cursor to 5 in EDC register
+        */
        if (bnx2x_8073_is_snr_needed(bp, phy))
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4341,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 
        link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
        if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-               /* The SNR will improve about 2dbby
-               changing the BW and FEE main tap.*/
-               /* The 1st write to change FFE main
-               tap is set before restart AN */
-               /* Change PLL Bandwidth in EDC
-               register */
+               /*
+                * The SNR will improve about 2dB by changing the BW and FFE main
+                * tap. The 1st write to change the FFE main tap is set before
+                * restart AN. Change PLL Bandwidth in EDC register
+                */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
                                 0x26BC);
@@ -4390,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
                        bnx2x_cl45_read(bp, phy,
                                        MDIO_XS_DEVAD,
                                        MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
-                       /**
-                       * Set bit 3 to invert Rx in 1G mode and clear this bit
-                       * when it`s in 10G mode.
-                       */
+                       /*
+                        * Set bit 3 to invert Rx in 1G mode and clear this bit
+                        * when it's in 10G mode.
+                        */
                        if (vars->line_speed == SPEED_1000) {
                                DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
                                              " the 8073\n");
@@ -4425,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
           gpio_port);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      gpio_port);
 }
 
 /******************************************************************/
@@ -4440,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "init 8705\n");
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4495,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
 /******************************************************************/
 /*                     SFP+ module Section                       */
 /******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+       u8 gpio_port;
+       u32 swap_val, swap_override;
+       struct bnx2x *bp = params->bp;
+       if (CHIP_IS_E2(bp))
+               gpio_port = BP_PATH(bp);
+       else
+               gpio_port = params->port;
+       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+       swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+       return gpio_port ^ (swap_val && swap_override);
+}
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
                                      struct bnx2x_phy *phy,
-                                     u8 port,
                                      u8 tx_en)
 {
        u16 val;
+       u8 port = params->port;
+       struct bnx2x *bp = params->bp;
+       u32 tx_en_mode;
 
-       DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
-                tx_en, port);
        /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
-       bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_PHY_IDENTIFIER,
-                     &val);
+       tx_en_mode = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                                    dev_info.port_hw_config[port].sfp_ctrl)) &
+               PORT_HW_CFG_TX_LASER_MASK;
+       DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+                          "mode = %x\n", tx_en, port, tx_en_mode);
+       switch (tx_en_mode) {
+       case PORT_HW_CFG_TX_LASER_MDIO:
 
-       if (tx_en)
-               val &= ~(1<<15);
-       else
-               val |= (1<<15);
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_PHY_IDENTIFIER,
+                               &val);
 
-       bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      val);
+               if (tx_en)
+                       val &= ~(1<<15);
+               else
+                       val |= (1<<15);
+
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_PHY_IDENTIFIER,
+                                val);
+       break;
+       case PORT_HW_CFG_TX_LASER_GPIO0:
+       case PORT_HW_CFG_TX_LASER_GPIO1:
+       case PORT_HW_CFG_TX_LASER_GPIO2:
+       case PORT_HW_CFG_TX_LASER_GPIO3:
+       {
+               u16 gpio_pin;
+               u8 gpio_port, gpio_mode;
+               if (tx_en)
+                       gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+               else
+                       gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+               gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+               gpio_port = bnx2x_get_gpio_port(params);
+               bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+               break;
+       }
+       default:
+               DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+               break;
+       }
 }
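The rewritten helper above chooses how to drive the SFP+ TX laser based on the per-port sfp_ctrl field: either through the PHY identifier MDIO register, where bit 15 acts as a TX-disable, or through one of four GPIO pins, with bnx2x_get_gpio_port() XORing the port number when both swap straps are set. A standalone sketch of that dispatch, with the hardware accesses reduced to prints and all values treated as placeholders:

#include <stdint.h>
#include <stdio.h>

enum tx_laser_mode { TX_LASER_MDIO, TX_LASER_GPIO0, TX_LASER_GPIO1,
                     TX_LASER_GPIO2, TX_LASER_GPIO3 };

/* Stand-ins for the MDIO/GPIO accessors; they only report what they would do. */
static void mdio_set_tx_disable(int disable)
{
        printf("MDIO: PHY identifier bit 15 <- %d\n", disable);
}
static void gpio_set(unsigned int pin, int high, unsigned int port)
{
        printf("GPIO%u on port %u <- %s\n", pin, port, high ? "high" : "low");
}

/* Mirror of the swap handling: flip the port only if both straps are set. */
static unsigned int effective_gpio_port(unsigned int port,
                                        uint32_t swap_val, uint32_t swap_override)
{
        return port ^ (swap_val && swap_override);
}

static void sfp_set_transmitter(enum tx_laser_mode mode, unsigned int port,
                                int tx_en)
{
        switch (mode) {
        case TX_LASER_MDIO:
                /* Bit set means "TX disable", so invert the enable flag. */
                mdio_set_tx_disable(!tx_en);
                break;
        case TX_LASER_GPIO0:
        case TX_LASER_GPIO1:
        case TX_LASER_GPIO2:
        case TX_LASER_GPIO3:
                gpio_set(mode - TX_LASER_GPIO0, tx_en,
                         effective_gpio_port(port, 1, 1));
                break;
        default:
                printf("Invalid TX laser mode %d\n", mode);
                break;
        }
}

int main(void)
{
        sfp_set_transmitter(TX_LASER_GPIO2, 0, 1);
        sfp_set_transmitter(TX_LASER_MDIO, 1, 0);
        return 0;
}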
 
 static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                            struct link_params *params,
-                                         u16 addr, u8 byte_cnt, u8 *o_buf)
+                                           u16 addr, u8 byte_cnt, u8 *o_buf)
 {
        struct bnx2x *bp = params->bp;
        u16 val = 0;
@@ -4536,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* Set the read command byte count */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-                      (byte_cnt | 0xa000));
+                        (byte_cnt | 0xa000));
 
        /* Set the read command address */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
-                      addr);
+                        addr);
 
        /* Activate read command */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-                      0x2c0f);
+                        0x2c0f);
 
        /* Wait up to 500us for command complete status */
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
                        break;
@@ -4570,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* Read the buffer */
        for (i = 0; i < byte_cnt; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
                o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
        }
 
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
                        return 0;
@@ -4589,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                            struct link_params *params,
-                                         u16 addr, u8 byte_cnt, u8 *o_buf)
+                                           u16 addr, u8 byte_cnt, u8 *o_buf)
 {
        struct bnx2x *bp = params->bp;
        u16 val, i;
@@ -4602,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
        /* Need to read from 1.8000 to clear it */
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-                     &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+                       &val);
 
        /* Set the read command byte count */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-                      ((byte_cnt < 2) ? 2 : byte_cnt));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+                        ((byte_cnt < 2) ? 2 : byte_cnt));
 
        /* Set the read command address */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
-                      addr);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+                        addr);
        /* Set the destination address */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      0x8004,
-                      MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+                        MDIO_PMA_DEVAD,
+                        0x8004,
+                        MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
 
        /* Activate read command */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-                      0x8002);
-       /* Wait appropriate time for two-wire command to finish before
-       polling the status register */
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+                        0x8002);
+       /*
+        * Wait appropriate time for two-wire command to finish before
+        * polling the status register
+        */
        msleep(1);
 
        /* Wait up to 500us for command complete status */
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
                        break;
@@ -4648,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK,
                         "Got bad status 0x%x when reading from SFP+ EEPROM\n",
                         (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
-               return -EINVAL;
+               return -EFAULT;
        }
 
        /* Read the buffer */
        for (i = 0; i < byte_cnt; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
                o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
        }
 
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
                        return 0;
@@ -4672,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
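
/*
 * Illustrative sketch, not part of the patch: the bounded-poll pattern the
 * EEPROM read above uses while waiting for the two-wire transfer status.
 * The register accessor and the status values below are stand-ins, not the
 * driver's real MDIO API.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_STATUS_MASK        0x000c
#define FAKE_STATUS_COMPLETE    0x0000

/* Pretend status read; reports "complete" after a few polls. */
static uint16_t fake_status_read(void)
{
        static int calls;
        return (++calls < 5) ? 0x0004 : FAKE_STATUS_COMPLETE;
}

/* Poll up to 'retries' times; 0 on completion, -1 on timeout. */
static int poll_two_wire_complete(int retries)
{
        int i;
        for (i = 0; i < retries; i++) {
                if ((fake_status_read() & FAKE_STATUS_MASK) ==
                    FAKE_STATUS_COMPLETE)
                        return 0;
                /* the driver sleeps ~25us between polls here */
        }
        return -1;
}

int main(void)
{
        printf("poll result: %d\n", poll_two_wire_complete(100));
        return 0;
}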
 
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-                                      struct link_params *params, u16 addr,
-                                      u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+                               struct link_params *params, u16 addr,
+                               u8 byte_cnt, u8 *o_buf)
 {
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
                return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
-                                                      byte_cnt, o_buf);
+                                                        byte_cnt, o_buf);
        else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
                return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
-                                                      byte_cnt, o_buf);
+                                                        byte_cnt, o_buf);
        return -EINVAL;
 }
 
 static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                             struct link_params *params,
-                                 u16 *edc_mode)
+                            u16 *edc_mode)
 {
        struct bnx2x *bp = params->bp;
        u8 val, check_limiting_mode = 0;
@@ -4708,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
        {
                u8 copper_module_type;
 
-               /* Check if its active cable( includes SFP+ module)
-               of passive cable*/
+               /*
+                * Check if it's an active cable (includes SFP+ module)
+                * or a passive cable
+                */
                if (bnx2x_read_sfp_module_eeprom(phy,
                                               params,
                                               SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4768,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
        return 0;
 }
-/* This function read the relevant field from the module ( SFP+ ),
-       and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+) and verifies
+ * that it is compliant with this board
+ */
 static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
                                  struct link_params *params)
 {
@@ -4818,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
        /* format the warning message */
        if (bnx2x_read_sfp_module_eeprom(phy,
                                         params,
-                                      SFP_EEPROM_VENDOR_NAME_ADDR,
-                                      SFP_EEPROM_VENDOR_NAME_SIZE,
-                                      (u8 *)vendor_name))
+                                        SFP_EEPROM_VENDOR_NAME_ADDR,
+                                        SFP_EEPROM_VENDOR_NAME_SIZE,
+                                        (u8 *)vendor_name))
                vendor_name[0] = '\0';
        else
                vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
        if (bnx2x_read_sfp_module_eeprom(phy,
                                         params,
-                                      SFP_EEPROM_PART_NO_ADDR,
-                                      SFP_EEPROM_PART_NO_SIZE,
-                                      (u8 *)vendor_pn))
+                                        SFP_EEPROM_PART_NO_ADDR,
+                                        SFP_EEPROM_PART_NO_SIZE,
+                                        (u8 *)vendor_pn))
                vendor_pn[0] = '\0';
        else
                vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
 
-       netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
-                            " Port %d from %s part number %s\n",
-                   params->port, vendor_name, vendor_pn);
+       netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+                  " Port %d from %s part number %s\n",
+                  params->port, vendor_name, vendor_pn);
        phy->flags |= FLAGS_SFP_NOT_APPROVED;
        return -EINVAL;
 }
@@ -4847,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
        u8 val;
        struct bnx2x *bp = params->bp;
        u16 timeout;
-       /* Initialization time after hot-plug may take up to 300ms for some
-       phys type ( e.g. JDSU ) */
+       /*
+        * Initialization time after hot-plug may take up to 300ms for
+        * some PHY types (e.g. JDSU)
+        */
+
        for (timeout = 0; timeout < 60; timeout++) {
                if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
                    == 0) {
@@ -4867,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
        /* Make sure GPIOs are not using for LED mode */
        u16 val;
        /*
-        * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+        * In the GPIO register, bit 4 is used to determine if the GPIOs are
         * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
         * output
         * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
         * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
         * where the 1st bit is the over-current(only input), and 2nd bit is
         * for power( only output )
-       */
-
-       /*
+        *
         * In case of NOC feature is disabled and power is up, set GPIO control
         *  as input to enable listening of over-current indication
         */
@@ -4905,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
        u16 cur_limiting_mode;
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2,
-                     &cur_limiting_mode);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_ROM_VER2,
+                       &cur_limiting_mode);
        DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
                 cur_limiting_mode);
 
        if (edc_mode == EDC_MODE_LIMITING) {
-               DP(NETIF_MSG_LINK,
-                        "Setting LIMITING MODE\n");
+               DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD,
                                 MDIO_PMA_REG_ROM_VER2,
@@ -4922,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
 
                DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-               /* Changing to LRM mode takes quite few seconds.
-               So do it only if current mode is limiting
-               ( default is LRM )*/
+               /*
+                * Changing to LRM mode takes quite a few seconds, so do it
+                * only if the current mode is limiting (the default is LRM)
+                */
                if (cur_limiting_mode != EDC_MODE_LIMITING)
                        return 0;
 
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_LRM_MODE,
-                              0);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_LRM_MODE,
+                                0);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_ROM_VER2,
-                              0x128);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_ROM_VER2,
+                                0x128);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_MISC_CTRL0,
-                              0x4008);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_MISC_CTRL0,
+                                0x4008);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_LRM_MODE,
-                              0xaaaa);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_LRM_MODE,
+                                0xaaaa);
        }
        return 0;
 }
 
 static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
                                       struct bnx2x_phy *phy,
-                                       u16 edc_mode)
+                                      u16 edc_mode)
 {
        u16 phy_identifier;
        u16 rom_ver2_val;
        bnx2x_cl45_read(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      &phy_identifier);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_PHY_IDENTIFIER,
+                       &phy_identifier);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      (phy_identifier & ~(1<<9)));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_PHY_IDENTIFIER,
+                        (phy_identifier & ~(1<<9)));
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2,
-                     &rom_ver2_val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_ROM_VER2,
+                       &rom_ver2_val);
        /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_ROM_VER2,
-                      (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_ROM_VER2,
+                        (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      (phy_identifier | (1<<9)));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_PHY_IDENTIFIER,
+                        (phy_identifier | (1<<9)));
 
        return 0;
 }
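
/*
 * Illustrative sketch, not part of the patch: the ROM_VER2 update done in
 * bnx2x_8727_set_limiting_mode() above keeps the upper 8 bits of the register
 * and replaces only the lower 8 bits with the requested EDC mode. The values
 * in main() are made up for demonstration.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t merge_edc_mode(uint16_t rom_ver2, uint16_t edc_mode)
{
        /* keep the MSB 8 bits, set the LSB 8 bits from edc_mode */
        return (rom_ver2 & 0xff00) | (edc_mode & 0x00ff);
}

int main(void)
{
        printf("0x%04x\n", merge_edc_mode(0x12ab, 0x0055)); /* -> 0x1255 */
        return 0;
}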
@@ -4990,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
 
        switch (action) {
        case DISABLE_TX:
-               bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+               bnx2x_sfp_set_transmitter(params, phy, 0);
                break;
        case ENABLE_TX:
                if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
-                       bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+                       bnx2x_sfp_set_transmitter(params, phy, 1);
                break;
        default:
                DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -5003,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
        }
 }
 
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+                                          u8 gpio_mode)
+{
+       struct bnx2x *bp = params->bp;
+
+       u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].sfp_ctrl)) &
+               PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+       switch (fault_led_gpio) {
+       case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+               return;
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+       {
+               u8 gpio_port = bnx2x_get_gpio_port(params);
+               u16 gpio_pin = fault_led_gpio -
+                       PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+               DP(NETIF_MSG_LINK, "Set fault module-detected led "
+                                  "pin %x port %x mode %x\n",
+                                  gpio_pin, gpio_port, gpio_mode);
+               bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+       }
+       break;
+       default:
+               DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+                              fault_led_gpio);
+       }
+}
+
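
/*
 * Illustrative sketch, not part of the patch: how the new
 * bnx2x_set_sfp_module_fault_led() maps the shmem fault-LED selector onto a
 * GPIO pin index. The FAULT_LED_* values are placeholders assumed to be
 * consecutive; only the selector-to-pin arithmetic is the point.
 */
#include <stdint.h>
#include <stdio.h>

enum {
        FAULT_LED_DISABLED = 0,
        FAULT_LED_GPIO0,        /* assumed consecutive encoding */
        FAULT_LED_GPIO1,
        FAULT_LED_GPIO2,
        FAULT_LED_GPIO3,
};

/* Return the GPIO pin to drive, or -1 if the fault LED is disabled/invalid. */
static int fault_led_to_gpio_pin(uint32_t cfg)
{
        if (cfg >= FAULT_LED_GPIO0 && cfg <= FAULT_LED_GPIO3)
                return cfg - FAULT_LED_GPIO0;
        return -1;
}

int main(void)
{
        printf("pin: %d\n", fault_led_to_gpio_pin(FAULT_LED_GPIO2)); /* 2 */
        return 0;
}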
 static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
                                     struct link_params *params)
 {
@@ -5020,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
        if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
                DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
                return -EINVAL;
-       } else if (bnx2x_verify_sfp_module(phy, params) !=
-                  0) {
+       } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
                /* check SFP+ module compatibility */
                DP(NETIF_MSG_LINK, "Module verification failed!!\n");
                rc = -EINVAL;
                /* Turn on fault module-detected led */
-               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                 MISC_REGISTERS_GPIO_HIGH,
-                                 params->port);
+               bnx2x_set_sfp_module_fault_led(params,
+                                              MISC_REGISTERS_GPIO_HIGH);
+
                if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
                    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
                     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -5039,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
                }
        } else {
                /* Turn off fault module-detected led */
-               DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
-               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                         MISC_REGISTERS_GPIO_LOW,
-                                         params->port);
+               bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
        }
 
        /* power up the SFP module */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
                bnx2x_8727_power_module(bp, phy, 1);
 
-       /* Check and set limiting mode / LRM mode on 8726.
-       On 8727 it is done automatically */
+       /*
+        * Check and set limiting mode / LRM mode on 8726. On 8727 it
+        * is done automatically
+        */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
                bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
        else
@@ -5062,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
        if (rc == 0 ||
            (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
            PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-               bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+               bnx2x_sfp_set_transmitter(params, phy, 1);
        else
-               bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+               bnx2x_sfp_set_transmitter(params, phy, 0);
 
        return rc;
 }
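
/*
 * Illustrative sketch, not part of the patch: the transmitter decision at the
 * end of bnx2x_sfp_module_detection() above - the laser stays enabled unless
 * module verification failed *and* the enforcement policy asks for the TX
 * laser to be disabled. The constants are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENFRCMNT_MASK                   0x7     /* assumed mask */
#define ENFRCMNT_DISABLE_TX_LASER       0x1     /* assumed encoding */

static bool tx_should_be_enabled(int verify_rc, unsigned int feat_cfg)
{
        return verify_rc == 0 ||
               (feat_cfg & ENFRCMNT_MASK) != ENFRCMNT_DISABLE_TX_LASER;
}

int main(void)
{
        /* unapproved module + "disable TX laser" policy -> keep laser off */
        printf("%d\n", tx_should_be_enabled(-22, ENFRCMNT_DISABLE_TX_LASER));
        return 0;
}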
@@ -5077,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
        u8 port = params->port;
 
        /* Set valid module led off */
-       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                         MISC_REGISTERS_GPIO_HIGH,
-                         params->port);
+       bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
-       /* Get current gpio val refelecting module plugged in / out*/
+       /* Get current gpio val reflecting module plugged in / out*/
        gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
 
        /* Call the handling function in case module is detected */
@@ -5097,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
                        DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
        } else {
                u32 val = REG_RD(bp, params->shmem_base +
-                                    offsetof(struct shmem_region, dev_info.
-                                             port_feature_config[params->port].
-                                             config));
+                                offsetof(struct shmem_region, dev_info.
+                                         port_feature_config[params->port].
+                                         config));
 
                bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
                                   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
                                   port);
-               /* Module was plugged out. */
-               /* Disable transmit for this module */
+               /*
+                * Module was plugged out.
+                * Disable transmit for this module
+                */
                if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
                    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-                       bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+                       bnx2x_sfp_set_transmitter(params, phy, 0);
        }
 }
 
@@ -5144,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
                        " link_status 0x%x\n", rx_sd, pcs_status, val2);
-       /* link is up if both bit 0 of pmd_rx_sd and
-        * bit 0 of pcs_status are set, or if the autoneg bit
-        * 1 is set
+       /*
+        * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+        * are set, or if the autoneg bit 1 is set
         */
        link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
        if (link_up) {
@@ -5167,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
                                 struct link_params *params,
                                 struct link_vars *vars)
 {
-       u16 cnt, val;
+       u32 tx_en_mode;
+       u16 cnt, val, tmp1;
        struct bnx2x *bp = params->bp;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        /* Wait until fw is loaded */
        for (cnt = 0; cnt < 100; cnt++) {
@@ -5241,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
                                 0x0004);
        }
        bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+       /*
+        * If the TX Laser is controlled by GPIO_0, do not let the PHY go
+        * into low power mode while the TX Laser is disabled
+        */
+
+       tx_en_mode = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                               dev_info.port_hw_config[params->port].sfp_ctrl))
+                       & PORT_HW_CFG_TX_LASER_MASK;
+
+       if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+               DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+               bnx2x_cl45_read(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+               tmp1 |= 0x1;
+               bnx2x_cl45_write(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+       }
+
        return 0;
 }
 
@@ -5275,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
 
        /* Set soft reset */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
        /* wait for 150ms for microcode load */
        msleep(150);
 
        /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
        msleep(200);
        bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5329,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
        u32 val;
        u32 swap_val, swap_override, aeu_gpio_mask, offset;
        DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
-       /* Restore normal power mode*/
-       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
-       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_8726_external_rom_boot(phy, params);
 
-       /* Need to call module detected on initialization since
-       the module detection triggered by actual module
-       insertion might occur before driver is loaded, and when
-       driver is loaded, it reset all registers, including the
-       transmitter */
+       /*
+        * Need to call module detection on initialization, since the module
+        * detection triggered by actual module insertion might occur before
+        * the driver is loaded; when the driver loads, it resets all
+        * registers, including the transmitter
+        */
        bnx2x_sfp_module_detection(phy, params);
 
        if (phy->req_line_speed == SPEED_1000) {
@@ -5378,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
                bnx2x_cl45_write(bp, phy,
                                MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-               /* Enable RX-ALARM control to receive
-               interrupt for 1G speed change */
+               /*
+                * Enable RX-ALARM control to receive interrupt for 1G speed
+                * change
+                */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
                bnx2x_cl45_write(bp, phy,
@@ -5411,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 
        /* Set GPIO3 to trigger SFP+ module insertion/removal */
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-                           MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+                      MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
 
        /* The GPIO should be swapped if the swap register is set and active */
        swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5502,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params) {
        u32 swap_val, swap_override;
        u8 port;
-       /**
+       /*
         * The PHY reset is controlled by GPIO 1. Fake the port number
         * to cancel the swap done in set_gpio()
         */
@@ -5511,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
        swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
        port = (swap_val && swap_override) ^ 1;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
 static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 struct link_params *params,
                                 struct link_vars *vars)
 {
-       u16 tmp1, val, mod_abs;
+       u32 tx_en_mode;
+       u16 tmp1, val, mod_abs, tmp2;
        u16 rx_alarm_ctrl_val;
        u16 lasi_ctrl_val;
        struct bnx2x *bp = params->bp;
        /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
        rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
        lasi_ctrl_val = 0x0004;
 
@@ -5537,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
 
-       /* Initially configure  MOD_ABS to interrupt when
-       module is presence( bit 8) */
+       /*
+        * Initially configure MOD_ABS to interrupt when the module is
+        * present (bit 8)
+        */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-       /* Set EDC off by setting OPTXLOS signal input to low
-       (bit 9).
-       When the EDC is off it locks onto a reference clock and
-       avoids becoming 'lost'.*/
+       /*
+        * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+        * When the EDC is off it locks onto a reference clock and avoids
+        * becoming 'lost'
+        */
        mod_abs &= ~(1<<8);
        if (!(phy->flags & FLAGS_NOC))
                mod_abs &= ~(1<<9);
@@ -5559,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
        if (phy->flags & FLAGS_NOC)
                val |= (3<<5);
 
-       /**
+       /*
         * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
         * status which reflect SFP+ module over-current
         */
@@ -5586,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
                DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-               /**
+               /*
                 * Power down the XAUI until link is up in case of dual-media
                 * and 1G
                 */
@@ -5612,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
        } else {
-               /**
+               /*
                 * Since the 8727 has only single reset pin, need to set the 10G
                 * registers although it is default
                 */
@@ -5628,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 0x0008);
        }
 
-       /* Set 2-wire transfer rate of SFP+ module EEPROM
+       /*
+        * Set 2-wire transfer rate of SFP+ module EEPROM
         * to 100Khz since some DACs(direct attached cables) do
         * not work at 400Khz.
         */
@@ -5651,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 phy->tx_preemphasis[1]);
        }
 
+       /*
+        * If the TX Laser is controlled by GPIO_0, do not let the PHY go
+        * into low power mode while the TX Laser is disabled
+        */
+       tx_en_mode = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                               dev_info.port_hw_config[params->port].sfp_ctrl))
+                       & PORT_HW_CFG_TX_LASER_MASK;
+
+       if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+               DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+               bnx2x_cl45_read(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+               tmp2 |= 0x1000;
+               tmp2 &= 0xFFEF;
+               bnx2x_cl45_write(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+       }
+
        return 0;
 }
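
/*
 * Illustrative sketch, not part of the patch: the OPT_CFG read-modify-write
 * added above for the "TX laser on GPIO_0" case - set bit 12 and clear bit 4
 * before writing the register back. Only the bit arithmetic is shown here.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t enable_txonoff_pwrdn_dis(uint16_t opt_cfg)
{
        opt_cfg |= 0x1000;      /* set bit 12 */
        opt_cfg &= 0xFFEF;      /* clear bit 4 */
        return opt_cfg;
}

int main(void)
{
        printf("0x%04x\n", enable_txonoff_pwrdn_dis(0x0010)); /* -> 0x1000 */
        return 0;
}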
 
@@ -5664,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                                      port_feature_config[params->port].
                                      config));
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
        if (mod_abs & (1<<8)) {
 
                /* Module is absent */
                DP(NETIF_MSG_LINK, "MOD_ABS indication "
                            "show module is absent\n");
 
-               /* 1. Set mod_abs to detect next module
-               presence event
-                  2. Set EDC off by setting OPTXLOS signal input to low
-                       (bit 9).
-                       When the EDC is off it locks onto a reference clock and
-                       avoids becoming 'lost'.*/
+               /*
+                * 1. Set mod_abs to detect next module
+                *    presence event
+                * 2. Set EDC off by setting OPTXLOS signal input to low
+                *    (bit 9).
+                *    When the EDC is off it locks onto a reference clock and
+                *    avoids becoming 'lost'.
+                */
                mod_abs &= ~(1<<8);
                if (!(phy->flags & FLAGS_NOC))
                        mod_abs &= ~(1<<9);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-               /* Clear RX alarm since it stays up as long as
-               the mod_abs wasn't changed */
+               /*
+                * Clear RX alarm since it stays up as long as
+                * the mod_abs wasn't changed
+                */
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
        } else {
                /* Module is present */
                DP(NETIF_MSG_LINK, "MOD_ABS indication "
                            "show module is present\n");
-               /* First thing, disable transmitter,
-               and if the module is ok, the
-               module_detection will enable it*/
-
-               /* 1. Set mod_abs to detect next module
-               absent event ( bit 8)
-                  2. Restore the default polarity of the OPRXLOS signal and
-               this signal will then correctly indicate the presence or
-               absence of the Rx signal. (bit 9) */
+               /*
+                * First disable the transmitter; if the module is ok, the
+                * module_detection will enable it again.
+                * 1. Set mod_abs to detect the next module absent event
+                *    (bit 8)
+                * 2. Restore the default polarity of the OPRXLOS signal
+                *    (bit 9) so that it correctly indicates the presence
+                *    or absence of the Rx signal
+                */
                mod_abs |= (1<<8);
                if (!(phy->flags & FLAGS_NOC))
                        mod_abs |= (1<<9);
@@ -5711,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                                 MDIO_PMA_DEVAD,
                                 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-               /* Clear RX alarm since it stays up as long as
-               the mod_abs wasn't changed. This is need to be done
-               before calling the module detection, otherwise it will clear
-               the link update alarm */
+               /*
+                * Clear RX alarm since it stays up as long as the mod_abs
+                * wasn't changed. This needs to be done before calling the
+                * module detection, otherwise it will clear the link update
+                * alarm
+                */
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5722,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 
                if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
                    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-                       bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+                       bnx2x_sfp_set_transmitter(params, phy, 0);
 
                if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
                        bnx2x_sfp_module_detection(phy, params);
@@ -5731,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
        }
 
        DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
-                rx_alarm_status);
-       /* No need to check link status in case of
-       module plugged in/out */
+                  rx_alarm_status);
+       /* No need to check link status in case of module plugged in/out */
 }
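
/*
 * Illustrative sketch, not part of the patch: the MOD_ABS bit handling that
 * bnx2x_8727_handle_mod_abs() performs above - bit 8 arms detection of the
 * next module event and bit 9 (when the NOC feature is off) controls the
 * OPRXLOS polarity / EDC behaviour. Plain bit arithmetic on a register copy.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t update_mod_abs(uint16_t mod_abs, bool module_absent, bool noc)
{
        if (module_absent) {
                /* detect the next presence event, keep the EDC off */
                mod_abs &= ~(1 << 8);
                if (!noc)
                        mod_abs &= ~(1 << 9);
        } else {
                /* detect the next absent event, restore OPRXLOS polarity */
                mod_abs |= (1 << 8);
                if (!noc)
                        mod_abs |= (1 << 9);
        }
        return mod_abs;
}

int main(void)
{
        printf("0x%04x\n", update_mod_abs(0x0100, true, false)); /* -> 0 */
        return 0;
}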
 
 static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5769,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-       /**
+       /*
         * If a module is present and there is need to check
         * for over current
         */
@@ -5789,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                                            " Please remove the SFP+ module and"
                                            " restart the system to clear this"
                                            " error.\n",
-                                  params->port);
-
-                       /*
-                        * Disable all RX_ALARMs except for
-                        * mod_abs
-                        */
+                        params->port);
+                       /* Disable all RX_ALARMs except for mod_abs */
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5837,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                        MDIO_PMA_DEVAD,
                        MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-       /* Bits 0..2 --> speed detected,
-          bits 13..15--> link is down */
+       /*
+        * Bits 0..2 --> speed detected,
+        * Bits 13..15 --> link is down
+        */
        if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
                link_up = 1;
                vars->line_speed = SPEED_10000;
+               DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+                          params->port);
        } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
                link_up = 1;
                vars->line_speed = SPEED_1000;
@@ -5863,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_8727_PCS_GP, &val1);
-               /**
+               /*
                 * In case of dual-media board and 1G, power up the XAUI side,
                 * otherwise power it down. For 10G it is done automatically
                 */
@@ -5883,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        /* Disable Transmitter */
-       bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+       bnx2x_sfp_set_transmitter(params, phy, 0);
        /* Clear LASI */
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
 
@@ -5895,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
                                           struct link_params *params)
 {
-       u16 val, fw_ver1, fw_ver2, cnt;
+       u16 val, fw_ver1, fw_ver2, cnt, adj;
        struct bnx2x *bp = params->bp;
 
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = -1;
+
        /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
        /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
 
        for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
                if (val & 1)
                        break;
                udelay(5);
@@ -5921,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
 
        /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
        for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
                if (val & 1)
                        break;
                udelay(5);
@@ -5938,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
        }
 
        /* lower 16 bits of the register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
        /* upper 16 bits of register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
 
        bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
                                  phy->ver_addr);
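
/*
 * Illustrative sketch, not part of the patch: the "adj" pattern introduced
 * above, where the BCM84833 uses MDIO2ARM register addresses shifted by -1
 * relative to the other 848xx parts. The PHY-type identifiers here are
 * placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define PHY_TYPE_BCM84823       1       /* assumed identifiers */
#define PHY_TYPE_BCM84833       2

static uint16_t mdio2arm_reg(uint32_t phy_type, uint16_t base_reg)
{
        int adj = (phy_type == PHY_TYPE_BCM84833) ? -1 : 0;
        return base_reg + adj;
}

int main(void)
{
        printf("0x%04X\n", mdio2arm_reg(PHY_TYPE_BCM84833, 0xA819)); /* A818 */
        return 0;
}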
@@ -5949,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
-       u16 val;
+       u16 val, adj;
+
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = -1;
 
        /* PHYC_CTL_LED_CTL */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+                       MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
        val &= 0xFE00;
        val |= 0x0092;
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+                        MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED1_MASK,
+                        MDIO_PMA_REG_8481_LED1_MASK + adj,
                         0x80);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED2_MASK,
+                        MDIO_PMA_REG_8481_LED2_MASK + adj,
                         0x18);
 
        /* Select activity source by Tx and Rx, as suggested by PHY AE */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED3_MASK,
+                        MDIO_PMA_REG_8481_LED3_MASK + adj,
                         0x0006);
 
        /* Select the closest activity blink rate to that in 10/100/1000 */
        bnx2x_cl45_write(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_8481_LED3_BLINK,
+                       MDIO_PMA_REG_8481_LED3_BLINK + adj,
                        0);
 
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+                       MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
        val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+                        MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
 
        /* 'Interrupt Mask' */
        bnx2x_cl45_write(bp, phy,
@@ -6005,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u16 autoneg_val, an_1000_val, an_10_100_val;
-
+       /*
+        * This phy uses the NIG latch mechanism since link indication
+        * arrives through its LED4 and not via its LASI signal, so we
+        * get a steady signal instead of a clear-on-read indication
+        */
        bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
                      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -6130,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6146,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u8 port, initialize = 1;
-       u16 val;
+       u16 val, adj;
        u16 temp;
-       u32 actual_phy_selection;
+       u32 actual_phy_selection, cms_enable;
        u8 rc = 0;
 
        /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = 3;
 
        msleep(1);
        if (CHIP_IS_E2(bp))
@@ -6161,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
                       port);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       /* BCM84823 requires that XGXS links up first @ 10G for normal
-       behavior */
+       /*
+        * The BCM84823 requires the XGXS link to come up first @ 10G for
+        * normal behavior
+        */
        temp = vars->line_speed;
        vars->line_speed = SPEED_10000;
        bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6175,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        /* Set dual-media configuration according to configuration */
 
        bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_CTL_REG_84823_MEDIA, &val);
+                       MDIO_CTL_REG_84823_MEDIA + adj, &val);
        val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
                 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
                 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6208,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                        MDIO_CTL_REG_84823_MEDIA, val);
+                        MDIO_CTL_REG_84823_MEDIA + adj, val);
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
@@ -6216,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
        else
                bnx2x_save_848xx_spirom_version(phy, params);
+       cms_enable = REG_RD(bp, params->shmem_base +
+                       offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].default_cfg)) &
+                       PORT_HW_CFG_ENABLE_CMS_MASK;
+
+       bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+               MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+       if (cms_enable)
+               val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+       else
+               val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+       bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+               MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
        return rc;
 }
 
 static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
-                                      struct link_params *params,
-                                      struct link_vars *vars)
+                                 struct link_params *params,
+                                 struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 val, val1, val2;
+       u16 val, val1, val2, adj;
        u8 link_up = 0;
 
+       /* Reg offset adjustment for 84833 */
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = -1;
+
        /* Check 10G-BaseT link status */
        /* Check PMD signal ok */
        bnx2x_cl45_read(bp, phy,
                        MDIO_AN_DEVAD, 0xFFFA, &val1);
        bnx2x_cl45_read(bp, phy,
-                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
                        &val2);
        DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
 
@@ -6317,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params)
 {
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
 }
 
 static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6341,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
        else
                port = params->port;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      port);
 }
 
 static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6397,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 
                        /* Set LED masks */
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED2_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED2_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED3_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED3_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED5_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED5_MASK,
+                                        0x20);
 
                } else {
                        bnx2x_cl45_write(bp, phy,
@@ -6438,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                        val |= 0x2492;
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LINK_SIGNAL,
-                                       val);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LINK_SIGNAL,
+                                        val);
 
                        /* Set LED masks */
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED2_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED2_MASK,
+                                        0x20);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED3_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED3_MASK,
+                                        0x20);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED5_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED5_MASK,
+                                        0x0);
                } else {
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x20);
                }
                break;
 
@@ -6484,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                        &val);
 
                        if (!((val &
-                             MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
-                          >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
-                               DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+                              MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+                         >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+                               DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
                                bnx2x_cl45_write(bp, phy,
                                                 MDIO_PMA_DEVAD,
                                                 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6495,30 +6596,42 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 
                        /* Set LED masks */
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x10);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x10);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED2_MASK,
-                                       0x80);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED2_MASK,
+                                        0x80);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED3_MASK,
-                                       0x98);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED3_MASK,
+                                        0x98);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED5_MASK,
-                                       0x40);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED5_MASK,
+                                        0x40);
 
                } else {
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x80);
+
+                       /* Tell LED3 to blink on source */
+                       bnx2x_cl45_read(bp, phy,
+                                       MDIO_PMA_DEVAD,
+                                       MDIO_PMA_REG_8481_LINK_SIGNAL,
+                                       &val);
+                       val &= ~(7<<6);
+                       val |= (1<<6); /* A83B[8:6]= 1 */
+                       bnx2x_cl45_write(bp, phy,
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LINK_SIGNAL,
+                                        val);
                }
                break;
        }
@@ -6545,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
 
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6595,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
                   val2, val1);
        link_up = ((val1 & 4) == 4);
-       /* if link is up
-        * print the AN outcome of the SFX7101 PHY
-        */
+       /* if link is up print the AN outcome of the SFX7101 PHY */
        if (link_up) {
                bnx2x_cl45_read(bp, phy,
                                MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -6631,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
        u16 val, cnt;
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_7101_RESET, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_7101_RESET, &val);
 
        for (cnt = 0; cnt < 10; cnt++) {
                msleep(50);
                /* Writes a self-clearing reset */
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_7101_RESET,
-                              (val | (1<<15)));
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_7101_RESET,
+                                (val | (1<<15)));
                /* Wait for clear */
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_7101_RESET, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_7101_RESET, &val);
 
                if ((val & (1<<15)) == 0)
                        break;
@@ -6655,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params) {
        /* Low power mode is controlled by GPIO 2 */
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
        /* The PHY reset is controlled by GPIO 1 */
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 }
 
 static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6700,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
        .supported      = 0,
        .media_type     = ETH_PHY_NOT_PRESENT,
        .ver_addr       = 0,
-       .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)NULL,
@@ -6737,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_init_serdes,
@@ -6774,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_init_xgxs,
@@ -6805,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
        .media_type     = ETH_PHY_BASE_T,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_7101_config_init,
@@ -6836,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
                           SUPPORTED_Asym_Pause),
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
-       .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_8073_config_init,
@@ -7047,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
+static struct bnx2x_phy phy_84833 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+       .addr           = 0xff,
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
+                           FLAGS_REARM_LATCH_SIGNAL,
+       .def_md_devad   = 0,
+       .reserved       = 0,
+       .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .mdio_ctrl      = 0,
+       .supported      = (SUPPORTED_10baseT_Half |
+                          SUPPORTED_10baseT_Full |
+                          SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_TP |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
+       .media_type     = ETH_PHY_BASE_T,
+       .ver_addr       = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+       .config_init    = (config_init_t)bnx2x_848x3_config_init,
+       .read_status    = (read_status_t)bnx2x_848xx_read_status,
+       .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+       .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)NULL
+};
+
 /*****************************************************************/
 /*                                                               */
/* Populate the phy according to the shmem config. Main function: bnx2x_populate_phy */
@@ -7060,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
        /* Get the 4 lanes xgxs config rx and tx */
        u32 rx = 0, tx = 0, i;
        for (i = 0; i < 2; i++) {
-               /**
+               /*
                 * INT_PHY and EXT_PHY1 share the same value location in the
                 * shmem. When num_phys is greater than 1, then this value
                 * applies only to EXT_PHY1
@@ -7068,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
                if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
                        rx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                          dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+                         dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
 
                        tx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                          dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+                         dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
                } else {
                        rx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                         dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+                        dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 
                        tx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                         dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+                        dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
                }
 
                phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7200,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
                *phy = phy_84823;
                break;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+               *phy = phy_84833;
+               break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                *phy = phy_7101;
                break;
@@ -7214,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
        phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
        bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-       /**
-       * The shmem address of the phy version is located on different
-       * structures. In case this structure is too old, do not set
-       * the address
-       */
+       /*
+        * The shmem address of the phy version is located on different
+        * structures. In case this structure is too old, do not set
+        * the address
+        */
        config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
                                        dev_info.shared_hw_config.config2));
        if (phy_index == EXT_PHY1) {
                phy->ver_addr = shmem_base + offsetof(struct shmem_region,
                                port_mb[port].ext_phy_fw_version);
 
-       /* Check specific mdc mdio settings */
-       if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
-               mdc_mdio_access = config2 &
-               SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+               /* Check specific mdc mdio settings */
+               if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+                       mdc_mdio_access = config2 &
+                       SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
        } else {
                u32 size = REG_RD(bp, shmem2_base);
 
@@ -7247,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-       /**
+       /*
         * In case mdc/mdio_access of the external phy is different than the
         * mdc/mdio access of the XGXS, a HW lock must be taken in each access
         * to prevent one port interfere with another port's CL45 operations.
@@ -7282,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
        /* Populate the default phy configuration for MF mode */
        if (phy_index == EXT_PHY2) {
                link_config = REG_RD(bp, params->shmem_base +
-                                        offsetof(struct shmem_region, dev_info.
+                                    offsetof(struct shmem_region, dev_info.
                        port_feature_config[params->port].link_config2));
                phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
-                                       offsetof(struct shmem_region, dev_info.
+                                            offsetof(struct shmem_region,
+                                                     dev_info.
                        port_hw_config[params->port].speed_capability_mask2));
        } else {
                link_config = REG_RD(bp, params->shmem_base +
-                               offsetof(struct shmem_region, dev_info.
+                                    offsetof(struct shmem_region, dev_info.
                                port_feature_config[params->port].link_config));
                phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
-                               offsetof(struct shmem_region, dev_info.
-                          port_hw_config[params->port].speed_capability_mask));
+                                            offsetof(struct shmem_region,
+                                                     dev_info.
+                       port_hw_config[params->port].speed_capability_mask));
        }
        DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
                       " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7440,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
                        else if (phy_index == EXT_PHY2)
                                actual_phy_idx = EXT_PHY1;
                }
-               params->phy[actual_phy_idx].req_flow_ctrl  =
+               params->phy[actual_phy_idx].req_flow_ctrl =
                        params->req_flow_ctrl[link_cfg_idx];
 
                params->phy[actual_phy_idx].req_line_speed =
@@ -7493,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        set_phy_vars(params);
 
        DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
-       if (CHIP_REV_IS_FPGA(bp)) {
-
-               vars->link_up = 1;
-               vars->line_speed = SPEED_10000;
-               vars->duplex = DUPLEX_FULL;
-               vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-               vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-               /* enable on E1.5 FPGA */
-               if (CHIP_IS_E1H(bp)) {
-                       vars->flow_ctrl |=
-                                       (BNX2X_FLOW_CTRL_TX |
-                                        BNX2X_FLOW_CTRL_RX);
-                       vars->link_status |=
-                                       (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
-                                        LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
-               }
-
-               bnx2x_emac_enable(params, vars, 0);
-               if (!(CHIP_IS_E2(bp)))
-                       bnx2x_pbf_update(params, vars->flow_ctrl,
-                                        vars->line_speed);
-               /* disable drain */
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
-               /* update shared memory */
-               bnx2x_update_mng(params, vars->link_status);
-
-               return 0;
-
-       } else
-       if (CHIP_REV_IS_EMUL(bp)) {
-
-               vars->link_up = 1;
-               vars->line_speed = SPEED_10000;
-               vars->duplex = DUPLEX_FULL;
-               vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-               vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
-               bnx2x_bmac_enable(params, vars, 0);
-
-               bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
-               /* Disable drain */
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-                                   + params->port*4, 0);
-
-               /* update shared memory */
-               bnx2x_update_mng(params, vars->link_status);
-
-               return 0;
-
-       } else
        if (params->loopback_mode == LOOPBACK_BMAC) {
 
                vars->link_up = 1;
@@ -7559,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                /* set bmac loopback */
                bnx2x_bmac_enable(params, vars, 1);
 
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-                   params->port*4, 0);
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
        } else if (params->loopback_mode == LOOPBACK_EMAC) {
 
@@ -7576,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                /* set bmac loopback */
                bnx2x_emac_enable(params, vars, 1);
                bnx2x_emac_program(params, vars);
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-                   params->port*4, 0);
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
        } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
                   (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7600,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                        bnx2x_emac_program(params, vars);
                        bnx2x_emac_enable(params, vars, 0);
                } else
-               bnx2x_bmac_enable(params, vars, 0);
-
+                       bnx2x_bmac_enable(params, vars, 0);
                if (params->loopback_mode == LOOPBACK_XGXS) {
                        /* set 10G XGXS loopback */
                        params->phy[INT_PHY].config_loopback(
@@ -7619,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                                                params);
                        }
                }
-
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-                           params->port*4, 0);
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
                bnx2x_set_led(params, vars,
                              LED_MODE_OPER, vars->line_speed);
@@ -7640,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        return 0;
 }
 u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
-                 u8 reset_ext_phy)
+                   u8 reset_ext_phy)
 {
        struct bnx2x *bp = params->bp;
        u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7649,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
        vars->link_status = 0;
        bnx2x_update_mng(params, vars->link_status);
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                    (NIG_MASK_XGXS0_LINK_STATUS |
-                     NIG_MASK_XGXS0_LINK10G |
-                     NIG_MASK_SERDES0_LINK_STATUS |
-                     NIG_MASK_MI_INT));
+                      (NIG_MASK_XGXS0_LINK_STATUS |
+                       NIG_MASK_XGXS0_LINK10G |
+                       NIG_MASK_SERDES0_LINK_STATUS |
+                       NIG_MASK_MI_INT));
 
        /* activate nig drain */
        REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7720,10 +7817,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
        struct bnx2x_phy phy[PORT_MAX];
        struct bnx2x_phy *phy_blk[PORT_MAX];
        u16 val;
-       s8 port;
+       s8 port = 0;
        s8 port_of_path = 0;
-
-       bnx2x_ext_phy_hw_reset(bp, 0);
+       u32 swap_val, swap_override;
+       swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
+       swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
+       port ^= (swap_val && swap_override);
+       bnx2x_ext_phy_hw_reset(bp, port);
        /* PART1 - Reset both phys */
        for (port = PORT_MAX - 1; port >= PORT_0; port--) {
                u32 shmem_base, shmem2_base;
@@ -7748,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
                /* disable attentions */
                bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
                               port_of_path*4,
-                            (NIG_MASK_XGXS0_LINK_STATUS |
-                             NIG_MASK_XGXS0_LINK10G |
-                             NIG_MASK_SERDES0_LINK_STATUS |
-                             NIG_MASK_MI_INT));
+                              (NIG_MASK_XGXS0_LINK_STATUS |
+                               NIG_MASK_XGXS0_LINK10G |
+                               NIG_MASK_SERDES0_LINK_STATUS |
+                               NIG_MASK_MI_INT));
 
                /* Need to take the phy out of low power mode in order
                        to access its registers */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                                 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+                              MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+                              port);
 
                /* Reset the phy */
                bnx2x_cl45_write(bp, &phy[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_CTRL,
-                              1<<15);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_CTRL,
+                                1<<15);
        }
 
        /* Add delay of 150ms after reset */
@@ -7791,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
 
                /* Only set bit 10 = 1 (Tx power down) */
                bnx2x_cl45_read(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_TX_POWER_DOWN, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
                /* Phase1 of TX_POWER_DOWN reset */
                bnx2x_cl45_write(bp, phy_blk[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_TX_POWER_DOWN,
-                              (val | 1<<10));
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_TX_POWER_DOWN,
+                                (val | 1<<10));
        }
 
-       /* Toggle Transmitter: Power down and then up with 600ms
-          delay between */
+       /*
+        * Toggle Transmitter: Power down and then up with 600ms delay
+        * between
+        */
        msleep(600);
 
        /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7810,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
                /* Phase2 of POWER_DOWN_RESET */
                /* Release bit 10 (Release Tx power down) */
                bnx2x_cl45_read(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_TX_POWER_DOWN, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
                bnx2x_cl45_write(bp, phy_blk[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
                msleep(15);
 
                /* Read modify write the SPI-ROM version select register */
                bnx2x_cl45_read(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_EDC_FFE_MAIN, &val);
                bnx2x_cl45_write(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
 
                /* set GPIO2 back to LOW */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                                 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                              MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
        }
        return 0;
 }
@@ -7875,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
 
                /* Set fault module detected LED on */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                 MISC_REGISTERS_GPIO_HIGH,
-                                 port);
+                              MISC_REGISTERS_GPIO_HIGH,
+                              port);
        }
 
        return 0;
 }
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+                                        u8 *io_gpio, u8 *io_port)
+{
+
+       u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+                                         offsetof(struct shmem_region,
+                               dev_info.port_hw_config[PORT_0].default_cfg));
+       switch (phy_gpio_reset) {
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+               *io_gpio = 0;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+               *io_gpio = 1;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+               *io_gpio = 2;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+               *io_gpio = 3;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+               *io_gpio = 0;
+               *io_port = 1;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+               *io_gpio = 1;
+               *io_port = 1;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+               *io_gpio = 2;
+               *io_port = 1;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+               *io_gpio = 3;
+               *io_port = 1;
+               break;
+       default:
+               /* Don't override the io_gpio and io_port */
+               break;
+       }
+}
 static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
                                     u32 shmem_base_path[],
                                     u32 shmem2_base_path[], u8 phy_index,
                                     u32 chip_id)
 {
-       s8 port;
+       s8 port, reset_gpio;
        u32 swap_val, swap_override;
        struct bnx2x_phy phy[PORT_MAX];
        struct bnx2x_phy *phy_blk[PORT_MAX];
        s8 port_of_path;
-       swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
-       swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
+       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+       swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 
+       reset_gpio = MISC_REGISTERS_GPIO_1;
        port = 1;
 
-       bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+       /*
+        * Retrieve the reset gpio/port which controls the reset.
+        * Default is GPIO1, PORT1
+        */
+       bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+                                    (u8 *)&reset_gpio, (u8 *)&port);
 
        /* Calculate the port based on port swap */
        port ^= (swap_val && swap_override);
 
+       /* Initiate PHY reset */
+       bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      port);
+       msleep(1);
+       bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+                      port);
+
        msleep(5);
 
        /* PART1 - Reset both phys */
@@ -7936,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
 
                /* Reset the phy */
                bnx2x_cl45_write(bp, &phy[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_CTRL,
-                              1<<15);
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
        }
 
        /* Add delay of 150ms after reset */
@@ -7952,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
        }
        /* PART2 - Download firmware to both phys */
        for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-                if (CHIP_IS_E2(bp))
+               if (CHIP_IS_E2(bp))
                        port_of_path = 0;
                else
                        port_of_path = port;
@@ -7987,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
 
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-               /* GPIO1 affects both ports, so there's need to pull
-               it for single port alone */
+               /*
+                * GPIO1 affects both ports, so it needs to be pulled
+                * even when only a single port is in use
+                */
                rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
                                                shmem2_base_path,
                                                phy_index, chip_id);
@@ -7998,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
        default:
                DP(NETIF_MSG_LINK,
-                        "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
-                        ext_phy_type);
+                          "ext_phy 0x%x common init not required\n",
+                          ext_phy_type);
                break;
        }
 
+       if (rc != 0)
+               netdev_err(bp->dev, "Warning: PHY was not initialized,"
+                          " Port %d\n", 0);
        return rc;
 }
 
@@ -8015,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
        u32 ext_phy_type, ext_phy_config;
        DP(NETIF_MSG_LINK, "Begin common phy init\n");
 
-       if (CHIP_REV_IS_EMUL(bp))
-               return 0;
-
        /* Check if common init was already done */
        phy_ver = REG_RD(bp, shmem_base_path[0] +
                         offsetof(struct shmem_region,
index bedab1a..92f36b6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
 #define BNX2X_FLOW_CTRL_BOTH           PORT_FEATURE_FLOW_CONTROL_BOTH
 #define BNX2X_FLOW_CTRL_NONE           PORT_FEATURE_FLOW_CONTROL_NONE
 
-#define SPEED_AUTO_NEG     0
+#define SPEED_AUTO_NEG         0
 #define SPEED_12000            12000
 #define SPEED_12500            12500
 #define SPEED_13000            13000
@@ -44,8 +44,8 @@
 #define SFP_EEPROM_VENDOR_NAME_SIZE            16
 #define SFP_EEPROM_VENDOR_OUI_ADDR             0x25
 #define SFP_EEPROM_VENDOR_OUI_SIZE             3
-#define SFP_EEPROM_PART_NO_ADDR                0x28
-#define SFP_EEPROM_PART_NO_SIZE                16
+#define SFP_EEPROM_PART_NO_ADDR                        0x28
+#define SFP_EEPROM_PART_NO_SIZE                        16
 #define PWR_FLT_ERR_MSG_LEN                    250
 
 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
 #define SINGLE_MEDIA(params)           (params->num_phys == 2)
 /* Dual Media board contains two external phy with different media */
 #define DUAL_MEDIA(params)             (params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET              16
 #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
        (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
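
For reference, a minimal standalone sketch of how FW_PARAM_SET packs its three fields; the phy_addr/phy_type/mdio_access values below are purely illustrative, not the driver's shmem constants:

#include <stdio.h>

#define FW_PARAM_MDIO_CTRL_OFFSET	16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)

int main(void)
{
	/* illustrative field values only */
	unsigned int param = FW_PARAM_SET(0x01u, 0x300u, 0x2u);

	printf("fw param = 0x%08x\n", param);	/* prints 0x00020301 */
	return 0;
}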
 
@@ -201,12 +201,14 @@ struct link_params {
 
        /* Default / User Configuration */
        u8 loopback_mode;
-#define LOOPBACK_NONE  0
-#define LOOPBACK_EMAC  1
-#define LOOPBACK_BMAC  2
+#define LOOPBACK_NONE          0
+#define LOOPBACK_EMAC          1
+#define LOOPBACK_BMAC          2
 #define LOOPBACK_XGXS          3
 #define LOOPBACK_EXT_PHY       4
-#define LOOPBACK_EXT   5
+#define LOOPBACK_EXT           5
+#define LOOPBACK_UMAC          6
+#define LOOPBACK_XMAC          7
 
        /* Device parameters */
        u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
        /* Phy register parameter */
        u32 chip_id;
 
+       /* features */
        u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED             (1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY        (1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED    (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED                     (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY                (1<<2)
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY       (1<<3)
        /* Will be populated during common init */
        struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 /* Reset the external of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+                               struct link_params *params, u16 addr,
+                               u8 byte_cnt, u8 *o_buf);
+
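
A hypothetical caller sketch for the exported reader above (not taken from the driver): it reuses the part-number offsets defined earlier in this header, assumes a return value of 0 means success, and relies on the surrounding driver context for struct bnx2x_phy, struct link_params and DP():

static void example_read_sfp_part_no(struct bnx2x_phy *phy,
				     struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	u8 part_no[SFP_EEPROM_PART_NO_SIZE + 1] = {0};

	if (bnx2x_read_sfp_module_eeprom(phy, params,
					 SFP_EEPROM_PART_NO_ADDR,
					 SFP_EEPROM_PART_NO_SIZE,
					 part_no) == 0)	/* assumption: 0 == success */
		DP(NETIF_MSG_LINK, "SFP+ part number: %s\n", part_no);
}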
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
 
 /* Used to configure the ETS to BW limited */
 void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
-                                               const u32 cos1_bw);
+                       const u32 cos1_bw);
 
 /* Used to configure the ETS to strict */
 u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
index 8cdcf5b..bba21d5 100644 (file)
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
        /* lock the dmae channel */
-       mutex_lock(&bp->dmae_mutex);
+       spin_lock_bh(&bp->dmae_lock);
 
        /* reset completion */
        *wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 unlock:
-       mutex_unlock(&bp->dmae_mutex);
+       spin_unlock_bh(&bp->dmae_lock);
        return rc;
 }
 
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
        }
 
        smp_mb__before_atomic_inc();
-       atomic_inc(&bp->spq_left);
+       atomic_inc(&bp->cq_spq_left);
        /* push the change in fp->state and towards the memory */
        smp_wmb();
 
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
                vn_max_rate = 0;
 
        } else {
+               u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
                vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-               /* If min rate is zero - set it to 1 */
+               /* If fairness is enabled (not all min rates are zero) and
+                  the current min rate is zero - set it to 1.
+                  This is a requirement of the algorithm. */
                if (bp->vn_weight_sum && (vn_min_rate == 0))
                        vn_min_rate = DEF_MIN_RATE;
-               vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
-                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+               if (IS_MF_SI(bp))
+                       /* maxCfg in percents of linkspeed */
+                       vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+               else
+                       /* maxCfg is absolute in 100Mb units */
+                       vn_max_rate = maxCfg * 100;
        }
 
        DP(NETIF_MSG_IFUP,
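
To make the two vn_max_rate branches in the hunk above concrete, a standalone arithmetic sketch with illustrative numbers (maxCfg and line_speed are examples, not values read from shmem):

#include <stdio.h>

int main(void)
{
	unsigned int max_cfg = 30;	/* as returned by bnx2x_extract_max_cfg() */
	unsigned int line_speed = 1000;	/* link speed in Mb/s */

	/* IS_MF_SI: maxCfg is a percentage of the link speed */
	unsigned int si_rate = (line_speed * max_cfg) / 100;
	/* otherwise: maxCfg is absolute, in 100 Mb units */
	unsigned int sd_rate = max_cfg * 100;

	printf("SI: %u Mb/s, non-SI: %u Mb/s\n", si_rate, sd_rate);	/* 300 vs 3000 */
	return 0;
}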
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
                m_fair_vn.vn_credit_delta =
                        max_t(u32, (vn_min_rate * (T_FAIR_COEF /
                                                   (8 * bp->vn_weight_sum))),
-                             (bp->cmng.fair_vars.fair_threshold * 2));
+                             (bp->cmng.fair_vars.fair_threshold +
+                                                       MIN_ABOVE_THRESH));
                DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
                   m_fair_vn.vn_credit_delta);
        }
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
                bnx2x_calc_vn_weight_sum(bp);
 
                /* calculate and set min-max rate for each vn */
-               for (vn = VN_0; vn < E1HVN_MAX; vn++)
-                       bnx2x_init_vn_minmax(bp, vn);
+               if (bp->port.pmf)
+                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                               bnx2x_init_vn_minmax(bp, vn);
 
                /* always enable rate shaping and fairness */
                bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 
-       /* indicate link status only if link status actually changed */
-       if (prev_link_status != bp->link_vars.link_status)
-               bnx2x_link_report(bp);
-
-       if (IS_MF(bp))
-               bnx2x_link_sync_notify(bp);
-
        if (bp->link_vars.link_up && bp->link_vars.line_speed) {
                int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
 
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
                        DP(NETIF_MSG_IFUP,
                           "single function mode without fairness\n");
        }
+
+       if (IS_MF(bp))
+               bnx2x_link_sync_notify(bp);
+
+       /* indicate link status only if link status actually changed */
+       if (prev_link_status != bp->link_vars.link_status)
+               bnx2x_link_report(bp);
 }
 
 void bnx2x__link_status_update(struct bnx2x *bp)
@@ -2301,15 +2312,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
                /* accept matched ucast */
                drop_all_ucast = 0;
        }
-       if (filters & BNX2X_ACCEPT_MULTICAST) {
+       if (filters & BNX2X_ACCEPT_MULTICAST)
                /* accept matched mcast */
                drop_all_mcast = 0;
-               if (IS_MF_SI(bp))
-                       /* since mcast addresses won't arrive with ovlan,
-                        * fw needs to accept all of them in
-                        * switch-independent mode */
-                       accp_all_mcast = 1;
-       }
+
        if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
                /* accept all mcast */
                drop_all_ucast = 0;
@@ -2478,8 +2484,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
        rxq_init->sge_map = fp->rx_sge_mapping;
        rxq_init->rcq_map = fp->rx_comp_mapping;
        rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
-       rxq_init->mtu = bp->dev->mtu;
-       rxq_init->buf_sz = bp->rx_buf_size;
+
+       /* Always use mini-jumbo MTU for FCoE L2 ring */
+       if (IS_FCOE_FP(fp))
+               rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+       else
+               rxq_init->mtu = bp->dev->mtu;
+
+       rxq_init->buf_sz = fp->rx_buf_size;
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->cl_id = fp->cl_id;
        rxq_init->spcl_id = fp->cl_id;
@@ -2731,11 +2743,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
        spin_lock_bh(&bp->spq_lock);
 
-       if (!atomic_read(&bp->spq_left)) {
-               BNX2X_ERR("BUG! SPQ ring full!\n");
-               spin_unlock_bh(&bp->spq_lock);
-               bnx2x_panic();
-               return -EBUSY;
+       if (common) {
+               if (!atomic_read(&bp->eq_spq_left)) {
+                       BNX2X_ERR("BUG! EQ ring full!\n");
+                       spin_unlock_bh(&bp->spq_lock);
+                       bnx2x_panic();
+                       return -EBUSY;
+               }
+       } else if (!atomic_read(&bp->cq_spq_left)) {
+                       BNX2X_ERR("BUG! SPQ ring full!\n");
+                       spin_unlock_bh(&bp->spq_lock);
+                       bnx2x_panic();
+                       return -EBUSY;
        }
 
        spe = bnx2x_sp_get_next(bp);
@@ -2766,20 +2785,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
        spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 
        /* stats ramrod has its own slot on the spq */
-       if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+       if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
                /* It's ok if the actual decrement is issued towards the memory
                 * somewhere between the spin_lock and spin_unlock. Thus no
                 * more explicit memory barrier is needed.
                 */
-               atomic_dec(&bp->spq_left);
+               if (common)
+                       atomic_dec(&bp->eq_spq_left);
+               else
+                       atomic_dec(&bp->cq_spq_left);
+       }
+
 
        DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
           "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
-          "type(0x%x) left %x\n",
+          "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
           bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
           (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-          HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+          HW_CID(bp, cid), data_hi, data_lo, type,
+          atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
@@ -3691,8 +3716,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
        sw_cons = bp->eq_cons;
        sw_prod = bp->eq_prod;
 
-       DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
-                       hw_cons, sw_cons, atomic_read(&bp->spq_left));
+       DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->cq_spq_left %u\n",
+                       hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 
        for (; sw_cons != hw_cons;
              sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3757,13 +3782,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
                        DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-                       bp->set_mac_pending = 0;
+                       if (elem->message.data.set_mac_event.echo)
+                               bp->set_mac_pending = 0;
                        break;
 
                case (EVENT_RING_OPCODE_SET_MAC |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                        DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-                       bp->set_mac_pending = 0;
+                       if (elem->message.data.set_mac_event.echo)
+                               bp->set_mac_pending = 0;
                        break;
                default:
                        /* unknown event log error and continue */
@@ -3775,7 +3802,7 @@ next_spqe:
        } /* for */
 
        smp_mb__before_atomic_inc();
-       atomic_add(spqe_cnt, &bp->spq_left);
+       atomic_add(spqe_cnt, &bp->eq_spq_left);
 
        bp->eq_cons = sw_cons;
        bp->eq_prod = sw_prod;
@@ -4208,7 +4235,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
        spin_lock_init(&bp->spq_lock);
-       atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+       atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4233,9 +4260,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
        bp->eq_cons = 0;
        bp->eq_prod = NUM_EQ_DESC;
        bp->eq_cons_sb = BNX2X_EQ_INDEX;
+       /* we want a warning message before it gets rough... */
+       atomic_set(&bp->eq_spq_left,
+               min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
 
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
        int i;
@@ -4243,13 +4273,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
        if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
                return;
 
-       DP(NETIF_MSG_IFUP,
-          "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
        for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                        TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-                       bp->fp->cl_id + (i % (bp->num_queues -
-                               NONE_ETH_CONTEXT_USE)));
+                       bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+       int i;
+
+       for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+               bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+       bnx2x_push_indir_table(bp);
 }
 
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
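
A standalone illustration of the round-robin fill performed by bnx2x_init_ind_table() above; the table size and queue count are example stand-ins for TSTORM_INDIRECTION_TABLE_SIZE and BNX2X_NUM_ETH_QUEUES(bp):

#include <stdio.h>

#define EXAMPLE_TABLE_SIZE	128	/* stand-in for TSTORM_INDIRECTION_TABLE_SIZE */
#define EXAMPLE_NUM_QUEUES	4	/* stand-in for BNX2X_NUM_ETH_QUEUES(bp) */

int main(void)
{
	unsigned char indir[EXAMPLE_TABLE_SIZE];
	int i;

	/* each RSS indirection entry points at a queue, round-robin */
	for (i = 0; i < EXAMPLE_TABLE_SIZE; i++)
		indir[i] = i % EXAMPLE_NUM_QUEUES;

	printf("entry 0 -> queue %u, entry 5 -> queue %u\n",
	       (unsigned int)indir[0], (unsigned int)indir[5]);	/* 0 and 1 */
	return 0;
}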
@@ -4281,9 +4318,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
                def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                                BNX2X_ACCEPT_MULTICAST;
 #ifdef BCM_CNIC
-               cl_id = bnx2x_fcoe(bp, cl_id);
-               bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-                                         BNX2X_ACCEPT_MULTICAST);
+               if (!NO_FCOE(bp)) {
+                       cl_id = bnx2x_fcoe(bp, cl_id);
+                       bnx2x_rxq_set_mac_filters(bp, cl_id,
+                                                 BNX2X_ACCEPT_UNICAST |
+                                                 BNX2X_ACCEPT_MULTICAST);
+               }
 #endif
                break;
 
@@ -4291,18 +4331,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
                def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                                BNX2X_ACCEPT_ALL_MULTICAST;
 #ifdef BCM_CNIC
-               cl_id = bnx2x_fcoe(bp, cl_id);
-               bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-                                         BNX2X_ACCEPT_MULTICAST);
+               /*
+                *  Prevent duplication of multicast packets by configuring FCoE
+                *  L2 Client to receive only matched unicast frames.
+                */
+               if (!NO_FCOE(bp)) {
+                       cl_id = bnx2x_fcoe(bp, cl_id);
+                       bnx2x_rxq_set_mac_filters(bp, cl_id,
+                                                 BNX2X_ACCEPT_UNICAST);
+               }
 #endif
                break;
 
        case BNX2X_RX_MODE_PROMISC:
                def_q_filters |= BNX2X_PROMISCUOUS_MODE;
 #ifdef BCM_CNIC
-               cl_id = bnx2x_fcoe(bp, cl_id);
-               bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-                                         BNX2X_ACCEPT_MULTICAST);
+               /*
+                *  Prevent packet duplication by configuring DROP_ALL for the
+                *  FCoE L2 Client.
+                */
+               if (!NO_FCOE(bp)) {
+                       cl_id = bnx2x_fcoe(bp, cl_id);
+                       bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+               }
 #endif
                /* pass management unicast packets as well */
                llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5347,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
                }
        }
 
-       bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-                                                      bp->common.shmem_base,
-                                                      bp->common.shmem2_base);
-
        bnx2x_setup_fan_failure_detection(bp);
 
        /* clear PXP2 attentions */
@@ -5503,9 +5550,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
        bnx2x_init_block(bp, MCP_BLOCK, init_stage);
        bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-       bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-                                                      bp->common.shmem_base,
-                                                      bp->common.shmem2_base);
        if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
                                      bp->common.shmem2_base, port)) {
                u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -5838,7 +5882,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
           BP_ABS_FUNC(bp), load_code);
 
        bp->dmae_ready = 0;
-       mutex_init(&bp->dmae_mutex);
+       spin_lock_init(&bp->dmae_lock);
        rc = bnx2x_gunzip_init(bp);
        if (rc)
                return rc;
@@ -5990,6 +6034,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
        BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 
+       BNX2X_FREE(bp->rx_indir_table);
+
 #undef BNX2X_PCI_FREE
 #undef BNX2X_KFREE
 }
@@ -6120,6 +6166,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
        /* EQ */
        BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+       BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+                   TSTORM_INDIRECTION_TABLE_SIZE);
        return 0;
 
 alloc_mem_err:
@@ -6173,12 +6222,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
        int ramrod_flags = WAIT_RAMROD_COMMON;
 
        bp->set_mac_pending = 1;
-       smp_wmb();
 
        config->hdr.length = 1;
        config->hdr.offset = cam_offset;
        config->hdr.client_id = 0xff;
-       config->hdr.reserved1 = 0;
+       /* Mark the single MAC configuration ramrod (as opposed to a
+        * UC/MC list configuration).
+        */
+       config->hdr.echo = 1;
 
        /* primary MAC */
        config->config_table[0].msb_mac_addr =
@@ -6210,6 +6261,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
 
+       mb();
+
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6274,20 +6327,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
        if (CHIP_IS_E1H(bp))
                return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else if (CHIP_MODE_IS_4_PORT(bp))
-               return BP_FUNC(bp) * 32  + rel_offset;
+               return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else
-               return BP_VN(bp) * 32  + rel_offset;
+               return E2_FUNC_MAX * rel_offset + BP_VN(bp);
 }
 
 /**
  *  LLH CAM line allocations: currently only iSCSI and ETH macs are
  *  relevant. In addition, current implementation is tuned for a
  *  single ETH MAC.
- *
- *  When multiple unicast ETH MACs PF configuration in switch
- *  independent mode is required (NetQ, multiple netdev MACs,
- *  etc.), consider better utilisation of 16 per function MAC
- *  entries in the LLH memory.
  */
 enum {
        LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6362,14 +6410,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
                bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
        }
 }
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+       return CHIP_REV_IS_SLOW(bp) ?
+               (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+               (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
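
For the non-emulation branch of bnx2x_e1_cam_mc_offset() above, a quick arithmetic sketch; EXAMPLE_MAX_MULTICAST is an illustrative stand-in, not the driver's BNX2X_MAX_MULTICAST:

#include <stdio.h>

#define EXAMPLE_MAX_MULTICAST	64	/* stand-in for BNX2X_MAX_MULTICAST */

static unsigned int e1_cam_mc_offset(unsigned int port)
{
	/* each port gets its own window of multicast CAM entries */
	return EXAMPLE_MAX_MULTICAST * (1 + port);
}

int main(void)
{
	printf("port 0 -> offset %u, port 1 -> offset %u\n",
	       e1_cam_mc_offset(0), e1_cam_mc_offset(1));	/* 64 and 128 */
	return 0;
}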
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporarily
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
 {
        int i = 0, old;
        struct net_device *dev = bp->dev;
+       u8 offset = bnx2x_e1_cam_mc_offset(bp);
        struct netdev_hw_addr *ha;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 
+       if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+               return -EINVAL;
+
        netdev_for_each_mc_addr(ha, dev) {
                /* copy mac */
                config_cmd->config_table[i].msb_mac_addr =
@@ -6410,32 +6481,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
                }
        }
 
+       wmb();
+
        config_cmd->hdr.length = i;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;
-       config_cmd->hdr.reserved1 = 0;
+       /* Mark that this ramrod doesn't use bp->set_mac_pending for
+        * synchronization.
+        */
+       config_cmd->hdr.echo = 0;
 
-       bp->set_mac_pending = 1;
-       smp_wmb();
+       mb();
 
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+       return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 }
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
 {
        int i;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;
+       u8 offset = bnx2x_e1_cam_mc_offset(bp);
 
-       bp->set_mac_pending = 1;
-       smp_wmb();
-
-       for (i = 0; i < config_cmd->hdr.length; i++)
+       for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
                SET_FLAG(config_cmd->config_table[i].flags,
                        MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                        T_ETH_MAC_COMMAND_INVALIDATE);
 
+       wmb();
+
+       config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+       config_cmd->hdr.offset = offset;
+       config_cmd->hdr.client_id = 0xff;
+       /* We'll wait for a completion this time... */
+       config_cmd->hdr.echo = 1;
+
+       bp->set_mac_pending = 1;
+
+       mb();
+
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 
@@ -6445,6 +6531,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
 
 }
 
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+       struct net_device *dev = bp->dev;
+       struct netdev_hw_addr *ha;
+       u32 mc_filter[MC_HASH_SIZE];
+       u32 crc, bit, regidx;
+       int i;
+
+       memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+       netdev_for_each_mc_addr(ha, dev) {
+               DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+                  bnx2x_mc_addr(ha));
+
+               crc = crc32c_le(0, bnx2x_mc_addr(ha),
+                               ETH_ALEN);
+               bit = (crc >> 24) & 0xff;
+               regidx = bit >> 5;
+               bit &= 0x1f;
+               mc_filter[regidx] |= (1 << bit);
+       }
+
+       for (i = 0; i < MC_HASH_SIZE; i++)
+               REG_WR(bp, MC_HASH_OFFSET(bp, i),
+                      mc_filter[i]);
+
+       return 0;
+}
+
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+       int i;
+
+       for (i = 0; i < MC_HASH_SIZE; i++)
+               REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
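
A standalone sketch of the hash-bucket selection used by bnx2x_set_e1h_mc_list() above: the top byte of the CRC32C picks one of the 8 MC_HASH registers (high 3 bits) and a bit within it (low 5 bits). The CRC value in main() is an arbitrary example:

#include <stdio.h>
#include <stdint.h>

static void mc_hash_bucket(uint32_t crc, unsigned int *regidx,
			   unsigned int *bitpos)
{
	unsigned int bit = (crc >> 24) & 0xff;	/* top byte of the CRC: 0..255 */

	*regidx = bit >> 5;	/* which of the 8 32-bit MC_HASH registers */
	*bitpos = bit & 0x1f;	/* which bit inside that register */
}

int main(void)
{
	unsigned int reg, bit;

	mc_hash_bucket(0xdeadbeef, &reg, &bit);	/* arbitrary example CRC */
	printf("regidx=%u bit=%u\n", reg, bit);	/* regidx=6 bit=30 */
	return 0;
}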
 #ifdef BCM_CNIC
 /**
  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6463,12 +6587,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
        u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+       u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 
        /* Send a SET_MAC ramrod */
-       bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+       bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
                               cam_offset, 0);
 
-       bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+       bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 
        return 0;
 }
@@ -7110,20 +7235,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
        /* Give HW time to discard old tx messages */
        msleep(1);
 
-       if (CHIP_IS_E1(bp)) {
-               /* invalidate mc list,
-                * wait and poll (interrupts are off)
-                */
-               bnx2x_invlidate_e1_mc_list(bp);
-               bnx2x_set_eth_mac(bp, 0);
-
-       } else {
-               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+       bnx2x_set_eth_mac(bp, 0);
 
-               bnx2x_set_eth_mac(bp, 0);
+       bnx2x_invalidate_uc_list(bp);
 
-               for (i = 0; i < MC_HASH_SIZE; i++)
-                       REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+       if (CHIP_IS_E1(bp))
+               bnx2x_invalidate_e1_mc_list(bp);
+       else {
+               bnx2x_invalidate_e1h_mc_list(bp);
+               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
        }
 
 #ifdef BCM_CNIC
@@ -8379,13 +8499,60 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
                bp->mdio.prtad =
                        XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+       /*
+        * Check whether a HW lock is required to access the MDC/MDIO bus
+        * to the PHY(s). In MF mode it is always set, to cover self-test cases.
+        */
+       if (IS_MF(bp))
+               bp->port.need_hw_lock = 1;
+       else
+               bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+                                                       bp->common.shmem_base,
+                                                       bp->common.shmem2_base);
 }
 
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+       u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+       /* Get the number of maximum allowed iSCSI and FCoE connections */
+       bp->cnic_eth_dev.max_iscsi_conn =
+               (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+               BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+       bp->cnic_eth_dev.max_fcoe_conn =
+               (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+               BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+       BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+                      bp->cnic_eth_dev.max_iscsi_conn,
+                      bp->cnic_eth_dev.max_fcoe_conn);
+
+       /* If the maximum allowed number of connections is zero,
+        * disable the feature.
+        */
+       if (!bp->cnic_eth_dev.max_iscsi_conn)
+               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+       if (!bp->cnic_eth_dev.max_fcoe_conn)
+               bp->flags |= NO_FCOE_FLAG;
+}
+#endif
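
   The licensed connection counts read above are stored in shmem XOR-ed with a
   fixed pattern; the helper undoes the XOR and then masks and shifts out the
   field of interest. A minimal stand-alone sketch of that decoding step follows.
   The constant values here are made up purely for illustration; the real
   FW_ENCODE_32BIT_PATTERN and mask/shift definitions live in the bnx2x
   firmware/HSI headers and may differ.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical values -- the real definitions come from the bnx2x
     * firmware headers and may differ.
     */
    #define FW_ENCODE_32BIT_PATTERN         0x1e1e1e1e
    #define MAX_ISCSI_INIT_CONN_MASK        0x0000ffff
    #define MAX_ISCSI_INIT_CONN_SHIFT       0

    static unsigned decode_max_conn(uint32_t shmem_word)
    {
            /* Undo the firmware's XOR encoding, then extract the field. */
            uint32_t decoded = FW_ENCODE_32BIT_PATTERN ^ shmem_word;

            return (decoded & MAX_ISCSI_INIT_CONN_MASK) >> MAX_ISCSI_INIT_CONN_SHIFT;
    }

    int main(void)
    {
            /* A zero count means the feature is not licensed and gets disabled. */
            uint32_t raw = FW_ENCODE_32BIT_PATTERN ^ 64;    /* encode "64 connections" */
            printf("max iSCSI connections: %u\n", decode_max_conn(raw));
            return 0;
    }
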
+
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
        u32 val, val2;
        int func = BP_ABS_FUNC(bp);
        int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+       u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+       u8 *fip_mac = bp->fip_mac;
+#endif
 
        if (BP_NOMCP(bp)) {
                BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8398,7 +8565,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-               /* iSCSI NPAR MAC */
+               /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
+                * FCoE MAC is missing, the corresponding feature is disabled.
+                */
                if (IS_MF_SI(bp)) {
                        u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
                        if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8406,8 +8575,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                                     iscsi_mac_addr_upper);
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    iscsi_mac_addr_lower);
-                               bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-                       }
+                               BNX2X_DEV_INFO("Read iSCSI MAC: "
+                                              "0x%x:0x%04x\n", val2, val);
+                               bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+                               /* Disable iSCSI (and iSCSI OOO) if the MAC
+                                * configuration is invalid.
+                                */
+                               if (!is_valid_ether_addr(iscsi_mac)) {
+                                       bp->flags |= NO_ISCSI_OOO_FLAG |
+                                                    NO_ISCSI_FLAG;
+                                       memset(iscsi_mac, 0, ETH_ALEN);
+                               }
+                       } else
+                               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+                       if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+                               val2 = MF_CFG_RD(bp, func_ext_config[func].
+                                                    fcoe_mac_addr_upper);
+                               val = MF_CFG_RD(bp, func_ext_config[func].
+                                                   fcoe_mac_addr_lower);
+                               BNX2X_DEV_INFO("Read FCoE MAC to "
+                                              "0x%x:0x%04x\n", val2, val);
+                               bnx2x_set_mac_buf(fip_mac, val, val2);
+
+                               /* Disable FCoE if MAC configuration is
+                                * invalid.
+                                */
+                               if (!is_valid_ether_addr(fip_mac)) {
+                                       bp->flags |= NO_FCOE_FLAG;
+                                       memset(bp->fip_mac, 0, ETH_ALEN);
+                               }
+                       } else
+                               bp->flags |= NO_FCOE_FLAG;
                }
 #endif
        } else {
@@ -8421,7 +8621,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                    iscsi_mac_upper);
                val = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                   iscsi_mac_lower);
-               bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+               bnx2x_set_mac_buf(iscsi_mac, val, val2);
 #endif
        }
 
@@ -8429,14 +8629,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
 #ifdef BCM_CNIC
-       /* Inform the upper layers about FCoE MAC */
+       /* Set the FCoE MAC in modes other than MF_SI */
        if (!CHIP_IS_E1x(bp)) {
                if (IS_MF_SD(bp))
-                       memcpy(bp->fip_mac, bp->dev->dev_addr,
-                              sizeof(bp->fip_mac));
-               else
-                       memcpy(bp->fip_mac, bp->iscsi_mac,
-                              sizeof(bp->fip_mac));
+                       memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+               else if (!IS_MF(bp))
+                       memcpy(fip_mac, iscsi_mac, ETH_ALEN);
        }
 #endif
 }
@@ -8599,6 +8797,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
+#ifdef BCM_CNIC
+       bnx2x_get_cnic_info(bp);
+#endif
+
        return rc;
 }
 
@@ -8813,12 +9015,197 @@ static int bnx2x_close(struct net_device *dev)
        return 0;
 }
 
+#define E1_MAX_UC_LIST 29
+#define E1H_MAX_UC_LIST        30
+#define E2_MAX_UC_LIST 14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+       if (CHIP_IS_E1(bp))
+               return E1_MAX_UC_LIST;
+       else if (CHIP_IS_E1H(bp))
+               return E1H_MAX_UC_LIST;
+       else
+               return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+       if (CHIP_IS_E1(bp))
+               /* CAM Entries for Port0:
+                *      0 - prim ETH MAC
+                *      1 - BCAST MAC
+                *      2 - iSCSI L2 ring ETH MAC
+                *      3-31 - UC MACs
+                *
+                * Port1 entries are allocated the same way starting from
+                * entry 32.
+                */
+               return 3 + 32 * BP_PORT(bp);
+       else if (CHIP_IS_E1H(bp)) {
+               /* CAM Entries:
+                *      0-7  - prim ETH MAC for each function
+                *      8-15 - iSCSI L2 ring ETH MAC for each function
+                *      16-255 - UC MAC lists for each function
+                *
+                * Remark: There is no FCoE support for E1H, thus FCoE related
+                *         MACs are not considered.
+                */
+               return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+                       bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+       } else {
+               /* CAM Entries (there is a separate CAM per engine):
+                *      0-4  - prim ETH MAC for each function
+                *      4-7 - iSCSI L2 ring ETH MAC for each function
+                *      8-11 - FIP ucast L2 MAC for each function
+                *      12-15 - ALL_ENODE_MACS mcast MAC for each function
+                *      16-71 - UC MAC lists for each function
+                */
+               u8 func_idx =
+                       (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+               return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+                       bnx2x_max_uc_list(bp) * func_idx;
+       }
+}
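
   To make the CAM layout arithmetic concrete, here is a tiny stand-alone sketch
   that reproduces only the E1 formula from the comment above (3 reserved entries
   per 32-entry port block). The E1H and E2 formulas depend on
   CAM_ISCSI_ETH_LINE, CAM_MAX_PF_LINE and the per-chip function counts, so they
   are deliberately left out of this example.

    #include <stdio.h>

    /* E1: entries 0-2 of each 32-entry port block are reserved for the
     * primary ETH MAC, the broadcast MAC and the iSCSI L2 ring MAC, so
     * unicast list entries start at offset 3 within the block.
     */
    static unsigned e1_uc_list_cam_offset(int port)
    {
            return 3 + 32 * port;
    }

    int main(void)
    {
            printf("port 0 UC list starts at CAM entry %u\n", e1_uc_list_cam_offset(0)); /* 3 */
            printf("port 1 UC list starts at CAM entry %u\n", e1_uc_list_cam_offset(1)); /* 35 */
            return 0;
    }
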
+
+/* Set the UC list without waiting, since waiting implies sleeping and
+ * set_rx_mode() can be invoked from a non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+       int i = 0, old;
+       struct net_device *dev = bp->dev;
+       u8 offset = bnx2x_uc_list_cam_offset(bp);
+       struct netdev_hw_addr *ha;
+       struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+       dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+       if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+               return -EINVAL;
+
+       netdev_for_each_uc_addr(ha, dev) {
+               /* copy mac */
+               config_cmd->config_table[i].msb_mac_addr =
+                       swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+               config_cmd->config_table[i].middle_mac_addr =
+                       swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+               config_cmd->config_table[i].lsb_mac_addr =
+                       swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+               config_cmd->config_table[i].vlan_id = 0;
+               config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+               config_cmd->config_table[i].clients_bit_vector =
+                       cpu_to_le32(1 << BP_L_ID(bp));
+
+               SET_FLAG(config_cmd->config_table[i].flags,
+                       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+                       T_ETH_MAC_COMMAND_SET);
+
+               DP(NETIF_MSG_IFUP,
+                  "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+                  config_cmd->config_table[i].msb_mac_addr,
+                  config_cmd->config_table[i].middle_mac_addr,
+                  config_cmd->config_table[i].lsb_mac_addr);
+
+               i++;
+
+               /* Set uc MAC in NIG */
+               bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+                                    LLH_CAM_ETH_LINE + i);
+       }
+       old = config_cmd->hdr.length;
+       if (old > i) {
+               for (; i < old; i++) {
+                       if (CAM_IS_INVALID(config_cmd->
+                                          config_table[i])) {
+                               /* already invalidated */
+                               break;
+                       }
+                       /* invalidate */
+                       SET_FLAG(config_cmd->config_table[i].flags,
+                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+                               T_ETH_MAC_COMMAND_INVALIDATE);
+               }
+       }
+
+       wmb();
+
+       config_cmd->hdr.length = i;
+       config_cmd->hdr.offset = offset;
+       config_cmd->hdr.client_id = 0xff;
+       /* Mark that this ramrod doesn't use bp->set_mac_pending for
+        * synchronization.
+        */
+       config_cmd->hdr.echo = 0;
+
+       mb();
+
+       return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+                  U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
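
   Each CAM entry stores the 6-byte MAC as three 16-bit words, and the swab16()
   calls above turn the little-endian reads into the byte order the hardware
   expects. Below is a stand-alone sketch of just that packing step; it assumes a
   little-endian host (as the driver's use of swab16() does) and reimplements
   swab16() locally so the example builds outside the kernel.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for the kernel's swab16(). */
    static uint16_t swab16(uint16_t x)
    {
            return (uint16_t)((x << 8) | (x >> 8));
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint16_t w[3];

            /* Read the address as three little-endian 16-bit words, then
             * byte-swap each one, exactly as the CAM entry is filled above.
             */
            memcpy(w, mac, sizeof(w));
            printf("msb 0x%04x middle 0x%04x lsb 0x%04x\n",
                   swab16(w[0]), swab16(w[1]), swab16(w[2]));
            /* Prints: msb 0x0011 middle 0x2233 lsb 0x4455 on a little-endian host. */
            return 0;
    }
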
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+       int i;
+       struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+       dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+       int ramrod_flags = WAIT_RAMROD_COMMON;
+       u8 offset = bnx2x_uc_list_cam_offset(bp);
+       u8 max_list_size = bnx2x_max_uc_list(bp);
+
+       for (i = 0; i < max_list_size; i++) {
+               SET_FLAG(config_cmd->config_table[i].flags,
+                       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+                       T_ETH_MAC_COMMAND_INVALIDATE);
+               bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+       }
+
+       wmb();
+
+       config_cmd->hdr.length = max_list_size;
+       config_cmd->hdr.offset = offset;
+       config_cmd->hdr.client_id = 0xff;
+       /* We'll wait for a completion this time... */
+       config_cmd->hdr.echo = 1;
+
+       bp->set_mac_pending = 1;
+
+       mb();
+
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+                     U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+       /* Wait for a completion */
+       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+                               ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+       /* some multicasts */
+       if (CHIP_IS_E1(bp)) {
+               return bnx2x_set_e1_mc_list(bp);
+       } else { /* E1H and newer */
+               return bnx2x_set_e1h_mc_list(bp);
+       }
+}
+
 /* called with netif_tx_lock from dev_mcast.c */
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-       int port = BP_PORT(bp);
 
        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8829,47 +9216,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
 
        if (dev->flags & IFF_PROMISC)
                rx_mode = BNX2X_RX_MODE_PROMISC;
-       else if ((dev->flags & IFF_ALLMULTI) ||
-                ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-                 CHIP_IS_E1(bp)))
+       else if (dev->flags & IFF_ALLMULTI)
                rx_mode = BNX2X_RX_MODE_ALLMULTI;
-       else { /* some multicasts */
-               if (CHIP_IS_E1(bp)) {
-                       /*
-                        * set mc list, do not wait as wait implies sleep
-                        * and set_rx_mode can be invoked from non-sleepable
-                        * context
-                        */
-                       u8 offset = (CHIP_REV_IS_SLOW(bp) ?
-                                    BNX2X_MAX_EMUL_MULTI*(1 + port) :
-                                    BNX2X_MAX_MULTICAST*(1 + port));
-
-                       bnx2x_set_e1_mc_list(bp, offset);
-               } else { /* E1H */
-                       /* Accept one or more multicasts */
-                       struct netdev_hw_addr *ha;
-                       u32 mc_filter[MC_HASH_SIZE];
-                       u32 crc, bit, regidx;
-                       int i;
-
-                       memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-                       netdev_for_each_mc_addr(ha, dev) {
-                               DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-                                  bnx2x_mc_addr(ha));
-
-                               crc = crc32c_le(0, bnx2x_mc_addr(ha),
-                                               ETH_ALEN);
-                               bit = (crc >> 24) & 0xff;
-                               regidx = bit >> 5;
-                               bit &= 0x1f;
-                               mc_filter[regidx] |= (1 << bit);
-                       }
+       else {
+               /* some multicasts */
+               if (bnx2x_set_mc_list(bp))
+                       rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
-                       for (i = 0; i < MC_HASH_SIZE; i++)
-                               REG_WR(bp, MC_HASH_OFFSET(bp, i),
-                                      mc_filter[i]);
-               }
+               /* some unicasts */
+               if (bnx2x_set_uc_list(bp))
+                       rx_mode = BNX2X_RX_MODE_PROMISC;
        }
 
        bp->rx_mode = rx_mode;
@@ -8950,7 +9306,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_stop               = bnx2x_close,
        .ndo_start_xmit         = bnx2x_start_xmit,
        .ndo_select_queue       = bnx2x_select_queue,
-       .ndo_set_multicast_list = bnx2x_set_rx_mode,
+       .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
@@ -9096,7 +9452,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->vlan_features |= NETIF_F_TSO6;
 
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
        dev->dcbnl_ops = &bnx2x_dcbnl_ops;
 #endif
 
@@ -9503,6 +9859,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
        }
 #endif
 
+#ifdef BCM_DCBNL
+       /* Delete app tlvs from dcbnl */
+       bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
        unregister_netdev(dev);
 
        /* Delete all NAPI objects */
@@ -9776,15 +10137,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
                                        HW_CID(bp, BNX2X_ISCSI_ETH_CID));
                }
 
-               /* There may be not more than 8 L2 and COMMON SPEs and not more
-                * than 8 L5 SPEs in the air.
+               /* There may be no more than 8 L2 and no more than 8 L5 SPEs.
+                * We also check that the number of outstanding
+                * COMMON ramrods is not more than the EQ and SPQ can
+                * accommodate.
                 */
-               if ((type == NONE_CONNECTION_TYPE) ||
-                   (type == ETH_CONNECTION_TYPE)) {
-                       if (!atomic_read(&bp->spq_left))
+               if (type == ETH_CONNECTION_TYPE) {
+                       if (!atomic_read(&bp->cq_spq_left))
+                               break;
+                       else
+                               atomic_dec(&bp->cq_spq_left);
+               } else if (type == NONE_CONNECTION_TYPE) {
+                       if (!atomic_read(&bp->eq_spq_left))
                                break;
                        else
-                               atomic_dec(&bp->spq_left);
+                               atomic_dec(&bp->eq_spq_left);
                } else if ((type == ISCSI_CONNECTION_TYPE) ||
                           (type == FCOE_CONNECTION_TYPE)) {
                        if (bp->cnic_spq_pending >=
@@ -9862,7 +10229,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
        int rc = 0;
 
        mutex_lock(&bp->cnic_mutex);
-       c_ops = bp->cnic_ops;
+       c_ops = rcu_dereference_protected(bp->cnic_ops,
+                                         lockdep_is_held(&bp->cnic_mutex));
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);
@@ -9976,7 +10344,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                int count = ctl->data.credit.credit_count;
 
                smp_mb__before_atomic_inc();
-               atomic_add(count, &bp->spq_left);
+               atomic_add(count, &bp->cq_spq_left);
                smp_mb__after_atomic_inc();
                break;
        }
@@ -10072,6 +10440,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+       /* If both iSCSI and FCoE are disabled - return NULL in
+        * order to indicate to CNIC that it should not try to work
+        * with this device.
+        */
+       if (NO_ISCSI(bp) && NO_FCOE(bp))
+               return NULL;
+
        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
@@ -10092,6 +10467,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 
+       if (NO_ISCSI_OOO(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+       if (NO_ISCSI(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+       if (NO_FCOE(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
        DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
                         "starting cid %d\n",
           cp->ctx_blk_size,
index e01330b..1c89f19 100644 (file)
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_PMA_REG_8727_PCS_OPT_CTRL         0xc808
 #define MDIO_PMA_REG_8727_GPIO_CTRL            0xc80e
 #define MDIO_PMA_REG_8727_PCS_GP               0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG          0xc8e4
 
 #define MDIO_AN_REG_8727_MISC_CTRL             0x8309
 
index bda60d5..3445ded 100644 (file)
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
        if (unlikely(bp->panic))
                return;
 
+       bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
        /* Protect a state change flow */
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
        spin_unlock_bh(&bp->stats_lock);
 
-       bnx2x_stats_stm[state][event].action(bp);
-
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
index 0e2737e..3c5c014 100644 (file)
@@ -6,6 +6,9 @@ obj-$(CONFIG_BONDING) += bonding.o
 
 bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
 
+proc-$(CONFIG_PROC_FS) += bond_procfs.o
+bonding-objs += $(proc-y)
+
 ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
 bonding-objs += $(ipv6-y)
 
index 171782e..a5d5d0b 100644 (file)
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
 }
 
 /**
- * __get_rx_machine_lock - lock the port's RX machine
+ * __get_state_machine_lock - lock the port's state machines
  * @port: the port we're looking at
  *
  */
-static inline void __get_rx_machine_lock(struct port *port)
+static inline void __get_state_machine_lock(struct port *port)
 {
-       spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+       spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 /**
- * __release_rx_machine_lock - unlock the port's RX machine
+ * __release_state_machine_lock - unlock the port's state machines
  * @port: the port we're looking at
  *
  */
-static inline void __release_rx_machine_lock(struct port *port)
+static inline void __release_state_machine_lock(struct port *port)
 {
-       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 /**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
 }
 
 /**
- * __initialize_port_locks - initialize a port's RX machine spinlock
+ * __initialize_port_locks - initialize a port's STATE machine spinlock
  * @port: the port we're looking at
  *
  */
 static inline void __initialize_port_locks(struct port *port)
 {
        // make sure it isn't called twice
-       spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+       spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 //conversions
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 {
        rx_states_t last_state;
 
-       // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
-       __get_rx_machine_lock(port);
-
        // keep current State Machine state to compare later if it was changed
        last_state = port->sm_rx_state;
 
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                                pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
                                       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
                                       port->slave->dev->master->name, port->slave->dev->name);
-                               __release_rx_machine_lock(port);
                                return;
                        }
                        __update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        break;
                }
        }
-       __release_rx_machine_lock(port);
 }
 
 /**
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                        goto re_arm;
                }
 
+               /* Lock around state machines to protect data accessed
+                * by all (e.g., port->sm_vars).  ad_rx_machine may run
+                * concurrently due to incoming LACPDU.
+                */
+               __get_state_machine_lock(port);
+
                ad_rx_machine(NULL, port);
                ad_periodic_machine(port);
                ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                // turn off the BEGIN bit, since we already handled it
                if (port->sm_vars & AD_PORT_BEGIN)
                        port->sm_vars &= ~AD_PORT_BEGIN;
+
+               __release_state_machine_lock(port);
        }
 
 re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
                case AD_TYPE_LACPDU:
                        pr_debug("Received LACPDU on port %d\n",
                                 port->actor_port_number);
+                       /* Protect against concurrent state machines */
+                       __get_state_machine_lock(port);
                        ad_rx_machine(lacpdu, port);
+                       __release_state_machine_lock(port);
                        break;
 
                case AD_TYPE_MARKER:
@@ -2470,6 +2476,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
        if (!(dev->flags & IFF_MASTER))
                goto out;
 
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto out;
+
        if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
                goto out;
 
index 2c46a15..b28baff 100644 (file)
@@ -264,7 +264,8 @@ struct ad_bond_info {
 struct ad_slave_info {
        struct aggregator aggregator;       // 802.3ad aggregator structure
        struct port port;                   // 802.3ad port structure
-       spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt
+       spinlock_t state_machine_lock; /* mutex state machines vs.
+                                         incoming LACPDU */
        u16 id;
 };
 
index f4e638c..9bc5de3 100644 (file)
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
                goto out;
        }
 
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto out;
+
        if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
                goto out;
 
@@ -600,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 
        _lock_rx_hashtbl(bond);
 
-       hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
+       hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
        client_info = &(bond_info->rx_hashtbl[hash_index]);
 
        if (client_info->assigned) {
index b1025b8..3ad4f50 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/netpoll.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/smp.h>
 #include <linux/if_ether.h>
 #include <net/arp.h>
@@ -174,9 +171,6 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
 atomic_t netpoll_block_tx = ATOMIC_INIT(0);
 #endif
 
-static const char * const version =
-       DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
-
 int bond_net_id __read_mostly;
 
 static __be32 arp_target[BOND_MAX_ARP_TARGETS];
@@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev);
 
 /*---------------------------- General routines -----------------------------*/
 
-static const char *bond_mode_name(int mode)
+const char *bond_mode_name(int mode)
 {
        static const char *names[] = {
                [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
@@ -424,15 +418,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
        skb->priority = 1;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
-               struct netpoll *np = bond->dev->npinfo->netpoll;
-               slave_dev->npinfo = bond->dev->npinfo;
-               slave_dev->priv_flags |= IFF_IN_NETPOLL;
-               netpoll_send_skb_on_dev(np, skb, slave_dev);
-               slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
-       } else
-#endif
+       if (unlikely(netpoll_tx_running(slave_dev)))
+               bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+       else
                dev_queue_xmit(skb);
 
        return 0;
@@ -1288,63 +1276,105 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * You must hold read lock on bond->lock before calling this.
- */
-static bool slaves_support_netpoll(struct net_device *bond_dev)
+static inline int slave_enable_netpoll(struct slave *slave)
 {
-       struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
-       int i = 0;
-       bool ret = true;
+       struct netpoll *np;
+       int err = 0;
 
-       bond_for_each_slave(bond, slave, i) {
-               if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
-                   !slave->dev->netdev_ops->ndo_poll_controller)
-                       ret = false;
+       np = kzalloc(sizeof(*np), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!np)
+               goto out;
+
+       np->dev = slave->dev;
+       err = __netpoll_setup(np);
+       if (err) {
+               kfree(np);
+               goto out;
        }
-       return i != 0 && ret;
+       slave->np = np;
+out:
+       return err;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+       struct netpoll *np = slave->np;
+
+       if (!np)
+               return;
+
+       slave->np = NULL;
+       synchronize_rcu_bh();
+       __netpoll_cleanup(np);
+       kfree(np);
+}
+static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
+{
+       if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
+               return false;
+       if (!slave_dev->netdev_ops->ndo_poll_controller)
+               return false;
+       return true;
 }
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
-       struct bonding *bond = netdev_priv(bond_dev);
+}
+
+static void __bond_netpoll_cleanup(struct bonding *bond)
+{
        struct slave *slave;
        int i;
 
-       bond_for_each_slave(bond, slave, i) {
-               if (slave->dev && IS_UP(slave->dev))
-                       netpoll_poll_dev(slave->dev);
-       }
+       bond_for_each_slave(bond, slave, i)
+               if (IS_UP(slave->dev))
+                       slave_disable_netpoll(slave);
 }
-
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+
+       read_lock(&bond->lock);
+       __bond_netpoll_cleanup(bond);
+       read_unlock(&bond->lock);
+}
+
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+       struct bonding *bond = netdev_priv(dev);
        struct slave *slave;
-       const struct net_device_ops *ops;
-       int i;
+       int i, err = 0;
 
        read_lock(&bond->lock);
-       bond_dev->npinfo = NULL;
        bond_for_each_slave(bond, slave, i) {
-               if (slave->dev) {
-                       ops = slave->dev->netdev_ops;
-                       if (ops->ndo_netpoll_cleanup)
-                               ops->ndo_netpoll_cleanup(slave->dev);
-                       else
-                               slave->dev->npinfo = NULL;
+               if (!IS_UP(slave->dev))
+                       continue;
+               err = slave_enable_netpoll(slave);
+               if (err) {
+                       __bond_netpoll_cleanup(bond);
+                       break;
                }
        }
        read_unlock(&bond->lock);
+       return err;
 }
 
-#else
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+       return bond->dev->npinfo;
+}
 
+#else
+static inline int slave_enable_netpoll(struct slave *slave)
+{
+       return 0;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+}
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
 }
-
 #endif
 
 /*---------------------------------- IOCTL ----------------------------------*/
@@ -1372,8 +1402,8 @@ static int bond_compute_features(struct bonding *bond)
 {
        struct slave *slave;
        struct net_device *bond_dev = bond->dev;
-       unsigned long features = bond_dev->features;
-       unsigned long vlan_features = 0;
+       u32 features = bond_dev->features;
+       u32 vlan_features = 0;
        unsigned short max_hard_header_len = max((u16)ETH_HLEN,
                                                bond_dev->hard_header_len);
        int i;
@@ -1400,8 +1430,8 @@ static int bond_compute_features(struct bonding *bond)
 
 done:
        features |= (bond_dev->features & BOND_VLAN_FEATURES);
-       bond_dev->features = netdev_fix_features(features, NULL);
-       bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+       bond_dev->features = netdev_fix_features(bond_dev, features);
+       bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
        bond_dev->hard_header_len = max_hard_header_len;
 
        return 0;
@@ -1423,6 +1453,71 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
        bond->setup_by_slave = 1;
 }
 
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
+ */
+static bool bond_should_deliver_exact_match(struct sk_buff *skb,
+                                           struct net_device *slave_dev,
+                                           struct net_device *bond_dev)
+{
+       if (slave_dev->priv_flags & IFF_SLAVE_INACTIVE) {
+               if (slave_dev->priv_flags & IFF_SLAVE_NEEDARP &&
+                   skb->protocol == __cpu_to_be16(ETH_P_ARP))
+                       return false;
+
+               if (bond_dev->priv_flags & IFF_MASTER_ALB &&
+                   skb->pkt_type != PACKET_BROADCAST &&
+                   skb->pkt_type != PACKET_MULTICAST)
+                               return false;
+
+               if (bond_dev->priv_flags & IFF_MASTER_8023AD &&
+                   skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+                       return false;
+
+               return true;
+       }
+       return false;
+}
+
+static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
+{
+       struct net_device *slave_dev;
+       struct net_device *bond_dev;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return NULL;
+       slave_dev = skb->dev;
+       bond_dev = ACCESS_ONCE(slave_dev->master);
+       if (unlikely(!bond_dev))
+               return skb;
+
+       if (bond_dev->priv_flags & IFF_MASTER_ARPMON)
+               slave_dev->last_rx = jiffies;
+
+       if (bond_should_deliver_exact_match(skb, slave_dev, bond_dev)) {
+               skb->deliver_no_wcard = 1;
+               return skb;
+       }
+
+       skb->dev = bond_dev;
+
+       if (bond_dev->priv_flags & IFF_MASTER_ALB &&
+           bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+           skb->pkt_type == PACKET_HOST) {
+
+               if (unlikely(skb_cow_head(skb,
+                                         skb->data - skb_mac_header(skb)))) {
+                       kfree_skb(skb);
+                       return NULL;
+               }
+               memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
+       }
+
+       return skb;
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1594,16 +1689,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       res = netdev_set_master(slave_dev, bond_dev);
+       res = netdev_set_bond_master(slave_dev, bond_dev);
        if (res) {
-               pr_debug("Error %d calling netdev_set_master\n", res);
+               pr_debug("Error %d calling netdev_set_bond_master\n", res);
                goto err_restore_mac;
        }
+       res = netdev_rx_handler_register(slave_dev, bond_handle_frame, NULL);
+       if (res) {
+               pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+               goto err_unset_master;
+       }
+
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
                pr_debug("Opening slave %s failed\n", slave_dev->name);
-               goto err_unset_master;
+               goto err_unreg_rxhandler;
        }
 
        new_slave->dev = slave_dev;
@@ -1782,17 +1883,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        bond_set_carrier(bond);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       if (slaves_support_netpoll(bond_dev)) {
-               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-               if (bond_dev->npinfo)
-                       slave_dev->npinfo = bond_dev->npinfo;
-       } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-               bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-               pr_info("New slave device %s does not support netpoll\n",
-                       slave_dev->name);
-               pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+       slave_dev->npinfo = bond_netpoll_info(bond);
+       if (slave_dev->npinfo) {
+               if (slave_enable_netpoll(new_slave)) {
+                       read_unlock(&bond->lock);
+                       pr_info("Error, %s: master_dev is using netpoll, "
+                                "but new slave device does not support netpoll.\n",
+                                bond_dev->name);
+                       res = -EBUSY;
+                       goto err_close;
+               }
        }
 #endif
+
        read_unlock(&bond->lock);
 
        res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1811,8 +1914,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 err_close:
        dev_close(slave_dev);
 
+err_unreg_rxhandler:
+       netdev_rx_handler_unregister(slave_dev);
+
 err_unset_master:
-       netdev_set_master(slave_dev, NULL);
+       netdev_set_bond_master(slave_dev, NULL);
 
 err_restore_mac:
        if (!bond->params.fail_over_mac) {
@@ -1992,19 +2098,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
                netif_addr_unlock_bh(bond_dev);
        }
 
-       netdev_set_master(slave_dev, NULL);
+       netdev_rx_handler_unregister(slave_dev);
+       netdev_set_bond_master(slave_dev, NULL);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       read_lock_bh(&bond->lock);
-
-       if (slaves_support_netpoll(bond_dev))
-               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-       read_unlock_bh(&bond->lock);
-       if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
-               slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
-       else
-               slave_dev->npinfo = NULL;
-#endif
+       slave_disable_netpoll(slave);
 
        /* close slave before restoring its mac address */
        dev_close(slave_dev);
@@ -2039,6 +2136,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
 
        ret = bond_release(bond_dev, slave_dev);
        if ((ret == 0) && (bond->slave_cnt == 0)) {
+               bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                pr_info("%s: destroying bond %s.\n",
                        bond_dev->name, bond_dev->name);
                unregister_netdevice(bond_dev);
@@ -2114,7 +2212,10 @@ static int bond_release_all(struct net_device *bond_dev)
                        netif_addr_unlock_bh(bond_dev);
                }
 
-               netdev_set_master(slave_dev, NULL);
+               netdev_rx_handler_unregister(slave_dev);
+               netdev_set_bond_master(slave_dev, NULL);
+
+               slave_disable_netpoll(slave);
 
                /* close slave before restoring its mac address */
                dev_close(slave_dev);
@@ -2571,11 +2672,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       int i, vlan_id, rv;
+       int i, vlan_id;
        __be32 *targets = bond->params.arp_targets;
        struct vlan_entry *vlan;
        struct net_device *vlan_dev;
-       struct flowi fl;
        struct rtable *rt;
 
        for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
@@ -2594,15 +2694,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 * determine which VLAN interface would be used, so we
                 * can tag the ARP with the proper VLAN tag.
                 */
-               memset(&fl, 0, sizeof(fl));
-               fl.fl4_dst = targets[i];
-               fl.fl4_tos = RTO_ONLINK;
-
-               rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
-               if (rv) {
+               rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
+                                    RTO_ONLINK, 0);
+               if (IS_ERR(rt)) {
                        if (net_ratelimit()) {
                                pr_warning("%s: no route to arp_ip_target %pI4\n",
-                                          bond->dev->name, &fl.fl4_dst);
+                                          bond->dev->name, &targets[i]);
                        }
                        continue;
                }
@@ -2638,7 +2735,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 
                if (net_ratelimit()) {
                        pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
-                                  bond->dev->name, &fl.fl4_dst,
+                                  bond->dev->name, &targets[i],
                                   rt->dst.dev ? rt->dst.dev->name : "NULL");
                }
                ip_rt_put(rt);
@@ -2733,6 +2830,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
        if (!slave || !slave_do_arp_validate(bond, slave))
                goto out_unlock;
 
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto out_unlock;
+
        if (!pskb_may_pull(skb, arp_hdr_len(dev)))
                goto out_unlock;
 
@@ -3178,299 +3279,6 @@ out:
        read_unlock(&bond->lock);
 }
 
-/*------------------------------ proc/seq_file-------------------------------*/
-
-#ifdef CONFIG_PROC_FS
-
-static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
-       __acquires(&bond->lock)
-{
-       struct bonding *bond = seq->private;
-       loff_t off = 0;
-       struct slave *slave;
-       int i;
-
-       /* make sure the bond won't be taken away */
-       rcu_read_lock();
-       read_lock(&bond->lock);
-
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       bond_for_each_slave(bond, slave, i) {
-               if (++off == *pos)
-                       return slave;
-       }
-
-       return NULL;
-}
-
-static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       struct bonding *bond = seq->private;
-       struct slave *slave = v;
-
-       ++*pos;
-       if (v == SEQ_START_TOKEN)
-               return bond->first_slave;
-
-       slave = slave->next;
-
-       return (slave == bond->first_slave) ? NULL : slave;
-}
-
-static void bond_info_seq_stop(struct seq_file *seq, void *v)
-       __releases(&bond->lock)
-       __releases(RCU)
-{
-       struct bonding *bond = seq->private;
-
-       read_unlock(&bond->lock);
-       rcu_read_unlock();
-}
-
-static void bond_info_show_master(struct seq_file *seq)
-{
-       struct bonding *bond = seq->private;
-       struct slave *curr;
-       int i;
-
-       read_lock(&bond->curr_slave_lock);
-       curr = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
-       seq_printf(seq, "Bonding Mode: %s",
-                  bond_mode_name(bond->params.mode));
-
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
-           bond->params.fail_over_mac)
-               seq_printf(seq, " (fail_over_mac %s)",
-                  fail_over_mac_tbl[bond->params.fail_over_mac].modename);
-
-       seq_printf(seq, "\n");
-
-       if (bond->params.mode == BOND_MODE_XOR ||
-               bond->params.mode == BOND_MODE_8023AD) {
-               seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
-                       xmit_hashtype_tbl[bond->params.xmit_policy].modename,
-                       bond->params.xmit_policy);
-       }
-
-       if (USES_PRIMARY(bond->params.mode)) {
-               seq_printf(seq, "Primary Slave: %s",
-                          (bond->primary_slave) ?
-                          bond->primary_slave->dev->name : "None");
-               if (bond->primary_slave)
-                       seq_printf(seq, " (primary_reselect %s)",
-                  pri_reselect_tbl[bond->params.primary_reselect].modename);
-
-               seq_printf(seq, "\nCurrently Active Slave: %s\n",
-                          (curr) ? curr->dev->name : "None");
-       }
-
-       seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
-                  "up" : "down");
-       seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
-       seq_printf(seq, "Up Delay (ms): %d\n",
-                  bond->params.updelay * bond->params.miimon);
-       seq_printf(seq, "Down Delay (ms): %d\n",
-                  bond->params.downdelay * bond->params.miimon);
-
-
-       /* ARP information */
-       if (bond->params.arp_interval > 0) {
-               int printed = 0;
-               seq_printf(seq, "ARP Polling Interval (ms): %d\n",
-                               bond->params.arp_interval);
-
-               seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
-
-               for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
-                       if (!bond->params.arp_targets[i])
-                               break;
-                       if (printed)
-                               seq_printf(seq, ",");
-                       seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
-                       printed = 1;
-               }
-               seq_printf(seq, "\n");
-       }
-
-       if (bond->params.mode == BOND_MODE_8023AD) {
-               struct ad_info ad_info;
-
-               seq_puts(seq, "\n802.3ad info\n");
-               seq_printf(seq, "LACP rate: %s\n",
-                          (bond->params.lacp_fast) ? "fast" : "slow");
-               seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
-                          ad_select_tbl[bond->params.ad_select].modename);
-
-               if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-                       seq_printf(seq, "bond %s has no active aggregator\n",
-                                  bond->dev->name);
-               } else {
-                       seq_printf(seq, "Active Aggregator Info:\n");
-
-                       seq_printf(seq, "\tAggregator ID: %d\n",
-                                  ad_info.aggregator_id);
-                       seq_printf(seq, "\tNumber of ports: %d\n",
-                                  ad_info.ports);
-                       seq_printf(seq, "\tActor Key: %d\n",
-                                  ad_info.actor_key);
-                       seq_printf(seq, "\tPartner Key: %d\n",
-                                  ad_info.partner_key);
-                       seq_printf(seq, "\tPartner Mac Address: %pM\n",
-                                  ad_info.partner_system);
-               }
-       }
-}
-
-static void bond_info_show_slave(struct seq_file *seq,
-                                const struct slave *slave)
-{
-       struct bonding *bond = seq->private;
-
-       seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-       seq_printf(seq, "MII Status: %s\n",
-                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
-       seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
-       seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
-       seq_printf(seq, "Link Failure Count: %u\n",
-                  slave->link_failure_count);
-
-       seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
-
-       if (bond->params.mode == BOND_MODE_8023AD) {
-               const struct aggregator *agg
-                       = SLAVE_AD_INFO(slave).port.aggregator;
-
-               if (agg)
-                       seq_printf(seq, "Aggregator ID: %d\n",
-                                  agg->aggregator_identifier);
-               else
-                       seq_puts(seq, "Aggregator ID: N/A\n");
-       }
-       seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
-}
-
-static int bond_info_seq_show(struct seq_file *seq, void *v)
-{
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "%s\n", version);
-               bond_info_show_master(seq);
-       } else
-               bond_info_show_slave(seq, v);
-
-       return 0;
-}
-
-static const struct seq_operations bond_info_seq_ops = {
-       .start = bond_info_seq_start,
-       .next  = bond_info_seq_next,
-       .stop  = bond_info_seq_stop,
-       .show  = bond_info_seq_show,
-};
-
-static int bond_info_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq;
-       struct proc_dir_entry *proc;
-       int res;
-
-       res = seq_open(file, &bond_info_seq_ops);
-       if (!res) {
-               /* recover the pointer buried in proc_dir_entry data */
-               seq = file->private_data;
-               proc = PDE(inode);
-               seq->private = proc->data;
-       }
-
-       return res;
-}
-
-static const struct file_operations bond_info_fops = {
-       .owner   = THIS_MODULE,
-       .open    = bond_info_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-static void bond_create_proc_entry(struct bonding *bond)
-{
-       struct net_device *bond_dev = bond->dev;
-       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-
-       if (bn->proc_dir) {
-               bond->proc_entry = proc_create_data(bond_dev->name,
-                                                   S_IRUGO, bn->proc_dir,
-                                                   &bond_info_fops, bond);
-               if (bond->proc_entry == NULL)
-                       pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
-                                  DRV_NAME, bond_dev->name);
-               else
-                       memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
-       }
-}
-
-static void bond_remove_proc_entry(struct bonding *bond)
-{
-       struct net_device *bond_dev = bond->dev;
-       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-
-       if (bn->proc_dir && bond->proc_entry) {
-               remove_proc_entry(bond->proc_file_name, bn->proc_dir);
-               memset(bond->proc_file_name, 0, IFNAMSIZ);
-               bond->proc_entry = NULL;
-       }
-}
-
-/* Create the bonding directory under /proc/net, if doesn't exist yet.
- * Caller must hold rtnl_lock.
- */
-static void __net_init bond_create_proc_dir(struct bond_net *bn)
-{
-       if (!bn->proc_dir) {
-               bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
-               if (!bn->proc_dir)
-                       pr_warning("Warning: cannot create /proc/net/%s\n",
-                                  DRV_NAME);
-       }
-}
-
-/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
- */
-static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
-{
-       if (bn->proc_dir) {
-               remove_proc_entry(DRV_NAME, bn->net->proc_net);
-               bn->proc_dir = NULL;
-       }
-}
-
-#else /* !CONFIG_PROC_FS */
-
-static void bond_create_proc_entry(struct bonding *bond)
-{
-}
-
-static void bond_remove_proc_entry(struct bonding *bond)
-{
-}
-
-static inline void bond_create_proc_dir(struct bond_net *bn)
-{
-}
-
-static inline void bond_destroy_proc_dir(struct bond_net *bn)
-{
-}
-
-#endif /* CONFIG_PROC_FS */
-
-
 /*-------------------------- netdev event handling --------------------------*/
 
 /*
@@ -4650,9 +4458,12 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
        .ndo_poll_controller    = bond_poll_controller,
 #endif
+       .ndo_add_slave          = bond_enslave,
+       .ndo_del_slave          = bond_release,
 };
 
 static void bond_destructor(struct net_device *bond_dev)
@@ -5271,7 +5082,7 @@ static int __init bonding_init(void)
        int i;
        int res;
 
-       pr_info("%s", version);
+       pr_info("%s", bond_version);
 
        res = bond_check_params(&bonding_defaults);
        if (res)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
new file mode 100644 (file)
index 0000000..c32ff55
--- /dev/null
@@ -0,0 +1,275 @@
+#include <linux/proc_fs.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "bonding.h"
+
+
+extern const char *bond_mode_name(int mode);
+
+static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU)
+       __acquires(&bond->lock)
+{
+       struct bonding *bond = seq->private;
+       loff_t off = 0;
+       struct slave *slave;
+       int i;
+
+       /* make sure the bond won't be taken away */
+       rcu_read_lock();
+       read_lock(&bond->lock);
+
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       bond_for_each_slave(bond, slave, i) {
+               if (++off == *pos)
+                       return slave;
+       }
+
+       return NULL;
+}
+
+static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct bonding *bond = seq->private;
+       struct slave *slave = v;
+
+       ++*pos;
+       if (v == SEQ_START_TOKEN)
+               return bond->first_slave;
+
+       slave = slave->next;
+
+       return (slave == bond->first_slave) ? NULL : slave;
+}
+
+static void bond_info_seq_stop(struct seq_file *seq, void *v)
+       __releases(&bond->lock)
+       __releases(RCU)
+{
+       struct bonding *bond = seq->private;
+
+       read_unlock(&bond->lock);
+       rcu_read_unlock();
+}
+
+static void bond_info_show_master(struct seq_file *seq)
+{
+       struct bonding *bond = seq->private;
+       struct slave *curr;
+       int i;
+
+       read_lock(&bond->curr_slave_lock);
+       curr = bond->curr_active_slave;
+       read_unlock(&bond->curr_slave_lock);
+
+       seq_printf(seq, "Bonding Mode: %s",
+                  bond_mode_name(bond->params.mode));
+
+       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+           bond->params.fail_over_mac)
+               seq_printf(seq, " (fail_over_mac %s)",
+                  fail_over_mac_tbl[bond->params.fail_over_mac].modename);
+
+       seq_printf(seq, "\n");
+
+       if (bond->params.mode == BOND_MODE_XOR ||
+               bond->params.mode == BOND_MODE_8023AD) {
+               seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
+                       xmit_hashtype_tbl[bond->params.xmit_policy].modename,
+                       bond->params.xmit_policy);
+       }
+
+       if (USES_PRIMARY(bond->params.mode)) {
+               seq_printf(seq, "Primary Slave: %s",
+                          (bond->primary_slave) ?
+                          bond->primary_slave->dev->name : "None");
+               if (bond->primary_slave)
+                       seq_printf(seq, " (primary_reselect %s)",
+                  pri_reselect_tbl[bond->params.primary_reselect].modename);
+
+               seq_printf(seq, "\nCurrently Active Slave: %s\n",
+                          (curr) ? curr->dev->name : "None");
+       }
+
+       seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
+                  "up" : "down");
+       seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
+       seq_printf(seq, "Up Delay (ms): %d\n",
+                  bond->params.updelay * bond->params.miimon);
+       seq_printf(seq, "Down Delay (ms): %d\n",
+                  bond->params.downdelay * bond->params.miimon);
+
+
+       /* ARP information */
+       if (bond->params.arp_interval > 0) {
+               int printed = 0;
+               seq_printf(seq, "ARP Polling Interval (ms): %d\n",
+                               bond->params.arp_interval);
+
+               seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
+
+               for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
+                       if (!bond->params.arp_targets[i])
+                               break;
+                       if (printed)
+                               seq_printf(seq, ",");
+                       seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
+                       printed = 1;
+               }
+               seq_printf(seq, "\n");
+       }
+
+       if (bond->params.mode == BOND_MODE_8023AD) {
+               struct ad_info ad_info;
+
+               seq_puts(seq, "\n802.3ad info\n");
+               seq_printf(seq, "LACP rate: %s\n",
+                          (bond->params.lacp_fast) ? "fast" : "slow");
+               seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
+                          ad_select_tbl[bond->params.ad_select].modename);
+
+               if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+                       seq_printf(seq, "bond %s has no active aggregator\n",
+                                  bond->dev->name);
+               } else {
+                       seq_printf(seq, "Active Aggregator Info:\n");
+
+                       seq_printf(seq, "\tAggregator ID: %d\n",
+                                  ad_info.aggregator_id);
+                       seq_printf(seq, "\tNumber of ports: %d\n",
+                                  ad_info.ports);
+                       seq_printf(seq, "\tActor Key: %d\n",
+                                  ad_info.actor_key);
+                       seq_printf(seq, "\tPartner Key: %d\n",
+                                  ad_info.partner_key);
+                       seq_printf(seq, "\tPartner Mac Address: %pM\n",
+                                  ad_info.partner_system);
+               }
+       }
+}
+
+static void bond_info_show_slave(struct seq_file *seq,
+                                const struct slave *slave)
+{
+       struct bonding *bond = seq->private;
+
+       seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
+       seq_printf(seq, "MII Status: %s\n",
+                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
+       seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
+       seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
+       seq_printf(seq, "Link Failure Count: %u\n",
+                  slave->link_failure_count);
+
+       seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+
+       if (bond->params.mode == BOND_MODE_8023AD) {
+               const struct aggregator *agg
+                       = SLAVE_AD_INFO(slave).port.aggregator;
+
+               if (agg)
+                       seq_printf(seq, "Aggregator ID: %d\n",
+                                  agg->aggregator_identifier);
+               else
+                       seq_puts(seq, "Aggregator ID: N/A\n");
+       }
+       seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+}
+
+static int bond_info_seq_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(seq, "%s\n", bond_version);
+               bond_info_show_master(seq);
+       } else
+               bond_info_show_slave(seq, v);
+
+       return 0;
+}
+
+static const struct seq_operations bond_info_seq_ops = {
+       .start = bond_info_seq_start,
+       .next  = bond_info_seq_next,
+       .stop  = bond_info_seq_stop,
+       .show  = bond_info_seq_show,
+};
+
+static int bond_info_open(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       struct proc_dir_entry *proc;
+       int res;
+
+       res = seq_open(file, &bond_info_seq_ops);
+       if (!res) {
+               /* recover the pointer buried in proc_dir_entry data */
+               seq = file->private_data;
+               proc = PDE(inode);
+               seq->private = proc->data;
+       }
+
+       return res;
+}
+
+static const struct file_operations bond_info_fops = {
+       .owner   = THIS_MODULE,
+       .open    = bond_info_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+void bond_create_proc_entry(struct bonding *bond)
+{
+       struct net_device *bond_dev = bond->dev;
+       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+       if (bn->proc_dir) {
+               bond->proc_entry = proc_create_data(bond_dev->name,
+                                                   S_IRUGO, bn->proc_dir,
+                                                   &bond_info_fops, bond);
+               if (bond->proc_entry == NULL)
+                       pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
+                                  DRV_NAME, bond_dev->name);
+               else
+                       memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
+       }
+}
+
+void bond_remove_proc_entry(struct bonding *bond)
+{
+       struct net_device *bond_dev = bond->dev;
+       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+       if (bn->proc_dir && bond->proc_entry) {
+               remove_proc_entry(bond->proc_file_name, bn->proc_dir);
+               memset(bond->proc_file_name, 0, IFNAMSIZ);
+               bond->proc_entry = NULL;
+       }
+}
+
+/* Create the bonding directory under /proc/net, if it doesn't exist yet.
+ * Caller must hold rtnl_lock.
+ */
+void __net_init bond_create_proc_dir(struct bond_net *bn)
+{
+       if (!bn->proc_dir) {
+               bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
+               if (!bn->proc_dir)
+                       pr_warning("Warning: cannot create /proc/net/%s\n",
+                                  DRV_NAME);
+       }
+}
+
+/* Destroy the bonding directory under /proc/net, if empty.
+ * Caller must hold rtnl_lock.
+ */
+void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
+{
+       if (bn->proc_dir) {
+               remove_proc_entry(DRV_NAME, bn->net->proc_net);
+               bn->proc_dir = NULL;
+       }
+}
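
The seq_file handlers above are what back /proc/net/bonding/<iface>: the master
summary is printed for SEQ_START_TOKEN and one block follows per slave. Below is a
minimal user-space sketch that dumps the file; the default interface name "bond0"
is only an assumed example, not something this patch creates.

/* Illustrative sketch, not part of the patch: dump the bonding proc file
 * generated by the seq_file handlers above. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "bond0";	/* assumed default */
	char path[128], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/net/bonding/%s", ifname);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* master info first, then one "Slave Interface:" block per slave */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}
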
index 8fd0174..72bb0f6 100644 (file)
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
                        bond->dev->name, new_value);
        }
 out:
-       return count;
+       return ret;
 }
 static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
                   bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                }
        }
 out:
-       return count;
+       return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
                   bonding_show_slaves_active, bonding_store_slaves_active);
index 31fe980..c4e2343 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/if_bonding.h>
 #include <linux/cpumask.h>
 #include <linux/in6.h>
+#include <linux/netpoll.h>
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
@@ -28,6 +29,8 @@
 #define DRV_NAME       "bonding"
 #define DRV_DESCRIPTION        "Ethernet Channel Bonding Driver"
 
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
 #define BOND_MAX_ARP_TARGETS   16
 
 #define IS_UP(dev)                                        \
@@ -132,7 +135,7 @@ static inline void unblock_netpoll_tx(void)
 
 static inline int is_netpoll_tx_blocked(struct net_device *dev)
 {
-       if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
+       if (unlikely(netpoll_tx_running(dev)))
                return atomic_read(&netpoll_block_tx);
        return 0;
 }
@@ -198,6 +201,9 @@ struct slave {
        u16    queue_id;
        struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
        struct tlb_slave_info tlb_info;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll *np;
+#endif
 };
 
 /*
@@ -265,7 +271,8 @@ struct bonding {
  *
  * Caller must hold bond lock for read
  */
-static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
+                                                 struct net_device *slave_dev)
 {
        struct slave *slave = NULL;
        int i;
@@ -276,7 +283,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
                }
        }
 
-       return 0;
+       return NULL;
 }
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -323,6 +330,22 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
        return slave->dev->last_rx;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+                                        struct sk_buff *skb)
+{
+       struct netpoll *np = slave->np;
+
+       if (np)
+               netpoll_send_skb(np, skb);
+}
+#else
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+                                        struct sk_buff *skb)
+{
+}
+#endif
+
 static inline void bond_set_slave_inactive_flags(struct slave *slave)
 {
        struct bonding *bond = netdev_priv(slave->dev->master);
@@ -393,6 +416,30 @@ struct bond_net {
 #endif
 };
 
+#ifdef CONFIG_PROC_FS
+void bond_create_proc_entry(struct bonding *bond);
+void bond_remove_proc_entry(struct bonding *bond);
+void bond_create_proc_dir(struct bond_net *bn);
+void bond_destroy_proc_dir(struct bond_net *bn);
+#else
+static inline void bond_create_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_create_proc_dir(struct bond_net *bn)
+{
+}
+
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
+{
+}
+#endif
+
+
 /* exported from bond_main.c */
 extern int bond_net_id;
 extern const struct bond_parm_tbl bond_lacp_tbl[];
index d5a9db6..1d699e3 100644 (file)
@@ -23,7 +23,7 @@ config CAN_SLCAN
 
          As only the sending and receiving of CAN frames is implemented, this
          driver should work with the (serial/USB) CAN hardware from:
-         www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+         www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
 
          Userspace tools to attach the SLCAN line discipline (slcan_attach,
          slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -115,8 +115,12 @@ source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
 
+source "drivers/net/can/c_can/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
+source "drivers/net/can/softing/Kconfig"
+
 config CAN_DEBUG_DEVICES
        bool "CAN devices debugging messages"
        depends on CAN
index 07ca159..24ebfe8 100644 (file)
@@ -9,9 +9,11 @@ obj-$(CONFIG_CAN_DEV)          += can-dev.o
 can-dev-y                      := dev.o
 
 obj-y                          += usb/
+obj-y                          += softing/
 
 obj-$(CONFIG_CAN_SJA1000)      += sja1000/
 obj-$(CONFIG_CAN_MSCAN)                += mscan/
+obj-$(CONFIG_CAN_C_CAN)                += c_can/
 obj-$(CONFIG_CAN_AT91)         += at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)      += ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
index 7ef83d0..57d2ffb 100644 (file)
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
 #include <mach/board.h>
 
-#define AT91_NAPI_WEIGHT       12
+#define AT91_NAPI_WEIGHT       11
 
 /*
  * RX/TX Mailbox split
  * don't dare to touch
  */
-#define AT91_MB_RX_NUM         12
+#define AT91_MB_RX_NUM         11
 #define AT91_MB_TX_SHIFT       2
 
-#define AT91_MB_RX_FIRST       0
+#define AT91_MB_RX_FIRST       1
 #define AT91_MB_RX_LAST                (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
 
 #define AT91_MB_RX_MASK(i)     ((1 << (i)) - 1)
 #define AT91_MB_RX_SPLIT       8
 #define AT91_MB_RX_LOW_LAST    (AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK    (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK    (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+                                ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
 
 #define AT91_MB_TX_NUM         (1 << AT91_MB_TX_SHIFT)
 #define AT91_MB_TX_FIRST       (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
 
        struct clk              *clk;
        struct at91_can_data    *pdata;
+
+       canid_t                 mb0_id;
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
        set_mb_mode_prio(priv, mb, mode, 0);
 }
 
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+       u32 reg_mid;
+
+       if (can_id & CAN_EFF_FLAG)
+               reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+       else
+               reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+       return reg_mid;
+}
+
 /*
  * Switch transceiver on or off
  */
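
The at91_can_id_to_reg_mid() helper introduced above packs a CAN ID into the
mailbox ID register layout: extended IDs keep their 29 bits and set the MIDE flag,
standard IDs are shifted up into bits 28..18. A stand-alone sketch of the same
encoding follows; AT91_MID_MIDE is redefined locally for the example (assumed to be
bit 29), and the sample IDs are invented.

/* Illustrative sketch: mirror at91_can_id_to_reg_mid() in user space. */
#include <stdio.h>
#include <linux/can.h>		/* canid_t, CAN_EFF_FLAG, CAN_EFF_MASK, CAN_SFF_MASK */

#define AT91_MID_MIDE	(1 << 29)	/* assumed value, for illustration only */

static unsigned int can_id_to_reg_mid(canid_t can_id)
{
	if (can_id & CAN_EFF_FLAG)
		return (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	return (can_id & CAN_SFF_MASK) << 18;
}

int main(void)
{
	printf("SFF 0x7ff     -> MID 0x%08x\n", can_id_to_reg_mid(0x7ff));
	printf("EFF 0x1abcdef -> MID 0x%08x\n",
	       can_id_to_reg_mid(0x1abcdef | CAN_EFF_FLAG));
	return 0;
}
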
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
 {
        struct at91_priv *priv = netdev_priv(dev);
        unsigned int i;
+       u32 reg_mid;
 
        /*
-        * The first 12 mailboxes are used as a reception FIFO. The
-        * last mailbox is configured with overwrite option. The
-        * overwrite flag indicates a FIFO overflow.
+        * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+        * mailbox is disabled. The next 11 mailboxes are used as a
+        * reception FIFO. The last mailbox is configured with
+        * overwrite option. The overwrite flag indicates a FIFO
+        * overflow.
         */
+       reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+       for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+               set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+               at91_write(priv, AT91_MID(i), reg_mid);
+               at91_write(priv, AT91_MCR(i), 0x0);     /* clear dlc */
+       }
+
        for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
                set_mb_mode(priv, i, AT91_MB_MODE_RX);
        set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
                set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
        /* Reset tx and rx helper pointers */
-       priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+       priv->tx_next = priv->tx_echo = 0;
+       priv->rx_next = AT91_MB_RX_FIRST;
 }
 
 static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
                netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }
-
-       if (cf->can_id & CAN_EFF_FLAG)
-               reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
-       else
-               reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+       reg_mid = at91_can_id_to_reg_mid(cf->can_id);
        reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
                (cf->can_dlc << 16) | AT91_MCR_MTCR;
 
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
  *
  * Theory of Operation:
  *
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. We split
+ * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
  *
  * Like it or not, but the chip always saves a received CAN message
  * into the first free mailbox it finds (starting with the
  * lowest). This makes it very difficult to read the messages in the
  * right order from the chip. This is how we work around that problem:
  *
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
  * rx ints are disabled in the interrupt handler and a napi poll is
  * scheduled. We read the mailbox, but do _not_ reenable the mb (to
  * receive another message).
  *
  *    lower mbxs      upper
- *   ______^______    __^__
- *  /             \  /     \
+ *     ____^______    __^__
+ *    /           \  /     \
  * +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
  * +-+-+-+-+-+-+-+-++-+-+-+-+
  *  0 0 0 0 0 0  0 0 0 0 1 1  \ mail
  *  0 1 2 3 4 5  6 7 8 9 0 1  / box
+ *  ^
+ *  |
+ *   \
+ *     unused, due to chip bug
  *
  * The variable priv->rx_next points to the next mailbox to read a
  * message from. As long as we're in the lower mailboxes we just read the
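
A stand-alone sketch of the scan order the comment above describes: pending
mailboxes are visited from priv->rx_next upward, mailbox 0 is never used, and once
the upper group has been drained the scan wraps back to the first usable mailbox.
The mailbox constants mirror the driver; the pending bitmask is an invented example.

/* Illustrative sketch: replay the RX mailbox scan order. Clearing a pending
 * bit after reading stands in for the driver re-arming that mailbox. */
#include <stdio.h>

#define MB_RX_FIRST	1	/* mailbox 0 is unused due to the chip erratum */
#define MB_RX_LAST	11	/* lower group: 1..7, upper group: 8..11 */

int main(void)
{
	unsigned int pending = (1u << 2) | (1u << 3) | (1u << 9) | (1u << 11);
	unsigned int rx_next = MB_RX_FIRST;
	int passes;

	for (passes = 0; passes < 2; passes++) {	/* second pass = wrap to lower group */
		for (; rx_next <= MB_RX_LAST; rx_next++) {
			if (pending & (1u << rx_next)) {
				printf("read mailbox %u\n", rx_next);
				pending &= ~(1u << rx_next);
			}
		}
		rx_next = MB_RX_FIRST;
	}
	return 0;
}
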
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
                        "order of incoming frames cannot be guaranteed\n");
 
  again:
-       for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
-            mb < AT91_MB_RX_NUM && quota > 0;
+       for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
+            mb < AT91_MB_RX_LAST + 1 && quota > 0;
             reg_sr = at91_read(priv, AT91_SR),
-            mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+            mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
                at91_read_msg(dev, mb);
 
                /* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
        /* upper group completed, look again in lower */
        if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-           quota > 0 && mb >= AT91_MB_RX_NUM) {
-               priv->rx_next = 0;
+           quota > 0 && mb > AT91_MB_RX_LAST) {
+               priv->rx_next = AT91_MB_RX_FIRST;
                goto again;
        }
 
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
        .ndo_start_xmit = at91_start_xmit,
 };
 
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+       if (priv->mb0_id & CAN_EFF_FLAG)
+               return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+       else
+               return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct net_device *ndev = to_net_dev(dev);
+       struct at91_priv *priv = netdev_priv(ndev);
+       unsigned long can_id;
+       ssize_t ret;
+       int err;
+
+       rtnl_lock();
+
+       if (ndev->flags & IFF_UP) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       err = strict_strtoul(buf, 0, &can_id);
+       if (err) {
+               ret = err;
+               goto out;
+       }
+
+       if (can_id & CAN_EFF_FLAG)
+               can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+       else
+               can_id &= CAN_SFF_MASK;
+
+       priv->mb0_id = can_id;
+       ret = count;
+
+ out:
+       rtnl_unlock();
+       return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
+       at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+       &dev_attr_mb0_id.attr,
+       NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+       .attrs = at91_sysfs_attrs,
+};
+
 static int __devinit at91_can_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
        dev->netdev_ops = &at91_netdev_ops;
        dev->irq = irq;
        dev->flags |= IFF_ECHO;
+       dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
        priv = netdev_priv(dev);
        priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
        priv->dev = dev;
        priv->clk = clk;
        priv->pdata = pdev->dev.platform_data;
+       priv->mb0_id = 0x7ff;
 
        netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
 
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644 (file)
index 0000000..ffb9773
--- /dev/null
@@ -0,0 +1,15 @@
+menuconfig CAN_C_CAN
+       tristate "Bosch C_CAN devices"
+       depends on CAN_DEV && HAS_IOMEM
+
+if CAN_C_CAN
+
+config CAN_C_CAN_PLATFORM
+       tristate "Generic Platform Bus based C_CAN driver"
+       ---help---
+         This driver adds support for the C_CAN chips connected to
+         the "platform bus" (the Linux abstraction for devices attached
+         directly to the processor), which can be found on various
+         boards from ST Microelectronics (http://www.st.com)
+         like the SPEAr1310 and SPEAr320 evaluation boards.
+endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644 (file)
index 0000000..9273f6d
--- /dev/null
@@ -0,0 +1,8 @@
+#
+#  Makefile for the Bosch C_CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_C_CAN) += c_can.o
+obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644 (file)
index 0000000..1405078
--- /dev/null
@@ -0,0 +1,1158 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * TX and RX NAPI implementation has been borrowed from at91 CAN driver
+ * written by:
+ * Copyright
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "c_can.h"
+
+/* control register */
+#define CONTROL_TEST           BIT(7)
+#define CONTROL_CCE            BIT(6)
+#define CONTROL_DISABLE_AR     BIT(5)
+#define CONTROL_ENABLE_AR      (0 << 5)
+#define CONTROL_EIE            BIT(3)
+#define CONTROL_SIE            BIT(2)
+#define CONTROL_IE             BIT(1)
+#define CONTROL_INIT           BIT(0)
+
+/* test register */
+#define TEST_RX                        BIT(7)
+#define TEST_TX1               BIT(6)
+#define TEST_TX2               BIT(5)
+#define TEST_LBACK             BIT(4)
+#define TEST_SILENT            BIT(3)
+#define TEST_BASIC             BIT(2)
+
+/* status register */
+#define STATUS_BOFF            BIT(7)
+#define STATUS_EWARN           BIT(6)
+#define STATUS_EPASS           BIT(5)
+#define STATUS_RXOK            BIT(4)
+#define STATUS_TXOK            BIT(3)
+
+/* error counter register */
+#define ERR_CNT_TEC_MASK       0xff
+#define ERR_CNT_TEC_SHIFT      0
+#define ERR_CNT_REC_SHIFT      8
+#define ERR_CNT_REC_MASK       (0x7f << ERR_CNT_REC_SHIFT)
+#define ERR_CNT_RP_SHIFT       15
+#define ERR_CNT_RP_MASK                (0x1 << ERR_CNT_RP_SHIFT)
+
+/* bit-timing register */
+#define BTR_BRP_MASK           0x3f
+#define BTR_BRP_SHIFT          0
+#define BTR_SJW_SHIFT          6
+#define BTR_SJW_MASK           (0x3 << BTR_SJW_SHIFT)
+#define BTR_TSEG1_SHIFT                8
+#define BTR_TSEG1_MASK         (0xf << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT                12
+#define BTR_TSEG2_MASK         (0x7 << BTR_TSEG2_SHIFT)
+
+/* brp extension register */
+#define BRP_EXT_BRPE_MASK      0x0f
+#define BRP_EXT_BRPE_SHIFT     0
+
+/* IFx command request */
+#define IF_COMR_BUSY           BIT(15)
+
+/* IFx command mask */
+#define IF_COMM_WR             BIT(7)
+#define IF_COMM_MASK           BIT(6)
+#define IF_COMM_ARB            BIT(5)
+#define IF_COMM_CONTROL                BIT(4)
+#define IF_COMM_CLR_INT_PND    BIT(3)
+#define IF_COMM_TXRQST         BIT(2)
+#define IF_COMM_DATAA          BIT(1)
+#define IF_COMM_DATAB          BIT(0)
+#define IF_COMM_ALL            (IF_COMM_MASK | IF_COMM_ARB | \
+                               IF_COMM_CONTROL | IF_COMM_TXRQST | \
+                               IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* IFx arbitration */
+#define IF_ARB_MSGVAL          BIT(15)
+#define IF_ARB_MSGXTD          BIT(14)
+#define IF_ARB_TRANSMIT                BIT(13)
+
+/* IFx message control */
+#define IF_MCONT_NEWDAT                BIT(15)
+#define IF_MCONT_MSGLST                BIT(14)
+#define IF_MCONT_CLR_MSGLST    (0 << 14)
+#define IF_MCONT_INTPND                BIT(13)
+#define IF_MCONT_UMASK         BIT(12)
+#define IF_MCONT_TXIE          BIT(11)
+#define IF_MCONT_RXIE          BIT(10)
+#define IF_MCONT_RMTEN         BIT(9)
+#define IF_MCONT_TXRQST                BIT(8)
+#define IF_MCONT_EOB           BIT(7)
+#define IF_MCONT_DLC_MASK      0xf
+
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x)        (((x) & 0xFFFF0000) >> 16)
+
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS    32
+#define C_CAN_MSG_OBJ_RX_NUM   16
+#define C_CAN_MSG_OBJ_TX_NUM   16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST  (C_CAN_MSG_OBJ_RX_FIRST + \
+                               C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST  (C_CAN_MSG_OBJ_TX_FIRST + \
+                               C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST  (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK        (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS    0x0000ffff
+
+/* status interrupt */
+#define STATUS_INTERRUPT       0x8000
+
+/* global interrupt masks */
+#define ENABLE_ALL_INTERRUPTS  1
+#define DISABLE_ALL_INTERRUPTS 0
+
+/* minimum timeout for checking BUSY status */
+#define MIN_TIMEOUT_VALUE      6
+
+/* napi related */
+#define C_CAN_NAPI_WEIGHT      C_CAN_MSG_OBJ_RX_NUM
+
+/* c_can lec values */
+enum c_can_lec_type {
+       LEC_NO_ERROR = 0,
+       LEC_STUFF_ERROR,
+       LEC_FORM_ERROR,
+       LEC_ACK_ERROR,
+       LEC_BIT1_ERROR,
+       LEC_BIT0_ERROR,
+       LEC_CRC_ERROR,
+       LEC_UNUSED,
+};
+
+/*
+ * c_can error types:
+ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
+ */
+enum c_can_bus_error_types {
+       C_CAN_NO_ERROR = 0,
+       C_CAN_BUS_OFF,
+       C_CAN_ERROR_WARNING,
+       C_CAN_ERROR_PASSIVE,
+};
+
+static struct can_bittiming_const c_can_bittiming_const = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 16,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,        /* 6-bit BRP field + 4-bit BRPE field */
+       .brp_inc = 1,
+};
+
+static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+{
+       return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
+                       C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+{
+       return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
+                       C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+{
+       u32 val = priv->read_reg(priv, reg);
+       val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+       return val;
+}
+
+static void c_can_enable_all_interrupts(struct c_can_priv *priv,
+                                               int enable)
+{
+       unsigned int cntrl_save = priv->read_reg(priv,
+                                               &priv->regs->control);
+
+       if (enable)
+               cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
+       else
+               cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+
+       priv->write_reg(priv, &priv->regs->control, cntrl_save);
+}
+
+static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+{
+       int count = MIN_TIMEOUT_VALUE;
+
+       while (count && priv->read_reg(priv,
+                               &priv->regs->ifregs[iface].com_req) &
+                               IF_COMR_BUSY) {
+               count--;
+               udelay(1);
+       }
+
+       if (!count)
+               return 1;
+
+       return 0;
+}
+
+static inline void c_can_object_get(struct net_device *dev,
+                                       int iface, int objno, int mask)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /*
+        * As per the specs, after writing the message object number in the
+        * IF command request register, the transfer between the interface
+        * register and message RAM must complete within 6 CAN-CLK
+        * periods.
+        */
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+                       IFX_WRITE_LOW_16BIT(mask));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+                       IFX_WRITE_LOW_16BIT(objno));
+
+       if (c_can_msg_obj_is_busy(priv, iface))
+               netdev_err(dev, "timed out in object get\n");
+}
+
+static inline void c_can_object_put(struct net_device *dev,
+                                       int iface, int objno, int mask)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /*
+        * As per the specs, after writing the message object number in the
+        * IF command request register, the transfer between the interface
+        * register and message RAM must complete within 6 CAN-CLK
+        * periods.
+        */
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+                       (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+                       IFX_WRITE_LOW_16BIT(objno));
+
+       if (c_can_msg_obj_is_busy(priv, iface))
+               netdev_err(dev, "timed out in object put\n");
+}
+
+static void c_can_write_msg_object(struct net_device *dev,
+                       int iface, struct can_frame *frame, int objno)
+{
+       int i;
+       u16 flags = 0;
+       unsigned int id;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (!(frame->can_id & CAN_RTR_FLAG))
+               flags |= IF_ARB_TRANSMIT;
+
+       if (frame->can_id & CAN_EFF_FLAG) {
+               id = frame->can_id & CAN_EFF_MASK;
+               flags |= IF_ARB_MSGXTD;
+       } else
+               id = ((frame->can_id & CAN_SFF_MASK) << 18);
+
+       flags |= IF_ARB_MSGVAL;
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+                               IFX_WRITE_LOW_16BIT(id));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+                               IFX_WRITE_HIGH_16BIT(id));
+
+       for (i = 0; i < frame->can_dlc; i += 2) {
+               priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+                               frame->data[i] | (frame->data[i + 1] << 8));
+       }
+
+       /* enable interrupt for this message object */
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
+                       frame->can_dlc);
+       c_can_object_put(dev, iface, objno, IF_COMM_ALL);
+}
+
+static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
+                                               int iface, int ctrl_mask,
+                                               int obj)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
+       c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+
+}
+
+static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
+                                               int iface,
+                                               int ctrl_mask)
+{
+       int i;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
+               priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                               ctrl_mask & ~(IF_MCONT_MSGLST |
+                                       IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+               c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
+       }
+}
+
+static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
+                                               int iface, int ctrl_mask,
+                                               int obj)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       ctrl_mask & ~(IF_MCONT_MSGLST |
+                               IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+       c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static void c_can_handle_lost_msg_obj(struct net_device *dev,
+                                       int iface, int objno)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       struct can_frame *frame;
+
+       netdev_err(dev, "msg lost in buffer %d\n", objno);
+
+       c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       IF_MCONT_CLR_MSGLST);
+
+       c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
+
+       /* create an error msg */
+       skb = alloc_can_err_skb(dev, &frame);
+       if (unlikely(!skb))
+               return;
+
+       frame->can_id |= CAN_ERR_CRTL;
+       frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+       stats->rx_errors++;
+       stats->rx_over_errors++;
+
+       netif_receive_skb(skb);
+}
+
+static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+{
+       u16 flags, data;
+       int i;
+       unsigned int val;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       struct can_frame *frame;
+
+       skb = alloc_can_skb(dev, &frame);
+       if (!skb) {
+               stats->rx_dropped++;
+               return -ENOMEM;
+       }
+
+       frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+
+       flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
+       val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+               (flags << 16);
+
+       if (flags & IF_ARB_MSGXTD)
+               frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       else
+               frame->can_id = (val >> 18) & CAN_SFF_MASK;
+
+       if (flags & IF_ARB_TRANSMIT)
+               frame->can_id |= CAN_RTR_FLAG;
+       else {
+               for (i = 0; i < frame->can_dlc; i += 2) {
+                       data = priv->read_reg(priv,
+                               &priv->regs->ifregs[iface].data[i / 2]);
+                       frame->data[i] = data;
+                       frame->data[i + 1] = data >> 8;
+               }
+       }
+
+       netif_receive_skb(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += frame->can_dlc;
+
+       return 0;
+}
+
+static void c_can_setup_receive_object(struct net_device *dev, int iface,
+                                       int objno, unsigned int mask,
+                                       unsigned int id, unsigned int mcont)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+                       IFX_WRITE_LOW_16BIT(mask));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+                       IFX_WRITE_HIGH_16BIT(mask));
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+                       IFX_WRITE_LOW_16BIT(id));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+                       (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+       c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+                       c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+
+       c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
+
+       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+                       c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
+{
+       int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+
+       /*
+        * bit n-1 of the transmission request register corresponds to
+        * message object n, so account for the off-by-one when testing it.
+        */
+       if (val & (1 << (objno - 1)))
+               return 1;
+
+       return 0;
+}
+
+static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
+                                       struct net_device *dev)
+{
+       u32 msg_obj_no;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct can_frame *frame = (struct can_frame *)skb->data;
+
+       if (can_dropped_invalid_skb(dev, skb))
+               return NETDEV_TX_OK;
+
+       msg_obj_no = get_tx_next_msg_obj(priv);
+
+       /* prepare message object for transmission */
+       c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+       can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+
+       /*
+        * we have to stop the queue in case of a wrap around or
+        * if the next TX message object is still in use
+        */
+       priv->tx_next++;
+       if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
+                       (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+               netif_stop_queue(dev);
+
+       return NETDEV_TX_OK;
+}
+
+static int c_can_set_bittiming(struct net_device *dev)
+{
+       unsigned int reg_btr, reg_brpe, ctrl_save;
+       u8 brp, brpe, sjw, tseg1, tseg2;
+       u32 ten_bit_brp;
+       struct c_can_priv *priv = netdev_priv(dev);
+       const struct can_bittiming *bt = &priv->can.bittiming;
+
+       /* c_can provides a 6-bit brp and 4-bit brpe fields */
+       ten_bit_brp = bt->brp - 1;
+       brp = ten_bit_brp & BTR_BRP_MASK;
+       brpe = ten_bit_brp >> 6;
+
+       sjw = bt->sjw - 1;
+       tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+       tseg2 = bt->phase_seg2 - 1;
+       reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
+                       (tseg2 << BTR_TSEG2_SHIFT);
+       reg_brpe = brpe & BRP_EXT_BRPE_MASK;
+
+       netdev_info(dev,
+               "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+
+       ctrl_save = priv->read_reg(priv, &priv->regs->control);
+       priv->write_reg(priv, &priv->regs->control,
+                       ctrl_save | CONTROL_CCE | CONTROL_INIT);
+       priv->write_reg(priv, &priv->regs->btr, reg_btr);
+       priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
+       priv->write_reg(priv, &priv->regs->control, ctrl_save);
+
+       return 0;
+}
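
For reference, a stand-alone sketch of how the BTR and BRPE values written above
are assembled from the CAN bit-timing parameters: the 10-bit prescaler splits into
the 6-bit BRP field and the 4-bit BRPE extension. The example timing numbers below
are invented, not taken from a real configuration.

/* Illustrative sketch: assemble BTR/BRPE the way c_can_set_bittiming()
 * does, using invented example timings. */
#include <stdio.h>

#define BTR_BRP_MASK		0x3f
#define BTR_SJW_SHIFT		6
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG2_SHIFT		12
#define BRP_EXT_BRPE_MASK	0x0f

int main(void)
{
	/* invented example: brp = 80, sjw = 1, prop_seg + phase_seg1 = 13, phase_seg2 = 2 */
	unsigned int brp = 80, sjw = 1, tseg1 = 13, tseg2 = 2;
	unsigned int ten_bit_brp = brp - 1;	/* 10 bits, split across BRP and BRPE */
	unsigned int reg_btr, reg_brpe;

	reg_btr = (ten_bit_brp & BTR_BRP_MASK) |
		  ((sjw - 1) << BTR_SJW_SHIFT) |
		  ((tseg1 - 1) << BTR_TSEG1_SHIFT) |
		  ((tseg2 - 1) << BTR_TSEG2_SHIFT);
	reg_brpe = (ten_bit_brp >> 6) & BRP_EXT_BRPE_MASK;

	printf("BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
	return 0;
}
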
+
+/*
+ * Configure C_CAN message objects for Tx and Rx purposes:
+ * C_CAN provides a total of 32 message objects that can be configured
+ * either for Tx or Rx purposes. Here the first 16 message objects are used as
+ * a reception FIFO. The end of the reception FIFO is marked by the EoB bit
+ * being set. The remaining 16 message objects are reserved for Tx purposes.
+ * See the user guide for further details on configuring message
+ * objects.
+ */
+static void c_can_configure_msg_objects(struct net_device *dev)
+{
+       int i;
+
+       /* first invalidate all message objects */
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+               c_can_inval_msg_object(dev, 0, i);
+
+       /* setup receive message objects */
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+               c_can_setup_receive_object(dev, 0, i, 0, 0,
+                       (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+
+       c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+                       IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+}
+
+/*
+ * Configure C_CAN chip:
+ * - enable/disable auto-retransmission
+ * - set operating mode
+ * - configure message objects
+ */
+static void c_can_chip_config(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+               /* disable automatic retransmission */
+               priv->write_reg(priv, &priv->regs->control,
+                               CONTROL_DISABLE_AR);
+       else
+               /* enable automatic retransmission */
+               priv->write_reg(priv, &priv->regs->control,
+                               CONTROL_ENABLE_AR);
+
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+           (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+               /* loopback + silent mode : useful for hot self-test */
+               priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, &priv->regs->test,
+                               TEST_LBACK | TEST_SILENT);
+       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+               /* loopback mode : useful for self-test function */
+               priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+               /* silent mode : bus-monitoring mode */
+               priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+       } else
+               /* normal mode*/
+               priv->write_reg(priv, &priv->regs->control,
+                               CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+
+       /* configure message objects */
+       c_can_configure_msg_objects(dev);
+
+       /* set a `lec` value so that we can check for updates later */
+       priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+       /* set bittiming params */
+       c_can_set_bittiming(dev);
+}
+
+static void c_can_start(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* enable status change, error and module interrupts */
+       c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+       /* basic c_can configuration */
+       c_can_chip_config(dev);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* reset tx helper pointers */
+       priv->tx_next = priv->tx_echo = 0;
+}
+
+static void c_can_stop(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* disable all interrupts */
+       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+       /* set the state as STOPPED */
+       priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+       switch (mode) {
+       case CAN_MODE_START:
+               c_can_start(dev);
+               netif_wake_queue(dev);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+                                       struct can_berr_counter *bec)
+{
+       unsigned int reg_err_counter;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+       bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+                               ERR_CNT_REC_SHIFT;
+       bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
+       return 0;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next and, for each frame
+ * that has been transmitted, echo it back to the CAN framework.
+ * If we discover a not yet transmitted packet, we stop looking for more.
+ */
+static void c_can_do_tx(struct net_device *dev)
+{
+       u32 val;
+       u32 msg_obj_no;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+
+       for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+               msg_obj_no = get_tx_echo_msg_obj(priv);
+               c_can_inval_msg_object(dev, 0, msg_obj_no);
+               val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+               if (!(val & (1 << msg_obj_no))) {
+                       can_get_echo_skb(dev,
+                                       msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+                       stats->tx_bytes += priv->read_reg(priv,
+                                       &priv->regs->ifregs[0].msg_cntrl)
+                                       & IF_MCONT_DLC_MASK;
+                       stats->tx_packets++;
+               }
+       }
+
+       /* restart queue if wrap-up or if queue stalled on last pkt */
+       if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
+                       ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
+               netif_wake_queue(dev);
+}
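
A small sketch of the bookkeeping the "theory of operation" comment above
describes: tx_next and tx_echo are free-running counters, and masking with
C_CAN_NEXT_MSG_OBJ_MASK maps each counter value onto one of the 16 TX message
objects (17..32). The counter values used below are invented.

/* Illustrative sketch: map free-running tx counters onto TX message
 * objects the way get_tx_next_msg_obj()/get_tx_echo_msg_obj() do. */
#include <stdio.h>

#define MSG_OBJ_TX_NUM		16
#define MSG_OBJ_TX_FIRST	17	/* objects 17..32 form the TX group */
#define NEXT_MSG_OBJ_MASK	(MSG_OBJ_TX_NUM - 1)

static unsigned int to_tx_obj(unsigned int counter)
{
	return (counter & NEXT_MSG_OBJ_MASK) + MSG_OBJ_TX_FIRST;
}

int main(void)
{
	unsigned int tx_echo = 14, tx_next = 19;	/* invented counter values */

	/* echo back the oldest frames first, wrapping from object 32 to object 17 */
	for (; tx_next - tx_echo > 0; tx_echo++)
		printf("counter %u -> message object %u\n", tx_echo, to_tx_obj(tx_echo));
	return 0;
}
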
+
+/*
+ * theory of operation:
+ *
+ * The c_can core saves a received CAN message into the first free message
+ * object it finds (starting with the lowest). Bits NEWDAT and
+ * INTPND are set for this message object, indicating that a new message
+ * has arrived. To preserve frame ordering despite this, we keep two groups of message
+ * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
+ *
+ * To ensure in-order frame reception we use the following
+ * approach while re-activating a message object to receive further
+ * frames:
+ * - if the current message object number is lower than
+ *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
+ *   the INTPND bit.
+ * - if the current message object number is equal to
+ *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
+ *   receive message objects.
+ * - if the current message object number is greater than
+ *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
+ *   only this message object.
+ */
+static int c_can_do_rx_poll(struct net_device *dev, int quota)
+{
+       u32 num_rx_pkts = 0;
+       unsigned int msg_obj, msg_ctrl_save;
+       struct c_can_priv *priv = netdev_priv(dev);
+       u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+
+       for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
+                       msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
+                       val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+                       msg_obj++) {
+               /*
+                * bit n-1 of the interrupt pending register corresponds to
+                * message object n, so account for the off-by-one when testing it.
+                */
+               if (val & (1 << (msg_obj - 1))) {
+                       c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
+                                       ~IF_COMM_TXRQST);
+                       msg_ctrl_save = priv->read_reg(priv,
+                                       &priv->regs->ifregs[0].msg_cntrl);
+
+                       if (msg_ctrl_save & IF_MCONT_EOB)
+                               return num_rx_pkts;
+
+                       if (msg_ctrl_save & IF_MCONT_MSGLST) {
+                               c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+                               num_rx_pkts++;
+                               quota--;
+                               continue;
+                       }
+
+                       if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+                               continue;
+
+                       /* read the data from the message object */
+                       c_can_read_msg_object(dev, 0, msg_ctrl_save);
+
+                       if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
+                               c_can_mark_rx_msg_obj(dev, 0,
+                                               msg_ctrl_save, msg_obj);
+                       else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
+                               /* activate this msg obj */
+                               c_can_activate_rx_msg_obj(dev, 0,
+                                               msg_ctrl_save, msg_obj);
+                       else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
+                               /* activate all lower message objects */
+                               c_can_activate_all_lower_rx_msg_obj(dev,
+                                               0, msg_ctrl_save);
+
+                       num_rx_pkts++;
+                       quota--;
+               }
+       }
+
+       return num_rx_pkts;
+}
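
A stand-alone sketch of the split handling described above: bit n-1 of the
interrupt pending register corresponds to receive object n, and the reactivation
path depends on where the object sits relative to C_CAN_MSG_RX_LOW_LAST. The
pending mask below is invented for the example.

/* Illustrative sketch: decode an invented INTPND mask and show which
 * reactivation path the poll loop above takes for each receive object. */
#include <stdio.h>

#define MSG_OBJ_RX_FIRST	1
#define MSG_OBJ_RX_LAST		16
#define MSG_RX_LOW_LAST		8	/* C_CAN_MSG_OBJ_RX_SPLIT - 1 */

int main(void)
{
	unsigned int intpnd = 0x0000c185;	/* invented: objects 1, 3, 8, 9, 15, 16 */
	unsigned int obj;

	for (obj = MSG_OBJ_RX_FIRST; obj <= MSG_OBJ_RX_LAST; obj++) {
		if (!(intpnd & (1u << (obj - 1))))
			continue;
		if (obj < MSG_RX_LOW_LAST)
			printf("object %2u: mark read, keep NEWDAT set\n", obj);
		else if (obj == MSG_RX_LOW_LAST)
			printf("object %2u: reactivate all lower objects\n", obj);
		else
			printf("object %2u: reactivate this object only\n", obj);
	}
	return 0;
}
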
+
+static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
+{
+       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+               (priv->current_status & LEC_UNUSED);
+}
+
+static int c_can_handle_state_change(struct net_device *dev,
+                               enum c_can_bus_error_types error_type)
+{
+       unsigned int reg_err_counter;
+       unsigned int rx_err_passive;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       struct can_berr_counter bec;
+
+       /* propagate the error condition to the CAN stack */
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       c_can_get_berr_counter(dev, &bec);
+       reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+       rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+                               ERR_CNT_RP_SHIFT;
+
+       switch (error_type) {
+       case C_CAN_ERROR_WARNING:
+               /* error warning state */
+               priv->can.can_stats.error_warning++;
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = (bec.txerr > bec.rxerr) ?
+                       CAN_ERR_CRTL_TX_WARNING :
+                       CAN_ERR_CRTL_RX_WARNING;
+               cf->data[6] = bec.txerr;
+               cf->data[7] = bec.rxerr;
+
+               break;
+       case C_CAN_ERROR_PASSIVE:
+               /* error passive state */
+               priv->can.can_stats.error_passive++;
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               cf->can_id |= CAN_ERR_CRTL;
+               if (rx_err_passive)
+                       cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+               if (bec.txerr > 127)
+                       cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+               cf->data[6] = bec.txerr;
+               cf->data[7] = bec.rxerr;
+               break;
+       case C_CAN_BUS_OFF:
+               /* bus-off state */
+               priv->can.state = CAN_STATE_BUS_OFF;
+               cf->can_id |= CAN_ERR_BUSOFF;
+               /*
+                * disable all interrupts in bus-off mode to ensure that
+                * the CPU is not hogged down
+                */
+               c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+               can_bus_off(dev);
+               break;
+       default:
+               break;
+       }
+
+       netif_receive_skb(skb);
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static int c_can_handle_bus_err(struct net_device *dev,
+                               enum c_can_lec_type lec_type)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       /*
+        * early exit if no lec update or no error.
+        * no lec update means that no CAN bus event has been detected
+        * since the CPU wrote the 0x7 value to the status register.
+        */
+       if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
+               return 0;
+
+       /* propagate the error condition to the CAN stack */
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       /*
+        * check for 'last error code' which tells us the
+        * type of the last error to occur on the CAN bus
+        */
+
+       /* common for all type of bus errors */
+       priv->can.can_stats.bus_error++;
+       stats->rx_errors++;
+       cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+       switch (lec_type) {
+       case LEC_STUFF_ERROR:
+               netdev_dbg(dev, "stuff error\n");
+               cf->data[2] |= CAN_ERR_PROT_STUFF;
+               break;
+       case LEC_FORM_ERROR:
+               netdev_dbg(dev, "form error\n");
+               cf->data[2] |= CAN_ERR_PROT_FORM;
+               break;
+       case LEC_ACK_ERROR:
+               netdev_dbg(dev, "ack error\n");
+               cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+                               CAN_ERR_PROT_LOC_ACK_DEL);
+               break;
+       case LEC_BIT1_ERROR:
+               netdev_dbg(dev, "bit1 error\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT1;
+               break;
+       case LEC_BIT0_ERROR:
+               netdev_dbg(dev, "bit0 error\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT0;
+               break;
+       case LEC_CRC_ERROR:
+               netdev_dbg(dev, "CRC error\n");
+               cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+                               CAN_ERR_PROT_LOC_CRC_DEL);
+               break;
+       default:
+               break;
+       }
+
+       /* set a `lec` value so that we can check for updates later */
+       priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+       netif_receive_skb(skb);
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static int c_can_poll(struct napi_struct *napi, int quota)
+{
+       u16 irqstatus;
+       int lec_type = 0;
+       int work_done = 0;
+       struct net_device *dev = napi->dev;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!irqstatus)
+               goto end;
+
+       /* status events have the highest priority */
+       if (irqstatus == STATUS_INTERRUPT) {
+               priv->current_status = priv->read_reg(priv,
+                                       &priv->regs->status);
+
+               /* handle Tx/Rx events */
+               if (priv->current_status & STATUS_TXOK)
+                       priv->write_reg(priv, &priv->regs->status,
+                                       priv->current_status & ~STATUS_TXOK);
+
+               if (priv->current_status & STATUS_RXOK)
+                       priv->write_reg(priv, &priv->regs->status,
+                                       priv->current_status & ~STATUS_RXOK);
+
+               /* handle state changes */
+               if ((priv->current_status & STATUS_EWARN) &&
+                               (!(priv->last_status & STATUS_EWARN))) {
+                       netdev_dbg(dev, "entered error warning state\n");
+                       work_done += c_can_handle_state_change(dev,
+                                               C_CAN_ERROR_WARNING);
+               }
+               if ((priv->current_status & STATUS_EPASS) &&
+                               (!(priv->last_status & STATUS_EPASS))) {
+                       netdev_dbg(dev, "entered error passive state\n");
+                       work_done += c_can_handle_state_change(dev,
+                                               C_CAN_ERROR_PASSIVE);
+               }
+               if ((priv->current_status & STATUS_BOFF) &&
+                               (!(priv->last_status & STATUS_BOFF))) {
+                       netdev_dbg(dev, "entered bus off state\n");
+                       work_done += c_can_handle_state_change(dev,
+                                               C_CAN_BUS_OFF);
+               }
+
+               /* handle bus recovery events */
+               if ((!(priv->current_status & STATUS_BOFF)) &&
+                               (priv->last_status & STATUS_BOFF)) {
+                       netdev_dbg(dev, "left bus off state\n");
+                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               }
+               if ((!(priv->current_status & STATUS_EPASS)) &&
+                               (priv->last_status & STATUS_EPASS)) {
+                       netdev_dbg(dev, "left error passive state\n");
+                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               }
+
+               priv->last_status = priv->current_status;
+
+               /* handle lec errors on the bus */
+               lec_type = c_can_has_and_handle_berr(priv);
+               if (lec_type)
+                       work_done += c_can_handle_bus_err(dev, lec_type);
+       } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
+                       (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
+               /* handle events corresponding to receive message objects */
+               work_done += c_can_do_rx_poll(dev, (quota - work_done));
+       } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
+                       (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
+               /* handle events corresponding to transmit message objects */
+               c_can_do_tx(dev);
+       }
+
+end:
+       if (work_done < quota) {
+               napi_complete(napi);
+               /* enable all IRQs */
+               c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+       }
+
+       return work_done;
+}
+
+static irqreturn_t c_can_isr(int irq, void *dev_id)
+{
+       u16 irqstatus;
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!irqstatus)
+               return IRQ_NONE;
+
+       /* disable all interrupts and schedule the NAPI */
+       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+       napi_schedule(&priv->napi);
+
+       return IRQ_HANDLED;
+}
+
+static int c_can_open(struct net_device *dev)
+{
+       int err;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* open the can device */
+       err = open_candev(dev);
+       if (err) {
+               netdev_err(dev, "failed to open can device\n");
+               return err;
+       }
+
+       /* register interrupt handler */
+       err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
+                               dev);
+       if (err < 0) {
+               netdev_err(dev, "failed to request interrupt\n");
+               goto exit_irq_fail;
+       }
+
+       /* start the c_can controller */
+       c_can_start(dev);
+
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       return 0;
+
+exit_irq_fail:
+       close_candev(dev);
+       return err;
+}
+
+static int c_can_close(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       napi_disable(&priv->napi);
+       c_can_stop(dev);
+       free_irq(dev->irq, dev);
+       close_candev(dev);
+
+       return 0;
+}
+
+struct net_device *alloc_c_can_dev(void)
+{
+       struct net_device *dev;
+       struct c_can_priv *priv;
+
+       dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+       if (!dev)
+               return NULL;
+
+       priv = netdev_priv(dev);
+       netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+
+       priv->dev = dev;
+       priv->can.bittiming_const = &c_can_bittiming_const;
+       priv->can.do_set_mode = c_can_set_mode;
+       priv->can.do_get_berr_counter = c_can_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
+                                       CAN_CTRLMODE_LOOPBACK |
+                                       CAN_CTRLMODE_LISTENONLY |
+                                       CAN_CTRLMODE_BERR_REPORTING;
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+
+void free_c_can_dev(struct net_device *dev)
+{
+       free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_c_can_dev);
+
+static const struct net_device_ops c_can_netdev_ops = {
+       .ndo_open = c_can_open,
+       .ndo_stop = c_can_close,
+       .ndo_start_xmit = c_can_start_xmit,
+};
+
+int register_c_can_dev(struct net_device *dev)
+{
+       dev->flags |= IFF_ECHO; /* we support local echo */
+       dev->netdev_ops = &c_can_netdev_ops;
+
+       return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_c_can_dev);
+
+void unregister_c_can_dev(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* disable all interrupts */
+       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+       unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
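The state-change handler above raises CAN error frames toward user space: controller-state flags go into data[1] (CAN_ERR_CRTL_*), and the TX/RX error counters into data[6]/data[7]. As a hedged illustration only (not part of this patch), a minimal SocketCAN listener could decode those frames as sketched below; the interface name "can0" and the error mask are assumptions.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <linux/can/error.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	struct can_frame cf;
	can_err_mask_t mask = CAN_ERR_CRTL | CAN_ERR_BUSOFF;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;
	/* ask the stack to deliver controller-state and bus-off error frames */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER, &mask, sizeof(mask));
	strcpy(ifr.ifr_name, "can0");	/* assumed interface name */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	while (read(s, &cf, sizeof(cf)) == sizeof(cf)) {
		if (!(cf.can_id & CAN_ERR_FLAG))
			continue;
		if (cf.can_id & CAN_ERR_BUSOFF)
			printf("bus-off\n");
		if (cf.can_id & CAN_ERR_CRTL)
			/* data[6]/data[7] carry txerr/rxerr, as filled in above */
			printf("ctrl 0x%02x txerr=%u rxerr=%u\n",
			       cf.data[1], cf.data[6], cf.data[7]);
	}
	close(s);
	return 0;
}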
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644 (file)
index 0000000..9b7fbef
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef C_CAN_H
+#define C_CAN_H
+
+/* c_can IF registers */
+struct c_can_if_regs {
+       u16 com_req;
+       u16 com_mask;
+       u16 mask1;
+       u16 mask2;
+       u16 arb1;
+       u16 arb2;
+       u16 msg_cntrl;
+       u16 data[4];
+       u16 _reserved[13];
+};
+
+/* c_can hardware registers */
+struct c_can_regs {
+       u16 control;
+       u16 status;
+       u16 err_cnt;
+       u16 btr;
+       u16 interrupt;
+       u16 test;
+       u16 brp_ext;
+       u16 _reserved1;
+       struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
+       u16 _reserved2[8];
+       u16 txrqst1;
+       u16 txrqst2;
+       u16 _reserved3[6];
+       u16 newdat1;
+       u16 newdat2;
+       u16 _reserved4[6];
+       u16 intpnd1;
+       u16 intpnd2;
+       u16 _reserved5[6];
+       u16 msgval1;
+       u16 msgval2;
+       u16 _reserved6[6];
+};
+
+/* c_can private data structure */
+struct c_can_priv {
+       struct can_priv can;    /* must be the first member */
+       struct napi_struct napi;
+       struct net_device *dev;
+       int tx_object;
+       int current_status;
+       int last_status;
+       u16 (*read_reg) (struct c_can_priv *priv, void *reg);
+       void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
+       struct c_can_regs __iomem *regs;
+       unsigned long irq_flags; /* for request_irq() */
+       unsigned int tx_next;
+       unsigned int tx_echo;
+       void *priv;             /* for board-specific data */
+};
+
+struct net_device *alloc_c_can_dev(void);
+void free_c_can_dev(struct net_device *dev);
+int register_c_can_dev(struct net_device *dev);
+void unregister_c_can_dev(struct net_device *dev);
+
+#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644 (file)
index 0000000..e629b96
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Platform CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+/*
+ * The 16-bit c_can registers can be arranged differently in the memory
+ * map of different implementations: for example, they may be aligned
+ * to a 16-bit boundary or to a 32-bit boundary.
+ * Handle this by providing a common read/write interface.
+ */
+static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+                                               void *reg)
+{
+       return readw(reg);
+}
+
+static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+                                               void *reg, u16 val)
+{
+       writew(val, reg);
+}
+
+static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+                                               void *reg)
+{
+       return readw(reg + (long)reg - (long)priv->regs);
+}
+
+static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+                                               void *reg, u16 val)
+{
+       writew(val, reg + (long)reg - (long)priv->regs);
+}
+
+static int __devinit c_can_plat_probe(struct platform_device *pdev)
+{
+       int ret;
+       void __iomem *addr;
+       struct net_device *dev;
+       struct c_can_priv *priv;
+       struct resource *mem, *irq;
+#ifdef CONFIG_HAVE_CLK
+       struct clk *clk;
+
+       /* get the appropriate clk */
+       clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "no clock defined\n");
+               ret = -ENODEV;
+               goto exit;
+       }
+#endif
+
+       /* get the platform data */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!mem || !irq) {
+               ret = -ENODEV;
+               goto exit_free_clk;
+       }
+
+       if (!request_mem_region(mem->start, resource_size(mem),
+                               KBUILD_MODNAME)) {
+               dev_err(&pdev->dev, "resource unavailable\n");
+               ret = -ENODEV;
+               goto exit_free_clk;
+       }
+
+       addr = ioremap(mem->start, resource_size(mem));
+       if (!addr) {
+               dev_err(&pdev->dev, "failed to map can port\n");
+               ret = -ENOMEM;
+               goto exit_release_mem;
+       }
+
+       /* allocate the c_can device */
+       dev = alloc_c_can_dev();
+       if (!dev) {
+               ret = -ENOMEM;
+               goto exit_iounmap;
+       }
+
+       priv = netdev_priv(dev);
+
+       dev->irq = irq->start;
+       priv->regs = addr;
+#ifdef CONFIG_HAVE_CLK
+       priv->can.clock.freq = clk_get_rate(clk);
+       priv->priv = clk;
+#endif
+
+       switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+       case IORESOURCE_MEM_32BIT:
+               priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+               priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+               break;
+       case IORESOURCE_MEM_16BIT:
+       default:
+               priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+               priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+               break;
+       }
+
+       platform_set_drvdata(pdev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       ret = register_c_can_dev(dev);
+       if (ret) {
+               dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+                       KBUILD_MODNAME, ret);
+               goto exit_free_device;
+       }
+
+       dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+                KBUILD_MODNAME, priv->regs, dev->irq);
+       return 0;
+
+exit_free_device:
+       platform_set_drvdata(pdev, NULL);
+       free_c_can_dev(dev);
+exit_iounmap:
+       iounmap(addr);
+exit_release_mem:
+       release_mem_region(mem->start, resource_size(mem));
+exit_free_clk:
+#ifdef CONFIG_HAVE_CLK
+       clk_put(clk);
+exit:
+#endif
+       dev_err(&pdev->dev, "probe failed\n");
+
+       return ret;
+}
+
+static int __devexit c_can_plat_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct resource *mem;
+
+       unregister_c_can_dev(dev);
+       platform_set_drvdata(pdev, NULL);
+
+       free_c_can_dev(dev);
+       iounmap(priv->regs);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
+
+#ifdef CONFIG_HAVE_CLK
+       clk_put(priv->priv);
+#endif
+
+       return 0;
+}
+
+static struct platform_driver c_can_plat_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = c_can_plat_probe,
+       .remove = __devexit_p(c_can_plat_remove),
+};
+
+static int __init c_can_plat_init(void)
+{
+       return platform_driver_register(&c_can_plat_driver);
+}
+module_init(c_can_plat_init);
+
+static void __exit c_can_plat_exit(void)
+{
+       platform_driver_unregister(&c_can_plat_driver);
+}
+module_exit(c_can_plat_exit);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
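The *_aligned_to_32bit() accessors above hinge on one address trick: when every 16-bit register occupies a 32-bit slot, its byte offset from the start of the register block doubles, so the effective address is reg + (reg - base). A stand-alone sketch of that arithmetic, with made-up addresses used purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values, chosen only to show the offset doubling */
	uintptr_t base = 0x1000;	/* start of the register block */
	uintptr_t reg  = base + 0x06;	/* a 16-bit register at offset 6 */

	/* 16-bit aligned layout: the register sits at base + offset */
	uintptr_t addr16 = reg;
	/*
	 * 32-bit aligned layout: each 16-bit register takes 4 bytes, so
	 * the effective address is base + 2 * offset, which is exactly
	 * reg + (reg - base) as computed by the driver's helpers.
	 */
	uintptr_t addr32 = reg + (reg - base);

	printf("offset 0x%02lx -> 16-bit layout 0x%lx, 32-bit layout 0x%lx\n",
	       (unsigned long)(reg - base), (unsigned long)addr16,
	       (unsigned long)addr32);
	return 0;
}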
index b9a6d7a..366f5cc 100644 (file)
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
                                                   ican3_sysfs_set_term);
 
 static struct attribute *ican3_sysfs_attrs[] = {
index 7ab534a..7513c45 100644 (file)
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
                goto open_unlock;
        }
 
-       priv->wq = create_freezeable_workqueue("mcp251x_wq");
+       priv->wq = create_freezable_workqueue("mcp251x_wq");
        INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
        INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
index 27d1d39..d387069 100644 (file)
@@ -1,5 +1,5 @@
 config CAN_MSCAN
-       depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+       depends on CAN_DEV && (PPC || M68K)
        tristate "Support for Freescale MSCAN based chips"
        ---help---
          The Motorola Scalable Controller Area Network (MSCAN) definition
index c42e972..e54712b 100644 (file)
@@ -185,7 +185,7 @@ struct pch_can_priv {
 
 static struct can_bittiming_const pch_can_bittiming_const = {
        .name = KBUILD_MODNAME,
-       .tseg1_min = 1,
+       .tseg1_min = 2,
        .tseg1_max = 16,
        .tseg2_min = 1,
        .tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
        struct pch_can_priv *priv = netdev_priv(ndev);
 
        unregister_candev(priv->ndev);
-       pci_iounmap(pdev, priv->regs);
        if (priv->use_msi)
                pci_disable_msi(priv->dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        pch_can_reset(priv);
+       pci_iounmap(pdev, priv->regs);
        free_candev(priv->ndev);
 }
 
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
                priv->use_msi = 0;
        } else {
                netdev_err(ndev, "PCH CAN opened with MSI\n");
+               pci_set_master(pdev);
                priv->use_msi = 1;
        }
 
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644 (file)
index 0000000..5de46a9
--- /dev/null
@@ -0,0 +1,30 @@
+config CAN_SOFTING
+       tristate "Softing GmbH CAN generic support"
+       depends on CAN_DEV && HAS_IOMEM
+       ---help---
+         Support for CAN cards from Softing GmbH and some cards
+         from Vector GmbH.
+         Softing GmbH CAN cards come with 1 or 2 physical buses.
+         These cards typically use dual-port RAM to communicate
+         with the host CPU, so the interface is identical for PCI
+         and PCMCIA cards. This driver operates on a platform device
+         created by the softing_cs or softing_pci driver.
+         Warning:
+         The API of the card does not allow fine-grained control per bus;
+         it controls the 2 buses on the card together.
+         As such, some actions (start/stop/bus-off recovery) on 1 bus
+         must temporarily bring down the other bus as well.
+
+config CAN_SOFTING_CS
+       tristate "Softing GmbH CAN PCMCIA cards"
+       depends on PCMCIA
+       depends on CAN_SOFTING
+       ---help---
+         Support for PCMCIA cards from Softing GmbH and some cards
+         from Vector GmbH.
+         You need firmware for these, which you can get at
+         http://developer.berlios.de/projects/socketcan/
+         This version of the driver is written against
+         firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
+         In order to use the card as a CAN device, you also need the
+         Softing generic support.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644 (file)
index 0000000..c5e5016
--- /dev/null
@@ -0,0 +1,6 @@
+
+softing-y := softing_main.o softing_fw.o
+obj-$(CONFIG_CAN_SOFTING) += softing.o
+obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644 (file)
index 0000000..7ec9f4d
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * softing common interfaces
+ *
+ * by Kurt Van Dijck, 2008-2010
+ */
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "softing_platform.h"
+
+struct softing;
+
+struct softing_priv {
+       struct can_priv can; /* must be the first member! */
+       struct net_device *netdev;
+       struct softing *card;
+       struct {
+               int pending;
+               /* variables which hold the circular buffer */
+               int echo_put;
+               int echo_get;
+       } tx;
+       struct can_bittiming_const btr_const;
+       int index;
+       uint8_t output;
+       uint16_t chip;
+};
+#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
+
+struct softing {
+       const struct softing_platform_data *pdat;
+       struct platform_device *pdev;
+       struct net_device *net[2];
+       spinlock_t spin; /* protect this structure & DPRAM access */
+       ktime_t ts_ref;
+       ktime_t ts_overflow; /* timestamp overflow value, in ktime */
+
+       struct {
+               /* indication of firmware status */
+               int up;
+               /* protection of the 'up' variable */
+               struct mutex lock;
+       } fw;
+       struct {
+               int nr;
+               int requested;
+               int svc_count;
+               unsigned int dpram_position;
+       } irq;
+       struct {
+               int pending;
+               int last_bus;
+               /*
+                * keep the bus that last tx'd a message,
+                * in order to let every netdev queue resume
+                */
+       } tx;
+       __iomem uint8_t *dpram;
+       unsigned long dpram_phys;
+       unsigned long dpram_size;
+       struct {
+               uint16_t fw_version, hw_version, license, serial;
+               uint16_t chip[2];
+               unsigned int freq; /* remote cpu's operating frequency */
+       } id;
+};
+
+extern int softing_default_output(struct net_device *netdev);
+
+extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+
+extern int softing_chip_poweron(struct softing *card);
+
+extern int softing_bootloader_command(struct softing *card, int16_t cmd,
+               const char *msg);
+
+/* Load firmware after reset */
+extern int softing_load_fw(const char *file, struct softing *card,
+                       __iomem uint8_t *virt, unsigned int size, int offset);
+
+/* Load final application firmware after bootloader */
+extern int softing_load_app_fw(const char *file, struct softing *card);
+
+/*
+ * enable or disable irq
+ * only called with fw.lock locked
+ */
+extern int softing_enable_irq(struct softing *card, int enable);
+
+/* start/stop 1 bus on card */
+extern int softing_startstop(struct net_device *netdev, int up);
+
+/* netif_rx() */
+extern int softing_netdev_rx(struct net_device *netdev,
+               const struct can_frame *msg, ktime_t ktime);
+
+/* SOFTING DPRAM mappings */
+#define DPRAM_RX               0x0000
+       #define DPRAM_RX_SIZE   32
+       #define DPRAM_RX_CNT    16
+#define DPRAM_RX_RD            0x0201  /* uint8_t */
+#define DPRAM_RX_WR            0x0205  /* uint8_t */
+#define DPRAM_RX_LOST          0x0207  /* uint8_t */
+
+#define DPRAM_FCT_PARAM                0x0300  /* int16_t [20] */
+#define DPRAM_FCT_RESULT       0x0328  /* int16_t */
+#define DPRAM_FCT_HOST         0x032b  /* uint16_t */
+
+#define DPRAM_INFO_BUSSTATE    0x0331  /* uint16_t */
+#define DPRAM_INFO_BUSSTATE2   0x0335  /* uint16_t */
+#define DPRAM_INFO_ERRSTATE    0x0339  /* uint16_t */
+#define DPRAM_INFO_ERRSTATE2   0x033d  /* uint16_t */
+#define DPRAM_RESET            0x0341  /* uint16_t */
+#define DPRAM_CLR_RECV_FIFO    0x0345  /* uint16_t */
+#define DPRAM_RESET_TIME       0x034d  /* uint16_t */
+#define DPRAM_TIME             0x0350  /* uint64_t */
+#define DPRAM_WR_START         0x0358  /* uint8_t */
+#define DPRAM_WR_END           0x0359  /* uint8_t */
+#define DPRAM_RESET_RX_FIFO    0x0361  /* uint16_t */
+#define DPRAM_RESET_TX_FIFO    0x0364  /* uint8_t */
+#define DPRAM_READ_FIFO_LEVEL  0x0365  /* uint8_t */
+#define DPRAM_RX_FIFO_LEVEL    0x0366  /* uint16_t */
+#define DPRAM_TX_FIFO_LEVEL    0x0366  /* uint16_t */
+
+#define DPRAM_TX               0x0400  /* uint16_t */
+       #define DPRAM_TX_SIZE   16
+       #define DPRAM_TX_CNT    32
+#define DPRAM_TX_RD            0x0601  /* uint8_t */
+#define DPRAM_TX_WR            0x0605  /* uint8_t */
+
+#define DPRAM_COMMAND          0x07e0  /* uint16_t */
+#define DPRAM_RECEIPT          0x07f0  /* uint16_t */
+#define DPRAM_IRQ_TOHOST       0x07fe  /* uint8_t */
+#define DPRAM_IRQ_TOCARD       0x07ff  /* uint8_t */
+
+#define DPRAM_V2_RESET         0x0e00  /* uint8_t */
+#define DPRAM_V2_IRQ_TOHOST    0x0e02  /* uint8_t */
+
+#define TXMAX  (DPRAM_TX_CNT - 1)
+
+/* DPRAM return codes */
+#define RES_NONE       0
+#define RES_OK         1
+#define RES_NOK                2
+#define RES_UNKNOWN    3
+/* DPRAM flags */
+#define CMD_TX         0x01
+#define CMD_ACK                0x02
+#define CMD_XTD                0x04
+#define CMD_RTR                0x08
+#define CMD_ERR                0x10
+#define CMD_BUS2       0x80
+
+/* returned fifo entry bus state masks */
+#define SF_MASK_BUSOFF         0x80
+#define SF_MASK_EPASSIVE       0x60
+
+/* bus states */
+#define STATE_BUSOFF   2
+#define STATE_EPASSIVE 1
+#define STATE_EACTIVE  0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644 (file)
index 0000000..c11bb4d
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "softing_platform.h"
+
+static int softingcs_index;
+static spinlock_t softingcs_index_lock;
+
+static int softingcs_reset(struct platform_device *pdev, int v);
+static int softingcs_enable_irq(struct platform_device *pdev, int v);
+
+/*
+ * platform_data descriptions
+ */
+#define MHZ (1000*1000)
+static const struct softing_platform_data softingcs_platform_data[] = {
+{
+       .name = "CANcard",
+       .manf = 0x0168, .prod = 0x001,
+       .generation = 1,
+       .nbus = 2,
+       .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+       .dpram_size = 0x0800,
+       .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = softingcs_enable_irq,
+}, {
+       .name = "CANcard-NEC",
+       .manf = 0x0168, .prod = 0x002,
+       .generation = 1,
+       .nbus = 2,
+       .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+       .dpram_size = 0x0800,
+       .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = softingcs_enable_irq,
+}, {
+       .name = "CANcard-SJA",
+       .manf = 0x0168, .prod = 0x004,
+       .generation = 1,
+       .nbus = 2,
+       .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+       .dpram_size = 0x0800,
+       .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = softingcs_enable_irq,
+}, {
+       .name = "CANcard-2",
+       .manf = 0x0168, .prod = 0x005,
+       .generation = 2,
+       .nbus = 2,
+       .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+       .dpram_size = 0x1000,
+       .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = NULL,
+}, {
+       .name = "Vector-CANcard",
+       .manf = 0x0168, .prod = 0x081,
+       .generation = 1,
+       .nbus = 2,
+       .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+       .dpram_size = 0x0800,
+       .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = softingcs_enable_irq,
+}, {
+       .name = "Vector-CANcard-SJA",
+       .manf = 0x0168, .prod = 0x084,
+       .generation = 1,
+       .nbus = 2,
+       .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+       .dpram_size = 0x0800,
+       .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = softingcs_enable_irq,
+}, {
+       .name = "Vector-CANcard-2",
+       .manf = 0x0168, .prod = 0x085,
+       .generation = 2,
+       .nbus = 2,
+       .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+       .dpram_size = 0x1000,
+       .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = NULL,
+}, {
+       .name = "EDICcard-NEC",
+       .manf = 0x0168, .prod = 0x102,
+       .generation = 1,
+       .nbus = 2,
+       .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+       .dpram_size = 0x0800,
+       .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = softingcs_enable_irq,
+}, {
+       .name = "EDICcard-2",
+       .manf = 0x0168, .prod = 0x105,
+       .generation = 2,
+       .nbus = 2,
+       .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+       .dpram_size = 0x1000,
+       .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+       .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+       .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+       .reset = softingcs_reset,
+       .enable_irq = NULL,
+}, {
+       0, 0,
+},
+};
+
+MODULE_FIRMWARE(fw_dir "bcard.bin");
+MODULE_FIRMWARE(fw_dir "ldcard.bin");
+MODULE_FIRMWARE(fw_dir "cancard.bin");
+MODULE_FIRMWARE(fw_dir "cansja.bin");
+
+MODULE_FIRMWARE(fw_dir "bcard2.bin");
+MODULE_FIRMWARE(fw_dir "ldcard2.bin");
+MODULE_FIRMWARE(fw_dir "cancrd2.bin");
+
+static __devinit const struct softing_platform_data
+*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
+{
+       const struct softing_platform_data *lp;
+
+       for (lp = softingcs_platform_data; lp->manf; ++lp) {
+               if ((lp->manf == manf) && (lp->prod == prod))
+                       return lp;
+       }
+       return NULL;
+}
+
+/*
+ * platformdata callbacks
+ */
+static int softingcs_reset(struct platform_device *pdev, int v)
+{
+       struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+       dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
+       return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
+}
+
+static int softingcs_enable_irq(struct platform_device *pdev, int v)
+{
+       struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+       dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
+       return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
+}
+
+/*
+ * pcmcia check
+ */
+static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
+               void *priv_data)
+{
+       struct softing_platform_data *pdat = priv_data;
+       struct resource *pres;
+       int memspeed = 0;
+
+       WARN_ON(!pdat);
+       pres = pcmcia->resource[PCMCIA_IOMEM_0];
+       if (resource_size(pres) < 0x1000)
+               return -ERANGE;
+
+       pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+       if (pdat->generation < 2) {
+               pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
+               memspeed = 3;
+       } else {
+               pres->flags |= WIN_DATA_WIDTH_16;
+       }
+       return pcmcia_request_window(pcmcia, pres, memspeed);
+}
+
+static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+{
+       struct platform_device *pdev = pcmcia->priv;
+
+       /* free bits */
+       platform_device_unregister(pdev);
+       /* release pcmcia stuff */
+       pcmcia_disable_device(pcmcia);
+}
+
+/*
+ * platform_device wrapper
+ * pdev->resource has 2 entries: io & irq
+ */
+static void softingcs_pdev_release(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       kfree(pdev);
+}
+
+static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+{
+       int ret;
+       struct platform_device *pdev;
+       const struct softing_platform_data *pdat;
+       struct resource *pres;
+       struct dev {
+               struct platform_device pdev;
+               struct resource res[2];
+       } *dev;
+
+       /* find matching platform_data */
+       pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
+       if (!pdat)
+               return -ENOTTY;
+
+       /* setup pcmcia device */
+       pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
+               CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+       ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
+       if (ret)
+               goto pcmcia_failed;
+
+       ret = pcmcia_enable_device(pcmcia);
+       if (ret < 0)
+               goto pcmcia_failed;
+
+       pres = pcmcia->resource[PCMCIA_IOMEM_0];
+       if (!pres) {
+               ret = -EBADF;
+               goto pcmcia_bad;
+       }
+
+       /* create softing platform device */
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               ret = -ENOMEM;
+               goto mem_failed;
+       }
+       dev->pdev.resource = dev->res;
+       dev->pdev.num_resources = ARRAY_SIZE(dev->res);
+       dev->pdev.dev.release = softingcs_pdev_release;
+
+       pdev = &dev->pdev;
+       pdev->dev.platform_data = (void *)pdat;
+       pdev->dev.parent = &pcmcia->dev;
+       pcmcia->priv = pdev;
+
+       /* platform device resources */
+       pdev->resource[0].flags = IORESOURCE_MEM;
+       pdev->resource[0].start = pres->start;
+       pdev->resource[0].end = pres->end;
+
+       pdev->resource[1].flags = IORESOURCE_IRQ;
+       pdev->resource[1].start = pcmcia->irq;
+       pdev->resource[1].end = pdev->resource[1].start;
+
+       /* platform device setup */
+       spin_lock(&softingcs_index_lock);
+       pdev->id = softingcs_index++;
+       spin_unlock(&softingcs_index_lock);
+       pdev->name = "softing";
+       dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
+       ret = platform_device_register(pdev);
+       if (ret < 0)
+               goto platform_failed;
+
+       dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
+       return 0;
+
+platform_failed:
+       kfree(dev);
+mem_failed:
+pcmcia_bad:
+pcmcia_failed:
+       pcmcia_disable_device(pcmcia);
+       pcmcia->priv = NULL;
+       return ret ?: -ENODEV;
+}
+
+static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
+       /* softing */
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
+       /* vector, manufacturer? */
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
+       /* EDIC */
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
+       PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
+       PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
+
+static struct pcmcia_driver softingcs_driver = {
+       .owner          = THIS_MODULE,
+       .name           = "softingcs",
+       .id_table       = softingcs_ids,
+       .probe          = softingcs_probe,
+       .remove         = __devexit_p(softingcs_remove),
+};
+
+static int __init softingcs_start(void)
+{
+       spin_lock_init(&softingcs_index_lock);
+       return pcmcia_register_driver(&softingcs_driver);
+}
+
+static void __exit softingcs_stop(void)
+{
+       pcmcia_unregister_driver(&softingcs_driver);
+}
+
+module_init(softingcs_start);
+module_exit(softingcs_stop);
+
+MODULE_DESCRIPTION("softing CANcard driver"
+               ", links PCMCIA card to softing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644 (file)
index 0000000..b520784
--- /dev/null
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+
+#include "softing.h"
+
+/*
+ * low level DPRAM command.
+ * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
+ */
+static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
+               const char *msg)
+{
+       int ret;
+       unsigned long stamp;
+
+       iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
+       iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
+       iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
+       /* be sure to flush this to the card */
+       wmb();
+       stamp = jiffies + 1 * HZ;
+       /* wait for card */
+       do {
+               /* DPRAM_FCT_HOST is _not_ aligned */
+               ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
+                       (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
+               /* don't have any cached variables */
+               rmb();
+               if (ret == RES_OK)
+                       /* read return-value now */
+                       return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
+
+               if ((ret != vector) || time_after(jiffies, stamp))
+                       break;
+               /* process context => relax */
+               usleep_range(500, 10000);
+       } while (1);
+
+       ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+       dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
+       return ret;
+}
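_softing_fct_cmd() above is a mailbox-style handshake: preset the parameter, write the command vector into DPRAM_FCT_HOST, then poll until the card replaces the vector with RES_OK, returns something unexpected, or a one-second deadline expires. A rough user-space sketch of that poll-until-acknowledged-or-timeout idiom follows; the 'receipt' variable only stands in for the DPRAM cell (nothing updates it here), so running it simply exercises the timeout path:

#include <stdio.h>
#include <time.h>

#define RES_OK	1

/* stand-in for the DPRAM receipt cell; a real card would overwrite it */
static volatile int receipt;

static int poll_receipt(int vector, double timeout_s)
{
	struct timespec start, now, pause = { .tv_nsec = 500 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		int ret = receipt;	/* read the "DPRAM" cell */

		if (ret == RES_OK)
			return 0;	/* card acknowledged */
		if (ret != vector)
			return -1;	/* unexpected reply, give up */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) +
		    (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_s)
			return -2;	/* timed out, like -ETIMEDOUT */
		/* the driver sleeps via usleep_range() instead of spinning */
		nanosleep(&pause, NULL);
	}
}

int main(void)
{
	receipt = 0x55;			/* pretend vector 0x55 was posted */
	printf("poll result: %d\n", poll_receipt(0x55, 0.05));
	return 0;
}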
+
+static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
+{
+       int ret;
+
+       ret = _softing_fct_cmd(card, cmd, 0, msg);
+       if (ret > 0) {
+               dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
+               ret = -EIO;
+       }
+       return ret;
+}
+
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+               const char *msg)
+{
+       int ret;
+       unsigned long stamp;
+
+       iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
+       iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
+       /* be sure to flush this to the card */
+       wmb();
+       stamp = jiffies + 3 * HZ;
+       /* wait for card */
+       do {
+               ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
+               /* don't have any cached variables */
+               rmb();
+               if (ret == RES_OK)
+                       return 0;
+               if (time_after(jiffies, stamp))
+                       break;
+               /* process context => relax */
+               usleep_range(500, 10000);
+       } while (!signal_pending(current));
+
+       ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+       dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
+       return ret;
+}
+
+static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
+               uint16_t *plen, const uint8_t **pdat)
+{
+       uint16_t checksum[2];
+       const uint8_t *mem;
+       const uint8_t *end;
+
+       /*
+        * firmware records are a binary, unaligned stream composed of:
+        * uint16_t type;
+        * uint32_t addr;
+        * uint16_t len;
+        * uint8_t dat[len];
+        * uint16_t checksum;
+        * all values in little endian.
+        * We could define a struct for this, with __attribute__((packed)),
+        * but would that solve the alignment in _all_ cases (cf. the
+        * struct itself may start at an odd address)?
+        *
+        * I chose to use leXX_to_cpup() since this solves both
+        * endianness & alignment.
+        */
+       mem = *pmem;
+       *ptype = le16_to_cpup((void *)&mem[0]);
+       *paddr = le32_to_cpup((void *)&mem[2]);
+       *plen = le16_to_cpup((void *)&mem[6]);
+       *pdat = &mem[8];
+       /* verify checksum */
+       end = &mem[8 + *plen];
+       checksum[0] = le16_to_cpup((void *)end);
+       for (checksum[1] = 0; mem < end; ++mem)
+               checksum[1] += *mem;
+       if (checksum[0] != checksum[1])
+               return -EINVAL;
+       /* increment */
+       *pmem += 10 + *plen;
+       return 0;
+}
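fw_parse() above consumes one record per call: a little-endian type/addr/len header, the payload, then a 16-bit additive checksum taken over header plus payload. A self-contained sketch of the same framing on a made-up record buffer (values invented purely for illustration):

#include <stdio.h>
#include <stdint.h>

/* read little-endian values from an unaligned byte stream */
static unsigned int get_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

static unsigned long get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((unsigned long)p[2] << 16) |
	       ((unsigned long)p[3] << 24);
}

int main(void)
{
	/* made-up record: type 0 (data), addr 0x0d0000, len 2, payload AA 55 */
	uint8_t rec[] = {
		0x00, 0x00,		/* type, little endian     */
		0x00, 0x00, 0x0d, 0x00,	/* addr, little endian     */
		0x02, 0x00,		/* len, little endian      */
		0xaa, 0x55,		/* payload                 */
		0x00, 0x00,		/* checksum, patched below */
	};
	unsigned int sum = 0;
	size_t i;

	/* the checksum covers every byte of the header and the payload */
	for (i = 0; i < sizeof(rec) - 2; i++)
		sum += rec[i];
	rec[sizeof(rec) - 2] = sum & 0xff;
	rec[sizeof(rec) - 1] = (sum >> 8) & 0xff;

	printf("type=0x%04x addr=0x%06lx len=%u checksum=0x%04x\n",
	       get_le16(&rec[0]), get_le32(&rec[2]), get_le16(&rec[6]),
	       get_le16(&rec[sizeof(rec) - 2]));
	return 0;
}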
+
+int softing_load_fw(const char *file, struct softing *card,
+               __iomem uint8_t *dpram, unsigned int size, int offset)
+{
+       const struct firmware *fw;
+       int ret;
+       const uint8_t *mem, *end, *dat;
+       uint16_t type, len;
+       uint32_t addr;
+       uint8_t *buf = NULL;
+       int buflen = 0;
+       int8_t type_end = 0;
+
+       ret = request_firmware(&fw, file, &card->pdev->dev);
+       if (ret < 0)
+               return ret;
+       dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
+               ", offset %c0x%04x\n",
+               card->pdat->name, file, (unsigned int)fw->size,
+               (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
+       /* parse the firmware */
+       mem = fw->data;
+       end = &mem[fw->size];
+       /* look for header record */
+       ret = fw_parse(&mem, &type, &addr, &len, &dat);
+       if (ret < 0)
+               goto failed;
+       if (type != 0xffff)
+               goto failed;
+       if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+               ret = -EINVAL;
+               goto failed;
+       }
+       /* ok, we had a header */
+       while (mem < end) {
+               ret = fw_parse(&mem, &type, &addr, &len, &dat);
+               if (ret < 0)
+                       goto failed;
+               if (type == 3) {
+                       /* start address, not used here */
+                       continue;
+               } else if (type == 1) {
+                       /* eof */
+                       type_end = 1;
+                       break;
+               } else if (type != 0) {
+                       ret = -EINVAL;
+                       goto failed;
+               }
+
+               if ((addr + len + offset) > size)
+                       goto failed;
+               memcpy_toio(&dpram[addr + offset], dat, len);
+               /* be sure to flush caches from IO space */
+               mb();
+               if (len > buflen) {
+                       /* align buflen */
+                       buflen = (len + (1024-1)) & ~(1024-1);
+                       buf = krealloc(buf, buflen, GFP_KERNEL);
+                       if (!buf) {
+                               ret = -ENOMEM;
+                               goto failed;
+                       }
+               }
+               /* verify record data */
+               memcpy_fromio(buf, &dpram[addr + offset], len);
+               if (memcmp(buf, dat, len)) {
+                       /* is not ok */
+                       dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
+                       ret = -EIO;
+                       goto failed;
+               }
+       }
+       if (!type_end)
+               /* no end record seen */
+               goto failed;
+       ret = 0;
+failed:
+       kfree(buf);
+       release_firmware(fw);
+       if (ret < 0)
+               dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+       return ret;
+}
+
+int softing_load_app_fw(const char *file, struct softing *card)
+{
+       const struct firmware *fw;
+       const uint8_t *mem, *end, *dat;
+       int ret, j;
+       uint16_t type, len;
+       uint32_t addr, start_addr = 0;
+       unsigned int sum, rx_sum;
+       int8_t type_end = 0, type_entrypoint = 0;
+
+       ret = request_firmware(&fw, file, &card->pdev->dev);
+       if (ret) {
+               dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
+                       file, ret);
+               return ret;
+       }
+       dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
+               file, (unsigned long)fw->size);
+       /* parse the firmware */
+       mem = fw->data;
+       end = &mem[fw->size];
+       /* look for header record */
+       ret = fw_parse(&mem, &type, &addr, &len, &dat);
+       if (ret)
+               goto failed;
+       ret = -EINVAL;
+       if (type != 0xffff) {
+               dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
+                       type);
+               goto failed;
+       }
+       if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+               dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
+                               len, dat);
+               goto failed;
+       }
+       /* ok, we had a header */
+       while (mem < end) {
+               ret = fw_parse(&mem, &type, &addr, &len, &dat);
+               if (ret)
+                       goto failed;
+
+               if (type == 3) {
+                       /* start address */
+                       start_addr = addr;
+                       type_entrypoint = 1;
+                       continue;
+               } else if (type == 1) {
+                       /* eof */
+                       type_end = 1;
+                       break;
+               } else if (type != 0) {
+                       dev_alert(&card->pdev->dev,
+                                       "unknown record type 0x%04x\n", type);
+                       ret = -EINVAL;
+                       goto failed;
+               }
+
+               /* regular data */
+               for (sum = 0, j = 0; j < len; ++j)
+                       sum += dat[j];
+               /* work in 16bit (target) */
+               sum &= 0xffff;
+
+               memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
+               iowrite32(card->pdat->app.offs + card->pdat->app.addr,
+                               &card->dpram[DPRAM_COMMAND + 2]);
+               iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
+               iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
+               iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
+               ret = softing_bootloader_command(card, 1, "loading app.");
+               if (ret < 0)
+                       goto failed;
+               /* verify checksum */
+               rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
+               if (rx_sum != sum) {
+                       dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
+                               ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
+                       ret = -EIO;
+                       goto failed;
+               }
+       }
+       if (!type_end || !type_entrypoint)
+               goto failed;
+       /* start application in card */
+       iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
+       iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
+       ret = softing_bootloader_command(card, 3, "start app.");
+       if (ret < 0)
+               goto failed;
+       ret = 0;
+failed:
+       release_firmware(fw);
+       if (ret < 0)
+               dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+       return ret;
+}
+
+static int softing_reset_chip(struct softing *card)
+{
+       int ret;
+
+       do {
+               /* reset chip */
+               iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
+               iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
+               iowrite8(1, &card->dpram[DPRAM_RESET]);
+               iowrite8(0, &card->dpram[DPRAM_RESET+1]);
+
+               ret = softing_fct_cmd(card, 0, "reset_can");
+               if (!ret)
+                       break;
+               if (signal_pending(current))
+                       /* don't wait any longer */
+                       break;
+       } while (1);
+       card->tx.pending = 0;
+       return ret;
+}
+
+int softing_chip_poweron(struct softing *card)
+{
+       int ret;
+       /* sync */
+       ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
+       if (ret < 0)
+               goto failed;
+
+       ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
+       if (ret < 0)
+               goto failed;
+
+       ret = softing_reset_chip(card);
+       if (ret < 0)
+               goto failed;
+       /* get_serial */
+       ret = softing_fct_cmd(card, 43, "get_serial_number");
+       if (ret < 0)
+               goto failed;
+       card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
+       /* get_version */
+       ret = softing_fct_cmd(card, 12, "get_version");
+       if (ret < 0)
+               goto failed;
+       card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
+       card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
+       card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
+       card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
+       card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
+       return 0;
+failed:
+       return ret;
+}
+
+static void softing_initialize_timestamp(struct softing *card)
+{
+       uint64_t ovf;
+
+       card->ts_ref = ktime_get();
+
+       /* 16MHz is the reference */
+       ovf = 0x100000000ULL * 16;
+       do_div(ovf, card->pdat->freq ?: 16);
+
+       card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+}
+
+ktime_t softing_raw2ktime(struct softing *card, u32 raw)
+{
+       uint64_t rawl;
+       ktime_t now, real_offset;
+       ktime_t target;
+       ktime_t tmp;
+
+       now = ktime_get();
+       real_offset = ktime_sub(ktime_get_real(), now);
+
+       /* find nsec from card */
+       rawl = raw * 16;
+       do_div(rawl, card->pdat->freq ?: 16);
+       target = ktime_add_us(card->ts_ref, rawl);
+       /* test for overflows */
+       tmp = ktime_add(target, card->ts_overflow);
+       while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
+               card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
+               target = tmp;
+               tmp = ktime_add(target, card->ts_overflow);
+       }
+       return ktime_add(target, real_offset);
+}
+
+static inline int softing_error_reporting(struct net_device *netdev)
+{
+       struct softing_priv *priv = netdev_priv(netdev);
+
+       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+               ? 1 : 0;
+}
+
+int softing_startstop(struct net_device *dev, int up)
+{
+       int ret;
+       struct softing *card;
+       struct softing_priv *priv;
+       struct net_device *netdev;
+       int bus_bitmask_start;
+       int j, error_reporting;
+       struct can_frame msg;
+       const struct can_bittiming *bt;
+
+       priv = netdev_priv(dev);
+       card = priv->card;
+
+       if (!card->fw.up)
+               return -EIO;
+
+       ret = mutex_lock_interruptible(&card->fw.lock);
+       if (ret)
+               return ret;
+
+       bus_bitmask_start = 0;
+       if (dev && up)
+               /* prepare to start this bus as well */
+               bus_bitmask_start |= (1 << priv->index);
+       /* bring netdevs down */
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               netdev = card->net[j];
+               if (!netdev)
+                       continue;
+               priv = netdev_priv(netdev);
+
+               if (dev != netdev)
+                       netif_stop_queue(netdev);
+
+               if (netif_running(netdev)) {
+                       if (dev != netdev)
+                               bus_bitmask_start |= (1 << j);
+                       priv->tx.pending = 0;
+                       priv->tx.echo_put = 0;
+                       priv->tx.echo_get = 0;
+                       /*
+                        * this bus may have just called open_candev(),
+                        * so calling close_candev() right away looks odd.
+                        * however, we may also get here from bus-off
+                        * recovery, in which case the echo_skb queue
+                        * _needs_ flushing too.
+                        * just be sure to call open_candev() again
+                        */
+                       close_candev(netdev);
+               }
+               priv->can.state = CAN_STATE_STOPPED;
+       }
+       card->tx.pending = 0;
+
+       softing_enable_irq(card, 0);
+       ret = softing_reset_chip(card);
+       if (ret)
+               goto failed;
+       if (!bus_bitmask_start)
+               /* no busses to be brought up */
+               goto card_done;
+
+       if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
+                       && (softing_error_reporting(card->net[0])
+                               != softing_error_reporting(card->net[1]))) {
+               dev_alert(&card->pdev->dev,
+                               "err_reporting flag differs for busses\n");
+               goto invalid;
+       }
+       error_reporting = 0;
+       if (bus_bitmask_start & 1) {
+               netdev = card->net[0];
+               priv = netdev_priv(netdev);
+               error_reporting += softing_error_reporting(netdev);
+               /* init chip 1 */
+               bt = &priv->can.bittiming;
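+               /*
+                * FCT parameter block for initialize_chip:
+                * +2 brp, +4 sjw, +6 tseg1 (prop_seg + phase_seg1),
+                * +8 tseg2, +10 triple-sampling flag
+                */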
+               iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+               iowrite16(bt->phase_seg1 + bt->prop_seg,
+                               &card->dpram[DPRAM_FCT_PARAM + 6]);
+               iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+               iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+                               &card->dpram[DPRAM_FCT_PARAM + 10]);
+               ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
+               if (ret < 0)
+                       goto failed;
+               /* set mode */
+               iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+               ret = softing_fct_cmd(card, 3, "set_mode[0]");
+               if (ret < 0)
+                       goto failed;
+               /* set filter */
+               /* 11bit id & mask */
+               iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+               /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+               iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+               iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+               iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+               iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+               ret = softing_fct_cmd(card, 7, "set_filter[0]");
+               if (ret < 0)
+                       goto failed;
+               /* set output control */
+               iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               ret = softing_fct_cmd(card, 5, "set_output[0]");
+               if (ret < 0)
+                       goto failed;
+       }
+       if (bus_bitmask_start & 2) {
+               netdev = card->net[1];
+               priv = netdev_priv(netdev);
+               error_reporting += softing_error_reporting(netdev);
+               /* init chip2 */
+               bt = &priv->can.bittiming;
+               iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+               iowrite16(bt->phase_seg1 + bt->prop_seg,
+                               &card->dpram[DPRAM_FCT_PARAM + 6]);
+               iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+               iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+                               &card->dpram[DPRAM_FCT_PARAM + 10]);
+               ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
+               if (ret < 0)
+                       goto failed;
+               /* set mode2 */
+               iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+               ret = softing_fct_cmd(card, 4, "set_mode[1]");
+               if (ret < 0)
+                       goto failed;
+               /* set filter2 */
+               /* 11bit id & mask */
+               iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+               /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+               iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+               iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+               iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+               iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+               ret = softing_fct_cmd(card, 8, "set_filter[1]");
+               if (ret < 0)
+                       goto failed;
+               /* set output control2 */
+               iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+               ret = softing_fct_cmd(card, 6, "set_output[1]");
+               if (ret < 0)
+                       goto failed;
+       }
+       /* enable_error_frame */
+       /*
+        * Error reporting is switched off for now since
+        * reception of error frames is not yet fully verified.
+        * This should be enabled sooner or later.
+        *
+       if (error_reporting) {
+               ret = softing_fct_cmd(card, 51, "enable_error_frame");
+               if (ret < 0)
+                       goto failed;
+       }
+       */
+       /* initialize interface */
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
+       iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
+       ret = softing_fct_cmd(card, 17, "initialize_interface");
+       if (ret < 0)
+               goto failed;
+       /* enable_fifo */
+       ret = softing_fct_cmd(card, 36, "enable_fifo");
+       if (ret < 0)
+               goto failed;
+       /* enable fifo tx ack */
+       ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
+       if (ret < 0)
+               goto failed;
+       /* enable fifo tx ack2 */
+       ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
+       if (ret < 0)
+               goto failed;
+       /* start_chip */
+       ret = softing_fct_cmd(card, 11, "start_chip");
+       if (ret < 0)
+               goto failed;
+       iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
+       iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
+       if (card->pdat->generation < 2) {
+               iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+               /* flush the DPRAM caches */
+               wmb();
+       }
+
+       softing_initialize_timestamp(card);
+
+       /*
+        * do socketcan notifications/status changes
+        * from here on, no errors should occur, or the failed:
+        * path must be reviewed
+        */
+       memset(&msg, 0, sizeof(msg));
+       msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+       msg.can_dlc = CAN_ERR_DLC;
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               if (!(bus_bitmask_start & (1 << j)))
+                       continue;
+               netdev = card->net[j];
+               if (!netdev)
+                       continue;
+               priv = netdev_priv(netdev);
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               open_candev(netdev);
+               if (dev != netdev) {
+                       /* notify other busses on the restart */
+                       softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+                       ++priv->can.can_stats.restarts;
+               }
+               netif_wake_queue(netdev);
+       }
+
+       /* enable interrupts */
+       ret = softing_enable_irq(card, 1);
+       if (ret)
+               goto failed;
+card_done:
+       mutex_unlock(&card->fw.lock);
+       return 0;
+invalid:
+       ret = -EINVAL;
+failed:
+       softing_enable_irq(card, 0);
+       softing_reset_chip(card);
+       mutex_unlock(&card->fw.lock);
+       /* bring all other interfaces down */
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               netdev = card->net[j];
+               if (!netdev)
+                       continue;
+               dev_close(netdev);
+       }
+       return ret;
+}
+
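+/*
+ * default value for the output control byte, programmed by
+ * softing_startstop() via the set_output command; depends on the
+ * CAN controller type (priv->chip) and the card generation
+ */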
+int softing_default_output(struct net_device *netdev)
+{
+       struct softing_priv *priv = netdev_priv(netdev);
+       struct softing *card = priv->card;
+
+       switch (priv->chip) {
+       case 1000:
+               return (card->pdat->generation < 2) ? 0xfb : 0xfa;
+       case 5:
+               return 0x60;
+       default:
+               return 0x40;
+       }
+}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644 (file)
index 0000000..aeea9f9
--- /dev/null
@@ -0,0 +1,894 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "softing.h"
+
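+/* per-netdev share of the card-wide tx fifo (TXMAX), used for echo skbs */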
+#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
+
+/*
+ * test whether a specific CAN netdev
+ * is online (i.e. up and running, not sleeping, not bus-off)
+ */
+static inline int canif_is_active(struct net_device *netdev)
+{
+       struct can_priv *can = netdev_priv(netdev);
+
+       if (!netif_running(netdev))
+               return 0;
+       return (can->state <= CAN_STATE_ERROR_PASSIVE);
+}
+
+/* reset DPRAM */
+static inline void softing_set_reset_dpram(struct softing *card)
+{
+       if (card->pdat->generation >= 2) {
+               spin_lock_bh(&card->spin);
+               iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
+                               &card->dpram[DPRAM_V2_RESET]);
+               spin_unlock_bh(&card->spin);
+       }
+}
+
+static inline void softing_clr_reset_dpram(struct softing *card)
+{
+       if (card->pdat->generation >= 2) {
+               spin_lock_bh(&card->spin);
+               iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
+                               &card->dpram[DPRAM_V2_RESET]);
+               spin_unlock_bh(&card->spin);
+       }
+}
+
+/* queue a CAN frame into the card's DPRAM tx fifo */
+static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
+               struct net_device *dev)
+{
+       struct softing_priv *priv = netdev_priv(dev);
+       struct softing *card = priv->card;
+       int ret;
+       uint8_t *ptr;
+       uint8_t fifo_wr, fifo_rd;
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       uint8_t buf[DPRAM_TX_SIZE];
+
+       if (can_dropped_invalid_skb(dev, skb))
+               return NETDEV_TX_OK;
+
+       spin_lock(&card->spin);
+
+       ret = NETDEV_TX_BUSY;
+       if (!card->fw.up ||
+                       (card->tx.pending >= TXMAX) ||
+                       (priv->tx.pending >= TX_ECHO_SKB_MAX))
+               goto xmit_done;
+       fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
+       fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
+       if (fifo_wr == fifo_rd)
+               /* fifo full */
+               goto xmit_done;
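+       /*
+        * build the DPRAM tx record: command byte (CMD_TX plus the
+        * RTR/XTD/BUS2 flags), dlc, the CAN id stored LSB first
+        * (2 bytes for standard, 4 for extended frames), then the data
+        */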
+       memset(buf, 0, sizeof(buf));
+       ptr = buf;
+       *ptr = CMD_TX;
+       if (cf->can_id & CAN_RTR_FLAG)
+               *ptr |= CMD_RTR;
+       if (cf->can_id & CAN_EFF_FLAG)
+               *ptr |= CMD_XTD;
+       if (priv->index)
+               *ptr |= CMD_BUS2;
+       ++ptr;
+       *ptr++ = cf->can_dlc;
+       *ptr++ = (cf->can_id >> 0);
+       *ptr++ = (cf->can_id >> 8);
+       if (cf->can_id & CAN_EFF_FLAG) {
+               *ptr++ = (cf->can_id >> 16);
+               *ptr++ = (cf->can_id >> 24);
+       } else {
+               /* increment 1, not 2 as you might think */
+               ptr += 1;
+       }
+       if (!(cf->can_id & CAN_RTR_FLAG))
+               memcpy(ptr, &cf->data[0], cf->can_dlc);
+       memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
+                       buf, DPRAM_TX_SIZE);
+       if (++fifo_wr >= DPRAM_TX_CNT)
+               fifo_wr = 0;
+       iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
+       card->tx.last_bus = priv->index;
+       ++card->tx.pending;
+       ++priv->tx.pending;
+       can_put_echo_skb(skb, dev, priv->tx.echo_put);
+       ++priv->tx.echo_put;
+       if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
+               priv->tx.echo_put = 0;
+       /* can_put_echo_skb() saves the skb, safe to return TX_OK */
+       ret = NETDEV_TX_OK;
+xmit_done:
+       spin_unlock(&card->spin);
+       if (card->tx.pending >= TXMAX) {
+               int j;
+               for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+                       if (card->net[j])
+                               netif_stop_queue(card->net[j]);
+               }
+       }
+       if (ret != NETDEV_TX_OK)
+               netif_stop_queue(dev);
+
+       return ret;
+}
+
+/*
+ * shortcut for skb delivery
+ */
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+               ktime_t ktime)
+{
+       struct sk_buff *skb;
+       struct can_frame *cf;
+
+       skb = alloc_can_skb(netdev, &cf);
+       if (!skb)
+               return -ENOMEM;
+       memcpy(cf, msg, sizeof(*msg));
+       skb->tstamp = ktime;
+       return netif_rx(skb);
+}
+
+/*
+ * softing_handle_1
+ * pop 1 entry from the DPRAM queue, and process
+ */
+static int softing_handle_1(struct softing *card)
+{
+       struct net_device *netdev;
+       struct softing_priv *priv;
+       ktime_t ktime;
+       struct can_frame msg;
+       int cnt = 0, lost_msg;
+       uint8_t fifo_rd, fifo_wr, cmd;
+       uint8_t *ptr;
+       uint32_t tmp_u32;
+       uint8_t buf[DPRAM_RX_SIZE];
+
+       memset(&msg, 0, sizeof(msg));
+       /* test for lost msgs */
+       lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
+       if (lost_msg) {
+               int j;
+               /* reset condition */
+               iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
+               /* prepare msg */
+               msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+               msg.can_dlc = CAN_ERR_DLC;
+               msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+               /*
+                * service all busses; we do not know which one
+                * the overflow applied to, but only service
+                * busses that are online
+                */
+               for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+                       netdev = card->net[j];
+                       if (!netdev)
+                               continue;
+                       if (!canif_is_active(netdev))
+                               /* a dead bus has no overflows */
+                               continue;
+                       ++netdev->stats.rx_over_errors;
+                       softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+               }
+               /* prepare for other use */
+               memset(&msg, 0, sizeof(msg));
+               ++cnt;
+       }
+
+       fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
+       fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
+
+       if (++fifo_rd >= DPRAM_RX_CNT)
+               fifo_rd = 0;
+       if (fifo_wr == fifo_rd)
+               return cnt;
+
+       memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
+                       DPRAM_RX_SIZE);
+       mb();
+       /* trigger dual port RAM */
+       iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
+
+       ptr = buf;
+       cmd = *ptr++;
+       if (cmd == 0xff)
+               /* not quite useful; the card has probably gone away */
+               return 0;
+       netdev = card->net[0];
+       if (cmd & CMD_BUS2)
+               netdev = card->net[1];
+       priv = netdev_priv(netdev);
+
+       if (cmd & CMD_ERR) {
+               uint8_t can_state, state;
+
+               state = *ptr++;
+
+               msg.can_id = CAN_ERR_FLAG;
+               msg.can_dlc = CAN_ERR_DLC;
+
+               if (state & SF_MASK_BUSOFF) {
+                       can_state = CAN_STATE_BUS_OFF;
+                       msg.can_id |= CAN_ERR_BUSOFF;
+                       state = STATE_BUSOFF;
+               } else if (state & SF_MASK_EPASSIVE) {
+                       can_state = CAN_STATE_ERROR_PASSIVE;
+                       msg.can_id |= CAN_ERR_CRTL;
+                       msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+                       state = STATE_EPASSIVE;
+               } else {
+                       can_state = CAN_STATE_ERROR_ACTIVE;
+                       msg.can_id |= CAN_ERR_CRTL;
+                       state = STATE_EACTIVE;
+               }
+               /* update DPRAM */
+               iowrite8(state, &card->dpram[priv->index ?
+                               DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
+               /* timestamp */
+               tmp_u32 = le32_to_cpup((void *)ptr);
+               ptr += 4;
+               ktime = softing_raw2ktime(card, tmp_u32);
+
+               ++netdev->stats.rx_errors;
+               /* update internal status */
+               if (can_state != priv->can.state) {
+                       priv->can.state = can_state;
+                       if (can_state == CAN_STATE_ERROR_PASSIVE)
+                               ++priv->can.can_stats.error_passive;
+                       else if (can_state == CAN_STATE_BUS_OFF) {
+                               /* this calls can_close_cleanup() */
+                               can_bus_off(netdev);
+                               netif_stop_queue(netdev);
+                       }
+                       /* trigger socketcan */
+                       softing_netdev_rx(netdev, &msg, ktime);
+               }
+
+       } else {
+               if (cmd & CMD_RTR)
+                       msg.can_id |= CAN_RTR_FLAG;
+               msg.can_dlc = get_can_dlc(*ptr++);
+               if (cmd & CMD_XTD) {
+                       msg.can_id |= CAN_EFF_FLAG;
+                       msg.can_id |= le32_to_cpup((void *)ptr);
+                       ptr += 4;
+               } else {
+                       msg.can_id |= le16_to_cpup((void *)ptr);
+                       ptr += 2;
+               }
+               /* timestamp */
+               tmp_u32 = le32_to_cpup((void *)ptr);
+               ptr += 4;
+               ktime = softing_raw2ktime(card, tmp_u32);
+               if (!(msg.can_id & CAN_RTR_FLAG))
+                       memcpy(&msg.data[0], ptr, 8);
+               ptr += 8;
+               /* update socket */
+               if (cmd & CMD_ACK) {
+                       /* acknowledge, was tx msg */
+                       struct sk_buff *skb;
+                       skb = priv->can.echo_skb[priv->tx.echo_get];
+                       if (skb)
+                               skb->tstamp = ktime;
+                       can_get_echo_skb(netdev, priv->tx.echo_get);
+                       ++priv->tx.echo_get;
+                       if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
+                               priv->tx.echo_get = 0;
+                       if (priv->tx.pending)
+                               --priv->tx.pending;
+                       if (card->tx.pending)
+                               --card->tx.pending;
+                       ++netdev->stats.tx_packets;
+                       if (!(msg.can_id & CAN_RTR_FLAG))
+                               netdev->stats.tx_bytes += msg.can_dlc;
+               } else {
+                       int ret;
+
+                       ret = softing_netdev_rx(netdev, &msg, ktime);
+                       if (ret == NET_RX_SUCCESS) {
+                               ++netdev->stats.rx_packets;
+                               if (!(msg.can_id & CAN_RTR_FLAG))
+                                       netdev->stats.rx_bytes += msg.can_dlc;
+                       } else {
+                               ++netdev->stats.rx_dropped;
+                       }
+               }
+       }
+       ++cnt;
+       return cnt;
+}
+
+/*
+ * real interrupt handler
+ */
+static irqreturn_t softing_irq_thread(int irq, void *dev_id)
+{
+       struct softing *card = (struct softing *)dev_id;
+       struct net_device *netdev;
+       struct softing_priv *priv;
+       int j, offset, work_done;
+
+       work_done = 0;
+       spin_lock_bh(&card->spin);
+       while (softing_handle_1(card) > 0) {
+               ++card->irq.svc_count;
+               ++work_done;
+       }
+       spin_unlock_bh(&card->spin);
+       /* resume tx queues */
+       offset = card->tx.last_bus;
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               if (card->tx.pending >= TXMAX)
+                       break;
+               netdev = card->net[(j + offset + 1) % card->pdat->nbus];
+               if (!netdev)
+                       continue;
+               priv = netdev_priv(netdev);
+               if (!canif_is_active(netdev))
+                       /* it makes no sense to wake dead busses */
+                       continue;
+               if (priv->tx.pending >= TX_ECHO_SKB_MAX)
+                       continue;
+               ++work_done;
+               netif_wake_queue(netdev);
+       }
+       return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * interrupt routines:
+ * schedule the 'real interrupt handler'
+ */
+static irqreturn_t softing_irq_v2(int irq, void *dev_id)
+{
+       struct softing *card = (struct softing *)dev_id;
+       uint8_t ir;
+
+       ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
+       iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+       return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t softing_irq_v1(int irq, void *dev_id)
+{
+       struct softing *card = (struct softing *)dev_id;
+       uint8_t ir;
+
+       ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
+       iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
+       return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * netdev/candev inter-operability
+ */
+static int softing_netdev_open(struct net_device *ndev)
+{
+       int ret;
+
+       /* check or determine and set bittiming */
+       ret = open_candev(ndev);
+       if (!ret)
+               ret = softing_startstop(ndev, 1);
+       return ret;
+}
+
+static int softing_netdev_stop(struct net_device *ndev)
+{
+       int ret;
+
+       netif_stop_queue(ndev);
+
+       /* the softing start/stop cycle does close_candev() */
+       ret = softing_startstop(ndev, 0);
+       return ret;
+}
+
+static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+       int ret;
+
+       switch (mode) {
+       case CAN_MODE_START:
+               /* softing_startstop does close_candev() */
+               ret = softing_startstop(ndev, 1);
+               return ret;
+       case CAN_MODE_STOP:
+       case CAN_MODE_SLEEP:
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+/*
+ * Softing device management helpers
+ */
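+/*
+ * request or free the card interrupt as a threaded irq, picking the
+ * generation 1 or 2 top half, and remember whether it is requested
+ */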
+int softing_enable_irq(struct softing *card, int enable)
+{
+       int ret;
+
+       if (!card->irq.nr) {
+               return 0;
+       } else if (card->irq.requested && !enable) {
+               free_irq(card->irq.nr, card);
+               card->irq.requested = 0;
+       } else if (!card->irq.requested && enable) {
+               ret = request_threaded_irq(card->irq.nr,
+                               (card->pdat->generation >= 2) ?
+                                       softing_irq_v2 : softing_irq_v1,
+                               softing_irq_thread, IRQF_SHARED,
+                               dev_name(&card->pdev->dev), card);
+               if (ret) {
+                       dev_alert(&card->pdev->dev,
+                                       "request_threaded_irq(%u) failed\n",
+                                       card->irq.nr);
+                       return ret;
+               }
+               card->irq.requested = 1;
+       }
+       return 0;
+}
+
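+/*
+ * mark the firmware as down, release the interrupt and put the
+ * card back into reset
+ */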
+static void softing_card_shutdown(struct softing *card)
+{
+       int fw_up = 0;
+
+       if (mutex_lock_interruptible(&card->fw.lock))
+               /* return -ERESTARTSYS */;
+       fw_up = card->fw.up;
+       card->fw.up = 0;
+
+       if (card->irq.requested && card->irq.nr) {
+               free_irq(card->irq.nr, card);
+               card->irq.requested = 0;
+       }
+       if (fw_up) {
+               if (card->pdat->enable_irq)
+                       card->pdat->enable_irq(card->pdev, 0);
+               softing_set_reset_dpram(card);
+               if (card->pdat->reset)
+                       card->pdat->reset(card->pdev, 1);
+       }
+       mutex_unlock(&card->fw.lock);
+}
+
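+/*
+ * boot sequence: verify the DPRAM with a write/read-back pattern,
+ * load the bootstrap and loader firmware, release the reset, let the
+ * bootloader run, then load the application firmware and power on
+ * the CAN chips
+ */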
+static __devinit int softing_card_boot(struct softing *card)
+{
+       int ret, j;
+       static const uint8_t stream[] = {
+               0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
+       unsigned char back[sizeof(stream)];
+
+       if (mutex_lock_interruptible(&card->fw.lock))
+               return -ERESTARTSYS;
+       if (card->fw.up) {
+               mutex_unlock(&card->fw.lock);
+               return 0;
+       }
+       /* reset board */
+       if (card->pdat->enable_irq)
+               card->pdat->enable_irq(card->pdev, 1);
+       /* boot card */
+       softing_set_reset_dpram(card);
+       if (card->pdat->reset)
+               card->pdat->reset(card->pdev, 1);
+       for (j = 0; (j + sizeof(stream)) < card->dpram_size;
+                       j += sizeof(stream)) {
+
+               memcpy_toio(&card->dpram[j], stream, sizeof(stream));
+               /* flush IO cache */
+               mb();
+               memcpy_fromio(back, &card->dpram[j], sizeof(stream));
+
+               if (!memcmp(back, stream, sizeof(stream)))
+                       continue;
+               /* memory is not equal */
+               dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
+               ret = -EIO;
+               goto failed;
+       }
+       wmb();
+       /* load boot firmware */
+       ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
+                               card->dpram_size,
+                               card->pdat->boot.offs - card->pdat->boot.addr);
+       if (ret < 0)
+               goto failed;
+       /* load loader firmware */
+       ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
+                               card->dpram_size,
+                               card->pdat->load.offs - card->pdat->load.addr);
+       if (ret < 0)
+               goto failed;
+
+       if (card->pdat->reset)
+               card->pdat->reset(card->pdev, 0);
+       softing_clr_reset_dpram(card);
+       ret = softing_bootloader_command(card, 0, "card boot");
+       if (ret < 0)
+               goto failed;
+       ret = softing_load_app_fw(card->pdat->app.fw, card);
+       if (ret < 0)
+               goto failed;
+
+       ret = softing_chip_poweron(card);
+       if (ret < 0)
+               goto failed;
+
+       card->fw.up = 1;
+       mutex_unlock(&card->fw.lock);
+       return 0;
+failed:
+       card->fw.up = 0;
+       if (card->pdat->enable_irq)
+               card->pdat->enable_irq(card->pdev, 0);
+       softing_set_reset_dpram(card);
+       if (card->pdat->reset)
+               card->pdat->reset(card->pdev, 1);
+       mutex_unlock(&card->fw.lock);
+       return ret;
+}
+
+/*
+ * netdev sysfs
+ */
+static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct net_device *ndev = to_net_dev(dev);
+       struct softing_priv *priv = netdev2softing(ndev);
+
+       return sprintf(buf, "%i\n", priv->index);
+}
+
+static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct net_device *ndev = to_net_dev(dev);
+       struct softing_priv *priv = netdev2softing(ndev);
+
+       return sprintf(buf, "%i\n", priv->chip);
+}
+
+static ssize_t show_output(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct net_device *ndev = to_net_dev(dev);
+       struct softing_priv *priv = netdev2softing(ndev);
+
+       return sprintf(buf, "0x%02x\n", priv->output);
+}
+
+static ssize_t store_output(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct net_device *ndev = to_net_dev(dev);
+       struct softing_priv *priv = netdev2softing(ndev);
+       struct softing *card = priv->card;
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       val &= 0xFF;
+
+       ret = mutex_lock_interruptible(&card->fw.lock);
+       if (ret)
+               return -ERESTARTSYS;
+       if (netif_running(ndev)) {
+               mutex_unlock(&card->fw.lock);
+               return -EBUSY;
+       }
+       priv->output = val;
+       mutex_unlock(&card->fw.lock);
+       return count;
+}
+
+static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
+static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+
+static const struct attribute *const netdev_sysfs_attrs[] = {
+       &dev_attr_channel.attr,
+       &dev_attr_chip.attr,
+       &dev_attr_output.attr,
+       NULL,
+};
+static const struct attribute_group netdev_sysfs_group = {
+       .name = NULL,
+       .attrs = (struct attribute **)netdev_sysfs_attrs,
+};
+
+static const struct net_device_ops softing_netdev_ops = {
+       .ndo_open = softing_netdev_open,
+       .ndo_stop = softing_netdev_stop,
+       .ndo_start_xmit = softing_netdev_start_xmit,
+};
+
+static const struct can_bittiming_const softing_btr_const = {
+       .name = "softing",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4, /* overruled */
+       .brp_min = 1,
+       .brp_max = 32, /* overruled */
+       .brp_inc = 1,
+};
+
+
+static __devinit struct net_device *softing_netdev_create(struct softing *card,
+               uint16_t chip_id)
+{
+       struct net_device *netdev;
+       struct softing_priv *priv;
+
+       netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
+       if (!netdev) {
+               dev_alert(&card->pdev->dev, "alloc_candev failed\n");
+               return NULL;
+       }
+       priv = netdev_priv(netdev);
+       priv->netdev = netdev;
+       priv->card = card;
+       memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
+       priv->btr_const.brp_max = card->pdat->max_brp;
+       priv->btr_const.sjw_max = card->pdat->max_sjw;
+       priv->can.bittiming_const = &priv->btr_const;
+       priv->can.clock.freq = 8000000;
+       priv->chip = chip_id;
+       priv->output = softing_default_output(netdev);
+       SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+       netdev->flags |= IFF_ECHO;
+       netdev->netdev_ops = &softing_netdev_ops;
+       priv->can.do_set_mode = softing_candev_set_mode;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+       return netdev;
+}
+
+static __devinit int softing_netdev_register(struct net_device *netdev)
+{
+       int ret;
+
+       netdev->sysfs_groups[0] = &netdev_sysfs_group;
+       ret = register_candev(netdev);
+       if (ret) {
+               dev_alert(&netdev->dev, "register failed\n");
+               return ret;
+       }
+       return 0;
+}
+
+static void softing_netdev_cleanup(struct net_device *netdev)
+{
+       unregister_candev(netdev);
+       free_candev(netdev);
+}
+
+/*
+ * sysfs for Platform device
+ */
+#define DEV_ATTR_RO(name, member) \
+static ssize_t show_##name(struct device *dev, \
+               struct device_attribute *attr, char *buf) \
+{ \
+       struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+       return sprintf(buf, "%u\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define DEV_ATTR_RO_STR(name, member) \
+static ssize_t show_##name(struct device *dev, \
+               struct device_attribute *attr, char *buf) \
+{ \
+       struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+       return sprintf(buf, "%s\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+DEV_ATTR_RO(serial, id.serial);
+DEV_ATTR_RO_STR(firmware, pdat->app.fw);
+DEV_ATTR_RO(firmware_version, id.fw_version);
+DEV_ATTR_RO_STR(hardware, pdat->name);
+DEV_ATTR_RO(hardware_version, id.hw_version);
+DEV_ATTR_RO(license, id.license);
+DEV_ATTR_RO(frequency, id.freq);
+DEV_ATTR_RO(txpending, tx.pending);
+
+static struct attribute *softing_pdev_attrs[] = {
+       &dev_attr_serial.attr,
+       &dev_attr_firmware.attr,
+       &dev_attr_firmware_version.attr,
+       &dev_attr_hardware.attr,
+       &dev_attr_hardware_version.attr,
+       &dev_attr_license.attr,
+       &dev_attr_frequency.attr,
+       &dev_attr_txpending.attr,
+       NULL,
+};
+
+static const struct attribute_group softing_pdev_group = {
+       .name = NULL,
+       .attrs = softing_pdev_attrs,
+};
+
+/*
+ * platform driver
+ */
+static __devexit int softing_pdev_remove(struct platform_device *pdev)
+{
+       struct softing *card = platform_get_drvdata(pdev);
+       int j;
+
+       /* first, disable card */
+       softing_card_shutdown(card);
+
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               if (!card->net[j])
+                       continue;
+               softing_netdev_cleanup(card->net[j]);
+               card->net[j] = NULL;
+       }
+       sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+
+       iounmap(card->dpram);
+       kfree(card);
+       return 0;
+}
+
+static __devinit int softing_pdev_probe(struct platform_device *pdev)
+{
+       const struct softing_platform_data *pdat = pdev->dev.platform_data;
+       struct softing *card;
+       struct net_device *netdev;
+       struct softing_priv *priv;
+       struct resource *pres;
+       int ret;
+       int j;
+
+       if (!pdat) {
+               dev_warn(&pdev->dev, "no platform data\n");
+               return -EINVAL;
+       }
+       if (pdat->nbus > ARRAY_SIZE(card->net)) {
+               dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
+               return -EINVAL;
+       }
+
+       card = kzalloc(sizeof(*card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
+       card->pdat = pdat;
+       card->pdev = pdev;
+       platform_set_drvdata(pdev, card);
+       mutex_init(&card->fw.lock);
+       spin_lock_init(&card->spin);
+
+       ret = -EINVAL;
+       pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!pres)
+               goto platform_resource_failed;
+       card->dpram_phys = pres->start;
+       card->dpram_size = pres->end - pres->start + 1;
+       card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+       if (!card->dpram) {
+               dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
+               goto ioremap_failed;
+       }
+
+       pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (pres)
+               card->irq.nr = pres->start;
+
+       /* reset card */
+       ret = softing_card_boot(card);
+       if (ret < 0) {
+               dev_alert(&pdev->dev, "failed to boot\n");
+               goto boot_failed;
+       }
+
+       /* only now are the chips known */
+       card->id.freq = card->pdat->freq;
+
+       ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
+       if (ret < 0) {
+               dev_alert(&card->pdev->dev, "sysfs failed\n");
+               goto sysfs_failed;
+       }
+
+       ret = -ENOMEM;
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               card->net[j] = netdev =
+                       softing_netdev_create(card, card->id.chip[j]);
+               if (!netdev) {
+                       dev_alert(&pdev->dev, "failed to make can[%i]\n", j);
+                       goto netdev_failed;
+               }
+               priv = netdev_priv(card->net[j]);
+               priv->index = j;
+               ret = softing_netdev_register(netdev);
+               if (ret) {
+                       free_candev(netdev);
+                       card->net[j] = NULL;
+                       dev_alert(&card->pdev->dev,
+                                       "failed to register can[%i]\n", j);
+                       goto netdev_failed;
+               }
+       }
+       dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
+       return 0;
+
+netdev_failed:
+       for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+               if (!card->net[j])
+                       continue;
+               softing_netdev_cleanup(card->net[j]);
+       }
+       sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+sysfs_failed:
+       softing_card_shutdown(card);
+boot_failed:
+       iounmap(card->dpram);
+ioremap_failed:
+platform_resource_failed:
+       kfree(card);
+       return ret;
+}
+
+static struct platform_driver softing_driver = {
+       .driver = {
+               .name = "softing",
+               .owner = THIS_MODULE,
+       },
+       .probe = softing_pdev_probe,
+       .remove = __devexit_p(softing_pdev_remove),
+};
+
+MODULE_ALIAS("platform:softing");
+
+static int __init softing_start(void)
+{
+       return platform_driver_register(&softing_driver);
+}
+
+static void __exit softing_stop(void)
+{
+       platform_driver_unregister(&softing_driver);
+}
+
+module_init(softing_start);
+module_exit(softing_stop);
+
+MODULE_DESCRIPTION("Softing DPRAM CAN driver");
+MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644 (file)
index 0000000..ebbf698
--- /dev/null
@@ -0,0 +1,40 @@
+
+#include <linux/platform_device.h>
+
+#ifndef _SOFTING_DEVICE_H_
+#define _SOFTING_DEVICE_H_
+
+/* softing firmware directory prefix */
+#define fw_dir "softing-4.6/"
+
+struct softing_platform_data {
+       unsigned int manf;
+       unsigned int prod;
+       /*
+        * generation:
+        * 1st: NEC or SJA1000, 8 bit access, exclusive interrupt, ...
+        * 2nd: SJA1000 only, 16 bit access, shared interrupt
+        */
+       int generation;
+       int nbus; /* # busses on device */
+       unsigned int freq; /* operating frequency in Hz */
+       unsigned int max_brp;
+       unsigned int max_sjw;
+       unsigned long dpram_size;
+       const char *name;
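+       /* firmware images: bootstrap, loader and application */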
+       struct {
+               unsigned long offs;
+               unsigned long addr;
+               const char *fw;
+       } boot, load, app;
+       /*
+        * reset() function
+        * bring pdev in or out of reset, depending on value
+        */
+       int (*reset)(struct platform_device *pdev, int value);
+       int (*enable_irq)(struct platform_device *pdev, int value);
+};
+
+#endif
index 263a294..8cca60e 100644 (file)
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
 static DEFINE_RWLOCK(cnic_dev_lock);
 static DEFINE_MUTEX(cnic_lock);
 
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+       return rcu_dereference_protected(cnic_ulp_tbl[type],
+                                        lockdep_is_held(&cnic_lock));
+}
 
 static int cnic_service_bnx2(void *, void *);
 static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
                return -EINVAL;
        }
        mutex_lock(&cnic_lock);
-       if (cnic_ulp_tbl[ulp_type]) {
+       if (cnic_ulp_tbl_prot(ulp_type)) {
                pr_err("%s: Type %d has already been registered\n",
                       __func__, ulp_type);
                mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
                return -EINVAL;
        }
        mutex_lock(&cnic_lock);
-       ulp_ops = cnic_ulp_tbl[ulp_type];
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
        if (!ulp_ops) {
                pr_err("%s: Type %d has not been registered\n",
                       __func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
                return -EINVAL;
        }
        mutex_lock(&cnic_lock);
-       if (cnic_ulp_tbl[ulp_type] == NULL) {
+       if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
                pr_err("%s: Driver with type %d has not been registered\n",
                       __func__, ulp_type);
                mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 
        clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
        cp->ulp_handle[ulp_type] = ulp_ctx;
-       ulp_ops = cnic_ulp_tbl[ulp_type];
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
        rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
        cnic_hold(dev);
 
@@ -699,13 +706,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 {
        int i;
-       u32 *page_table = dma->pgtbl;
+       __le32 *page_table = (__le32 *) dma->pgtbl;
 
        for (i = 0; i < dma->num_pages; i++) {
                /* Each entry needs to be in big endian format. */
-               *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+               *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
                page_table++;
-               *page_table = (u32) dma->pg_map_arr[i];
+               *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
                page_table++;
        }
 }
@@ -713,13 +720,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 {
        int i;
-       u32 *page_table = dma->pgtbl;
+       __le32 *page_table = (__le32 *) dma->pgtbl;
 
        for (i = 0; i < dma->num_pages; i++) {
                /* Each entry needs to be in little endian format. */
-               *page_table = dma->pg_map_arr[i] & 0xffffffff;
+               *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
                page_table++;
-               *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+               *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
                page_table++;
        }
 }
@@ -2760,6 +2767,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
        u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
        int kcqe_cnt;
 
+       /* status block index must be read before reading other fields */
+       rmb();
        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 
        while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2779,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
                barrier();
                if (status_idx != *cp->kcq1.status_idx_ptr) {
                        status_idx = (u16) *cp->kcq1.status_idx_ptr;
+                       /* status block index must be read first */
+                       rmb();
                        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
                } else
                        break;
@@ -2888,6 +2899,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
        u32 last_status = *info->status_idx_ptr;
        int kcqe_cnt;
 
+       /* status block index must be read before reading the KCQ */
+       rmb();
        while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
 
                service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2911,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
                        break;
 
                last_status = *info->status_idx_ptr;
+               /* status block index must be read before reading the KCQ */
+               rmb();
        }
        return last_status;
 }
@@ -2906,26 +2921,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
 {
        struct cnic_dev *dev = (struct cnic_dev *) data;
        struct cnic_local *cp = dev->cnic_priv;
-       u32 status_idx;
+       u32 status_idx, new_status_idx;
 
        if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
                return;
 
-       status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+       while (1) {
+               status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
 
-       CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+               CNIC_WR16(dev, cp->kcq1.io_addr,
+                         cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-       if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-               status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+               if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+                       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+                                          status_idx, IGU_INT_ENABLE, 1);
+                       break;
+               }
+
+               new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+               if (new_status_idx != status_idx)
+                       continue;
 
                CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
                          MAX_KCQ_IDX);
 
                cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
                                status_idx, IGU_INT_ENABLE, 1);
-       } else {
-               cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
-                                  status_idx, IGU_INT_ENABLE, 1);
+
+               break;
        }
 }
 
@@ -2953,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cp->ulp_ops[if_type];
+               ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+                                                   lockdep_is_held(&cnic_lock));
                if (!ulp_ops) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -2977,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cp->ulp_ops[if_type];
+               ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+                                                   lockdep_is_held(&cnic_lock));
                if (!ulp_ops || !ulp_ops->cnic_start) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -3041,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cnic_ulp_tbl[i];
+               ulp_ops = cnic_ulp_tbl_prot(i);
                if (!ulp_ops || !ulp_ops->cnic_init) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -3065,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cnic_ulp_tbl[i];
+               ulp_ops = cnic_ulp_tbl_prot(i);
                if (!ulp_ops || !ulp_ops->cnic_exit) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -3381,17 +3407,14 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
                             struct dst_entry **dst)
 {
 #if defined(CONFIG_INET)
-       struct flowi fl;
-       int err;
        struct rtable *rt;
 
-       memset(&fl, 0, sizeof(fl));
-       fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
-
-       err = ip_route_output_key(&init_net, &rt, &fl);
-       if (!err)
+       rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+       if (!IS_ERR(rt)) {
                *dst = &rt->dst;
-       return err;
+               return 0;
+       }
+       return PTR_ERR(rt);
 #else
        return -ENETUNREACH;
 #endif
@@ -3401,14 +3424,14 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
                             struct dst_entry **dst)
 {
 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
-       struct flowi fl;
+       struct flowi6 fl6;
 
-       memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
-       if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
-               fl.oif = dst_addr->sin6_scope_id;
+       memset(&fl6, 0, sizeof(fl6));
+       ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+       if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+               fl6.flowi6_oif = dst_addr->sin6_scope_id;
 
-       *dst = ip6_route_output(&init_net, NULL, &fl);
+       *dst = ip6_route_output(&init_net, NULL, &fl6);
        if (*dst)
                return 0;
 #endif
@@ -4170,6 +4193,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
 }
 
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+       u32 max_conn;
+
+       max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+       dev->max_iscsi_conn = max_conn;
+}
+
 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4525,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
                return err;
        }
 
+       cnic_get_bnx2_iscsi_info(dev);
+
        return 0;
 }
 
@@ -4705,129 +4738,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
        cp->rx_cons = *cp->rx_cons_ptr;
 }
 
-static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
-                                    u32 lower_addr)
-{
-       u32 val;
-       u8 mac[6];
-
-       val = CNIC_RD(dev, upper_addr);
-
-       mac[0] = (u8) (val >> 8);
-       mac[1] = (u8) val;
-
-       val = CNIC_RD(dev, lower_addr);
-
-       mac[2] = (u8) (val >> 24);
-       mac[3] = (u8) (val >> 16);
-       mac[4] = (u8) (val >> 8);
-       mac[5] = (u8) val;
-
-       if (is_valid_ether_addr(mac)) {
-               memcpy(dev->mac_addr, mac, 6);
-               return 0;
-       } else {
-               return -EINVAL;
-       }
-}
-
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
-{
-       struct cnic_local *cp = dev->cnic_priv;
-       u32 base, base2, addr, addr1, val;
-       int port = CNIC_PORT(cp);
-
-       dev->max_iscsi_conn = 0;
-       base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
-       if (base == 0)
-               return;
-
-       base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
-                                             MISC_REG_GENERIC_CR_0));
-       addr = BNX2X_SHMEM_ADDR(base,
-               dev_info.port_hw_config[port].iscsi_mac_upper);
-
-       addr1 = BNX2X_SHMEM_ADDR(base,
-               dev_info.port_hw_config[port].iscsi_mac_lower);
-
-       cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
-
-       addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
-       val = CNIC_RD(dev, addr);
-
-       if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
-               u16 val16;
-
-               addr = BNX2X_SHMEM_ADDR(base,
-                               drv_lic_key[port].max_iscsi_init_conn);
-               val16 = CNIC_RD16(dev, addr);
-
-               if (val16)
-                       val16 ^= 0x1e1e;
-               dev->max_iscsi_conn = val16;
-       }
-
-       if (BNX2X_CHIP_IS_E2(cp->chip_id))
-               dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
-
-       if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
-               int func = CNIC_FUNC(cp);
-               u32 mf_cfg_addr;
-
-               if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
-                       mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
-                                             mf_cfg_addr));
-               else
-                       mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
-               if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-                       /* Must determine if the MF is SD vs SI mode */
-                       addr = BNX2X_SHMEM_ADDR(base,
-                                       dev_info.shared_feature_config.config);
-                       val = CNIC_RD(dev, addr);
-                       if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
-                           SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
-                               int rc;
-
-                               /* MULTI_FUNCTION_SI mode */
-                               addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                                       func_ext_config[func].func_cfg);
-                               val = CNIC_RD(dev, addr);
-                               if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
-                                       dev->max_iscsi_conn = 0;
-
-                               if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
-                                       dev->max_fcoe_conn = 0;
-
-                               addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                                       func_ext_config[func].
-                                       iscsi_mac_addr_upper);
-                               addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                                       func_ext_config[func].
-                                       iscsi_mac_addr_lower);
-                               rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
-                                                               addr1);
-                               if (rc && func > 1)
-                                       dev->max_iscsi_conn = 0;
-
-                               return;
-                       }
-               }
-
-               addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                       func_mf_config[func].e1hov_tag);
-
-               val = CNIC_RD(dev, addr);
-               val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
-               if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-                       dev->max_fcoe_conn = 0;
-                       dev->max_iscsi_conn = 0;
-               }
-       }
-       if (!is_valid_ether_addr(dev->mac_addr))
-               dev->max_iscsi_conn = 0;
-}
-
 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4819,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 
        cnic_init_bnx2x_kcq(dev);
 
-       cnic_get_bnx2x_iscsi_info(dev);
-
        /* Only 1 EQ */
        CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5264,15 +5172,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
 
        dev_hold(dev);
        pci_dev_get(pdev);
-       if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
-           pdev->device == PCI_DEVICE_ID_NX2_5709S) {
-               u8 rev;
-
-               pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-               if (rev < 0x10) {
-                       pci_dev_put(pdev);
-                       goto cnic_err;
-               }
+       if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+            pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+           (pdev->revision < 0x10)) {
+               pci_dev_put(pdev);
+               goto cnic_err;
        }
        pci_dev_put(pdev);
 
@@ -5343,6 +5247,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;
 
+       if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+               cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+       if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+           !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+               cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+       memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
        cp->cnic_ops = &cnic_bnx2x_ops;
        cp->start_hw = cnic_start_bnx2x_hw;
        cp->stop_hw = cnic_stop_bnx2x_hw;
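
The cnic.c hunks above drop the driver's own shared-memory parsing (cnic_get_bnx2x_iscsi_info) in favour of limits published by bnx2x, and also replace an explicit PCI_REVISION_ID config-space read with the revision the PCI core already caches in struct pci_dev. A minimal sketch of that second idiom, using only the device IDs visible in the diff (the helper name is hypothetical):

    /* pdev->revision is filled in by the PCI core at enumeration time,
     * so no pci_read_config_byte() round-trip is needed. */
    static bool nx2_5709_is_old_rev(struct pci_dev *pdev)
    {
            return (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
                    pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
                   pdev->revision < 0x10;
    }
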
index b328f6c..4456260 100644 (file)
@@ -220,7 +220,7 @@ struct cnic_local {
 #define ULP_F_INIT     0
 #define ULP_F_START    1
 #define ULP_F_CALL_PENDING     2
-       struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+       struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
 
        unsigned long cnic_local_flags;
 #define        CNIC_LCL_FL_KWQ_INIT            0x0
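
The __rcu annotation added above is for sparse only; it documents that ulp_ops must be published and read through the RCU accessors. A minimal sketch of that access pattern, not taken from the patch (names are illustrative):

    struct cnic_ulp_ops __rcu *ulp_ops;            /* annotated pointer */

    /* writer: publish a new ops table */
    rcu_assign_pointer(ulp_ops, new_ops);

    /* reader: dereference only inside an RCU read-side section */
    rcu_read_lock();
    ops = rcu_dereference(ulp_ops);
    if (ops)
            /* ... call into ops ... */;
    rcu_read_unlock();
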
index 9f44e0f..e01b49e 100644 (file)
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION    "2.2.12"
-#define CNIC_MODULE_RELDATE    "Jan 03, 2011"
+#define CNIC_MODULE_VERSION    "2.2.13"
+#define CNIC_MODULE_RELDATE    "Jan 31, 2011"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
        u32             drv_state;
 #define CNIC_DRV_STATE_REGD            0x00000001
 #define CNIC_DRV_STATE_USING_MSIX      0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO    0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI                0x00000008
+#define CNIC_DRV_STATE_NO_FCOE         0x00000010
        u32             chip_id;
        u32             max_kwqe_pending;
        struct pci_dev  *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
        u32             fcoe_init_cid;
        u16             iscsi_l2_client_id;
        u16             iscsi_l2_cid;
+       u8              iscsi_mac[ETH_ALEN];
 
        int             num_irq;
        struct cnic_irq irq_arr[MAX_CNIC_VEC];
index d325e01..537a4b2 100644 (file)
@@ -95,6 +95,9 @@
   Dmitry Pervushin  : dpervushin@ru.mvista.com
                     : PNX010X platform support
 
+  Domenico Andreoli : cavokz@gmail.com
+                    : QQ2440 platform support
+
 */
 
 /* Always include 'config.h' first in case the user wants to turn on
@@ -176,6 +179,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
 #elif defined(CONFIG_ARCH_IXDP2X01)
 static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
 static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
+#elif defined(CONFIG_MACH_QQ2440)
+#include <mach/qq2440.h>
+static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
+static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
 #elif defined(CONFIG_MACH_MX31ADS)
 #include <mach/board-mx31ads.h>
 static unsigned int netcard_portlist[] __used __initdata = {
@@ -521,6 +528,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
 #endif
                lp->force = g_cs89x0_media__force;
 #endif
+
+#if defined(CONFIG_MACH_QQ2440)
+               lp->force |= FORCE_RJ45 | FORCE_FULL;
+#endif
         }
 
        /* Grab the region so we can find another board if autoIRQ fails. */
@@ -943,10 +954,10 @@ skip_this_frame:
 static void __init reset_chip(struct net_device *dev)
 {
 #if !defined(CONFIG_MACH_MX31ADS)
-#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
+#if !defined(CS89x0_NONISA_IRQ)
        struct net_local *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
-#endif
+#endif /* CS89x0_NONISA_IRQ */
        int reset_start_time;
 
        writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -954,7 +965,7 @@ static void __init reset_chip(struct net_device *dev)
        /* wait 30 ms */
        msleep(30);
 
-#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
+#if !defined(CS89x0_NONISA_IRQ)
        if (lp->chip_type != CS8900) {
                /* Hardware problem requires PNP registers to be reconfigured after a reset */
                writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
@@ -965,7 +976,7 @@ static void __init reset_chip(struct net_device *dev)
                outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
                outb((dev->mem_start >> 8) & 0xff,   ioaddr + DATA_PORT + 1);
        }
-#endif /* IXDP2x01 */
+#endif /* CS89x0_NONISA_IRQ */
 
        /* Wait until the chip is reset */
        reset_start_time = jiffies;
index ef02aa6..862804f 100644 (file)
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
                                dev = NULL;
                                if (grp)
                                        dev = vlan_group_get_device(grp, vlan);
-                       } else
+                       } else if (netif_is_bond_slave(dev)) {
                                while (dev->master)
                                        dev = dev->master;
+                       }
                        return dev;
                }
        }
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
                cxgb_neigh_update((struct neighbour *)ctx);
                break;
        }
-       case (NETEVENT_PMTU_UPDATE):
-               break;
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
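
The get_iff_from_mac() change above only follows dev->master when the device really is a bonding slave; a bare while loop would also climb other master relationships. The same guard in isolation, as a hypothetical helper:

    static struct net_device *bond_root_dev(struct net_device *dev)
    {
            /* climb to the bonding master only for genuine bond slaves */
            if (netif_is_bond_slave(dev))
                    while (dev->master)
                            dev = dev->master;
            return dev;
    }
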
index 059c1ee..5352c8a 100644 (file)
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
        case NETEVENT_NEIGH_UPDATE:
                check_neigh_update(data);
                break;
-       case NETEVENT_PMTU_UPDATE:
        case NETEVENT_REDIRECT:
        default:
                break;
@@ -2710,6 +2709,8 @@ static int cxgb_open(struct net_device *dev)
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
 
+       netif_carrier_off(dev);
+
        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgb_up(adapter);
                if (err < 0)
@@ -3661,7 +3662,6 @@ static int __devinit init_one(struct pci_dev *pdev,
                pi->xact_addr_filt = -1;
                pi->rx_offload = RX_CSO;
                pi->port_id = i;
-               netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
 
                netdev->features |= NETIF_F_SG | TSO_FLAGS;
index 56166ae..6aad64d 100644 (file)
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
 {
        int i;
 
-       BUG_ON(adapter->debugfs_root == NULL);
+       BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
        /*
         * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-       BUG_ON(adapter->debugfs_root == NULL);
+       BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
        /*
         * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2488,17 +2488,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
        struct port_info *pi;
        struct net_device *netdev;
 
-       /*
-        * Vet our module parameters.
-        */
-       if (msi != MSI_MSIX && msi != MSI_MSI) {
-               dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-                       " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-                       MSI_MSI);
-               err = -EINVAL;
-               goto err_out;
-       }
-
        /*
         * Print our driver banner the first time we're called to initialize a
         * device.
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
        /*
         * Set up our debugfs entries.
         */
-       if (cxgb4vf_debugfs_root) {
+       if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
                adapter->debugfs_root =
                        debugfs_create_dir(pci_name(pdev),
                                           cxgb4vf_debugfs_root);
-               if (adapter->debugfs_root == NULL)
+               if (IS_ERR_OR_NULL(adapter->debugfs_root))
                        dev_warn(&pdev->dev, "could not create debugfs"
                                 " directory");
                else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
         */
 
 err_free_debugfs:
-       if (adapter->debugfs_root) {
+       if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
                cleanup_debugfs(adapter);
                debugfs_remove_recursive(adapter->debugfs_root);
        }
@@ -2802,7 +2791,6 @@ err_release_regions:
 err_disable_device:
        pci_disable_device(pdev);
 
-err_out:
        return err;
 }
 
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
                /*
                 * Tear down our debugfs entries.
                 */
-               if (adapter->debugfs_root) {
+               if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
                        cleanup_debugfs(adapter);
                        debugfs_remove_recursive(adapter->debugfs_root);
                }
@@ -2873,6 +2861,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
        pci_release_regions(pdev);
 }
 
+/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+       struct adapter *adapter;
+       int pidx;
+
+       adapter = pci_get_drvdata(pdev);
+       if (!adapter)
+               return;
+
+       /*
+        * Disable all Virtual Interfaces.  This will shut down the
+        * delivery of all ingress packets into the chip for these
+        * Virtual Interfaces.
+        */
+       for_each_port(adapter, pidx) {
+               struct net_device *netdev;
+               struct port_info *pi;
+
+               if (!test_bit(pidx, &adapter->registered_device_map))
+                       continue;
+
+               netdev = adapter->port[pidx];
+               if (!netdev)
+                       continue;
+
+               pi = netdev_priv(netdev);
+               t4vf_enable_vi(adapter, pi->viid, false, false);
+       }
+
+       /*
+        * Free up all Queues which will prevent further DMA and
+        * Interrupts allowing various internal pathways to drain.
+        */
+       t4vf_free_sge_resources(adapter);
+}
+
 /*
  * PCI Device registration data structures.
  */
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
        .id_table       = cxgb4vf_pci_tbl,
        .probe          = cxgb4vf_pci_probe,
        .remove         = __devexit_p(cxgb4vf_pci_remove),
+       .shutdown       = __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
 {
        int ret;
 
+       /*
+        * Vet our module parameters.
+        */
+       if (msi != MSI_MSIX && msi != MSI_MSI) {
+               printk(KERN_WARNING KBUILD_MODNAME
+                      ": bad module parameter msi=%d; must be %d"
+                      " (MSI-X or MSI) or %d (MSI)\n",
+                      msi, MSI_MSIX, MSI_MSI);
+               return -EINVAL;
+       }
+
        /* Debugfs support is optional, just warn if this fails */
        cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-       if (!cxgb4vf_debugfs_root)
+       if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
                printk(KERN_WARNING KBUILD_MODNAME ": could not create"
                       " debugfs entry, continuing\n");
 
        ret = pci_register_driver(&cxgb4vf_driver);
-       if (ret < 0)
+       if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
                debugfs_remove(cxgb4vf_debugfs_root);
        return ret;
 }
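
Several cxgb4vf hunks above switch the debugfs checks from plain NULL tests to IS_ERR_OR_NULL(). The reason is that with CONFIG_DEBUG_FS disabled, debugfs_create_dir() returns an ERR_PTR rather than NULL, so both outcomes have to count as "no debugfs". The stub in <linux/debugfs.h> looks roughly like this:

    /* with CONFIG_DEBUG_FS=n the inline stub is approximately: */
    static inline struct dentry *debugfs_create_dir(const char *name,
                                                    struct dentry *parent)
    {
            return ERR_PTR(-ENODEV);        /* not NULL! */
    }
    /* hence IS_ERR_OR_NULL() rather than a plain NULL check above */
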
index 0f51c80..192db22 100644 (file)
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        delay_idx = 0;
        ms = delay[0];
 
-       for (i = 0; i < 500; i += ms) {
+       for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
index 2a628d1..7018bfe 100644 (file)
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
        int                     ret;
 
        /* free and bail if we are shutting down */
-       if (unlikely(!netif_running(ndev))) {
+       if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
                dev_kfree_skb_any(skb);
                return;
        }
index 1b48b68..8b0084d 100644 (file)
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
                                }
                        }
                        /* Change buffer ownership for this last frame, back to the adapter */
-                       for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) {
+                       for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
                                writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
                        }
                        writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
                /*
                   ** Update entry information
                 */
-               lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+               lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
        }
 
        return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
                }
 
                /* Update all the pointers */
-               lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+               lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
        }
 
        return 0;
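
The depca ring-index changes above are correctness fixes, not style: `lp->rx_old = (++lp->rx_old) & mask` modifies lp->rx_old twice without an intervening sequence point, which is undefined behaviour in C (and newer gcc warns about it), while `(lp->rx_old + 1) & mask` reads once and writes once. A minimal illustration:

    unsigned int idx = 3, mask = 3;

    idx = (++idx) & mask;     /* undefined: idx written twice in one expression */
    idx = (idx + 1) & mask;   /* well-defined ring-buffer advance, yields 0 */
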
index e1a8216..c05db60 100644 (file)
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
 
        /* Free all the skbuffs in the queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               np->rx_ring[i].status = 0;
-               np->rx_ring[i].fraginfo = 0;
                skb = np->rx_skbuff[i];
                if (skb) {
                        pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
                        dev_kfree_skb (skb);
                        np->rx_skbuff[i] = NULL;
                }
+               np->rx_ring[i].status = 0;
+               np->rx_ring[i].fraginfo = 0;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = np->tx_skbuff[i];
index 2d4c4fc..3177081 100644 (file)
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
        /* Checksum mode */
        dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
 
-       /* GPIO0 on pre-activate PHY */
-       iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
-       iow(db, DM9000_GPR, 0); /* Enable PHY */
 
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
        unsigned long flags;
 
        /* Save previous register address */
-       reg_save = readb(db->io_addr);
        spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
 
        netif_stop_queue(dev);
        dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
        if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
                return -EAGAIN;
 
+       /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+       iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+       mdelay(1); /* delay needs by DM9000B */
+
        /* Initialize DM9000 board */
        dm9000_reset(db);
        dm9000_init_dm9000(dev);
@@ -1592,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
                        ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
        }
 
-       if (!is_valid_ether_addr(ndev->dev_addr))
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
                dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
                         "set using ifconfig\n", ndev->name);
 
+               random_ether_addr(ndev->dev_addr);
+               mac_src = "random";
+       }
+
+
        platform_set_drvdata(pdev, ndev);
        ret = register_netdev(ndev);
 
index 9d8a20b..8318ea0 100644 (file)
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(bp->dev, bp->mii_bus);
-
        if (mdiobus_register(bp->mii_bus)) {
                err = -ENXIO;
                goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
        bp = netdev_priv(dev);
        bp->dev = dev;
 
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        spin_lock_init(&bp->lock);
index aed223b..7501d97 100644 (file)
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
        case M88E1000_I_PHY_ID:
        case M88E1011_I_PHY_ID:
        case M88E1111_I_PHY_ID:
+       case M88E1118_E_PHY_ID:
                hw->phy_type = e1000_phy_m88;
                break;
        case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
                break;
        case e1000_ce4100:
                if ((hw->phy_id == RTL8211B_PHY_ID) ||
-                   (hw->phy_id == RTL8201N_PHY_ID))
+                   (hw->phy_id == RTL8201N_PHY_ID) ||
+                   (hw->phy_id == M88E1118_E_PHY_ID))
                        match = true;
                break;
        case e1000_82541:
index 196eeda..c70b23d 100644 (file)
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
 #define M88E1011_I_REV_4   0x04
 #define M88E1111_I_PHY_ID  0x01410CC0
+#define M88E1118_E_PHY_ID  0x01410E40
 #define L1LXT971A_PHY_ID   0x001378E0
 
 #define RTL8211B_PHY_ID    0x001CC910
index 55c1711..33e7c45 100644 (file)
@@ -42,7 +42,8 @@
 #define GBE_CONFIG_RAM_BASE \
        ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
 
-#define GBE_CONFIG_BASE_VIRT    phys_to_virt(GBE_CONFIG_RAM_BASE)
+#define GBE_CONFIG_BASE_VIRT \
+       ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
 
 #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
        (iowrite16_rep(base + offset, data, count))
index 1314998..c516a74 100644 (file)
@@ -86,6 +86,7 @@
 #define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
 #define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
 #define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK         0x00001000
 #define E1000_CTRL_EXT_PHYPDEN        0x00100000
 
 /* Receive Descriptor bit definitions */
index e610e13..00bf595 100644 (file)
@@ -364,6 +364,7 @@ struct e1000_adapter {
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
 
+       spinlock_t stats64_lock;
        struct e1000_hw_stats stats;
        struct e1000_phy_info phy_info;
        struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
 extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                                    struct rtnl_link_stats64
+                                                    *stats);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
index fa08b63..07f09e9 100644 (file)
@@ -46,15 +46,15 @@ struct e1000_stats {
 };
 
 #define E1000_STAT(str, m) { \
-                       .stat_string = str, \
-                       .type = E1000_STATS, \
-                       .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
-                       .stat_offset = offsetof(struct e1000_adapter, m) }
+               .stat_string = str, \
+               .type = E1000_STATS, \
+               .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+               .stat_offset = offsetof(struct e1000_adapter, m) }
 #define E1000_NETDEV_STAT(str, m) { \
-                       .stat_string = str, \
-                       .type = NETDEV_STATS, \
-                       .sizeof_stat = sizeof(((struct net_device *)0)->m), \
-                       .stat_offset = offsetof(struct net_device, m) }
+               .stat_string = str, \
+               .type = NETDEV_STATS, \
+               .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+               .stat_offset = offsetof(struct rtnl_link_stats64, m) }
 
 static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("tx_broadcast", stats.bptc),
        E1000_STAT("rx_multicast", stats.mprc),
        E1000_STAT("tx_multicast", stats.mptc),
-       E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
-       E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
-       E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
+       E1000_NETDEV_STAT("rx_errors", rx_errors),
+       E1000_NETDEV_STAT("tx_errors", tx_errors),
+       E1000_NETDEV_STAT("tx_dropped", tx_dropped),
        E1000_STAT("multicast", stats.mprc),
        E1000_STAT("collisions", stats.colc),
-       E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
-       E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
+       E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+       E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
        E1000_STAT("rx_crc_errors", stats.crcerrs),
-       E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
+       E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
        E1000_STAT("rx_no_buffer_count", stats.rnbc),
        E1000_STAT("rx_missed_errors", stats.mpc),
        E1000_STAT("tx_aborted_errors", stats.ecol),
        E1000_STAT("tx_carrier_errors", stats.tncrs),
-       E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
-       E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
+       E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+       E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
        E1000_STAT("tx_window_errors", stats.latecol),
        E1000_STAT("tx_abort_late_coll", stats.latecol),
        E1000_STAT("tx_deferred_ok", stats.dc),
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
        struct e1000_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u16 phy_data;
-       u8 revision_id;
 
        memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
-       pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
-       regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+       regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+                       adapter->pdev->device;
 
        regs_buff[0]  = er32(CTRL);
        regs_buff[1]  = er32(STATUS);
@@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
        rx_old = adapter->rx_ring;
 
        err = -ENOMEM;
-       tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+       tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
        if (!tx_ring)
                goto err_alloc_tx;
-       /*
-        * use a memcpy to save any previously configured
-        * items like napi structs from having to be
-        * reinitialized
-        */
-       memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
 
-       rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+       rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
        if (!rx_ring)
                goto err_alloc_rx;
-       memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
 
        adapter->tx_ring = tx_ring;
        adapter->rx_ring = rx_ring;
@@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_reg = 0;
-       u32 stat_reg = 0;
        u16 phy_reg = 0;
        s32 ret_val = 0;
 
@@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
                 * Set the ILOS bit on the fiber Nic if half duplex link is
                 * detected.
                 */
-               stat_reg = er32(STATUS);
-               if ((stat_reg & E1000_STATUS_FD) == 0)
+               if ((er32(STATUS) & E1000_STATUS_FD) == 0)
                        ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
        }
 
@@ -1677,10 +1666,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
        } else {
                hw->mac.ops.check_for_link(hw);
                if (hw->mac.autoneg)
-                       msleep(4000);
+                       /*
+                        * On some Phy/switch combinations, link establishment
+                        * can take a few seconds more than expected.
+                        */
+                       msleep(5000);
 
-               if (!(er32(STATUS) &
-                     E1000_STATUS_LU))
+               if (!(er32(STATUS) & E1000_STATUS_LU))
                        *data = 1;
        }
        return *data;
@@ -1807,8 +1799,7 @@ static void e1000_get_wol(struct net_device *netdev,
                return;
 
        wol->supported = WAKE_UCAST | WAKE_MCAST |
-                        WAKE_BCAST | WAKE_MAGIC |
-                        WAKE_PHY | WAKE_ARP;
+           WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
 
        /* apply any specific unsupported masks here */
        if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@@ -1829,19 +1820,16 @@ static void e1000_get_wol(struct net_device *netdev,
                wol->wolopts |= WAKE_MAGIC;
        if (adapter->wol & E1000_WUFC_LNKC)
                wol->wolopts |= WAKE_PHY;
-       if (adapter->wol & E1000_WUFC_ARP)
-               wol->wolopts |= WAKE_ARP;
 }
 
-static int e1000_set_wol(struct net_device *netdev,
-                        struct ethtool_wolinfo *wol)
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
        if (!(adapter->flags & FLAG_HAS_WOL) ||
            !device_can_wakeup(&adapter->pdev->dev) ||
            (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
-                             WAKE_MAGIC | WAKE_PHY | WAKE_ARP)))
+                             WAKE_MAGIC | WAKE_PHY)))
                return -EOPNOTSUPP;
 
        /* these settings will always override what we currently have */
@@ -1857,8 +1845,6 @@ static int e1000_set_wol(struct net_device *netdev,
                adapter->wol |= E1000_WUFC_MAG;
        if (wol->wolopts & WAKE_PHY)
                adapter->wol |= E1000_WUFC_LNKC;
-       if (wol->wolopts & WAKE_ARP)
-               adapter->wol |= E1000_WUFC_ARP;
 
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
@@ -1972,8 +1958,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
 static int e1000_nway_reset(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       if (netif_running(netdev))
-               e1000e_reinit_locked(adapter);
+
+       if (!netif_running(netdev))
+               return -EAGAIN;
+
+       if (!adapter->hw.mac.autoneg)
+               return -EINVAL;
+
+       e1000e_reinit_locked(adapter);
+
        return 0;
 }
 
@@ -1982,14 +1975,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
                                    u64 *data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct rtnl_link_stats64 net_stats;
        int i;
        char *p = NULL;
 
-       e1000e_update_stats(adapter);
+       e1000e_get_stats64(netdev, &net_stats);
        for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
                switch (e1000_gstrings_stats[i].type) {
                case NETDEV_STATS:
-                       p = (char *) netdev +
+                       p = (char *) &net_stats +
                                        e1000_gstrings_stats[i].stat_offset;
                        break;
                case E1000_STATS:
@@ -2014,7 +2008,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
 
        switch (stringset) {
        case ETH_SS_TEST:
-               memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
+               memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
                break;
        case ETH_SS_STATS:
                for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
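
With the E1000_NETDEV_STAT change above, the generic netdev entries are resolved against a struct rtnl_link_stats64 snapshot rather than against struct net_device, so e1000_get_ethtool_stats() can take one consistent snapshot through the new stats64 path. A sketch of the offset-based lookup, assuming the e1000_gstrings_stats[] table shown in the diff:

    struct rtnl_link_stats64 net_stats;
    char *p;

    e1000e_get_stats64(netdev, &net_stats);          /* fresh snapshot */
    p = (char *)&net_stats + e1000_gstrings_stats[i].stat_offset;
    data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
              *(u64 *)p : *(u32 *)p;
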
index bc0860a..307e1ec 100644 (file)
@@ -812,9 +812,8 @@ struct e1000_nvm_operations {
 
 struct e1000_mac_info {
        struct e1000_mac_operations ops;
-
-       u8 addr[6];
-       u8 perm_addr[6];
+       u8 addr[ETH_ALEN];
+       u8 perm_addr[ETH_ALEN];
 
        enum e1000_mac_type type;
 
index fb46974..ce1dbfd 100644 (file)
 #define I82579_LPI_CTRL                        PHY_REG(772, 20)
 #define I82579_LPI_CTRL_ENABLE_MASK    0x6000
 
+/* EMI Registers */
+#define I82579_EMI_ADDR         0x10
+#define I82579_EMI_DATA         0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+
 /* Strapping Option Register - RO */
 #define E1000_STRAP                     0x0000C
 #define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
@@ -302,9 +307,9 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
         * the interconnect to PCIe mode.
         */
        fwsm = er32(FWSM);
-       if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+       if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
                ctrl = er32(CTRL);
-               ctrl |=  E1000_CTRL_LANPHYPC_OVERRIDE;
+               ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
                ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
                ew32(CTRL, ctrl);
                udelay(10);
@@ -331,7 +336,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                goto out;
 
        /* Ungate automatic PHY configuration on non-managed 82579 */
-       if ((hw->mac.type == e1000_pch2lan)  &&
+       if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                msleep(10);
                e1000_gate_hw_phy_config_ich8lan(hw, false);
@@ -366,7 +371,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
        case e1000_phy_82579:
                phy->ops.check_polarity = e1000_check_polarity_82577;
                phy->ops.force_speed_duplex =
-                       e1000_phy_force_speed_duplex_82577;
+                   e1000_phy_force_speed_duplex_82577;
                phy->ops.get_cable_length = e1000_get_cable_length_82577;
                phy->ops.get_info = e1000_get_phy_info_82577;
                phy->ops.commit = e1000e_phy_sw_reset;
@@ -753,7 +758,13 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
        if (rc)
                return rc;
 
-       if (adapter->hw.phy.type == e1000_phy_ife) {
+       /*
+        * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+        * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
+        */
+       if ((adapter->hw.phy.type == e1000_phy_ife) ||
+           ((adapter->hw.mac.type >= e1000_pch2lan) &&
+            (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
                adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
                adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
        }
@@ -1723,11 +1734,25 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
        /* Configure the LCD with the OEM bits in NVM */
        ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 
-       /* Ungate automatic PHY configuration on non-managed 82579 */
-       if ((hw->mac.type == e1000_pch2lan) &&
-           !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-               msleep(10);
-               e1000_gate_hw_phy_config_ich8lan(hw, false);
+       if (hw->mac.type == e1000_pch2lan) {
+               /* Ungate automatic PHY configuration on non-managed 82579 */
+               if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+                       msleep(10);
+                       e1000_gate_hw_phy_config_ich8lan(hw, false);
+               }
+
+               /* Set EEE LPI Update Timer to 200usec */
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+               ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+                                                      I82579_LPI_UPDATE_TIMER);
+               if (ret_val)
+                       goto release;
+               ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+                                                      0x1387);
+release:
+               hw->phy.ops.release(hw);
        }
 
 out:
@@ -2104,7 +2129,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 {
        union ich8_hws_flash_status hsfsts;
        s32 ret_val = -E1000_ERR_NVM;
-       s32 i = 0;
 
        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
@@ -2140,6 +2164,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
                ret_val = 0;
        } else {
+               s32 i = 0;
+
                /*
                 * Otherwise poll for sometime so the current
                 * cycle has a chance to end before giving up.
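
The 82579 hunk above programs the EEE LPI update timer through the PHY's indirect EMI window: take the PHY semaphore, write the target address to I82579_EMI_ADDR, then the value to I82579_EMI_DATA. Wrapped up as a helper it would look roughly like this (the helper itself is not part of the patch):

    static s32 e1000_write_emi_reg(struct e1000_hw *hw, u16 addr, u16 data)
    {
            s32 ret_val = hw->phy.ops.acquire(hw);

            if (ret_val)
                    return ret_val;
            ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, addr);
            if (!ret_val)
                    ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
                                                           data);
            hw->phy.ops.release(hw);
            return ret_val;
    }
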
index 68aa174..96921de 100644 (file)
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 {
        struct e1000_nvm_info *nvm = &hw->nvm;
        u32 eecd = er32(EECD);
-       u16 timeout = 0;
        u8 spi_stat_reg;
 
        if (nvm->type == e1000_nvm_eeprom_spi) {
+               u16 timeout = NVM_MAX_RETRY_SPI;
+
                /* Clear SK and CS */
                eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
                ew32(EECD, eecd);
                udelay(1);
-               timeout = NVM_MAX_RETRY_SPI;
 
                /*
                 * Read "Status Register" repeatedly until the LSB is cleared.
index 1c18f26..a74de23 100644 (file)
@@ -54,7 +54,7 @@
 
 #define DRV_EXTRAVERSION "-k2"
 
-#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
+#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -900,8 +900,6 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       netdev->stats.rx_bytes += total_rx_bytes;
-       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -937,6 +935,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
        u16 phy_status, phy_1000t_status, phy_ext_status;
        u16 pci_status;
 
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1057,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
-       netdev->stats.tx_bytes += total_tx_bytes;
-       netdev->stats.tx_packets += total_tx_packets;
        return count < tx_ring->count;
 }
 
@@ -1245,8 +1244,6 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       netdev->stats.rx_bytes += total_rx_bytes;
-       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -1325,7 +1322,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                /* an error means any chain goes out the window
                                 * too */
                                if (rx_ring->rx_skb_top)
-                                       dev_kfree_skb(rx_ring->rx_skb_top);
+                                       dev_kfree_skb_irq(rx_ring->rx_skb_top);
                                rx_ring->rx_skb_top = NULL;
                                goto next_desc;
                }
@@ -1398,7 +1395,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                /* eth type trans needs skb->data to point to something */
                if (!pskb_may_pull(skb, ETH_HLEN)) {
                        e_err("pskb_may_pull failed.\n");
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
 
@@ -1426,8 +1423,6 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       netdev->stats.rx_bytes += total_rx_bytes;
-       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -1506,6 +1501,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                        struct e1000_adapter, downshift_task);
 
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
 
@@ -1851,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        int err = 0, vector = 0;
 
        if (strlen(netdev->name) < (IFNAMSIZ - 5))
-               sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+               snprintf(adapter->rx_ring->name,
+                        sizeof(adapter->rx_ring->name) - 1,
+                        "%s-rx-0", netdev->name);
        else
                memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
        err = request_irq(adapter->msix_entries[vector].vector,
@@ -1864,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        vector++;
 
        if (strlen(netdev->name) < (IFNAMSIZ - 5))
-               sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+               snprintf(adapter->tx_ring->name,
+                        sizeof(adapter->tx_ring->name) - 1,
+                        "%s-tx-0", netdev->name);
        else
                memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
        err = request_irq(adapter->msix_entries[vector].vector,
@@ -2728,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl, rfctl;
-       u32 psrctl = 0;
        u32 pages = 0;
 
        /* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2827,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                adapter->rx_ps_pages = 0;
 
        if (adapter->rx_ps_pages) {
+               u32 psrctl = 0;
+
                /* Configure extra packet-split registers */
                rfctl = er32(RFCTL);
                rfctl |= E1000_RFCTL_EXTEN;
@@ -3028,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
        struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
-       int i;
 
        /* Check for Promiscuous and All Multicast modes */
 
@@ -3051,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
        ew32(RCTL, rctl);
 
        if (!netdev_mc_empty(netdev)) {
+               int i = 0;
+
                mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
                if (!mta_list)
                        return;
 
                /* prepare a packed array of only addresses. */
-               i = 0;
                netdev_for_each_mc_addr(ha, netdev)
                        memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
@@ -3338,6 +3341,23 @@ int e1000e_up(struct e1000_adapter *adapter)
        return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (!(adapter->flags2 & FLAG2_DMA_BURST))
+               return;
+
+       /* flush pending descriptor writebacks to memory */
+       ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+       ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+       /* execute the writes immediately */
+       e1e_flush();
+}
+
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -3372,11 +3392,19 @@ void e1000e_down(struct e1000_adapter *adapter)
        del_timer_sync(&adapter->phy_info_timer);
 
        netif_carrier_off(netdev);
+
+       spin_lock(&adapter->stats64_lock);
+       e1000e_update_stats(adapter);
+       spin_unlock(&adapter->stats64_lock);
+
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
 
        if (!pci_channel_offline(adapter->pdev))
                e1000e_reset(adapter);
+
+       e1000e_flush_descriptors(adapter);
+
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
 
@@ -3413,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+       spin_lock_init(&adapter->stats64_lock);
+
        e1000e_set_interrupt_capability(adapter);
 
        if (e1000_alloc_queues(adapter))
@@ -3765,6 +3795,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 {
        struct e1000_adapter *adapter = container_of(work,
                                        struct e1000_adapter, update_phy_task);
+
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        e1000_get_phy_info(&adapter->hw);
 }
 
@@ -3775,6 +3809,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 static void e1000_update_phy_info(unsigned long data)
 {
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        schedule_work(&adapter->update_phy_task);
 }
 
@@ -3886,7 +3924,7 @@ release:
  * e1000e_update_stats - Update the board statistics counters
  * @adapter: board private structure
  **/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
@@ -3998,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_phy_regs *phy = &adapter->phy_regs;
-       int ret_val;
 
        if ((er32(STATUS) & E1000_STATUS_LU) &&
            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+               int ret_val;
+
                ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
                ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
                ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4147,7 +4186,9 @@ static void e1000_watchdog_task(struct work_struct *work)
        struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_hw *hw = &adapter->hw;
        u32 link, tctl;
-       int tx_pending = 0;
+
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
 
        link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
@@ -4285,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
        }
 
 link_up:
+       spin_lock(&adapter->stats64_lock);
        e1000e_update_stats(adapter);
+       spin_unlock(&adapter->stats64_lock);
 
        mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
        adapter->tpt_old = adapter->stats.tpt;
@@ -4299,21 +4342,17 @@ link_up:
 
        e1000e_update_adaptive(&adapter->hw);
 
-       if (!netif_carrier_ok(netdev)) {
-               tx_pending = (e1000_desc_unused(tx_ring) + 1 <
-                              tx_ring->count);
-               if (tx_pending) {
-                       /*
-                        * We've lost link, so the controller stops DMA,
-                        * but we've got queued Tx work that's never going
-                        * to get done, so reset controller to flush Tx.
-                        * (Do the reset outside of interrupt context).
-                        */
-                       adapter->tx_timeout_count++;
-                       schedule_work(&adapter->reset_task);
-                       /* return immediately since reset is imminent */
-                       return;
-               }
+       if (!netif_carrier_ok(netdev) &&
+           (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+               /*
+                * We've lost link, so the controller stops DMA,
+                * but we've got queued Tx work that's never going
+                * to get done, so reset controller to flush Tx.
+                * (Do the reset outside of interrupt context).
+                */
+               schedule_work(&adapter->reset_task);
+               /* return immediately since reset is imminent */
+               return;
        }
 
        /* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4338,19 +4377,12 @@ link_up:
        else
                ew32(ICS, E1000_ICS_RXDMT0);
 
+       /* flush pending descriptors to memory before detecting Tx hang */
+       e1000e_flush_descriptors(adapter);
+
        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = 1;
 
-       /* flush partial descriptors to memory before detecting Tx hang */
-       if (adapter->flags2 & FLAG2_DMA_BURST) {
-               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-               ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-               /*
-                * no need to flush the writes because the timeout code does
-                * an er32 first thing
-                */
-       }
-
        /*
         * With 82571 controllers, LAA may be overwritten due to controller
         * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4384,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
        u32 cmd_length = 0;
        u16 ipcse = 0, tucse, mss;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
-       int err;
 
        if (!skb_is_gso(skb))
                return 0;
 
        if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
                if (err)
                        return err;
        }
@@ -4888,6 +4920,10 @@ static void e1000_reset_task(struct work_struct *work)
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
 
+       /* don't run the task if already down */
+       if (test_bit(__E1000_DOWN, &adapter->state))
+               return;
+
        if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
              (adapter->flags & FLAG_RX_RESTART_NOW))) {
                e1000e_dump(adapter);
@@ -4897,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
 }
 
 /**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
  * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                             struct rtnl_link_stats64 *stats)
 {
-       /* only return the current stats */
-       return &netdev->stats;
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       memset(stats, 0, sizeof(struct rtnl_link_stats64));
+       spin_lock(&adapter->stats64_lock);
+       e1000e_update_stats(adapter);
+       /* Fill out the OS statistics structure */
+       stats->rx_bytes = adapter->stats.gorc;
+       stats->rx_packets = adapter->stats.gprc;
+       stats->tx_bytes = adapter->stats.gotc;
+       stats->tx_packets = adapter->stats.gptc;
+       stats->multicast = adapter->stats.mprc;
+       stats->collisions = adapter->stats.colc;
+
+       /* Rx Errors */
+
+       /*
+        * RLEC on some newer hardware can be incorrect so build
+        * our own version based on RUC and ROC
+        */
+       stats->rx_errors = adapter->stats.rxerrc +
+               adapter->stats.crcerrs + adapter->stats.algnerrc +
+               adapter->stats.ruc + adapter->stats.roc +
+               adapter->stats.cexterr;
+       stats->rx_length_errors = adapter->stats.ruc +
+                                             adapter->stats.roc;
+       stats->rx_crc_errors = adapter->stats.crcerrs;
+       stats->rx_frame_errors = adapter->stats.algnerrc;
+       stats->rx_missed_errors = adapter->stats.mpc;
+
+       /* Tx Errors */
+       stats->tx_errors = adapter->stats.ecol +
+                                      adapter->stats.latecol;
+       stats->tx_aborted_errors = adapter->stats.ecol;
+       stats->tx_window_errors = adapter->stats.latecol;
+       stats->tx_carrier_errors = adapter->stats.tncrs;
+
+       /* Tx Dropped needs to be maintained elsewhere */
+
+       spin_unlock(&adapter->stats64_lock);
+       return stats;
 }
 
 /**
@@ -5476,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
 {
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int vector, msix_irq;
 
        if (adapter->msix_entries) {
+               int vector, msix_irq;
+
                vector = 0;
                msix_irq = adapter->msix_entries[vector].vector;
                disable_irq(msix_irq);
@@ -5675,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
        .ndo_open               = e1000_open,
        .ndo_stop               = e1000_close,
        .ndo_start_xmit         = e1000_xmit_frame,
-       .ndo_get_stats          = e1000_get_stats,
+       .ndo_get_stats64        = e1000e_get_stats64,
        .ndo_set_multicast_list = e1000_set_multi,
        .ndo_set_mac_address    = e1000_set_mac,
        .ndo_change_mtu         = e1000_change_mtu,
@@ -5936,7 +6012,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                /* APME bit in EEPROM is mapped to WUC.APME */
                eeprom_data = er32(WUC);
                eeprom_apme_mask = E1000_WUC_APME;
-               if (eeprom_data & E1000_WUC_PHY_WAKE)
+               if ((hw->mac.type > e1000_ich10lan) &&
+                   (eeprom_data & E1000_WUC_PHY_WAKE))
                        adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
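
The netdev.c changes above add the same early-exit guard to every deferred task (hang print, downshift workaround, phy update, watchdog, reset): if __E1000_DOWN is already set, the work was queued before teardown started and must not touch the hardware. A schematic task body showing just the guard (the container_of() member stands in for whichever task field applies):

    static void e1000e_some_task(struct work_struct *work)
    {
            struct e1000_adapter *adapter =
                    container_of(work, struct e1000_adapter, reset_task);

            if (test_bit(__E1000_DOWN, &adapter->state))
                    return;             /* adapter is being torn down */

            /* ... safe to access adapter->hw from here on ... */
    }
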
index 6bea051..6ae31fc 100644 (file)
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
 s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 {
        s32 ret_val;
-       u32 page_select = 0;
        u32 page = offset >> IGP_PAGE_SHIFT;
-       u32 page_shift = 0;
 
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
        hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               u32 page_shift, page_select;
+
                /*
                 * Page select is register 31 for phy address 1 and 22 for
                 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
 s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 {
        s32 ret_val;
-       u32 page_select = 0;
        u32 page = offset >> IGP_PAGE_SHIFT;
-       u32 page_shift = 0;
 
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
        hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               u32 page_shift, page_select;
+
                /*
                 * Page select is register 31 for phy address 1 and 22 for
                 * phy address 2 and 3. Page select is shifted only for
index 112c5aa..907b05a 100644 (file)
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
        if (netif_msg_hw(priv))
                printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
                         endptr + 1);
-       enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
+       enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
 }
 
 static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
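
The enc28j60 fix above is a classic sizeof-on-array-parameter bug: in a parameter list `u8 tsv[TSV_SIZE]` decays to `u8 *tsv`, so sizeof(tsv) yields the size of a pointer rather than the size of the status vector, and the SPI read came up short. A minimal illustration of why the explicit TSV_SIZE is needed:

    static void read_tsv(struct enc28j60_net *priv, u16 endptr,
                         u8 tsv[TSV_SIZE])
    {
            /* sizeof(tsv) == sizeof(u8 *) here (4 or 8), NOT TSV_SIZE */
            enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
    }
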
index e7b6c31..2e573be 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
-       enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
+       enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
 
index a937f49..3a3c3c8 100644 (file)
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "1.4.1.10"
-#define DRV_COPYRIGHT          "Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION            "2.1.1.12"
+#define DRV_COPYRIGHT          "Copyright 2008-2011 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
 
-#define ENIC_WQ_MAX            8
-#define ENIC_RQ_MAX            8
+#define ENIC_WQ_MAX            1
+#define ENIC_RQ_MAX            1
 #define ENIC_CQ_MAX            (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
 
@@ -49,7 +49,7 @@ struct enic_msix_entry {
        void *devid;
 };
 
-#define ENIC_SET_APPLIED               (1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED      (1 << 0)
 #define ENIC_SET_REQUEST               (1 << 1)
 #define ENIC_SET_NAME                  (1 << 2)
 #define ENIC_SET_INSTANCE              (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
        /* receive queue cache line section */
        ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
        unsigned int rq_count;
-       int (*rq_alloc_buf)(struct vnic_rq *rq);
        u64 rq_truncated_pkts;
        u64 rq_bad_fcs;
        struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644 (file)
index 0000000..37ad3a1
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_fw_info(enic->vdev, fw_info);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_stats_dump(enic->vdev, vstats);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+       int err;
+
+       if (!is_valid_ether_addr(enic->netdev->dev_addr))
+               return -EADDRNOTAVAIL;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+       int err;
+
+       if (!is_valid_ether_addr(enic->netdev->dev_addr))
+               return -EADDRNOTAVAIL;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+       int broadcast, int promisc, int allmulti)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_packet_filter(enic->vdev, directed,
+               multicast, broadcast, promisc, allmulti);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_add_addr(enic->vdev, addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_del_addr(enic->vdev, addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_notify_unset(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_hang_notify(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+               IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_enable_wait(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_disable(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_deinit(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_init_prov(enic->vdev,
+               (u8 *)vp, vic_provinfo_size(vp));
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_init_done(enic->vdev, done, error);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+       struct enic *enic = netdev_priv(netdev);
+
+       spin_lock(&enic->devcmd_lock);
+       enic_add_vlan(enic, vid);
+       spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+       struct enic *enic = netdev_priv(netdev);
+
+       spin_lock(&enic->devcmd_lock);
+       enic_del_vlan(enic, vid);
+       spin_unlock(&enic->devcmd_lock);
+}
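
Every wrapper in this new file has the same shape: take enic->devcmd_lock, issue the matching vnic_dev_*() firmware call, drop the lock, and return its error. A hypothetical helper (not in the patch) just to make that shared shape explicit:

    /* hypothetical illustration -- the patch writes each wrapper out by
     * hand; this only shows the common lock/call/unlock pattern
     */
    #define ENIC_DEVCMD_LOCKED(enic, call)                  \
    ({                                                      \
            int __err;                                      \
            spin_lock(&(enic)->devcmd_lock);                \
            __err = (call);                                 \
            spin_unlock(&(enic)->devcmd_lock);              \
            __err;                                          \
    })

    /* e.g. enic_dev_enable() could then be written as: */
    static inline int enic_dev_enable_sketch(struct enic *enic)
    {
            return ENIC_DEVCMD_LOCKED(enic, vnic_dev_enable_wait(enic->vdev));
    }
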
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644 (file)
index 0000000..495f57f
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+       int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_init_done(struct enic *enic, int *done, int *error);
+
+#endif /* _ENIC_DEV_H_ */
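
These declarations let enic_main.c drop its private copies (removed in the hunks below) and call the shared wrappers instead. enic_vlan_rx_add_vid() and enic_vlan_rx_kill_vid() lose their static qualifier, presumably so the netdev_ops table in enic_main.c can keep pointing at them; that wiring is not shown in this diff, so the sketch below is an assumption:

    /* assumed wiring, for illustration only */
    static const struct net_device_ops enic_netdev_ops_sketch = {
            .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
            .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
            /* ... remaining callbacks as in enic_main.c ... */
    };
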
index a0af48c..8b9cad5 100644 (file)
@@ -44,6 +44,7 @@
 #include "vnic_vic.h"
 #include "enic_res.h"
 #include "enic.h"
+#include "enic_dev.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD       (2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN           (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
        return 0;
 }
 
-static int enic_dev_fw_info(struct enic *enic,
-       struct vnic_devcmd_fw_info **fw_info)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_fw_info(enic->vdev, fw_info);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *drvinfo)
 {
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
-static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_stats_dump(enic->vdev, vstats);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_get_ethtool_stats(struct net_device *netdev,
        struct ethtool_stats *stats, u64 *data)
 {
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
        return net_stats;
 }
 
-static void enic_reset_multicast_list(struct enic *enic)
+static void enic_reset_addr_lists(struct enic *enic)
 {
        enic->mc_count = 0;
+       enic->uc_count = 0;
        enic->flags = 0;
 }
 
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
        return 0;
 }
 
-static int enic_dev_add_station_addr(struct enic *enic)
-{
-       int err = 0;
-
-       if (is_valid_ether_addr(enic->netdev->dev_addr)) {
-               spin_lock(&enic->devcmd_lock);
-               err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
-               spin_unlock(&enic->devcmd_lock);
-       }
-
-       return err;
-}
-
-static int enic_dev_del_station_addr(struct enic *enic)
-{
-       int err = 0;
-
-       if (is_valid_ether_addr(enic->netdev->dev_addr)) {
-               spin_lock(&enic->devcmd_lock);
-               err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
-               spin_unlock(&enic->devcmd_lock);
-       }
-
-       return err;
-}
-
 static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
 {
        struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
        return enic_dev_add_station_addr(enic);
 }
 
-static int enic_dev_packet_filter(struct enic *enic, int directed,
-       int multicast, int broadcast, int promisc, int allmulti)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_packet_filter(enic->vdev, directed,
-               multicast, broadcast, promisc, allmulti);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_add_addr(struct enic *enic, u8 *addr)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_add_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_del_addr(struct enic *enic, u8 *addr)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_del_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static void enic_add_multicast_addr_list(struct enic *enic)
+static void enic_update_multicast_addr_list(struct enic *enic)
 {
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
        enic->mc_count = mc_count;
 }
 
-static void enic_add_unicast_addr_list(struct enic *enic)
+static void enic_update_unicast_addr_list(struct enic *enic)
 {
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
        }
 
        if (!promisc) {
-               enic_add_unicast_addr_list(enic);
+               enic_update_unicast_addr_list(enic);
                if (!allmulti)
-                       enic_add_multicast_addr_list(enic);
+                       enic_update_multicast_addr_list(enic);
        }
 }
 
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
        enic->vlan_group = vlan_group;
 }
 
-/* rtnl lock is held */
-static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
-       struct enic *enic = netdev_priv(netdev);
-
-       spin_lock(&enic->devcmd_lock);
-       enic_add_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
-}
-
-/* rtnl lock is held */
-static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
-       struct enic *enic = netdev_priv(netdev);
-
-       spin_lock(&enic->devcmd_lock);
-       enic_del_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
-}
-
 /* netif_tx_lock held, BHs disabled */
 static void enic_tx_timeout(struct net_device *netdev)
 {
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
        schedule_work(&enic->reset);
 }
 
-static int enic_vnic_dev_deinit(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_deinit(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_init_prov(enic->vdev,
-               (u8 *)vp, vic_provinfo_size(vp));
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_init_done(struct enic *enic, int *done, int *error)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_init_done(enic->vdev, done, error);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
        struct enic *enic = netdev_priv(netdev);
@@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
        if (err)
                return err;
 
+       enic_reset_addr_lists(enic);
+
        switch (enic->pp.request) {
 
        case PORT_REQUEST_ASSOCIATE:
@@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
                vic_provinfo_free(vp);
                if (err)
                        return err;
-
-               enic->pp.set |= ENIC_SET_APPLIED;
                break;
 
        case PORT_REQUEST_DISASSOCIATE:
-               enic->pp.set &= ~ENIC_SET_APPLIED;
                break;
 
        default:
                return -EINVAL;
        }
 
+       /* Set flag to indicate that the port assoc/disassoc
+        * request has been sent out to fw
+        */
+       enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
        return 0;
 }
 
@@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 
                if (is_zero_ether_addr(netdev->dev_addr))
                        random_ether_addr(netdev->dev_addr);
-       } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
-               if (!is_zero_ether_addr(enic->pp.mac_addr))
-                       enic_dev_del_addr(enic, enic->pp.mac_addr);
        }
 
        memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
        if (err)
                goto set_port_profile_cleanup;
 
-       if (!is_zero_ether_addr(enic->pp.mac_addr))
-               enic_dev_add_addr(enic, enic->pp.mac_addr);
-
 set_port_profile_cleanup:
        memset(enic->pp.vf_mac, 0, ETH_ALEN);
 
@@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
        int err, error, done;
        u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
 
-       if (!(enic->pp.set & ENIC_SET_APPLIED))
+       if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
                return -ENODATA;
 
        err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        return 0;
 }
 
-static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
-{
-       struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-
-       if (vnic_rq_posting_soon(rq)) {
-
-               /* SW workaround for A0 HW erratum: if we're just about
-                * to write posted_index, insert a dummy desc
-                * of type resvd
-                */
-
-               rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
-               vnic_rq_post(rq, 0, 0, 0, 0);
-       } else {
-               return enic_rq_alloc_buf(rq);
-       }
-
-       return 0;
-}
-
-static int enic_dev_hw_version(struct enic *enic,
-       enum vnic_dev_hw_version *hw_ver)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_hw_version(enic->vdev, hw_ver);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_set_rq_alloc_buf(struct enic *enic)
-{
-       enum vnic_dev_hw_version hw_ver;
-       int err;
-
-       err = enic_dev_hw_version(enic, &hw_ver);
-       if (err)
-               return err;
-
-       switch (hw_ver) {
-       case VNIC_DEV_HW_VER_A1:
-               enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
-               break;
-       case VNIC_DEV_HW_VER_A2:
-       case VNIC_DEV_HW_VER_UNKNOWN:
-               enic->rq_alloc_buf = enic_rq_alloc_buf;
-               break;
-       default:
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
 static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
@@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);
 
-       err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+       err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
 
        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
@@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);
 
-       err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
+       err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
 
        /* Buffer allocation failed. Stay in polling mode
         * so we can try to fill the ring again.
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
        return err;
 }
 
-static int enic_dev_notify_unset(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_notify_unset(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_enable(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_enable_wait(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_disable(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_disable(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_notify_timer_start(struct enic *enic)
 {
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
        }
 
        for (i = 0; i < enic->rq_count; i++) {
-               vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+               vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
                /* Need at least one buffer on ring to get going */
                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
                        netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
                rss_hash_bits, rss_base_cpu, rss_enable);
 }
 
-static int enic_dev_hang_notify(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_hang_notify(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-               IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_reset(struct work_struct *work)
 {
        struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
        enic_dev_hang_notify(enic);
        enic_stop(enic->netdev);
        enic_dev_hang_reset(enic);
-       enic_reset_multicast_list(enic);
+       enic_reset_addr_lists(enic);
        enic_init_vnic_resources(enic);
        enic_set_rss_nic_cfg(enic);
        enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
 static int enic_set_intr_mode(struct enic *enic)
 {
        unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
-       unsigned int m = 1;
+       unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
        unsigned int i;
 
        /* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
        .ndo_tx_timeout         = enic_tx_timeout,
        .ndo_set_vf_port        = enic_set_vf_port,
        .ndo_get_vf_port        = enic_get_vf_port,
-#ifdef IFLA_VF_MAX
        .ndo_set_vf_mac         = enic_set_vf_mac,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = enic_poll_controller,
 #endif
@@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
 
        enic_init_vnic_resources(enic);
 
-       err = enic_set_rq_alloc_buf(enic);
-       if (err) {
-               dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
-               goto err_out_free_vnic_resources;
-       }
-
        err = enic_set_rss_nic_cfg(enic);
        if (err) {
                dev_err(dev, "Failed to config nic, aborting\n");
                goto err_out_free_vnic_resources;
        }
 
-       err = enic_dev_set_ig_vlan_rewrite_mode(enic);
-       if (err) {
-               dev_err(dev,
-                       "Failed to set ingress vlan rewrite mode, aborting.\n");
-               goto err_out_free_vnic_resources;
-       }
-
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        default:
                netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
                goto err_out_vnic_unregister;
        }
 
+       /* Setup devcmd lock
+        */
+
+       spin_lock_init(&enic->devcmd_lock);
+
+       /*
+        * Set ingress vlan rewrite mode before vnic initialization
+        */
+
+       err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+       if (err) {
+               dev_err(dev,
+                       "Failed to set ingress vlan rewrite mode, aborting.\n");
+               goto err_out_dev_close;
+       }
+
        /* Issue device init to initialize the vnic-to-switch link.
         * We'll start with carrier off and wait for link UP
         * notification later to turn on carrier.  We don't need
@@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
                }
        }
 
-       /* Setup devcmd lock
-        */
-
-       spin_lock_init(&enic->devcmd_lock);
-
        err = enic_dev_init(enic);
        if (err) {
                dev_err(dev, "Device initialization failed, aborting\n");
index fb35d8b..c089b36 100644 (file)
@@ -408,10 +408,17 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
                if (!vdev->fw_info)
                        return -ENOMEM;
 
+               memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
+
                a0 = vdev->fw_info_pa;
+               a1 = sizeof(struct vnic_devcmd_fw_info);
 
                /* only get fw_info once and cache it */
                err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+               if (err == ERR_ECMDUNKNOWN) {
+                       err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
+                               &a0, &a1, wait);
+               }
        }
 
        *fw_info = vdev->fw_info;
@@ -419,25 +426,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
        return err;
 }
 
-int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
-{
-       struct vnic_devcmd_fw_info *fw_info;
-       int err;
-
-       err = vnic_dev_fw_info(vdev, &fw_info);
-       if (err)
-               return err;
-
-       if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
-               *hw_ver = VNIC_DEV_HW_VER_A1;
-       else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
-               *hw_ver = VNIC_DEV_HW_VER_A2;
-       else
-               *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
-
-       return 0;
-}
-
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
        void *value)
 {
index 05f9a24..e837546 100644 (file)
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-enum vnic_dev_hw_version {
-       VNIC_DEV_HW_VER_UNKNOWN,
-       VNIC_DEV_HW_VER_A1,
-       VNIC_DEV_HW_VER_A2,
-};
-
 enum vnic_dev_intr_mode {
        VNIC_DEV_INTR_MODE_UNKNOWN,
        VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait);
 int vnic_dev_fw_info(struct vnic_dev *vdev,
        struct vnic_devcmd_fw_info **fw_info);
-int vnic_dev_hw_version(struct vnic_dev *vdev,
-       enum vnic_dev_hw_version *hw_ver);
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
        void *value);
 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
index 9abb3d5..d833a07 100644 (file)
 enum vnic_devcmd_cmd {
        CMD_NONE                = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
 
-       /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
-       CMD_MCPU_FW_INFO        = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+       /*
+        * mcpu fw info in mem:
+        * in:
+        *   (u64)a0=paddr to struct vnic_devcmd_fw_info
+        * action:
+        *   Fills in struct vnic_devcmd_fw_info (128 bytes)
+        * note:
+        *   An old definition of CMD_MCPU_FW_INFO
+        */
+       CMD_MCPU_FW_INFO_OLD    = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+       /*
+        * mcpu fw info in mem:
+        * in:
+        *   (u64)a0=paddr to struct vnic_devcmd_fw_info
+        *   (u16)a1=size of the structure
+        * out:
+        *       (u16)a1=0                          for in:a1 = 0,
+        *               data size actually written for other values.
+        * action:
+        *   Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
+        *            first in:a1 bytes               for 0 < in:a1 <= 132,
+        *            132 bytes                       for other values of in:a1.
+        * note:
+        *   CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
+        *   for source compatibility.
+        */
+       CMD_MCPU_FW_INFO        = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
 
        /* dev-specific block member:
         *    in: (u16)a0=offset,(u8)a1=size
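
The block comment above spells out the new contract: a0 carries the DMA address of the vnic_devcmd_fw_info buffer, while a1 carries the buffer size on input and the number of bytes actually written on output. The vnic_dev.c hunk earlier in this diff falls back to CMD_MCPU_FW_INFO_OLD when the new form returns ERR_ECMDUNKNOWN; a caller sketch under those assumptions:

    /* sketch only -- assumes it lives in vnic_dev.c where struct vnic_dev
     * is visible; the wait value is illustrative
     */
    static int sketch_fetch_fw_info(struct vnic_dev *vdev)
    {
            u64 a0 = vdev->fw_info_pa;                    /* paddr of fw_info buffer */
            u64 a1 = sizeof(struct vnic_devcmd_fw_info);  /* in: buffer size */
            int wait = 1000;
            int err;

            err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
            if (err == ERR_ECMDUNKNOWN)                   /* older firmware */
                    err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
                            &a0, &a1, wait);

            /* on success a1 now holds how many bytes the firmware wrote */
            return err;
    }
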
@@ -291,11 +317,19 @@ enum vnic_devcmd_error {
        ERR_EMAXRES = 10,
 };
 
+/*
+ * note: hw_version and asic_rev refer to the same thing,
+ *       but have different formats. hw_version is
+ *       a 32-byte string (e.g. "A2") and asic_rev is
+ *       a 16-bit integer (e.g. 0xA2).
+ */
 struct vnic_devcmd_fw_info {
        char fw_version[32];
        char fw_build[32];
        char hw_version[32];
        char hw_serial_number[32];
+       u16 asic_type;
+       u16 asic_rev;
 };
 
 struct vnic_devcmd_notify {
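
The note added above says hw_version and asic_rev describe the same revision in two formats, a 32-byte string such as "A2" against a 16-bit integer such as 0xA2. Going only by those examples, the string form maps to the integer form by reading its characters as hex digits; a sketch of that correspondence (the helper does not exist in the driver, and the mapping is an assumption drawn from the examples):

    static u16 sketch_hw_version_to_asic_rev(const char *hw_version)
    {
            unsigned int rev = 0;

            /* "A2" -> 0xA2, per the examples in the note above */
            if (sscanf(hw_version, "%2x", &rev) != 1)
                    return 0;
            return (u16)rev;
    }
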
index 37f08de..2056586 100644 (file)
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
        }
 }
 
-static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-{
-       return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
-}
-
 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
 {
        rq->ring.desc_avail += count;
index 0cb1cf9..a59cf96 100644 (file)
  * Sorry, I had to rewrite most of this for 2.5.x -DaveM
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
 }
 
 static const char version[] __initconst =
-       "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
+       "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
 
 static const struct net_device_ops eql_netdev_ops = {
        .ndo_open       = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
        equalizer_t *eql = netdev_priv(dev);
 
        /* XXX We should force this off automatically for the user. */
-       printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
-              "your slave devices.\n", dev->name);
+       netdev_info(dev,
+                   "remember to turn off Van-Jacobson compression on your slave devices\n");
 
        BUG_ON(!list_empty(&eql->queue.all_slaves));
 
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
 {
        int err;
 
-       printk(version);
+       pr_info("%s\n", version);
 
        dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
        if (!dev_eql)
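
The eql changes are a logging cleanup: a pr_fmt definition is added ahead of the includes so every pr_*() call is prefixed with the module name, the version banner loses its embedded newline and is printed through pr_info(), and the open-time reminder becomes netdev_info() so it carries the interface name automatically. A small sketch of the pr_fmt mechanism (KBUILD_MODNAME is supplied by the kernel build; "eql" is the assumed value here):

    /* sketch: pr_fmt() must be defined before printk.h is pulled in */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>

    static void sketch_banner(void)
    {
            /* expands to printk(KERN_INFO "eql: Equalizer2002 ...\n")
             * when KBUILD_MODNAME is "eql"
             */
            pr_info("%s\n", "Equalizer2002 ...");
    }
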
index 2a71373..885d8ba 100644 (file)
@@ -54,7 +54,7 @@
 
 #include "fec.h"
 
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
 #define FEC_ALIGNMENT  0xf
 #else
 #define FEC_ALIGNMENT  0x3
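
FEC_ALIGNMENT is used as an address mask, so 0xf demands 16-byte aligned DMA buffers on the ARM (i.MX) variants while 0x3 keeps the 4-byte requirement elsewhere; the transmit path later in this patch copies unaligned skb data into a bounce buffer before mapping it. A minimal sketch of that test (the helper name is illustrative, not from the driver):

    static inline bool sketch_needs_bounce(const void *bufaddr,
                                           unsigned long align_mask)
    {
            /* true when the buffer fails e.g. the 16-byte (0xf) check */
            return ((unsigned long)bufaddr & align_mask) != 0;
    }

    /* usage in the spirit of the xmit path:
     * sketch_needs_bounce(skb->data, FEC_ALIGNMENT)
     */
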
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
-       }
+       },
+       { }
 };
 
 static unsigned char macaddr[ETH_ALEN];
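
The added "{ }" entry gives fec_devtype[] the empty sentinel the platform bus relies on: the id-matching loop keeps advancing until it finds an entry whose name is empty, so a table without the terminator would be walked past its end. A sketch of the convention:

    /* sketch only -- a platform_device_id table terminated by an empty
     * sentinel entry, which is what this one-line fix adds
     */
    static const struct platform_device_id sketch_ids[] = {
            { .name = "fec", .driver_data = 0 },
            { /* sentinel */ },
    };
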
@@ -147,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 #define        OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
 #else
 #define        OPT_FRAME_SIZE  0
@@ -183,7 +183,7 @@ struct fec_enet_private {
        struct bufdesc  *rx_bd_base;
        struct bufdesc  *tx_bd_base;
        /* The next free ring entry */
-       struct bufdesc  *cur_rx, *cur_tx; 
+       struct bufdesc  *cur_rx, *cur_tx;
        /* The ring entries to be free()ed */
        struct bufdesc  *dirty_tx;
 
@@ -191,28 +191,21 @@ struct fec_enet_private {
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;
 
-       struct  platform_device *pdev;
+       struct  platform_device *pdev;
 
        int     opened;
 
        /* Phylib and MDIO interface */
-       struct  mii_bus *mii_bus;
-       struct  phy_device *phy_dev;
-       int     mii_timeout;
-       uint    phy_speed;
+       struct  mii_bus *mii_bus;
+       struct  phy_device *phy_dev;
+       int     mii_timeout;
+       uint    phy_speed;
        phy_interface_t phy_interface;
        int     link;
        int     full_duplex;
        struct  completion mdio_done;
 };
 
-static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
-static void fec_enet_tx(struct net_device *dev);
-static void fec_enet_rx(struct net_device *dev);
-static int fec_enet_close(struct net_device *dev);
-static void fec_restart(struct net_device *dev, int duplex);
-static void fec_stop(struct net_device *dev);
-
 /* FEC MII MMFR bits definition */
 #define FEC_MMFR_ST            (1 << 30)
 #define FEC_MMFR_OP_READ       (2 << 28)
@@ -239,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len)
 }
 
 static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp;
@@ -262,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (status & BD_ENET_TX_READY) {
                /* Ooops.  All transmit buffers are full.  Bail out.
-                * This should not happen, since dev->tbusy should be set.
+                * This should not happen, since ndev->tbusy should be set.
                 */
-               printk("%s: tx queue full!.\n", dev->name);
+               printk("%s: tx queue full!.\n", ndev->name);
                spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }
@@ -284,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
                unsigned int index;
                index = bdp - fep->tx_bd_base;
-               memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
+               memcpy(fep->tx_bounce[index], skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }
 
@@ -299,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Save skb pointer */
        fep->tx_skbuff[fep->skb_cur] = skb;
 
-       dev->stats.tx_bytes += skb->len;
+       ndev->stats.tx_bytes += skb->len;
        fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
 
        /* Push the data cache so the CPM does not get stale memory
         * data.
         */
-       bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
+       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
                        FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
@@ -326,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (bdp == fep->dirty_tx) {
                fep->tx_full = 1;
-               netif_stop_queue(dev);
+               netif_stop_queue(ndev);
        }
 
        fep->cur_tx = bdp;
@@ -336,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+/* This function is called to start or restart the FEC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
 static void
-fec_timeout(struct net_device *dev)
+fec_restart(struct net_device *ndev, int duplex)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       const struct platform_device_id *id_entry =
+                               platform_get_device_id(fep->pdev);
+       int i;
+       u32 temp_mac[2];
+       u32 rcntl = OPT_FRAME_SIZE | 0x04;
 
-       dev->stats.tx_errors++;
+       /* Whack a reset.  We should wait for this. */
+       writel(1, fep->hwp + FEC_ECNTRL);
+       udelay(10);
 
-       fec_restart(dev, fep->full_duplex);
-       netif_wake_queue(dev);
-}
+       /*
+        * enet-mac reset will reset mac address registers too,
+        * so need to reconfigure it.
+        */
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+               memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+       }
 
-static irqreturn_t
-fec_enet_interrupt(int irq, void * dev_id)
-{
-       struct  net_device *dev = dev_id;
-       struct fec_enet_private *fep = netdev_priv(dev);
-       uint    int_events;
-       irqreturn_t ret = IRQ_NONE;
+       /* Clear any outstanding interrupt. */
+       writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
-       do {
-               int_events = readl(fep->hwp + FEC_IEVENT);
-               writel(int_events, fep->hwp + FEC_IEVENT);
+       /* Reset all multicast. */
+       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+       writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+       writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
 
-               if (int_events & FEC_ENET_RXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_rx(dev);
-               }
+       /* Set maximum receive buffer size. */
+       writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
-               /* Transmit OK, or non-fatal error. Update the buffer
-                * descriptors. FEC handles all errors, we just discover
-                * them as part of the transmit process.
-                */
-               if (int_events & FEC_ENET_TXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_tx(dev);
+       /* Set receive and transmit descriptor base. */
+       writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+       writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+                       fep->hwp + FEC_X_DES_START);
+
+       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+       fep->cur_rx = fep->rx_bd_base;
+
+       /* Reset SKB transmit buffers. */
+       fep->skb_cur = fep->skb_dirty = 0;
+       for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+               if (fep->tx_skbuff[i]) {
+                       dev_kfree_skb_any(fep->tx_skbuff[i]);
+                       fep->tx_skbuff[i] = NULL;
                }
+       }
 
-               if (int_events & FEC_ENET_MII) {
-                       ret = IRQ_HANDLED;
-                       complete(&fep->mdio_done);
+       /* Enable MII mode */
+       if (duplex) {
+               /* FD enable */
+               writel(0x04, fep->hwp + FEC_X_CNTRL);
+       } else {
+               /* No Rcv on Xmit */
+               rcntl |= 0x02;
+               writel(0x0, fep->hwp + FEC_X_CNTRL);
+       }
+
+       fep->full_duplex = duplex;
+
+       /* Set MII speed */
+       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+       /*
+        * The phy interface and speed need to get configured
+        * differently on enet-mac.
+        */
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+               /* Enable flow control and length check */
+               rcntl |= 0x40000000 | 0x00000020;
+
+               /* MII or RMII */
+               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+                       rcntl |= (1 << 8);
+               else
+                       rcntl &= ~(1 << 8);
+
+               /* 10M or 100M */
+               if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+                       rcntl &= ~(1 << 9);
+               else
+                       rcntl |= (1 << 9);
+
+       } else {
+#ifdef FEC_MIIGSK_ENR
+               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+                       /* disable the gasket and wait */
+                       writel(0, fep->hwp + FEC_MIIGSK_ENR);
+                       while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+                               udelay(1);
+
+                       /*
+                        * configure the gasket:
+                        *   RMII, 50 MHz, no loopback, no echo
+                        */
+                       writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+                       /* re-enable the gasket */
+                       writel(2, fep->hwp + FEC_MIIGSK_ENR);
                }
-       } while (int_events);
+#endif
+       }
+       writel(rcntl, fep->hwp + FEC_R_CNTRL);
 
-       return ret;
+       /* And last, enable the transmit and receive processing */
+       writel(2, fep->hwp + FEC_ECNTRL);
+       writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+       /* Enable interrupts we wish to service */
+       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
+static void
+fec_stop(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       /* We cannot expect a graceful transmit stop without link !!! */
+       if (fep->link) {
+               writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+               udelay(10);
+               if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+                       printk("fec_stop : Graceful transmit stop did not complete !\n");
+       }
+
+       /* Whack a reset.  We should wait for this. */
+       writel(1, fep->hwp + FEC_ECNTRL);
+       udelay(10);
+       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+
+static void
+fec_timeout(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       ndev->stats.tx_errors++;
+
+       fec_restart(ndev, fep->full_duplex);
+       netif_wake_queue(ndev);
+}
 
 static void
-fec_enet_tx(struct net_device *dev)
+fec_enet_tx(struct net_device *ndev)
 {
        struct  fec_enet_private *fep;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
 
-       fep = netdev_priv(dev);
+       fep = netdev_priv(ndev);
        spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;
 
@@ -399,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
                if (bdp == fep->cur_tx && fep->tx_full == 0)
                        break;
 
-               dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                               FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;
 
                skb = fep->tx_skbuff[fep->skb_dirty];
@@ -407,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
                                   BD_ENET_TX_CSL)) {
-                       dev->stats.tx_errors++;
+                       ndev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
-                               dev->stats.tx_heartbeat_errors++;
+                               ndev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
-                               dev->stats.tx_window_errors++;
+                               ndev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
-                               dev->stats.tx_aborted_errors++;
+                               ndev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
-                               dev->stats.tx_fifo_errors++;
+                               ndev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
-                               dev->stats.tx_carrier_errors++;
+                               ndev->stats.tx_carrier_errors++;
                } else {
-                       dev->stats.tx_packets++;
+                       ndev->stats.tx_packets++;
                }
 
                if (status & BD_ENET_TX_READY)
@@ -429,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
-                       dev->stats.collisions++;
+                       ndev->stats.collisions++;
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
@@ -446,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
                 */
                if (fep->tx_full) {
                        fep->tx_full = 0;
-                       if (netif_queue_stopped(dev))
-                               netif_wake_queue(dev);
+                       if (netif_queue_stopped(ndev))
+                               netif_wake_queue(ndev);
                }
        }
        fep->dirty_tx = bdp;
@@ -461,9 +563,9 @@ fec_enet_tx(struct net_device *dev)
  * effectively tossing the packet.
  */
 static void
-fec_enet_rx(struct net_device *dev)
+fec_enet_rx(struct net_device *ndev)
 {
-       struct  fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp;
@@ -497,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                           BD_ENET_RX_CR | BD_ENET_RX_OV)) {
-                       dev->stats.rx_errors++;
+                       ndev->stats.rx_errors++;
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                                /* Frame too long or too short. */
-                               dev->stats.rx_length_errors++;
+                               ndev->stats.rx_length_errors++;
                        }
                        if (status & BD_ENET_RX_NO)     /* Frame alignment */
-                               dev->stats.rx_frame_errors++;
+                               ndev->stats.rx_frame_errors++;
                        if (status & BD_ENET_RX_CR)     /* CRC Error */
-                               dev->stats.rx_crc_errors++;
+                               ndev->stats.rx_crc_errors++;
                        if (status & BD_ENET_RX_OV)     /* FIFO overrun */
-                               dev->stats.rx_fifo_errors++;
+                               ndev->stats.rx_fifo_errors++;
                }
 
                /* Report late collisions as a frame error.
@@ -515,19 +617,19 @@ fec_enet_rx(struct net_device *dev)
                 * have in the buffer.  So, just drop this frame on the floor.
                 */
                if (status & BD_ENET_RX_CL) {
-                       dev->stats.rx_errors++;
-                       dev->stats.rx_frame_errors++;
+                       ndev->stats.rx_errors++;
+                       ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }
 
                /* Process the incoming frame. */
-               dev->stats.rx_packets++;
+               ndev->stats.rx_packets++;
                pkt_len = bdp->cbd_datlen;
-               dev->stats.rx_bytes += pkt_len;
+               ndev->stats.rx_bytes += pkt_len;
                data = (__u8*)__va(bdp->cbd_bufaddr);
 
-               dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
-                               DMA_FROM_DEVICE);
+               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                               FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
 
                if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, pkt_len);
@@ -541,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
 
                if (unlikely(!skb)) {
                        printk("%s: Memory squeeze, dropping packet.\n",
-                                       dev->name);
-                       dev->stats.rx_dropped++;
+                                       ndev->name);
+                       ndev->stats.rx_dropped++;
                } else {
                        skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len - 4);      /* Make room */
                        skb_copy_to_linear_data(skb, data, pkt_len - 4);
-                       skb->protocol = eth_type_trans(skb, dev);
+                       skb->protocol = eth_type_trans(skb, ndev);
                        netif_rx(skb);
                }
 
-               bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
-                       DMA_FROM_DEVICE);
+               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+                               FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
 rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;
@@ -577,10 +679,47 @@ rx_processing_done:
        spin_unlock(&fep->hw_lock);
 }
 
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = dev_id;
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       uint int_events;
+       irqreturn_t ret = IRQ_NONE;
+
+       do {
+               int_events = readl(fep->hwp + FEC_IEVENT);
+               writel(int_events, fep->hwp + FEC_IEVENT);
+
+               if (int_events & FEC_ENET_RXF) {
+                       ret = IRQ_HANDLED;
+                       fec_enet_rx(ndev);
+               }
+
+               /* Transmit OK, or non-fatal error. Update the buffer
+                * descriptors. FEC handles all errors, we just discover
+                * them as part of the transmit process.
+                */
+               if (int_events & FEC_ENET_TXF) {
+                       ret = IRQ_HANDLED;
+                       fec_enet_tx(ndev);
+               }
+
+               if (int_events & FEC_ENET_MII) {
+                       ret = IRQ_HANDLED;
+                       complete(&fep->mdio_done);
+               }
+       } while (int_events);
+
+       return ret;
+}
+
+
+
 /* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *dev)
+static void __inline__ fec_get_mac(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        unsigned char *iap, tmpaddr[ETH_ALEN];
 
@@ -616,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
                iap = &tmpaddr[0];
        }
 
-       memcpy(dev->dev_addr, iap, ETH_ALEN);
+       memcpy(ndev->dev_addr, iap, ETH_ALEN);
 
        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
-                dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -628,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
 /*
  * Phy section
  */
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = fep->phy_dev;
        unsigned long flags;
 
@@ -647,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
        /* Duplex link change */
        if (phy_dev->link) {
                if (fep->full_duplex != phy_dev->duplex) {
-                       fec_restart(dev, phy_dev->duplex);
+                       fec_restart(ndev, phy_dev->duplex);
                        status_change = 1;
                }
        }
@@ -656,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
        if (phy_dev->link != fep->link) {
                fep->link = phy_dev->link;
                if (phy_dev->link)
-                       fec_restart(dev, phy_dev->duplex);
+                       fec_restart(ndev, phy_dev->duplex);
                else
-                       fec_stop(dev);
+                       fec_stop(ndev);
                status_change = 1;
        }
 
@@ -727,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
        return 0;
 }
 
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = NULL;
        char mdio_bus_id[MII_BUS_ID_SIZE];
        char phy_name[MII_BUS_ID_SIZE + 3];
@@ -754,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
 
        if (phy_id >= PHY_MAX_ADDR) {
                printk(KERN_INFO "%s: no PHY, assuming direct connection "
-                       "to switch\n", dev->name);
+                       "to switch\n", ndev->name);
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }
 
        snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-       phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+       phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
                PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phy_dev)) {
-               printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+               printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
                return PTR_ERR(phy_dev);
        }
 
@@ -776,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
        fep->full_duplex = 0;
 
        printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-               "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+               "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
                fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
                fep->phy_dev->irq);
 
@@ -786,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
        static struct mii_bus *fec0_mii_bus;
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        int err = -ENXIO, i;
@@ -845,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                fep->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(dev, fep->mii_bus);
-
        if (mdiobus_register(fep->mii_bus))
                goto err_out_free_mdio_irq;
 
@@ -873,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
        mdiobus_free(fep->mii_bus);
 }
 
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
                                  struct ethtool_cmd *cmd)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phydev = fep->phy_dev;
 
        if (!phydev)
@@ -885,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
        return phy_ethtool_gset(phydev, cmd);
 }
 
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
                                 struct ethtool_cmd *cmd)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phydev = fep->phy_dev;
 
        if (!phydev)
@@ -897,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
        return phy_ethtool_sset(phydev, cmd);
 }
 
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
                                 struct ethtool_drvinfo *info)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
        strcpy(info->driver, fep->pdev->dev.driver->name);
        strcpy(info->version, "Revision: 1.0");
-       strcpy(info->bus_info, dev_name(&dev->dev));
+       strcpy(info->bus_info, dev_name(&ndev->dev));
 }
 
 static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -914,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
 };
 
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phydev = fep->phy_dev;
 
-       if (!netif_running(dev))
+       if (!netif_running(ndev))
                return -EINVAL;
 
        if (!phydev)
@@ -928,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return phy_mii_ioctl(phydev, rq, cmd);
 }
 
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
@@ -940,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
                skb = fep->rx_skbuff[i];
 
                if (bdp->cbd_bufaddr)
-                       dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                if (skb)
                        dev_kfree_skb(skb);
@@ -952,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
                kfree(fep->tx_bounce[i]);
 }
 
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
@@ -963,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
        for (i = 0; i < RX_RING_SIZE; i++) {
                skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
                if (!skb) {
-                       fec_enet_free_buffers(dev);
+                       fec_enet_free_buffers(ndev);
                        return -ENOMEM;
                }
                fep->rx_skbuff[i] = skb;
 
-               bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                bdp->cbd_sc = BD_ENET_RX_EMPTY;
                bdp++;
@@ -995,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
 }
 
 static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;
 
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
         */
 
-       ret = fec_enet_alloc_buffers(dev);
+       ret = fec_enet_alloc_buffers(ndev);
        if (ret)
                return ret;
 
        /* Probe and connect to PHY when opening the interface */
-       ret = fec_enet_mii_probe(dev);
+       ret = fec_enet_mii_probe(ndev);
        if (ret) {
-               fec_enet_free_buffers(dev);
+               fec_enet_free_buffers(ndev);
                return ret;
        }
        phy_start(fep->phy_dev);
-       netif_start_queue(dev);
+       netif_start_queue(ndev);
        fep->opened = 1;
        return 0;
 }
 
 static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
        /* Don't know what to do yet. */
        fep->opened = 0;
-       netif_stop_queue(dev);
-       fec_stop(dev);
+       netif_stop_queue(ndev);
+       fec_stop(ndev);
 
-       if (fep->phy_dev)
+       if (fep->phy_dev) {
+               phy_stop(fep->phy_dev);
                phy_disconnect(fep->phy_dev);
+       }
 
-        fec_enet_free_buffers(dev);
+       fec_enet_free_buffers(ndev);
 
        return 0;
 }
@@ -1051,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
 #define HASH_BITS      6               /* #bits in hash */
 #define CRC32_POLY     0xEDB88320
 
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct netdev_hw_addr *ha;
        unsigned int i, bit, data, crc, tmp;
        unsigned char hash;
 
-       if (dev->flags & IFF_PROMISC) {
+       if (ndev->flags & IFF_PROMISC) {
                tmp = readl(fep->hwp + FEC_R_CNTRL);
                tmp |= 0x8;
                writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1069,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
        tmp &= ~0x8;
        writel(tmp, fep->hwp + FEC_R_CNTRL);
 
-       if (dev->flags & IFF_ALLMULTI) {
+       if (ndev->flags & IFF_ALLMULTI) {
                /* Catch all multicast addresses, so set the
                 * filter to all 1's
                 */
@@ -1084,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
        writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
        writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 
-       netdev_for_each_mc_addr(ha, dev) {
+       netdev_for_each_mc_addr(ha, ndev) {
                /* Only support group multicast for now */
                if (!(ha->addr[0] & 1))
                        continue;
@@ -1092,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
                /* calculate crc32 value of mac address */
                crc = 0xffffffff;
 
-               for (i = 0; i < dev->addr_len; i++) {
+               for (i = 0; i < ndev->addr_len; i++) {
                        data = ha->addr[i];
                        for (bit = 0; bit < 8; bit++, data >>= 1) {
                                crc = (crc >> 1) ^
@@ -1119,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
 
 /* Set a MAC change in hardware. */
 static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct sockaddr *addr = p;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-       writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
-               (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+       writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+               (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
                fep->hwp + FEC_ADDR_LOW);
-       writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+       writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
                fep->hwp + FEC_ADDR_HIGH);
        return 0;
 }
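
fec_set_mac_address() above packs the six MAC bytes into two registers: bytes 0-3 go into FEC_ADDR_LOW with byte 0 in the top bits, and bytes 4-5 fill the upper half of FEC_ADDR_HIGH. A small stand-alone illustration of that packing, using a made-up address (plain user-space C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up MAC address, just for illustration. */
        const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };

        /* FEC_ADDR_LOW takes bytes 0..3, byte 0 in bits 31:24. */
        uint32_t addr_low = mac[3] | (mac[2] << 8) | (mac[1] << 16) |
                            ((uint32_t)mac[0] << 24);
        /* FEC_ADDR_HIGH takes bytes 4..5 in its upper 16 bits. */
        uint32_t addr_high = (mac[5] << 16) | ((uint32_t)mac[4] << 24);

        printf("ADDR_LOW  = 0x%08x\n", (unsigned int)addr_low);
        printf("ADDR_HIGH = 0x%08x\n", (unsigned int)addr_high);
        return 0;
}
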
@@ -1146,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = fec_timeout,
        .ndo_set_mac_address    = fec_set_mac_address,
-       .ndo_do_ioctl           = fec_enet_ioctl,
+       .ndo_do_ioctl           = fec_enet_ioctl,
 };
 
  /*
   * XXX:  We need to clean up on failure exits here.
   *
   */
-static int fec_enet_init(struct net_device *dev)
+static int fec_enet_init(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *cbd_base;
        struct bufdesc *bdp;
        int i;
@@ -1170,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev)
 
        spin_lock_init(&fep->hw_lock);
 
-       fep->hwp = (void __iomem *)dev->base_addr;
-       fep->netdev = dev;
+       fep->netdev = ndev;
 
        /* Get the Ethernet address */
-       fec_get_mac(dev);
+       fec_get_mac(ndev);
 
        /* Set receive and transmit descriptor base. */
        fep->rx_bd_base = cbd_base;
        fep->tx_bd_base = cbd_base + RX_RING_SIZE;
 
        /* The FEC Ethernet specific entries in the device structure */
-       dev->watchdog_timeo = TX_TIMEOUT;
-       dev->netdev_ops = &fec_netdev_ops;
-       dev->ethtool_ops = &fec_enet_ethtool_ops;
+       ndev->watchdog_timeo = TX_TIMEOUT;
+       ndev->netdev_ops = &fec_netdev_ops;
+       ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
        /* Initialize the receive buffer descriptors. */
        bdp = fep->rx_bd_base;
@@ -1212,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev)
        bdp--;
        bdp->cbd_sc |= BD_SC_WRAP;
 
-       fec_restart(dev, 0);
+       fec_restart(ndev, 0);
 
        return 0;
 }
 
-/* This function is called to start or restart the FEC during a link
- * change.  This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
-       struct fec_enet_private *fep = netdev_priv(dev);
-       const struct platform_device_id *id_entry =
-                               platform_get_device_id(fep->pdev);
-       int i;
-       u32 val, temp_mac[2];
-
-       /* Whack a reset.  We should wait for this. */
-       writel(1, fep->hwp + FEC_ECNTRL);
-       udelay(10);
-
-       /*
-        * enet-mac reset will reset mac address registers too,
-        * so need to reconfigure it.
-        */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-               memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
-               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-       }
-
-       /* Clear any outstanding interrupt. */
-       writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
-       /* Reset all multicast. */
-       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
-       writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
-       writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
-       /* Set maximum receive buffer size. */
-       writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
-       /* Set receive and transmit descriptor base. */
-       writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
-       writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
-                       fep->hwp + FEC_X_DES_START);
-
-       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
-       fep->cur_rx = fep->rx_bd_base;
-
-       /* Reset SKB transmit buffers. */
-       fep->skb_cur = fep->skb_dirty = 0;
-       for (i = 0; i <= TX_RING_MOD_MASK; i++) {
-               if (fep->tx_skbuff[i]) {
-                       dev_kfree_skb_any(fep->tx_skbuff[i]);
-                       fep->tx_skbuff[i] = NULL;
-               }
-       }
-
-       /* Enable MII mode */
-       if (duplex) {
-               /* MII enable / FD enable */
-               writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
-               writel(0x04, fep->hwp + FEC_X_CNTRL);
-       } else {
-               /* MII enable / No Rcv on Xmit */
-               writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
-               writel(0x0, fep->hwp + FEC_X_CNTRL);
-       }
-       fep->full_duplex = duplex;
-
-       /* Set MII speed */
-       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
-       /*
-        * The phy interface and speed need to get configured
-        * differently on enet-mac.
-        */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-               val = readl(fep->hwp + FEC_R_CNTRL);
-
-               /* MII or RMII */
-               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
-                       val |= (1 << 8);
-               else
-                       val &= ~(1 << 8);
-
-               /* 10M or 100M */
-               if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
-                       val &= ~(1 << 9);
-               else
-                       val |= (1 << 9);
-
-               writel(val, fep->hwp + FEC_R_CNTRL);
-       } else {
-#ifdef FEC_MIIGSK_ENR
-               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-                       /* disable the gasket and wait */
-                       writel(0, fep->hwp + FEC_MIIGSK_ENR);
-                       while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-                               udelay(1);
-
-                       /*
-                        * configure the gasket:
-                        *   RMII, 50 MHz, no loopback, no echo
-                        */
-                       writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
-                       /* re-enable the gasket */
-                       writel(2, fep->hwp + FEC_MIIGSK_ENR);
-               }
-#endif
-       }
-
-       /* And last, enable the transmit and receive processing */
-       writel(2, fep->hwp + FEC_ECNTRL);
-       writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
-       /* Enable interrupts we wish to service */
-       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
-       struct fec_enet_private *fep = netdev_priv(dev);
-
-       /* We cannot expect a graceful transmit stop without link !!! */
-       if (fep->link) {
-               writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
-               udelay(10);
-               if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
-                       printk("fec_stop : Graceful transmit stop did not complete !\n");
-       }
-
-       /* Whack a reset.  We should wait for this. */
-       writel(1, fep->hwp + FEC_ECNTRL);
-       udelay(10);
-       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
 static int __devinit
 fec_probe(struct platform_device *pdev)
 {
@@ -1377,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
 
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct fec_enet_private));
-       if (!ndev)
-               return -ENOMEM;
+       if (!ndev) {
+               ret = -ENOMEM;
+               goto failed_alloc_etherdev;
+       }
 
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        /* setup board info structure */
        fep = netdev_priv(ndev);
-       memset(fep, 0, sizeof(*fep));
 
-       ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+       fep->hwp = ioremap(r->start, resource_size(r));
        fep->pdev = pdev;
 
-       if (!ndev->base_addr) {
+       if (!fep->hwp) {
                ret = -ENOMEM;
                goto failed_ioremap;
        }
@@ -1407,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
                        break;
                ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
                if (ret) {
-                       while (i >= 0) {
+                       while (--i >= 0) {
                                irq = platform_get_irq(pdev, i);
                                free_irq(irq, ndev);
-                               i--;
                        }
                        goto failed_irq;
                }
@@ -1453,9 +1450,11 @@ failed_clk:
                        free_irq(irq, ndev);
        }
 failed_irq:
-       iounmap((void __iomem *)ndev->base_addr);
+       iounmap(fep->hwp);
 failed_ioremap:
        free_netdev(ndev);
+failed_alloc_etherdev:
+       release_mem_region(r->start, resource_size(r));
 
        return ret;
 }
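
The fec_probe() hunk above tightens the error path: the IRQ cleanup loop now decrements before freeing (so the IRQ that failed to be requested is not freed), and a new failed_alloc_etherdev label releases the memory region on every exit. A minimal, self-contained sketch of this acquire-in-order / release-in-reverse goto convention; the get_*/put_* helpers are placeholders, not FEC functions:

#include <stdio.h>

/*
 * Placeholder "resources"; in fec_probe() these would be the memory
 * region, the ioremap'd registers, the IRQs and the clock.
 */
static int get_region(void)  { return 0; }
static void put_region(void) { }
static int get_regs(void)    { return 0; }
static void put_regs(void)   { }
static int get_irq(void)     { return -1; }   /* pretend this step fails */

static int example_probe(void)
{
        int ret;

        ret = get_region();
        if (ret)
                goto failed_region;

        ret = get_regs();
        if (ret)
                goto failed_regs;

        ret = get_irq();
        if (ret)
                goto failed_irq;

        return 0;

failed_irq:
        put_regs();     /* undo get_regs() only */
failed_regs:
        put_region();   /* undo get_region() only */
failed_region:
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", example_probe());
        return 0;
}
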
@@ -1465,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
-
-       platform_set_drvdata(pdev, NULL);
+       struct resource *r;
 
        fec_stop(ndev);
        fec_enet_mii_remove(fep);
        clk_disable(fep->clk);
        clk_put(fep->clk);
-       iounmap((void __iomem *)ndev->base_addr);
+       iounmap(fep->hwp);
        unregister_netdev(ndev);
        free_netdev(ndev);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       BUG_ON(!r);
+       release_mem_region(r->start, resource_size(r));
+
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
@@ -1483,16 +1488,14 @@ static int
 fec_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
-       struct fec_enet_private *fep;
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
-       if (ndev) {
-               fep = netdev_priv(ndev);
-               if (netif_running(ndev)) {
-                       fec_stop(ndev);
-                       netif_device_detach(ndev);
-               }
-               clk_disable(fep->clk);
+       if (netif_running(ndev)) {
+               fec_stop(ndev);
+               netif_device_detach(ndev);
        }
+       clk_disable(fep->clk);
+
        return 0;
 }
 
@@ -1500,16 +1503,14 @@ static int
 fec_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
-       struct fec_enet_private *fep;
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
-       if (ndev) {
-               fep = netdev_priv(ndev);
-               clk_enable(fep->clk);
-               if (netif_running(ndev)) {
-                       fec_restart(ndev, fep->full_duplex);
-                       netif_device_attach(ndev);
-               }
+       clk_enable(fep->clk);
+       if (netif_running(ndev)) {
+               fec_restart(ndev, fep->full_duplex);
+               netif_device_attach(ndev);
        }
+
        return 0;
 }
 
index af09296..7b92897 100644
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                goto out_error;
        }
 
+       netif_carrier_off(dev);
+
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
@@ -5742,7 +5744,7 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
        pci_set_drvdata(pci_dev, NULL);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int nv_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -5793,6 +5795,11 @@ static int nv_resume(struct device *device)
 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
 #define NV_PM_OPS (&nv_pm_ops)
 
+#else
+#define NV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
 static void nv_shutdown(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
@@ -5820,7 +5827,6 @@ static void nv_shutdown(struct pci_dev *pdev)
        }
 }
 #else
-#define NV_PM_OPS NULL
 #define nv_shutdown NULL
 #endif /* CONFIG_PM */
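
The forcedeth hunks above split the power-management guards: the dev_pm_ops table (NV_PM_OPS) now depends only on CONFIG_PM_SLEEP, while the shutdown hook keeps CONFIG_PM, so the suspend/resume callbacks exist exactly when sleep states can be entered. A minimal sketch of that guard layout for a hypothetical driver (the example_* names are placeholders):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
        /* quiesce the hardware and detach the interface here */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* restart the hardware and reattach the interface here */
        return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS  (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS  NULL
#endif /* CONFIG_PM_SLEEP */
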
 
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644 (file)
index 0000000..1d6f4b8
--- /dev/null
@@ -0,0 +1,1198 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "ftmac100.h"
+
+#define DRV_NAME       "ftmac100"
+#define DRV_VERSION    "0.2"
+
+#define RX_QUEUE_ENTRIES       128     /* must be power of 2 */
+#define TX_QUEUE_ENTRIES       16      /* must be power of 2 */
+
+#define MAX_PKT_SIZE           1518
+#define RX_BUF_SIZE            2044    /* must be smaller than 0x7ff */
+
+#if MAX_PKT_SIZE > 0x7ff
+#error invalid MAX_PKT_SIZE
+#endif
+
+#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
+#error invalid RX_BUF_SIZE
+#endif
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftmac100_descs {
+       struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+       struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftmac100 {
+       struct resource *res;
+       void __iomem *base;
+       int irq;
+
+       struct ftmac100_descs *descs;
+       dma_addr_t descs_dma_addr;
+
+       unsigned int rx_pointer;
+       unsigned int tx_clean_pointer;
+       unsigned int tx_pointer;
+       unsigned int tx_pending;
+
+       spinlock_t tx_lock;
+
+       struct net_device *netdev;
+       struct device *dev;
+       struct napi_struct napi;
+
+       struct mii_if_info mii;
+};
+
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
+                                 struct ftmac100_rxdes *rxdes, gfp_t gfp);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED   (FTMAC100_INT_RPKT_FINISH       | \
+                                FTMAC100_INT_NORXBUF           | \
+                                FTMAC100_INT_XPKT_OK           | \
+                                FTMAC100_INT_XPKT_LOST         | \
+                                FTMAC100_INT_RPKT_LOST         | \
+                                FTMAC100_INT_AHB_ERR           | \
+                                FTMAC100_INT_PHYSTS_CHG)
+
+#define INT_MASK_ALL_DISABLED  0
+
+static void ftmac100_enable_all_int(struct ftmac100 *priv)
+{
+       iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_disable_all_int(struct ftmac100 *priv)
+{
+       iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+       iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+       iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
+}
+
+static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
+{
+       iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
+}
+
+static int ftmac100_reset(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       int i;
+
+       /* NOTE: reset clears all registers */
+       iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
+
+       for (i = 0; i < 5; i++) {
+               unsigned int maccr;
+
+               maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+               if (!(maccr & FTMAC100_MACCR_SW_RST)) {
+                       /*
+                        * FTMAC100_MACCR_SW_RST cleared does not indicate
+                        * that hardware reset completed (what the f*ck).
+                        * We still need to wait for a while.
+                        */
+                       usleep_range(500, 1000);
+                       return 0;
+               }
+
+               usleep_range(1000, 10000);
+       }
+
+       netdev_err(netdev, "software reset failed\n");
+       return -EIO;
+}
+
+static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
+{
+       unsigned int maddr = mac[0] << 8 | mac[1];
+       unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+       iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
+       iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
+}
+
+#define MACCR_ENABLE_ALL       (FTMAC100_MACCR_XMT_EN  | \
+                                FTMAC100_MACCR_RCV_EN  | \
+                                FTMAC100_MACCR_XDMA_EN | \
+                                FTMAC100_MACCR_RDMA_EN | \
+                                FTMAC100_MACCR_CRC_APD | \
+                                FTMAC100_MACCR_FULLDUP | \
+                                FTMAC100_MACCR_RX_RUNT | \
+                                FTMAC100_MACCR_RX_BROADPKT)
+
+static int ftmac100_start_hw(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+
+       if (ftmac100_reset(priv))
+               return -EIO;
+
+       /* setup ring buffer base registers */
+       ftmac100_set_rx_ring_base(priv,
+                                 priv->descs_dma_addr +
+                                 offsetof(struct ftmac100_descs, rxdes));
+       ftmac100_set_tx_ring_base(priv,
+                                 priv->descs_dma_addr +
+                                 offsetof(struct ftmac100_descs, txdes));
+
+       iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
+
+       ftmac100_set_mac(priv, netdev->dev_addr);
+
+       iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+       return 0;
+}
+
+static void ftmac100_stop_hw(struct ftmac100 *priv)
+{
+       iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
+}
+
+static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
+}
+
+static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
+{
+       /* clear status bits */
+       rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
+}
+
+static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
+}
+
+static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
+{
+       return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
+}
+
+static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
+}
+
+static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
+                                          unsigned int size)
+{
+       rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+       rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
+}
+
+static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
+{
+       rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+}
+
+static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
+                                       dma_addr_t addr)
+{
+       rxdes->rxdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
+{
+       return le32_to_cpu(rxdes->rxdes2);
+}
+
+/*
+ * rxdes3 is not used by hardware. We use it to keep track of page.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
+{
+       rxdes->rxdes3 = (unsigned int)page;
+}
+
+static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
+{
+       return (struct page *)rxdes->rxdes3;
+}
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftmac100_next_rx_pointer(int pointer)
+{
+       return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
+
+static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
+{
+       priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
+{
+       return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftmac100_rxdes *
+ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
+{
+       struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+
+       while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
+               if (ftmac100_rxdes_first_segment(rxdes))
+                       return rxdes;
+
+               ftmac100_rxdes_set_dma_own(rxdes);
+               ftmac100_rx_pointer_advance(priv);
+               rxdes = ftmac100_current_rxdes(priv);
+       }
+
+       return NULL;
+}
+
+static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
+                                    struct ftmac100_rxdes *rxdes)
+{
+       struct net_device *netdev = priv->netdev;
+       bool error = false;
+
+       if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx err\n");
+
+               netdev->stats.rx_errors++;
+               error = true;
+       }
+
+       if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx crc err\n");
+
+               netdev->stats.rx_crc_errors++;
+               error = true;
+       }
+
+       if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx frame too long\n");
+
+               netdev->stats.rx_length_errors++;
+               error = true;
+       } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx runt\n");
+
+               netdev->stats.rx_length_errors++;
+               error = true;
+       } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx odd nibble\n");
+
+               netdev->stats.rx_length_errors++;
+               error = true;
+       }
+
+       return error;
+}
+
+static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+       bool done = false;
+
+       if (net_ratelimit())
+               netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+       do {
+               if (ftmac100_rxdes_last_segment(rxdes))
+                       done = true;
+
+               ftmac100_rxdes_set_dma_own(rxdes);
+               ftmac100_rx_pointer_advance(priv);
+               rxdes = ftmac100_current_rxdes(priv);
+       } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
+
+       netdev->stats.rx_dropped++;
+}
+
+static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_rxdes *rxdes;
+       struct sk_buff *skb;
+       struct page *page;
+       dma_addr_t map;
+       int length;
+
+       rxdes = ftmac100_rx_locate_first_segment(priv);
+       if (!rxdes)
+               return false;
+
+       if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
+               ftmac100_rx_drop_packet(priv);
+               return true;
+       }
+
+       /*
+        * It is impossible to get multi-segment packets
+        * because we always provide big enough receive buffers.
+        */
+       if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
+               BUG();
+
+       /* start processing */
+       skb = netdev_alloc_skb_ip_align(netdev, 128);
+       if (unlikely(!skb)) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "rx skb alloc failed\n");
+
+               ftmac100_rx_drop_packet(priv);
+               return true;
+       }
+
+       if (unlikely(ftmac100_rxdes_multicast(rxdes)))
+               netdev->stats.multicast++;
+
+       map = ftmac100_rxdes_get_dma_addr(rxdes);
+       dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+       length = ftmac100_rxdes_frame_length(rxdes);
+       page = ftmac100_rxdes_get_page(rxdes);
+       skb_fill_page_desc(skb, 0, page, 0, length);
+       skb->len += length;
+       skb->data_len += length;
+       skb->truesize += length;
+       __pskb_pull_tail(skb, min(length, 64));
+
+       ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
+
+       ftmac100_rx_pointer_advance(priv);
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       netdev->stats.rx_packets++;
+       netdev->stats.rx_bytes += skb->len;
+
+       /* push packet to protocol stack */
+       netif_receive_skb(skb);
+
+       (*processed)++;
+       return true;
+}
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
+{
+       /* clear all except end of ring bit */
+       txdes->txdes0 = 0;
+       txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+       txdes->txdes2 = 0;
+       txdes->txdes3 = 0;
+}
+
+static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
+{
+       return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
+{
+       /*
+        * Make sure dma own bit will not be set before any other
+        * descriptor fields.
+        */
+       wmb();
+       txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
+{
+       return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
+}
+
+static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
+{
+       return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
+}
+
+static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+}
+
+static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
+}
+
+static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
+}
+
+static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
+}
+
+static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
+                                          unsigned int len)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
+}
+
+static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
+                                       dma_addr_t addr)
+{
+       txdes->txdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
+{
+       return le32_to_cpu(txdes->txdes2);
+}
+
+/*
+ * txdes3 is not used by hardware. We use it to keep track of socket buffer.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
+{
+       txdes->txdes3 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
+{
+       return (struct sk_buff *)txdes->txdes3;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftmac100_next_tx_pointer(int pointer)
+{
+       return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
+{
+       priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
+{
+       priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
+{
+       return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
+{
+       return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_txdes *txdes;
+       struct sk_buff *skb;
+       dma_addr_t map;
+
+       if (priv->tx_pending == 0)
+               return false;
+
+       txdes = ftmac100_current_clean_txdes(priv);
+
+       if (ftmac100_txdes_owned_by_dma(txdes))
+               return false;
+
+       skb = ftmac100_txdes_get_skb(txdes);
+       map = ftmac100_txdes_get_dma_addr(txdes);
+
+       if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
+                    ftmac100_txdes_late_collision(txdes))) {
+               /*
+                * packet transmitted to ethernet lost due to late collision
+                * or excessive collision
+                */
+               netdev->stats.tx_aborted_errors++;
+       } else {
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += skb->len;
+       }
+
+       dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+       dev_kfree_skb(skb);
+
+       ftmac100_txdes_reset(txdes);
+
+       ftmac100_tx_clean_pointer_advance(priv);
+
+       spin_lock(&priv->tx_lock);
+       priv->tx_pending--;
+       spin_unlock(&priv->tx_lock);
+       netif_wake_queue(netdev);
+
+       return true;
+}
+
+static void ftmac100_tx_complete(struct ftmac100 *priv)
+{
+       while (ftmac100_tx_complete_packet(priv))
+               ;
+}
+
+static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
+                        dma_addr_t map)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_txdes *txdes;
+       unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+       txdes = ftmac100_current_txdes(priv);
+       ftmac100_tx_pointer_advance(priv);
+
+       /* setup TX descriptor */
+       ftmac100_txdes_set_skb(txdes, skb);
+       ftmac100_txdes_set_dma_addr(txdes, map);
+
+       ftmac100_txdes_set_first_segment(txdes);
+       ftmac100_txdes_set_last_segment(txdes);
+       ftmac100_txdes_set_txint(txdes);
+       ftmac100_txdes_set_buffer_size(txdes, len);
+
+       spin_lock(&priv->tx_lock);
+       priv->tx_pending++;
+       if (priv->tx_pending == TX_QUEUE_ENTRIES)
+               netif_stop_queue(netdev);
+
+       /* start transmit */
+       ftmac100_txdes_set_dma_own(txdes);
+       spin_unlock(&priv->tx_lock);
+
+       ftmac100_txdma_start_polling(priv);
+       return NETDEV_TX_OK;
+}
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
+                                 struct ftmac100_rxdes *rxdes, gfp_t gfp)
+{
+       struct net_device *netdev = priv->netdev;
+       struct page *page;
+       dma_addr_t map;
+
+       page = alloc_page(gfp);
+       if (!page) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "failed to allocate rx page\n");
+               return -ENOMEM;
+       }
+
+       map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(priv->dev, map))) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "failed to map rx page\n");
+               __free_page(page);
+               return -ENOMEM;
+       }
+
+       ftmac100_rxdes_set_page(rxdes, page);
+       ftmac100_rxdes_set_dma_addr(rxdes, map);
+       ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
+       ftmac100_rxdes_set_dma_own(rxdes);
+       return 0;
+}
+
+static void ftmac100_free_buffers(struct ftmac100 *priv)
+{
+       int i;
+
+       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+               struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+               struct page *page = ftmac100_rxdes_get_page(rxdes);
+               dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
+
+               if (!page)
+                       continue;
+
+               dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+               __free_page(page);
+       }
+
+       for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+               struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
+               struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
+               dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
+
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+               dev_kfree_skb(skb);
+       }
+
+       dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
+                         priv->descs, priv->descs_dma_addr);
+}
+
+static int ftmac100_alloc_buffers(struct ftmac100 *priv)
+{
+       int i;
+
+       priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
+                                        &priv->descs_dma_addr, GFP_KERNEL);
+       if (!priv->descs)
+               return -ENOMEM;
+
+       memset(priv->descs, 0, sizeof(struct ftmac100_descs));
+
+       /* initialize RX ring */
+       ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+               struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+               if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
+                       goto err;
+       }
+
+       /* initialize TX ring */
+       ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+       return 0;
+
+err:
+       ftmac100_free_buffers(priv);
+       return -ENOMEM;
+}
+
+/******************************************************************************
+ * struct mii_if_info functions
+ *****************************************************************************/
+static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       unsigned int phycr;
+       int i;
+
+       phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+               FTMAC100_PHYCR_REGAD(reg) |
+               FTMAC100_PHYCR_MIIRD;
+
+       iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+       for (i = 0; i < 10; i++) {
+               phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+               if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
+                       return phycr & FTMAC100_PHYCR_MIIRDATA;
+
+               usleep_range(100, 1000);
+       }
+
+       netdev_err(netdev, "mdio read timed out\n");
+       return 0;
+}
+
+static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
+                               int data)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       unsigned int phycr;
+       int i;
+
+       phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+               FTMAC100_PHYCR_REGAD(reg) |
+               FTMAC100_PHYCR_MIIWR;
+
+       data = FTMAC100_PHYWDATA_MIIWDATA(data);
+
+       iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
+       iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+       for (i = 0; i < 10; i++) {
+               phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+               if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
+                       return;
+
+               usleep_range(100, 1000);
+       }
+
+       netdev_err(netdev, "mdio write timed out\n");
+}
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftmac100_get_drvinfo(struct net_device *netdev,
+                                struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->bus_info, dev_name(&netdev->dev));
+}
+
+static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+static int ftmac100_nway_reset(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_nway_restart(&priv->mii);
+}
+
+static u32 ftmac100_get_link(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_link_ok(&priv->mii);
+}
+
+static const struct ethtool_ops ftmac100_ethtool_ops = {
+       .set_settings           = ftmac100_set_settings,
+       .get_settings           = ftmac100_get_settings,
+       .get_drvinfo            = ftmac100_get_drvinfo,
+       .nway_reset             = ftmac100_nway_reset,
+       .get_link               = ftmac100_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
+{
+       struct net_device *netdev = dev_id;
+       struct ftmac100 *priv = netdev_priv(netdev);
+
+       if (likely(netif_running(netdev))) {
+               /* Disable interrupts for polling */
+               ftmac100_disable_all_int(priv);
+               napi_schedule(&priv->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftmac100_poll(struct napi_struct *napi, int budget)
+{
+       struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
+       struct net_device *netdev = priv->netdev;
+       unsigned int status;
+       bool completed = true;
+       int rx = 0;
+
+       status = ioread32(priv->base + FTMAC100_OFFSET_ISR);
+
+       if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
+               /*
+                * FTMAC100_INT_RPKT_FINISH:
+                *      RX DMA has received packets into RX buffer successfully
+                *
+                * FTMAC100_INT_NORXBUF:
+                *      RX buffer unavailable
+                */
+               bool retry;
+
+               do {
+                       retry = ftmac100_rx_packet(priv, &rx);
+               } while (retry && rx < budget);
+
+               if (retry && rx == budget)
+                       completed = false;
+       }
+
+       if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
+               /*
+                * FTMAC100_INT_XPKT_OK:
+                *      packet transmitted to ethernet successfully
+                *
+                * FTMAC100_INT_XPKT_LOST:
+                *      packet transmitted to ethernet lost due to late
+                *      collision or excessive collision
+                */
+               ftmac100_tx_complete(priv);
+       }
+
+       if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
+                     FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+                                   status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
+                                   status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+                                   status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+                                   status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+               if (status & FTMAC100_INT_NORXBUF) {
+                       /* RX buffer unavailable */
+                       netdev->stats.rx_over_errors++;
+               }
+
+               if (status & FTMAC100_INT_RPKT_LOST) {
+                       /* received packet lost due to RX FIFO full */
+                       netdev->stats.rx_fifo_errors++;
+               }
+
+               if (status & FTMAC100_INT_PHYSTS_CHG) {
+                       /* PHY link status change */
+                       mii_check_link(&priv->mii);
+               }
+       }
+
+       if (completed) {
+               /* stop polling */
+               napi_complete(napi);
+               ftmac100_enable_all_int(priv);
+       }
+
+       return rx;
+}
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftmac100_open(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       int err;
+
+       err = ftmac100_alloc_buffers(priv);
+       if (err) {
+               netdev_err(netdev, "failed to allocate buffers\n");
+               goto err_alloc;
+       }
+
+       err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
+       if (err) {
+               netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+               goto err_irq;
+       }
+
+       priv->rx_pointer = 0;
+       priv->tx_clean_pointer = 0;
+       priv->tx_pointer = 0;
+       priv->tx_pending = 0;
+
+       err = ftmac100_start_hw(priv);
+       if (err)
+               goto err_hw;
+
+       napi_enable(&priv->napi);
+       netif_start_queue(netdev);
+
+       ftmac100_enable_all_int(priv);
+
+       return 0;
+
+err_hw:
+       free_irq(priv->irq, netdev);
+err_irq:
+       ftmac100_free_buffers(priv);
+err_alloc:
+       return err;
+}
+
+static int ftmac100_stop(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+
+       ftmac100_disable_all_int(priv);
+       netif_stop_queue(netdev);
+       napi_disable(&priv->napi);
+       ftmac100_stop_hw(priv);
+       free_irq(priv->irq, netdev);
+       ftmac100_free_buffers(priv);
+
+       return 0;
+}
+
+static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       dma_addr_t map;
+
+       if (unlikely(skb->len > MAX_PKT_SIZE)) {
+               if (net_ratelimit())
+                       netdev_dbg(netdev, "tx packet too big\n");
+
+               netdev->stats.tx_dropped++;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(priv->dev, map))) {
+               /* drop packet */
+               if (net_ratelimit())
+                       netdev_err(netdev, "map socket buffer failed\n");
+
+               netdev->stats.tx_dropped++;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       return ftmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       struct mii_ioctl_data *data = if_mii(ifr);
+
+       return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
+}
+
+static const struct net_device_ops ftmac100_netdev_ops = {
+       .ndo_open               = ftmac100_open,
+       .ndo_stop               = ftmac100_stop,
+       .ndo_start_xmit         = ftmac100_hard_start_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = ftmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftmac100_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int irq;
+       struct net_device *netdev;
+       struct ftmac100 *priv;
+       int err;
+
+       if (!pdev)
+               return -ENODEV;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENXIO;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       /* setup net_device */
+       netdev = alloc_etherdev(sizeof(*priv));
+       if (!netdev) {
+               err = -ENOMEM;
+               goto err_alloc_etherdev;
+       }
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+       netdev->netdev_ops = &ftmac100_netdev_ops;
+
+       platform_set_drvdata(pdev, netdev);
+
+       /* setup private data */
+       priv = netdev_priv(netdev);
+       priv->netdev = netdev;
+       priv->dev = &pdev->dev;
+
+       spin_lock_init(&priv->tx_lock);
+
+       /* initialize NAPI */
+       netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+
+       /* map io memory */
+       priv->res = request_mem_region(res->start, resource_size(res),
+                                      dev_name(&pdev->dev));
+       if (!priv->res) {
+               dev_err(&pdev->dev, "Could not reserve memory region\n");
+               err = -ENOMEM;
+               goto err_req_mem;
+       }
+
+       priv->base = ioremap(res->start, res->end - res->start);
+       if (!priv->base) {
+               dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+               err = -EIO;
+               goto err_ioremap;
+       }
+
+       priv->irq = irq;
+
+       /* initialize struct mii_if_info */
+       priv->mii.phy_id        = 0;
+       priv->mii.phy_id_mask   = 0x1f;
+       priv->mii.reg_num_mask  = 0x1f;
+       priv->mii.dev           = netdev;
+       priv->mii.mdio_read     = ftmac100_mdio_read;
+       priv->mii.mdio_write    = ftmac100_mdio_write;
+
+       /* register network device */
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register netdev\n");
+               goto err_register_netdev;
+       }
+
+       netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+       if (!is_valid_ether_addr(netdev->dev_addr)) {
+               random_ether_addr(netdev->dev_addr);
+               netdev_info(netdev, "generated random MAC address %pM\n",
+                           netdev->dev_addr);
+       }
+
+       return 0;
+
+err_register_netdev:
+       iounmap(priv->base);
+err_ioremap:
+       release_resource(priv->res);
+err_req_mem:
+       netif_napi_del(&priv->napi);
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(netdev);
+err_alloc_etherdev:
+       return err;
+}
+
+static int __exit ftmac100_remove(struct platform_device *pdev)
+{
+       struct net_device *netdev;
+       struct ftmac100 *priv;
+
+       netdev = platform_get_drvdata(pdev);
+       priv = netdev_priv(netdev);
+
+       unregister_netdev(netdev);
+
+       iounmap(priv->base);
+       release_resource(priv->res);
+
+       netif_napi_del(&priv->napi);
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(netdev);
+       return 0;
+}
+
+static struct platform_driver ftmac100_driver = {
+       .probe          = ftmac100_probe,
+       .remove         = __exit_p(ftmac100_remove),
+       .driver         = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+       },
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftmac100_init(void)
+{
+       pr_info("Loading version " DRV_VERSION " ...\n");
+       return platform_driver_register(&ftmac100_driver);
+}
+
+static void __exit ftmac100_exit(void)
+{
+       platform_driver_unregister(&ftmac100_driver);
+}
+
+module_init(ftmac100_init);
+module_exit(ftmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTMAC100 driver");
+MODULE_LICENSE("GPL");
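
ftmac100_interrupt() and ftmac100_poll() above follow the standard NAPI shape: the hard interrupt handler masks the device's interrupt sources and schedules the poll, and the poll routine processes up to the given budget, calling napi_complete() and unmasking interrupts only once the ring is drained. A stripped-down sketch of that control flow; the example_* helpers stand in for the register accesses and are not part of the driver:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
        struct napi_struct napi;
};

/* Stand-ins for writes to the interrupt mask register. */
static void example_mask_irqs(struct example_priv *priv)   { }
static void example_unmask_irqs(struct example_priv *priv) { }
/* Stand-in for receiving one frame; returns 0 when the ring is empty. */
static int example_rx_one(struct example_priv *priv)       { return 0; }

/* Registered with request_irq() in a real driver. */
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct example_priv *priv = netdev_priv(netdev);

        /* Mask device interrupts and defer the work to the NAPI poll. */
        example_mask_irqs(priv);
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}

/* Registered with netif_napi_add() in a real driver. */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int done = 0;

        while (done < budget && example_rx_one(priv))
                done++;

        if (done < budget) {
                /* Ring drained within budget: stop polling, unmask interrupts. */
                napi_complete(napi);
                example_unmask_irqs(priv);
        }
        return done;
}
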
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644
index 0000000..46a0c47
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTMAC100_H
+#define __FTMAC100_H
+
+#define        FTMAC100_OFFSET_ISR             0x00
+#define        FTMAC100_OFFSET_IMR             0x04
+#define        FTMAC100_OFFSET_MAC_MADR        0x08
+#define        FTMAC100_OFFSET_MAC_LADR        0x0c
+#define        FTMAC100_OFFSET_MAHT0           0x10
+#define        FTMAC100_OFFSET_MAHT1           0x14
+#define        FTMAC100_OFFSET_TXPD            0x18
+#define        FTMAC100_OFFSET_RXPD            0x1c
+#define        FTMAC100_OFFSET_TXR_BADR        0x20
+#define        FTMAC100_OFFSET_RXR_BADR        0x24
+#define        FTMAC100_OFFSET_ITC             0x28
+#define        FTMAC100_OFFSET_APTC            0x2c
+#define        FTMAC100_OFFSET_DBLAC           0x30
+#define        FTMAC100_OFFSET_MACCR           0x88
+#define        FTMAC100_OFFSET_MACSR           0x8c
+#define        FTMAC100_OFFSET_PHYCR           0x90
+#define        FTMAC100_OFFSET_PHYWDATA        0x94
+#define        FTMAC100_OFFSET_FCR             0x98
+#define        FTMAC100_OFFSET_BPR             0x9c
+#define        FTMAC100_OFFSET_TS              0xc4
+#define        FTMAC100_OFFSET_DMAFIFOS        0xc8
+#define        FTMAC100_OFFSET_TM              0xcc
+#define        FTMAC100_OFFSET_TX_MCOL_SCOL    0xd4
+#define        FTMAC100_OFFSET_RPF_AEP         0xd8
+#define        FTMAC100_OFFSET_XM_PG           0xdc
+#define        FTMAC100_OFFSET_RUNT_TLCC       0xe0
+#define        FTMAC100_OFFSET_CRCER_FTL       0xe4
+#define        FTMAC100_OFFSET_RLC_RCC         0xe8
+#define        FTMAC100_OFFSET_BROC            0xec
+#define        FTMAC100_OFFSET_MULCA           0xf0
+#define        FTMAC100_OFFSET_RP              0xf4
+#define        FTMAC100_OFFSET_XP              0xf8
+
+/*
+ * Interrupt status register & interrupt mask register
+ */
+#define        FTMAC100_INT_RPKT_FINISH        (1 << 0)
+#define        FTMAC100_INT_NORXBUF            (1 << 1)
+#define        FTMAC100_INT_XPKT_FINISH        (1 << 2)
+#define        FTMAC100_INT_NOTXBUF            (1 << 3)
+#define        FTMAC100_INT_XPKT_OK            (1 << 4)
+#define        FTMAC100_INT_XPKT_LOST          (1 << 5)
+#define        FTMAC100_INT_RPKT_SAV           (1 << 6)
+#define        FTMAC100_INT_RPKT_LOST          (1 << 7)
+#define        FTMAC100_INT_AHB_ERR            (1 << 8)
+#define        FTMAC100_INT_PHYSTS_CHG         (1 << 9)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTMAC100_ITC_RXINT_CNT(x)      (((x) & 0xf) << 0)
+#define FTMAC100_ITC_RXINT_THR(x)      (((x) & 0x7) << 4)
+#define FTMAC100_ITC_RXINT_TIME_SEL    (1 << 7)
+#define FTMAC100_ITC_TXINT_CNT(x)      (((x) & 0xf) << 8)
+#define FTMAC100_ITC_TXINT_THR(x)      (((x) & 0x7) << 12)
+#define FTMAC100_ITC_TXINT_TIME_SEL    (1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define        FTMAC100_APTC_RXPOLL_CNT(x)     (((x) & 0xf) << 0)
+#define        FTMAC100_APTC_RXPOLL_TIME_SEL   (1 << 4)
+#define        FTMAC100_APTC_TXPOLL_CNT(x)     (((x) & 0xf) << 8)
+#define        FTMAC100_APTC_TXPOLL_TIME_SEL   (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTMAC100_DBLAC_INCR4_EN                (1 << 0)
+#define FTMAC100_DBLAC_INCR8_EN                (1 << 1)
+#define FTMAC100_DBLAC_INCR16_EN       (1 << 2)
+#define FTMAC100_DBLAC_RXFIFO_LTHR(x)  (((x) & 0x7) << 3)
+#define FTMAC100_DBLAC_RXFIFO_HTHR(x)  (((x) & 0x7) << 6)
+#define FTMAC100_DBLAC_RX_THR_EN       (1 << 9)
+
+/*
+ * MAC control register
+ */
+#define        FTMAC100_MACCR_XDMA_EN          (1 << 0)
+#define        FTMAC100_MACCR_RDMA_EN          (1 << 1)
+#define        FTMAC100_MACCR_SW_RST           (1 << 2)
+#define        FTMAC100_MACCR_LOOP_EN          (1 << 3)
+#define        FTMAC100_MACCR_CRC_DIS          (1 << 4)
+#define        FTMAC100_MACCR_XMT_EN           (1 << 5)
+#define        FTMAC100_MACCR_ENRX_IN_HALFTX   (1 << 6)
+#define        FTMAC100_MACCR_RCV_EN           (1 << 8)
+#define        FTMAC100_MACCR_HT_MULTI_EN      (1 << 9)
+#define        FTMAC100_MACCR_RX_RUNT          (1 << 10)
+#define        FTMAC100_MACCR_RX_FTL           (1 << 11)
+#define        FTMAC100_MACCR_RCV_ALL          (1 << 12)
+#define        FTMAC100_MACCR_CRC_APD          (1 << 14)
+#define        FTMAC100_MACCR_FULLDUP          (1 << 15)
+#define        FTMAC100_MACCR_RX_MULTIPKT      (1 << 16)
+#define        FTMAC100_MACCR_RX_BROADPKT      (1 << 17)
+
+/*
+ * PHY control register
+ */
+#define FTMAC100_PHYCR_MIIRDATA                0xffff
+#define FTMAC100_PHYCR_PHYAD(x)                (((x) & 0x1f) << 16)
+#define FTMAC100_PHYCR_REGAD(x)                (((x) & 0x1f) << 21)
+#define FTMAC100_PHYCR_MIIRD           (1 << 26)
+#define FTMAC100_PHYCR_MIIWR           (1 << 27)
+
+/*
+ * PHY write data register
+ */
+#define FTMAC100_PHYWDATA_MIIWDATA(x)  ((x) & 0xffff)
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftmac100_txdes {
+       unsigned int    txdes0;
+       unsigned int    txdes1;
+       unsigned int    txdes2; /* TXBUF_BADR */
+       unsigned int    txdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define        FTMAC100_TXDES0_TXPKT_LATECOL   (1 << 0)
+#define        FTMAC100_TXDES0_TXPKT_EXSCOL    (1 << 1)
+#define        FTMAC100_TXDES0_TXDMA_OWN       (1 << 31)
+
+#define        FTMAC100_TXDES1_TXBUF_SIZE(x)   ((x) & 0x7ff)
+#define        FTMAC100_TXDES1_LTS             (1 << 27)
+#define        FTMAC100_TXDES1_FTS             (1 << 28)
+#define        FTMAC100_TXDES1_TX2FIC          (1 << 29)
+#define        FTMAC100_TXDES1_TXIC            (1 << 30)
+#define        FTMAC100_TXDES1_EDOTR           (1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftmac100_rxdes {
+       unsigned int    rxdes0;
+       unsigned int    rxdes1;
+       unsigned int    rxdes2; /* RXBUF_BADR */
+       unsigned int    rxdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define        FTMAC100_RXDES0_RFL             0x7ff
+#define        FTMAC100_RXDES0_MULTICAST       (1 << 16)
+#define        FTMAC100_RXDES0_BROADCAST       (1 << 17)
+#define        FTMAC100_RXDES0_RX_ERR          (1 << 18)
+#define        FTMAC100_RXDES0_CRC_ERR         (1 << 19)
+#define        FTMAC100_RXDES0_FTL             (1 << 20)
+#define        FTMAC100_RXDES0_RUNT            (1 << 21)
+#define        FTMAC100_RXDES0_RX_ODD_NB       (1 << 22)
+#define        FTMAC100_RXDES0_LRS             (1 << 28)
+#define        FTMAC100_RXDES0_FRS             (1 << 29)
+#define        FTMAC100_RXDES0_RXDMA_OWN       (1 << 31)
+
+#define        FTMAC100_RXDES1_RXBUF_SIZE(x)   ((x) & 0x7ff)
+#define        FTMAC100_RXDES1_EDORR           (1 << 31)
+
+#endif /* __FTMAC100_H */
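
The PHYCR layout above is what the driver's MDIO helpers (ftmac100_mdio_read/write, hooked up in probe earlier in this patch) presumably program: PHYAD/REGAD select the PHY register, MIIRD/MIIWR start a transaction, and MIIRDATA carries back the read data. As a rough, illustrative sketch only, built from these definitions and not taken from the patch, a polled MDIO read could look like:

/* Illustrative sketch, not the driver's implementation.  Assumes
 * <linux/io.h>, <linux/delay.h> and <linux/errno.h>; "base" is the
 * ioremap()ed FTMAC100 register window. */
static int example_ftmac100_mdio_read(void __iomem *base, int phy_id, int reg)
{
	unsigned int phycr;
	int i;

	phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
		FTMAC100_PHYCR_REGAD(reg) |
		FTMAC100_PHYCR_MIIRD;
	iowrite32(phycr, base + FTMAC100_OFFSET_PHYCR);

	/* poll until the MIIRD strobe self-clears, then return MIIRDATA */
	for (i = 0; i < 10; i++) {
		phycr = ioread32(base + FTMAC100_OFFSET_PHYCR);
		if (!(phycr & FTMAC100_PHYCR_MIIRD))
			return phycr & FTMAC100_PHYCR_MIIRDATA;
		udelay(100);
	}
	return -EIO;	/* strobe never cleared */
}
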
index ac1d323..8931168 100644 (file)
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct list_head *p;
+       struct bpqdev *bpqdev = v;
 
        ++*pos;
 
        if (v == SEQ_START_TOKEN)
-               p = rcu_dereference(bpq_devices.next);
+               p = rcu_dereference(list_next_rcu(&bpq_devices));
        else
-               p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+               p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
 
        return (p == &bpq_devices) ? NULL 
                : list_entry(p, struct bpqdev, bpq_list);
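
The point of this hunk is that the raw ->next pointer of an RCU-protected list should not be handed to rcu_dereference() directly; list_next_rcu() (from <linux/rculist.h>) returns the same pointer with the __rcu annotation that sparse expects. A minimal sketch of the pattern on a made-up list (names are illustrative, not from the patch; the caller must hold rcu_read_lock()):

#include <linux/rculist.h>

struct my_dev {
	struct list_head list;
};

static LIST_HEAD(my_devices);

/* Return the entry after "cur" (or the first entry when cur is NULL),
 * or NULL once the walk wraps back to the list head. */
static struct my_dev *my_dev_next(struct my_dev *cur)
{
	struct list_head *head = cur ? &cur->list : &my_devices;
	struct list_head *p = rcu_dereference(list_next_rcu(head));

	return (p == &my_devices) ? NULL : list_entry(p, struct my_dev, list);
}
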
index 0a2368f..6b256c2 100644 (file)
@@ -64,7 +64,14 @@ static s32  igb_reset_init_script_82575(struct e1000_hw *);
 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
 static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
-
+static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32  igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+                                               u16 offset);
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+                                               u16 offset);
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
 static const u16 e1000_82580_rxpbs_table[] =
        { 36, 72, 144, 1, 2, 4, 8, 16,
          35, 70, 140 };
@@ -129,6 +136,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                break;
        case E1000_DEV_ID_82580_COPPER:
        case E1000_DEV_ID_82580_FIBER:
+       case E1000_DEV_ID_82580_QUAD_FIBER:
        case E1000_DEV_ID_82580_SERDES:
        case E1000_DEV_ID_82580_SGMII:
        case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -194,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
        mac->arc_subsystem_valid =
                (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
                        ? true : false;
-
+       /* enable EEE on i350 parts */
+       if (mac->type == e1000_i350)
+               dev_spec->eee_disable = false;
+       else
+               dev_spec->eee_disable = true;
        /* physical interface link setup */
        mac->ops.setup_physical_interface =
                (hw->phy.media_type == e1000_media_type_copper)
@@ -232,14 +244,42 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
         */
        size += NVM_WORD_SIZE_BASE_SHIFT;
 
-       /* EEPROM access above 16k is unsupported */
-       if (size > 14)
-               size = 14;
        nvm->word_size = 1 << size;
+       if (nvm->word_size == (1 << 15))
+               nvm->page_size = 128;
 
-       /* if 82576 then initialize mailbox parameters */
-       if (mac->type == e1000_82576)
+       /* NVM Function Pointers */
+       nvm->ops.acquire = igb_acquire_nvm_82575;
+       if (nvm->word_size < (1 << 15))
+               nvm->ops.read = igb_read_nvm_eerd;
+       else
+               nvm->ops.read = igb_read_nvm_spi;
+
+       nvm->ops.release = igb_release_nvm_82575;
+       switch (hw->mac.type) {
+       case e1000_82580:
+               nvm->ops.validate = igb_validate_nvm_checksum_82580;
+               nvm->ops.update = igb_update_nvm_checksum_82580;
+               break;
+       case e1000_i350:
+               nvm->ops.validate = igb_validate_nvm_checksum_i350;
+               nvm->ops.update = igb_update_nvm_checksum_i350;
+               break;
+       default:
+               nvm->ops.validate = igb_validate_nvm_checksum;
+               nvm->ops.update = igb_update_nvm_checksum;
+       }
+       nvm->ops.write = igb_write_nvm_spi;
+
+       /* if part supports SR-IOV then initialize mailbox parameters */
+       switch (mac->type) {
+       case e1000_82576:
+       case e1000_i350:
                igb_init_mbx_params_pf(hw);
+               break;
+       default:
+               break;
+       }
 
        /* setup PHY parameters */
        if (phy->media_type != e1000_media_type_copper) {
@@ -1747,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data)
        return ret_val;
 }
 
+/**
+ *  igb_validate_nvm_checksum_with_offset - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+       s32 ret_val = 0;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       hw_dbg("NVM Read Error\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+
+       if (checksum != (u16) NVM_SUM) {
+               hw_dbg("NVM Checksum Invalid\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_with_offset - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+       s32 ret_val;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       hw_dbg("NVM Read Error while updating checksum.\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+       checksum = (u16) NVM_SUM - checksum;
+       ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+                               &checksum);
+       if (ret_val)
+               hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+       return ret_val;
+}
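
Both helpers above encode the same invariant: the 16-bit words from offset up to and including NVM_CHECKSUM_REG + offset must sum, modulo 2^16, to NVM_SUM (0xBABA), so the checksum word is chosen as NVM_SUM minus the sum of the data words. A small host-side illustration of that arithmetic, with assumed values rather than real EEPROM contents:

#include <stdint.h>
#include <stdio.h>

#define NVM_SUM          0xBABA
#define NVM_CHECKSUM_REG 0x003F

int main(void)
{
	uint16_t word[NVM_CHECKSUM_REG + 1] = { 0x1234, 0xabcd /* ... */ };
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += word[i];

	/* what the update helper writes ... */
	word[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);

	/* ... and what the validate helper checks */
	sum += word[NVM_CHECKSUM_REG];
	printf("checksum %s\n", sum == (uint16_t)NVM_SUM ? "valid" : "invalid");
	return 0;
}
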
+
+/**
+ *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u16 eeprom_regions_count = 1;
+       u16 j, nvm_data;
+       u16 nvm_offset;
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+       if (ret_val) {
+               hw_dbg("NVM Read Error\n");
+               goto out;
+       }
+
+       if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+               /* if checksums compatibility bit is set, validate checksums
+                * for all 4 ports. */
+               eeprom_regions_count = 4;
+       }
+
+       for (j = 0; j < eeprom_regions_count; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = igb_validate_nvm_checksum_with_offset(hw,
+                                                               nvm_offset);
+               if (ret_val != 0)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 j, nvm_data;
+       u16 nvm_offset;
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+       if (ret_val) {
+               hw_dbg("NVM Read Error while updating checksum"
+                       " compatibility bit.\n");
+               goto out;
+       }
+
+       if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+               /* set compatibility bit to validate checksums appropriately */
+               nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+               ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+                                       &nvm_data);
+               if (ret_val) {
+                       hw_dbg("NVM Write Error while updating checksum"
+                               " compatibility bit.\n");
+                       goto out;
+               }
+       }
+
+       for (j = 0; j < 4; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u16 j;
+       u16 nvm_offset;
+
+       for (j = 0; j < 4; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = igb_validate_nvm_checksum_with_offset(hw,
+                                                               nvm_offset);
+               if (ret_val != 0)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u16 j;
+       u16 nvm_offset;
+
+       for (j = 0; j < 4; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+               if (ret_val != 0)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  igb_set_eee_i350 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u32 ipcnfg, eeer, ctrl_ext;
+
+       ctrl_ext = rd32(E1000_CTRL_EXT);
+       if ((hw->mac.type != e1000_i350) ||
+           (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+               goto out;
+       ipcnfg = rd32(E1000_IPCNFG);
+       eeer = rd32(E1000_EEER);
+
+       /* enable or disable per user setting */
+       if (!(hw->dev_spec._82575.eee_disable)) {
+               ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
+                       E1000_IPCNFG_EEE_100M_AN);
+               eeer |= (E1000_EEER_TX_LPI_EN |
+                       E1000_EEER_RX_LPI_EN |
+                       E1000_EEER_LPI_FC);
+
+       } else {
+               ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+                       E1000_IPCNFG_EEE_100M_AN);
+               eeer &= ~(E1000_EEER_TX_LPI_EN |
+                       E1000_EEER_RX_LPI_EN |
+                       E1000_EEER_LPI_FC);
+       }
+       wr32(E1000_IPCNFG, ipcnfg);
+       wr32(E1000_EEER, eeer);
+out:
+
+       return ret_val;
+}
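
igb_set_eee_i350() is a no-op on anything but an i350 in the internal-PHY (GMII) link mode, and it keys entirely off dev_spec._82575.eee_disable, which igb_get_invariants_82575() above initialises to false for i350 and true for everything else. A hedged sketch of a caller that turns EEE off at runtime (illustrative only; as far as this series shows, the driver itself only calls the function from probe):

/* Illustrative caller only; "hw" is the adapter's struct e1000_hw. */
static s32 example_disable_eee(struct e1000_hw *hw)
{
	hw->dev_spec._82575.eee_disable = true;	/* request LPI off */
	return igb_set_eee_i350(hw);		/* clears IPCNFG/EEER LPI bits */
}
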
+
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
        .init_hw              = igb_init_hw_82575,
        .check_for_link       = igb_check_for_link_82575,
index 1d01af2..dd6df34 100644 (file)
@@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
 void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
 void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
 u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_set_eee_i350(struct e1000_hw *);
 
 #endif
index 6319ed9..6b80d40 100644 (file)
@@ -51,6 +51,7 @@
 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
 #define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII   0x00000000
 #define E1000_CTRL_EXT_EIAME          0x01000000
 #define E1000_CTRL_EXT_IRCA           0x00000001
 /* Interrupt delay cancellation */
 /* Management Control */
 #define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
 #define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS     0x10000000 /* OSBMC is Enabled or not */
 /* Enable Neighbor Discovery Filtering */
 #define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
 #define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
 #define E1000_TCTL_COLD   0x003ff000    /* collision distance */
 #define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
 
-/* Transmit Arbitration Count */
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coalescing
+                                                       * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coalescing Receive
+                                                       * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT       16
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe
+                                                       * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT       28
+#define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
+                                                       * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Receive Traffic Rate
+                                                       * Threshold */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rcv packet rate in
+                                                       * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rcv Traffic
+                                                       * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* Flow ctrl Rcv Threshold
+                                                       * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT      4
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
 
 /* SerDes Control */
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
 #define NVM_INIT_CONTROL3_PORT_A   0x0024
 #define NVM_ALT_MAC_ADDR_PTR       0x0037
 #define NVM_CHECKSUM_REG           0x003F
+#define NVM_COMPATIBILITY_REG_3    0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
 
 #define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
 #define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
 /* NVM Commands - SPI */
 #define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
 #define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
 #define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
 #define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
 #define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
 #define E1000_MDIC_ERROR     0x40000000
 #define E1000_MDIC_DEST      0x80000000
 
+/* Thermal Sensor */
+#define E1000_THSTAT_PWR_DOWN       0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE  0x00000002 /* Link Speed Throttle Event */
+
+/* Energy Efficient Ethernet */
+#define E1000_IPCNFG_EEE_1G_AN       0x00000008  /* EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN     0x00000004  /* EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN         0x00010000  /* EEE Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN         0x00020000  /* EEE Rx LPI Enable */
+#define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
+
 /* SerDes Control */
 #define E1000_GEN_CTL_READY             0x80000000
 #define E1000_GEN_CTL_ADDRESS_SHIFT     8
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
                                                       on DMA coal */
 
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA          0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK     0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT    14
+#define E1000_RTTBCNRC_RF_INT_MASK     \
+       (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
 #endif
index e2638af..27153e8 100644 (file)
@@ -54,6 +54,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_82580_SERDES             0x1510
 #define E1000_DEV_ID_82580_SGMII              0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
 #define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
 #define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
 #define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
@@ -247,6 +248,10 @@ struct e1000_hw_stats {
        u64 scvpc;
        u64 hrmpc;
        u64 doosync;
+       u64 o2bgptc;
+       u64 o2bspc;
+       u64 b2ospc;
+       u64 b2ogprc;
 };
 
 struct e1000_phy_stats {
@@ -331,6 +336,8 @@ struct e1000_nvm_operations {
        s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
        void (*release)(struct e1000_hw *);
        s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
+       s32  (*update)(struct e1000_hw *);
+       s32  (*validate)(struct e1000_hw *);
 };
 
 struct e1000_info {
@@ -417,7 +424,6 @@ struct e1000_phy_info {
 
 struct e1000_nvm_info {
        struct e1000_nvm_operations ops;
-
        enum e1000_nvm_type type;
        enum e1000_nvm_override override;
 
@@ -483,6 +489,7 @@ struct e1000_mbx_info {
 struct e1000_dev_spec_82575 {
        bool sgmii_active;
        bool global_device_reset;
+       bool eee_disable;
 };
 
 struct e1000_hw {
index c474cdb..78d48c7 100644 (file)
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
 
-       if (hw->mac.type == e1000_82576) {
-               mbx->timeout = 0;
-               mbx->usec_delay = 0;
-
-               mbx->size = E1000_VFMAILBOX_SIZE;
-
-               mbx->ops.read = igb_read_mbx_pf;
-               mbx->ops.write = igb_write_mbx_pf;
-               mbx->ops.read_posted = igb_read_posted_mbx;
-               mbx->ops.write_posted = igb_write_posted_mbx;
-               mbx->ops.check_for_msg = igb_check_for_msg_pf;
-               mbx->ops.check_for_ack = igb_check_for_ack_pf;
-               mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
-               mbx->stats.msgs_tx = 0;
-               mbx->stats.msgs_rx = 0;
-               mbx->stats.reqs = 0;
-               mbx->stats.acks = 0;
-               mbx->stats.rsts = 0;
-       }
+       mbx->timeout = 0;
+       mbx->usec_delay = 0;
+
+       mbx->size = E1000_VFMAILBOX_SIZE;
+
+       mbx->ops.read = igb_read_mbx_pf;
+       mbx->ops.write = igb_write_mbx_pf;
+       mbx->ops.read_posted = igb_read_posted_mbx;
+       mbx->ops.write_posted = igb_write_posted_mbx;
+       mbx->ops.check_for_msg = igb_check_for_msg_pf;
+       mbx->ops.check_for_ack = igb_check_for_ack_pf;
+       mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
 
        return 0;
 }
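
The 82576-only guard that used to wrap this body is gone because igb_get_invariants_82575() now calls igb_init_mbx_params_pf() only for the parts that support SR-IOV (82576 and i350, per the switch added earlier in this merge), so the function can assume a PF mailbox is wanted whenever it runs.
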
index 6b5cc2c..75bf36a 100644 (file)
@@ -317,6 +317,68 @@ out:
        return ret_val;
 }
 
+/**
+ *  igb_read_nvm_spi - Read EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i = 0;
+       s32 ret_val;
+       u16 word_in;
+       u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               hw_dbg("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = igb_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       igb_standby_nvm(hw);
+
+       if ((nvm->address_bits == 8) && (offset >= 128))
+               read_opcode |= NVM_A8_OPCODE_SPI;
+
+       /* Send the READ command (opcode + addr) */
+       igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+       igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+       /*
+        * Read the data.  SPI NVMs increment the address with each byte
+        * read and will roll over if reading beyond the end.  This allows
+        * us to read the whole NVM from any offset
+        */
+       for (i = 0; i < words; i++) {
+               word_in = igb_shift_in_eec_bits(hw, 16);
+               data[i] = (word_in >> 8) | (word_in << 8);
+       }
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
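
The byte swap on word_in is there because igb_shift_in_eec_bits() clocks the EEPROM's bytes in address order, most-significant bit first, so the byte stored at the lower address (the low byte of the 16-bit NVM word) lands in the high half of word_in; the swap restores host word order. A tiny host-side illustration with an assumed value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed value: a stored NVM word of 0x1234 read back over SPI as
	 * 0x3412, because the low byte is clocked in first. */
	uint16_t word_in = 0x3412;
	uint16_t data = (word_in >> 8) | (word_in << 8);

	printf("0x%04x\n", data);	/* prints 0x1234 */
	return 0;
}
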
+
 /**
  *  igb_read_nvm_eerd - Reads EEPROM using EERD register
  *  @hw: pointer to the HW structure
@@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
                        break;
 
                data[i] = (rd32(E1000_EERD) >>
-                          E1000_NVM_RW_REG_DATA);
+                       E1000_NVM_RW_REG_DATA);
        }
 
 out:
index 29c956a..7f43564 100644 (file)
@@ -35,6 +35,7 @@ s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
 s32  igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
                           u32 part_num_size);
 s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_validate_nvm_checksum(struct e1000_hw *hw);
 s32  igb_update_nvm_checksum(struct e1000_hw *hw);
index 8ac83c5..958ca3b 100644 (file)
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
 
+/* DMA Coalescing registers */
+#define E1000_DMACR             0x02508 /* Control Register */
+#define E1000_DMCTXTH           0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX            0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH           0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT            0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC             0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
+
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL 0x3604  /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC 0x36B0  /* Tx BCN Rate-Scheduler Config - WO */
+
 /* Split and Replication RX Control - RW */
 #define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
 /*
 
 /* DMA Coalescing registers */
 #define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
+
+/* Energy Efficient Ethernet "EEE" register */
+#define E1000_IPCNFG  0x0E38  /* Internal PHY Configuration */
+#define E1000_EEER    0x0E30  /* Energy Efficient Ethernet */
+
+/* Thermal Sensor Register */
+#define E1000_THSTAT    0x08110 /* Thermal Sensor Status */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC    0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC   0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC   0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC    0x0415C /* OS2BMC packets transmitted by host */
+
 #endif
index 92a4ef0..1c687e2 100644 (file)
@@ -77,6 +77,7 @@ struct vf_data_storage {
        unsigned long last_nack;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
+       u16 tx_rate;
 };
 
 #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
@@ -323,6 +324,7 @@ struct igb_adapter {
        u16 rx_ring_count;
        unsigned int vfs_allocated_count;
        struct vf_data_storage *vf_data;
+       int vf_rate_link_speed;
        u32 rss_queues;
        u32 wvbr;
 };
@@ -331,6 +333,12 @@ struct igb_adapter {
 #define IGB_FLAG_DCA_ENABLED       (1 << 1)
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
 #define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
+#define IGB_FLAG_DMAC              (1 << 4)
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE           20408
+#define IGB_TX_BUF_4096            4096
+#define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */
 
 #define IGB_82576_TSYNC_SHIFT 19
 #define IGB_82580_TSYNC_SHIFT 24
index a70e16b..d976733 100644 (file)
@@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
        IGB_STAT("tx_smbus", stats.mgptc),
        IGB_STAT("rx_smbus", stats.mgprc),
        IGB_STAT("dropped_smbus", stats.mgpdc),
+       IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+       IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+       IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+       IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
 };
 
 #define IGB_NETDEV_STAT(_net_stat) { \
@@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev,
        regs_buff[548] = rd32(E1000_TDFT);
        regs_buff[549] = rd32(E1000_TDFHS);
        regs_buff[550] = rd32(E1000_TDFPC);
-
+       regs_buff[551] = adapter->stats.o2bgptc;
+       regs_buff[552] = adapter->stats.b2ospc;
+       regs_buff[553] = adapter->stats.o2bspc;
+       regs_buff[554] = adapter->stats.b2ogprc;
 }
 
 static int igb_get_eeprom_len(struct net_device *netdev)
@@ -714,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev,
        /* Update the checksum over the first part of the EEPROM if needed
         * and flush shadow RAM for 82573 controllers */
        if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
-               igb_update_nvm_checksum(hw);
+               hw->nvm.ops.update(hw);
 
        kfree(eeprom_buff);
        return ret_val;
@@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
        char firmware_version[32];
        u16 eeprom_data;
 
-       strncpy(drvinfo->driver,  igb_driver_name, 32);
-       strncpy(drvinfo->version, igb_driver_version, 32);
+       strncpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver) - 1);
+       strncpy(drvinfo->version, igb_driver_version,
+               sizeof(drvinfo->version) - 1);
 
        /* EEPROM image version # is reported as firmware version # for
         * 82575 controllers */
@@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
                (eeprom_data & 0x0FF0) >> 4,
                eeprom_data & 0x000F);
 
-       strncpy(drvinfo->fw_version, firmware_version, 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strncpy(drvinfo->fw_version, firmware_version,
+               sizeof(drvinfo->fw_version) - 1);
+       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info) - 1);
        drvinfo->n_stats = IGB_STATS_LEN;
        drvinfo->testinfo_len = IGB_TEST_LEN;
        drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
                wr32(reg, (_test[pat] & write));
-               val = rd32(reg);
+               val = rd32(reg) & mask;
                if (val != (_test[pat] & write & mask)) {
                        dev_err(&adapter->pdev->dev, "pattern test reg %04X "
                                "failed: got 0x%08X expected 0x%08X\n",
@@ -1999,6 +2009,12 @@ static int igb_set_coalesce(struct net_device *netdev,
        if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
                return -EINVAL;
 
+       /* If ITR is disabled, disable DMAC */
+       if (ec->rx_coalesce_usecs == 0) {
+               if (adapter->flags & IGB_FLAG_DMAC)
+                       adapter->flags &= ~IGB_FLAG_DMAC;
+       }
+
        /* convert to rate of irq's per second */
        if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
                adapter->rx_itr_setting = ec->rx_coalesce_usecs;
index 58c665b..3d850af 100644 (file)
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "2.1.0-k2"
+#define MAJ 3
+#define MIN 0
+#define BUILD 6
+#define KFIX 2
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+__stringify(BUILD) "-k" __stringify(KFIX)
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
@@ -68,6 +73,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -100,6 +106,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_init_hw_timer(struct igb_adapter *adapter);
 static int igb_sw_init(struct igb_adapter *);
 static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
@@ -149,6 +156,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -1672,7 +1680,58 @@ void igb_reset(struct igb_adapter *adapter)
 
        if (hw->mac.ops.init_hw(hw))
                dev_err(&pdev->dev, "Hardware Error\n");
+       if (hw->mac.type > e1000_82580) {
+               if (adapter->flags & IGB_FLAG_DMAC) {
+                       u32 reg;
 
+                       /*
+                        * DMA Coalescing high water mark needs to be higher
+                        * than the Rx threshold.  The Rx threshold is
+                        * currently pba - 6, so we should use a high
+                        * water mark of pba - 4. */
+                       hwm = (pba - 4) << 10;
+
+                       reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
+                              & E1000_DMACR_DMACTHR_MASK);
+
+                       /* transition to L0x or L1 if available..*/
+                       reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+                       /* watchdog timer = ~1000 usec, in 32 usec intervals */
+                       reg |= (1000 >> 5);
+                       wr32(E1000_DMACR, reg);
+
+                       /* no lower threshold to disable coalescing (smart fifo)
+                        * - UTRESH = 0 */
+                       wr32(E1000_DMCRTRH, 0);
+
+                       /* set hwm to PBA -  2 * max frame size */
+                       wr32(E1000_FCRTC, hwm);
+
+                       /*
+                        * This sets the time to wait before requesting a
+                        * transition to a low power state to the number of usecs
+                        * needed to receive one 512-byte frame at gigabit line rate
+                        */
+                       reg = rd32(E1000_DMCTLX);
+                       reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+                       /* Delay 255 usec before entering Lx state. */
+                       reg |= 0xFF;
+                       wr32(E1000_DMCTLX, reg);
+
+                       /* free space in Tx packet buffer to wake from DMAC */
+                       wr32(E1000_DMCTXTH,
+                            (IGB_MIN_TXPBSIZE -
+                            (IGB_TX_BUF_4096 + adapter->max_frame_size))
+                            >> 6);
+
+                       /* make low power state decision controlled by DMAC */
+                       reg = rd32(E1000_PCIEMISC);
+                       reg |= E1000_PCIEMISC_LX_DECISION;
+                       wr32(E1000_PCIEMISC, reg);
+               } /* end if IGB_FLAG_DMAC set */
+       }
        if (hw->mac.type == e1000_82580) {
                u32 reg = rd32(E1000_PCIEMISC);
                wr32(E1000_PCIEMISC,
@@ -1882,7 +1941,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        hw->mac.ops.reset_hw(hw);
 
        /* make sure the NVM is good */
-       if (igb_validate_nvm_checksum(hw) < 0) {
+       if (hw->nvm.ops.validate(hw) < 0) {
                dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
@@ -1990,6 +2049,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        }
 
 #endif
+       /* do hw tstamp init after resetting */
+       igb_init_hw_timer(adapter);
+
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2012,7 +2074,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                adapter->msix_entries ? "MSI-X" :
                (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                adapter->num_rx_queues, adapter->num_tx_queues);
-
+       switch (hw->mac.type) {
+       case e1000_i350:
+               igb_set_eee_i350(hw);
+               break;
+       default:
+               break;
+       }
        return 0;
 
 err_register:
@@ -2149,6 +2217,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
                        random_ether_addr(mac_addr);
                        igb_set_vf_mac(adapter, i, mac_addr);
                }
+               /* DMA Coalescing is not supported in IOV mode. */
+               if (adapter->flags & IGB_FLAG_DMAC)
+                       adapter->flags &= ~IGB_FLAG_DMAC;
        }
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2286,9 +2357,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
        spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
-       if (hw->mac.type == e1000_82576)
-               adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+       switch (hw->mac.type) {
+       case e1000_82576:
+       case e1000_i350:
+               if (max_vfs > 7) {
+                       dev_warn(&pdev->dev,
+                                "Maximum of 7 VFs per PF, using max\n");
+                       adapter->vfs_allocated_count = 7;
+               } else
+                       adapter->vfs_allocated_count = max_vfs;
+               break;
+       default:
+               break;
+       }
 #endif /* CONFIG_PCI_IOV */
        adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 
@@ -2307,12 +2388,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
                return -ENOMEM;
        }
 
-       igb_init_hw_timer(adapter);
        igb_probe_vfs(adapter);
 
        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);
 
+       if (hw->mac.type == e1000_i350)
+               adapter->flags &= ~IGB_FLAG_DMAC;
+
        set_bit(__IGB_DOWN, &adapter->state);
        return 0;
 }
@@ -3467,7 +3550,7 @@ static void igb_watchdog_task(struct work_struct *work)
                                                    watchdog_task);
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
-       u32 link;
+       u32 link, ctrl_ext, thstat;
        int i;
 
        link = igb_has_link(adapter);
@@ -3491,6 +3574,25 @@ static void igb_watchdog_task(struct work_struct *work)
                               ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
                               ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
+                       /* check for thermal sensor event on i350,
+                        * copper only */
+                       if (hw->mac.type == e1000_i350) {
+                               thstat = rd32(E1000_THSTAT);
+                               ctrl_ext = rd32(E1000_CTRL_EXT);
+                               if ((hw->phy.media_type ==
+                                    e1000_media_type_copper) && !(ctrl_ext &
+                                    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+                                       if (thstat &
+                                           E1000_THSTAT_LINK_THROTTLE) {
+                                               printk(KERN_INFO "igb: %s The "
+                                                      "network adapter link "
+                                                      "speed was downshifted "
+                                                      "because it "
+                                                      "overheated.\n",
+                                                      netdev->name);
+                                       }
+                               }
+                       }
                        /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
@@ -3505,6 +3607,7 @@ static void igb_watchdog_task(struct work_struct *work)
                        netif_carrier_on(netdev);
 
                        igb_ping_all_vfs(adapter);
+                       igb_check_vf_rate_limit(adapter);
 
                        /* link state has changed, schedule phy info update */
                        if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3515,6 +3618,22 @@ static void igb_watchdog_task(struct work_struct *work)
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
+                       /* check for thermal sensor event on i350
+                        * copper only */
+                       if (hw->mac.type == e1000_i350) {
+                               thstat = rd32(E1000_THSTAT);
+                               ctrl_ext = rd32(E1000_CTRL_EXT);
+                               if ((hw->phy.media_type ==
+                                    e1000_media_type_copper) && !(ctrl_ext &
+                                    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+                                       if (thstat & E1000_THSTAT_PWR_DOWN) {
+                                               printk(KERN_ERR "igb: %s The "
+                                               "network adapter was stopped "
+                                               "because it overheated.\n",
+                                               netdev->name);
+                                       }
+                               }
+                       }
                        /* Links status message must follow this format */
                        printk(KERN_INFO "igb: %s NIC Link is Down\n",
                               netdev->name);
@@ -4547,6 +4666,15 @@ void igb_update_stats(struct igb_adapter *adapter,
        adapter->stats.mgptc += rd32(E1000_MGTPTC);
        adapter->stats.mgprc += rd32(E1000_MGTPRC);
        adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+       /* OS2BMC Stats */
+       reg = rd32(E1000_MANC);
+       if (reg & E1000_MANC_EN_BMC2OS) {
+               adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+               adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+               adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+               adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+       }
 }
 
 static irqreturn_t igb_msix_other(int irq, void *data)
@@ -6593,9 +6721,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        return igb_set_vf_mac(adapter, vf, mac);
 }
 
+static int igb_link_mbps(int internal_link_speed)
+{
+       switch (internal_link_speed) {
+       case SPEED_100:
+               return 100;
+       case SPEED_1000:
+               return 1000;
+       default:
+               return 0;
+       }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+                                 int link_speed)
+{
+       int rf_dec, rf_int;
+       u32 bcnrc_val;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = link_speed / tx_rate;
+               rf_dec = (link_speed - (rf_int * tx_rate));
+               rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+               bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+                              E1000_RTTBCNRC_RF_INT_MASK);
+               bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+       wr32(E1000_RTTBCNRC, bcnrc_val);
+}
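
RTTBCNRC takes the limit as a fixed-point "rate factor": rf_int is link_speed / tx_rate and rf_dec is the remainder scaled by 2^14 (E1000_RTTBCNRC_RF_INT_SHIFT). A worked example of the arithmetic, user-space only and with assumed values: limiting a 1000 Mb/s link to 300 Mb/s gives rf_int = 3 and rf_dec = 5461, i.e. a factor of roughly 3.333.

#include <stdio.h>

int main(void)
{
	int link_speed = 1000, tx_rate = 300;	/* Mb/s, assumed values */
	int rf_int = link_speed / tx_rate;	/* 3 */
	int rf_dec = ((link_speed - rf_int * tx_rate) << 14) / tx_rate; /* 5461 */
	unsigned int bcnrc_val = 0x80000000u	/* E1000_RTTBCNRC_RS_ENA */
			       | (unsigned int)(rf_int << 14)
			       | (unsigned int)rf_dec;

	printf("rf_int=%d rf_dec=%d bcnrc=0x%08x\n", rf_int, rf_dec, bcnrc_val);
	return 0;
}
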
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+       int actual_link_speed, i;
+       bool reset_rate = false;
+
+       /* VF TX rate limit was not set or not supported */
+       if ((adapter->vf_rate_link_speed == 0) ||
+           (adapter->hw.mac.type != e1000_82576))
+               return;
+
+       actual_link_speed = igb_link_mbps(adapter->link_speed);
+       if (actual_link_speed != adapter->vf_rate_link_speed) {
+               reset_rate = true;
+               adapter->vf_rate_link_speed = 0;
+               dev_info(&adapter->pdev->dev,
+                        "Link speed has been changed. VF Transmit "
+                        "rate is disabled\n");
+       }
+
+       for (i = 0; i < adapter->vfs_allocated_count; i++) {
+               if (reset_rate)
+                       adapter->vf_data[i].tx_rate = 0;
+
+               igb_set_vf_rate_limit(&adapter->hw, i,
+                                     adapter->vf_data[i].tx_rate,
+                                     actual_link_speed);
+       }
+}
+
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
-       return -EOPNOTSUPP;
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       int actual_link_speed;
+
+       if (hw->mac.type != e1000_82576)
+               return -EOPNOTSUPP;
+
+       actual_link_speed = igb_link_mbps(adapter->link_speed);
+       if ((vf >= adapter->vfs_allocated_count) ||
+           (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+           (tx_rate < 0) || (tx_rate > actual_link_speed))
+               return -EINVAL;
+
+       adapter->vf_rate_link_speed = actual_link_speed;
+       adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+       igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+       return 0;
 }
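
With this in place the per-VF cap is validated against the live link speed (a rate above it, or a request while the link is down, returns -EINVAL), and the watchdog's igb_check_vf_rate_limit() drops the limits again if the link speed later changes. Userspace typically drives this hook through the VF rate netlink attribute, e.g. iproute2's `ip link set <dev> vf <n> rate <Mb/s>`.
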
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6606,7 +6816,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-       ivi->tx_rate = 0;
+       ivi->tx_rate = adapter->vf_data[vf].tx_rate;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
index ed6e3d9..1d943aa 100644 (file)
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
-       u8 revision_id;
 
        memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
 
-       pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
-       regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+       regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+                       adapter->pdev->device;
 
        regs_buff[0] = er32(CTRL);
        regs_buff[1] = er32(STATUS);
index 990c329..d5dad5d 100644 (file)
@@ -201,9 +201,6 @@ struct igbvf_adapter {
        unsigned int restart_queue;
        u32 txd_cmd;
 
-       bool detect_tx_hung;
-       u8 tx_timeout_factor;
-
        u32 tx_int_delay;
        u32 tx_abs_int_delay;
 
index 6352c81..6ccc32f 100644 (file)
@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
        buffer_info->time_stamp = 0;
 }
 
-static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
-{
-       struct igbvf_ring *tx_ring = adapter->tx_ring;
-       unsigned int i = tx_ring->next_to_clean;
-       unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
-       union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-
-       /* detected Tx unit hang */
-       dev_err(&adapter->pdev->dev,
-               "Detected Tx Unit Hang:\n"
-               "  TDH                  <%x>\n"
-               "  TDT                  <%x>\n"
-               "  next_to_use          <%x>\n"
-               "  next_to_clean        <%x>\n"
-               "buffer_info[next_to_clean]:\n"
-               "  time_stamp           <%lx>\n"
-               "  next_to_watch        <%x>\n"
-               "  jiffies              <%lx>\n"
-               "  next_to_watch.status <%x>\n",
-               readl(adapter->hw.hw_addr + tx_ring->head),
-               readl(adapter->hw.hw_addr + tx_ring->tail),
-               tx_ring->next_to_use,
-               tx_ring->next_to_clean,
-               tx_ring->buffer_info[eop].time_stamp,
-               eop,
-               jiffies,
-               eop_desc->wb.status);
-}
-
 /**
  * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 {
        struct igbvf_adapter *adapter = tx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct igbvf_buffer *buffer_info;
        struct sk_buff *skb;
@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                }
        }
 
-       if (adapter->detect_tx_hung) {
-               /* Detect a transmit hang in hardware, this serializes the
-                * check with the clearing of time_stamp and movement of i */
-               adapter->detect_tx_hung = false;
-               if (tx_ring->buffer_info[i].time_stamp &&
-                   time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
-                              (adapter->tx_timeout_factor * HZ)) &&
-                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-
-                       tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-                       /* detected Tx unit hang */
-                       igbvf_print_tx_hang(adapter);
-
-                       netif_stop_queue(netdev);
-               }
-       }
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return count < tx_ring->count;
@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
                                                  &adapter->link_duplex);
                        igbvf_print_link_info(adapter);
 
-                       /* adjust timeout factor according to speed/duplex */
-                       adapter->tx_timeout_factor = 1;
-                       switch (adapter->link_speed) {
-                       case SPEED_10:
-                               adapter->tx_timeout_factor = 16;
-                               break;
-                       case SPEED_100:
-                               /* maybe add some timeout factor ? */
-                               break;
-                       }
-
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
@@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
        /* Cause software interrupt to ensure Rx ring is cleaned */
        ew32(EICS, adapter->rx_ring->eims_value);
 
-       /* Force detection of hung controller every watchdog period */
-       adapter->detect_tx_hung = 1;
-
        /* Reset the timer */
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
@@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
-
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+       hw->revision_id = pdev->revision;
 
        err = -EIO;
        adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
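
The probe change above reads the revision from the field the PCI core caches in struct pci_dev during enumeration instead of issuing its own config-space read. A minimal sketch of the same pattern in a hypothetical probe routine (demo_probe is not part of the driver):

    #include <linux/pci.h>

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            u8 rev = pdev->revision;        /* cached PCI_REVISION_ID value */

            dev_info(&pdev->dev, "silicon revision %#x\n", rev);
            return 0;
    }
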
index 74486a8..af3822f 100644 (file)
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
                                   u8 *mc_addr_list, u32 mc_addr_count,
                                   u32 rar_used_count, u32 rar_count)
 {
index aa93655..a5b0f0e 100644 (file)
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
 
        if (phyaddr != 0x1f) {
                u16 mii_phyctrl, mii_1000cr;
-               u8 revisionid = 0;
 
                mii_1000cr  = mdio_read(dev, phyaddr, MII_CTRL1000);
                mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
                mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
 
                /* Set default phyparam */
-               pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
-               ipg_set_phy_default_param(revisionid, dev, phyaddr);
+               ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
 
                /* Reset PHY */
                mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
index 521c0c7..8f3df04 100644 (file)
@@ -149,7 +149,7 @@ struct ixgb_desc_ring {
 
 struct ixgb_adapter {
        struct timer_list watchdog_timer;
-       struct vlan_group *vlgrp;
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        u32 bd_number;
        u32 rx_buffer_len;
        u32 part_num;
index 43994c1..cc53aa1 100644 (file)
@@ -706,6 +706,43 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
        }
 }
 
+static int ixgb_set_flags(struct net_device *netdev, u32 data)
+{
+       struct ixgb_adapter *adapter = netdev_priv(netdev);
+       bool need_reset;
+       int rc;
+
+       /*
+        * Tx VLAN insertion does not work per HW design when Rx stripping is
+        * disabled.  Disable txvlan when rxvlan is turned off, and enable
+        * rxvlan when txvlan is turned on.
+        */
+       if (!(data & ETH_FLAG_RXVLAN) &&
+           (netdev->features & NETIF_F_HW_VLAN_TX))
+               data &= ~ETH_FLAG_TXVLAN;
+       else if (data & ETH_FLAG_TXVLAN)
+               data |= ETH_FLAG_RXVLAN;
+
+       need_reset = (data & ETH_FLAG_RXVLAN) !=
+                    (netdev->features & NETIF_F_HW_VLAN_RX);
+
+       rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
+                                               ETH_FLAG_TXVLAN);
+       if (rc)
+               return rc;
+
+       if (need_reset) {
+               if (netif_running(netdev)) {
+                       ixgb_down(adapter, true);
+                       ixgb_up(adapter);
+                       ixgb_set_speed_duplex(netdev);
+               } else
+                       ixgb_reset(adapter);
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops ixgb_ethtool_ops = {
        .get_settings = ixgb_get_settings,
        .set_settings = ixgb_set_settings,
@@ -732,6 +769,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
        .phys_id = ixgb_phys_id,
        .get_sset_count = ixgb_get_sset_count,
        .get_ethtool_stats = ixgb_get_ethtool_stats,
+       .get_flags = ethtool_op_get_flags,
+       .set_flags = ixgb_set_flags,
 };
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
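
ixgb_set_flags() above encodes the hardware constraint that Tx VLAN insertion only works while Rx stripping is on. The coupling rule in isolation, as a small helper sketch (couple_vlan_flags() is illustrative, not driver code):

    #include <linux/types.h>
    #include <linux/ethtool.h>

    /* Apply "txvlan requires rxvlan" to a requested ethtool flag word. */
    static u32 couple_vlan_flags(u32 requested, bool tx_currently_enabled)
    {
            if (!(requested & ETH_FLAG_RXVLAN) && tx_currently_enabled)
                    requested &= ~ETH_FLAG_TXVLAN;  /* turning rx off drops tx */
            else if (requested & ETH_FLAG_TXVLAN)
                    requested |= ETH_FLAG_RXVLAN;   /* turning tx on pulls rx in */

            return requested;
    }
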
index 5639ccc..0f681ac 100644 (file)
@@ -100,8 +100,6 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
 
 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_register(struct net_device *netdev,
-                                  struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
@@ -336,7 +334,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_set_mac_address    = ixgb_set_mac,
        .ndo_change_mtu         = ixgb_change_mtu,
        .ndo_tx_timeout         = ixgb_tx_timeout,
-       .ndo_vlan_rx_register   = ixgb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1508,7 +1505,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                      DESC_NEEDED)))
                return NETDEV_TX_BUSY;
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
                vlan_id = vlan_tx_tag_get(skb);
        }
@@ -2049,12 +2046,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
                ixgb_rx_checksum(adapter, rx_desc, skb);
 
                skb->protocol = eth_type_trans(skb, netdev);
-               if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
-                       vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-                                               le16_to_cpu(rx_desc->special));
-               } else {
-                       netif_receive_skb(skb);
-               }
+               if (status & IXGB_RX_DESC_STATUS_VP)
+                       __vlan_hwaccel_put_tag(skb,
+                                              le16_to_cpu(rx_desc->special));
+
+               netif_receive_skb(skb);
 
 rxdesc_done:
                /* clean up descriptor, might be written over by hw */
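
With the vlan_group test gone, the Rx clean-up loop above simply records a hardware-stripped tag on the skb and passes every frame to the stack. The delivery step reduces to this shape (demo_rx_deliver and its parameters are stand-ins for the fields read from the descriptor in the real loop):

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>

    static void demo_rx_deliver(struct sk_buff *skb, bool vlan_present,
                                u16 vlan_tci)
    {
            if (vlan_present)
                    __vlan_hwaccel_put_tag(skb, vlan_tci); /* tag rides on the skb */

            netif_receive_skb(skb);
    }
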
@@ -2152,20 +2148,6 @@ map_skb:
        }
 }
 
-/**
- * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
- *
- * @param netdev network interface device structure
- * @param grp indicates to enable or disable tagging/stripping
- **/
-static void
-ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
-{
-       struct ixgb_adapter *adapter = netdev_priv(netdev);
-
-       adapter->vlgrp = grp;
-}
-
 static void
 ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
 {
@@ -2200,6 +2182,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
        vfta |= (1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
+       set_bit(vid, adapter->active_vlans);
 }
 
 static void
@@ -2208,35 +2191,22 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        u32 vfta, index;
 
-       ixgb_irq_disable(adapter);
-
-       vlan_group_set_device(adapter->vlgrp, vid, NULL);
-
-       /* don't enable interrupts unless we are UP */
-       if (adapter->netdev->flags & IFF_UP)
-               ixgb_irq_enable(adapter);
-
        /* remove VID from filter table */
 
        index = (vid >> 5) & 0x7F;
        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
        vfta &= ~(1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
+       clear_bit(vid, adapter->active_vlans);
 }
 
 static void
 ixgb_restore_vlan(struct ixgb_adapter *adapter)
 {
-       ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
-
-       if (adapter->vlgrp) {
-               u16 vid;
-               for (vid = 0; vid < VLAN_N_VID; vid++) {
-                       if (!vlan_group_get_device(adapter->vlgrp, vid))
-                               continue;
-                       ixgb_vlan_rx_add_vid(adapter->netdev, vid);
-               }
-       }
+       u16 vid;
+
+       for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+               ixgb_vlan_rx_add_vid(adapter->netdev, vid);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
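
The vlgrp replacement above is the standard fixed-size bitmap pattern: one bit per possible VID, maintained with the generic bitmap helpers. A self-contained sketch (the demo_* names are illustrative):

    #include <linux/bitops.h>
    #include <linux/if_vlan.h>

    /* One bit per 802.1Q VID; VLAN_N_VID is 4096. */
    static unsigned long demo_active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

    static void demo_add_vid(u16 vid)
    {
            set_bit(vid, demo_active_vlans);
    }

    static void demo_kill_vid(u16 vid)
    {
            clear_bit(vid, demo_active_vlans);
    }

    static void demo_restore(void (*program_filter)(u16 vid))
    {
            u16 vid;

            /* Visit only the VIDs whose bit is set, as ixgb_restore_vlan() does. */
            for_each_set_bit(vid, demo_active_vlans, VLAN_N_VID)
                    program_filter(vid);
    }
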
index 3b8c924..8d46802 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -118,6 +118,7 @@ struct vf_data_storage {
        bool pf_set_mac;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
+       u16 tx_rate;
 };
 
 /* wrapper around a pointer to a socket buffer,
@@ -209,6 +210,7 @@ struct ixgbe_ring {
                                         * associated with this ring, which is
                                         * different for DCB and RSS modes
                                         */
+       u8 dcb_tc;
 
        u16 work_limit;                 /* max work per interrupt */
 
@@ -243,7 +245,7 @@ enum ixgbe_ring_f_enum {
        RING_F_ARRAY_SIZE      /* must be last in enum set */
 };
 
-#define IXGBE_MAX_DCB_INDICES   8
+#define IXGBE_MAX_DCB_INDICES  64
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 64
@@ -334,9 +336,14 @@ struct ixgbe_adapter {
        u16 bd_number;
        struct work_struct reset_task;
        struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+       /* DCB parameters */
+       struct ieee_pfc *ixgbe_ieee_pfc;
+       struct ieee_ets *ixgbe_ieee_ets;
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 dcb_set_bitmap;
+       u8 dcbx_cap;
        enum ixgbe_fc_mode last_lfc_mode;
 
        /* Interrupt Throttle Rate */
@@ -462,6 +469,7 @@ struct ixgbe_adapter {
        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
        unsigned int num_vfs;
        struct vf_data_storage *vfinfo;
+       int vf_rate_link_speed;
 };
 
 enum ixbge_state_t {
@@ -521,7 +529,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -538,6 +545,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_adapter *adapter,
@@ -549,6 +557,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                           struct sk_buff *skb);
 extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                               struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                                struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
index d0f1d9d..845c679 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -158,6 +158,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 
        switch (hw->phy.type) {
        case ixgbe_phy_tn:
+               phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.get_firmware_version =
                             &ixgbe_get_phy_firmware_version_tnx;
@@ -280,10 +281,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
        enum ixgbe_media_type media_type;
 
+       /* Detect if there is a copper PHY attached. */
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
+               media_type = ixgbe_media_type_copper;
+               goto out;
+       default:
+               break;
+       }
+
        /* Media type for I82598 is based on device ID */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82598:
        case IXGBE_DEV_ID_82598_BX:
+               /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
                break;
        case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -306,7 +319,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
                media_type = ixgbe_media_type_unknown;
                break;
        }
-
+out:
        return media_type;
 }
 
@@ -354,7 +367,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
                goto out;
 
        /* Disable any previous flow control settings */
@@ -372,10 +385,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *     we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
-        * other: Invalid.
 #ifdef CONFIG_DCB
         * 4: Priority Flow Control is enabled.
 #endif
+        * other: Invalid.
         */
        switch (hw->fc.current_mode) {
        case ixgbe_fc_none:
@@ -432,9 +445,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
                reg = (rx_pba_size - hw->fc.low_water) << 6;
                if (hw->fc.send_xon)
                        reg |= IXGBE_FCRTL_XONE;
+
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
 
-               reg = (rx_pba_size - hw->fc.high_water) << 10;
+               reg = (rx_pba_size - hw->fc.high_water) << 6;
                reg |= IXGBE_FCRTH_FCEN;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
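
The XON/XOFF water marks above are programmed as an offset below the packet-buffer size, shifted into the register field, with the enable bit OR'd in. A small sketch of that arithmetic, assuming the driver's register-bit constants are in scope; it mirrors the hunk rather than prescribing settings:

    /* Mirror of the FCRTL/FCRTH computation in ixgbe_fc_enable_82598().
     * Note the driver only sets the XON bit when fc.send_xon is enabled;
     * it is shown unconditionally here for brevity.
     */
    static void demo_fc_thresholds(u32 rx_pba_size, u32 low_water,
                                   u32 high_water, u32 *fcrtl, u32 *fcrth)
    {
            *fcrtl = ((rx_pba_size - low_water) << 6) | IXGBE_FCRTL_XONE;
            *fcrth = ((rx_pba_size - high_water) << 6) | IXGBE_FCRTH_FCEN;
    }
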
@@ -627,13 +641,12 @@ out:
        return 0;
 }
 
-
 /**
  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if auto-negotiation enabled
- *  @autoneg_wait_to_complete: true if waiting is needed to complete
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
@@ -672,7 +685,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                 * ixgbe_hw This will write the AUTOC register based on the new
                 * stored values
                 */
-               status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+               status = ixgbe_start_mac_link_82598(hw,
+                                                   autoneg_wait_to_complete);
        }
 
        return status;
@@ -698,7 +712,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
                                              autoneg_wait_to_complete);
-
        /* Set up MAC */
        ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -770,7 +783,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
                else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
                        goto no_phy_reset;
 
-
                hw->phy.ops.reset(hw);
        }
 
@@ -779,12 +791,9 @@ no_phy_reset:
         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
+       ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
        /*
         * Issue global reset to the MAC.  This needs to be a SW reset.
         * If link reset is used, it might reset the MAC when mng is using it
@@ -805,6 +814,19 @@ no_phy_reset:
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }
 
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  We use 1usec since that is
+        * what is needed for ixgbe_disable_pcie_master().  The second reset
+        * then clears out any effects of those events.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               udelay(1);
+               goto mac_reset_top;
+       }
+
        msleep(50);
 
        gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -824,15 +846,15 @@ no_phy_reset:
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
        }
 
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table
         */
        hw->mac.ops.init_rx_addrs(hw);
 
-       /* Store the permanent mac address */
-       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
 reset_hw_out:
        if (phy_status)
                status = phy_status;
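
The reordering above reads the permanent MAC out of RAR0 before init_rx_addrs() runs, presumably so the factory address is captured before the RAR/MTA setup touches the address registers. Condensed, the post-reset order is now:

    /* Post-reset address handling in the order the hunk establishes;
     * error handling omitted for brevity.
     */
    static void demo_post_reset_addrs(struct ixgbe_hw *hw)
    {
            /* 1. Save the factory address while RAR0 is untouched. */
            hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

            /* 2. Then reprogram the receive addresses and clear the MTA. */
            hw->mac.ops.init_rx_addrs(hw);
    }
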
@@ -849,6 +871,13 @@ reset_hw_out:
 static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 {
        u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
 
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
        rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -868,14 +897,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-               if (rar_high & IXGBE_RAH_VIND_MASK) {
-                       rar_high &= ~IXGBE_RAH_VIND_MASK;
-                       IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
-               }
-       } else {
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+       if (rar_high & IXGBE_RAH_VIND_MASK) {
+               rar_high &= ~IXGBE_RAH_VIND_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
        }
 
        return 0;
@@ -994,13 +1026,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
 }
 
 /**
- *  ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module
- *  over I2C interface through an intermediate phy.
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
  *  @byte_offset: EEPROM byte offset to read
  *  @eeprom_data: value read
  *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
  **/
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                       u8 *eeprom_data)
@@ -1074,10 +1105,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 
        /* Copper PHY must be checked before AUTOC LMS to determine correct
         * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-       if (hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_cu_unknown) {
-               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                                    &ext_ability);
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
+       case ixgbe_phy_cu_unknown:
+               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
+               MDIO_MMD_PMAPMD, &ext_ability);
                if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
                if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1085,6 +1118,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
                if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
+       default:
+               break;
        }
 
        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1179,13 +1214,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
        .set_vmdq               = &ixgbe_set_vmdq_82598,
        .clear_vmdq             = &ixgbe_clear_vmdq_82598,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
        .clear_vfta             = &ixgbe_clear_vfta_82598,
        .set_vfta               = &ixgbe_set_vfta_82598,
        .fc_enable              = &ixgbe_fc_enable_82598,
+       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
+       .release_swfw_sync      = &ixgbe_release_swfw_sync,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
index a21f581..00aeba3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                        goto setup_sfp_out;
 
                /* PHY config will finish before releasing the semaphore */
-               ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
                if (ret_val != 0) {
                        ret_val = IXGBE_ERR_SWFW_SYNC;
                        goto setup_sfp_out;
@@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        enum ixgbe_media_type media_type;
 
        /* Detect if there is a copper PHY attached. */
-       if (hw->phy.type == ixgbe_phy_cu_unknown ||
-           hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq) {
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
                media_type = ixgbe_media_type_copper;
                goto out;
+       default:
+               break;
        }
 
        switch (hw->device_id) {
@@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_CX4:
                media_type = ixgbe_media_type_cx4;
                break;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               media_type = ixgbe_media_type_copper;
+               break;
        default:
                media_type = ixgbe_media_type_unknown;
                break;
@@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        return status;
 }
 
- /**
-*  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
-*  @hw: pointer to hardware structure
-*
-*  The base drivers may require better control over SFP+ module
-*  PHY states.  This includes selectively shutting down the Tx
-*  laser on the PHY, effectively halting physical link.
-**/
+/**
+ *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -463,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  **/
 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
-       hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
-
        if (hw->mac.autotry_restart) {
                ixgbe_disable_tx_laser_multispeed_fiber(hw);
                ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -487,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                           bool autoneg_wait_to_complete)
 {
        s32 status = 0;
-       ixgbe_link_speed phy_link_speed;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        u32 speedcnt = 0;
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+       u32 i = 0;
        bool link_up = false;
        bool negotiation;
-       int i;
 
        /* Mask off requested but non-supported speeds */
-       hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
-       speed &= phy_link_speed;
+       status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
+                                                  &negotiation);
+       if (status != 0)
+               return status;
+
+       speed &= link_speed;
 
        /*
         * Try each speed one by one, highest priority first.  We do this in
@@ -508,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 
                /* If we already have link at this speed, just jump out */
-               hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+                                               false);
+               if (status != 0)
+                       return status;
 
-               if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+               if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
                        goto out;
 
                /* Set the module link speed */
@@ -522,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                msleep(40);
 
                status = ixgbe_setup_mac_link_82599(hw,
-                                              IXGBE_LINK_SPEED_10GB_FULL,
-                                              autoneg,
-                                              autoneg_wait_to_complete);
+                                                   IXGBE_LINK_SPEED_10GB_FULL,
+                                                   autoneg,
+                                                   autoneg_wait_to_complete);
                if (status != 0)
                        return status;
 
@@ -536,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                 * Section 73.10.2, we may have to wait up to 500ms if KR is
                 * attempted.  82599 uses the same timing for 10g SFI.
                 */
-
                for (i = 0; i < 5; i++) {
                        /* Wait for the link partner to also set speed */
                        msleep(100);
 
                        /* If we have link, just jump out */
-                       hw->mac.ops.check_link(hw, &phy_link_speed,
-                                              &link_up, false);
+                       status = hw->mac.ops.check_link(hw, &link_speed,
+                                                       &link_up, false);
+                       if (status != 0)
+                               return status;
+
                        if (link_up)
                                goto out;
                }
@@ -555,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
 
                /* If we already have link at this speed, just jump out */
-               hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+                                               false);
+               if (status != 0)
+                       return status;
 
-               if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+               if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
                        goto out;
 
                /* Set the module link speed */
@@ -570,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                msleep(40);
 
                status = ixgbe_setup_mac_link_82599(hw,
-                                                     IXGBE_LINK_SPEED_1GB_FULL,
-                                                     autoneg,
-                                                     autoneg_wait_to_complete);
+                                                   IXGBE_LINK_SPEED_1GB_FULL,
+                                                   autoneg,
+                                                   autoneg_wait_to_complete);
                if (status != 0)
                        return status;
 
@@ -583,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                msleep(100);
 
                /* If we have link, just jump out */
-               hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+                                               false);
+               if (status != 0)
+                       return status;
+
                if (link_up)
                        goto out;
        }
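
Throughout the multispeed-fiber path above, every check_link() call now propagates failures, and link is polled in 100 ms steps (up to 500 ms where clause 73 negotiation may be in progress, per the comment in the hunk). The poll-with-bail-out loop, factored out as a sketch (demo_wait_for_link() is not a driver function):

    /* Poll for link up to "tries" times; return early on error or link-up. */
    static s32 demo_wait_for_link(struct ixgbe_hw *hw, int tries, bool *link_up)
    {
            ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
            s32 status;
            int i;

            for (i = 0; i < tries; i++) {
                    msleep(100);            /* give the link partner time */
                    status = hw->mac.ops.check_link(hw, &speed, link_up, false);
                    if (status != 0)
                            return status;  /* fail fast, as the patch does */
                    if (*link_up)
                            break;
            }

            return 0;
    }
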
@@ -626,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                                     bool autoneg_wait_to_complete)
 {
        s32 status = 0;
-       ixgbe_link_speed link_speed;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        s32 i, j;
        bool link_up = false;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       struct ixgbe_adapter *adapter = hw->back;
-
-       hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
 
         /* Set autoneg_advertised value based on input link speed */
        hw->phy.autoneg_advertised = 0;
@@ -658,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
        for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
                status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
                                                    autoneg_wait_to_complete);
-               if (status)
+               if (status != 0)
                        goto out;
 
                /*
@@ -671,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                        mdelay(100);
 
                        /* If we have link, just jump out */
-                       hw->mac.ops.check_link(hw, &link_speed,
-                                              &link_up, false);
+                       status = hw->mac.ops.check_link(hw, &link_speed,
+                                                       &link_up, false);
+                       if (status != 0)
+                               goto out;
+
                        if (link_up)
                                goto out;
                }
@@ -690,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
        hw->phy.smart_speed_active = true;
        status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
                                            autoneg_wait_to_complete);
-       if (status)
+       if (status != 0)
                goto out;
 
        /*
@@ -703,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                mdelay(100);
 
                /* If we have link, just jump out */
-               hw->mac.ops.check_link(hw, &link_speed,
-                                      &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed,
+                                               &link_up, false);
+               if (status != 0)
+                       goto out;
+
                if (link_up)
                        goto out;
        }
@@ -716,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
 out:
        if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-               e_info(hw, "Smartspeed has downgraded the link speed from "
+               hw_dbg(hw, "Smartspeed has downgraded the link speed from "
                       "the maximum advertised\n");
        return status;
 }
@@ -748,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 
        /* Check to see if speed passed in is supported. */
        hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
+       if (status != 0)
+               goto out;
+
        speed &= link_capabilities;
 
        if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -761,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        else
                orig_autoc = autoc;
 
-
        if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
            link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
            link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -878,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 
        /* PHY ops must be identified and initialized prior to reset */
 
-       /* Init PHY and function pointers, perform SFP setup */
+       /* Identify PHY and related function pointers */
        status = hw->phy.ops.init(hw);
 
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -890,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                hw->phy.sfp_setup_needed = false;
        }
 
+       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               goto reset_hw_out;
+
        /* Reset PHY */
        if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
                hw->phy.ops.reset(hw);
@@ -898,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
+       ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
        /*
         * Issue global reset to the MAC.  This needs to be a SW reset.
         * If link reset is used, it might reset the MAC when mng is using it
@@ -924,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }
 
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  We use 1usec since that is
+        * what is needed for ixgbe_disable_pcie_master().  The second reset
+        * then clears out any effects of those events.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               udelay(1);
+               goto mac_reset_top;
+       }
+
        msleep(50);
 
        /*
@@ -951,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                }
        }
 
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table.  Also reset num_rar_entries to 128,
@@ -959,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
        hw->mac.num_rar_entries = 128;
        hw->mac.ops.init_rx_addrs(hw);
 
-       /* Store the permanent mac address */
-       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
        /* Store the permanent SAN mac address */
        hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
 
@@ -1733,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *
  *  Determines the physical layer module found on the current adapter.
+ *  If PHY already detected, maintains current PHY type in hw struct,
+ *  otherwise executes the PHY detection routine.
  **/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+
+       /* Detect PHY if not unknown - returns success if already detected. */
        status = ixgbe_identify_phy_generic(hw);
-       if (status != 0)
-               status = ixgbe_identify_sfp_module_generic(hw);
+       if (status != 0) {
+               /* 82599 10GBASE-T requires an external PHY */
+               if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+                       goto out;
+               else
+                       status = ixgbe_identify_sfp_module_generic(hw);
+       }
+
+       /* Set PHY type none if no PHY detected */
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               hw->phy.type = ixgbe_phy_none;
+               status = 0;
+       }
+
+       /* Return error if SFP module has been detected but is not supported */
+       if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
        return status;
 }
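
For callers, the net effect of the reworked identification above is a three-way outcome: success with a detected PHY, success with phy.type set to ixgbe_phy_none when nothing is attached, or IXGBE_ERR_SFP_NOT_SUPPORTED for a module that must abort initialization (as the reset path earlier in this file does). A caller-side sketch (demo_init_phy() is illustrative):

    static s32 demo_init_phy(struct ixgbe_hw *hw)
    {
            s32 status = hw->phy.ops.identify(hw);

            if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
                    return status;          /* unsupported module: give up */

            if (hw->phy.type == ixgbe_phy_none)
                    return 0;               /* no PHY/SFP fitted: not an error */

            return status;
    }
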
 
@@ -1763,11 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
 
        hw->phy.ops.identify(hw);
 
-       if (hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq ||
-           hw->phy.type == ixgbe_phy_cu_unknown) {
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
+       case ixgbe_phy_cu_unknown:
                hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                                    &ext_ability);
+                                                        &ext_ability);
                if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
                if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1775,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
                if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
+       default:
+               break;
        }
 
        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1886,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
                if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
                        break;
                else
+                       /* Use interrupt-safe sleep just in case */
                        udelay(10);
        }
 
@@ -1995,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .set_vmdq               = &ixgbe_set_vmdq_generic,
        .clear_vmdq             = &ixgbe_clear_vmdq_generic,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
@@ -2006,31 +2069,34 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .setup_sfp              = &ixgbe_setup_sfp_modules_82599,
        .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
+       .release_swfw_sync      = &ixgbe_release_swfw_sync,
+
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
-       .init_params            = &ixgbe_init_eeprom_params_generic,
-       .read                   = &ixgbe_read_eerd_generic,
-       .write                  = &ixgbe_write_eeprom_generic,
-       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
-       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
-       .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
+       .init_params            = &ixgbe_init_eeprom_params_generic,
+       .read                   = &ixgbe_read_eerd_generic,
+       .write                  = &ixgbe_write_eeprom_generic,
+       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
+       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
+       .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
 };
 
 static struct ixgbe_phy_operations phy_ops_82599 = {
-       .identify               = &ixgbe_identify_phy_82599,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
-       .init                               = &ixgbe_init_phy_ops_82599,
-       .reset                  = &ixgbe_reset_phy_generic,
-       .read_reg               = &ixgbe_read_phy_reg_generic,
-       .write_reg              = &ixgbe_write_phy_reg_generic,
-       .setup_link             = &ixgbe_setup_phy_link_generic,
-       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
-       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
-       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
-       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
-       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
-       .check_overtemp         = &ixgbe_tn_check_overtemp,
+       .identify               = &ixgbe_identify_phy_82599,
+       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
+       .init                   = &ixgbe_init_phy_ops_82599,
+       .reset                  = &ixgbe_reset_phy_generic,
+       .read_reg               = &ixgbe_read_phy_reg_generic,
+       .write_reg              = &ixgbe_write_phy_reg_generic,
+       .setup_link             = &ixgbe_setup_phy_link_generic,
+       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
+       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
+       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
+       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
+       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
+       .check_overtemp         = &ixgbe_tn_check_overtemp,
 };
 
 struct ixgbe_info ixgbe_82599_info = {
index d5ede2d..bcd9529 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
 
 /**
@@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_MRFC);
        IXGBE_READ_REG(hw, IXGBE_RLEC);
        IXGBE_READ_REG(hw, IXGBE_LXONTXC);
-       IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-       IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       if (hw->mac.type >= ixgbe_mac_82599EB) {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       }
 
        for (i = 0; i < 8; i++) {
                IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-               IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               }
        }
-
+       if (hw->mac.type >= ixgbe_mac_82599EB)
+               for (i = 0; i < 8; i++)
+                       IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        IXGBE_READ_REG(hw, IXGBE_PRC64);
        IXGBE_READ_REG(hw, IXGBE_PRC127);
        IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_BPTC);
        for (i = 0; i < 16; i++) {
                IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-               IXGBE_READ_REG(hw, IXGBE_QBRC(i));
                IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-               IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               }
+       }
+
+       if (hw->mac.type == ixgbe_mac_X540) {
+               if (hw->phy.id == 0)
+                       hw->phy.ops.identify(hw);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
        }
 
        return 0;
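
ixgbe_clear_hw_cntrs_generic() relies on the statistics registers being clear-on-read: one read both samples and zeroes a counter, so the 82599/X540 branches above only add reads of the registers those MACs actually implement. The idiom in miniature (demo_clear_counter() is illustrative):

    /* Reading a clear-on-read counter resets it; the value is discarded. */
    static void demo_clear_counter(struct ixgbe_hw *hw, u32 reg)
    {
            (void)IXGBE_READ_REG(hw, reg);
    }

    /* Example use over the per-queue receive packet counters. */
    static void demo_clear_queue_counters(struct ixgbe_hw *hw)
    {
            int i;

            for (i = 0; i < 16; i++)
                    demo_clear_counter(hw, IXGBE_QPRC(i));
    }
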
@@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
         * access and verify no pending requests
         */
-       if (ixgbe_disable_pcie_master(hw) != 0)
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+       ixgbe_disable_pcie_master(hw);
 
        return 0;
 }
@@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
                ixgbe_shift_out_eeprom_bits(hw, data, 16);
                ixgbe_standby_eeprom(hw);
 
-               msleep(hw->eeprom.semaphore_delay);
                /* Done with writing - release the EEPROM */
                ixgbe_release_eeprom(hw);
        }
@@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 {
        s32 status = 0;
-       u32 eec = 0;
+       u32 eec;
        u32 i;
 
-       if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
                        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
                        hw_dbg(hw, "Could not acquire EEPROM grant\n");
 
-                       ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+                       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
                        status = IXGBE_ERR_EEPROM;
                }
-       }
 
-       /* Setup EEPROM for Read/Write */
-       if (status == 0) {
-               /* Clear CS and SK */
-               eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-               IXGBE_WRITE_FLUSH(hw);
-               udelay(1);
+               /* Setup EEPROM for Read/Write */
+               if (status == 0) {
+                       /* Clear CS and SK */
+                       eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+                       IXGBE_WRITE_FLUSH(hw);
+                       udelay(1);
+               }
        }
        return status;
 }
@@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_EEPROM;
-       u32 timeout;
+       u32 timeout = 2000;
        u32 i;
        u32 swsm;
 
-       /* Set timeout value based on size of EEPROM */
-       timeout = hw->eeprom.word_size + 1;
-
        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
@@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                        status = 0;
                        break;
                }
-               msleep(1);
+               udelay(50);
        }
 
        /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * was not granted because we don't have access to the EEPROM
                 */
                if (i >= timeout) {
-                       hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
+                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
                               "not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
+       } else {
+               hw_dbg(hw, "Software semaphore SMBI between device drivers "
+                      "not granted.\n");
        }
 
        return status;
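
The semaphore helper above now polls in fixed 50 µs steps against a constant iteration budget (2000) instead of deriving the timeout from the EEPROM size. The bounded-poll idiom as a stand-alone sketch (demo_poll_bit() and the ready() callback are stand-ins for the SWSM bit test):

    /* Poll a hardware condition every 50us, for at most "budget" attempts. */
    static bool demo_poll_bit(struct ixgbe_hw *hw,
                              bool (*ready)(struct ixgbe_hw *hw),
                              unsigned int budget)
    {
            unsigned int i;

            for (i = 0; i < budget; i++) {
                    if (ready(hw))
                            return true;
                    udelay(50);
            }

            return false;   /* caller logs "semaphore not granted" and bails */
    }
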
@@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
        eec &= ~IXGBE_EEC_REQ;
        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
 
-       ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+       /* Delay before attempt to obtain semaphore again to allow FW access */
+       msleep(hw->eeprom.semaphore_delay);
 }
 
 /**
- *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
  **/
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
        if (status == 0) {
                checksum = hw->eeprom.ops.calc_checksum(hw);
                status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
-                                           checksum);
+                                             checksum);
        } else {
                hw_dbg(hw, "EEPROM read failed\n");
        }
@@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
 
+       /* Make sure we are using a valid rar index range */
+       if (index >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", index);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
        /* setup VMDq pool selection before this RAR gets enabled */
        hw->mac.ops.set_vmdq(hw, index, vmdq);
 
-       /* Make sure we are using a valid rar index range */
-       if (index < rar_entries) {
-               /*
-                * HW expects these in little endian so we reverse the byte
-                * order from network order (big endian) to little endian
-                */
-               rar_low = ((u32)addr[0] |
-                          ((u32)addr[1] << 8) |
-                          ((u32)addr[2] << 16) |
-                          ((u32)addr[3] << 24));
-               /*
-                * Some parts put the VMDq setting in the extra RAH bits,
-                * so save everything except the lower 16 bits that hold part
-                * of the address and the address valid bit.
-                */
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-               rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-               rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+       /*
+        * HW expects these in little endian so we reverse the byte
+        * order from network order (big endian) to little endian
+        */
+       rar_low = ((u32)addr[0] |
+                  ((u32)addr[1] << 8) |
+                  ((u32)addr[2] << 16) |
+                  ((u32)addr[3] << 24));
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+       rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
 
-               if (enable_addr != 0)
-                       rar_high |= IXGBE_RAH_AV;
+       if (enable_addr != 0)
+               rar_high |= IXGBE_RAH_AV;
 
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-       } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", index);
-               return IXGBE_ERR_RAR_INDEX;
-       }
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
        return 0;
 }
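
The RAL/RAH packing above converts the network-order (big-endian) address bytes into the little-endian register layout. A worked example with a made-up address, 00:a0:c9:12:34:56 (demo_pack_rar() is illustrative):

    /* addr[] is in network byte order; the registers are little endian. */
    static void demo_pack_rar(const u8 addr[6], u32 *ral, u32 *rah_low16)
    {
            *ral       = (u32)addr[0]         | ((u32)addr[1] << 8) |
                         ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
            *rah_low16 = (u32)addr[4]         | ((u32)addr[5] << 8);

            /* 00:a0:c9:12:34:56 -> RAL = 0x12c9a000, RAH[15:0] = 0x5634 */
    }
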
@@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
        u32 rar_entries = hw->mac.num_rar_entries;
 
        /* Make sure we are using a valid rar index range */
-       if (index < rar_entries) {
-               /*
-                * Some parts put the VMDq setting in the extra RAH bits,
-                * so save everything except the lower 16 bits that hold part
-                * of the address and the address valid bit.
-                */
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-               rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-       } else {
+       if (index >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", index);
-               return IXGBE_ERR_RAR_INDEX;
+               return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
-       /* clear VMDq pool/queue selection for this RAR */
-       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
-       return 0;
-}
-
-/**
- *  ixgbe_enable_rar - Enable Rx address register
- *  @hw: pointer to hardware structure
- *  @index: index into the RAR table
- *
- *  Enables the select receive address register.
- **/
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
-{
-       u32 rar_high;
-
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-       rar_high |= IXGBE_RAH_AV;
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-}
 
-/**
- *  ixgbe_disable_rar - Disable Rx address register
- *  @hw: pointer to hardware structure
- *  @index: index into the RAR table
- *
- *  Disables the select receive address register.
- **/
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
-{
-       u32 rar_high;
+       /* clear VMDq pool/queue selection for this RAR */
+       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
 
-       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-       rar_high &= (~IXGBE_RAH_AV);
-       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+       return 0;
 }
 
 /**
@@ -1370,6 +1371,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
                hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+               /*  clear VMDq pool/queue selection for RAR 0 */
+               hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
        }
        hw->addr_ctrl.overflow_promisc = 0;
 
@@ -1383,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        }
 
        /* Clear the MTA */
-       hw->addr_ctrl.mc_addr_in_rar_count = 0;
        hw->addr_ctrl.mta_in_use = 0;
        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1397,105 +1400,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        return 0;
 }
 
-/**
- *  ixgbe_add_uc_addr - Adds a secondary unicast address.
- *  @hw: pointer to hardware structure
- *  @addr: new address
- *
- *  Adds it to unused receive address register or goes into promiscuous mode.
- **/
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-       u32 rar_entries = hw->mac.num_rar_entries;
-       u32 rar;
-
-       hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
-                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
-       /*
-        * Place this address in the RAR if there is room,
-        * else put the controller into promiscuous mode
-        */
-       if (hw->addr_ctrl.rar_used_count < rar_entries) {
-               rar = hw->addr_ctrl.rar_used_count -
-                     hw->addr_ctrl.mc_addr_in_rar_count;
-               hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-               hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
-               hw->addr_ctrl.rar_used_count++;
-       } else {
-               hw->addr_ctrl.overflow_promisc++;
-       }
-
-       hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
-}
-
-/**
- *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
- *  @hw: pointer to hardware structure
- *  @netdev: pointer to net device structure
- *
- *  The given list replaces any existing list.  Clears the secondary addrs from
- *  receive address registers.  Uses unused receive address registers for the
- *  first secondary addresses, and falls back to promiscuous mode as needed.
- *
- *  Drivers using secondary unicast addresses must set user_set_promisc when
- *  manually putting the device into promiscuous mode.
- **/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev)
-{
-       u32 i;
-       u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
-       u32 uc_addr_in_use;
-       u32 fctrl;
-       struct netdev_hw_addr *ha;
-
-       /*
-        * Clear accounting of old secondary address list,
-        * don't count RAR[0]
-        */
-       uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
-       hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
-       hw->addr_ctrl.overflow_promisc = 0;
-
-       /* Zero out the other receive addresses */
-       hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
-       for (i = 0; i < uc_addr_in_use; i++) {
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
-       }
-
-       /* Add the new addresses */
-       netdev_for_each_uc_addr(ha, netdev) {
-               hw_dbg(hw, " Adding the secondary addresses:\n");
-               ixgbe_add_uc_addr(hw, ha->addr, 0);
-       }
-
-       if (hw->addr_ctrl.overflow_promisc) {
-               /* enable promisc if not already in overflow or set by user */
-               if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
-                       hw_dbg(hw, " Entering address overflow promisc mode\n");
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl |= IXGBE_FCTRL_UPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-                       hw->addr_ctrl.uc_set_promisc = true;
-               }
-       } else {
-               /* only disable if set by overflow, not by user */
-               if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
-                  !(hw->addr_ctrl.user_set_promisc)) {
-                       hw_dbg(hw, " Leaving address overflow promisc mode\n");
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl &= ~IXGBE_FCTRL_UPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-                       hw->addr_ctrl.uc_set_promisc = false;
-               }
-       }
-
-       hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
-       return 0;
-}
-
 /**
  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
  *  @hw: pointer to hardware structure
@@ -1547,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
        u32 vector;
        u32 vector_bit;
        u32 vector_reg;
-       u32 mta_reg;
 
        hw->addr_ctrl.mta_in_use++;
 
@@ -1565,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
         */
        vector_reg = (vector >> 5) & 0x7F;
        vector_bit = vector & 0x1F;
-       mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-       mta_reg |= (1 << vector_bit);
-       IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+       hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
 }
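
The register/bit split used for the shadow update above can be illustrated with a standalone sketch (the ixgbe_mta_vector() hash itself is not reproduced; an arbitrary 12-bit vector value is assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mta_shadow[128] = { 0 };	/* 128 x 32-bit MTA words = 4096 hash bits */
	uint32_t vector = 0x9a3;		/* assumed 12-bit hash result */

	uint32_t vector_reg = (vector >> 5) & 0x7F;	/* which 32-bit word      */
	uint32_t vector_bit = vector & 0x1F;		/* which bit in that word */

	mta_shadow[vector_reg] |= 1u << vector_bit;

	printf("vector 0x%03x -> MTA word %u, bit %u\n",
	       vector, vector_reg, vector_bit);
	return 0;
}

With 0x9a3 this sets bit 3 of word 77; the driver then flushes the whole shadow array to the MTA registers in one pass, as the next hunk shows.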
 
 /**
@@ -1593,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
        hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
        hw->addr_ctrl.mta_in_use = 0;
 
-       /* Clear the MTA */
+       /* Clear mta_shadow */
        hw_dbg(hw, " Clearing MTA\n");
-       for (i = 0; i < hw->mac.mcft_size; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
 
-       /* Add the new addresses */
+       /* Update mta shadow */
        netdev_for_each_mc_addr(ha, netdev) {
                hw_dbg(hw, " Adding the multicast addresses:\n");
                ixgbe_set_mta(hw, ha->addr);
        }
 
        /* Enable mta */
+       for (i = 0; i < hw->mac.mcft_size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+                                     hw->mac.mta_shadow[i]);
+
        if (hw->addr_ctrl.mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1621,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
  **/
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-       if (a->mc_addr_in_rar_count > 0)
-               for (i = (rar_entries - a->mc_addr_in_rar_count);
-                    i < rar_entries; i++)
-                       ixgbe_enable_rar(hw, i);
-
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
                                hw->mac.mc_filter_type);
@@ -1645,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-       if (a->mc_addr_in_rar_count > 0)
-               for (i = (rar_entries - a->mc_addr_in_rar_count);
-                    i < rar_entries; i++)
-                       ixgbe_disable_rar(hw, i);
-
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1682,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
                goto out;
 
        /* Disable any previous flow control settings */
@@ -1700,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
         * 4: Priority Flow Control is enabled.
+#endif
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
@@ -1788,12 +1680,13 @@ out:
  **/
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
        ixgbe_link_speed speed;
-       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-       u32 links2, anlp1_reg, autoc_reg, links;
        bool link_up;
 
+       if (hw->fc.disable_fc_autoneg)
+               goto out;
+
        /*
         * AN should have completed when the cable was plugged in.
         * Look for reasons to bail out.  Bail out if:
@@ -1804,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
         * So use link_up_wait_to_complete=false.
         */
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
-       if (hw->fc.disable_fc_autoneg || (!link_up)) {
-               hw->fc.fc_was_autonegged = false;
-               hw->fc.current_mode = hw->fc.requested_mode;
+       if (!link_up) {
+               ret_val = IXGBE_ERR_FLOW_CONTROL;
                goto out;
        }
 
-       /*
-        * On backplane, bail out if
-        * - backplane autoneg was not completed, or if
-        * - we are 82599 and link partner is not AN enabled
-        */
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
-               links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-               if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       goto out;
-               }
+       switch (hw->phy.media_type) {
+       /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber:
+               if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+                       ret_val = ixgbe_fc_autoneg_fiber(hw);
+               break;
 
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-                       if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-                               hw->fc.fc_was_autonegged = false;
-                               hw->fc.current_mode = hw->fc.requested_mode;
-                               goto out;
-                       }
-               }
+       /* Autoneg flow control on backplane adapters */
+       case ixgbe_media_type_backplane:
+               ret_val = ixgbe_fc_autoneg_backplane(hw);
+               break;
+
+       /* Autoneg flow control on copper adapters */
+       case ixgbe_media_type_copper:
+               if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+                       ret_val = ixgbe_fc_autoneg_copper(hw);
+               break;
+
+       default:
+               break;
        }
 
+out:
+       if (ret_val == 0) {
+               hw->fc.fc_was_autonegged = true;
+       } else {
+               hw->fc.fc_was_autonegged = false;
+               hw->fc.current_mode = hw->fc.requested_mode;
+       }
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+       s32 ret_val;
+
        /*
         * On multispeed fiber at 1g, bail out if
         * - link is up but AN did not complete, or if
         * - link is up and AN completed but timed out
         */
-       if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
-               linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-               if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-                   ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       goto out;
-               }
+
+       linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+       if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+           ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               goto out;
        }
 
+       pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+       pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+       ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+                              pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE,
+                              IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+       u32 links2, anlp1_reg, autoc_reg, links;
+       s32 ret_val;
+
        /*
-        * Bail out on
-        * - copper or CX4 adapters
-        * - fiber adapters running at 10gig
+        * On backplane, bail out if
+        * - backplane autoneg was not completed, or if
+        * - we are 82599 and link partner is not AN enabled
         */
-       if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-            (hw->phy.media_type == ixgbe_media_type_cx4) ||
-            ((hw->phy.media_type == ixgbe_media_type_fiber) &&
-            (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+       links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
                hw->fc.fc_was_autonegged = false;
                hw->fc.current_mode = hw->fc.requested_mode;
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
                goto out;
        }
 
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+               links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+                       hw->fc.fc_was_autonegged = false;
+                       hw->fc.current_mode = hw->fc.requested_mode;
+                       ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+                       goto out;
+               }
+       }
        /*
-        * Read the AN advertisement and LP ability registers and resolve
+        * Read the 10g AN autoc and LP ability registers and resolve
         * local flow control settings accordingly
         */
-       if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
-           (hw->phy.media_type != ixgbe_media_type_backplane)) {
-               pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-               pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-               if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * of pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise RX
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == ixgbe_fc_full) {
-                               hw->fc.current_mode = ixgbe_fc_full;
-                               hw_dbg(hw, "Flow Control = FULL.\n");
-                       } else {
-                               hw->fc.current_mode = ixgbe_fc_rx_pause;
-                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
-                       }
-               } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_tx_pause;
-                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-               } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                          !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-               } else {
-                       hw->fc.current_mode = ixgbe_fc_none;
-                       hw_dbg(hw, "Flow Control = NONE.\n");
-               }
-       }
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+       ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+               anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+               IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+       u16 technology_ability_reg = 0;
+       u16 lp_technology_ability_reg = 0;
+
+       hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                            MDIO_MMD_AN,
+                            &technology_ability_reg);
+       hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
+                            MDIO_MMD_AN,
+                            &lp_technology_ability_reg);
+
+       return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+                                 (u32)lp_technology_ability_reg,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ *  ixgbe_negotiate_fc - Negotiate flow control
+ *  @hw: pointer to hardware structure
+ *  @adv_reg: flow control advertised settings
+ *  @lp_reg: link partner's flow control settings
+ *  @adv_sym: symmetric pause bit in advertisement
+ *  @adv_asm: asymmetric pause bit in advertisement
+ *  @lp_sym: symmetric pause bit in link partner advertisement
+ *  @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ *  Find the intersection between advertised settings and link partner's
+ *  advertised settings
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+       if ((!(adv_reg)) ||  (!(lp_reg)))
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+       if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
                /*
-                * Read the 10g AN autoc and LP ability registers and resolve
-                * local flow control settings accordingly
+                * Now we need to check if the user selected Rx ONLY
+                * of pause frames.  In this case, we had to advertise
+                * FULL flow control because we could not advertise RX
+                * ONLY. Hence, we must now check to see if we need to
+                * turn OFF the TRANSMISSION of PAUSE frames.
                 */
-               autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-               anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
-               if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * of pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise RX
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == ixgbe_fc_full) {
-                               hw->fc.current_mode = ixgbe_fc_full;
-                               hw_dbg(hw, "Flow Control = FULL.\n");
-                       } else {
-                               hw->fc.current_mode = ixgbe_fc_rx_pause;
-                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
-                       }
-               } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_tx_pause;
-                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-               } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
-                          !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+               if (hw->fc.requested_mode == ixgbe_fc_full) {
+                       hw->fc.current_mode = ixgbe_fc_full;
+                       hw_dbg(hw, "Flow Control = FULL.\n");
                } else {
-                       hw->fc.current_mode = ixgbe_fc_none;
-                       hw_dbg(hw, "Flow Control = NONE.\n");
+                       hw->fc.current_mode = ixgbe_fc_rx_pause;
+                       hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
                }
+       } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_tx_pause;
+               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+       } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_rx_pause;
+               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+       } else {
+               hw->fc.current_mode = ixgbe_fc_none;
+               hw_dbg(hw, "Flow Control = NONE.\n");
        }
-       /* Record that current_mode is the result of a successful autoneg */
-       hw->fc.fc_was_autonegged = true;
-
-out:
-       return ret_val;
+       return 0;
 }
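
The four branches above implement the usual symmetric/asymmetric PAUSE resolution; the following is a small standalone sketch of the same intersection logic (local placeholder bit masks, not the real IXGBE_PCS1GANA_*/AUTOC/TAF values, and the requested-mode check reduced to a flag):

#include <stdint.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Intersect our advertisement with the link partner's, mirroring the branches above */
static enum fc_mode negotiate(uint32_t adv, uint32_t lp,
			      uint32_t sym, uint32_t asym, int requested_full)
{
	if ((adv & sym) && (lp & sym))
		return requested_full ? FC_FULL : FC_RX_PAUSE;
	if (!(adv & sym) && (adv & asym) && (lp & sym) && (lp & asym))
		return FC_TX_PAUSE;
	if ((adv & sym) && (adv & asym) && !(lp & sym) && (lp & asym))
		return FC_RX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	const uint32_t SYM = 1u << 0, ASYM = 1u << 1;	/* placeholder masks */
	static const char * const name[] = { "NONE", "RX PAUSE", "TX PAUSE", "FULL" };

	/* We advertise SYM+ASYM, the partner advertises ASYM only: RX PAUSE only */
	printf("Flow Control = %s\n",
	       name[negotiate(SYM | ASYM, ASYM, SYM, ASYM, 1)]);
	return 0;
}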
 
 /**
@@ -1962,7 +1901,8 @@ out:
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 {
        s32 ret_val = 0;
-       u32 reg;
+       u32 reg = 0, reg_bp = 0;
+       u16 reg_cu = 0;
 
 #ifdef CONFIG_DCB
        if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1970,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                goto out;
        }
 
-#endif
+#endif /* CONFIG_DCB */
        /* Validate the packetbuf configuration */
        if (packetbuf_num < 0 || packetbuf_num > 7) {
                hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -2008,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                hw->fc.requested_mode = ixgbe_fc_full;
 
        /*
-        * Set up the 1G flow control advertisement registers so the HW will be
-        * able to do fc autoneg once the cable is plugged in.  If we end up
-        * using 10g instead, this is harmless.
+        * Set up the 1G and 10G flow control advertisement registers so the
+        * HW will be able to do fc autoneg once the cable is plugged in.  If
+        * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+       case ixgbe_media_type_backplane:
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               break;
+
+       case ixgbe_media_type_copper:
+               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                                       MDIO_MMD_AN, &reg_cu);
+               break;
+
+       default:
+               ;
+       }
 
        /*
         * The possible values of fc.requested_mode are:
@@ -2031,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
        case ixgbe_fc_none:
                /* Flow control completely disabled by software override. */
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+                                   IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_rx_pause:
                /*
@@ -2042,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                 * disable the adapter's ability to send PAUSE frames.
                 */
                reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_tx_pause:
                /*
@@ -2050,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                 */
                reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane) {
+                       reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+               } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+                       reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+               }
                break;
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
                reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
 #ifdef CONFIG_DCB
        case ixgbe_fc_pfc:
@@ -2067,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                break;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-       /* Disable AN timeout */
-       if (hw->fc.strict_ieee)
-               reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+       if (hw->mac.type != ixgbe_mac_X540) {
+               /*
+                * Enable auto-negotiation between the MAC & PHY;
+                * the MAC will advertise clause 37 flow control.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-       hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+               /* Disable AN timeout */
+               if (hw->fc.strict_ieee)
+                       reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
 
-       /*
-        * Set up the 10G flow control advertisement registers so the HW
-        * can do fc autoneg once the cable is plugged in.  If we end up
-        * using 1g instead, this is harmless.
-        */
-       reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+               hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+       }
 
        /*
-        * The possible values of fc.requested_mode are:
-        * 0: Flow control is completely disabled
-        * 1: Rx flow control is enabled (we can receive pause frames,
-        *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
-        *    we do not support receiving pause frames).
-        * 3: Both Rx and Tx flow control (symmetric) are enabled.
-        * other: Invalid.
+        * AUTOC restart handles negotiation of 1G and 10G on backplane
+        * and copper. There is no need to set the PCS1GCTL register.
+        *
         */
-       switch (hw->fc.requested_mode) {
-       case ixgbe_fc_none:
-               /* Flow control completely disabled by software override. */
-               reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-       case ixgbe_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is
-                * disabled by software override. Since there really
-                * isn't a way to advertise that we are capable of RX
-                * Pause ONLY, we will advertise that we support both
-                * symmetric and asymmetric Rx PAUSE.  Later, we will
-                * disable the adapter's ability to send PAUSE frames.
-                */
-               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-       case ixgbe_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled by software override.
-                */
-               reg |= (IXGBE_AUTOC_ASM_PAUSE);
-               reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
-               break;
-       case ixgbe_fc_full:
-               /* Flow control (both Rx and Tx) is enabled by SW override. */
-               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
-       default:
-               hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
-               break;
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+               reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+       } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+                   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+                                     MDIO_MMD_AN, reg_cu);
        }
-       /*
-        * AUTOC restart handles negotiation of 1G and 10G. There is
-        * no need to set the PCS1GCTL register.
-        */
-       reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
-       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 
+       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 out:
        return ret_val;
 }
@@ -2156,10 +2090,16 @@ out:
  **/
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
+       struct ixgbe_adapter *adapter = hw->back;
        u32 i;
        u32 reg_val;
        u32 number_of_queues;
-       s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+       s32 status = 0;
+       u16 dev_status = 0;
+
+       /* Just jump out if bus mastering is already disabled */
+       if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+               goto out;
 
        /* Disable the receive unit by stopping each queue */
        number_of_queues = hw->mac.max_rx_queues;
@@ -2176,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
 
        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
-                       status = 0;
+               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+                       goto check_device_status;
+               udelay(100);
+       }
+
+       hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+       status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+       /*
+        * Before proceeding, make sure that the PCIe block does not have
+        * transactions pending.
+        */
+check_device_status:
+       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+               pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+                                                        &dev_status);
+               if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
                        break;
-               }
                udelay(100);
        }
 
+       if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
+               hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+       else
+               goto out;
+
+       /*
+        * Two consecutive resets are required via CTRL.RST per datasheet
+        * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
+        * of this need.  The first reset prevents new master requests from
+        * being issued by our device.  We then must wait 1usec for any
+        * remaining completions from the PCIe bus to trickle in, and then reset
+        * again to clear out any effects they may have had on our device.
+        */
+        hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+out:
        return status;
 }
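
Both loops above are instances of the same poll-with-timeout pattern; a stripped-down sketch follows (no real register or config-space access, a stub stands in for the status read, and the iteration count is an arbitrary assumption):

#include <stdio.h>

#define POLL_TIMEOUT 800	/* assumed iteration cap; the driver pairs its
				 * own timeout constant with udelay(100) */

/* Stub for "is the pending bit still set?"; pretends it clears eventually */
static int pending(int iteration)
{
	return iteration < 42;
}

int main(void)
{
	int i;

	for (i = 0; i < POLL_TIMEOUT; i++) {
		if (!pending(i))
			break;
		/* udelay(100) would sit here in the driver */
	}

	if (i == POLL_TIMEOUT)
		printf("timed out - fall back to requesting a double reset\n");
	else
		printf("pending bit cleared after %d polls\n", i);
	return 0;
}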
 
@@ -2192,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *  @mask: Mask to specify which semaphore to acquire
  *
- *  Acquires the SWFW semaphore thought the GSSR register for the specified
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2203,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        s32 timeout = 200;
 
        while (timeout) {
+               /*
+                * SW EEPROM semaphore bit is used for access to all
+                * SW_FW_SYNC/GSSR bits (not just EEPROM)
+                */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;
 
@@ -2220,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        }
 
        if (!timeout) {
-               hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
+               hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
                return IXGBE_ERR_SWFW_SYNC;
        }
 
@@ -2236,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  *  @hw: pointer to hardware structure
  *  @mask: Mask to specify which semaphore to release
  *
- *  Releases the SWFW semaphore thought the GSSR register for the specified
+ *  Releases the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2424,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 mpsar_lo, mpsar_hi;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-               mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
 
-               if (!mpsar_lo && !mpsar_hi)
-                       goto done;
+       mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+       mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
-               if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-                       if (mpsar_lo) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-                               mpsar_lo = 0;
-                       }
-                       if (mpsar_hi) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-                               mpsar_hi = 0;
-                       }
-               } else if (vmdq < 32) {
-                       mpsar_lo &= ~(1 << vmdq);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-               } else {
-                       mpsar_hi &= ~(1 << (vmdq - 32));
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-               }
+       if (!mpsar_lo && !mpsar_hi)
+               goto done;
 
-               /* was that the last pool using this rar? */
-               if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-                       hw->mac.ops.clear_rar(hw, rar);
+       if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+               if (mpsar_lo) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+                       mpsar_lo = 0;
+               }
+               if (mpsar_hi) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+                       mpsar_hi = 0;
+               }
+       } else if (vmdq < 32) {
+               mpsar_lo &= ~(1 << vmdq);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
        } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               mpsar_hi &= ~(1 << (vmdq - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
        }
 
+       /* was that the last pool using this rar? */
+       if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+               hw->mac.ops.clear_rar(hw, rar);
 done:
        return 0;
 }
@@ -2470,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 mpsar;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               if (vmdq < 32) {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-                       mpsar |= 1 << vmdq;
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-               } else {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-                       mpsar |= 1 << (vmdq - 32);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-               }
-       } else {
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       if (vmdq < 32) {
+               mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+               mpsar |= 1 << vmdq;
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+       } else {
+               mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+               mpsar |= 1 << (vmdq - 32);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
        }
        return 0;
 }
@@ -2494,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
 {
        int i;
 
-
        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 
@@ -2723,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
  *  Reads the links register to determine if link is up and the current speed
  **/
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-                               bool *link_up, bool link_up_wait_to_complete)
+                                bool *link_up, bool link_up_wait_to_complete)
 {
-       u32 links_reg;
+       u32 links_reg, links_orig;
        u32 i;
 
+       /* clear the old state */
+       links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+       if (links_orig != links_reg) {
+               hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+                      links_orig, links_reg);
+       }
+
        if (link_up_wait_to_complete) {
                for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                        if (links_reg & IXGBE_LINKS_UP) {
@@ -2751,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
            IXGBE_LINKS_SPEED_10G_82599)
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
        else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-                IXGBE_LINKS_SPEED_1G_82599)
+                IXGBE_LINKS_SPEED_1G_82599)
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
-       else
+       else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+                IXGBE_LINKS_SPEED_100_82599)
                *speed = IXGBE_LINK_SPEED_100_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
 
        /* if link is down, zero out the current_mode */
        if (*link_up == false) {
@@ -2810,6 +2798,28 @@ wwn_prefix_out:
        return 0;
 }
 
+/**
+ *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ *  control
+ *  @hw: pointer to hardware structure
+ *
+ *  There are several phys that do not support autoneg flow control. This
+ *  function checks the device id to see if the associated phy supports
+ *  autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X540T:
+               return 0;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               return 0;
+       default:
+               return IXGBE_ERR_FC_NOT_SUPPORTED;
+       }
+}
+
 /**
  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  *  @hw: pointer to hardware structure
index 66ed045..508f635 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
 #define _IXGBE_COMMON_H_
 
 #include "ixgbe_type.h"
+#include "ixgbe.h"
 
 u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -62,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
                                      struct net_device *netdev);
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -110,9 +109,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-       netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+       netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
 #define e_dev_info(format, arg...) \
        dev_info(&adapter->pdev->dev, format, ## arg)
 #define e_dev_warn(format, arg...) \
index d16c260..41c529f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #include "ixgbe_dcb_82598.h"
 #include "ixgbe_dcb_82599.h"
 
+/**
+ * ixgbe_ieee_credits - This calculates the ieee traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups so this is much simplified from the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+       int min_percent = 100;
+       int min_credit, multiplier;
+       int i;
+
+       min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+                       DCB_CREDIT_QUANTUM;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               if (bw[i] < min_percent && bw[i])
+                       min_percent = bw[i];
+       }
+
+       multiplier = (min_credit / min_percent) + 1;
+
+       /* Find out the hw credits for each TC */
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+               if (val < min_credit)
+                       val = min_credit;
+               refill[i] = val;
+
+               max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
+       }
+       return 0;
+}
+
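
A rough worked example of the credit math above, as a standalone sketch; DCB_CREDIT_QUANTUM, MAX_CREDIT_REFILL and MAX_CREDIT are given plausible values here (64, 511 and 4095) purely as assumptions, and min() is spelled out:

#include <stdio.h>

#define MAX_TC             8
#define DCB_CREDIT_QUANTUM 64	/* assumed bytes per credit */
#define MAX_CREDIT_REFILL  511	/* assumed refill cap */
#define MAX_CREDIT         4095	/* assumed max-credit cap */

int main(void)
{
	int bw[MAX_TC] = { 50, 20, 10, 10, 5, 5, 0, 0 };	/* percentages */
	int max_frame = 1518;
	int refill, max, min_percent = 100, i;

	/* Smallest credit that still covers half of a max-sized frame */
	int min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
			 DCB_CREDIT_QUANTUM;

	for (i = 0; i < MAX_TC; i++)
		if (bw[i] && bw[i] < min_percent)
			min_percent = bw[i];

	/* Scale so even the smallest non-zero class reaches min_credit */
	int multiplier = (min_credit / min_percent) + 1;

	for (i = 0; i < MAX_TC; i++) {
		refill = bw[i] * multiplier;
		if (refill > MAX_CREDIT_REFILL)
			refill = MAX_CREDIT_REFILL;
		if (refill < min_credit)
			refill = min_credit;
		max = bw[i] ? (bw[i] * MAX_CREDIT) / 100 : min_credit;
		printf("TC%d: bw=%2d%%  refill=%3d  max=%4d\n",
		       i, bw[i], refill, max);
	}
	return 0;
}

With this input TC0 gets refill 150 and max 2047, while the zero-bandwidth classes are clamped up to min_credit (12), mirroring what the per-TC loop above does.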
 /**
  * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
  * @ixgbe_dcb_config: Struct containing DCB settings.
@@ -141,6 +177,59 @@ out:
        return ret_val;
 }
 
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+       int i;
+
+       *pfc_en = 0;
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+               *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+                            u16 *refill)
+{
+       struct tc_bw_alloc *p;
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               p = &cfg->tc_config[i].path[direction];
+               refill[i] = p->data_credits_refill;
+       }
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+               max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+                           u8 *bwgid)
+{
+       struct tc_bw_alloc *p;
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               p = &cfg->tc_config[i].path[direction];
+               bwgid[i] = p->bwg_id;
+       }
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+                           u8 *ptype)
+{
+       struct tc_bw_alloc *p;
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               p = &cfg->tc_config[i].path[direction];
+               ptype[i] = p->prio_type;
+       }
+}
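
The pfc_en value produced by ixgbe_dcb_unpack_pfc() is just a per-TC bitmap; a small standalone sketch follows (a plain 0/1 enable flag per class stands in for the dcb_pfc field):

#include <stdint.h>
#include <stdio.h>

#define MAX_TC 8

int main(void)
{
	int pfc_cfg[MAX_TC] = { 1, 1, 0, 0, 1, 0, 0, 0 };	/* 1 = PFC on for that TC */
	uint8_t pfc_en = 0;
	int i;

	for (i = 0; i < MAX_TC; i++)
		if (pfc_cfg[i])
			pfc_en |= 1u << i;	/* bit i <=> traffic class i */

	printf("pfc_en = 0x%02x\n", pfc_en);	/* 0x13: TCs 0, 1 and 4 enabled */
	return 0;
}

This bitmap form is what ixgbe_dcb_hw_pfc_config() further down consumes.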
+
 /**
  * ixgbe_dcb_hw_config - Config and enable DCB
  * @hw: pointer to hardware structure
@@ -152,13 +241,32 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                         struct ixgbe_dcb_config *dcb_config)
 {
        s32 ret = 0;
+       u8 pfc_en;
+       u8 ptype[MAX_TRAFFIC_CLASS];
+       u8 bwgid[MAX_TRAFFIC_CLASS];
+       u16 refill[MAX_TRAFFIC_CLASS];
+       u16 max[MAX_TRAFFIC_CLASS];
+       /* CEE does not define a priority to tc mapping so map 1:1 */
+       u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+       /* Unpack CEE standard containers */
+       ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+       ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+       ixgbe_dcb_unpack_max(dcb_config, max);
+       ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+       ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+               ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+                                               pfc_en, refill, max, bwgid,
+                                               ptype);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+               ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+                                               pfc_en, refill, max, bwgid,
+                                               ptype, prio_tc);
                break;
        default:
                break;
@@ -166,3 +274,49 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
        return ret;
 }
 
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+       int ret = -EINVAL;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+                           u16 *refill, u16 *max, u8 *bwg_id,
+                           u8 *prio_type, u8 *prio_tc)
+{
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+                                                       prio_type);
+               ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+                                                            bwg_id, prio_type);
+               ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+                                                            bwg_id, prio_type);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+                                                 bwg_id, prio_type, prio_tc);
+               ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+                                                      bwg_id, prio_type);
+               ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+                                                      prio_type, prio_tc);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
index 1cfe38e..944838f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
        struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
        u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
        bool   pfc_mode_enable;
-       bool   round_robin_enable;
 
        enum dcb_rx_pba_cfg rx_pba_cfg;
 
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
 };
 
 /* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
 
 /* DCB credits calculation */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
                                   struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+                           u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
 /* DCB definitions for credit calculation */
index 9a5e89c..1bc57e5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
-                                                struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
 {
        s32 ret_val = 0;
        u32 value = IXGBE_RXPBSIZE_64KB;
        u8  i = 0;
 
        /* Setup Rx packet buffer sizes */
-       switch (dcb_config->rx_pba_cfg) {
+       switch (rx_pba) {
        case pba_80_48:
                /* Setup the first four at 80KB */
                value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
  *
  * Configure Rx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+                                       u16 *refill,
+                                       u16 *max,
+                                       u8 *prio_type)
 {
-       struct tc_bw_alloc    *p;
        u32    reg           = 0;
        u32    credit_refill = 0;
        u32    credit_max    = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-               credit_refill = p->data_credits_refill;
-               credit_max    = p->data_credits_max;
+               credit_refill = refill[i];
+               credit_max    = max[i];
 
                reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RT2CR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32    reg, max_credits;
        u8     i;
 
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Enable arbiter */
        reg &= ~IXGBE_DPMCS_ARBDIS;
-       if (!(dcb_config->round_robin_enable)) {
-               /* Enable DFP and Recycle mode */
-               reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
-       }
+       /* Enable DFP and Recycle mode */
+       reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
        reg |= IXGBE_DPMCS_TSOEF;
        /* Configure Max TSO packet size 34KB including payload and headers */
        reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               max_credits = dcb_config->tc_config[i].desc_credits_max;
+               max_credits = max[i];
                reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
-               reg |= p->data_credits_refill;
-               reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+               reg |= refill[i];
+               reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_TDTQ2TCCR_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_TDTQ2TCCR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Tx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32 reg;
        u8 i;
 
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               reg = p->data_credits_refill;
-               reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
-               reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+               reg = refill[i];
+               reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+               reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_TDPT2TCCR_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_TDPT2TCCR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,59 +228,57 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Priority Flow Control for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 {
        u32 reg, rx_pba_size;
        u8  i;
 
-       if (!dcb_config->pfc_mode_enable)
-               goto out;
-
-       /* Enable Transmit Priority Flow Control */
-       reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-       reg &= ~IXGBE_RMCS_TFCE_802_3X;
-       /* correct the reporting of our flow control status */
-       reg |= IXGBE_RMCS_TFCE_PRIORITY;
-       IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
-
-       /* Enable Receive Priority Flow Control */
-       reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-       reg &= ~IXGBE_FCTRL_RFCE;
-       reg |= IXGBE_FCTRL_RPFCE;
-       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+       if (pfc_en) {
+               /* Enable Transmit Priority Flow Control */
+               reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+               reg &= ~IXGBE_RMCS_TFCE_802_3X;
+               /* correct the reporting of our flow control status */
+               reg |= IXGBE_RMCS_TFCE_PRIORITY;
+               IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+               /* Enable Receive Priority Flow Control */
+               reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+               reg &= ~IXGBE_FCTRL_RFCE;
+               reg |= IXGBE_FCTRL_RPFCE;
+               IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+               /* Configure pause time */
+               for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
+
+               /* Configure flow control refresh threshold value */
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
+       }
 
        /*
         * Configure flow control thresholds and enable priority flow control
         * for each traffic class.
         */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               int enabled = pfc_en & (1 << i);
                rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
                rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
                reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+               if (enabled == pfc_enabled_tx ||
+                   enabled == pfc_enabled_full)
                        reg |= IXGBE_FCRTL_XONE;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
 
                reg = (rx_pba_size - hw->fc.high_water) << 10;
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+               if (enabled == pfc_enabled_tx ||
+                   enabled == pfc_enabled_full)
                        reg |= IXGBE_FCRTH_FCEN;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
        }
 
-       /* Configure pause time */
-       for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
-               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
-
-       /* Configure flow control refresh threshold value */
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
-
-out:
        return 0;
 }
 
@@ -292,7 +289,7 @@ out:
  * Configure queue statistics registers, all queues belonging to same traffic
  * class uses a single set of queue statistics counters.
  */
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
 {
        u32 reg = 0;
        u8  i   = 0;
@@ -325,13 +322,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type)
 {
-       ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
-       ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
-       ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
-       ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
-       ixgbe_dcb_config_pfc_82598(hw, dcb_config);
+       ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
+       ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+       ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_pfc_82598(hw, pfc_en);
        ixgbe_dcb_config_tc_stats_82598(hw);
 
        return 0;
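A hypothetical caller of the reworked 82598 entry point (sketch only; the local values are illustrative, and in the driver they would come from the unpack helpers or the credit calculation). rx_pba selects the packet-buffer split from enum dcb_rx_pba_cfg (pba_80_48 appears in the hunk above) and pfc_en is the per-TC PFC enable bitmask.

        u16 refill[MAX_TRAFFIC_CLASS]    = { 64, 64, 64, 64, 64, 64, 64, 64 };
        u16 max[MAX_TRAFFIC_CLASS]       = { 512, 512, 512, 512, 512, 512, 512, 512 };
        u8  bwg_id[MAX_TRAFFIC_CLASS]    = { 0, 1, 2, 3, 4, 5, 6, 7 };
        u8  prio_type[MAX_TRAFFIC_CLASS] = { prio_group, prio_group, prio_group, prio_group,
                                             prio_group, prio_group, prio_group, prio_group };

        ixgbe_dcb_hw_config_82598(&adapter->hw, pba_80_48, 0xff /* PFC requested on all TCs */,
                                  refill, max, bwg_id, prio_type);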
index abc03cc..1e9750c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
 
 /* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+                                       u16 *refill,
+                                       u16 *max,
+                                       u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type);
 
 #endif /* _DCB_82598_CONFIG_H */
index 374e1f7..025af8c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 /**
  * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
 {
        s32 ret_val = 0;
        u32 value = IXGBE_RXPBSIZE_64KB;
        u8  i = 0;
 
        /* Setup Rx packet buffer sizes */
-       switch (dcb_config->rx_pba_cfg) {
+       switch (rx_pba) {
        case pba_80_48:
                /* Setup the first four at 80KB */
                value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,20 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Rx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+                                     u16 *refill,
+                                     u16 *max,
+                                     u8 *bwg_id,
+                                     u8 *prio_type,
+                                     u8 *prio_tc)
 {
-       struct tc_bw_alloc    *p;
        u32    reg           = 0;
        u32    credit_refill = 0;
        u32    credit_max    = 0;
@@ -98,20 +103,18 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
        /* Map all traffic classes to their UP, 1 to 1 */
        reg = 0;
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-               reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+               reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
-               credit_refill = p->data_credits_refill;
-               credit_max    = p->data_credits_max;
+               credit_refill = refill[i];
+               credit_max    = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
 
-               reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+               reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RTRPT4C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +133,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+                                          u16 *refill,
+                                          u16 *max,
+                                          u8 *bwg_id,
+                                          u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32    reg, max_credits;
        u8     i;
 
@@ -149,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               max_credits = dcb_config->tc_config[i].desc_credits_max;
+               max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
-               reg |= p->data_credits_refill;
-               reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+               reg |= refill[i];
+               reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_RTTDT2C_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RTTDT2C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +184,20 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+                                          u16 *refill,
+                                          u16 *max,
+                                          u8 *bwg_id,
+                                          u8 *prio_type,
+                                          u8 *prio_tc)
 {
-       struct tc_bw_alloc *p;
        u32 reg;
        u8 i;
 
@@ -200,20 +213,19 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
        /* Map all traffic classes to their UP, 1 to 1 */
        reg = 0;
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-               reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+               reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               reg = p->data_credits_refill;
-               reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
-               reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+               reg = refill[i];
+               reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+               reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_RTTPT2C_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RTTPT2C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,63 +245,59 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure Priority Flow Control (PFC) for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
 {
        u32 i, reg, rx_pba_size;
 
-       /* If PFC is disabled globally then fall back to LFC. */
-       if (!dcb_config->pfc_mode_enable) {
-               for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-                       hw->mac.ops.fc_enable(hw, i);
-               goto out;
-       }
-
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               int enabled = pfc_en & (1 << i);
                rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
                rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
                reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+               if (enabled)
                        reg |= IXGBE_FCRTL_XONE;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
 
                reg = (rx_pba_size - hw->fc.high_water) << 10;
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+               if (enabled)
                        reg |= IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }
 
-       /* Configure pause time (2 TCs per register) */
-       reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-       for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
-               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-       /* Configure flow control refresh threshold value */
-       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-       /* Enable Transmit PFC */
-       reg = IXGBE_FCCFG_TFCE_PRIORITY;
-       IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+       if (pfc_en) {
+               /* Configure pause time (2 TCs per register) */
+               reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+               for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+               /* Configure flow control refresh threshold value */
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+               /* Enable Transmit PFC */
+               reg = IXGBE_FCCFG_TFCE_PRIORITY;
+               IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+               /*
+                * Enable Receive PFC
+                * We will always honor XOFF frames we receive when
+                * we are in PFC mode.
+                */
+               reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+               reg &= ~IXGBE_MFLCN_RFCE;
+               reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
+               IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+       } else {
+               for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+                       hw->mac.ops.fc_enable(hw, i);
+       }
 
-       /*
-        * Enable Receive PFC
-        * We will always honor XOFF frames we receive when
-        * we are in PFC mode.
-        */
-       reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-       reg &= ~IXGBE_MFLCN_RFCE;
-       reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
-       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-out:
        return 0;
 }
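The pfc_en argument is a plain per-traffic-class bitmask: bit i enables priority flow control on TC i, and a value of zero falls back to legacy link flow control through the per-TC fc_enable() calls above. An illustrative call, not taken from the patch:

        /* 0x0a = bits 1 and 3 set: XON/XOFF thresholds are programmed only
         * for TC 1 and TC 3; the other classes are left without PFC */
        ixgbe_dcb_config_pfc_82599(&adapter->hw, 0x0a);

        /* 0x00: no PFC at all -> per-TC fc_enable() restores link flow control */
        ixgbe_dcb_config_pfc_82599(&adapter->hw, 0);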
 
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_config_82599 - Configure general DCB parameters
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
  *
  * Configure general DCB parameters.
  */
@@ -406,19 +413,28 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
 {
-       ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+       ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
        ixgbe_dcb_config_82599(hw);
-       ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+       ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+                                         prio_type, prio_tc);
+       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+                                              bwg_id, prio_type, prio_tc);
+       ixgbe_dcb_config_pfc_82599(hw, pfc_en);
        ixgbe_dcb_config_tc_stats_82599(hw);
 
        return 0;
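For reference, the prio_tc array threaded through the Rx and Tx data arbiters above packs one traffic-class number per user priority into RTRUP2TC/RTTUP2TC. A worked example, assuming the 3-bit-per-UP field layout implied by IXGBE_RTRUP2TC_UP_SHIFT (the define itself lives in the register headers and is not shown here):

        /* UPs 0-2 -> TC0, UP3 -> TC1, UP4 -> TC2, UPs 5-7 -> TC3 */
        u8  prio_tc[MAX_TRAFFIC_CLASS] = { 0, 0, 0, 1, 2, 3, 3, 3 };
        u32 reg = 0;
        int i;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                reg |= prio_tc[i] << (i * 3);   /* assuming IXGBE_RTRUP2TC_UP_SHIFT == 3 */
        /* reg == 0x006da200 */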
index 3841649..148fd8b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+                                       u16 *refill,
+                                       u16 *max,
+                                       u8 *bwg_id,
+                                       u8 *prio_type,
+                                       u8 *prio_tc);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type,
+                                               u8 *prio_tc);
+
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *config);
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type,
+                             u8 *prio_tc);
 
 #endif /* _DCB_82599_CONFIG_H */
index bf566e8..fec4c72 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
 #define BIT_PG_RX      0x04
 #define BIT_PG_TX      0x08
 #define BIT_APP_UPCHG  0x10
-#define BIT_RESETLINK   0x40
 #define BIT_LINKSPEED   0x80
 
 /* Responses for the DCB_C_SET_ALL command */
@@ -130,7 +129,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                        netdev->netdev_ops->ndo_stop(netdev);
                ixgbe_clear_interrupt_scheme(adapter);
 
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82598EB:
                        adapter->last_lfc_mode = adapter->hw.fc.current_mode;
@@ -146,6 +144,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                }
 
                adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+               if (!netdev_get_num_tc(netdev))
+                       ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
+
                ixgbe_init_interrupt_scheme(adapter);
                if (netif_running(netdev))
                        netdev->netdev_ops->ndo_open(netdev);
@@ -160,7 +161,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                        adapter->temp_dcb_cfg.pfc_mode_enable = false;
                        adapter->dcb_cfg.pfc_mode_enable = false;
                        adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-                       adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
                        switch (adapter->hw.mac.type) {
                        case ixgbe_mac_82599EB:
                        case ixgbe_mac_X540:
@@ -170,6 +170,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                                break;
                        }
 
+                       ixgbe_setup_tc(netdev, 0);
+
                        ixgbe_init_interrupt_scheme(adapter);
                        if (netif_running(netdev))
                                netdev->netdev_ops->ndo_open(netdev);
@@ -225,10 +227,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
            (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
             adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
            (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
                adapter->dcb_set_bitmap |= BIT_PG_TX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +239,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
        adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
 
        if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+           adapter->dcb_cfg.bw_percentage[0][bwg_id])
                adapter->dcb_set_bitmap |= BIT_PG_TX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +267,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
            (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
             adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
            (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
                adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +279,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
        adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
 
        if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+           adapter->dcb_cfg.bw_percentage[1][bwg_id])
                adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -355,31 +349,28 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int ret;
 
-       if (!adapter->dcb_set_bitmap)
+       if (!adapter->dcb_set_bitmap ||
+           !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return DCB_NO_HW_CHG;
 
        ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-                                adapter->ring_feature[RING_F_DCB].indices);
+                                MAX_TRAFFIC_CLASS);
 
        if (ret)
                return DCB_NO_HW_CHG;
 
        /*
-        * Only take down the adapter if the configuration change
-        * requires a reset.
+        * Only take down the adapter if an app change occurred. FCoE
+        * may shuffle tx rings in this case and this cannot be done
+        * without a reset currently.
         */
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
                while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                        msleep(1);
 
-               if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-                       if (netif_running(netdev))
-                               netdev->netdev_ops->ndo_stop(netdev);
-                       ixgbe_clear_interrupt_scheme(adapter);
-               } else {
-                       if (netif_running(netdev))
-                               ixgbe_down(adapter);
-               }
+               if (netif_running(netdev))
+                       netdev->netdev_ops->ndo_stop(netdev);
+               ixgbe_clear_interrupt_scheme(adapter);
        }
 
        if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +399,53 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                }
        }
 
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
-               if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-                       ixgbe_init_interrupt_scheme(adapter);
-                       if (netif_running(netdev))
-                               netdev->netdev_ops->ndo_open(netdev);
-               } else {
-                       if (netif_running(netdev))
-                               ixgbe_up(adapter);
-               }
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+               ixgbe_init_interrupt_scheme(adapter);
+               if (netif_running(netdev))
+                       netdev->netdev_ops->ndo_open(netdev);
                ret = DCB_HW_CHG_RST;
-       } else if (adapter->dcb_set_bitmap & BIT_PFC) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-                       ixgbe_dcb_config_pfc_82598(&adapter->hw,
-                                                  &adapter->dcb_cfg);
-               else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       ixgbe_dcb_config_pfc_82599(&adapter->hw,
-                                                  &adapter->dcb_cfg);
+       }
+
+       if (adapter->dcb_set_bitmap & BIT_PFC) {
+               u8 pfc_en;
+               ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+               ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
                ret = DCB_HW_CHG;
        }
+
+       if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+               u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+               u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+               /* Priority to TC mapping in the CEE case defaults to 1:1 */
+               u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
+               int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef CONFIG_FCOE
+               if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+                       max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+               ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+                                              max_frame, DCB_TX_CONFIG);
+               ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+                                              max_frame, DCB_RX_CONFIG);
+
+               ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+                                       DCB_TX_CONFIG, refill);
+               ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+               ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+                                      DCB_TX_CONFIG, bwg_id);
+               ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+                                     DCB_TX_CONFIG, prio_type);
+
+               ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+                                       bwg_id, prio_type, prio_tc);
+       }
+
        if (adapter->dcb_cfg.pfc_mode_enable)
                adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
                clear_bit(__IXGBE_RESETTING, &adapter->state);
        adapter->dcb_set_bitmap = 0x00;
        return ret;
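The max_frame fed into the credit calculation above is the full layer-2 frame on the wire, so the arithmetic for the common case looks like this (the 1500-byte MTU is assumed purely for illustration; netdev stands in for adapter->netdev):

        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        /* mtu 1500: 1500 + 14 + 4 = 1518 bytes per frame;
         * a 9000-byte jumbo mtu gives 9018; with FCoE enabled the larger
         * IXGBE_FCOE_JUMBO_FRAME_SIZE is used instead so FC frames are
         * not under-credited. */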
@@ -439,40 +454,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u8 rval = 0;
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               switch (capid) {
-               case DCB_CAP_ATTR_PG:
-                       *cap = true;
-                       break;
-               case DCB_CAP_ATTR_PFC:
-                       *cap = true;
-                       break;
-               case DCB_CAP_ATTR_UP2TC:
-                       *cap = false;
-                       break;
-               case DCB_CAP_ATTR_PG_TCS:
-                       *cap = 0x80;
-                       break;
-               case DCB_CAP_ATTR_PFC_TCS:
-                       *cap = 0x80;
-                       break;
-               case DCB_CAP_ATTR_GSP:
-                       *cap = true;
-                       break;
-               case DCB_CAP_ATTR_BCN:
-                       *cap = false;
-                       break;
-               default:
-                       rval = -EINVAL;
-                       break;
-               }
-       } else {
-               rval = -EINVAL;
+       switch (capid) {
+       case DCB_CAP_ATTR_PG:
+               *cap = true;
+               break;
+       case DCB_CAP_ATTR_PFC:
+               *cap = true;
+               break;
+       case DCB_CAP_ATTR_UP2TC:
+               *cap = false;
+               break;
+       case DCB_CAP_ATTR_PG_TCS:
+               *cap = 0x80;
+               break;
+       case DCB_CAP_ATTR_PFC_TCS:
+               *cap = 0x80;
+               break;
+       case DCB_CAP_ATTR_GSP:
+               *cap = true;
+               break;
+       case DCB_CAP_ATTR_BCN:
+               *cap = false;
+               break;
+       case DCB_CAP_ATTR_DCBX:
+               *cap = adapter->dcbx_cap;
+               break;
+       default:
+               *cap = false;
+               break;
        }
 
-       return rval;
+       return 0;
 }
 
 static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -533,21 +546,16 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
  */
 static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 {
-       u8 rval = 0;
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct dcb_app app = {
+                               .selector = idtype,
+                               .protocol = id,
+                            };
 
-       switch (idtype) {
-       case DCB_APP_IDTYPE_ETHTYPE:
-#ifdef IXGBE_FCOE
-               if (id == ETH_P_FCOE)
-                       rval = ixgbe_fcoe_getapp(netdev_priv(netdev));
-#endif
-               break;
-       case DCB_APP_IDTYPE_PORTNUM:
-               break;
-       default:
-               break;
-       }
-       return rval;
+       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+               return 0;
+
+       return dcb_getapp(netdev, &app);
 }
 
 /**
@@ -562,24 +570,45 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
                              u8 idtype, u16 id, u8 up)
 {
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u8 rval = 1;
+       struct dcb_app app = {
+                             .selector = idtype,
+                             .protocol = id,
+                             .priority = up
+                            };
+
+       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+               return rval;
+
+       rval = dcb_setapp(netdev, &app);
 
        switch (idtype) {
        case DCB_APP_IDTYPE_ETHTYPE:
 #ifdef IXGBE_FCOE
                if (id == ETH_P_FCOE) {
-                       u8 tc;
-                       struct ixgbe_adapter *adapter;
+                       u8 old_tc;
 
-                       adapter = netdev_priv(netdev);
-                       tc = adapter->fcoe.tc;
+                       /* Get current programmed tc */
+                       old_tc = adapter->fcoe.tc;
                        rval = ixgbe_fcoe_setapp(adapter, up);
-                       if ((!rval) && (tc != adapter->fcoe.tc) &&
-                           (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-                           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+                       if (rval ||
+                          !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+                          !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+                               break;
+
+                       /* The FCoE application priority may be changed multiple
+                        * times in quick succession with switches that build up
+                        * TLVs. To avoid creating unneeded device resets this
+                        * checks the actual HW configuration and clears
+                        * BIT_APP_UPCHG if a HW configuration change is not
+                        * needed.
+                        */
+                       if (old_tc == adapter->fcoe.tc)
+                               adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+                       else
                                adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-                               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-                       }
                }
 #endif
                break;
@@ -591,7 +620,204 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
        return rval;
 }
 
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+                                  struct ieee_ets *ets)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+       /* No IEEE ETS settings available */
+       if (!my_ets)
+               return -EINVAL;
+
+       ets->ets_cap = MAX_TRAFFIC_CLASS;
+       ets->cbs = my_ets->cbs;
+       memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+       memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+       memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+       return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+                                  struct ieee_ets *ets)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+       __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       int i, err;
+       __u64 *p = (__u64 *) ets->prio_tc;
+       /* naively give each TC a bwg to map onto CEE hardware */
+       __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       if (!adapter->ixgbe_ieee_ets) {
+               adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+                                                 GFP_KERNEL);
+               if (!adapter->ixgbe_ieee_ets)
+                       return -ENOMEM;
+       }
+
+       memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+       /* Map TSA onto CEE prio type */
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       prio_type[i] = 2;
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       prio_type[i] = 0;
+                       break;
+               default:
+                       /* Hardware only supports the strict priority and
+                        * ETS transmission selection algorithms; if
+                        * we receive any other value from dcbnl,
+                        * throw an error.
+                        */
+                       return -EINVAL;
+               }
+       }
+
+       if (*p)
+               ixgbe_dcbnl_set_state(dev, 1);
+       else
+               ixgbe_dcbnl_set_state(dev, 0);
+
+       ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+       err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+                                     bwg_id, prio_type, ets->prio_tc);
+       return err;
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+                                  struct ieee_pfc *pfc)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+       int i;
+
+       /* No IEEE PFC settings available */
+       if (!my_pfc)
+               return -EINVAL;
+
+       pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+       pfc->pfc_en = my_pfc->pfc_en;
+       pfc->mbc = my_pfc->mbc;
+       pfc->delay = my_pfc->delay;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               pfc->requests[i] = adapter->stats.pxoffrxc[i];
+               pfc->indications[i] = adapter->stats.pxofftxc[i];
+       }
+
+       return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+                                  struct ieee_pfc *pfc)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       int err;
+
+       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       if (!adapter->ixgbe_ieee_pfc) {
+               adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+                                                 GFP_KERNEL);
+               if (!adapter->ixgbe_ieee_pfc)
+                       return -ENOMEM;
+       }
+
+       memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+       err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+       return err;
+}
+
+static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
+                                  struct dcb_app *app)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+#ifdef IXGBE_FCOE
+       if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
+               if (adapter->fcoe.tc == app->priority)
+                       goto setapp;
+
+               /* In IEEE mode, map user priority (up) to tc 1:1 */
+               adapter->fcoe.tc = app->priority;
+               adapter->fcoe.up = app->priority;
+
+               /* Force hardware reset required to push FCoE
+                * setup on {tx|rx}_rings
+                */
+               adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
+               ixgbe_dcbnl_set_all(dev);
+       }
+
+setapp:
+#endif
+       dcb_setapp(dev, app);
+       return 0;
+}
+
+static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       return adapter->dcbx_cap;
+}
+
+static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ieee_ets ets = {0};
+       struct ieee_pfc pfc = {0};
+
+       /* no support for LLD_MANAGED modes or CEE+IEEE */
+       if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+           ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+           !(mode & DCB_CAP_DCBX_HOST))
+               return 1;
+
+       if (mode == adapter->dcbx_cap)
+               return 0;
+
+       adapter->dcbx_cap = mode;
+
+       /* ETS and PFC defaults */
+       ets.ets_cap = 8;
+       pfc.pfc_cap = 8;
+
+       if (mode & DCB_CAP_DCBX_VER_IEEE) {
+               ixgbe_dcbnl_ieee_setets(dev, &ets);
+               ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+       } else if (mode & DCB_CAP_DCBX_VER_CEE) {
+               adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+               ixgbe_dcbnl_set_all(dev);
+       } else {
+               /* Drop into single-TC, strict-priority mode, as this
+                * indicates that both the CEE and IEEE versions are disabled.
+                */
+               ixgbe_dcbnl_ieee_setets(dev, &ets);
+               ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+               ixgbe_dcbnl_set_state(dev, 0);
+       }
+
+       return 0;
+}
+
 const struct dcbnl_rtnl_ops dcbnl_ops = {
+       .ieee_getets    = ixgbe_dcbnl_ieee_getets,
+       .ieee_setets    = ixgbe_dcbnl_ieee_setets,
+       .ieee_getpfc    = ixgbe_dcbnl_ieee_getpfc,
+       .ieee_setpfc    = ixgbe_dcbnl_ieee_setpfc,
+       .ieee_setapp    = ixgbe_dcbnl_ieee_setapp,
        .getstate       = ixgbe_dcbnl_get_state,
        .setstate       = ixgbe_dcbnl_set_state,
        .getpermhwaddr  = ixgbe_dcbnl_get_perm_hw_addr,
@@ -613,5 +839,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
        .setpfcstate    = ixgbe_dcbnl_setpfcstate,
        .getapp         = ixgbe_dcbnl_getapp,
        .setapp         = ixgbe_dcbnl_setapp,
+       .getdcbx        = ixgbe_dcbnl_getdcbx,
+       .setdcbx        = ixgbe_dcbnl_setdcbx,
 };
-
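To make the new IEEE paths above concrete, a hedged usage sketch follows (the static handlers are normally reached through dcbnl_ops via the dcbnl netlink layer; the direct calls and all values below are illustrative only, with dev standing in for the net_device). ixgbe_dcbnl_ieee_setets() maps each IEEE 802.1Qaz TSA onto the prio_type values the CEE-era hardware helpers expect (strict -> 2, ETS -> 0, anything else -> -EINVAL), and any non-zero prio_tc entry flips the device into DCB mode via ixgbe_dcbnl_set_state().

        struct ieee_ets ets = {
                .ets_cap  = 8,
                /* 50/30/20 ETS split on TCs 0-2, TC 3 strict, rest unused */
                .tc_tx_bw = { 50, 30, 20, 0, 0, 0, 0, 0 },
                .tc_tsa   = { IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS,
                              IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_STRICT,
                              IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS,
                              IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS },
                /* non-zero mapping -> enables DCB (multi-TC) mode */
                .prio_tc  = { 0, 0, 1, 2, 3, 0, 0, 0 },
        };

        /* only host-managed, single-version DCBX modes are accepted */
        ixgbe_dcbnl_setdcbx(dev, DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE); /* returns 0 */
        ixgbe_dcbnl_setdcbx(dev, DCB_CAP_DCBX_LLD_MANAGED);                  /* returns 1 */

        /* tc_tsa above becomes prio_type = { 0, 0, 0, 2, 0, 0, 0, 0 } */
        ixgbe_dcbnl_ieee_setets(dev, &ets);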
index 2002ea8..76380a2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -152,20 +152,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
                ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                    SUPPORTED_Autoneg);
 
+               switch (hw->mac.type) {
+               case ixgbe_mac_X540:
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       break;
+               default:
+                       break;
+               }
+
                ecmd->advertising = ADVERTISED_Autoneg;
-               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               /*
-                * It's possible that phy.autoneg_advertised may not be
-                * set yet.  If so display what the default would be -
-                * both 1G and 10G supported.
-                */
-               if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
-                                          ADVERTISED_10000baseT_Full)))
+               if (hw->phy.autoneg_advertised) {
+                       if (hw->phy.autoneg_advertised &
+                           IXGBE_LINK_SPEED_100_FULL)
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       if (hw->phy.autoneg_advertised &
+                           IXGBE_LINK_SPEED_10GB_FULL)
+                               ecmd->advertising |= ADVERTISED_10000baseT_Full;
+                       if (hw->phy.autoneg_advertised &
+                           IXGBE_LINK_SPEED_1GB_FULL)
+                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               } else {
+                       /*
+                        * Default advertised modes in case
+                        * phy.autoneg_advertised isn't set.
+                        */
                        ecmd->advertising |= (ADVERTISED_10000baseT_Full |
                                              ADVERTISED_1000baseT_Full);
+                       if (hw->mac.type == ixgbe_mac_X540)
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+               }
 
                if (hw->phy.media_type == ixgbe_media_type_copper) {
                        ecmd->supported |= SUPPORTED_TP;
@@ -271,8 +286,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
 
        hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
        if (link_up) {
-               ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-                              SPEED_10000 : SPEED_1000;
+               switch (link_speed) {
+               case IXGBE_LINK_SPEED_10GB_FULL:
+                       ecmd->speed = SPEED_10000;
+                       break;
+               case IXGBE_LINK_SPEED_1GB_FULL:
+                       ecmd->speed = SPEED_1000;
+                       break;
+               case IXGBE_LINK_SPEED_100_FULL:
+                       ecmd->speed = SPEED_100;
+                       break;
+               default:
+                       break;
+               }
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ecmd->speed = -1;
@@ -306,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
                if (ecmd->advertising & ADVERTISED_1000baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
+               if (ecmd->advertising & ADVERTISED_100baseT_Full)
+                       advertised |= IXGBE_LINK_SPEED_100_FULL;
+
                if (old == advertised)
                        return err;
                /* this sets the link speed and restarts auto-neg */
index 6342d48..dba7d77 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -135,22 +135,19 @@ out_ddp_put:
        return len;
 }
 
+
 /**
- * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
  * @netdev: the corresponding net_device
  * @xid: the exchange id requesting ddp
  * @sgl: the scatter-gather list for this request
  * @sgc: the number of scatter-gather items
  *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O.
- *
  * Returns : 1 for success and 0 for no ddp
  */
-int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                      struct scatterlist *sgl, unsigned int sgc)
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+                               struct scatterlist *sgl, unsigned int sgc,
+                               int target_mode)
 {
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
@@ -159,13 +156,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        struct scatterlist *sg;
        unsigned int i, j, dmacount;
        unsigned int len;
-       static const unsigned int bufflen = 4096;
+       static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
        unsigned int firstoff = 0;
        unsigned int lastsize;
        unsigned int thisoff = 0;
        unsigned int thislen = 0;
-       u32 fcbuff, fcdmarw, fcfltrw;
-       dma_addr_t addr;
+       u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
+       dma_addr_t addr = 0;
 
        if (!netdev || !sgl)
                return 0;
@@ -254,9 +251,30 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        /* only the last buffer may have non-full bufflen */
        lastsize = thisoff + thislen;
 
+       /*
+        * lastsize cannot be equal to bufflen.
+        * If it is, add another buffer with lastsize = 1.
+        */
+       if (lastsize == bufflen) {
+               if (j >= IXGBE_BUFFCNT_MAX) {
+                       e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+                               "not enough user buffers. We need an extra "
+                               "buffer because lastsize is bufflen.\n",
+                               xid, i, j, dmacount, (u64)addr);
+                       goto out_noddp_free;
+               }
+
+               ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+               j++;
+               lastsize = 1;
+       }
+
        fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
        fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
        fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+       /* Set WRCONTX bit to allow DDP for target */
+       if (target_mode)
+               fcbuff |= (IXGBE_FCBUFF_WRCONTX);
        fcbuff |= (IXGBE_FCBUFF_VALID);
 
        fcdmarw = xid;
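A rough model of the buffer accounting behind the workaround in the hunk above, assuming the exchange starts at offset zero (the real code also tracks firstoff and per-sg offsets): when the last 4 KB user buffer would be completely full, the shared extra_ddp_buffer is appended and lastsize is forced to 1, because the hardware cannot accept lastsize equal to bufflen.

#include <stdio.h>

#define BUFFLEN 4096	/* plays the role of IXGBE_FCBUFF_MIN here */

int main(void)
{
	unsigned int io_len = 2 * BUFFLEN;	/* hypothetical exchange size */
	unsigned int nbufs = (io_len + BUFFLEN - 1) / BUFFLEN;
	unsigned int lastsize = io_len - (nbufs - 1) * BUFFLEN;

	if (lastsize == BUFFLEN) {
		/* hardware quirk: report an extra, one-byte-used buffer
		 * instead of a completely full last buffer */
		nbufs++;
		lastsize = 1;
	}
	printf("user buffers=%u lastsize=%u\n", nbufs, lastsize);
	return 0;
}

With io_len a multiple of 4 KB this prints one more buffer than the data strictly needs, mirroring the j++ / lastsize = 1 path above.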
@@ -269,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        /* program DMA context */
        hw = &adapter->hw;
        spin_lock_bh(&fcoe->lock);
+
+       /* turn on last frame indication for target mode, as the FCP target is
+        * supposed to send FCP_RSP when it is done. */
+       if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+               set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+               fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+               fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+               IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+       }
+
        IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -277,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
        spin_unlock_bh(&fcoe->lock);
 
        return 1;
@@ -290,6 +319,47 @@ out_noddp_unmap:
        return 0;
 }
 
+/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                      struct scatterlist *sgl, unsigned int sgc)
+{
+       return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                           struct scatterlist *sgl, unsigned int sgc)
+{
+       return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
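The two wrappers above are reached through net_device_ops rather than called directly. A hedged sketch, assuming a kernel built with FCoE support, of how an upper-layer driver such as libfc's FCP layer might dispatch to the initiator or target hook (the real call sites live in the FCoE/libfc code and are not part of this patch):

#include <linux/netdevice.h>
#include <linux/scatterlist.h>

/* sketch only: pick the DDP hook that this patch wires up for ixgbe */
static int example_request_ddp(struct net_device *netdev, u16 xid,
			       struct scatterlist *sgl, unsigned int sgc,
			       bool target)
{
	const struct net_device_ops *ops = netdev->netdev_ops;

	if (target && ops->ndo_fcoe_ddp_target)
		return ops->ndo_fcoe_ddp_target(netdev, xid, sgl, sgc);
	if (!target && ops->ndo_fcoe_ddp_setup)
		return ops->ndo_fcoe_ddp_setup(netdev, xid, sgl, sgc);

	return 0;	/* 0 means no DDP; the caller falls back to the normal path */
}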
 /**
  * ixgbe_fcoe_ddp - check ddp status and mark it done
  * @adapter: ixgbe adapter
@@ -313,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_fcoe_ddp *ddp;
        struct fc_frame_header *fh;
+       struct fcoe_crc_eof *crc;
 
        if (!ixgbe_rx_is_fcoe(rx_desc))
                goto ddp_out;
@@ -366,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                else if (ddp->len)
                        rc = ddp->len;
        }
-
+       /* In target mode, check the last data frame of the sequence.
+        * For DDP in target mode, the data is already DDPed but the header
+        * indication of the last data frame allows us to tell whether we
+        * got all the data and the ULP can send FCP_RSP back. Since this is
+        * not a full FCoE frame, we fill in the trailer here so it won't be
+        * dropped by the ULP stack.
+        */
+       if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
+           (fctl & FC_FC_END_SEQ)) {
+               crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+               crc->fcoe_eof = FC_EOF_T;
+       }
 ddp_out:
        return rc;
 }
@@ -532,6 +614,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                        e_err(drv, "failed to allocated FCoE DDP pool\n");
 
                spin_lock_init(&fcoe->lock);
+
+               /* Extra buffer to be shared by all DDPs for HW work around */
+               fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+               if (fcoe->extra_ddp_buffer == NULL) {
+                       e_err(drv, "failed to allocate extra DDP buffer\n");
+                       goto out_extra_ddp_buffer_alloc;
+               }
+
+               fcoe->extra_ddp_buffer_dma =
+                       dma_map_single(&adapter->pdev->dev,
+                                      fcoe->extra_ddp_buffer,
+                                      IXGBE_FCBUFF_MIN,
+                                      DMA_FROM_DEVICE);
+               if (dma_mapping_error(&adapter->pdev->dev,
+                                     fcoe->extra_ddp_buffer_dma)) {
+                       e_err(drv, "failed to map extra DDP buffer\n");
+                       goto out_extra_ddp_buffer_dma;
+               }
        }
 
        /* Enable L2 eth type filter for FCoE */
@@ -581,6 +681,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                }
        }
 #endif
+
+       return;
+
+out_extra_ddp_buffer_dma:
+       kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+       pci_pool_destroy(fcoe->pool);
+       fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +708,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
        if (fcoe->pool) {
                for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
                        ixgbe_fcoe_ddp_put(adapter->netdev, i);
+               dma_unmap_single(&adapter->pdev->dev,
+                                fcoe->extra_ddp_buffer_dma,
+                                IXGBE_FCBUFF_MIN,
+                                DMA_FROM_DEVICE);
+               kfree(fcoe->extra_ddp_buffer);
                pci_pool_destroy(fcoe->pool);
                fcoe->pool = NULL;
        }
@@ -699,21 +812,6 @@ out_disable:
 }
 
 #ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- *
- * Finds out the corresponding user priority bitmap from the current
- * traffic class that FCoE belongs to. Returns 0 as the invalid user
- * priority bitmap to indicate an error.
- *
- * Returns : 802.1p user priority bitmap for FCoE
- */
-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
-{
-       return 1 << adapter->fcoe.up;
-}
-
 /**
  * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
  * @adapter : ixgbe adapter
@@ -791,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
        }
        return rc;
 }
-
-
index 4bc2c55..5a650a4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
 /* fcerr */
 #define IXGBE_FCERR_BADCRC       0x00100000
 
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET    1
+
 struct ixgbe_fcoe_ddp {
        int len;
        u32 err;
@@ -66,10 +69,13 @@ struct ixgbe_fcoe {
        u8 tc;
        u8 up;
 #endif
+       unsigned long mode;
        atomic_t refcnt;
        spinlock_t lock;
        struct pci_pool *pool;
        struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+       unsigned char *extra_ddp_buffer;
+       dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
index 602078b..f17e4a7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,9 +52,10 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "3.0.12-k2"
+#define DRV_VERSION "3.2.9-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
+static const char ixgbe_copyright[] =
+                               "Copyright (c) 1999-2011 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -648,10 +649,10 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
  *
  * Returns : a tc index for use in range 0-7, or 0-3
  */
-u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
+static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
        int tc = -1;
-       int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+       int dcb_i = netdev_get_num_tc(adapter->netdev);
 
        /* if DCB is not enabled the queues have no TC */
        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
@@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 
                i--;
                for (; i >= 0; i--) {
+                       /* free only the irqs that were actually requested */
+                       if (!adapter->q_vector[i]->rxr_count &&
+                           !adapter->q_vector[i]->txr_count)
+                               continue;
+
                        free_irq(adapter->msix_entries[i].vector,
                                 adapter->q_vector[i]);
                }
@@ -2886,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
                                        );
 
        switch (mask) {
+#ifdef CONFIG_IXGBE_DCB
+       case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
+               mrqc = IXGBE_MRQC_RTRSS8TCEN;
+               break;
+       case (IXGBE_FLAG_DCB_ENABLED):
+               mrqc = IXGBE_MRQC_RT8TCEN;
+               break;
+#endif /* CONFIG_IXGBE_DCB */
        case (IXGBE_FLAG_RSS_ENABLED):
                mrqc = IXGBE_MRQC_RSSEN;
                break;
        case (IXGBE_FLAG_SRIOV_ENABLED):
                mrqc = IXGBE_MRQC_VMDQEN;
                break;
-#ifdef CONFIG_IXGBE_DCB
-       case (IXGBE_FLAG_DCB_ENABLED):
-               mrqc = IXGBE_MRQC_RT8TCEN;
-               break;
-#endif /* CONFIG_IXGBE_DCB */
        default:
                break;
        }
@@ -3077,6 +3086,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
 
+       /* If operating in IOV mode set RLPML for X540 */
+       if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+           hw->mac.type == ixgbe_mac_X540) {
+               rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+               rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+                           ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+       }
+
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /*
                 * enable cache line friendly hardware writes:
@@ -3176,9 +3193,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
        u32 mhadd, hlreg0;
 
        /* Decide whether to use packet split mode or not */
+       /* On by default */
+       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
        /* Do not use packet split if we're in SR-IOV Mode */
-       if (!adapter->num_vfs)
-               adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+       if (adapter->num_vfs)
+               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+       /* Disable packet split due to 82599 erratum #45 */
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3634,15 +3658,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-#ifdef CONFIG_FCOE
-       if (adapter->netdev->features & NETIF_F_FCOE_MTU)
-               max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
-#endif
-
-       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
-                                       DCB_TX_CONFIG);
-       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
-                                       DCB_RX_CONFIG);
 
        /* Enable VLAN tag insert/strip */
        adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
@@ -3650,7 +3665,43 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
        /* reconfigure the hardware */
-       ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
+       if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
+#ifdef CONFIG_FCOE
+               if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+                       max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+               ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+                                               DCB_TX_CONFIG);
+               ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+                                               DCB_RX_CONFIG);
+               ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
+       } else {
+               struct net_device *dev = adapter->netdev;
+
+               if (adapter->ixgbe_ieee_ets)
+                       dev->dcbnl_ops->ieee_setets(dev,
+                                                   adapter->ixgbe_ieee_ets);
+               if (adapter->ixgbe_ieee_pfc)
+                       dev->dcbnl_ops->ieee_setpfc(dev,
+                                                   adapter->ixgbe_ieee_pfc);
+       }
+
+       /* Enable RSS Hash per TC */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               int i;
+               u32 reg = 0;
+
+               for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+                       u8 msb = 0;
+                       u8 cnt = adapter->netdev->tc_to_txq[i].count;
+
+                       while (cnt >>= 1)
+                               msb++;
+
+                       reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+               }
+               IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+       }
 }
 
 #endif
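The per-TC loop added above computes the most significant bit of each traffic class's Tx queue count, effectively floor(log2(count)), before packing it into the RQTC register. A standalone illustration of just that computation (the IXGBE_RQTC_SHIFT_TC packing is omitted here):

#include <stdio.h>

int main(void)
{
	unsigned int counts[] = { 1, 2, 4, 8, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned int cnt = counts[i];
		unsigned char msb = 0;

		while (cnt >>= 1)	/* same loop as in the hunk above */
			msb++;
		printf("count=%2u -> msb=%u\n", counts[i], msb);
	}
	return 0;
}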
@@ -3721,7 +3772,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
                         * We need to try and force an autonegotiation
                         * session, then bring up link.
                         */
-                       hw->mac.ops.setup_sfp(hw);
+                       if (hw->mac.ops.setup_sfp)
+                               hw->mac.ops.setup_sfp(hw);
                        if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
                                schedule_work(&adapter->multispeed_fiber_task);
                } else {
@@ -3753,7 +3805,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
        if (ret)
                goto link_cfg_out;
 
-       if (hw->mac.ops.get_link_capabilities)
+       autoneg = hw->phy.autoneg_advertised;
+       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
                ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                                                        &negotiation);
        if (ret)
@@ -3868,7 +3921,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         * If we're not hot-pluggable SFP+, we just need to configure link
         * and bring it up.
         */
-       if (hw->phy.type == ixgbe_phy_unknown)
+       if (hw->phy.type == ixgbe_phy_none)
                schedule_work(&adapter->sfp_config_module_task);
 
        /* enable transmits */
@@ -4235,24 +4288,6 @@ static void ixgbe_reset_task(struct work_struct *work)
        ixgbe_reinit_locked(adapter);
 }
 
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
-       bool ret = false;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-
-       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-               return ret;
-
-       f->mask = 0x7 << 3;
-       adapter->num_rx_queues = f->indices;
-       adapter->num_tx_queues = f->indices;
-       ret = true;
-
-       return ret;
-}
-#endif
-
 /**
  * ixgbe_set_rss_queues: Allocate queues for RSS
  * @adapter: board private structure to initialize
@@ -4323,19 +4358,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
  **/
 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 {
-       bool ret = false;
        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
 
-       f->indices = min((int)num_online_cpus(), f->indices);
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-               adapter->num_rx_queues = 1;
-               adapter->num_tx_queues = 1;
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return false;
+
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       e_info(probe, "FCoE enabled with DCB\n");
-                       ixgbe_set_dcb_queues(adapter);
-               }
+               int tc;
+               struct net_device *dev = adapter->netdev;
+
+               tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+               f->indices = dev->tc_to_txq[tc].count;
+               f->mask = dev->tc_to_txq[tc].offset;
 #endif
+       } else {
+               f->indices = min((int)num_online_cpus(), f->indices);
+
+               adapter->num_rx_queues = 1;
+               adapter->num_tx_queues = 1;
+
                if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                        e_info(probe, "FCoE enabled with RSS\n");
                        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@@ -4348,14 +4390,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
                f->mask = adapter->num_rx_queues;
                adapter->num_rx_queues += f->indices;
                adapter->num_tx_queues += f->indices;
+       }
 
-               ret = true;
+       return true;
+}
+#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+       bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
+       int i, q;
+
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return ret;
+
+       f->indices = 0;
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
+               f->indices += q;
        }
 
+       f->mask = 0x7 << 3;
+       adapter->num_rx_queues = f->indices;
+       adapter->num_tx_queues = f->indices;
+       ret = true;
+
+#ifdef IXGBE_FCOE
+       /* FCoE enabled queues require special configuration done through
+        * configure_fcoe() and others. Here we map FCoE indices onto the
+        * DCB queue pairs allowing FCoE to own configuration later.
+        */
+       ixgbe_set_fcoe_queues(adapter);
+#endif
+
        return ret;
 }
+#endif
 
-#endif /* IXGBE_FCOE */
 /**
  * ixgbe_set_sriov_queues: Allocate queues for IOV use
  * @adapter: board private structure to initialize
@@ -4391,16 +4464,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
        if (ixgbe_set_sriov_queues(adapter))
                goto done;
 
-#ifdef IXGBE_FCOE
-       if (ixgbe_set_fcoe_queues(adapter))
-               goto done;
-
-#endif /* IXGBE_FCOE */
 #ifdef CONFIG_IXGBE_DCB
        if (ixgbe_set_dcb_queues(adapter))
                goto done;
 
 #endif
+#ifdef IXGBE_FCOE
+       if (ixgbe_set_fcoe_queues(adapter))
+               goto done;
+
+#endif /* IXGBE_FCOE */
        if (ixgbe_set_fdir_queues(adapter))
                goto done;
 
@@ -4492,6 +4565,110 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 }
 
 #ifdef CONFIG_IXGBE_DCB
+
+/* ixgbe_get_first_reg_idx - return first Tx/Rx register indices for a traffic class */
+void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+                            unsigned int *tx, unsigned int *rx)
+{
+       struct net_device *dev = adapter->netdev;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 num_tcs = netdev_get_num_tc(dev);
+
+       *tx = 0;
+       *rx = 0;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               *tx = tc << 3;
+               *rx = tc << 2;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (num_tcs == 8) {
+                       if (tc < 3) {
+                               *tx = tc << 5;
+                               *rx = tc << 4;
+                       } else if (tc <  5) {
+                               *tx = ((tc + 2) << 4);
+                               *rx = tc << 4;
+                       } else if (tc < num_tcs) {
+                               *tx = ((tc + 8) << 3);
+                               *rx = tc << 4;
+                       }
+               } else if (num_tcs == 4) {
+                       *rx =  tc << 5;
+                       switch (tc) {
+                       case 0:
+                               *tx =  0;
+                               break;
+                       case 1:
+                               *tx = 64;
+                               break;
+                       case 2:
+                               *tx = 96;
+                               break;
+                       case 3:
+                               *tx = 112;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+               break;
+       default:
+               break;
+       }
+}
+
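For 82599/X540 in 8-TC mode, the shifts above encode the fixed descriptor-queue layout that the removed hand-written table in ixgbe_cache_ring_dcb() used to spell out (Tx queues per TC: 32, 32, 16, 16, 8, 8, 8, 8; Rx: 16 per TC). A standalone sketch that prints the resulting first register indices:

#include <stdio.h>

int main(void)
{
	unsigned int tc;

	for (tc = 0; tc < 8; tc++) {
		unsigned int rx = tc << 4;	/* Rx TCs sit 16 queues apart */
		unsigned int tx;

		if (tc < 3)
			tx = tc << 5;		/* TC0-TC2 start 32 queues apart */
		else if (tc < 5)
			tx = (tc + 2) << 4;	/* TC3-TC4 start 16 queues apart */
		else
			tx = (tc + 8) << 3;	/* TC5-TC7 start 8 queues apart */

		printf("TC%u: first Tx queue %3u, first Rx queue %3u\n",
		       tc, tx, rx);
	}
	return 0;
}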
+#define IXGBE_MAX_Q_PER_TC     (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+       int i;
+       unsigned int q, offset = 0;
+
+       if (!tc) {
+               netdev_reset_tc(dev);
+       } else {
+               struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+               /* Hardware supports up to 8 traffic classes */
+               if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
+                       return -EINVAL;
+
+               /* Partition Tx queues evenly amongst traffic classes */
+               for (i = 0; i < tc; i++) {
+                       q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
+                       netdev_set_prio_tc_map(dev, i, i);
+                       netdev_set_tc_queue(dev, i, q, offset);
+                       offset += q;
+               }
+
+               /* This enables multiple traffic class support in the
+                * hardware, which defaults to strict priority transmission.
+                * If traffic classes are already enabled, for example via
+                * the DCB code path, the existing configuration is used.
+                */
+               if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+                   dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
+                       struct ieee_ets ets = {
+                                       .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
+                                             };
+                       u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+
+                       dev->dcbnl_ops->setdcbx(dev, mode);
+                       dev->dcbnl_ops->ieee_setets(dev, &ets);
+               }
+       }
+       return 0;
+}
+
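ixgbe_setup_tc() above slices the Tx queue space evenly: each traffic class gets min(num_online_cpus(), IXGBE_MAX_Q_PER_TC) queues at consecutive offsets, and priority i is mapped to traffic class i. A standalone sketch with hypothetical numbers (8 classes, 16 online CPUs, a cap of 8 queues per class; the real cap derives from IXGBE_MAX_DCB_INDICES, whose value is not shown in this hunk):

#include <stdio.h>

#define NUM_TC		 8	/* hypothetical tc argument */
#define ONLINE_CPUS	16	/* hypothetical num_online_cpus() */
#define MAX_Q_PER_TC	 8	/* assumed IXGBE_MAX_Q_PER_TC */

int main(void)
{
	unsigned int tc, offset = 0;

	for (tc = 0; tc < NUM_TC; tc++) {
		unsigned int q = ONLINE_CPUS < MAX_Q_PER_TC ?
				 ONLINE_CPUS : MAX_Q_PER_TC;

		/* priority tc -> traffic class tc, queues [offset, offset+q) */
		printf("TC%u: %u Tx queues starting at %2u\n", tc, q, offset);
		offset += q;
	}
	return 0;
}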
 /**
  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
  * @adapter: board private structure to initialize
@@ -4501,72 +4678,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
  **/
 static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 {
-       int i;
-       bool ret = false;
-       int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+       struct net_device *dev = adapter->netdev;
+       int i, j, k;
+       u8 num_tcs = netdev_get_num_tc(dev);
 
        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
                return false;
 
-       /* the number of queues is assumed to be symmetric */
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82598EB:
-               for (i = 0; i < dcb_i; i++) {
-                       adapter->rx_ring[i]->reg_idx = i << 3;
-                       adapter->tx_ring[i]->reg_idx = i << 2;
-               }
-               ret = true;
-               break;
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               if (dcb_i == 8) {
-                       /*
-                        * Tx TC0 starts at: descriptor queue 0
-                        * Tx TC1 starts at: descriptor queue 32
-                        * Tx TC2 starts at: descriptor queue 64
-                        * Tx TC3 starts at: descriptor queue 80
-                        * Tx TC4 starts at: descriptor queue 96
-                        * Tx TC5 starts at: descriptor queue 104
-                        * Tx TC6 starts at: descriptor queue 112
-                        * Tx TC7 starts at: descriptor queue 120
-                        *
-                        * Rx TC0-TC7 are offset by 16 queues each
-                        */
-                       for (i = 0; i < 3; i++) {
-                               adapter->tx_ring[i]->reg_idx = i << 5;
-                               adapter->rx_ring[i]->reg_idx = i << 4;
-                       }
-                       for ( ; i < 5; i++) {
-                               adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
-                               adapter->rx_ring[i]->reg_idx = i << 4;
-                       }
-                       for ( ; i < dcb_i; i++) {
-                               adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
-                               adapter->rx_ring[i]->reg_idx = i << 4;
-                       }
-                       ret = true;
-               } else if (dcb_i == 4) {
-                       /*
-                        * Tx TC0 starts at: descriptor queue 0
-                        * Tx TC1 starts at: descriptor queue 64
-                        * Tx TC2 starts at: descriptor queue 96
-                        * Tx TC3 starts at: descriptor queue 112
-                        *
-                        * Rx TC0-TC3 are offset by 32 queues each
-                        */
-                       adapter->tx_ring[0]->reg_idx = 0;
-                       adapter->tx_ring[1]->reg_idx = 64;
-                       adapter->tx_ring[2]->reg_idx = 96;
-                       adapter->tx_ring[3]->reg_idx = 112;
-                       for (i = 0 ; i < dcb_i; i++)
-                               adapter->rx_ring[i]->reg_idx = i << 5;
-                       ret = true;
+       for (i = 0, k = 0; i < num_tcs; i++) {
+               unsigned int tx_s, rx_s;
+               u16 count = dev->tc_to_txq[i].count;
+
+               ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
+               for (j = 0; j < count; j++, k++) {
+                       adapter->tx_ring[k]->reg_idx = tx_s + j;
+                       adapter->rx_ring[k]->reg_idx = rx_s + j;
+                       adapter->tx_ring[k]->dcb_tc = i;
+                       adapter->rx_ring[k]->dcb_tc = i;
                }
-               break;
-       default:
-               break;
        }
-       return ret;
+
+       return true;
 }
 #endif
 
@@ -4612,33 +4744,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return false;
 
-#ifdef CONFIG_IXGBE_DCB
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-
-               ixgbe_cache_ring_dcb(adapter);
-               /* find out queues in TC for FCoE */
-               fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
-               fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
-               /*
-                * In 82599, the number of Tx queues for each traffic
-                * class for both 8-TC and 4-TC modes are:
-                * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
-                * 8 TCs:  32  32  16  16   8   8   8   8
-                * 4 TCs:  64  64  32  32
-                * We have max 8 queues for FCoE, where 8 the is
-                * FCoE redirection table size. If TC for FCoE is
-                * less than or equal to TC3, we have enough queues
-                * to add max of 8 queues for FCoE, so we start FCoE
-                * Tx queue from the next one, i.e., reg_idx + 1.
-                * If TC for FCoE is above TC3, implying 8 TC mode,
-                * and we need 8 for FCoE, we have to take all queues
-                * in that traffic class for FCoE.
-                */
-               if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
-                       fcoe_tx_i--;
-       }
-#endif /* CONFIG_IXGBE_DCB */
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
                    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
@@ -4695,16 +4800,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
        if (ixgbe_cache_ring_sriov(adapter))
                return;
 
+#ifdef CONFIG_IXGBE_DCB
+       if (ixgbe_cache_ring_dcb(adapter))
+               return;
+#endif
+
 #ifdef IXGBE_FCOE
        if (ixgbe_cache_ring_fcoe(adapter))
                return;
-
 #endif /* IXGBE_FCOE */
-#ifdef CONFIG_IXGBE_DCB
-       if (ixgbe_cache_ring_dcb(adapter))
-               return;
 
-#endif
        if (ixgbe_cache_ring_fdir(adapter))
                return;
 
@@ -4863,16 +4968,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 {
        int q_idx, num_q_vectors;
        struct ixgbe_q_vector *q_vector;
-       int napi_vectors;
        int (*poll)(struct napi_struct *, int);
 
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-               napi_vectors = adapter->num_rx_queues;
                poll = &ixgbe_clean_rxtx_many;
        } else {
                num_q_vectors = 1;
-               napi_vectors = 1;
                poll = &ixgbe_poll;
        }
 
@@ -5169,10 +5271,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
        adapter->dcb_cfg.rx_pba_cfg = pba_equal;
        adapter->dcb_cfg.pfc_mode_enable = false;
-       adapter->dcb_cfg.round_robin_enable = false;
        adapter->dcb_set_bitmap = 0x00;
+       adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
        ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-                          adapter->ring_feature[RING_F_DCB].indices);
+                          MAX_TRAFFIC_CLASS);
 
 #endif
 
@@ -5437,8 +5539,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* MTU < 68 is an error and causes problems on some kernels */
-       if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
-               return -EINVAL;
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+           hw->mac.type != ixgbe_mac_X540) {
+               if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+                       return -EINVAL;
+       } else {
+               if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+                       return -EINVAL;
+       }
 
        e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
        /* must set new MTU before calling down or up */
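The reworked check above applies two different limits: with SR-IOV enabled on pre-X540 parts the frame must fit MAXIMUM_ETHERNET_VLAN_SIZE, otherwise the usual jumbo limit IXGBE_MAX_JUMBO_FRAME_SIZE applies. A standalone sketch of the arithmetic, taking 1522 and 9728 as assumed values for those limits (they are defined elsewhere in the driver, not in this hunk):

#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		 4
#define VLAN_FRAME_LIMIT	1522	/* assumed MAXIMUM_ETHERNET_VLAN_SIZE */
#define JUMBO_FRAME_LIMIT	9728	/* assumed IXGBE_MAX_JUMBO_FRAME_SIZE */

static int mtu_ok(int new_mtu, int sriov_on_pre_x540)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int limit = sriov_on_pre_x540 ? VLAN_FRAME_LIMIT : JUMBO_FRAME_LIMIT;

	return new_mtu >= 68 && max_frame <= limit;
}

int main(void)
{
	printf("mtu 9000, no SR-IOV:       %s\n", mtu_ok(9000, 0) ? "ok" : "rejected");
	printf("mtu 9000, SR-IOV on 82599: %s\n", mtu_ok(9000, 1) ? "ok" : "rejected");
	printf("mtu 1500, SR-IOV on 82599: %s\n", mtu_ok(1500, 1) ? "ok" : "rejected");
	return 0;
}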
@@ -5606,6 +5714,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        }
 
        ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+       kfree(adapter->ixgbe_ieee_pfc);
+       kfree(adapter->ixgbe_ieee_ets);
+#endif
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -5964,7 +6076,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
                unregister_netdev(adapter->netdev);
                return;
        }
-       hw->mac.ops.setup_sfp(hw);
+       if (hw->mac.ops.setup_sfp)
+               hw->mac.ops.setup_sfp(hw);
 
        if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
                /* This will also work for DA Twinax connections */
@@ -6095,12 +6208,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
                               "10 Gbps" :
                               (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-                              "1 Gbps" : "unknown speed")),
+                              "1 Gbps" :
+                              (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+                              "100 Mbps" :
+                              "unknown speed"))),
                               ((flow_rx && flow_tx) ? "RX/TX" :
                               (flow_rx ? "RX" :
                               (flow_tx ? "TX" : "None"))));
 
                        netif_carrier_on(netdev);
+                       ixgbe_check_vf_rate_limit(adapter);
                } else {
                        /* Force detection of hung controller */
                        for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -6630,18 +6747,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 
        protocol = vlan_get_protocol(skb);
 
-       if ((protocol == htons(ETH_P_FCOE)) ||
-           (protocol == htons(ETH_P_FIP))) {
-               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-                       txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-                       txq += adapter->ring_feature[RING_F_FCOE].mask;
-                       return txq;
-#ifdef CONFIG_IXGBE_DCB
-               } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       txq = adapter->fcoe.up;
-                       return txq;
-#endif
-               }
+       if (((protocol == htons(ETH_P_FCOE)) ||
+           (protocol == htons(ETH_P_FIP))) &&
+           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+               txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+               txq += adapter->ring_feature[RING_F_FCOE].mask;
+               return txq;
        }
 #endif
 
@@ -6651,15 +6762,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
                return txq;
        }
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               if (skb->priority == TC_PRIO_CONTROL)
-                       txq = adapter->ring_feature[RING_F_DCB].indices-1;
-               else
-                       txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
-                              >> 13;
-               return txq;
-       }
-
        return skb_tx_hash(dev, skb);
 }
 
@@ -6681,13 +6783,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                tx_flags |= vlan_tx_tag_get(skb);
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-                       tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+                       tx_flags |= tx_ring->dcb_tc << 13;
                }
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
                   skb->priority != TC_PRIO_CONTROL) {
-               tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+               tx_flags |= tx_ring->dcb_tc << 13;
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
@@ -6696,20 +6798,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        /* for FCoE with DCB, we force the priority to what
         * was specified by the switch */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-           (protocol == htons(ETH_P_FCOE) ||
-            protocol == htons(ETH_P_FIP))) {
-#ifdef CONFIG_IXGBE_DCB
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-                                     << IXGBE_TX_FLAGS_VLAN_SHIFT);
-                       tx_flags |= ((adapter->fcoe.up << 13)
-                                     << IXGBE_TX_FLAGS_VLAN_SHIFT);
-               }
-#endif
-               /* flag for FCoE offloads */
-               if (protocol == htons(ETH_P_FCOE))
-                       tx_flags |= IXGBE_TX_FLAGS_FCOE;
-       }
+           (protocol == htons(ETH_P_FCOE)))
+               tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #endif
 
        /* four things can cause us to need a context descriptor */
@@ -6982,11 +7072,15 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_vf_tx_rate     = ixgbe_ndo_set_vf_bw,
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
+#ifdef CONFIG_IXGBE_DCB
+       .ndo_setup_tc           = ixgbe_setup_tc,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+       .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
        .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
        .ndo_fcoe_enable = ixgbe_fcoe_enable,
        .ndo_fcoe_disable = ixgbe_fcoe_disable,
@@ -7122,8 +7216,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        else
                indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
 
+#if defined(CONFIG_DCB)
        indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#ifdef IXGBE_FCOE
+#elif defined(IXGBE_FCOE)
        indices += min_t(unsigned int, num_possible_cpus(),
                         IXGBE_MAX_FCOE_INDICES);
 #endif
@@ -7279,8 +7374,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
                                    IXGBE_FLAG_DCB_ENABLED);
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 
 #ifdef CONFIG_IXGBE_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
@@ -7700,16 +7793,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
 #endif /* CONFIG_IXGBE_DCA */
 
-/**
- * ixgbe_get_hw_dev return device
- * used by hardware layer to print debugging information
- **/
-struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
-{
-       struct ixgbe_adapter *adapter = hw->back;
-       return adapter->netdev;
-}
-
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
index ea82c5a..1ff0eef 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
                udelay(mbx->usec_delay);
        }
 
-       /* if we failed, all future posted messages fail until reset */
-       if (!countdown)
-               mbx->timeout = 0;
 out:
        return countdown ? 0 : IXGBE_ERR_MBX;
 }
@@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
                udelay(mbx->usec_delay);
        }
 
-       /* if we failed, all future posted messages fail until reset */
-       if (!countdown)
-               mbx->timeout = 0;
 out:
        return countdown ? 0 : IXGBE_ERR_MBX;
 }
@@ -437,6 +431,7 @@ out_no_read:
        return ret_val;
 }
 
+#ifdef CONFIG_PCI_IOV
 /**
  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
  *  @hw: pointer to the HW structure
@@ -447,24 +442,22 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               mbx->timeout = 0;
-               mbx->usec_delay = 0;
+       if (hw->mac.type != ixgbe_mac_82599EB &&
+           hw->mac.type != ixgbe_mac_X540)
+               return;
 
-               mbx->size = IXGBE_VFMAILBOX_SIZE;
+       mbx->timeout = 0;
+       mbx->usec_delay = 0;
 
-               mbx->stats.msgs_tx = 0;
-               mbx->stats.msgs_rx = 0;
-               mbx->stats.reqs = 0;
-               mbx->stats.acks = 0;
-               mbx->stats.rsts = 0;
-               break;
-       default:
-               break;
-       }
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
 }
+#endif /* CONFIG_PCI_IOV */
 
 struct ixgbe_mbx_operations mbx_ops_generic = {
        .read                   = ixgbe_read_mbx_pf,
index 3df9b15..fe6ea81 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
 s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
 
 extern struct ixgbe_mbx_operations mbx_ops_generic;
 
index 8f7123e..f72f705 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u32 phy_addr;
+       u16 ext_ability = 0;
 
        if (hw->phy.type == ixgbe_phy_unknown) {
                for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
                                ixgbe_get_phy_id(hw);
                                hw->phy.type =
                                        ixgbe_get_phy_type_from_id(hw->phy.id);
+
+                               if (hw->phy.type == ixgbe_phy_unknown) {
+                                       hw->phy.ops.read_reg(hw,
+                                                            MDIO_PMA_EXTABLE,
+                                                            MDIO_MMD_PMAPMD,
+                                                            &ext_ability);
+                                       if (ext_ability &
+                                           (MDIO_PMA_EXTABLE_10GBT |
+                                            MDIO_PMA_EXTABLE_1000BT))
+                                               hw->phy.type =
+                                                        ixgbe_phy_cu_unknown;
+                                       else
+                                               hw->phy.type =
+                                                        ixgbe_phy_generic;
+                               }
+
                                status = 0;
                                break;
                        }
                }
                /* clear value if nothing found */
-               hw->phy.mdio.prtad = 0;
+               if (status != 0)
+                       hw->phy.mdio.prtad = 0;
        } else {
                status = 0;
        }
@@ -138,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
  **/
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 {
+       u32 i;
+       u16 ctrl = 0;
+       s32 status = 0;
+
+       if (hw->phy.type == ixgbe_phy_unknown)
+               status = ixgbe_identify_phy_generic(hw);
+
+       if (status != 0 || hw->phy.type == ixgbe_phy_none)
+               goto out;
+
        /* Don't reset PHY if it's shut down due to overtemp. */
        if (!hw->phy.reset_if_overtemp &&
            (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
-               return 0;
+               goto out;
 
        /*
         * Perform soft PHY reset to the PHY_XS.
         * This will cause a soft reset to the PHY
         */
-       return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
-                                    MDIO_CTRL1_RESET);
+       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+                             MDIO_MMD_PHYXS,
+                             MDIO_CTRL1_RESET);
+
+       /*
+        * Poll for reset bit to self-clear indicating reset is complete.
+        * Some PHYs could take up to 3 seconds to complete and need about
+        * 1.7 usec delay after the reset is complete.
+        */
+       for (i = 0; i < 30; i++) {
+               msleep(100);
+               hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+                                    MDIO_MMD_PHYXS, &ctrl);
+               if (!(ctrl & MDIO_CTRL1_RESET)) {
+                       udelay(2);
+                       break;
+               }
+       }
+
+       if (ctrl & MDIO_CTRL1_RESET) {
+               status = IXGBE_ERR_RESET_FAILED;
+               hw_dbg(hw, "PHY reset polling failed to complete.\n");
+       }
+
+out:
+       return status;
 }
 
 /**
@@ -171,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        else
                gssr = IXGBE_GSSR_PHY0_SM;
 
-       if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -243,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                        }
                }
 
-               ixgbe_release_swfw_sync(hw, gssr);
+               hw->mac.ops.release_swfw_sync(hw, gssr);
        }
 
        return status;
@@ -269,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        else
                gssr = IXGBE_GSSR_PHY0_SM;
 
-       if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -336,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                        }
                }
 
-               ixgbe_release_swfw_sync(hw, gssr);
+               hw->mac.ops.release_swfw_sync(hw, gssr);
        }
 
        return status;
@@ -350,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
  **/
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 {
-       s32 status = IXGBE_NOT_IMPLEMENTED;
+       s32 status = 0;
        u32 time_out;
        u32 max_time_out = 10;
-       u16 autoneg_reg;
+       u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+       bool autoneg = false;
+       ixgbe_link_speed speed;
 
-       /*
-        * Set advertisement settings in PHY based on autoneg_advertised
-        * settings. If autoneg_advertised = 0, then advertise default values
-        * tnx devices cannot be "forced" to a autoneg 10G and fail.  But can
-        * for a 1G.
-        */
-       hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
+       ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+               /* Set or unset auto-negotiation 10G advertisement */
+               hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+                                    MDIO_MMD_AN,
+                                    &autoneg_reg);
 
-       if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
                autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
-       else
-               autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+                       autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+               hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
+                                     MDIO_MMD_AN,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+               /* Set or unset auto-negotiation 1G advertisement */
+               hw->phy.ops.read_reg(hw,
+                                    IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+                                    MDIO_MMD_AN,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+               hw->phy.ops.write_reg(hw,
+                                     IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+                                     MDIO_MMD_AN,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_100_FULL) {
+               /* Set or unset auto-negotiation 100M advertisement */
+               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                                    MDIO_MMD_AN,
+                                    &autoneg_reg);
 
-       hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
+               autoneg_reg &= ~ADVERTISE_100FULL;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+                       autoneg_reg |= ADVERTISE_100FULL;
+
+               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+                                     MDIO_MMD_AN,
+                                     autoneg_reg);
+       }
 
        /* Restart PHY autonegotiation and wait for completion */
-       hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg);
+       hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+                            MDIO_MMD_AN, &autoneg_reg);
 
        autoneg_reg |= MDIO_AN_CTRL1_RESTART;
 
-       hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg);
+       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+                             MDIO_MMD_AN, autoneg_reg);
 
        /* Wait for autonegotiation to finish */
        for (time_out = 0; time_out < max_time_out; time_out++) {
                udelay(10);
                /* Restart PHY autonegotiation and wait for completion */
-               status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
-                                             &autoneg_reg);
+               status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
+                                             MDIO_MMD_AN,
+                                             &autoneg_reg);
 
                autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
                if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
-                       status = 0;
                        break;
                }
        }
 
-       if (time_out == max_time_out)
+       if (time_out == max_time_out) {
                status = IXGBE_ERR_LINK_SETUP;
+               hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+       }
 
        return status;
 }
@@ -421,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
        if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
+       if (speed & IXGBE_LINK_SPEED_100_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
        /* Setup link based on the new speed settings */
        hw->phy.ops.setup_link(hw);
 
@@ -460,6 +555,180 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
        return status;
 }
 
+/**
+ *  ixgbe_check_phy_link_tnx - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the VS1 register to determine if link is up and the current speed for
+ *  the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                            bool *link_up)
+{
+       s32 status = 0;
+       u32 time_out;
+       u32 max_time_out = 10;
+       u16 phy_link = 0;
+       u16 phy_speed = 0;
+       u16 phy_data = 0;
+
+       /* Initialize speed and link to default case */
+       *link_up = false;
+       *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+       /*
+        * Check current speed and link status of the PHY register.
+        * This is a vendor specific register and may have to
+        * be changed for other copper PHYs.
+        */
+       for (time_out = 0; time_out < max_time_out; time_out++) {
+               udelay(10);
+               status = hw->phy.ops.read_reg(hw,
+                                             MDIO_STAT1,
+                                             MDIO_MMD_VEND1,
+                                             &phy_data);
+               phy_link = phy_data &
+                           IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+               phy_speed = phy_data &
+                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+               if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+                       *link_up = true;
+                       if (phy_speed ==
+                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+                               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+                       break;
+               }
+       }
+
+       return status;
+}
+
+/**
+ *     ixgbe_setup_phy_link_tnx - Set and restart autoneg
+ *     @hw: pointer to hardware structure
+ *
+ *     Restarts autonegotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+       s32 status = 0;
+       u32 time_out;
+       u32 max_time_out = 10;
+       u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+       bool autoneg = false;
+       ixgbe_link_speed speed;
+
+       ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+               /* Set or unset auto-negotiation 10G advertisement */
+               hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+                                    MDIO_MMD_AN,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+                       autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+               hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
+                                     MDIO_MMD_AN,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+               /* Set or unset auto-negotiation 1G advertisement */
+               hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+                                    MDIO_MMD_AN,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+               hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+                                     MDIO_MMD_AN,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_100_FULL) {
+               /* Set or unset auto-negotiation 100M advertisement */
+               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                                    MDIO_MMD_AN,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~ADVERTISE_100FULL;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+                       autoneg_reg |= ADVERTISE_100FULL;
+
+               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+                                     MDIO_MMD_AN,
+                                     autoneg_reg);
+       }
+
+       /* Restart PHY autonegotiation and wait for completion */
+       hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+                            MDIO_MMD_AN, &autoneg_reg);
+
+       autoneg_reg |= MDIO_AN_CTRL1_RESTART;
+
+       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+                             MDIO_MMD_AN, autoneg_reg);
+
+       /* Wait for autonegotiation to finish */
+       for (time_out = 0; time_out < max_time_out; time_out++) {
+               udelay(10);
+               /* Restart PHY autonegotiation and wait for completion */
+               status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
+                                             MDIO_MMD_AN,
+                                             &autoneg_reg);
+
+               autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
+               if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
+                       break;
+       }
+
+       if (time_out == max_time_out) {
+               status = IXGBE_ERR_LINK_SETUP;
+               hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+                                      u16 *firmware_version)
+{
+       s32 status = 0;
+
+       status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+                                     MDIO_MMD_VEND1,
+                                     firmware_version);
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                          u16 *firmware_version)
+{
+       s32 status = 0;
+
+       status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+                                     MDIO_MMD_VEND1,
+                                     firmware_version);
+
+       return status;
+}
+
 /**
  *  ixgbe_reset_phy_nl - Performs a PHY reset
  *  @hw: pointer to hardware structure
@@ -556,11 +825,10 @@ out:
 }
 
 /**
- *  ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns
- *                                      the PHY type.
+ *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
  *  @hw: pointer to hardware structure
  *
- *  Searches for and indentifies the SFP module.  Assings appropriate PHY type.
+ *  Searches for and identifies the SFP module and assigns appropriate PHY type.
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
@@ -581,41 +849,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                goto out;
        }
 
-       status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_IDENTIFIER,
                                             &identifier);
 
-       if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
-               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               if (hw->phy.type != ixgbe_phy_nl) {
-                       hw->phy.id = 0;
-                       hw->phy.type = ixgbe_phy_unknown;
-               }
-               goto out;
-       }
+       if (status == IXGBE_ERR_SWFW_SYNC ||
+           status == IXGBE_ERR_I2C ||
+           status == IXGBE_ERR_SFP_NOT_PRESENT)
+               goto err_read_i2c_eeprom;
 
-       if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
-                                           &comp_codes_1g);
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
-                                           &comp_codes_10g);
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
-                                           &cable_tech);
-
-               /* ID Module
-                * =========
-                * 0    SFP_DA_CU
-                * 1    SFP_SR
-                * 2    SFP_LR
-                * 3    SFP_DA_CORE0 - 82599-specific
-                * 4    SFP_DA_CORE1 - 82599-specific
-                * 5    SFP_SR/LR_CORE0 - 82599-specific
-                * 6    SFP_SR/LR_CORE1 - 82599-specific
-                * 7    SFP_act_lmt_DA_CORE0 - 82599-specific
-                * 8    SFP_act_lmt_DA_CORE1 - 82599-specific
-                * 9    SFP_1g_cu_CORE0 - 82599-specific
-                * 10   SFP_1g_cu_CORE1 - 82599-specific
-                */
+       /* LAN ID is needed for sfp_type determination */
+       hw->mac.ops.set_lan_id(hw);
+
+       if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+       } else {
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_1GBE_COMP_CODES,
+                                                    &comp_codes_1g);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_10GBE_COMP_CODES,
+                                                    &comp_codes_10g);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_CABLE_TECHNOLOGY,
+                                                    &cable_tech);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+
+                /* ID Module
+                 * =========
+                 * 0   SFP_DA_CU
+                 * 1   SFP_SR
+                 * 2   SFP_LR
+                 * 3   SFP_DA_CORE0 - 82599-specific
+                 * 4   SFP_DA_CORE1 - 82599-specific
+                 * 5   SFP_SR/LR_CORE0 - 82599-specific
+                 * 6   SFP_SR/LR_CORE1 - 82599-specific
+                 * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
+                 * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
+                 * 9   SFP_1g_cu_CORE0 - 82599-specific
+                 * 10  SFP_1g_cu_CORE1 - 82599-specific
+                 */
                if (hw->mac.type == ixgbe_mac_82598EB) {
                        if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
                                hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -647,31 +936,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                                ixgbe_sfp_type_da_act_lmt_core1;
                                } else {
                                        hw->phy.sfp_type =
-                                               ixgbe_sfp_type_unknown;
+                                                       ixgbe_sfp_type_unknown;
                                }
-                       } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+                       } else if (comp_codes_10g &
+                                  (IXGBE_SFF_10GBASESR_CAPABLE |
+                                   IXGBE_SFF_10GBASELR_CAPABLE)) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
                                                      ixgbe_sfp_type_srlr_core0;
                                else
                                        hw->phy.sfp_type =
                                                      ixgbe_sfp_type_srlr_core1;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
-                       else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+                       } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
                                                ixgbe_sfp_type_1g_cu_core0;
                                else
                                        hw->phy.sfp_type =
                                                ixgbe_sfp_type_1g_cu_core1;
-                       else
+                       } else {
                                hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+                       }
                }
 
                if (hw->phy.sfp_type != stored_sfp_type)
@@ -688,16 +973,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                /* Determine PHY vendor */
                if (hw->phy.type != ixgbe_phy_nl) {
                        hw->phy.id = identifier;
-                       hw->phy.ops.read_i2c_eeprom(hw,
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE0,
                                                    &oui_bytes[0]);
-                       hw->phy.ops.read_i2c_eeprom(hw,
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE1,
                                                    &oui_bytes[1]);
-                       hw->phy.ops.read_i2c_eeprom(hw,
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE2,
                                                    &oui_bytes[2]);
 
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
                        vendor_oui =
                          ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
                           (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -707,7 +1009,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        case IXGBE_SFF_VENDOR_OUI_TYCO:
                                if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
                                        hw->phy.type =
-                                               ixgbe_phy_sfp_passive_tyco;
+                                                   ixgbe_phy_sfp_passive_tyco;
                                break;
                        case IXGBE_SFF_VENDOR_OUI_FTL:
                                if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -724,7 +1026,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        default:
                                if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
                                        hw->phy.type =
-                                               ixgbe_phy_sfp_passive_unknown;
+                                                ixgbe_phy_sfp_passive_unknown;
                                else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
                                        hw->phy.type =
                                                ixgbe_phy_sfp_active_unknown;
@@ -734,7 +1036,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        }
                }
 
-               /* All passive DA cables are supported */
+               /* Allow any DA cable vendor */
                if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
                    IXGBE_SFF_DA_ACTIVE_CABLE)) {
                        status = 0;
@@ -756,7 +1058,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        goto out;
                }
 
-               /* This is guaranteed to be 82599, no need to check for NULL */
                hw->mac.ops.get_device_caps(hw, &enforce_sfp);
                if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
                    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -776,15 +1077,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
 out:
        return status;
+
+err_read_i2c_eeprom:
+       hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+       if (hw->phy.type != ixgbe_phy_nl) {
+               hw->phy.id = 0;
+               hw->phy.type = ixgbe_phy_unknown;
+       }
+       return IXGBE_ERR_SFP_NOT_PRESENT;
 }
 
 /**
- *  ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see
- *  if it supports a given SFP+ module type, if so it returns the offsets to the
- *  phy init sequence block.
+ *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
  *  @hw: pointer to hardware structure
  *  @list_offset: offset to the SFP ID list
  *  @data_offset: offset to the SFP data block
+ *
+ *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type; if
+ *  so, it returns the offsets to the PHY init sequence block.
  **/
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
                                         u16 *list_offset,
@@ -899,11 +1209,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                 u8 dev_addr, u8 *data)
 {
        s32 status = 0;
-       u32 max_retry = 1;
+       u32 max_retry = 10;
        u32 retry = 0;
+       u16 swfw_mask = 0;
        bool nack = 1;
 
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               swfw_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               swfw_mask = IXGBE_GSSR_PHY0_SM;
+
        do {
+               if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+                       status = IXGBE_ERR_SWFW_SYNC;
+                       goto read_byte_out;
+               }
+
                ixgbe_i2c_start(hw);
 
                /* Device Address and write indication */
@@ -946,6 +1267,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                break;
 
 fail:
+               ixgbe_release_swfw_sync(hw, swfw_mask);
+               msleep(100);
                ixgbe_i2c_bus_clear(hw);
                retry++;
                if (retry < max_retry)
@@ -955,6 +1278,9 @@ fail:
 
        } while (retry < max_retry);
 
+       ixgbe_release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
        return status;
 }
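
A note on locking here: the retry path releases the PHY semaphore before the 100 ms back-off and re-acquires it at the top of the loop, presumably so that firmware or the other PF function is not locked out of the I2C bus while the driver waits. The write helper below selects the same LAN-ID-based semaphore (PHY0 or PHY1) but, with max_retry left at 1, holds it for its single attempt.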
 
@@ -973,6 +1299,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        s32 status = 0;
        u32 max_retry = 1;
        u32 retry = 0;
+       u16 swfw_mask = 0;
+
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               swfw_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+       if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+               status = IXGBE_ERR_SWFW_SYNC;
+               goto write_byte_out;
+       }
 
        do {
                ixgbe_i2c_start(hw);
@@ -1013,6 +1350,9 @@ fail:
                        hw_dbg(hw, "I2C byte write error.\n");
        } while (retry < max_retry);
 
+       ixgbe_release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
        return status;
 }
 
@@ -1331,6 +1671,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
        u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        u32 i;
 
+       ixgbe_i2c_start(hw);
+
        ixgbe_set_i2c_data(hw, &i2cctl, 1);
 
        for (i = 0; i < 9; i++) {
@@ -1345,90 +1687,12 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
                udelay(IXGBE_I2C_T_LOW);
        }
 
+       ixgbe_i2c_start(hw);
+
        /* Put the i2c bus back to default state */
        ixgbe_i2c_stop(hw);
 }
 
-/**
- *  ixgbe_check_phy_link_tnx - Determine link and speed status
- *  @hw: pointer to hardware structure
- *
- *  Reads the VS1 register to determine if link is up and the current speed for
- *  the PHY.
- **/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-                             bool *link_up)
-{
-       s32 status = 0;
-       u32 time_out;
-       u32 max_time_out = 10;
-       u16 phy_link = 0;
-       u16 phy_speed = 0;
-       u16 phy_data = 0;
-
-       /* Initialize speed and link to default case */
-       *link_up = false;
-       *speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-       /*
-        * Check current speed and link status of the PHY register.
-        * This is a vendor specific register and may have to
-        * be changed for other copper PHYs.
-        */
-       for (time_out = 0; time_out < max_time_out; time_out++) {
-               udelay(10);
-               status = hw->phy.ops.read_reg(hw,
-                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
-                                       MDIO_MMD_VEND1,
-                                       &phy_data);
-               phy_link = phy_data &
-                          IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
-               phy_speed = phy_data &
-                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
-               if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
-                       *link_up = true;
-                       if (phy_speed ==
-                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
-                               *speed = IXGBE_LINK_SPEED_1GB_FULL;
-                       break;
-               }
-       }
-
-       return status;
-}
-
-/**
- *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                       u16 *firmware_version)
-{
-       s32 status = 0;
-
-       status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
-                                     firmware_version);
-
-       return status;
-}
-
-/**
- *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
-**/
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-                                           u16 *firmware_version)
-{
-       s32 status = 0;
-
-       status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
-                                     firmware_version);
-
-       return status;
-}
-
 /**
  *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
  *  @hw: pointer to hardware structure
index e2c6b7e..197bdd1 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
 
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE                  0x400
+#define IXGBE_TAF_ASM_PAUSE                  0x800
+
 /* Bit-shift macros */
 #define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    24
 #define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    16
@@ -104,6 +108,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
                              ixgbe_link_speed *speed,
                              bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
                                        u16 *firmware_version);
 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
index 47b1573..6e50d83 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -110,12 +110,37 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
+void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int new_mtu = msgbuf[1];
+       u32 max_frs;
+       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+       /* Only X540 supports jumbo frames in IOV mode */
+       if (adapter->hw.mac.type != ixgbe_mac_X540)
+               return;
+
+       /* MTU < 68 is an error and causes problems on some kernels */
+       if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+               e_err(drv, "VF mtu %d out of range\n", new_mtu);
+               return;
+       }
+
+       max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+                  IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+       if (max_frs < new_mtu) {
+               max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+       }
+
+       e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
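
For reference, ETH_HLEN is 14 and ETH_FCS_LEN is 4, so a VF asking for a 9000-byte MTU implies a 9018-byte maximum frame; anything above IXGBE_MAX_JUMBO_FRAME_SIZE is rejected with the e_err above, and on any MAC other than the X540 the request is simply ignored.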
 
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
        vmolr |= (IXGBE_VMOLR_ROMPE |
-                 IXGBE_VMOLR_ROPE |
                  IXGBE_VMOLR_BAM);
        if (aupe)
                vmolr |= IXGBE_VMOLR_AUPE;
@@ -304,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                                                 hash_list, vf);
                break;
        case IXGBE_VF_SET_LPE:
-               WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+               ixgbe_set_vf_lpe(adapter, msgbuf);
                break;
        case IXGBE_VF_SET_VLAN:
                add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
@@ -453,9 +478,90 @@ out:
        return err;
 }
 
+static int ixgbe_link_mbps(int internal_link_speed)
+{
+       switch (internal_link_speed) {
+       case IXGBE_LINK_SPEED_100_FULL:
+               return 100;
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               return 1000;
+       case IXGBE_LINK_SPEED_10GB_FULL:
+               return 10000;
+       default:
+               return 0;
+       }
+}
+
+static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
+                                   int link_speed)
+{
+       int rf_dec, rf_int;
+       u32 bcnrc_val;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = link_speed / tx_rate;
+               rf_dec = (link_speed - (rf_int * tx_rate));
+               rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+                              IXGBE_RTTBCNRC_RF_INT_MASK);
+               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+}
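
As the IXGBE_RTTBCNRC_RF_* defines added above suggest, the value written to RTTBCNRC is a fixed-point divisor: rf_int is the integer part and rf_dec a 14-bit fraction, so the VF ends up throttled to roughly link_speed / (rf_int + rf_dec / 2^14). A minimal stand-alone sketch of that arithmetic (illustration only, not driver code; the 14-bit fraction width is taken from IXGBE_RTTBCNRC_RF_INT_SHIFT):

#include <stdio.h>

int main(void)
{
	unsigned int link_speed = 10000;   /* PF link speed in Mb/s */
	unsigned int tx_rate    = 2300;    /* requested VF limit in Mb/s */
	unsigned int rf_int = link_speed / tx_rate;
	unsigned int rf_dec = ((link_speed - rf_int * tx_rate) << 14) / tx_rate;

	/* effective divisor the hardware applies to the link rate */
	printf("rf_int=%u rf_dec=%u => ~%.1f Mb/s\n",
	       rf_int, rf_dec, link_speed / (rf_int + rf_dec / 16384.0));
	return 0;
}

For a 10 Gb/s link and a 2300 Mb/s request this prints rf_int=4 and rf_dec=5698, i.e. a divisor of about 4.35 and an effective limit of roughly 2300 Mb/s.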
+
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
+{
+       int actual_link_speed, i;
+       bool reset_rate = false;
+
+       /* VF Tx rate limit was not set */
+       if (adapter->vf_rate_link_speed == 0)
+               return;
+
+       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+       if (actual_link_speed != adapter->vf_rate_link_speed) {
+               reset_rate = true;
+               adapter->vf_rate_link_speed = 0;
+               dev_info(&adapter->pdev->dev,
+                        "Link speed has been changed. VF Transmit rate "
+                        "is disabled\n");
+       }
+
+       for (i = 0; i < adapter->num_vfs; i++) {
+               if (reset_rate)
+                       adapter->vfinfo[i].tx_rate = 0;
+
+               ixgbe_set_vf_rate_limit(&adapter->hw, i,
+                                       adapter->vfinfo[i].tx_rate,
+                                       actual_link_speed);
+       }
+}
+
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
-       return -EOPNOTSUPP;
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       int actual_link_speed;
+
+       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+       if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
+           (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
+           ((tx_rate != 0) && (tx_rate <= 10)))
+           /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+               return -EINVAL;
+
+       adapter->vf_rate_link_speed = actual_link_speed;
+       adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
+       ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+       return 0;
 }
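
For reference, this hook is reached through the per-VF Tx-rate netlink attribute; with iproute2 of this era it is typically exercised as ip link set <dev> vf <N> rate <Mb/s> (option name assumed from contemporary iproute2; later releases rename it max_tx_rate). Per the checks above, the PF link must be up at 10 Gb/s and the requested rate must be 0 (unlimited) or greater than 10 Mb/s.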
 
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
@@ -466,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
-       ivi->tx_rate = 0;
+       ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
        ivi->qos = adapter->vfinfo[vf].pf_qos;
        return 0;
index 49dc14d..3417556 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi);
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
 
 #endif /* _IXGBE_SRIOV_H_ */
 
index fd3358f..25c1fb7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -91,7 +91,7 @@
 
 /* General Receive Control */
 #define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
 
 #define IXGBE_VPDDIAG0  0x10204
 #define IXGBE_VPDDIAG1  0x10208
 /* Wake Up Control */
 #define IXGBE_WUC_PME_EN     0x00000002 /* PME Enable */
 #define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_ADVD3WUC   0x00000010 /* D3Cold wake up cap. enable*/
+#define IXGBE_WUC_WKEN       0x00000010 /* Enable PE_WAKE_N pin assertion  */
 
 /* Wake Up Filter Control */
 #define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
 #define IXGBE_RTTDTECC    0x04990
 #define IXGBE_RTTDTECC_NO_BCN   0x00000100
 #define IXGBE_RTTBCNRC    0x04984
+#define IXGBE_RTTBCNRC_RS_ENA  0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK     0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT    14
+#define IXGBE_RTTBCNRC_RF_INT_MASK     \
+       (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+
 
 /* FCoE registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
 #define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
 #define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */
 #define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */
 #define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_PCRC8ECL  0x0E810
+#define IXGBE_PCRC8ECH  0x0E811
+#define IXGBE_PCRC8ECH_MASK     0x1F
+#define IXGBE_LDPCECL   0x0E820
+#define IXGBE_LDPCECH   0x0E821
 
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT     0xC30C /* PHY_XS SDA/SCL Status Reg */
 
+/* MII clause 22/28 definitions */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG             0x17   /* 1G XNP Transmit */
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX      0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE             0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_AUTONEG_REG                    0x0
+
 #define IXGBE_PHY_REVISION_MASK        0xFFFFFFF0
 #define IXGBE_MAX_PHY_ADDR             32
 
 #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */
 
 /* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS   0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
 #define IXGBE_PCI_LINK_STATUS     0xB2
 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8
 #define IXGBE_PCI_LINK_WIDTH      0x3F0
 #define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
 #define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
 
 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
 #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -2240,6 +2264,7 @@ enum ixgbe_mac_type {
 
 enum ixgbe_phy_type {
        ixgbe_phy_unknown = 0,
+       ixgbe_phy_none,
        ixgbe_phy_tn,
        ixgbe_phy_aq,
        ixgbe_phy_cu_unknown,
@@ -2328,32 +2353,31 @@ enum ixgbe_bus_type {
 /* PCI bus speeds */
 enum ixgbe_bus_speed {
        ixgbe_bus_speed_unknown = 0,
-       ixgbe_bus_speed_33,
-       ixgbe_bus_speed_66,
-       ixgbe_bus_speed_100,
-       ixgbe_bus_speed_120,
-       ixgbe_bus_speed_133,
-       ixgbe_bus_speed_2500,
-       ixgbe_bus_speed_5000,
+       ixgbe_bus_speed_33      = 33,
+       ixgbe_bus_speed_66      = 66,
+       ixgbe_bus_speed_100     = 100,
+       ixgbe_bus_speed_120     = 120,
+       ixgbe_bus_speed_133     = 133,
+       ixgbe_bus_speed_2500    = 2500,
+       ixgbe_bus_speed_5000    = 5000,
        ixgbe_bus_speed_reserved
 };
 
 /* PCI bus widths */
 enum ixgbe_bus_width {
        ixgbe_bus_width_unknown = 0,
-       ixgbe_bus_width_pcie_x1,
-       ixgbe_bus_width_pcie_x2,
+       ixgbe_bus_width_pcie_x1 = 1,
+       ixgbe_bus_width_pcie_x2 = 2,
        ixgbe_bus_width_pcie_x4 = 4,
        ixgbe_bus_width_pcie_x8 = 8,
-       ixgbe_bus_width_32,
-       ixgbe_bus_width_64,
+       ixgbe_bus_width_32      = 32,
+       ixgbe_bus_width_64      = 64,
        ixgbe_bus_width_reserved
 };
 
 struct ixgbe_addr_filter_info {
        u32 num_mc_addrs;
        u32 rar_used_count;
-       u32 mc_addr_in_rar_count;
        u32 mta_in_use;
        u32 overflow_promisc;
        bool uc_set_promisc;
@@ -2491,6 +2515,8 @@ struct ixgbe_mac_operations {
        s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
        s32 (*setup_sfp)(struct ixgbe_hw *);
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+       s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+       void (*release_swfw_sync)(struct ixgbe_hw *, u16);
 
        /* Link */
        void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2513,7 +2539,6 @@ struct ixgbe_mac_operations {
        s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
-       s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
        s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
@@ -2554,6 +2579,7 @@ struct ixgbe_eeprom_info {
        u16                             address_bits;
 };
 
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED      0x01
 struct ixgbe_mac_info {
        struct ixgbe_mac_operations     ops;
        enum ixgbe_mac_type             type;
@@ -2564,6 +2590,8 @@ struct ixgbe_mac_info {
        u16                             wwnn_prefix;
        /* prefix for World Wide Port Name (WWPN) */
        u16                             wwpn_prefix;
+#define IXGBE_MAX_MTA                  128
+       u32                             mta_shadow[IXGBE_MAX_MTA];
        s32                             mc_filter_type;
        u32                             mcft_size;
        u32                             vft_size;
@@ -2576,6 +2604,7 @@ struct ixgbe_mac_info {
        u32                             orig_autoc2;
        bool                            orig_link_settings_stored;
        bool                            autotry_restart;
+       u8                              flags;
 };
 
 struct ixgbe_phy_info {
@@ -2682,7 +2711,9 @@ struct ixgbe_info {
 #define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_ERR_NO_SPACE                      -25
 #define IXGBE_ERR_OVERTEMP                      -26
-#define IXGBE_ERR_RAR_INDEX                     -27
+#define IXGBE_ERR_FC_NOT_NEGOTIATED             -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED              -28
+#define IXGBE_ERR_FLOW_CONTROL                  -29
 #define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
 #define IXGBE_ERR_PBA_SECTION                   -31
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
index 3a89239..f47e93f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
 
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
-//#include "ixgbe_mbx.h"
 
 #define IXGBE_X540_MAX_TX_QUEUES 128
 #define IXGBE_X540_MAX_RX_QUEUES 128
@@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
+       ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
        /*
         * Issue global reset to the MAC.  Needs to be SW reset if link is up.
         * If link reset is used when link is up, it might reset the PHY when
@@ -133,21 +129,34 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
        }
 
        ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-       IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
        IXGBE_WRITE_FLUSH(hw);
 
        /* Poll for reset bit to self-clear indicating reset is complete */
        for (i = 0; i < 10; i++) {
                udelay(1);
                ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-               if (!(ctrl & IXGBE_CTRL_RST))
+               if (!(ctrl & reset_bit))
                        break;
        }
-       if (ctrl & IXGBE_CTRL_RST) {
+       if (ctrl & reset_bit) {
                status = IXGBE_ERR_RESET_FAILED;
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }
 
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  We use 1usec since that is
+        * what is needed for ixgbe_disable_pcie_master().  The second reset
+        * then clears out any effects of those events.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               udelay(1);
+               goto mac_reset_top;
+       }
+
        /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
         * clear the multicast table.  Also reset num_rar_entries to 128,
         * since we modify this value when programming the SAN MAC address.
         */
-       hw->mac.num_rar_entries = 128;
+       hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
        hw->mac.ops.init_rx_addrs(hw);
 
        /* Store the permanent mac address */
@@ -242,8 +251,11 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
- * @hw: pointer to hardware structure
+ *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
  **/
 static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 {
@@ -262,7 +274,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
                                          IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
-                       eeprom->type, eeprom->word_size);
+                      eeprom->type, eeprom->word_size);
        }
 
        return 0;
@@ -278,7 +290,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
        s32 status;
 
-       if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
                status = ixgbe_read_eerd_generic(hw, offset, data);
        else
                status = IXGBE_ERR_SWFW_SYNC;
@@ -311,7 +323,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
               (data << IXGBE_EEPROM_RW_REG_DATA) |
               IXGBE_EEPROM_RW_REG_START;
 
-       if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status != 0) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
@@ -676,7 +688,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .set_vmdq               = &ixgbe_set_vmdq_generic,
        .clear_vmdq             = &ixgbe_clear_vmdq_generic,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
@@ -687,6 +698,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .setup_sfp              = NULL,
        .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
+       .release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
@@ -702,7 +715,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
        .identify               = &ixgbe_identify_phy_generic,
        .identify_sfp           = &ixgbe_identify_sfp_module_generic,
        .init                   = NULL,
-       .reset                  = &ixgbe_reset_phy_generic,
+       .reset                  = NULL,
        .read_reg               = &ixgbe_read_phy_reg_generic,
        .write_reg              = &ixgbe_write_phy_reg_generic,
        .setup_link             = &ixgbe_setup_phy_link_generic,
index de643eb..78abb6f 100644 (file)
@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
 #define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
 
 /* DCA Control */
 #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
index fa29b3c..0563ab2 100644 (file)
@@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = {
        "IXGBE_VFSTATUS",
        "IXGBE_VFLINKS",
        "IXGBE_VFRXMEMWRAP",
-       "IXGBE_VFRTIMER",
+       "IXGBE_VFFRTIMER",
        "IXGBE_VTEICR",
        "IXGBE_VTEICS",
        "IXGBE_VTEIMS",
@@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev,
        regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
        regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
-       regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+       regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
 
        /* Interrupt */
        /* don't read EICR because it can clear interrupt causes, instead
index a63efcb..b703f60 100644 (file)
@@ -207,7 +207,6 @@ struct ixgbevf_adapter {
        u64 hw_tso_ctxt;
        u64 hw_tso6_ctxt;
        u32 tx_timeout_count;
-       bool detect_tx_hung;
 
        /* RX */
        struct ixgbevf_ring *rx_ring;   /* One per active queue */
index 464e6c9..054ab05 100644 (file)
@@ -49,9 +49,9 @@
 
 char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
-       "Intel(R) 82599 Virtual Function";
+       "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "1.0.19-k0"
+#define DRV_VERSION "2.0.0-k2"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 }
 
 /*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
  * @adapter: pointer to adapter struct
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
@@ -162,42 +162,6 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
        /* tx_buffer_info must be completely set up in the transmit path */
 }
 
-static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
-                                        struct ixgbevf_ring *tx_ring,
-                                        unsigned int eop)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 head, tail;
-
-       /* Detect a transmit hang in hardware, this serializes the
-        * check with the clearing of time_stamp and movement of eop */
-       head = readl(hw->hw_addr + tx_ring->head);
-       tail = readl(hw->hw_addr + tx_ring->tail);
-       adapter->detect_tx_hung = false;
-       if ((head != tail) &&
-           tx_ring->tx_buffer_info[eop].time_stamp &&
-           time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
-               /* detected Tx unit hang */
-               union ixgbe_adv_tx_desc *tx_desc;
-               tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-               printk(KERN_ERR "Detected Tx Unit Hang\n"
-                      "  Tx Queue             <%d>\n"
-                      "  TDH, TDT             <%x>, <%x>\n"
-                      "  next_to_use          <%x>\n"
-                      "  next_to_clean        <%x>\n"
-                      "tx_buffer_info[next_to_clean]\n"
-                      "  time_stamp           <%lx>\n"
-                      "  jiffies              <%lx>\n",
-                      tx_ring->queue_index,
-                      head, tail,
-                      tx_ring->next_to_use, eop,
-                      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
-               return true;
-       }
-
-       return false;
-}
-
 #define IXGBE_MAX_TXD_PWR      14
 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
 
@@ -293,16 +257,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
 #endif
        }
 
-       if (adapter->detect_tx_hung) {
-               if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
-                       /* schedule immediate reset if we believe we hung */
-                       printk(KERN_INFO
-                              "tx hang %d detected, resetting adapter\n",
-                              adapter->tx_timeout_count + 1);
-                       ixgbevf_tx_timeout(adapter->netdev);
-               }
-       }
-
        /* re-arm the interrupt */
        if ((count >= tx_ring->work_limit) &&
            (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
@@ -334,7 +288,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-       int ret;
 
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan)
@@ -345,9 +298,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                        napi_gro_receive(&q_vector->napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan)
-                       ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+                       vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
-                       ret = netif_rx(skb);
+                       netif_rx(skb);
        }
 }
 
@@ -1017,7 +970,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 }
 
 /**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
  * @irq: unused
  * @data: pointer to our q_vector struct for this interrupt vector
  **/
@@ -1665,6 +1618,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                j = adapter->rx_ring[i].reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
+               if (hw->mac.type == ixgbe_mac_X540_vf) {
+                       rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+                       rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+                                  IXGBE_RXDCTL_RLPML_EN);
+               }
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
                ixgbevf_rx_desc_queue_enable(adapter, i);
        }
@@ -1967,7 +1925,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 }
 
 /*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
  * @adapter: board private structure to initialize
  *
  * This is the top level queue allocation routine.  The order here is very
@@ -2216,7 +2174,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+       hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
 
@@ -2410,9 +2368,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
                               10 : 1);
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
-               } else {
-                       /* Force detection of hung controller */
-                       adapter->detect_tx_hung = true;
                }
        } else {
                adapter->link_up = false;
@@ -2427,9 +2382,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
        ixgbevf_update_stats(adapter);
 
 pf_has_reset:
-       /* Force detection of hung controller every watchdog period */
-       adapter->detect_tx_hung = true;
-
        /* Reset the timer */
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
@@ -3217,10 +3169,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+       u32 msg[2];
+
+       if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+               max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
 
        /* MTU < 68 is an error and causes problems on some kernels */
-       if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+       if ((new_mtu < 68) || (max_frame > max_possible_frame))
                return -EINVAL;
 
        hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3186,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       msg[0] = IXGBE_VF_SET_LPE;
+       msg[1] = max_frame;
+       hw->mbx.ops.write_posted(hw, msg, 2);
+
        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);
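
The two-word message posted above is the VF half of the IXGBE_VF_SET_LPE exchange: it is handled by ixgbe_set_vf_lpe() in the PF SR-IOV hunks earlier in this diff, where the value is range-checked and MAXFRS raised if needed, and quietly ignored on anything other than an X540, mirroring the max_possible_frame guard at the top of this function.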
 
@@ -3272,8 +3234,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
 {
-       struct ixgbevf_adapter *adapter;
-       adapter = netdev_priv(dev);
        dev->netdev_ops = &ixgbe_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
@@ -3519,9 +3479,9 @@ static struct pci_driver ixgbevf_driver = {
 };
 
 /**
- * ixgbe_init_module - Driver Registration Routine
+ * ixgbevf_init_module - Driver Registration Routine
  *
- * ixgbe_init_module is the first routine called when the driver is
+ * ixgbevf_init_module is the first routine called when the driver is
  * loaded. All it does is register with the PCI subsystem.
  **/
 static int __init ixgbevf_init_module(void)
@@ -3539,9 +3499,9 @@ static int __init ixgbevf_init_module(void)
 module_init(ixgbevf_init_module);
 
 /**
- * ixgbe_exit_module - Driver Exit Cleanup Routine
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
  *
- * ixgbe_exit_module is called just before the driver is removed
+ * ixgbevf_exit_module is called just before the driver is removed
  * from memory.
  **/
 static void __exit ixgbevf_exit_module(void)
@@ -3551,7 +3511,7 @@ static void __exit ixgbevf_exit_module(void)
 
 #ifdef DEBUG
 /**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbevf_get_hw_dev_name - return device name string
  * used by hardware layer to print debugging information
  **/
 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
index fb80ca1..189200e 100644 (file)
@@ -31,7 +31,7 @@
 #define IXGBE_VFCTRL           0x00000
 #define IXGBE_VFSTATUS         0x00008
 #define IXGBE_VFLINKS          0x00010
-#define IXGBE_VFRTIMER         0x00048
+#define IXGBE_VFFRTIMER        0x00048
 #define IXGBE_VFRXMEMWRAP      0x03190
 #define IXGBE_VTEICR           0x00100
 #define IXGBE_VTEICS           0x00104
index e97ebef..f690474 100644 (file)
@@ -160,6 +160,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
        }
 }
 
+static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+       jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+       jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+       jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+       jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+       jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+       u32 speed = jme->reg_ghc & GHC_SPEED;
+       if (speed == GHC_SPEED_1000M)
+               jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+       else
+               jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+       jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+       jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+                            GPREG1_RSSPATCH);
+       jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+       jme->reg_ghc |= GHC_SWRST;
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+       jme->reg_ghc &= ~GHC_SWRST;
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
 static inline void
 jme_reset_mac_processor(struct jme_adapter *jme)
 {
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
        u32 gpreg0;
        int i;
 
-       jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
-       udelay(2);
-       jwrite32(jme, JME_GHC, jme->reg_ghc);
+       jme_reset_ghc_speed(jme);
+       jme_reset_250A2_workaround(jme);
+
+       jme_mac_rxclk_on(jme);
+       jme_mac_txclk_on(jme);
+       udelay(1);
+       jme_assert_ghc_reset(jme);
+       udelay(1);
+       jme_mac_rxclk_off(jme);
+       jme_mac_txclk_off(jme);
+       udelay(1);
+       jme_clear_ghc_reset(jme);
+       udelay(1);
+       jme_mac_rxclk_on(jme);
+       jme_mac_txclk_on(jme);
+       udelay(1);
+       jme_mac_rxclk_off(jme);
+       jme_mac_txclk_off(jme);
 
        jwrite32(jme, JME_RXDBA_LO, 0x00000000);
        jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
        else
                gpreg0 = GPREG0_DEFAULT;
        jwrite32(jme, JME_GPREG0, gpreg0);
-       jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
-}
-
-static inline void
-jme_reset_ghc_speed(struct jme_adapter *jme)
-{
-       jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
-       jwrite32(jme, JME_GHC, jme->reg_ghc);
 }
 
 static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
 }
 
 static inline void
-jme_set_phyfifoa(struct jme_adapter *jme)
+jme_set_phyfifo_5level(struct jme_adapter *jme)
 {
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
 }
 
 static inline void
-jme_set_phyfifob(struct jme_adapter *jme)
+jme_set_phyfifo_8level(struct jme_adapter *jme)
 {
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
 }
@@ -351,7 +419,7 @@ static int
 jme_check_link(struct net_device *netdev, int testonly)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
+       u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;
 
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
 
                jme->phylink = phylink;
 
-               ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
-                               GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
-                               GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
+               /*
+                * The speed/duplex bits of jme->reg_ghc have already been
+                * cleared by jme_reset_mac_processor()
+                */
                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
-                       ghc |= GHC_SPEED_10M |
-                               GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+                       jme->reg_ghc |= GHC_SPEED_10M;
                        strcat(linkmsg, "10 Mbps, ");
                        break;
                case PHY_LINK_SPEED_100M:
-                       ghc |= GHC_SPEED_100M |
-                               GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+                       jme->reg_ghc |= GHC_SPEED_100M;
                        strcat(linkmsg, "100 Mbps, ");
                        break;
                case PHY_LINK_SPEED_1000M:
-                       ghc |= GHC_SPEED_1000M |
-                               GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+                       jme->reg_ghc |= GHC_SPEED_1000M;
                        strcat(linkmsg, "1000 Mbps, ");
                        break;
                default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
 
                if (phylink & PHY_LINK_DUPLEX) {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
-                       ghc |= GHC_DPX;
+                       jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+                       jme->reg_ghc |= GHC_DPX;
                } else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
-                       jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
-                               ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
-                               TXTRHD_TXREN |
-                               ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+                       jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
                }
 
-               gpreg1 = GPREG1_DEFAULT;
+               jwrite32(jme, JME_GHC, jme->reg_ghc);
+
                if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+                       jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+                                            GPREG1_RSSPATCH);
                        if (!(phylink & PHY_LINK_DUPLEX))
-                               gpreg1 |= GPREG1_HALFMODEPATCH;
+                               jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
                        switch (phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
-                               jme_set_phyfifoa(jme);
-                               gpreg1 |= GPREG1_RSSPATCH;
+                               jme_set_phyfifo_8level(jme);
+                               jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_100M:
-                               jme_set_phyfifob(jme);
-                               gpreg1 |= GPREG1_RSSPATCH;
+                               jme_set_phyfifo_5level(jme);
+                               jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_1000M:
-                               jme_set_phyfifoa(jme);
+                               jme_set_phyfifo_8level(jme);
                                break;
                        default:
                                break;
                        }
                }
-
-               jwrite32(jme, JME_GPREG1, gpreg1);
-               jwrite32(jme, JME_GHC, ghc);
-               jme->reg_ghc = ghc;
+               jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
 
                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
         * Enable TX Engine
         */
        wmb();
-       jwrite32(jme, JME_TXCS, jme->reg_txcs |
+       jwrite32f(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
 
+       /*
+        * Start clock for TX MAC Processor
+        */
+       jme_mac_txclk_on(jme);
 }
 
 static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
 
        if (!i)
                pr_err("Disable TX engine timeout\n");
+
+       /*
+        * Stop clock for TX MAC Processor
+        */
+       jme_mac_txclk_off(jme);
 }
 
 static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
        /*
         * Setup Unicast Filter
         */
+       jme_set_unicastaddr(jme->dev);
        jme_set_multi(jme->dev);
 
        /*
         * Enable RX Engine
         */
        wmb();
-       jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+       jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
+
+       /*
+        * Start clock for RX MAC Processor
+        */
+       jme_mac_rxclk_on(jme);
 }
 
 static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
        if (!i)
                pr_err("Disable RX engine timeout\n");
 
+       /*
+        * Stop clock for RX MAC Processor
+        */
+       jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+       u16 csum = 0xFFFFu;
+
+       if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+               return csum;
+       if (skb->protocol != htons(ETH_P_IP))
+               return csum;
+       skb_set_network_header(skb, ETH_HLEN);
+       if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+           (skb->len < (ETH_HLEN +
+                       (ip_hdr(skb)->ihl << 2) +
+                       sizeof(struct udphdr)))) {
+               skb_reset_network_header(skb);
+               return csum;
+       }
+       skb_set_transport_header(skb,
+                       ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+       csum = udp_hdr(skb)->check;
+       skb_reset_transport_header(skb);
+       skb_reset_network_header(skb);
+
+       return csum;
 }
 
 static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
 {
        if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
                return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
        }
 
        if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
-                       == RXWBFLAG_UDPON)) {
+                       == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
                if (flags & RXWBFLAG_IPV4)
                        netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
                return false;
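The jme_udpsum()/jme_rxsum_ok() change above stops reporting a hardware UDP checksum error when the datagram carries no checksum at all: RFC 768 allows a transmitted UDP checksum field of zero to mean "not computed". Here is a userspace sketch of the same header walk, assuming a flat, well-formed IPv4/UDP Ethernet frame; the 0xFFFF fallback mirrors the patch's "treat as fine" default.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14

/* Return the UDP checksum field of an IPv4/UDP frame, or 0xFFFF if the
 * frame is too short, not IPv4, or not UDP. */
static uint16_t frame_udp_csum(const uint8_t *frame, size_t len)
{
	if (len < ETH_HLEN + 20)
		return 0xFFFF;
	if (frame[12] != 0x08 || frame[13] != 0x00)   /* EtherType != IPv4 */
		return 0xFFFF;

	size_t ihl = (frame[ETH_HLEN] & 0x0F) * 4;    /* IP header length  */
	if (frame[ETH_HLEN + 9] != 17 ||              /* protocol != UDP   */
	    len < ETH_HLEN + ihl + 8)
		return 0xFFFF;

	/* UDP checksum is bytes 6-7 of the UDP header (network order). */
	return (uint16_t)((frame[ETH_HLEN + ihl + 6] << 8) |
			   frame[ETH_HLEN + ihl + 7]);
}

int main(void)
{
	uint8_t frame[ETH_HLEN + 20 + 8] = { 0 };
	frame[12] = 0x08; frame[13] = 0x00;   /* IPv4 */
	frame[ETH_HLEN] = 0x45;               /* version 4, ihl 5 words */
	frame[ETH_HLEN + 9] = 17;             /* UDP */
	/* checksum bytes left zero: sender did not compute one */

	if (frame_udp_csum(frame, sizeof(frame)) == 0)
		puts("no UDP checksum present: don't flag a csum error");
	return 0;
}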
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);
 
-               if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+               if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
        tasklet_disable(&jme->rxempty_task);
 
        if (netif_carrier_ok(netdev)) {
-               jme_reset_ghc_speed(jme);
                jme_disable_rx_engine(jme);
                jme_disable_tx_engine(jme);
                jme_reset_mac_processor(jme);
@@ -1576,6 +1684,38 @@ jme_free_irq(struct jme_adapter *jme)
        }
 }
 
+static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+       u32 reg;
+
+       reg = jread32(jme, JME_PHY_PWR);
+       reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+                PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+       jwrite32(jme, JME_PHY_PWR, reg);
+
+       pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+       reg &= ~PE1_GPREG0_PBG;
+       reg |= PE1_GPREG0_ENBG;
+       pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+       u32 reg;
+
+       reg = jread32(jme, JME_PHY_PWR);
+       reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+              PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+       jwrite32(jme, JME_PHY_PWR, reg);
+
+       pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+       reg &= ~PE1_GPREG0_PBG;
+       reg |= PE1_GPREG0_PDD3COLD;
+       pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
 static inline void
 jme_phy_on(struct jme_adapter *jme)
 {
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr &= ~BMCR_PDOWN;
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+       if (new_phy_power_ctrl(jme->chip_main_rev))
+               jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+       u32 bmcr;
+
+       bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+       bmcr |= BMCR_PDOWN;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+       if (new_phy_power_ctrl(jme->chip_main_rev))
+               jme_new_phy_off(jme);
 }
 
 static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
 
        jme_start_irq(jme);
 
-       if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-               jme_phy_on(jme);
+       jme_phy_on(jme);
+       if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
-       } else {
+       else
                jme_reset_phy_processor(jme);
-       }
 
        jme_reset_link(jme);
 
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
        }
 }
 
-static inline void
-jme_phy_off(struct jme_adapter *jme)
-{
-       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
-}
-
 static void
 jme_powersave_phy(struct jme_adapter *jme)
 {
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
        tasklet_disable(&jme->rxclean_task);
        tasklet_disable(&jme->rxempty_task);
 
-       jme_reset_ghc_speed(jme);
        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
        jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        return NETDEV_TX_OK;
 }
 
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+       u32 val;
+
+       val = (netdev->dev_addr[3] & 0xff) << 24 |
+             (netdev->dev_addr[2] & 0xff) << 16 |
+             (netdev->dev_addr[1] & 0xff) <<  8 |
+             (netdev->dev_addr[0] & 0xff);
+       jwrite32(jme, JME_RXUMA_LO, val);
+       val = (netdev->dev_addr[5] & 0xff) << 8 |
+             (netdev->dev_addr[4] & 0xff);
+       jwrite32(jme, JME_RXUMA_HI, val);
+}
+
 static int
 jme_set_macaddr(struct net_device *netdev, void *p)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        struct sockaddr *addr = p;
-       u32 val;
 
        if (netif_running(netdev))
                return -EBUSY;
 
        spin_lock_bh(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
-       val = (addr->sa_data[3] & 0xff) << 24 |
-             (addr->sa_data[2] & 0xff) << 16 |
-             (addr->sa_data[1] & 0xff) <<  8 |
-             (addr->sa_data[0] & 0xff);
-       jwrite32(jme, JME_RXUMA_LO, val);
-       val = (addr->sa_data[5] & 0xff) << 8 |
-             (addr->sa_data[4] & 0xff);
-       jwrite32(jme, JME_RXUMA_HI, val);
+       jme_set_unicastaddr(netdev);
        spin_unlock_bh(&jme->macaddr_lock);
 
        return 0;
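jme_set_unicastaddr() above packs the six dev_addr bytes into the two unicast-filter registers: the low four bytes into JME_RXUMA_LO and the last two into JME_RXUMA_HI. A standalone sketch of just the packing, with printf() standing in for jwrite32() and an arbitrary sample address.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* arbitrary example address 00:1b:21:aa:bb:cc */
	uint8_t dev_addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	uint32_t lo = (uint32_t)dev_addr[3] << 24 |
		      (uint32_t)dev_addr[2] << 16 |
		      (uint32_t)dev_addr[1] <<  8 |
		       dev_addr[0];
	uint32_t hi = (uint32_t)dev_addr[5] <<  8 |
		       dev_addr[4];

	printf("RXUMA_LO = 0x%08x\n", (unsigned)lo);   /* 0xaa211b00 */
	printf("RXUMA_HI = 0x%08x\n", (unsigned)hi);   /* 0x0000ccbb */
	return 0;
}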
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
 
        jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
        jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+       jme->chip_main_rev = jme->chiprev & 0xF;
+       jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
 }
 
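The jme_check_hw_ver() hunk above splits chiprev into a main and a sub revision nibble, and new_phy_power_ctrl() (added to jme.h further down) switches to the new PHY power-control path when the main revision is 5 or later. A tiny sketch of the nibble split with an arbitrary example value.

#include <stdint.h>
#include <stdio.h>

static int new_phy_power_ctrl(uint8_t chip_main_rev)
{
	return chip_main_rev >= 5;
}

int main(void)
{
	uint8_t chiprev = 0x23;    /* arbitrary example: sub rev 2, main rev 3 */
	uint8_t chip_main_rev = chiprev & 0xF;
	uint8_t chip_sub_rev  = (chiprev >> 4) & 0xF;

	printf("main %d sub %d new-phy-ctrl %d\n",
	       chip_main_rev, chip_sub_rev, new_phy_power_ctrl(chip_main_rev));
	return 0;
}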
 static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
        jme->reg_rxmcs = RXMCS_DEFAULT;
        jme->reg_txpfc = 0;
        jme->reg_pmcs = PMCS_MFEN;
+       jme->reg_gpreg1 = GPREG1_DEFAULT;
        set_bit(JME_FLAG_TXCSUM, &jme->flags);
        set_bit(JME_FLAG_TSO, &jme->flags);
 
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
        jme->mii_if.mdio_write = jme_mdio_write;
 
        jme_clear_pm(jme);
-       jme_set_phyfifoa(jme);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+       jme_set_phyfifo_5level(jme);
+       jme->pcirev = pdev->revision;
        if (!jme->fpgaver)
                jme_phy_init(jme);
        jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
                goto err_out_unmap;
        }
 
-       netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+       netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
                   "JMC250 Gigabit Ethernet" :
                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
                   "JMC260 Fast Ethernet" : "Unknown",
                   (jme->fpgaver != 0) ? " (FPGA)" : "",
                   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
-                  jme->rev, netdev->dev_addr);
+                  jme->pcirev, netdev->dev_addr);
 
        return 0;
 
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
                        jme_polling_mode(jme);
 
                jme_stop_pcc_timer(jme);
-               jme_reset_ghc_speed(jme);
                jme_disable_rx_engine(jme);
                jme_disable_tx_engine(jme);
                jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
        jme_clear_pm(jme);
        pci_restore_state(pdev);
 
-       if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-               jme_phy_on(jme);
+       jme_phy_on(jme);
+       if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
-       } else {
+       else
                jme_reset_phy_processor(jme);
-       }
 
        jme_start_irq(jme);
        netif_device_attach(netdev);
index eac0926..8bf3045 100644 (file)
@@ -26,7 +26,7 @@
 #define __JME_H_INCLUDED__
 
 #define DRV_NAME       "jme"
-#define DRV_VERSION    "1.0.7"
+#define DRV_VERSION    "1.0.8"
 #define PFX            DRV_NAME ": "
 
 #define PCI_DEVICE_ID_JMICRON_JMC250   0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
 #define HALF_US 500    /* 500 ns */
 #define JMESPIIOCTL    SIOCDEVPRIVATE
 
+#define PCI_PRIV_PE1           0xE4
+
+enum pci_priv_pe1_bit_masks {
+       PE1_ASPMSUPRT   = 0x00000003, /*
+                                      * RW:
+                                      * Aspm_support[1:0]
+                                      * (R/W Port of 5C[11:10])
+                                      */
+       PE1_MULTIFUN    = 0x00000004, /* RW: Multi_fun_bit */
+       PE1_RDYDMA      = 0x00000008, /* RO: ~link.rdy_for_dma */
+       PE1_ASPMOPTL    = 0x00000030, /* RW: link.rx10s_option[1:0] */
+       PE1_ASPMOPTH    = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+       PE1_GPREG0      = 0x0000FF00, /*
+                                      * SRW:
+                                      * Cfg_gp_reg0
+                                      * [7:6] phy_giga BG control
+                                      * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+                                      * [4:0] Reserved
+                                      */
+       PE1_GPREG0_PBG  = 0x0000C000, /* phy_giga BG control */
+       PE1_GPREG1      = 0x00FF0000, /* RW: Cfg_gp_reg1 */
+       PE1_REVID       = 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+       PE1_GPREG0_ENBG         = 0x00000000, /* en BG */
+       PE1_GPREG0_PDD3COLD     = 0x00004000, /* giga_PD + d3cold */
+       PE1_GPREG0_PDPCIESD     = 0x00008000, /* giga_PD + pcie_shutdown */
+       PE1_GPREG0_PDPCIEIDDQ   = 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
 /*
  * Dynamic(adaptive)/Static PCC values
  */
@@ -403,6 +434,7 @@ struct jme_adapter {
        u32                     reg_rxmcs;
        u32                     reg_ghc;
        u32                     reg_pmcs;
+       u32                     reg_gpreg1;
        u32                     phylink;
        u32                     tx_ring_size;
        u32                     tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
        u32                     rx_ring_mask;
        u8                      mrrs;
        unsigned int            fpgaver;
-       unsigned int            chiprev;
-       u8                      rev;
+       u8                      chiprev;
+       u8                      chip_main_rev;
+       u8                      chip_sub_rev;
+       u8                      pcirev;
        u32                     msg_enable;
        struct ethtool_cmd      old_ecmd;
        unsigned int            old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
        JME_PMCS        = JME_MAC | 0x60, /* Power Management Control/Stat */
 
 
+       JME_PHY_PWR     = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
        JME_PHY_CS      = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
        JME_PHY_LINK    = JME_PHY | 0x30, /* PHY Link Status Register */
        JME_SMBCSR      = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
        TXTRHD_TXRL_SHIFT       = 0,
 };
 
+enum jme_txtrhd_values {
+       TXTRHD_FULLDUPLEX       = 0x00000000,
+       TXTRHD_HALFDUPLEX       = TXTRHD_TXPEN |
+                                 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+                                 TXTRHD_TXREN |
+                                 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
 /*
  * RX Control/Status Bits
  */
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
  */
 enum jme_ghc_bit_mask {
        GHC_SWRST               = 0x40000000,
+       GHC_TO_CLK_SRC          = 0x00C00000,
+       GHC_TXMAC_CLK_SRC       = 0x00300000,
        GHC_DPX                 = 0x00000040,
        GHC_SPEED               = 0x00000030,
        GHC_LINK_POLL           = 0x00000001,
@@ -832,6 +877,21 @@ enum jme_pmcs_bit_masks {
        PMCS_MFEN       = 0x00000001,
 };
 
+/*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+       PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+       PHY_PWR_DWN1SW  = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+       PHY_PWR_DWN2    = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+       PHY_PWR_CLKSEL  = 0x08000000, /*
+                                      * XTL_OUT Clock select
+                                      * (an internal free-running clock)
+                                      * 0: xtl_out = phy_giga.A_XTL25_O
+                                      * 1: xtl_out = phy_giga.PD_OSC
+                                      */
+};
+
 /*
  * Giga PHY Status Registers
  */
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
 
 /*
  * General Purpose REG-1
- * Note: All theses bits defined here are for
- *       Chip mode revision 0x11 only
  */
-enum jme_gpreg1_masks {
+enum jme_gpreg1_bit_masks {
+       GPREG1_RXCLKOFF         = 0x04000000,
+       GPREG1_PCREQN           = 0x00020000,
+       GPREG1_HALFMODEPATCH    = 0x00000040, /* For Chip revision 0x11 only */
+       GPREG1_RSSPATCH         = 0x00000020, /* For Chip revision 0x11 only */
        GPREG1_INTRDELAYUNIT    = 0x00000018,
        GPREG1_INTRDELAYENABLE  = 0x00000007,
 };
 
 enum jme_gpreg1_vals {
-       GPREG1_RSSPATCH         = 0x00000040,
-       GPREG1_HALFMODEPATCH    = 0x00000020,
-
        GPREG1_INTDLYUNIT_16NS  = 0x00000000,
        GPREG1_INTDLYUNIT_256NS = 0x00000008,
        GPREG1_INTDLYUNIT_1US   = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
        GPREG1_INTDLYEN_6U      = 0x00000006,
        GPREG1_INTDLYEN_7U      = 0x00000007,
 
-       GPREG1_DEFAULT          = 0x00000000,
+       GPREG1_DEFAULT          = GPREG1_PCREQN,
 };
 
 /*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
 /*
  * Workaround
  */
-static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+static inline int is_buggy250(unsigned short device, u8 chiprev)
 {
        return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
 }
 
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+       return chip_main_rev >= 5;
+}
+
 /*
  * Function prototypes
  */
 static int jme_set_settings(struct net_device *netdev,
                                struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
 static void jme_set_multi(struct net_device *netdev);
 
 #endif
index 2d9663a..ea0dc45 100644 (file)
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
 
 static const struct ethtool_ops loopback_ethtool_ops = {
        .get_link               = always_on,
-       .set_tso                = ethtool_op_set_tso,
-       .get_tx_csum            = always_on,
-       .get_sg                 = always_on,
-       .get_rx_csum            = always_on,
 };
 
 static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
        dev->priv_flags        &= ~IFF_XMIT_DST_RELEASE;
+       dev->hw_features        = NETIF_F_ALL_TSO | NETIF_F_UFO;
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
-               | NETIF_F_TSO
+               | NETIF_F_ALL_TSO
+               | NETIF_F_UFO
                | NETIF_F_NO_CSUM
+               | NETIF_F_RXCSUM
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
                | NETIF_F_NETNS_LOCAL;
index f69e73e..79ccb54 100644 (file)
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(bp->dev, bp->mii_bus);
+       dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        if (mdiobus_register(bp->mii_bus))
                goto err_out_free_mdio_irq;
index 5933621..6696e56 100644 (file)
@@ -39,7 +39,7 @@ struct macvtap_queue {
        struct socket sock;
        struct socket_wq wq;
        int vnet_hdr_sz;
-       struct macvlan_dev *vlan;
+       struct macvlan_dev __rcu *vlan;
        struct file *file;
        unsigned int flags;
 };
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
        struct macvlan_dev *vlan;
 
        spin_lock(&macvtap_lock);
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_protected(q->vlan,
+                                        lockdep_is_held(&macvtap_lock));
        if (vlan) {
                int index = get_slot(vlan, q);
 
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
        /* macvtap_put_queue can free some slots, so go through all slots */
        spin_lock(&macvtap_lock);
        for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
-               q = rcu_dereference(vlan->taps[i]);
+               q = rcu_dereference_protected(vlan->taps[i],
+                                             lockdep_is_held(&macvtap_lock));
                if (q) {
                        qlist[j++] = q;
                        rcu_assign_pointer(vlan->taps[i], NULL);
@@ -528,8 +530,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
                vnet_hdr_len = q->vnet_hdr_sz;
 
                err = -EINVAL;
-               if ((len -= vnet_hdr_len) < 0)
+               if (len < vnet_hdr_len)
                        goto err;
+               len -= vnet_hdr_len;
 
                err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
                                           sizeof(vnet_hdr));
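The macvtap_get_user() fix above checks the length before subtracting the vnet header size. The old (len -= vnet_hdr_len) < 0 test relied on the result going negative, which never happens when len is unsigned (as a size_t is); it just wraps. A minimal sketch of the pitfall and the check-before-subtract form; the sizes are made up.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t len = 8;            /* bytes supplied by the caller        */
	size_t vnet_hdr_len = 12;  /* header the caller claims to prepend */

	/* Broken form: a size_t is never < 0, so this cannot reject:
	 *     if ((len -= vnet_hdr_len) < 0) ...    -- silently wraps   */

	/* Fixed form: compare first, subtract only when it is safe. */
	if (len < vnet_hdr_len) {
		puts("reject: buffer shorter than the vnet header");
		return 1;
	}
	len -= vnet_hdr_len;
	printf("payload bytes: %zu\n", len);
	return 0;
}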
@@ -569,7 +572,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
        }
 
        rcu_read_lock_bh();
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);
        else
@@ -583,7 +586,7 @@ err_kfree:
 
 err:
        rcu_read_lock_bh();
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                vlan->dev->stats.tx_dropped++;
        rcu_read_unlock_bh();
@@ -631,7 +634,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
        ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
 
        rcu_read_lock_bh();
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                macvlan_count_rx(vlan, len, ret == 0, 0);
        rcu_read_unlock_bh();
@@ -727,7 +730,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 
        case TUNGETIFF:
                rcu_read_lock_bh();
-               vlan = rcu_dereference(q->vlan);
+               vlan = rcu_dereference_bh(q->vlan);
                if (vlan)
                        dev_hold(vlan->dev);
                rcu_read_unlock_bh();
@@ -736,7 +739,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                        return -ENOLINK;
 
                ret = 0;
-               if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+               if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
                dev_put(vlan->dev);
index 210b2b1..0a6c6a2 100644 (file)
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
        if (!new_carrier) {
                netif_carrier_off(mii->dev);
                if (ok_to_print)
-                       printk(KERN_INFO "%s: link down\n", mii->dev->name);
+                       netdev_info(mii->dev, "link down\n");
                return 0; /* duplex did not change */
        }
 
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
                duplex = 1;
 
        if (ok_to_print)
-               printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
-                      mii->dev->name,
-                      lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" :
-                      media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
-                      duplex ? "full" : "half",
-                      lpa);
+               netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
+                           lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+                           media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
+                           100 : 10,
+                           duplex ? "full" : "half",
+                           lpa);
 
        if ((init_media) || (mii->full_duplex != duplex)) {
                mii->full_duplex = duplex;
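The mii_check_media() hunk above switches the link messages to netdev_info() and prints the speed as a number decoded from the 1000BASE-T partner status (lpa2) and the resolved 10/100 bits (media). A userspace sketch of the same decode; the bit values are written to mirror linux/mii.h but should be treated as illustrative here.

#include <stdio.h>

/* Bit values as in linux/mii.h (assumed for this sketch). */
#define LPA_1000FULL      0x0800
#define LPA_1000HALF      0x0400
#define ADVERTISE_100FULL 0x0100
#define ADVERTISE_100HALF 0x0080

static unsigned int link_speed(unsigned int lpa2, unsigned int media)
{
	if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
		return 1000;
	if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
		return 100;
	return 10;
}

int main(void)
{
	printf("%uMbps\n", link_speed(0, ADVERTISE_100FULL));   /* 100  */
	printf("%uMbps\n", link_speed(LPA_1000FULL, 0));        /* 1000 */
	return 0;
}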
index 4ffdc18..2765a3c 100644 (file)
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
        { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
        { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
        { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+       { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
        { 0, }
 };
 
index 02076e1..34425b9 100644 (file)
@@ -35,6 +35,8 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
 #include <linux/in.h>
@@ -627,9 +629,8 @@ err:
                if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                        (RX_FIRST_DESC | RX_LAST_DESC)) {
                        if (net_ratelimit())
-                               dev_printk(KERN_ERR, &mp->dev->dev,
-                                          "received packet spanning "
-                                          "multiple descriptors\n");
+                               netdev_err(mp->dev,
+                                          "received packet spanning multiple descriptors\n");
                }
 
                if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
                txq->tx_dropped++;
-               dev_printk(KERN_DEBUG, &dev->dev,
-                          "failed to linearize skb with tiny "
-                          "unaligned fragment\n");
+               netdev_printk(KERN_DEBUG, dev,
+                             "failed to linearize skb with tiny unaligned fragment\n");
                return NETDEV_TX_BUSY;
        }
 
        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
                if (net_ratelimit())
-                       dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
+                       netdev_err(dev, "tx queue full?!\n");
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                        skb = __skb_dequeue(&txq->tx_skb);
 
                if (cmd_sts & ERROR_SUMMARY) {
-                       dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+                       netdev_info(mp->dev, "tx error\n");
                        mp->dev->stats.tx_errors++;
                }
 
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
        int ret;
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
        writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
        ret = readl(smi_reg);
        if (!(ret & SMI_READ_VALID)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
+               pr_warn("SMI bus read not valid\n");
                return -ENODEV;
        }
 
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
        void __iomem *smi_reg = msp->base + SMI_REG;
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
                (addr << 16) | (val & 0xffff), smi_reg);
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
        if (netif_running(dev)) {
                mv643xx_eth_stop(dev);
                if (mv643xx_eth_open(dev)) {
-                       dev_printk(KERN_ERR, &dev->dev,
-                                  "fatal error on re-opening device after "
-                                  "ring param change\n");
+                       netdev_err(dev,
+                                  "fatal error on re-opening device after ring param change\n");
                        return -ENOMEM;
                }
        }
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
        }
 
        if (rxq->rx_desc_area == NULL) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
+               netdev_err(mp->dev,
                           "can't allocate rx ring (%d bytes)\n", size);
                goto out;
        }
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
        rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
                                                                GFP_KERNEL);
        if (rxq->rx_skb == NULL) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
-                          "can't allocate rx skb ring\n");
+               netdev_err(mp->dev, "can't allocate rx skb ring\n");
                goto out_free;
        }
 
@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
        }
 
        if (rxq->rx_desc_count) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
-                          "error freeing rx ring -- %d skbs stuck\n",
+               netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
                           rxq->rx_desc_count);
        }
 
@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
        }
 
        if (txq->tx_desc_area == NULL) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
+               netdev_err(mp->dev,
                           "can't allocate tx ring (%d bytes)\n", size);
                return -ENOMEM;
        }
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
                if (netif_carrier_ok(dev)) {
                        int i;
 
-                       printk(KERN_INFO "%s: link down\n", dev->name);
+                       netdev_info(dev, "link down\n");
 
                        netif_carrier_off(dev);
 
@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
        duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
        fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
 
-       printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
-                        "flow control %sabled\n", dev->name,
-                        speed, duplex ? "full" : "half",
-                        fc ? "en" : "dis");
+       netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+                   speed, duplex ? "full" : "half", fc ? "en" : "dis");
 
        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
        err = request_irq(dev->irq, mv643xx_eth_irq,
                          IRQF_SHARED, dev->name, dev);
        if (err) {
-               dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+               netdev_err(dev, "can't assign irq\n");
                return -EAGAIN;
        }
 
@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
         */
        mv643xx_eth_stop(dev);
        if (mv643xx_eth_open(dev)) {
-               dev_printk(KERN_ERR, &dev->dev,
-                          "fatal error on re-opening device after "
-                          "MTU change\n");
+               netdev_err(dev,
+                          "fatal error on re-opening device after MTU change\n");
        }
 
        return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-       dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
+       netdev_info(dev, "tx timeout\n");
 
        schedule_work(&mp->tx_timeout_task);
 }
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        int ret;
 
        if (!mv643xx_eth_version_printed++)
-               printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
-                       "driver version %s\n", mv643xx_eth_driver_version);
+               pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+                         mv643xx_eth_driver_version);
 
        ret = -EINVAL;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        pd = pdev->dev.platform_data;
        if (pd == NULL) {
-               dev_printk(KERN_ERR, &pdev->dev,
-                          "no mv643xx_eth_platform_data\n");
+               dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
                return -ENODEV;
        }
 
        if (pd->shared == NULL) {
-               dev_printk(KERN_ERR, &pdev->dev,
-                          "no mv643xx_eth_platform_data->shared\n");
+               dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
                return -ENODEV;
        }
 
@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        if (err)
                goto out;
 
-       dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
-                  mp->port_num, dev->dev_addr);
+       netdev_notice(dev, "port %d with MAC address %pM\n",
+                     mp->port_num, dev->dev_addr);
 
        if (mp->tx_desc_sram_size > 0)
-               dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
+               netdev_notice(dev, "configured with sram\n");
 
        return 0;
 
index ea5cfe2..a7f2eed 100644 (file)
@@ -253,7 +253,7 @@ struct myri10ge_priv {
        unsigned long serial_number;
        int vendor_specific_offset;
        int fw_multicast_support;
-       unsigned long features;
+       u32 features;
        u32 max_tso6;
        u32 read_dma;
        u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
 static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
 {
        struct myri10ge_priv *mgp = netdev_priv(netdev);
-       unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+       u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
 
        if (tso_enabled)
                netdev->features |= flags;
index a113805..d7299f1 100644 (file)
@@ -739,7 +739,8 @@ struct netxen_recv_context {
 #define NX_CDRP_CMD_READ_PEXQ_PARAMETERS       0x0000001c
 #define NX_CDRP_CMD_GET_LIC_CAPABILITIES       0x0000001d
 #define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD     0x0000001e
-#define NX_CDRP_CMD_MAX                                0x0000001f
+#define NX_CDRP_CMD_CONFIG_GBE_PORT            0x0000001f
+#define NX_CDRP_CMD_MAX                                0x00000020
 
 #define NX_RCODE_SUCCESS               0
 #define NX_RCODE_NO_HOST_MEM           1
@@ -1054,6 +1055,7 @@ typedef struct {
 #define NX_FW_CAPABILITY_BDG                   (1 << 8)
 #define NX_FW_CAPABILITY_FVLANTX               (1 << 9)
 #define NX_FW_CAPABILITY_HW_LRO                        (1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG          (1 << 11)
 
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT                   1
@@ -1349,6 +1351,8 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
 void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
 void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
 
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+                               u32 speed, u32 duplex, u32 autoneg);
 int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
 int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
 int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
index f7d06cb..f16966a 100644 (file)
@@ -112,6 +112,21 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
        return 0;
 }
 
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+                       u32 speed, u32 duplex, u32 autoneg)
+{
+
+       return netxen_issue_cmd(adapter,
+                               adapter->ahw.pci_func,
+                               NXHAL_VERSION,
+                               speed,
+                               duplex,
+                               autoneg,
+                               NX_CDRP_CMD_CONFIG_GBE_PORT);
+
+}
+
 static int
 nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 {
index 587498e..653d308 100644 (file)
@@ -214,7 +214,6 @@ skip:
                        check_sfp_module = netif_running(dev) &&
                                adapter->has_link_events;
                } else {
-                       ecmd->autoneg = AUTONEG_ENABLE;
                        ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
                        ecmd->advertising |=
                                (ADVERTISED_TP | ADVERTISED_Autoneg);
@@ -252,53 +251,24 @@ static int
 netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
        struct netxen_adapter *adapter = netdev_priv(dev);
-       __u32 status;
+       int ret;
 
-       /* read which mode */
-       if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
-               /* autonegotiation */
-               if (adapter->phy_write &&
-                   adapter->phy_write(adapter,
-                                      NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                      ecmd->autoneg) != 0)
-                       return -EIO;
-               else
-                       adapter->link_autoneg = ecmd->autoneg;
+       if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+               return -EOPNOTSUPP;
 
-               if (adapter->phy_read &&
-                   adapter->phy_read(adapter,
-                                     NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-                                     &status) != 0)
-                       return -EIO;
+       if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+               return -EOPNOTSUPP;
 
-               /* speed */
-               switch (ecmd->speed) {
-               case SPEED_10:
-                       netxen_set_phy_speed(status, 0);
-                       break;
-               case SPEED_100:
-                       netxen_set_phy_speed(status, 1);
-                       break;
-               case SPEED_1000:
-                       netxen_set_phy_speed(status, 2);
-                       break;
-               }
-               /* set duplex mode */
-               if (ecmd->duplex == DUPLEX_HALF)
-                       netxen_clear_phy_duplex(status);
-               if (ecmd->duplex == DUPLEX_FULL)
-                       netxen_set_phy_duplex(status);
-               if (adapter->phy_write &&
-                   adapter->phy_write(adapter,
-                                      NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-                                      *((int *)&status)) != 0)
-                       return -EIO;
-               else {
-                       adapter->link_speed = ecmd->speed;
-                       adapter->link_duplex = ecmd->duplex;
-               }
-       } else
+       ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex,
+                                    ecmd->autoneg);
+       if (ret == NX_RCODE_NOT_SUPPORTED)
                return -EOPNOTSUPP;
+       else if (ret)
+               return -EIO;
+
+       adapter->link_speed = ecmd->speed;
+       adapter->link_duplex = ecmd->duplex;
+       adapter->link_autoneg = ecmd->autoneg;
 
        if (!netif_running(dev))
                return 0;
index 33fac32..83348dc 100644 (file)
@@ -1032,6 +1032,9 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
 
+       if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+               netxen_linkevent_request(adapter, 0);
+
        if (adapter->stop_port)
                adapter->stop_port(adapter);
 
index 2541321..9fb59d3 100644 (file)
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
 {
        struct niu_parent *parent = np->parent;
        int first_rx_channel, first_tx_channel;
+       int num_rx_rings, num_tx_rings;
+       struct rx_ring_info *rx_rings;
+       struct tx_ring_info *tx_rings;
        int i, port, err;
 
        port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
                first_tx_channel += parent->txchan_per_port[i];
        }
 
-       np->num_rx_rings = parent->rxchan_per_port[port];
-       np->num_tx_rings = parent->txchan_per_port[port];
+       num_rx_rings = parent->rxchan_per_port[port];
+       num_tx_rings = parent->txchan_per_port[port];
 
-       netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
-       netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
-
-       np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
-                              GFP_KERNEL);
+       rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+                          GFP_KERNEL);
        err = -ENOMEM;
-       if (!np->rx_rings)
+       if (!rx_rings)
                goto out_err;
 
+       np->num_rx_rings = num_rx_rings;
+       smp_wmb();
+       np->rx_rings = rx_rings;
+
+       netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
        for (i = 0; i < np->num_rx_rings; i++) {
                struct rx_ring_info *rp = &np->rx_rings[i];
 
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
                        return err;
        }
 
-       np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
-                              GFP_KERNEL);
+       tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+                          GFP_KERNEL);
        err = -ENOMEM;
-       if (!np->tx_rings)
+       if (!tx_rings)
                goto out_err;
 
+       np->num_tx_rings = num_tx_rings;
+       smp_wmb();
+       np->tx_rings = tx_rings;
+
+       netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
        for (i = 0; i < np->num_tx_rings; i++) {
                struct tx_ring_info *rp = &np->tx_rings[i];
 
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
 static void niu_get_rx_stats(struct niu *np)
 {
        unsigned long pkts, dropped, errors, bytes;
+       struct rx_ring_info *rx_rings;
        int i;
 
        pkts = dropped = errors = bytes = 0;
+
+       rx_rings = ACCESS_ONCE(np->rx_rings);
+       if (!rx_rings)
+               goto no_rings;
+
        for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
+               struct rx_ring_info *rp = &rx_rings[i];
 
                niu_sync_rx_discard_stats(np, rp, 0);
 
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
                dropped += rp->rx_dropped;
                errors += rp->rx_errors;
        }
+
+no_rings:
        np->dev->stats.rx_packets = pkts;
        np->dev->stats.rx_bytes = bytes;
        np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
 static void niu_get_tx_stats(struct niu *np)
 {
        unsigned long pkts, errors, bytes;
+       struct tx_ring_info *tx_rings;
        int i;
 
        pkts = errors = bytes = 0;
+
+       tx_rings = ACCESS_ONCE(np->tx_rings);
+       if (!tx_rings)
+               goto no_rings;
+
        for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
+               struct tx_ring_info *rp = &tx_rings[i];
 
                pkts += rp->tx_packets;
                bytes += rp->tx_bytes;
                errors += rp->tx_errors;
        }
+
+no_rings:
        np->dev->stats.tx_packets = pkts;
        np->dev->stats.tx_bytes = bytes;
        np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
 {
        struct niu *np = netdev_priv(dev);
 
-       niu_get_rx_stats(np);
-       niu_get_tx_stats(np);
-
+       if (netif_running(dev)) {
+               niu_get_rx_stats(np);
+               niu_get_tx_stats(np);
+       }
        return &dev->stats;
 }
 
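The niu hunks above publish each ring array only after its count is visible (np->num_rx_rings, smp_wmb(), then np->rx_rings), and the stats paths load the pointer once with ACCESS_ONCE() and bail out while it is still NULL, so niu_get_stats() can run safely against a device that has not finished niu_alloc_channels(). A rough userspace analogue of that publish/consume ordering, using C11 release/acquire atomics in place of the kernel primitives; the names are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned long packets; };

static int nrings;                     /* written before the pointer   */
static _Atomic(struct ring *) rings;   /* published last, loaded once  */

static void alloc_channels(int n)
{
	struct ring *r = calloc(n, sizeof(*r));

	nrings = n;                                      /* 1: count     */
	atomic_store_explicit(&rings, r,
			      memory_order_release);     /* 2: publish   */
}

static unsigned long get_stats(void)
{
	/* Load the pointer exactly once; NULL means "not set up yet". */
	struct ring *r = atomic_load_explicit(&rings, memory_order_acquire);
	unsigned long pkts = 0;

	if (!r)
		return 0;
	for (int i = 0; i < nrings; i++)
		pkts += r[i].packets;
	return pkts;
}

int main(void)
{
	printf("before: %lu\n", get_stats());   /* rings still NULL -> 0 */
	alloc_channels(4);
	printf("after:  %lu\n", get_stats());
	return 0;
}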
index a0c26a9..e1e33c8 100644 (file)
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
        struct pch_gbe_regs_mac_adr mac_adr[16];
        u32 ADDR_MASK;
        u32 MIIM;
-       u32 reserve2;
+       u32 MAC_ADDR_LOAD;
        u32 RGMII_ST;
        u32 RGMII_CTRL;
        u32 reserve3[3];
index d735530..b99e90a 100644 (file)
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_SHORT_PKT              64
 #define DSC_INIT16                     0xC000
 #define PCH_GBE_DMA_ALIGN              0
+#define PCH_GBE_DMA_PADDING            2
 #define PCH_GBE_WATCHDOG_PERIOD                (1 * HZ)        /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT      256
 #define PCH_GBE_PCI_BAR                        1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+       iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:                    Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
        struct pch_gbe_adapter *adapter;
        adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 
+       rtnl_lock();
        pch_gbe_reinit_locked(adapter);
+       rtnl_unlock();
 }
 
 /**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
  */
 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
-
-       rtnl_lock();
-       if (netif_running(netdev)) {
-               pch_gbe_down(adapter);
-               pch_gbe_up(adapter);
-       }
-       rtnl_unlock();
+       pch_gbe_down(adapter);
+       pch_gbe_up(adapter);
 }
 
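The pch_gbe hunks above move the rtnl locking out of pch_gbe_reinit_locked() and into the reset worker, so the helper now assumes its caller already holds the lock (the usual *_locked convention) and the netif_running() check disappears from the helper. A small sketch of that split, using a pthread mutex as a stand-in for the rtnl lock; the names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold cfg_lock -- hence the _locked suffix. */
static void device_reinit_locked(void)
{
	puts("bring the device down");
	puts("bring the device back up");
}

/* The worker is the outermost caller, so it takes and drops the lock. */
static void reset_task(void)
{
	pthread_mutex_lock(&cfg_lock);
	device_reinit_locked();
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	reset_task();
	return 0;
}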
 /**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
        struct pch_gbe_buffer *buffer_info;
        struct pch_gbe_rx_desc *rx_desc;
        u32 length;
-       unsigned char tmp_packet[ETH_HLEN];
        unsigned int i;
        unsigned int cleaned_count = 0;
        bool cleaned = false;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *new_skb;
        u8 dma_status;
        u16 gbec_status;
        u32 tcp_ip_status;
-       u8 skb_copy_flag = 0;
-       u8 skb_padding_flag = 0;
 
        i = rx_ring->next_to_clean;
 
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        pr_err("Receive CRC Error\n");
                } else {
                        /* get receive length */
-                       /* length convert[-3], padding[-2] */
-                       length = (rx_desc->rx_words_eob) - 3 - 2;
+                       /* length convert[-3] */
+                       length = (rx_desc->rx_words_eob) - 3;
 
                        /* Decide the data conversion method */
                        if (!adapter->rx_csum) {
                                /* [Header:14][payload] */
-                               skb_padding_flag = 0;
-                               skb_copy_flag = 1;
+                               if (NET_IP_ALIGN) {
+                                       /* The alignment differs, so
+                                        * allocate a new skb and copy
+                                        * the data into it. */
+                                       new_skb = netdev_alloc_skb(netdev,
+                                                        length + NET_IP_ALIGN);
+                                       if (!new_skb) {
+                                               /* dorrop error */
+                                               pr_err("New skb allocation "
+                                                       "Error\n");
+                                               goto dorrop;
+                                       }
+                                       skb_reserve(new_skb, NET_IP_ALIGN);
+                                       memcpy(new_skb->data, skb->data,
+                                              length);
+                                       skb = new_skb;
+                               } else {
+                                       /* DMA buffer is used as SKB as it is.*/
+                                       buffer_info->skb = NULL;
+                               }
                        } else {
                                /* [Header:14][padding:2][payload] */
-                               skb_padding_flag = 1;
-                               if (length < copybreak)
-                                       skb_copy_flag = 1;
-                               else
-                                       skb_copy_flag = 0;
-                       }
-
-                       /* Data conversion */
-                       if (skb_copy_flag) {    /* recycle  skb */
-                               struct sk_buff *new_skb;
-                               new_skb =
-                                   netdev_alloc_skb(netdev,
-                                                    length + NET_IP_ALIGN);
-                               if (new_skb) {
-                                       if (!skb_padding_flag) {
-                                               skb_reserve(new_skb,
-                                                               NET_IP_ALIGN);
+                               /* The length includes padding length */
+                               length = length - PCH_GBE_DMA_PADDING;
+                               if ((length < copybreak) ||
+                                   (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+                                       /* Alignment differs, so allocate a
+                                        * new skb and copy the data into it,
+                                        * dropping the padding during the
+                                        * copy. */
+                                       new_skb = netdev_alloc_skb(netdev,
+                                                        length + NET_IP_ALIGN);
+                                       if (!new_skb) {
+                                               /* dorrop error */
+                                               pr_err("New skb allocation "
+                                                       "Error\n");
+                                               goto dorrop;
                                        }
+                                       skb_reserve(new_skb, NET_IP_ALIGN);
                                        memcpy(new_skb->data, skb->data,
-                                               length);
-                                       /* save the skb
-                                        * in buffer_info as good */
+                                              ETH_HLEN);
+                                       memcpy(&new_skb->data[ETH_HLEN],
+                                              &skb->data[ETH_HLEN +
+                                              PCH_GBE_DMA_PADDING],
+                                              length - ETH_HLEN);
                                        skb = new_skb;
-                               } else if (!skb_padding_flag) {
-                                       /* dorrop error */
-                                       pr_err("New skb allocation Error\n");
-                                       goto dorrop;
+                               } else {
+                                       /* Drop the padding by moving the
+                                        * header data up in place. */
+                                       memmove(&skb->data[PCH_GBE_DMA_PADDING],
+                                               &skb->data[0], ETH_HLEN);
+                                       skb_reserve(skb, NET_IP_ALIGN);
+                                       buffer_info->skb = NULL;
                                }
-                       } else {
-                               buffer_info->skb = NULL;
                        }
-                       if (skb_padding_flag) {
-                               memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-                               memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-                                       ETH_HLEN);
-                               skb_reserve(skb, NET_IP_ALIGN);
-
-                       }
-
+                       /* The length includes FCS length */
+                       length = length - ETH_FCS_LEN;
                        /* update status of driver */
                        adapter->stats.rx_bytes += length;
                        adapter->stats.rx_packets++;
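The length bookkeeping in this hunk is easy to misread, so here is a small userspace sketch of what the checksum-offload branch does with the descriptor length: rx_words_eob minus 3 gives the frame length, 2 bytes of hardware padding sit between the 14-byte Ethernet header and the payload ([Header:14][padding:2][payload]) and are dropped during the copy, and the 4-byte FCS is subtracted at the end. The constants are assumed to match the driver's (PCH_GBE_DMA_PADDING = 2); the program below compiles and runs on its own:

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        #define ETH_HLEN            14
        #define ETH_FCS_LEN          4
        #define PCH_GBE_DMA_PADDING  2   /* assumption: matches the driver */

        /* Strip the 2-byte pad the hardware inserts after the Ethernet
         * header: copy the header, then the payload that follows the pad.
         * 'len' is the descriptor length after the -3 conversion. */
        static size_t strip_rx_padding(uint8_t *dst, const uint8_t *src, size_t len)
        {
                size_t data_len = len - PCH_GBE_DMA_PADDING;      /* drop pad */

                memcpy(dst, src, ETH_HLEN);                       /* header   */
                memcpy(dst + ETH_HLEN,
                       src + ETH_HLEN + PCH_GBE_DMA_PADDING,      /* skip pad */
                       data_len - ETH_HLEN);

                return data_len - ETH_FCS_LEN;                    /* drop FCS */
        }

        int main(void)
        {
                /* 14-byte header, 2-byte pad, 6-byte payload, 4-byte FCS */
                uint8_t rx[14 + 2 + 6 + 4] = { 0 };
                uint8_t out[sizeof(rx)];

                memcpy(rx + 16, "hello", 6);
                printf("payload length: %zu\n",
                       strip_rx_padding(out, rx, sizeof(rx)));
                printf("payload: %s\n", out + ETH_HLEN);
                return 0;
        }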
@@ -2247,7 +2262,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-       flush_scheduled_work();
+       cancel_work_sync(&adapter->reset_task);
        unregister_netdev(netdev);
 
        pch_gbe_hal_phy_hw_reset(&adapter->hw);
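A side note on the flush_scheduled_work() to cancel_work_sync() change above: flushing the shared system workqueue waits on every pending work item in the system and can deadlock if unrelated work needs a lock the caller holds, whereas cancelling the one work item this driver owns is cheaper and safe. Ordering matters too: the reset task must be gone before the netdev is unregistered and freed. A hedged sketch of the shape, again with a hypothetical "foo" driver (names and layout assumed, not taken from the patch; not a standalone program):

        static void foo_remove(struct pci_dev *pdev)
        {
                struct net_device *netdev = pci_get_drvdata(pdev);
                struct foo_adapter *adapter = netdev_priv(netdev);

                /* Wait for, and cancel, only our own deferred work. */
                cancel_work_sync(&adapter->reset_task);

                unregister_netdev(netdev);
                free_netdev(netdev);
        }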
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
        pch_gbe_set_ethtool_ops(netdev);
 
+       pch_gbe_mac_load_mac_addr(&adapter->hw);
        pch_gbe_mac_reset_hw(&adapter->hw);
 
        /* setup the private structure */
index 1f42f6a..d3cb772 100644 (file)
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
     
        /* 
         * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
-        * Early datasheets said to poll the reset bit, but now they say that
-        * it "is not a reliable indicator and subsequently should be ignored."
-        * We wait at least 10ms.
+        * We wait at least 2ms.
         */
 
-       mdelay(10);
+       mdelay(2);
 
        /*
         * Reset RBCR[01] back to zero as per magic incantation.
index 9226cda..530ab5a 100644 (file)
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+       PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
        PCMCIA_DEVICE_NULL,
 };
index 35fda5a..392a6c4 100644 (file)
@@ -77,7 +77,6 @@ config NATIONAL_PHY
          Currently supports the DP83865 PHY.
 
 config STE10XP
-       depends on PHYLIB
        tristate "Driver for STMicroelectronics STe10Xp PHYs"
        ---help---
          This is the driver for the STe100p and STe101p PHYs.
index 0fd1678..590f902 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/phy.h>
-
-#define        PHY_ID_KSZ9021                  0x00221611
-#define        PHY_ID_KS8737                   0x00221720
-#define        PHY_ID_KS8041                   0x00221510
-#define        PHY_ID_KS8051                   0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define        PHY_ID_KS8001                   0x0022161A
+#include <linux/micrel_phy.h>
 
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS                       0x1B
@@ -46,6 +40,7 @@
 #define KSZPHY_CTRL_INT_ACTIVE_HIGH            (1 << 9)
 #define KSZ9021_CTRL_INT_ACTIVE_HIGH           (1 << 14)
 #define KS8737_CTRL_INT_ACTIVE_HIGH            (1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK                 (1 << 7)
 
 static int kszphy_ack_interrupt(struct phy_device *phydev)
 {
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int ks8051_config_init(struct phy_device *phydev)
+{
+       int regval;
+
+       if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+               regval = phy_read(phydev, MII_KSZPHY_CTRL);
+               regval |= KSZ8051_RMII_50MHZ_CLK;
+               phy_write(phydev, MII_KSZPHY_CTRL, regval);
+       }
+
+       return 0;
+}
+
 static struct phy_driver ks8737_driver = {
        .phy_id         = PHY_ID_KS8737,
        .phy_id_mask    = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = ks8051_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
index a8445c7..f767033 100644 (file)
@@ -319,7 +319,8 @@ int phy_mii_ioctl(struct phy_device *phydev,
                /* fall through */
 
        case SIOCGMIIREG:
-               mii_data->val_out = phy_read(phydev, mii_data->reg_num);
+               mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
+                                                mii_data->reg_num);
                break;
 
        case SIOCSMIIREG:
@@ -350,8 +351,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
                        }
                }
 
-               phy_write(phydev, mii_data->reg_num, val);
-               
+               mdiobus_write(phydev->bus, mii_data->phy_id,
+                             mii_data->reg_num, val);
+
                if (mii_data->reg_num == MII_BMCR &&
                    val & BMCR_RESET &&
                    phydev->drv->config_init) {
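The phy_read()/phy_write() to mdiobus_read()/mdiobus_write() switch in these two hunks matters because the MII ioctl carries a phy_id chosen by userspace: phy_read() is pinned to the attached PHY's address, while the mdiobus helpers address whatever device on the bus the caller asked for. A small sketch of the distinction; the helper name is made up, only the two library calls are real, and this is a kernel-context fragment rather than a standalone program:

        static int mii_ioctl_read(struct phy_device *phydev, int phy_id, int reg)
        {
                if (phy_id == phydev->addr)
                        return phy_read(phydev, reg);           /* attached PHY */

                return mdiobus_read(phydev->bus, phy_id, reg);  /* any PHY on the bus */
        }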
index c7a6c44..9f6d670 100644 (file)
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        ppp_release(NULL, file);
                        err = 0;
                } else
-                       printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
-                              atomic_long_read(&file->f_count));
+                       pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+                               atomic_long_read(&file->f_count));
                mutex_unlock(&ppp_mutex);
                return err;
        }
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        if (pf->kind != INTERFACE) {
                /* can't happen */
-               printk(KERN_ERR "PPP: not interface or channel??\n");
+               pr_err("PPP: not interface or channel??\n");
                return -EINVAL;
        }
 
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                }
                vj = slhc_init(val2+1, val+1);
                if (!vj) {
-                       printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+                       netdev_err(ppp->dev,
+                                  "PPP: no memory (VJ compressor)\n");
                        err = -ENOMEM;
                        break;
                }
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
 {
        int err;
 
-       printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+       pr_info("PPP generic driver version " PPP_VERSION "\n");
 
        err = register_pernet_device(&ppp_net_ops);
        if (err) {
-               printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+               pr_err("failed to register PPP pernet device (%d)\n", err);
                goto out;
        }
 
        err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
        if (err) {
-               printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+               pr_err("failed to register PPP device (%d)\n", err);
                goto out_net;
        }
 
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
        new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
        if (!new_skb) {
                if (net_ratelimit())
-                       printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+                       netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
                return NULL;
        }
        if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
                 * the same number.
                 */
                if (net_ratelimit())
-                       printk(KERN_ERR "ppp: compressor dropped pkt\n");
+                       netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
                kfree_skb(skb);
                kfree_skb(new_skb);
                new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                if (ppp->pass_filter &&
                    sk_run_filter(skb, ppp->pass_filter) == 0) {
                        if (ppp->debug & 1)
-                               printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "PPP: outbound frame "
+                                             "not passed\n");
                        kfree_skb(skb);
                        return;
                }
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
                                    GFP_ATOMIC);
                if (!new_skb) {
-                       printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+                       netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
                        goto drop;
                }
                skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
            proto != PPP_LCP && proto != PPP_CCP) {
                if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
                        if (net_ratelimit())
-                               printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+                               netdev_err(ppp->dev,
+                                          "ppp: compression required but "
+                                          "down - pkt dropped.\n");
                        goto drop;
                }
                skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
  noskb:
        spin_unlock_bh(&pch->downl);
        if (ppp->debug & 1)
-               printk(KERN_ERR "PPP: no memory (fragment)\n");
+               netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
        ++ppp->dev->stats.tx_errors;
        ++ppp->nxseq;
        return 1;       /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        /* copy to a new sk_buff with more tailroom */
                        ns = dev_alloc_skb(skb->len + 128);
                        if (!ns) {
-                               printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+                               netdev_err(ppp->dev, "PPP: no memory "
+                                          "(VJ decomp)\n");
                                goto err;
                        }
                        skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
                len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
                if (len <= 0) {
-                       printk(KERN_DEBUG "PPP: VJ decompression error\n");
+                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                     "PPP: VJ decompression error\n");
                        goto err;
                }
                len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        goto err;
 
                if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
-                       printk(KERN_ERR "PPP: VJ uncompressed error\n");
+                       netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
                        goto err;
                }
                proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        if (ppp->pass_filter &&
                            sk_run_filter(skb, ppp->pass_filter) == 0) {
                                if (ppp->debug & 1)
-                                       printk(KERN_DEBUG "PPP: inbound frame "
-                                              "not passed\n");
+                                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                                     "PPP: inbound frame "
+                                                     "not passed\n");
                                kfree_skb(skb);
                                return;
                        }
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
 
                ns = dev_alloc_skb(obuff_size);
                if (!ns) {
-                       printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+                       netdev_err(ppp->dev, "ppp_decompress_frame: "
+                                  "no memory\n");
                        goto err;
                }
                /* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
        u32 seq = ppp->nextseq;
        u32 minseq = ppp->minseq;
        struct sk_buff_head *list = &ppp->mrq;
-       struct sk_buff *p, *next;
+       struct sk_buff *p, *tmp;
        struct sk_buff *head, *tail;
        struct sk_buff *skb = NULL;
        int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
                return NULL;
        head = list->next;
        tail = NULL;
-       for (p = head; p != (struct sk_buff *) list; p = next) {
-               next = p->next;
+       skb_queue_walk_safe(list, p, tmp) {
+       again:
                if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
                        /* this can't happen, anyway ignore the skb */
-                       printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
-                              PPP_MP_CB(p)->sequence, seq);
-                       head = next;
+                       netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+                                  "seq %u < %u\n",
+                                  PPP_MP_CB(p)->sequence, seq);
+                       __skb_unlink(p, list);
+                       kfree_skb(p);
                        continue;
                }
                if (PPP_MP_CB(p)->sequence != seq) {
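The reconstruction loop above leans on seq_before() for every decision; it is defined elsewhere in ppp_generic.c, and the sketch below shows the usual wrap-safe signed-difference form such a helper takes (an assumption about its exact definition, but the property demonstrated is the one the loop needs). This compiles and runs standalone:

        #include <stdio.h>
        #include <stdint.h>

        /* Wrap-safe "a comes before b" test: relies on the usual
         * two's-complement conversion of the unsigned difference. */
        static int seq_before(uint32_t a, uint32_t b)
        {
                return (int32_t)(a - b) < 0;
        }

        int main(void)
        {
                /* Works across the 32-bit wrap: 0xfffffffe is "before" 3. */
                printf("%d\n", seq_before(0xfffffffeu, 3));   /* prints 1 */
                printf("%d\n", seq_before(3, 0xfffffffeu));   /* prints 0 */
                return 0;
        }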
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
                        lost = 1;
                        seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
                                minseq + 1: PPP_MP_CB(p)->sequence;
-                       next = p;
-                       continue;
+                       goto again;
                }
 
                /*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
                    (PPP_MP_CB(head)->BEbits & B)) {
                        if (len > ppp->mrru + 2) {
                                ++ppp->dev->stats.rx_length_errors;
-                               printk(KERN_DEBUG "PPP: reconstructed packet"
-                                      " is too long (%d)\n", len);
-                       } else if (p == head) {
-                               /* fragment is complete packet - reuse skb */
-                               tail = p;
-                               skb = skb_get(p);
-                               break;
-                       } else if ((skb = dev_alloc_skb(len)) == NULL) {
-                               ++ppp->dev->stats.rx_missed_errors;
-                               printk(KERN_DEBUG "PPP: no memory for "
-                                      "reconstructed packet");
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "PPP: reconstructed packet"
+                                             " is too long (%d)\n", len);
                        } else {
                                tail = p;
                                break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
                 * and we haven't found a complete valid packet yet,
                 * we can discard up to and including this fragment.
                 */
-               if (PPP_MP_CB(p)->BEbits & E)
-                       head = next;
+               if (PPP_MP_CB(p)->BEbits & E) {
+                       struct sk_buff *tmp2;
 
+                       skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+                               __skb_unlink(p, list);
+                               kfree_skb(p);
+                       }
+                       head = skb_peek(list);
+                       if (!head)
+                               break;
+               }
                ++seq;
        }
 
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
                   signal a receive error. */
                if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
                        if (ppp->debug & 1)
-                               printk(KERN_DEBUG "  missed pkts %u..%u\n",
-                                      ppp->nextseq,
-                                      PPP_MP_CB(head)->sequence-1);
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "  missed pkts %u..%u\n",
+                                             ppp->nextseq,
+                                             PPP_MP_CB(head)->sequence-1);
                        ++ppp->dev->stats.rx_dropped;
                        ppp_receive_error(ppp);
                }
 
-               if (head != tail)
-                       /* copy to a single skb */
-                       for (p = head; p != tail->next; p = p->next)
-                               skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
-               ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
-               head = tail->next;
-       }
+               skb = head;
+               if (head != tail) {
+                       struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+                       p = skb_queue_next(list, head);
+                       __skb_unlink(skb, list);
+                       skb_queue_walk_from_safe(list, p, tmp) {
+                               __skb_unlink(p, list);
+                               *fragpp = p;
+                               p->next = NULL;
+                               fragpp = &p->next;
+
+                               skb->len += p->len;
+                               skb->data_len += p->len;
+                               skb->truesize += p->len;
+
+                               if (p == tail)
+                                       break;
+                       }
+               } else {
+                       __skb_unlink(skb, list);
+               }
 
-       /* Discard all the skbuffs that we have copied the data out of
-          or that we can't use. */
-       while ((p = list->next) != head) {
-               __skb_unlink(p, list);
-               kfree_skb(p);
+               ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
        }
 
        return skb;
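The rewritten tail of ppp_mp_reconstruct() avoids the old allocate-and-copy step by chaining the remaining fragments onto the head skb's frag_list with a write-through tail pointer (fragpp). The idiom is easier to see outside the sk_buff plumbing; below is a self-contained userspace toy (struct buf is a stand-in, not sk_buff) that appends a list in order with a single pointer-to-pointer, the same way the hunk does:

        #include <stdio.h>

        /* Toy stand-in for sk_buff: a payload length and a ->next link. */
        struct buf {
                int len;
                struct buf *next;
        };

        /* Keep a pointer to the slot that should receive the next fragment,
         * so the chain is built in order without re-walking it. */
        static void chain_fragments(struct buf *head, struct buf **frags, int n)
        {
                struct buf **tailp = &head->next;   /* like &skb_shinfo(skb)->frag_list */

                for (int i = 0; i < n; i++) {
                        *tailp = frags[i];
                        frags[i]->next = NULL;
                        tailp = &frags[i]->next;

                        head->len += frags[i]->len; /* like the skb->len updates */
                }
        }

        int main(void)
        {
                struct buf head = { .len = 10 }, a = { .len = 20 }, b = { .len = 30 };
                struct buf *frags[] = { &a, &b };

                chain_fragments(&head, frags, 2);

                printf("total len %d, chain:", head.len);
                for (struct buf *p = head.next; p; p = p->next)
                        printf(" %d", p->len);
                printf("\n");
                return 0;
        }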
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ret = register_netdev(dev);
        if (ret != 0) {
                unit_put(&pn->units_idr, unit);
-               printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
-                      dev->name, ret);
+               netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+                          dev->name, ret);
                goto out2;
        }
 
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
 
        if (!ppp->file.dead || ppp->n_channels) {
                /* "can't happen" */
-               printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
-                      "n_channels=%d !\n", ppp, ppp->file.dead,
-                      ppp->n_channels);
+               netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+                          "but dead=%d n_channels=%d !\n",
+                          ppp, ppp->file.dead, ppp->n_channels);
                return;
        }
 
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
 
        if (!pch->file.dead) {
                /* "can't happen" */
-               printk(KERN_ERR "ppp: destroying undead channel %p !\n",
-                      pch);
+               pr_err("ppp: destroying undead channel %p !\n", pch);
                return;
        }
        skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
 {
        /* should never happen */
        if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
-               printk(KERN_ERR "PPP: removing module but units remain!\n");
+               pr_err("PPP: removing module but units remain!\n");
        unregister_chrdev(PPP_MAJOR, "ppp");
        device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
        class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
 
 again:
        if (!idr_pre_get(p, GFP_KERNEL)) {
-               printk(KERN_ERR "PPP: No free memory for idr\n");
+               pr_err("PPP: No free memory for idr\n");
                return -ENOMEM;
        }
 
index 164cfad..51dfcf8 100644 (file)
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        struct pptp_opt *opt = &po->proto.pptp;
        struct pptp_gre_header *hdr;
        unsigned int header_len = sizeof(*hdr);
-       int err = 0;
        int islcp;
        int len;
        unsigned char *data;
@@ -190,18 +189,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (sk_pppox(po)->sk_state & PPPOX_DEAD)
                goto tx_error;
 
-       {
-               struct flowi fl = { .oif = 0,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = opt->dst_addr.sin_addr.s_addr,
-                                       .saddr = opt->src_addr.sin_addr.s_addr,
-                                       .tos = RT_TOS(0) } },
-                       .proto = IPPROTO_GRE };
-               err = ip_route_output_key(&init_net, &rt, &fl);
-               if (err)
-                       goto tx_error;
-       }
+       rt = ip_route_output_ports(&init_net, NULL,
+                                  opt->dst_addr.sin_addr.s_addr,
+                                  opt->src_addr.sin_addr.s_addr,
+                                  0, 0, IPPROTO_GRE,
+                                  RT_TOS(0), 0);
+       if (IS_ERR(rt))
+               goto tx_error;
+
        tdev = rt->dst.dev;
 
        max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
@@ -468,21 +463,17 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
        po->chan.private = sk;
        po->chan.ops = &pptp_chan_ops;
 
-       {
-               struct flowi fl = {
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = opt->dst_addr.sin_addr.s_addr,
-                                       .saddr = opt->src_addr.sin_addr.s_addr,
-                                       .tos = RT_CONN_FLAGS(sk) } },
-                       .proto = IPPROTO_GRE };
-               security_sk_classify_flow(sk, &fl);
-               if (ip_route_output_key(&init_net, &rt, &fl)) {
-                       error = -EHOSTUNREACH;
-                       goto end;
-               }
-               sk_setup_caps(sk, &rt->dst);
+       rt = ip_route_output_ports(&init_net, sk,
+                                  opt->dst_addr.sin_addr.s_addr,
+                                  opt->src_addr.sin_addr.s_addr,
+                                  0, 0,
+                                  IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+       if (IS_ERR(rt)) {
+               error = -EHOSTUNREACH;
+               goto end;
        }
+       sk_setup_caps(sk, &rt->dst);
+
        po->chan.mtu = dst_mtu(&rt->dst);
        if (!po->chan.mtu)
                po->chan.mtu = PPP_MTU;
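Both pptp hunks switch from ip_route_output_key() filling an out-parameter and returning an errno to ip_route_output_ports() returning either the route or an error encoded in the pointer, which is why the checks become IS_ERR(rt). For readers less familiar with that convention, here is a userspace re-creation of the ERR_PTR/IS_ERR/PTR_ERR helpers (the lookup_route() stand-in is hypothetical); the definitions mirror the kernel's, where the top 4095 pointer values are reserved for negative errnos:

        #include <stdio.h>

        #define MAX_ERRNO 4095

        static inline void *ERR_PTR(long error)      { return (void *)error; }
        static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
        static inline int   IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }

        static void *lookup_route(int fail)          /* hypothetical stand-in */
        {
                static int route;                    /* pretend: a struct rtable */
                return fail ? ERR_PTR(-113 /* -EHOSTUNREACH */) : (void *)&route;
        }

        int main(void)
        {
                void *rt = lookup_route(1);

                if (IS_ERR(rt))
                        printf("lookup failed: %ld\n", PTR_ERR(rt));
                else
                        printf("got route %p\n", rt);
                return 0;
        }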
index 1a3584e..2d21c60 100644 (file)
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
        u32 previousBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        /* Clock in a zero, then do the start bit */
        ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
        u32 dataBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
        u32 value;
        struct ql3xxx_port_registers __iomem *port_regs =
                qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
        struct ql3xxx_host_memory_registers __iomem *hmem_regs =
                (void __iomem *)port_regs;
        u32 delay = 10;
index 44e316f..dc44564 100644 (file)
@@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
 #define LINKEVENT_LINKSPEED_MBPS       0
 #define LINKEVENT_LINKSPEED_ENCODED    1
 
-#define AUTO_FW_RESET_ENABLED  0x01
 /* firmware response header:
  *     63:58 - message type
  *     57:56 - owner
@@ -1133,14 +1132,10 @@ struct qlcnic_eswitch {
 #define MAX_BW                 100     /* % of link speed */
 #define MAX_VLAN_ID            4095
 #define MIN_VLAN_ID            2
-#define MAX_TX_QUEUES          1
-#define MAX_RX_QUEUES          4
 #define DEFAULT_MAC_LEARN      1
 
 #define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
 #define IS_VALID_BW(bw)                (bw <= MAX_BW)
-#define IS_VALID_TX_QUEUES(que)        (que > 0 && que <= MAX_TX_QUEUES)
-#define IS_VALID_RX_QUEUES(que)        (que > 0 && que <= MAX_RX_QUEUES)
 
 struct qlcnic_pci_func_cfg {
        u16     func_type;
index 37c04b4..cd88c7e 100644 (file)
@@ -42,7 +42,7 @@ static int use_msi_x = 1;
 module_param(use_msi_x, int, 0444);
 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
 
-static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+static int auto_fw_reset = 1;
 module_param(auto_fw_reset, int, 0644);
 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
 
@@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
                if (adapter->need_fw_reset)
                        goto detach;
 
-               if (adapter->reset_context &&
-                   auto_fw_reset == AUTO_FW_RESET_ENABLED) {
+               if (adapter->reset_context && auto_fw_reset) {
                        qlcnic_reset_hw_context(adapter);
                        adapter->netdev->trans_start = jiffies;
                }
@@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
 
        qlcnic_dev_request_reset(adapter);
 
-       if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
+       if (auto_fw_reset)
                clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
 
        dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2982,7 +2981,7 @@ detach:
        adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
                QLCNIC_DEV_NEED_RESET;
 
-       if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+       if (auto_fw_reset &&
                !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
 
                qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
                if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
                        return QL_STATUS_INVALID_PARAM;
 
-               if (!IS_VALID_BW(np_cfg[i].min_bw)
-                               || !IS_VALID_BW(np_cfg[i].max_bw)
-                               || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
-                               || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
+               if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+                   !IS_VALID_BW(np_cfg[i].max_bw))
                        return QL_STATUS_INVALID_PARAM;
        }
        return 0;
index 27e6f6d..e3ebd90 100644 (file)
@@ -49,8 +49,8 @@
 #include <asm/processor.h>
 
 #define DRV_NAME       "r6040"
-#define DRV_VERSION    "0.26"
-#define DRV_RELDATE    "30May2010"
+#define DRV_VERSION    "0.27"
+#define DRV_RELDATE    "23Feb2011"
 
 /* PHY CHIP Address */
 #define PHY1_ADDR      1       /* For MAC1 */
@@ -69,6 +69,8 @@
 
 /* MAC registers */
 #define MCR0           0x00    /* Control register 0 */
+#define  MCR0_PROMISC  0x0020  /* Promiscuous mode */
+#define  MCR0_HASH_EN  0x0100  /* Enable multicast hash table function */
 #define MCR1           0x04    /* Control register 1 */
 #define  MAC_RST       0x0001  /* Reset the MAC */
 #define MBCR           0x08    /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
 {
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
-       u16 *adrp;
-       u16 reg;
        unsigned long flags;
        struct netdev_hw_addr *ha;
        int i;
+       u16 *adrp;
+       u16 hash_table[4] = { 0 };
+
+       spin_lock_irqsave(&lp->lock, flags);
 
-       /* MAC Address */
+       /* Keep our MAC Address */
        adrp = (u16 *)dev->dev_addr;
        iowrite16(adrp[0], ioaddr + MID_0L);
        iowrite16(adrp[1], ioaddr + MID_0M);
        iowrite16(adrp[2], ioaddr + MID_0H);
 
-       /* Promiscous Mode */
-       spin_lock_irqsave(&lp->lock, flags);
-
        /* Clear AMCP & PROM bits */
-       reg = ioread16(ioaddr) & ~0x0120;
-       if (dev->flags & IFF_PROMISC) {
-               reg |= 0x0020;
-               lp->mcr0 |= 0x0020;
-       }
-       /* Too many multicast addresses
-        * accept all traffic */
-       else if ((netdev_mc_count(dev) > MCAST_MAX) ||
-                (dev->flags & IFF_ALLMULTI))
-               reg |= 0x0020;
+       lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
 
-       iowrite16(reg, ioaddr);
-       spin_unlock_irqrestore(&lp->lock, flags);
+       /* Promiscuous mode */
+       if (dev->flags & IFF_PROMISC)
+               lp->mcr0 |= MCR0_PROMISC;
 
-       /* Build the hash table */
-       if (netdev_mc_count(dev) > MCAST_MAX) {
-               u16 hash_table[4];
-               u32 crc;
+       /* Enable multicast hash table function to
+        * receive all multicast packets. */
+       else if (dev->flags & IFF_ALLMULTI) {
+               lp->mcr0 |= MCR0_HASH_EN;
 
-               for (i = 0; i < 4; i++)
-                       hash_table[i] = 0;
+               for (i = 0; i < MCAST_MAX ; i++) {
+                       iowrite16(0, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0, ioaddr + MID_1H + 8 * i);
+               }
 
+               for (i = 0; i < 4; i++)
+                       hash_table[i] = 0xffff;
+       }
+       /* Use internal multicast address registers if the number of
+        * multicast addresses is not greater than MCAST_MAX. */
+       else if (netdev_mc_count(dev) <= MCAST_MAX) {
+               i = 0;
                netdev_for_each_mc_addr(ha, dev) {
-                       char *addrs = ha->addr;
+                       u16 *adrp = (u16 *) ha->addr;
+                       iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+                       iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+                       iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+                       i++;
+               }
+               while (i < MCAST_MAX) {
+                       iowrite16(0, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0, ioaddr + MID_1H + 8 * i);
+                       i++;
+               }
+       }
+       /* Otherwise, enable the multicast hash table function. */
+       else {
+               u32 crc;
 
-                       if (!(*addrs & 1))
-                               continue;
+               lp->mcr0 |= MCR0_HASH_EN;
+
+               for (i = 0; i < MCAST_MAX ; i++) {
+                       iowrite16(0, ioaddr + MID_1L + 8 * i);
+                       iowrite16(0, ioaddr + MID_1M + 8 * i);
+                       iowrite16(0, ioaddr + MID_1H + 8 * i);
+               }
 
-                       crc = ether_crc_le(6, addrs);
+               /* Build multicast hash table */
+               netdev_for_each_mc_addr(ha, dev) {
+                       u8 *addrs = ha->addr;
+
+                       crc = ether_crc(ETH_ALEN, addrs);
                        crc >>= 26;
-                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
-               /* Fill the MAC hash tables with their values */
+       }
+
+       iowrite16(lp->mcr0, ioaddr + MCR0);
+
+       /* Fill the MAC hash tables with their values */
+       if (lp->mcr0 & MCR0_HASH_EN) {
                iowrite16(hash_table[0], ioaddr + MAR0);
                iowrite16(hash_table[1], ioaddr + MAR1);
                iowrite16(hash_table[2], ioaddr + MAR2);
                iowrite16(hash_table[3], ioaddr + MAR3);
        }
-       /* Multicast Address 1~4 case */
-       i = 0;
-       netdev_for_each_mc_addr(ha, dev) {
-               if (i >= MCAST_MAX)
-                       break;
-               adrp = (u16 *) ha->addr;
-               iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
-               iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
-               iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
-               i++;
-       }
-       while (i < MCAST_MAX) {
-               iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
-               iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
-               iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
-               i++;
-       }
+
+       spin_unlock_irqrestore(&lp->lock, flags);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev,
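The hash branch above is the part worth double-checking: ether_crc() (the MSB-first Ethernet CRC) replaces ether_crc_le(), the top six CRC bits select one of 64 hash positions, bits 5..4 pick one of the four 16-bit MAR registers and bits 3..0 the bit within it (the old code used 15 - (crc & 0xf)). The standalone sketch below reproduces that bucket math; the bitwise CRC routine is the classic form that the kernel's ether_crc() helper is generally equivalent to (stated as an assumption for the sketch), and the program compiles and runs on its own:

        #include <stdio.h>
        #include <stdint.h>

        /* Bitwise MSB-first CRC-32, polynomial 0x04c11db7, bytes fed LSB
         * first: the classic ether_crc() used by old NIC drivers. */
        static uint32_t ether_crc(int len, const uint8_t *data)
        {
                uint32_t crc = 0xffffffffu;

                while (len-- > 0) {
                        uint8_t byte = *data++;
                        for (int bit = 0; bit < 8; bit++, byte >>= 1) {
                                uint32_t msb = crc >> 31;

                                crc <<= 1;
                                if (msb ^ (byte & 1))
                                        crc ^= 0x04c11db7u;
                        }
                }
                return crc;
        }

        int main(void)
        {
                /* 01:00:5e:00:00:01 - the all-hosts multicast group */
                const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
                uint32_t crc = ether_crc(6, mac) >> 26;   /* top 6 bits */

                printf("MAR register %u, bit %u\n", crc >> 4, crc & 0xf);
                return 0;
        }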
index bde7d61..5e40351 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
+#include <linux/pci-aspm.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -36,6 +37,7 @@
 
 #define FIRMWARE_8168D_1       "rtl_nic/rtl8168d-1.fw"
 #define FIRMWARE_8168D_2       "rtl_nic/rtl8168d-2.fw"
+#define FIRMWARE_8105E_1       "rtl_nic/rtl8105e-1.fw"
 
 #ifdef RTL8169_DEBUG
 #define assert(expr) \
@@ -123,6 +125,8 @@ enum mac_version {
        RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
        RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
        RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
+       RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
+       RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
 };
 
 #define _R(NAME,MAC,MASK) \
@@ -160,7 +164,9 @@ static const struct {
        _R("RTL8168d/8111d",    RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
        _R("RTL8168d/8111d",    RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
        _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
-       _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_28, 0xff7e1880)  // PCI-E
+       _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
+       _R("RTL8105e",          RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
+       _R("RTL8105e",          RTL_GIGA_MAC_VER_30, 0xff7e1880)  // PCI-E
 };
 #undef _R
 
@@ -267,9 +273,15 @@ enum rtl8168_8101_registers {
 #define        EPHYAR_REG_MASK                 0x1f
 #define        EPHYAR_REG_SHIFT                16
 #define        EPHYAR_DATA_MASK                0xffff
+       DLLPR                   = 0xd0,
+#define        PM_SWITCH                       (1 << 6)
        DBG_REG                 = 0xd1,
 #define        FIX_NAK_1                       (1 << 4)
 #define        FIX_NAK_2                       (1 << 3)
+       TWSI                    = 0xd2,
+       MCU                     = 0xd3,
+#define        EN_NDP                          (1 << 3)
+#define        EN_OOB_RESET                    (1 << 2)
        EFUSEAR                 = 0xdc,
 #define        EFUSEAR_FLAG                    0x80000000
 #define        EFUSEAR_WRITE_CMD               0x80000000
@@ -526,9 +538,6 @@ struct rtl8169_private {
        u16 napi_event;
        u16 intr_mask;
        int phy_1000_ctrl_reg;
-#ifdef CONFIG_R8169_VLAN
-       struct vlan_group *vlgrp;
-#endif
 
        struct mdio_ops {
                void (*write)(void __iomem *, int, int);
@@ -540,7 +549,7 @@ struct rtl8169_private {
                void (*up)(struct rtl8169_private *);
        } pll_power_ops;
 
-       int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+       int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
        int (*get_settings)(struct net_device *, struct ethtool_cmd *);
        void (*phy_reset_enable)(struct rtl8169_private *tp);
        void (*hw_start)(struct net_device *);
@@ -568,6 +577,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(RTL8169_VERSION);
 MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
+MODULE_FIRMWARE(FIRMWARE_8105E_1);
 
 static int rtl8169_open(struct net_device *dev);
 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -617,8 +627,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
        }
 }
 
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        int i;
 
        RTL_W8(ERIDR, cmd);
@@ -630,7 +641,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
                        break;
        }
 
-       ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+       ocp_write(tp, 0x1, 0x30, 0x00000001);
 }
 
 #define OOB_CMD_RESET          0x00
@@ -973,7 +984,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
                if (pm)
                        pm_request_resume(&tp->pci_dev->dev);
                netif_carrier_on(dev);
-               netif_info(tp, ifup, dev, "link up\n");
+               if (net_ratelimit())
+                       netif_info(tp, ifup, dev, "link up\n");
        } else {
                netif_carrier_off(dev);
                netif_info(tp, ifdown, dev, "link down\n");
@@ -1095,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev)
 }
 
 static int rtl8169_set_speed_tbi(struct net_device *dev,
-                                u8 autoneg, u16 speed, u8 duplex)
+                                u8 autoneg, u16 speed, u8 duplex, u32 ignored)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
@@ -1118,17 +1130,30 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
 }
 
 static int rtl8169_set_speed_xmii(struct net_device *dev,
-                                 u8 autoneg, u16 speed, u8 duplex)
+                                 u8 autoneg, u16 speed, u8 duplex, u32 adv)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        int giga_ctrl, bmcr;
+       int rc = -EINVAL;
+
+       rtl_writephy(tp, 0x1f, 0x0000);
 
        if (autoneg == AUTONEG_ENABLE) {
                int auto_nego;
 
                auto_nego = rtl_readphy(tp, MII_ADVERTISE);
-               auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
-                             ADVERTISE_100HALF | ADVERTISE_100FULL);
+               auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+                               ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+               if (adv & ADVERTISED_10baseT_Half)
+                       auto_nego |= ADVERTISE_10HALF;
+               if (adv & ADVERTISED_10baseT_Full)
+                       auto_nego |= ADVERTISE_10FULL;
+               if (adv & ADVERTISED_100baseT_Half)
+                       auto_nego |= ADVERTISE_100HALF;
+               if (adv & ADVERTISED_100baseT_Full)
+                       auto_nego |= ADVERTISE_100FULL;
+
                auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 
                giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
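The autoneg branch above now clears the 10/100 advertisement bits and rebuilds them from the ethtool ADVERTISED_* mask instead of unconditionally advertising everything. The translation is mechanical; the standalone sketch below shows it with the constant values copied from mii.h and ethtool.h (duplicated here only so the example compiles outside the kernel):

        #include <stdio.h>
        #include <stdint.h>

        #define ADVERTISED_10baseT_Half   (1 << 0)
        #define ADVERTISED_10baseT_Full   (1 << 1)
        #define ADVERTISED_100baseT_Half  (1 << 2)
        #define ADVERTISED_100baseT_Full  (1 << 3)

        #define ADVERTISE_10HALF   0x0020
        #define ADVERTISE_10FULL   0x0040
        #define ADVERTISE_100HALF  0x0080
        #define ADVERTISE_100FULL  0x0100

        /* Same translation the hunk performs by hand: ethtool ADVERTISED_*
         * mask in, MII_ADVERTISE register bits out (10/100 only). */
        static uint16_t ethtool_adv_to_mii_10_100(uint32_t adv)
        {
                uint16_t reg = 0;

                if (adv & ADVERTISED_10baseT_Half)
                        reg |= ADVERTISE_10HALF;
                if (adv & ADVERTISED_10baseT_Full)
                        reg |= ADVERTISE_10FULL;
                if (adv & ADVERTISED_100baseT_Half)
                        reg |= ADVERTISE_100HALF;
                if (adv & ADVERTISED_100baseT_Full)
                        reg |= ADVERTISE_100FULL;

                return reg;
        }

        int main(void)
        {
                uint32_t adv = ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;

                printf("MII_ADVERTISE bits: 0x%04x\n",
                       ethtool_adv_to_mii_10_100(adv));
                return 0;
        }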
@@ -1142,27 +1167,22 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
                    (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
                    (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
                    (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
-                   (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
-                       giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
-               } else {
+                   (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
+                   (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
+                   (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
+                       if (adv & ADVERTISED_1000baseT_Half)
+                               giga_ctrl |= ADVERTISE_1000HALF;
+                       if (adv & ADVERTISED_1000baseT_Full)
+                               giga_ctrl |= ADVERTISE_1000FULL;
+               } else if (adv & (ADVERTISED_1000baseT_Half |
+                                 ADVERTISED_1000baseT_Full)) {
                        netif_info(tp, link, dev,
                                   "PHY does not support 1000Mbps\n");
+                       goto out;
                }
 
                bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
 
-               if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
-                   (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
-                   (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
-                       /*
-                        * Wake up the PHY.
-                        * Vendor specific (0x1f) and reserved (0x0e) MII
-                        * registers.
-                        */
-                       rtl_writephy(tp, 0x1f, 0x0000);
-                       rtl_writephy(tp, 0x0e, 0x0000);
-               }
-
                rtl_writephy(tp, MII_ADVERTISE, auto_nego);
                rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
        } else {
@@ -1173,12 +1193,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
                else if (speed == SPEED_100)
                        bmcr = BMCR_SPEED100;
                else
-                       return -EINVAL;
+                       goto out;
 
                if (duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
-
-               rtl_writephy(tp, 0x1f, 0x0000);
        }
 
        tp->phy_1000_ctrl_reg = giga_ctrl;
@@ -1196,16 +1214,18 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
                }
        }
 
-       return 0;
+       rc = 0;
+out:
+       return rc;
 }
 
 static int rtl8169_set_speed(struct net_device *dev,
-                            u8 autoneg, u16 speed, u8 duplex)
+                            u8 autoneg, u16 speed, u8 duplex, u32 advertising)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        int ret;
 
-       ret = tp->set_speed(dev, autoneg, speed, duplex);
+       ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
 
        if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
                mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1220,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        int ret;
 
        spin_lock_irqsave(&tp->lock, flags);
-       ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+       ret = rtl8169_set_speed(dev,
+               cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
        spin_unlock_irqrestore(&tp->lock, flags);
 
        return ret;
@@ -1254,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
        return 0;
 }
 
-#ifdef CONFIG_R8169_VLAN
-
 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
                                      struct sk_buff *skb)
 {
@@ -1263,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
                TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
-static void rtl8169_vlan_rx_register(struct net_device *dev,
-                                    struct vlan_group *grp)
+#define NETIF_F_HW_VLAN_TX_RX  (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
+
+static void rtl8169_vlan_mode(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        unsigned long flags;
 
        spin_lock_irqsave(&tp->lock, flags);
-       tp->vlgrp = grp;
-       /*
-        * Do not disable RxVlan on 8110SCd.
-        */
-       if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
+       if (dev->features & NETIF_F_HW_VLAN_RX)
                tp->cp_cmd |= RxVlan;
        else
                tp->cp_cmd &= ~RxVlan;
        RTL_W16(CPlusCmd, tp->cp_cmd);
+       /* PCI commit */
        RTL_R16(CPlusCmd);
        spin_unlock_irqrestore(&tp->lock, flags);
+
+       dev->vlan_features = dev->features & ~NETIF_F_HW_VLAN_TX_RX;
 }
 
-static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
-                              struct sk_buff *skb, int polling)
+static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
 {
        u32 opts2 = le32_to_cpu(desc->opts2);
-       struct vlan_group *vlgrp = tp->vlgrp;
-       int ret;
 
-       if (vlgrp && (opts2 & RxVlanTag)) {
-               u16 vtag = swab16(opts2 & 0xffff);
+       if (opts2 & RxVlanTag)
+               __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
 
-               if (likely(polling))
-                       vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
-               else
-                       __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
-               ret = 0;
-       } else
-               ret = -1;
        desc->opts2 = 0;
-       return ret;
-}
-
-#else /* !CONFIG_R8169_VLAN */
-
-static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
-                                     struct sk_buff *skb)
-{
-       return 0;
-}
-
-static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
-                              struct sk_buff *skb, int polling)
-{
-       return -1;
 }
 
-#endif
-
 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -1491,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        }
 }
 
+static int rtl8169_set_flags(struct net_device *dev, u32 data)
+{
+       struct rtl8169_private *tp = netdev_priv(dev);
+       unsigned long old_feat = dev->features;
+       int rc;
+
+       if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
+           !(data & ETH_FLAG_RXVLAN)) {
+               netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
+               return -EINVAL;
+       }
+
+       rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
+       if (rc)
+               return rc;
+
+       if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
+               rtl8169_vlan_mode(dev);
+
+       return 0;
+}
+
 static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_drvinfo            = rtl8169_get_drvinfo,
        .get_regs_len           = rtl8169_get_regs_len,
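rtl8169_set_flags() above only reprograms the chip when the RX-VLAN feature bit actually changed, using the usual XOR-of-old-and-new test. Trivial, but worth spelling out; in the standalone sketch below the bit values are illustrative stand-ins for NETIF_F_HW_VLAN_TX/RX, not copied from netdevice.h:

        #include <stdio.h>
        #include <stdint.h>

        #define NETIF_F_HW_VLAN_TX  (1u << 7)   /* illustrative values only */
        #define NETIF_F_HW_VLAN_RX  (1u << 8)

        int main(void)
        {
                uint32_t old_feat = NETIF_F_HW_VLAN_TX;                     /* RX off */
                uint32_t new_feat = NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

                /* Act only when the RX-VLAN bit changed, not on every call. */
                if ((old_feat ^ new_feat) & NETIF_F_HW_VLAN_RX)
                        printf("reprogram RxVlan in CPlusCmd\n");
                else
                        printf("nothing to do\n");
                return 0;
        }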
@@ -1510,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_strings            = rtl8169_get_strings,
        .get_sset_count         = rtl8169_get_sset_count,
        .get_ethtool_stats      = rtl8169_get_ethtool_stats,
+       .set_flags              = rtl8169_set_flags,
+       .get_flags              = ethtool_op_get_flags,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1558,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
                { 0x7c800000, 0x30000000,       RTL_GIGA_MAC_VER_11 },
 
                /* 8101 family. */
+               { 0x7cf00000, 0x40a00000,       RTL_GIGA_MAC_VER_30 },
+               { 0x7cf00000, 0x40900000,       RTL_GIGA_MAC_VER_29 },
+               { 0x7c800000, 0x40800000,       RTL_GIGA_MAC_VER_30 },
                { 0x7cf00000, 0x34a00000,       RTL_GIGA_MAC_VER_09 },
                { 0x7cf00000, 0x24a00000,       RTL_GIGA_MAC_VER_09 },
                { 0x7cf00000, 0x34900000,       RTL_GIGA_MAC_VER_08 },
@@ -2434,6 +2453,33 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
 
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
+{
+       static const struct phy_reg phy_reg_init[] = {
+               { 0x1f, 0x0005 },
+               { 0x1a, 0x0000 },
+               { 0x1f, 0x0000 },
+
+               { 0x1f, 0x0004 },
+               { 0x1c, 0x0000 },
+               { 0x1f, 0x0000 },
+
+               { 0x1f, 0x0001 },
+               { 0x15, 0x7701 },
+               { 0x1f, 0x0000 }
+       };
+
+       /* Disable ALDPS before ram code */
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(100);
+
+       if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
+               netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+
+       rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
 static void rtl_hw_phy_config(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -2501,6 +2547,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
        case RTL_GIGA_MAC_VER_28:
                rtl8168d_4_hw_phy_config(tp);
                break;
+       case RTL_GIGA_MAC_VER_29:
+       case RTL_GIGA_MAC_VER_30:
+               rtl8105e_hw_phy_config(tp);
+               break;
 
        default:
                break;
@@ -2632,11 +2682,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 
        rtl8169_phy_reset(dev, tp);
 
-       /*
-        * rtl8169_set_speed_xmii takes good care of the Fast Ethernet
-        * only 8101. Don't panic.
-        */
-       rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
+       rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+               ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+               ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+               (tp->mii.supports_gmii ?
+                       ADVERTISED_1000baseT_Half |
+                       ADVERTISED_1000baseT_Full : 0));
 
        if (RTL_R8(PHYstatus) & TBI_Enable)
                netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2792,9 +2843,6 @@ static const struct net_device_ops rtl8169_netdev_ops = {
        .ndo_set_mac_address    = rtl_set_mac_address,
        .ndo_do_ioctl           = rtl8169_ioctl,
        .ndo_set_multicast_list = rtl_set_rx_mode,
-#ifdef CONFIG_R8169_VLAN
-       .ndo_vlan_rx_register   = rtl8169_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = rtl8169_netpoll,
 #endif
@@ -2867,8 +2915,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
 
-       if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+       if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+            (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+           (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
                return;
+       }
 
        if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
             (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2890,6 +2941,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_27:
+       case RTL_GIGA_MAC_VER_28:
                RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
                break;
        }
@@ -2899,12 +2952,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
 
-       if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+       if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+            (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+           (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
                return;
+       }
 
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_27:
+       case RTL_GIGA_MAC_VER_28:
                RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
                break;
        }
@@ -2939,6 +2997,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_09:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_16:
+       case RTL_GIGA_MAC_VER_29:
+       case RTL_GIGA_MAC_VER_30:
                ops->down       = r810x_pll_power_down;
                ops->up         = r810x_pll_power_up;
                break;
@@ -3008,6 +3068,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        mii->reg_num_mask = 0x1f;
        mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 
+       /* Disable ASPM completely, as it causes random device stops and full
+        * system hangs for some users of PCIe devices. */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                                    PCIE_LINK_STATE_CLKPM);
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pci_enable_device(pdev);
        if (rc < 0) {
@@ -3041,7 +3106,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_mwi_2;
        }
 
-       tp->cp_cmd = PCIMulRW | RxChkSum;
+       tp->cp_cmd = RxChkSum;
 
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3086,6 +3151,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Identify chip attached to board */
        rtl8169_get_mac_version(tp, ioaddr);
 
+       /*
+        * Pretend we are using VLANs; this bypasses a nasty bug where
+        * interrupts stop flowing under high load on 8110SCd controllers.
+        */
+       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+               tp->cp_cmd |= RxVlan;
+
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
 
@@ -3154,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
 
-#ifdef CONFIG_R8169_VLAN
-       dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
-       dev->features |= NETIF_F_GRO;
+       dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
 
        tp->intr_mask = 0xffff;
        tp->hw_start = cfg->hw_start;
@@ -3189,6 +3258,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_dev_run_wake(pdev))
                pm_runtime_put_noidle(&pdev->dev);
 
+       netif_carrier_off(dev);
+
 out:
        return rc;
 
@@ -3273,12 +3344,7 @@ static int rtl8169_open(struct net_device *dev)
 
        rtl8169_init_phy(dev, tp);
 
-       /*
-        * Pretend we are using VLANs; This bypasses a nasty bug where
-        * Interrupts stop flowing on high load on 8110SCd controllers.
-        */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+       rtl8169_vlan_mode(dev);
 
        rtl_pll_power_up(tp);
 
@@ -3315,7 +3381,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        /* Disable interrupts */
        rtl8169_irq_mask_and_ack(ioaddr);
 
-       if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+           tp->mac_version == RTL_GIGA_MAC_VER_28) {
                while (RTL_R8(TxPoll) & NPQ)
                        udelay(20);
 
@@ -3757,7 +3824,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
        RTL_W16(IntrMitigate, 0x5151);
 
        /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+           tp->mac_version == RTL_GIGA_MAC_VER_22) {
                tp->intr_event |= RxFIFOOver | PCSTimeout;
                tp->intr_event &= ~RxOverflow;
        }
@@ -3843,8 +3911,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        Cxpl_dbg_sel | \
        ASF | \
        PktCntrDisable | \
-       PCIDAC | \
-       PCIMulRW)
+       Mac_dbgo_sel)
 
 static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
@@ -3874,8 +3941,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
        if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
                RTL_W8(Config1, cfg1 & ~LEDS0);
 
-       RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
        rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }
 
@@ -3887,8 +3952,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 
        RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
-       RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
 }
 
 static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3898,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
        rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
 }
 
+static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+       static const struct ephy_info e_info_8105e_1[] = {
+               { 0x07, 0, 0x4000 },
+               { 0x19, 0, 0x0200 },
+               { 0x19, 0, 0x0020 },
+               { 0x1e, 0, 0x2000 },
+               { 0x03, 0, 0x0001 },
+               { 0x19, 0, 0x0100 },
+               { 0x19, 0, 0x0004 },
+               { 0x0a, 0, 0x0020 }
+       };
+
+       /* Force LAN exit from ASPM if Rx/Tx are not idle */
+       RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+       /* disable Early Tally Counter */
+       RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+
+       RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+       RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
+
+       rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+}
+
+static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+       rtl_hw_start_8105e_1(ioaddr, pdev);
+       rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+}
+
 static void rtl_hw_start_8101(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
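The e_info_8105e_1 entries above are { offset, mask, bits } triples for the external PHY: each one is presumably applied as a read-modify-write that clears mask and sets bits, so with a zero mask an entry simply ORs bits into the register. A sketch of that loop, assuming rtl_ephy_init() behaves this way and reusing the rtl_ephy_read()/rtl_ephy_write() accessors visible in the hunk above:

struct ephy_info {
	unsigned int offset;
	u16 mask;
	u16 bits;
};

/* Sketch: apply an ephy_info table as read-modify-write cycles. */
static void rtl_ephy_apply_sketch(void __iomem *ioaddr,
				  const struct ephy_info *e, int len)
{
	u16 w;

	while (len-- > 0) {
		w = rtl_ephy_read(ioaddr, e->offset);
		rtl_ephy_write(ioaddr, e->offset, (w & ~e->mask) | e->bits);
		e++;
	}
}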
@@ -3914,6 +4008,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
                }
        }
 
+       RTL_W8(Cfg9346, Cfg9346_Unlock);
+
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
                rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3926,16 +4022,22 @@ static void rtl_hw_start_8101(struct net_device *dev)
        case RTL_GIGA_MAC_VER_09:
                rtl_hw_start_8102e_2(ioaddr, pdev);
                break;
+
+       case RTL_GIGA_MAC_VER_29:
+               rtl_hw_start_8105e_1(ioaddr, pdev);
+               break;
+       case RTL_GIGA_MAC_VER_30:
+               rtl_hw_start_8105e_2(ioaddr, pdev);
+               break;
        }
 
-       RTL_W8(Cfg9346, Cfg9346_Unlock);
+       RTL_W8(Cfg9346, Cfg9346_Lock);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
        rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
-       tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+       tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
        RTL_W16(CPlusCmd, tp->cp_cmd);
 
        RTL_W16(IntrMitigate, 0x0000);
@@ -3945,14 +4047,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
        rtl_set_rx_tx_config_registers(tp);
 
-       RTL_W8(Cfg9346, Cfg9346_Lock);
-
        RTL_R8(IntrMask);
 
        rtl_set_rx_mode(dev);
 
-       RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
        RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
 
        RTL_W16(IntrMask, tp->intr_event);
@@ -4589,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
 
-                       if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
-                               if (likely(polling))
-                                       napi_gro_receive(&tp->napi, skb);
-                               else
-                                       netif_rx(skb);
-                       }
+                       rtl8169_rx_vlan_tag(desc, skb);
+
+                       if (likely(polling))
+                               napi_gro_receive(&tp->napi, skb);
+                       else
+                               netif_rx(skb);
 
                        dev->stats.rx_bytes += pkt_size;
                        dev->stats.rx_packets++;
@@ -4639,12 +4737,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        break;
                }
 
-               /* Work around for rx fifo overflow */
-               if (unlikely(status & RxFIFOOver) &&
-               (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-                       netif_stop_queue(dev);
-                       rtl8169_tx_timeout(dev);
-                       break;
+               if (unlikely(status & RxFIFOOver)) {
+                       switch (tp->mac_version) {
+                       /* Work around for rx fifo overflow */
+                       case RTL_GIGA_MAC_VER_11:
+                       case RTL_GIGA_MAC_VER_22:
+                       case RTL_GIGA_MAC_VER_26:
+                               netif_stop_queue(dev);
+                               rtl8169_tx_timeout(dev);
+                               goto done;
+                       /* Testers needed. */
+                       case RTL_GIGA_MAC_VER_17:
+                       case RTL_GIGA_MAC_VER_19:
+                       case RTL_GIGA_MAC_VER_20:
+                       case RTL_GIGA_MAC_VER_21:
+                       case RTL_GIGA_MAC_VER_23:
+                       case RTL_GIGA_MAC_VER_24:
+                       case RTL_GIGA_MAC_VER_27:
+                       case RTL_GIGA_MAC_VER_28:
+                       /* Experimental science. Pktgen proof. */
+                       case RTL_GIGA_MAC_VER_12:
+                       case RTL_GIGA_MAC_VER_25:
+                               if (status == RxFIFOOver)
+                                       goto done;
+                               break;
+                       default:
+                               break;
+                       }
                }
 
                if (unlikely(status & SYSErr)) {
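The two tests above are deliberately different: status & RxFIFOOver asks whether the overflow bit is set at all, while status == RxFIFOOver asks whether it is the only pending event, in which case nothing else needs servicing and the handler can bail out. The distinction in isolation (RXF_OVER is a stand-in bit value, not the driver's actual constant):

#include <stdbool.h>
#include <stdint.h>

#define RXF_OVER 0x0040		/* stand-in for the RxFIFOOver bit */

/* True if the overflow bit is set, possibly alongside other events. */
static bool fifo_over_set(uint16_t status)
{
	return status & RXF_OVER;
}

/* True if overflow is the only pending event, so an IRQ handler can
 * return early instead of running the TX/RX processing below it. */
static bool fifo_over_only(uint16_t status)
{
	return status == RXF_OVER;
}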
@@ -4680,7 +4799,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        (status & RxFIFOOver) ? (status | RxOverflow) : status);
                status = RTL_R16(IntrStatus);
        }
-
+done:
        return IRQ_RETVAL(handled);
 }
 
index 39c17ce..2ad6364 100644 (file)
@@ -7556,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        if (ring_data->lro) {
-                               u32 tcp_len;
+                               u32 tcp_len = 0;
                                u8 *tcp;
                                int ret = 0;
 
index 002bac7..b8bd936 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
                        channel->irq_mod_score = 0;
                }
 
+               efx_filter_rfs_expire(channel);
+
                /* There is no race here; although napi_disable() will
                 * only wait for napi_complete(), this isn't a problem
                 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_fini_rx_queue(rx_queue);
-               efx_for_each_channel_tx_queue(tx_queue, channel)
+               efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
                efx_fini_eventq(channel);
        }
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
-       efx_for_each_channel_tx_queue(tx_queue, channel)
+       efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
 }
@@ -1101,8 +1104,8 @@ static int efx_init_io(struct efx_nic *efx)
                rc = -EIO;
                goto fail3;
        }
-       efx->membase = ioremap_nocache(efx->membase_phys,
-                                      efx->type->mem_map_size);
+       efx->membase = ioremap_wc(efx->membase_phys,
+                                 efx->type->mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
        return count;
 }
 
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i, rc;
+
+       efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+       if (!efx->net_dev->rx_cpu_rmap)
+               return -ENOMEM;
+       for (i = 0; i < efx->n_rx_channels; i++) {
+               rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+                                     xentries[i].vector);
+               if (rc) {
+                       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+                       efx->net_dev->rx_cpu_rmap = NULL;
+                       return rc;
+               }
+       }
+#endif
+       return 0;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
 {
        int max_channels =
                min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
                                efx->n_tx_channels = efx->n_channels;
                                efx->n_rx_channels = efx->n_channels;
                        }
+                       rc = efx_init_rx_cpu_rmap(efx, xentries);
+                       if (rc) {
+                               pci_disable_msix(efx->pci_dev);
+                               return rc;
+                       }
                        for (i = 0; i < n_channels; i++)
                                efx_get_channel(efx, i)->irq =
                                        xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
                efx->n_tx_channels = 1;
                efx->legacy_irq = efx->pci_dev->irq;
        }
+
+       return 0;
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
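efx_init_rx_cpu_rmap() above builds the reverse map that accelerated RFS uses to learn which CPU services each RX channel's MSI-X vector; the same map is consulted via the .ndo_rx_flow_steer hook added further down. A condensed sketch of the allocate/add/free lifecycle, using only the <linux/cpu_rmap.h> helpers that appear in the hunk and mirroring its error handling:

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Sketch: register one MSI-X vector per RX channel in the reverse
 * CPU map; free the map and bail out if any registration fails. */
static int rx_rmap_build_sketch(struct net_device *net_dev,
				const struct msix_entry *xentries,
				unsigned int n_rx_channels)
{
	unsigned int i;
	int rc;

	net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(n_rx_channels);
	if (!net_dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(net_dev->rx_cpu_rmap);
			net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
	return 0;
}
#endif /* CONFIG_RFS_ACCEL */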
@@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
-       struct efx_channel *channel;
-       struct efx_tx_queue *tx_queue;
-
        efx->tx_channel_offset =
                separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-       /* Channel pointers were set in efx_init_struct() but we now
-        * need to clear them for TX queues in any RX-only channels. */
-       efx_for_each_channel(channel, efx) {
-               if (channel->channel - efx->tx_channel_offset >=
-                   efx->n_tx_channels) {
-                       efx_for_each_channel_tx_queue(tx_queue, channel)
-                               tx_queue->channel = NULL;
-               }
-       }
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 
        /* Determine the number of channels and queues by trying to hook
         * in MSI-X interrupts. */
-       efx_probe_interrupts(efx);
+       rc = efx_probe_interrupts(efx);
+       if (rc)
+               goto fail;
 
        if (efx->n_channels > 1)
                get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
        return 0;
+
+fail:
+       efx->type->remove(efx);
+       return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
        efx->irq_rx_adaptive = rx_adaptive;
        efx->irq_rx_moderation = rx_ticks;
        efx_for_each_channel(channel, efx) {
-               if (efx_channel_get_rx_queue(channel))
+               if (efx_channel_has_rx_queue(channel))
                        channel->irq_moderation = rx_ticks;
-               else if (efx_channel_get_tx_queue(channel, 0))
+               else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation = tx_ticks;
        }
 }
@@ -1848,6 +1873,10 @@ static const struct net_device_ops efx_netdev_ops = {
        .ndo_set_multicast_list = efx_set_multicast_list,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = efx_netpoll,
+#endif
+       .ndo_setup_tc           = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = efx_filter_rfs,
 #endif
 };
 
@@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        efx_for_each_channel(channel, efx) {
                struct efx_tx_queue *tx_queue;
-               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       tx_queue->core_txq = netdev_get_tx_queue(
-                               efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-               }
+               efx_for_each_channel_tx_queue(tx_queue, channel)
+                       efx_init_tx_queue_core_txq(tx_queue);
        }
 
        /* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+       efx->net_dev->rx_cpu_rmap = NULL;
+#endif
        efx_nic_fini_interrupt(efx);
        efx_fini_channels(efx);
        efx_fini_port(efx);
@@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
        int i, rc;
 
        /* Allocate and initialise a struct net_device and struct efx_nic */
-       net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+       net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+                                    EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (type->offload_features | NETIF_F_SG |
index d43a7e5..3d83a1f 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
                                    struct efx_filter_spec *spec);
 extern void efx_filter_clear_rx(struct efx_nic *efx,
                                enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                         u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+       if (channel->rfs_filters_added >= 60 &&
+           __efx_filter_rfs_expire(channel->efx, 100))
+               channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
index 0e8bb19..807178e 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
        enum {
                EFX_ETHTOOL_STAT_SOURCE_mac_stats,
                EFX_ETHTOOL_STAT_SOURCE_nic,
-               EFX_ETHTOOL_STAT_SOURCE_channel
+               EFX_ETHTOOL_STAT_SOURCE_channel,
+               EFX_ETHTOOL_STAT_SOURCE_tx_queue
        } source;
        unsigned offset;
        u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
        EFX_ETHTOOL_STAT(field, channel, n_##field,             \
                         unsigned int, efx_get_uint_stat)
 
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field)                       \
+       EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,           \
+                        unsigned int, efx_get_uint_stat)
+
 static struct efx_ethtool_stat efx_ethtool_stats[] = {
        EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
        EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
        EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
        EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
        EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+       EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+       EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+       EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+       EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
        EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
        EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
        EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
        if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
-               siena_print_fwver(efx, info->fw_version,
-                                 sizeof(info->fw_version));
+               efx_mcdi_print_fwver(efx, info->fw_version,
+                                    sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
 }
 
@@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
        struct efx_mac_stats *mac_stats = &efx->mac_stats;
        struct efx_ethtool_stat *stat;
        struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
        struct rtnl_link_stats64 temp;
        int i;
 
@@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
                                data[i] += stat->get_stat((void *)channel +
                                                          stat->offset);
                        break;
+               case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+                       data[i] = 0;
+                       efx_for_each_channel(channel, efx) {
+                               efx_for_each_channel_tx_queue(tx_queue, channel)
+                                       data[i] +=
+                                               stat->get_stat((void *)tx_queue
+                                                              + stat->offset);
+                       }
+                       break;
                }
        }
 }
@@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 {
        struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
-       unsigned long features;
+       u32 features;
 
        features = NETIF_F_TSO;
        if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+       u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
 
        if (enable)
                net_dev->features |= features;
@@ -569,9 +588,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                                  struct ethtool_test *test, u64 *data)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_self_tests efx_tests;
+       struct efx_self_tests *efx_tests;
        int already_up;
-       int rc;
+       int rc = -ENOMEM;
+
+       efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+       if (!efx_tests)
+               goto fail;
+
 
        ASSERT_RTNL();
        if (efx->state != STATE_RUNNING) {
@@ -589,13 +613,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "failed opening device.\n");
-                       goto fail2;
+                       goto fail1;
                }
        }
 
-       memset(&efx_tests, 0, sizeof(efx_tests));
-
-       rc = efx_selftest(efx, &efx_tests, test->flags);
+       rc = efx_selftest(efx, efx_tests, test->flags);
 
        if (!already_up)
                dev_close(efx->net_dev);
@@ -604,10 +626,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                   rc == 0 ? "passed" : "failed",
                   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
- fail2:
- fail1:
+fail1:
        /* Fill ethtool results structures */
-       efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+       efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+       kfree(efx_tests);
+fail:
        if (rc)
                test->flags |= ETH_TEST_FL_FAILED;
 }
@@ -631,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
        /* Find lowest IRQ moderation across all used TX queues */
        coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
        efx_for_each_channel(channel, efx) {
-               if (!efx_channel_get_tx_queue(channel, 0))
+               if (!efx_channel_has_tx_queues(channel))
                        continue;
                if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
                        if (channel->channel < efx->n_rx_channels)
@@ -676,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 
        /* If the channel is shared only allow RX parameters to be set */
        efx_for_each_channel(channel, efx) {
-               if (efx_channel_get_rx_queue(channel) &&
-                   efx_channel_get_tx_queue(channel, 0) &&
+               if (efx_channel_has_rx_queue(channel) &&
+                   efx_channel_has_tx_queues(channel) &&
                    tx_usecs) {
                        netif_err(efx, drv, efx->net_dev, "Channel is shared. "
                                  "Only RX coalescing may be set\n");
index 61ddd2c..734fcfb 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
        /* RX control FIFO thresholds (32 entries) */
        const unsigned ctrl_xon_thr = 20;
        const unsigned ctrl_xoff_thr = 25;
-       /* RX data FIFO thresholds (256-byte units; size varies) */
-       int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
-       int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
        efx_oword_t reg;
 
        efx_reado(efx, &reg, FR_AZ_RX_CFG);
        if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
                /* Data FIFO size is 5.5K */
-               if (data_xon_thr < 0)
-                       data_xon_thr = 512 >> 8;
-               if (data_xoff_thr < 0)
-                       data_xoff_thr = 2048 >> 8;
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
                                    huge_buf_size);
-               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
-               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
+               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
        } else {
                /* Data FIFO size is 80K; register fields moved */
-               if (data_xon_thr < 0)
-                       data_xon_thr = 27648 >> 8; /* ~3*max MTU */
-               if (data_xoff_thr < 0)
-                       data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
                                    huge_buf_size);
-               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
-               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
+               /* Send XON and XOFF at ~3 * max MTU away from empty/full */
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
index 2dd16f0..b9cc846 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index b49e843..2c9ee5d 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index d4722c4..95a980f 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/in.h>
+#include <net/ip.h>
 #include "efx.h"
 #include "filter.h"
 #include "io.h"
  */
 #define FILTER_CTL_SRCH_MAX 200
 
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
 enum efx_filter_table_id {
        EFX_FILTER_TABLE_RX_IP = 0,
        EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
 struct efx_filter_state {
        spinlock_t      lock;
        struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+       u32             *rps_flow_id;
+       unsigned        rps_expire_index;
+#endif
 };
 
 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
                             struct efx_filter_spec *spec, u32 key,
                             bool for_insert, int *depth_required)
 {
-       unsigned hash, incr, filter_idx, depth;
+       unsigned hash, incr, filter_idx, depth, depth_max;
        struct efx_filter_spec *cmp;
 
        hash = efx_filter_hash(key);
        incr = efx_filter_increment(key);
+       depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
+                    FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
 
        for (depth = 1, filter_idx = hash & (table->size - 1);
-            depth <= FILTER_CTL_SRCH_MAX &&
-                    test_bit(filter_idx, table->used_bitmap);
+            depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
             ++depth) {
                cmp = &table->spec[filter_idx];
                if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
        }
        if (!for_insert)
                return -ENOENT;
-       if (depth > FILTER_CTL_SRCH_MAX)
+       if (depth > depth_max)
                return -EBUSY;
 found:
        *depth_required = depth;
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
        spin_lock_init(&state->lock);
 
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+               state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+                                            sizeof(*state->rps_flow_id),
+                                            GFP_KERNEL);
+               if (!state->rps_flow_id)
+                       goto fail;
+#endif
                table = &state->table[EFX_FILTER_TABLE_RX_IP];
                table->id = EFX_FILTER_TABLE_RX_IP;
                table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
                kfree(state->table[table_id].used_bitmap);
                vfree(state->table[table_id].spec);
        }
+#ifdef CONFIG_RFS_ACCEL
+       kfree(state->rps_flow_id);
+#endif
        kfree(state);
 }
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_channel *channel;
+       struct efx_filter_state *state = efx->filter_state;
+       struct efx_filter_spec spec;
+       const struct iphdr *ip;
+       const __be16 *ports;
+       int nhoff;
+       int rc;
+
+       nhoff = skb_network_offset(skb);
+
+       if (skb->protocol != htons(ETH_P_IP))
+               return -EPROTONOSUPPORT;
+
+       /* RFS must validate the IP header length before calling us */
+       EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+       ip = (const struct iphdr *)(skb->data + nhoff);
+       if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+               return -EPROTONOSUPPORT;
+       EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+       ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+       rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+                                     ip->daddr, ports[1], ip->saddr, ports[0]);
+       if (rc)
+               return rc;
+
+       rc = efx_filter_insert_filter(efx, &spec, true);
+       if (rc < 0)
+               return rc;
+
+       /* Remember this so we can check whether to expire the filter later */
+       state->rps_flow_id[rc] = flow_id;
+       channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+       ++channel->rfs_filters_added;
+
+       netif_info(efx, rx_status, efx->net_dev,
+                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+                  (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+                  &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+                  rxq_index, flow_id, rc);
+
+       return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+       unsigned mask = table->size - 1;
+       unsigned index;
+       unsigned stop;
+
+       if (!spin_trylock_bh(&state->lock))
+               return false;
+
+       index = state->rps_expire_index;
+       stop = (index + quota) & mask;
+
+       while (index != stop) {
+               if (test_bit(index, table->used_bitmap) &&
+                   table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+                   rps_may_expire_flow(efx->net_dev,
+                                       table->spec[index].dmaq_id,
+                                       state->rps_flow_id[index], index)) {
+                       netif_info(efx, rx_status, efx->net_dev,
+                                  "expiring filter %d [flow %u]\n",
+                                  index, state->rps_flow_id[index]);
+                       efx_filter_table_clear_entry(efx, table, index);
+               }
+               index = (index + 1) & mask;
+       }
+
+       state->rps_expire_index = stop;
+       if (table->used == 0)
+               efx_filter_table_reset_search_depth(table);
+
+       spin_unlock_bh(&state->lock);
+       return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
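The expiry pass above walks the filter table in bounded chunks: each call scans at most quota slots starting at rps_expire_index and stores where it stopped, so successive NAPI polls eventually cover the whole power-of-two sized table without holding the lock for long. The wrap-around arithmetic in isolation, assuming quota is smaller than the table size:

/* Sketch: bounded circular scan over a power-of-two table.  Each call
 * visits at most 'quota' slots and returns the index to resume from,
 * which is the pattern __efx_filter_rfs_expire() uses above.
 * Assumes table_size is a power of two and quota < table_size. */
static unsigned int scan_chunk_sketch(unsigned int start, unsigned int quota,
				      unsigned int table_size,
				      void (*visit)(unsigned int index))
{
	unsigned int mask = table_size - 1;
	unsigned int index = start & mask;
	unsigned int stop = (index + quota) & mask;

	while (index != stop) {
		visit(index);
		index = (index + 1) & mask;
	}
	return stop;		/* caller stores this as the next 'start' */
}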
index 6da4ae2..d9d8c2e 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -48,9 +48,9 @@
  *   replacing the low 96 bits with zero does not affect functionality.
  * - If the host writes to the last dword address of such a register
  *   (i.e. the high 32 bits) the underlying register will always be
- *   written.  If the collector does not hold values for the low 96
- *   bits of the register, they will be written as zero.  Writing to
- *   the last qword does not have this effect and must not be done.
+ *   written.  If the collector and the current write together do not
+ *   provide values for all 128 bits of the register, the low 96 bits
+ *   will be written as zero.
  * - If the host writes to the address of any other part of such a
  *   register while the collector already holds values for some other
  *   register, the write is discarded and the collector maintains its
@@ -103,6 +103,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
 #endif
+       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -125,6 +126,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
        __raw_writel((__force u32)value->u32[0], membase + addr);
        __raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
+       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -139,6 +141,7 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 
        /* No lock required */
        _efx_writed(efx, value->u32[0], reg);
+       wmb();
 }
 
 /* Read a 128-bit CSR, locking as appropriate. */
@@ -237,12 +240,14 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
 
 #ifdef EFX_USE_QWORD_IO
        _efx_writeq(efx, value->u64[0], reg + 0);
+       _efx_writeq(efx, value->u64[1], reg + 8);
 #else
        _efx_writed(efx, value->u32[0], reg + 0);
        _efx_writed(efx, value->u32[1], reg + 4);
-#endif
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+       wmb();
 }
 #define efx_writeo_page(efx, value, reg, page)                         \
        _efx_writeo_page(efx, value,                                    \
index b716e82..5e118f0 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -94,14 +94,15 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
 
        efx_writed(efx, &hdr, pdu);
 
-       for (i = 0; i < inlen; i += 4)
+       for (i = 0; i < inlen; i += 4) {
                _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
-
-       /* Ensure the payload is written out before the header */
-       wmb();
+               /* use wmb() within loop to inhibit write combining */
+               wmb();
+       }
 
        /* ring the doorbell with a distinctive value */
        _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+       wmb();
 }
 
 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
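The wmb() calls added here and in io.h pair with the switch from ioremap_nocache() to ioremap_wc() earlier in the series: on a write-combining mapping the CPU may merge and reorder MMIO stores, so a write barrier is needed both between payload dwords (to stop them combining) and around the doorbell write that must not overtake them. A hedged sketch of the ordering pattern, reusing the _efx_writed() helper from the hunk above:

/* Sketch: publishing a buffer then ringing a doorbell over a
 * write-combining mapping.  Each wmb() flushes the WC buffer so the
 * device never sees the doorbell before the payload it announces. */
static void wc_publish_sketch(struct efx_nic *efx, const __le32 *payload,
			      size_t dwords, unsigned int pdu,
			      unsigned int doorbell)
{
	size_t i;

	for (i = 0; i < dwords; i++) {
		_efx_writed(efx, payload[i], pdu + 4 * i);
		wmb();	/* keep the dwords from being combined or reordered */
	}

	_efx_writed(efx, (__force __le32)0x45789abc, doorbell);
	wmb();		/* flush the doorbell write itself */
}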
@@ -602,7 +603,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
  **************************************************************************
  */
 
-int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
 {
        u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
        size_t outlength;
@@ -616,29 +617,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
        if (rc)
                goto fail;
 
-       if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
-               *version = 0;
-               *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
-               return 0;
-       }
-
        if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }
 
        ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
-       *version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
-                   ((u64)le16_to_cpu(ver_words[1]) << 32) |
-                   ((u64)le16_to_cpu(ver_words[2]) << 16) |
-                   le16_to_cpu(ver_words[3]));
-       *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
-
-       return 0;
+       snprintf(buf, len, "%u.%u.%u.%u",
+                le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+                le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+       return;
 
 fail:
        netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
-       return rc;
+       buf[0] = 0;
 }
 
 int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
index c792f1d..aced2a7 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build);
+extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
 extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                               bool *was_attached_out);
 extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
index f88f4bf..33f7294 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 90359e6..b86a15f 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 0e97eed..ec3f740 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 56b0266..19e68c2 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
        return spins ? spins : -ETIMEDOUT;
 }
 
-static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
 {
        int status;
 
-       if (LOOPBACK_INTERNAL(efx))
-               return 0;
-
        if (mmd != MDIO_MMD_AN) {
                /* Read MMD STATUS2 to check it is responding. */
                status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
                }
        }
 
-       /* Read MMD STATUS 1 to check for fault. */
-       status = efx_mdio_read(efx, mmd, MDIO_STAT1);
-       if (status & MDIO_STAT1_FAULT) {
-               if (fault_fatal) {
-                       netif_err(efx, hw, efx->net_dev,
-                                 "PHY MMD %d reporting fatal"
-                                 " fault: status %x\n", mmd, status);
-                       return -EIO;
-               } else {
-                       netif_dbg(efx, hw, efx->net_dev,
-                                 "PHY MMD %d reporting status"
-                                 " %x (expected)\n", mmd, status);
-               }
-       }
        return 0;
 }
 
@@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
        return rc;
 }
 
-int efx_mdio_check_mmds(struct efx_nic *efx,
-                       unsigned int mmd_mask, unsigned int fatal_mask)
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
 {
        int mmd = 0, probe_mmd, devs1, devs2;
        u32 devices;
@@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
 
        /* Check all required MMDs are responding and happy. */
        while (mmd_mask) {
-               if (mmd_mask & 1) {
-                       int fault_fatal = fatal_mask & 1;
-                       if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
-                               return -EIO;
-               }
+               if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+                       return -EIO;
                mmd_mask = mmd_mask >> 1;
-               fatal_mask = fatal_mask >> 1;
                mmd++;
        }
 
@@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
                          "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
                rc = -EINVAL;
        } else {
-               rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+               rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
        }
 
        mutex_unlock(&efx->mac_lock);
index 75791d3..df07039 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
                              int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
-int efx_mdio_check_mmds(struct efx_nic *efx,
-                       unsigned int mmd_mask, unsigned int fatal_mask);
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
 extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
index d386274..e646bfc 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 28df866..215d5c5 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
  *
  **************************************************************************/
 
-#define EFX_DRIVER_VERSION     "3.0"
+#define EFX_DRIVER_VERSION     "3.1"
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD   1
-#define EFX_TXQ_TYPES          2
-#define EFX_MAX_TX_QUEUES      (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC          2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD   1       /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI   2       /* flag */
+#define EFX_TXQ_TYPES          4
+#define EFX_MAX_TX_QUEUES      (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *     This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
        struct efx_tx_buffer *buffer;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
+       bool initialised;
        enum efx_flush_state flushed;
 
        /* Members used mainly on the completion path */
@@ -210,15 +214,17 @@ struct efx_tx_queue {
  *     If both this and page are %NULL, the buffer slot is currently free.
  * @page: The associated page buffer, if any.
  *     If both this and skb are %NULL, the buffer slot is currently free.
- * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
  */
 struct efx_rx_buffer {
        dma_addr_t dma_addr;
-       struct sk_buff *skb;
-       struct page *page;
-       char *data;
+       union {
+               struct sk_buff *skb;
+               struct page *page;
+       } u;
        unsigned int len;
+       bool is_page;
 };
 
 /**
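The efx_rx_buffer change above turns the separate skb/page pointers into a tagged union: is_page records which member of u is live and must be checked before either pointer is touched. A hypothetical helper showing the access pattern (not a function from the driver):

/* Sketch: release whichever member of the tagged union is live. */
static void rx_buf_release_sketch(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->is_page) {
		if (rx_buf->u.page) {
			put_page(rx_buf->u.page);
			rx_buf->u.page = NULL;
		}
	} else {
		if (rx_buf->u.skb) {
			dev_kfree_skb_any(rx_buf->u.skb);
			rx_buf->u.skb = NULL;
		}
	}
}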
@@ -358,6 +364,9 @@ struct efx_channel {
 
        unsigned int irq_count;
        unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+       unsigned int rfs_filters_added;
+#endif
 
        int rx_alloc_level;
        int rx_alloc_push_pages;
@@ -377,7 +386,7 @@ struct efx_channel {
        bool rx_pkt_csummed;
 
        struct efx_rx_queue rx_queue;
-       struct efx_tx_queue tx_queue[2];
+       struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -906,7 +915,7 @@ struct efx_nic_type {
        unsigned int phys_addr_channels;
        unsigned int tx_dc_base;
        unsigned int rx_dc_base;
-       unsigned long offload_features;
+       u32 offload_features;
        u32 reset_world_flags;
 };
 
@@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
        return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+       return channel->channel - channel->efx->tx_channel_offset <
+               channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-       struct efx_tx_queue *tx_queue = channel->tx_queue;
-       EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-       return tx_queue->channel ? tx_queue + type : NULL;
+       EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+                           type >= EFX_TXQ_TYPES);
+       return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+       return !(tx_queue->efx->net_dev->num_tc < 2 &&
+                tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)             \
-       for (_tx_queue = efx_channel_get_tx_queue(channel, 0);          \
-            _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+       if (!efx_channel_has_tx_queues(_channel))                       \
+               ;                                                       \
+       else                                                            \
+               for (_tx_queue = (_channel)->tx_queue;                  \
+                    _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+                            efx_tx_queue_used(_tx_queue);              \
+                    _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)    \
+       for (_tx_queue = (_channel)->tx_queue;                          \
+            _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;          \
             _tx_queue++)
 
 static inline struct efx_rx_queue *
@@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
        return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+       return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-       return channel->channel < channel->efx->n_rx_channels ?
-               &channel->rx_queue : NULL;
+       EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+       return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)             \
-       for (_rx_queue = efx_channel_get_rx_queue(channel);             \
-            _rx_queue;                                                 \
-            _rx_queue = NULL)
+       if (!efx_channel_has_rx_queue(_channel))                        \
+               ;                                                       \
+       else                                                            \
+               for (_rx_queue = &(_channel)->rx_queue;                 \
+                    _rx_queue;                                         \
+                    _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
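
The rewritten iterator macros above use the "if (!cond) ; else for (...)" shape: the whole construct stays a single statement that accepts a braced or unbraced body, skips channels that own no queues, and cannot capture a following else. A generic, illustrative sketch of the pattern (names here do not exist in the driver):

/* Illustrative pattern only. */
#include <stdio.h>

struct item { int value; };

#define for_each_item(_it, _arr, _n)                                  \
	if ((_n) == 0)                                                \
		;                                                     \
	else                                                          \
		for ((_it) = (_arr); (_it) < (_arr) + (_n); (_it)++)

int main(void)
{
	struct item items[] = { { 1 }, { 2 }, { 3 } };
	struct item *it;

	for_each_item(it, items, 3)
		printf("%d\n", it->value);

	/* Still parses correctly when nested in an if/else: the trailing
	 * else binds to the outer if, not to the macro's internal if. */
	if (1)
		for_each_item(it, items, 0)
			printf("never reached\n");
	else
		printf("also never reached\n");
	return 0;
}
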
index da38659..e839661 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
 #define RX_DC_ENTRIES 64
 #define RX_DC_ENTRIES_ORDER 3
 
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
 /* If EFX_MAX_INT_ERRORS internal errors occur within
  * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-       efx_oword_t tx_desc_ptr;
        struct efx_nic *efx = tx_queue->efx;
+       efx_oword_t reg;
 
        tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
        efx_init_special_buffer(efx, &tx_queue->txd);
 
        /* Push TX descriptor ring to card */
-       EFX_POPULATE_OWORD_10(tx_desc_ptr,
+       EFX_POPULATE_OWORD_10(reg,
                              FRF_AZ_TX_DESCQ_EN, 1,
                              FRF_AZ_TX_ISCSI_DDIG_EN, 0,
                              FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
                int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-               EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-               EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
                                    !csum);
        }
 
-       efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+       efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);
 
        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-               efx_oword_t reg;
-
                /* Only 128 bits in this register */
                BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
                        set_bit_le(tx_queue->queue, (void *)&reg);
                efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
        }
+
+       if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+               EFX_POPULATE_OWORD_1(reg,
+                                    FRF_BZ_TX_PACE,
+                                    (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                    FFE_BZ_TX_PACE_OFF :
+                                    FFE_BZ_TX_PACE_RESERVED);
+               efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+                                tx_queue->queue);
+       }
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
        /* Flush all tx queues in parallel */
        efx_for_each_channel(channel, efx) {
-               efx_for_each_channel_tx_queue(tx_queue, channel)
-                       efx_flush_tx_queue(tx_queue);
+               efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                       if (tx_queue->initialised)
+                               efx_flush_tx_queue(tx_queue);
+               }
        }
 
        /* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
                                        ++rx_pending;
                                }
                        }
-                       efx_for_each_channel_tx_queue(tx_queue, channel) {
-                               if (tx_queue->flushed != FLUSH_DONE)
+                       efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                               if (tx_queue->initialised &&
+                                   tx_queue->flushed != FLUSH_DONE)
                                        ++tx_pending;
                        }
                }
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
        /* Mark the queues as all flushed. We're going to return failure
         * leading to a reset, or fake up success anyway */
        efx_for_each_channel(channel, efx) {
-               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       if (tx_queue->flushed != FLUSH_DONE)
+               efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                       if (tx_queue->initialised &&
+                           tx_queue->flushed != FLUSH_DONE)
                                netif_err(efx, hw, efx->net_dev,
                                          "tx queue %d flush command timed out\n",
                                          tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
        efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+       if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+               EFX_POPULATE_OWORD_4(temp,
+                                    /* Default values */
+                                    FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+                                    FRF_BZ_TX_PACE_SB_AF, 0xb,
+                                    FRF_BZ_TX_PACE_FB_BASE, 0,
+                                    /* Allow large pace values in the
+                                     * fast bin. */
+                                    FRF_BZ_TX_PACE_BIN_TH,
+                                    FFE_BZ_TX_PACE_RESERVED);
+               efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+       }
 }
 
 /* Register dump */
index eb05869..d9de1b6 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 
 /**
  * struct siena_nic_data - Siena NIC state
- * @fw_version: Management controller firmware version
- * @fw_build: Firmware build number
  * @mcdi: Management-Controller-to-Driver Interface
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
-       u64 fw_version;
-       u32 fw_build;
        struct efx_mcdi_iface mcdi;
        int wol_filter_id;
 };
 
-extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-
 extern struct efx_nic_type falcon_a1_nic_type;
 extern struct efx_nic_type falcon_b0_nic_type;
 extern struct efx_nic_type siena_a0_nic_type;
@@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
 extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
 
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
index 1dab609..b3b7947 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index ea3ae00..55f9092 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 96430ed..cc2c86b 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
 #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
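
These two values are what efx_nic_init_tx() above writes into TX_PACE_TBL: pacing off for EFX_TXQ_TYPE_HIGHPRI queues, and the "reserved" value (fast bin, pace of zero) for everything else. A one-function sketch of that selection, relying on the two defines above and the HIGHPRI flag from net_driver.h; the helper name is invented:

#define EFX_TXQ_TYPE_HIGHPRI 2          /* flag, from net_driver.h */

/* Pace value chosen per hardware TX queue, as in efx_nic_init_tx() */
static unsigned int txq_pace_value(unsigned int queue)
{
	return (queue & EFX_TXQ_TYPE_HIGHPRI) ? FFE_BZ_TX_PACE_OFF
	                                      : FFE_BZ_TX_PACE_RESERVED;
}
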
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
index 3925fd6..c0fdb59 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+                                            struct efx_rx_buffer *buf)
 {
        /* Offset is always within one page, so we don't need to consider
         * the page order.
         */
-       return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+       return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+               efx->type->rx_buffer_hash_size);
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
        return PAGE_SIZE << efx->rx_buffer_order;
 }
 
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
+       if (buf->is_page)
+               return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+       else
+               return ((u8 *)buf->u.skb->data +
+                       efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+       /* The ethernet header is always directly after any hash. */
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
-       return __le32_to_cpup((const __le32 *)(buf->data - 4));
+       return __le32_to_cpup((const __le32 *)(eh - 4));
 #else
-       const u8 *data = (const u8 *)(buf->data - 4);
+       const u8 *data = eh - 4;
        return ((u32)data[0]       |
                (u32)data[1] << 8  |
                (u32)data[2] << 16 |
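
efx_rx_buf_hash() above reads the 32-bit RX hash that the NIC stores in the four bytes immediately before the Ethernet header, either with a single little-endian load or, on targets without efficient unaligned access, byte by byte. A userspace sketch of the byte-wise path; the buffer contents are invented:

/* Userspace stand-in for the byte-wise fallback above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0]       |
	       (uint32_t)p[1] << 8  |
	       (uint32_t)p[2] << 16 |
	       (uint32_t)p[3] << 24;
}

int main(void)
{
	/* 4-byte hash prefix followed by the start of an Ethernet header */
	uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12, 0xff, 0xff };
	const uint8_t *eh = buf + 4;   /* the driver reads the hash at eh - 4 */

	printf("rx hash = 0x%08" PRIx32 "\n", le32_load(eh - 4));  /* 0x12345678 */
	return 0;
}
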
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        struct efx_rx_buffer *rx_buf;
+       struct sk_buff *skb;
        int skb_len = efx->rx_buffer_len;
        unsigned index, count;
 
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
 
-               rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-               if (unlikely(!rx_buf->skb))
+               rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+               if (unlikely(!skb))
                        return -ENOMEM;
-               rx_buf->page = NULL;
 
                /* Adjust the SKB for padding and checksum */
-               skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+               skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
-               rx_buf->data = (char *)rx_buf->skb->data;
-               rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+               rx_buf->is_page = false;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-                                                 rx_buf->data, rx_buf->len,
+                                                 skb->data, rx_buf->len,
                                                  PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev,
                                                   rx_buf->dma_addr))) {
-                       dev_kfree_skb_any(rx_buf->skb);
-                       rx_buf->skb = NULL;
+                       dev_kfree_skb_any(skb);
+                       rx_buf->u.skb = NULL;
                        return -EIO;
                }
 
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
-               rx_buf->skb = NULL;
-               rx_buf->page = page;
-               rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+               rx_buf->u.page = page;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+               rx_buf->is_page = true;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
 {
-       if (rx_buf->page) {
+       if (rx_buf->is_page && rx_buf->u.page) {
                struct efx_rx_page_state *state;
 
-               EFX_BUG_ON_PARANOID(rx_buf->skb);
-
-               state = page_address(rx_buf->page);
+               state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
                        pci_unmap_page(efx->pci_dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                }
-       } else if (likely(rx_buf->skb)) {
+       } else if (!rx_buf->is_page && rx_buf->u.skb) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
 {
-       if (rx_buf->page) {
-               __free_pages(rx_buf->page, efx->rx_buffer_order);
-               rx_buf->page = NULL;
-       } else if (likely(rx_buf->skb)) {
-               dev_kfree_skb_any(rx_buf->skb);
-               rx_buf->skb = NULL;
+       if (rx_buf->is_page && rx_buf->u.page) {
+               __free_pages(rx_buf->u.page, efx->rx_buffer_order);
+               rx_buf->u.page = NULL;
+       } else if (!rx_buf->is_page && rx_buf->u.skb) {
+               dev_kfree_skb_any(rx_buf->u.skb);
+               rx_buf->u.skb = NULL;
        }
 }
 
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
 {
-       struct efx_rx_page_state *state = page_address(rx_buf->page);
+       struct efx_rx_page_state *state = page_address(rx_buf->u.page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;
 
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
        }
 
        ++state->refcnt;
-       get_page(rx_buf->page);
+       get_page(rx_buf->u.page);
 
        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
-       new_buf->skb = NULL;
-       new_buf->page = rx_buf->page;
-       new_buf->data = (void *)
-               ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+       new_buf->u.page = rx_buf->u.page;
        new_buf->len = rx_buf->len;
+       new_buf->is_page = true;
        ++rx_queue->added_count;
 }
 
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
        struct efx_rx_buffer *new_buf;
        unsigned index;
 
-       if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
-           page_count(rx_buf->page) == 1)
+       if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+           page_count(rx_buf->u.page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
 
        memcpy(new_buf, rx_buf, sizeof(*new_buf));
-       rx_buf->page = NULL;
-       rx_buf->skb = NULL;
+       rx_buf->u.page = NULL;
        ++rx_queue->added_count;
 }
 
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
-               *leak_packet = (rx_buf->skb != NULL);
+               *leak_packet = !rx_buf->is_page;
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
-                             bool checksummed)
+                             const u8 *eh, bool checksummed)
 {
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
 
        /* Pass the skb/page into the GRO engine */
-       if (rx_buf->page) {
+       if (rx_buf->is_page) {
                struct efx_nic *efx = channel->efx;
-               struct page *page = rx_buf->page;
+               struct page *page = rx_buf->u.page;
                struct sk_buff *skb;
 
-               EFX_BUG_ON_PARANOID(rx_buf->skb);
-               rx_buf->page = NULL;
+               rx_buf->u.page = NULL;
 
                skb = napi_get_frags(napi);
                if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                }
 
                if (efx->net_dev->features & NETIF_F_RXHASH)
-                       skb->rxhash = efx_rx_buf_hash(rx_buf);
+                       skb->rxhash = efx_rx_buf_hash(eh);
 
                skb_shinfo(skb)->frags[0].page = page;
                skb_shinfo(skb)->frags[0].page_offset =
-                       efx_rx_buf_offset(rx_buf);
+                       efx_rx_buf_offset(efx, rx_buf);
                skb_shinfo(skb)->frags[0].size = rx_buf->len;
                skb_shinfo(skb)->nr_frags = 1;
 
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 
                gro_result = napi_gro_frags(napi);
        } else {
-               struct sk_buff *skb = rx_buf->skb;
+               struct sk_buff *skb = rx_buf->u.skb;
 
-               EFX_BUG_ON_PARANOID(!skb);
                EFX_BUG_ON_PARANOID(!checksummed);
-               rx_buf->skb = NULL;
+               rx_buf->u.skb = NULL;
 
                gro_result = napi_gro_receive(napi, skb);
        }
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
        bool leak_packet = false;
 
        rx_buf = efx_rx_buffer(rx_queue, index);
-       EFX_BUG_ON_PARANOID(!rx_buf->data);
-       EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
-       EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
 
        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
-       prefetch(rx_buf->data);
+       prefetch(efx_rx_buf_eh(efx, rx_buf));
 
        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
-       rx_buf->len = len;
+       rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
        if (channel->rx_pkt)
                __efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
 {
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;
-
-       rx_buf->data += efx->type->rx_buffer_hash_size;
-       rx_buf->len -= efx->type->rx_buffer_hash_size;
+       u8 *eh = efx_rx_buf_eh(efx, rx_buf);
 
        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
-               efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+               efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                return;
        }
 
-       if (rx_buf->skb) {
-               prefetch(skb_shinfo(rx_buf->skb));
+       if (!rx_buf->is_page) {
+               skb = rx_buf->u.skb;
 
-               skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
-               skb_put(rx_buf->skb, rx_buf->len);
+               prefetch(skb_shinfo(skb));
+
+               skb_reserve(skb, efx->type->rx_buffer_hash_size);
+               skb_put(skb, rx_buf->len);
 
                if (efx->net_dev->features & NETIF_F_RXHASH)
-                       rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+                       skb->rxhash = efx_rx_buf_hash(eh);
 
                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
-               rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
-                                                      efx->net_dev);
+               skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-               skb_record_rx_queue(rx_buf->skb, channel->channel);
+               skb_record_rx_queue(skb, channel->channel);
        }
 
-       if (likely(checksummed || rx_buf->page)) {
-               efx_rx_packet_gro(channel, rx_buf, checksummed);
+       if (likely(checksummed || rx_buf->is_page)) {
+               efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
                return;
        }
 
        /* We now own the SKB */
-       skb = rx_buf->skb;
-       rx_buf->skb = NULL;
-       EFX_BUG_ON_PARANOID(!skb);
+       skb = rx_buf->u.skb;
+       rx_buf->u.skb = NULL;
 
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
index 0ebfb99..a0f49b3 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
                        goto out;
                }
 
-               /* Test both types of TX queue */
+               /* Test all enabled types of TX queue */
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        state->offload_csum = (tx_queue->queue &
                                               EFX_TXQ_TYPE_OFFLOAD);
index aed495a..dba5456 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index bf84561..e4dd898 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
        if (rc)
                goto fail1;
 
-       rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
-       if (rc) {
-               netif_err(efx, probe, efx->net_dev,
-                         "Failed to read MCPU firmware version - rc %d\n", rc);
-               goto fail1; /* MCPU absent? */
-       }
-
        /* Let the BMC know that the driver is now in charge of link and
         * filter settings. We must do this before we reset the NIC */
        rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
               FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
        efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 
-       if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
-               /* No MCDI operation has been defined to set thresholds */
-               netif_err(efx, hw, efx->net_dev,
-                         "ignoring RX flow control thresholds\n");
-
        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
        if (rc)
@@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
        efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
 }
 
-void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
-{
-       struct siena_nic_data *nic_data = efx->nic_data;
-       snprintf(buf, len, "%u.%u.%u.%u",
-                (unsigned int)(nic_data->fw_version >> 48),
-                (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
-                (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
-                (unsigned int)(nic_data->fw_version & 0xffff));
-}
-
 /**************************************************************************
  *
  * Wake on LAN
index 879b7f6..71f2e3e 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index f102912..efdceb3 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
                if (rc < 0)
                        return rc;
 
-               rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+               rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
                if (rc < 0)
                        return rc;
        }
index 2f5e9da..1398019 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
+       unsigned index, type;
 
        if (unlikely(efx->port_inhibited))
                return NETDEV_TX_BUSY;
 
-       tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-                                   skb->ip_summed == CHECKSUM_PARTIAL ?
-                                   EFX_TXQ_TYPE_OFFLOAD : 0);
+       index = skb_get_queue_mapping(skb);
+       type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+       if (index >= efx->n_tx_channels) {
+               index -= efx->n_tx_channels;
+               type |= EFX_TXQ_TYPE_HIGHPRI;
+       }
+       tx_queue = efx_get_tx_queue(efx, index, type);
 
        return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+       struct efx_nic *efx = tx_queue->efx;
+
+       /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+       tx_queue->core_txq =
+               netdev_get_tx_queue(efx->net_dev,
+                                   tx_queue->queue / EFX_TXQ_TYPES +
+                                   ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                    efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       unsigned tc;
+       int rc;
+
+       if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+               return -EINVAL;
+
+       if (num_tc == net_dev->num_tc)
+               return 0;
+
+       for (tc = 0; tc < num_tc; tc++) {
+               net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+               net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+       }
+
+       if (num_tc > net_dev->num_tc) {
+               /* Initialise high-priority queues as necessary */
+               efx_for_each_channel(channel, efx) {
+                       efx_for_each_possible_channel_tx_queue(tx_queue,
+                                                              channel) {
+                               if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+                                       continue;
+                               if (!tx_queue->buffer) {
+                                       rc = efx_probe_tx_queue(tx_queue);
+                                       if (rc)
+                                               return rc;
+                               }
+                               if (!tx_queue->initialised)
+                                       efx_init_tx_queue(tx_queue);
+                               efx_init_tx_queue_core_txq(tx_queue);
+                       }
+               }
+       } else {
+               /* Reduce number of classes before number of queues */
+               net_dev->num_tc = num_tc;
+       }
+
+       rc = netif_set_real_num_tx_queues(net_dev,
+                                         max_t(int, num_tc, 1) *
+                                         efx->n_tx_channels);
+       if (rc)
+               return rc;
+
+       /* Do not destroy high-priority queues when they become
+        * unused.  We would have to flush them first, and it is
+        * fairly difficult to flush a subset of TX queues.  Leave
+        * it to efx_fini_channels().
+        */
+
+       net_dev->num_tc = num_tc;
+       return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
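
efx_hard_start_xmit() above maps core TX queue index i to channel i with normal priority when i < n_tx_channels, and to channel i - n_tx_channels with EFX_TXQ_TYPE_HIGHPRI otherwise, while efx_init_tx_queue_core_txq() is documented as its inverse. A small sketch that checks the two directions round-trip, assuming a zero tx_channel_offset and a hardware queue numbering of channel * EFX_TXQ_TYPES + type, which is consistent with the code above:

/* Sketch only: constants from net_driver.h; helper names are invented. */
#include <assert.h>
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1
#define EFX_TXQ_TYPE_HIGHPRI 2
#define EFX_TXQ_TYPES        4

/* Forward direction: what efx_hard_start_xmit() does with the core index */
static unsigned core_to_hw(unsigned core, unsigned n_tx_channels, int csum)
{
	unsigned type = csum ? EFX_TXQ_TYPE_OFFLOAD : 0;

	if (core >= n_tx_channels) {
		core -= n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	return core * EFX_TXQ_TYPES + type;
}

/* Inverse direction: what efx_init_tx_queue_core_txq() computes */
static unsigned hw_to_core(unsigned hw, unsigned n_tx_channels)
{
	return hw / EFX_TXQ_TYPES +
	       ((hw & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
	unsigned n = 4, core;

	for (core = 0; core < 2 * n; core++) {
		assert(hw_to_core(core_to_hw(core, n, 0), n) == core);
		assert(hw_to_core(core_to_hw(core, n, 1), n) == core);
	}
	printf("core <-> hardware TX queue mapping round-trips\n");
	return 0;
}
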
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);
+
+       tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       if (!tx_queue->initialised)
+               return;
+
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);
 
+       tx_queue->initialised = false;
+
        /* Flush TX queue, remove descriptor ring */
        efx_nic_fini_tx(tx_queue);
 
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       if (!tx_queue->buffer)
+               return;
+
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);
index 351794a..d9886ad 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
                goto fail;
 
        /* Check that all the MMDs we expect are present and responding. */
-       rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
+       rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
        if (rc < 0)
                goto fail;
 
index e0d6308..e4dd3a7 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 819c175..e9e7a53 100644 (file)
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/ethtool.h>
 #include <asm/cacheflush.h>
 
 #include "sh_eth.h"
 
+#define SH_ETH_DEF_MSG_ENABLE \
+               (NETIF_MSG_LINK | \
+               NETIF_MSG_TIMER | \
+               NETIF_MSG_RX_ERR| \
+               NETIF_MSG_TX_ERR)
+
 /* There is CPU dependent code */
 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define SH_ETH_RESET_DEFAULT   1
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        if (mdp->duplex) /* Full */
-               writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
-               writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
                break;
        case 100:/* 100BASE */
-               writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
                break;
        default:
                break;
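
The conversions in this file replace raw writel()/readl() on ndev->base_addr with sh_eth_write()/sh_eth_read(). The real helpers live in sh_eth.h (not shown in this diff) and, judging from the reg_offset comparison added further down, index a per-variant register offset table so the same logical register name works on both ETHER and GETHER blocks. A minimal sketch of that idea, with invented register names and offsets:

/* Sketch only: a per-variant offset table indexed by a logical register
 * id.  Names and offsets below are invented for illustration; the real
 * tables and accessors are in sh_eth.h. */
#include <stdint.h>

enum reg_id { REG_ECMR, REG_EDMR, REG_NUM };

static const uint16_t offsets_ether[REG_NUM]  = { 0x160, 0x000 };
static const uint16_t offsets_gether[REG_NUM] = { 0x500, 0x000 };

struct eth_priv {
	volatile uint8_t *ioaddr;       /* mapped register base */
	const uint16_t *reg_offset;     /* offsets_ether or offsets_gether */
};

static uint32_t eth_read(struct eth_priv *p, enum reg_id r)
{
	return *(volatile uint32_t *)(p->ioaddr + p->reg_offset[r]);
}

static void eth_write(struct eth_priv *p, uint32_t val, enum reg_id r)
{
	*(volatile uint32_t *)(p->ioaddr + p->reg_offset[r]) = val;
}
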
@@ -89,29 +94,28 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
 };
 #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-#define SH_ETH_RESET_DEFAULT   1
+#define SH_ETH_HAS_BOTH_MODULES        1
+#define SH_ETH_HAS_TSU 1
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        if (mdp->duplex) /* Full */
-               writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
-               writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               writel(0, ioaddr + RTRATE);
+               sh_eth_write(ndev, 0, RTRATE);
                break;
        case 100:/* 100BASE */
-               writel(1, ioaddr + RTRATE);
+               sh_eth_write(ndev, 1, RTRATE);
                break;
        default:
                break;
@@ -138,24 +142,154 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .no_ade         = 1,
 };
 
+#define SH_GIGA_ETH_BASE       0xfee00000
+#define GIGA_MALR(port)                (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
+#define GIGA_MAHR(port)                (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
+static void sh_eth_chip_reset_giga(struct net_device *ndev)
+{
+       int i;
+       unsigned long mahr[2], malr[2];
+
+       /* save MAHR and MALR */
+       for (i = 0; i < 2; i++) {
+               malr[i] = readl(GIGA_MALR(i));
+               mahr[i] = readl(GIGA_MAHR(i));
+       }
+
+       /* reset device */
+       writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
+       mdelay(1);
+
+       /* restore MAHR and MALR */
+       for (i = 0; i < 2; i++) {
+               writel(malr[i], GIGA_MALR(i));
+               writel(mahr[i], GIGA_MAHR(i));
+       }
+}
+
+static int sh_eth_is_gether(struct sh_eth_private *mdp);
+static void sh_eth_reset(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int cnt = 100;
+
+       if (sh_eth_is_gether(mdp)) {
+               sh_eth_write(ndev, 0x03, EDSR);
+               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
+                               EDMR);
+               while (cnt > 0) {
+                       if (!(sh_eth_read(ndev, EDMR) & 0x3))
+                               break;
+                       mdelay(1);
+                       cnt--;
+               }
+               if (cnt < 0)
+                       printk(KERN_ERR "Device reset fail\n");
+
+               /* Table Init */
+               sh_eth_write(ndev, 0x0, TDLAR);
+               sh_eth_write(ndev, 0x0, TDFAR);
+               sh_eth_write(ndev, 0x0, TDFXR);
+               sh_eth_write(ndev, 0x0, TDFFR);
+               sh_eth_write(ndev, 0x0, RDLAR);
+               sh_eth_write(ndev, 0x0, RDFAR);
+               sh_eth_write(ndev, 0x0, RDFXR);
+               sh_eth_write(ndev, 0x0, RDFFR);
+       } else {
+               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
+                               EDMR);
+               mdelay(3);
+               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
+                               EDMR);
+       }
+}
+
+static void sh_eth_set_duplex_giga(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       if (mdp->duplex) /* Full */
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+       else            /* Half */
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate_giga(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       switch (mdp->speed) {
+       case 10: /* 10BASE */
+               sh_eth_write(ndev, 0x00000000, GECMR);
+               break;
+       case 100:/* 100BASE */
+               sh_eth_write(ndev, 0x00000010, GECMR);
+               break;
+       case 1000: /* 1000BASE */
+               sh_eth_write(ndev, 0x00000020, GECMR);
+               break;
+       default:
+               break;
+       }
+}
+
+/* SH7757(GETHERC) */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+       .chip_reset     = sh_eth_chip_reset_giga,
+       .set_duplex     = sh_eth_set_duplex_giga,
+       .set_rate       = sh_eth_set_rate_giga,
+
+       .ecsr_value     = ECSR_ICD | ECSR_MPD,
+       .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+       .tx_check       = EESR_TC1 | EESR_FTC,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+                         EESR_ECI,
+       .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+                         EESR_TFE,
+       .fdr_value      = 0x0000072f,
+       .rmcr_value     = 0x00000001,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .bculr          = 1,
+       .hw_swap        = 1,
+       .rpadir         = 1,
+       .rpadir_value   = 2 << 16,
+       .no_trimd       = 1,
+       .no_ade         = 1,
+};
+
+static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
+{
+       if (sh_eth_is_gether(mdp))
+               return &sh_eth_my_cpu_data_giga;
+       else
+               return &sh_eth_my_cpu_data;
+}
+
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 #define SH_ETH_HAS_TSU 1
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
        /* reset device */
-       writel(ARSTR_ARSTR, ARSTR);
+       sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);
 }
 
 static void sh_eth_reset(struct net_device *ndev)
 {
-       u32 ioaddr = ndev->base_addr;
        int cnt = 100;
 
-       writel(EDSR_ENALL, ioaddr + EDSR);
-       writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+       sh_eth_write(ndev, EDSR_ENALL, EDSR);
+       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
        while (cnt > 0) {
-               if (!(readl(ioaddr + EDMR) & 0x3))
+               if (!(sh_eth_read(ndev, EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
@@ -164,41 +298,39 @@ static void sh_eth_reset(struct net_device *ndev)
                printk(KERN_ERR "Device reset fail\n");
 
        /* Table Init */
-       writel(0x0, ioaddr + TDLAR);
-       writel(0x0, ioaddr + TDFAR);
-       writel(0x0, ioaddr + TDFXR);
-       writel(0x0, ioaddr + TDFFR);
-       writel(0x0, ioaddr + RDLAR);
-       writel(0x0, ioaddr + RDFAR);
-       writel(0x0, ioaddr + RDFXR);
-       writel(0x0, ioaddr + RDFFR);
+       sh_eth_write(ndev, 0x0, TDLAR);
+       sh_eth_write(ndev, 0x0, TDFAR);
+       sh_eth_write(ndev, 0x0, TDFXR);
+       sh_eth_write(ndev, 0x0, TDFFR);
+       sh_eth_write(ndev, 0x0, RDLAR);
+       sh_eth_write(ndev, 0x0, RDFAR);
+       sh_eth_write(ndev, 0x0, RDFXR);
+       sh_eth_write(ndev, 0x0, RDFFR);
 }
 
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        if (mdp->duplex) /* Full */
-               writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
-               writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               writel(GECMR_10, ioaddr + GECMR);
+               sh_eth_write(ndev, GECMR_10, GECMR);
                break;
        case 100:/* 100BASE */
-               writel(GECMR_100, ioaddr + GECMR);
+               sh_eth_write(ndev, GECMR_100, GECMR);
                break;
        case 1000: /* 1000BASE */
-               writel(GECMR_1000, ioaddr + GECMR);
+               sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
        default:
                break;
@@ -229,6 +361,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
+       .tsu            = 1,
 };
 
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -246,6 +379,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 #define SH_ETH_HAS_TSU 1
 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .tsu            = 1,
 };
 #endif
 
@@ -281,11 +415,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 /* Chip Reset */
 static void sh_eth_reset(struct net_device *ndev)
 {
-       u32 ioaddr = ndev->base_addr;
-
-       writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
        mdelay(3);
-       writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
 }
 #endif
 
@@ -334,13 +466,11 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
  */
 static void update_mac_address(struct net_device *ndev)
 {
-       u32 ioaddr = ndev->base_addr;
-
-       writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
-                 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
-                 ioaddr + MAHR);
-       writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
-                 ioaddr + MALR);
+       sh_eth_write(ndev,
+               (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+               (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+       sh_eth_write(ndev,
+               (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
 }
 
 /*
@@ -353,21 +483,36 @@ static void update_mac_address(struct net_device *ndev)
  */
 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 {
-       u32 ioaddr = ndev->base_addr;
-
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, 6);
        } else {
-               ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
-               ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
-               ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
-               ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
-               ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
-               ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
+               ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
+               ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
+               ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
+               ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
+               ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
+               ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
        }
 }
 
+static int sh_eth_is_gether(struct sh_eth_private *mdp)
+{
+       if (mdp->reg_offset == sh_eth_offset_gigabit)
+               return 1;
+       else
+               return 0;
+}
+
+static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
+{
+       if (sh_eth_is_gether(mdp))
+               return EDTRR_TRNS_GETHER;
+       else
+               return EDTRR_TRNS_ETHER;
+}
+
 struct bb_info {
+       void (*set_gate)(unsigned long addr);
        struct mdiobb_ctrl ctrl;
        u32 addr;
        u32 mmd_msk;/* MMD */
@@ -398,6 +543,10 @@ static int bb_read(u32 addr, u32 msk)
 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
 {
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+       if (bitbang->set_gate)
+               bitbang->set_gate(bitbang->addr);
+
        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
@@ -409,6 +558,9 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
 {
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 
+       if (bitbang->set_gate)
+               bitbang->set_gate(bitbang->addr);
+
        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
@@ -419,6 +571,10 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
 {
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+       if (bitbang->set_gate)
+               bitbang->set_gate(bitbang->addr);
+
        return bb_read(bitbang->addr, bitbang->mdi_msk);
 }
 
@@ -427,6 +583,9 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
 {
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 
+       if (bitbang->set_gate)
+               bitbang->set_gate(bitbang->addr);
+
        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
@@ -470,7 +629,6 @@ static void sh_eth_ring_free(struct net_device *ndev)
 /* format skb and descriptor buffer */
 static void sh_eth_ring_format(struct net_device *ndev)
 {
-       u32 ioaddr = ndev->base_addr;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
@@ -506,10 +664,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
-                       writel(mdp->rx_desc_dma, ioaddr + RDLAR);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-                       writel(mdp->rx_desc_dma, ioaddr + RDFAR);
-#endif
+                       sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
+                       if (sh_eth_is_gether(mdp))
+                               sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
                }
        }
 
@@ -528,10 +685,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
-                       writel(mdp->tx_desc_dma, ioaddr + TDLAR);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-                       writel(mdp->tx_desc_dma, ioaddr + TDFAR);
-#endif
+                       sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
+                       if (sh_eth_is_gether(mdp))
+                               sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
                }
        }
 
@@ -613,7 +769,6 @@ static int sh_eth_dev_init(struct net_device *ndev)
 {
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
        u_int32_t rx_int_var, tx_int_var;
        u32 val;
 
@@ -623,71 +778,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
-               writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
+               sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
 
        /* all sh_eth int mask */
-       writel(0, ioaddr + EESIPR);
+       sh_eth_write(ndev, 0, EESIPR);
 
 #if defined(__LITTLE_ENDIAN__)
        if (mdp->cd->hw_swap)
-               writel(EDMR_EL, ioaddr + EDMR);
+               sh_eth_write(ndev, EDMR_EL, EDMR);
        else
 #endif
-               writel(0, ioaddr + EDMR);
+               sh_eth_write(ndev, 0, EDMR);
 
        /* FIFO size set */
-       writel(mdp->cd->fdr_value, ioaddr + FDR);
-       writel(0, ioaddr + TFTR);
+       sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
+       sh_eth_write(ndev, 0, TFTR);
 
        /* Frame recv control */
-       writel(mdp->cd->rmcr_value, ioaddr + RMCR);
+       sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
 
        rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
        tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
-       writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
+       sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
 
        if (mdp->cd->bculr)
-               writel(0x800, ioaddr + BCULR);  /* Burst sycle set */
+               sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
 
-       writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
+       sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
 
        if (!mdp->cd->no_trimd)
-               writel(0, ioaddr + TRIMD);
+               sh_eth_write(ndev, 0, TRIMD);
 
        /* Recv frame limit set register */
-       writel(RFLR_VALUE, ioaddr + RFLR);
+       sh_eth_write(ndev, RFLR_VALUE, RFLR);
 
-       writel(readl(ioaddr + EESR), ioaddr + EESR);
-       writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
+       sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+       sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 
        /* PAUSE Prohibition */
-       val = (readl(ioaddr + ECMR) & ECMR_DM) |
+       val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
 
-       writel(val, ioaddr + ECMR);
+       sh_eth_write(ndev, val, ECMR);
 
        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);
 
        /* E-MAC Status Register clear */
-       writel(mdp->cd->ecsr_value, ioaddr + ECSR);
+       sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 
        /* E-MAC Interrupt Enable register */
-       writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
+       sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 
        /* Set MAC address */
        update_mac_address(ndev);
 
        /* mask reset */
        if (mdp->cd->apr)
-               writel(APR_AP, ioaddr + APR);
+               sh_eth_write(ndev, APR_AP, APR);
        if (mdp->cd->mpr)
-               writel(MPR_MP, ioaddr + MPR);
+               sh_eth_write(ndev, MPR_MP, MPR);
        if (mdp->cd->tpauser)
-               writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+               sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
        /* Setting the Rx mode will start the Rx process. */
-       writel(EDRRR_R, ioaddr + EDRRR);
+       sh_eth_write(ndev, EDRRR_R, EDRRR);
 
        netif_start_queue(ndev);
 
@@ -811,24 +966,37 @@ static int sh_eth_rx(struct net_device *ndev)
 
        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
-       if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
-               writel(EDRRR_R, ndev->base_addr + EDRRR);
+       if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+               sh_eth_write(ndev, EDRRR_R, EDRRR);
 
        return 0;
 }
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev)
+{
+       /* disable tx and rx */
+       sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
+               ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(struct net_device *ndev)
+{
+       /* enable tx and rx */
+       sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
+               (ECMR_RE | ECMR_TE), ECMR);
+}
+
 /* error control function */
 static void sh_eth_error(struct net_device *ndev, int intr_status)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
        u32 felic_stat;
        u32 link_stat;
        u32 mask;
 
        if (intr_status & EESR_ECI) {
-               felic_stat = readl(ioaddr + ECSR);
-               writel(felic_stat, ioaddr + ECSR);      /* clear int */
+               felic_stat = sh_eth_read(ndev, ECSR);
+               sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
                if (felic_stat & ECSR_ICD)
                        mdp->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
@@ -839,26 +1007,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                                else
                                        link_stat = PHY_ST_LINK;
                        } else {
-                               link_stat = (readl(ioaddr + PSR));
+                               link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
-                       if (!(link_stat & PHY_ST_LINK)) {
-                               /* Link Down : disable tx and rx */
-                               writel(readl(ioaddr + ECMR) &
-                                         ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
-                       } else {
+                       if (!(link_stat & PHY_ST_LINK)) {
+                               sh_eth_rcv_snd_disable(ndev);
+                       } else {
                                /* Link Up */
-                               writel(readl(ioaddr + EESIPR) &
-                                         ~DMAC_M_ECI, ioaddr + EESIPR);
+                               sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
+                                         ~DMAC_M_ECI, EESIPR);
                                /*clear int */
-                               writel(readl(ioaddr + ECSR),
-                                         ioaddr + ECSR);
-                               writel(readl(ioaddr + EESIPR) |
-                                         DMAC_M_ECI, ioaddr + EESIPR);
+                               sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
+                                         ECSR);
+                               sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
+                                         DMAC_M_ECI, EESIPR);
                                /* enable tx and rx */
-                               writel(readl(ioaddr + ECMR) |
-                                         (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+                               sh_eth_rcv_snd_enable(ndev);
                        }
                }
        }
@@ -867,6 +1032,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                /* Write back end. unused write back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
                        mdp->stats.tx_aborted_errors++;
+               if ((intr_status & EESR_TABT) && netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Transmit Abort\n");
        }
 
        if (intr_status & EESR_RABT) {
@@ -874,28 +1041,47 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        mdp->stats.rx_frame_errors++;
-                       dev_err(&ndev->dev, "Receive Frame Overflow\n");
+                       if (netif_msg_rx_err(mdp))
+                               dev_err(&ndev->dev, "Receive Frame Overflow\n");
                }
        }
 
-       if (!mdp->cd->no_ade) {
-               if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
-                   intr_status & EESR_TFE)
-                       mdp->stats.tx_fifo_errors++;
+       if (intr_status & EESR_TDE) {
+               /* Transmit Descriptor Empty int */
+               mdp->stats.tx_fifo_errors++;
+               if (netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+       }
+
+       if (intr_status & EESR_TFE) {
+               /* FIFO underflow */
+               mdp->stats.tx_fifo_errors++;
+               if (netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Transmit FIFO Underflow\n");
        }
 
        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                mdp->stats.rx_over_errors++;
 
-               if (readl(ioaddr + EDRRR) ^ EDRRR_R)
-                       writel(EDRRR_R, ioaddr + EDRRR);
-               dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+               if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
+                       sh_eth_write(ndev, EDRRR_R, EDRRR);
+               if (netif_msg_rx_err(mdp))
+                       dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }
+
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                mdp->stats.rx_fifo_errors++;
-               dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+               if (netif_msg_rx_err(mdp))
+                       dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+       }
+
+       if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+               /* Address Error */
+               mdp->stats.tx_fifo_errors++;
+               if (netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Address Error\n");
        }
 
        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -903,7 +1089,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                mask &= ~EESR_ADE;
        if (intr_status & mask) {
                /* Tx error */
-               u32 edtrr = readl(ndev->base_addr + EDTRR);
+               u32 edtrr = sh_eth_read(ndev, EDTRR);
                /* dmesg */
                dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
                                intr_status, mdp->cur_tx);
@@ -913,9 +1099,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                sh_eth_txfree(ndev);
 
                /* SH7712 BUG */
-               if (edtrr ^ EDTRR_TRNS) {
+               if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
                        /* tx dma start */
-                       writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
+                       sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
@@ -928,18 +1114,17 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
-       u32 ioaddr, intr_status = 0;
+       u32 intr_status = 0;
 
-       ioaddr = ndev->base_addr;
        spin_lock(&mdp->lock);
 
        /* Get interrupt status */
-       intr_status = readl(ioaddr + EESR);
+       intr_status = sh_eth_read(ndev, EESR);
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                        EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
                        cd->tx_check | cd->eesr_err_check)) {
-               writel(intr_status, ioaddr + EESR);
+               sh_eth_write(ndev, intr_status, EESR);
                ret = IRQ_HANDLED;
        } else
                goto other_irq;
@@ -982,7 +1167,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;
-       u32 ioaddr = ndev->base_addr;
        int new_state = 0;
 
        if (phydev->link != PHY_DOWN) {
@@ -1000,8 +1184,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                                mdp->cd->set_rate(ndev);
                }
                if (mdp->link == PHY_DOWN) {
-                       writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
-                                       | ECMR_DM, ioaddr + ECMR);
+                       sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
+                                       | ECMR_DM, ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
                }
@@ -1012,7 +1196,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                mdp->duplex = -1;
        }
 
-       if (new_state)
+       if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
 }
 
@@ -1032,7 +1216,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
 
        /* Try connect to PHY */
        phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
-                               0, PHY_INTERFACE_MODE_MII);
+                               0, mdp->phy_interface);
        if (IS_ERR(phydev)) {
                dev_err(&ndev->dev, "phy_connect failed\n");
                return PTR_ERR(phydev);
@@ -1063,6 +1247,131 @@ static int sh_eth_phy_start(struct net_device *ndev)
        return 0;
 }
 
+static int sh_eth_get_settings(struct net_device *ndev,
+                       struct ethtool_cmd *ecmd)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdp->lock, flags);
+       ret = phy_ethtool_gset(mdp->phydev, ecmd);
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
+       return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+               struct ethtool_cmd *ecmd)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdp->lock, flags);
+
+       /* disable tx and rx */
+       sh_eth_rcv_snd_disable(ndev);
+
+       ret = phy_ethtool_sset(mdp->phydev, ecmd);
+       if (ret)
+               goto error_exit;
+
+       if (ecmd->duplex == DUPLEX_FULL)
+               mdp->duplex = 1;
+       else
+               mdp->duplex = 0;
+
+       if (mdp->cd->set_duplex)
+               mdp->cd->set_duplex(ndev);
+
+error_exit:
+       mdelay(1);
+
+       /* enable tx and rx */
+       sh_eth_rcv_snd_enable(ndev);
+
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
+       return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdp->lock, flags);
+       ret = phy_start_aneg(mdp->phydev);
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
+       return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+       "rx_current", "tx_current",
+       "rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return SH_ETH_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+                       struct ethtool_stats *stats, u64 *data)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int i = 0;
+
+       /* device-specific stats */
+       data[i++] = mdp->cur_rx;
+       data[i++] = mdp->cur_tx;
+       data[i++] = mdp->dirty_rx;
+       data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+       switch (stringset) {
+       case ETH_SS_STATS:
+               memcpy(data, *sh_eth_gstrings_stats,
+                                       sizeof(sh_eth_gstrings_stats));
+               break;
+       }
+}
+
+static struct ethtool_ops sh_eth_ethtool_ops = {
+       .get_settings   = sh_eth_get_settings,
+       .set_settings   = sh_eth_set_settings,
+       .nway_reset             = sh_eth_nway_reset,
+       .get_msglevel   = sh_eth_get_msglevel,
+       .set_msglevel   = sh_eth_set_msglevel,
+       .get_link               = ethtool_op_get_link,
+       .get_strings    = sh_eth_get_strings,
+       .get_ethtool_stats  = sh_eth_get_ethtool_stats,
+       .get_sset_count     = sh_eth_get_sset_count,
+};
+
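The ethtool_ops block above adds link, autonegotiation and message-level control plus four driver-private counters; note that get_strings(), get_ethtool_stats() and get_sset_count() are coupled through SH_ETH_STATS_LEN, so all three must change together if a counter is ever added. The ops only take effect once attached in sh_eth_drv_probe(), roughly as in this sketch of the probe-time hookup (the SET_ETHTOOL_OPS() call also appears verbatim later in this patch):

	/* Attach the ethtool callbacks alongside the netdev ops. */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);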
 /* network device open function */
 static int sh_eth_open(struct net_device *ndev)
 {
@@ -1073,8 +1382,8 @@ static int sh_eth_open(struct net_device *ndev)
 
        ret = request_irq(ndev->irq, sh_eth_interrupt,
 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7764) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7757)
+       defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+       defined(CONFIG_CPU_SUBTYPE_SH7757)
                                IRQF_SHARED,
 #else
                                0,
@@ -1117,15 +1426,14 @@ out_free_irq:
 static void sh_eth_tx_timeout(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
        struct sh_eth_rxdesc *rxdesc;
        int i;
 
        netif_stop_queue(ndev);
 
-       /* worning message out. */
-       printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
-              " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
+       if (netif_msg_timer(mdp))
+               dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
+                       " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
 
        /* tx_errors count up */
        mdp->stats.tx_errors++;
@@ -1167,6 +1475,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
                if (!sh_eth_txfree(ndev)) {
+                       if (netif_msg_tx_queued(mdp))
+                               dev_warn(&ndev->dev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
@@ -1196,8 +1506,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        mdp->cur_tx++;
 
-       if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
-               writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
+       if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
+               sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
 
        return NETDEV_TX_OK;
 }
@@ -1206,17 +1516,16 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 static int sh_eth_close(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
        int ringsize;
 
        netif_stop_queue(ndev);
 
        /* Disable interrupts by clearing the interrupt mask. */
-       writel(0x0000, ioaddr + EESIPR);
+       sh_eth_write(ndev, 0x0000, EESIPR);
 
        /* Stop the chip's Tx and Rx processes. */
-       writel(0, ioaddr + EDTRR);
-       writel(0, ioaddr + EDRRR);
+       sh_eth_write(ndev, 0, EDTRR);
+       sh_eth_write(ndev, 0, EDRRR);
 
        /* PHY Disconnect */
        if (mdp->phydev) {
@@ -1247,25 +1556,24 @@ static int sh_eth_close(struct net_device *ndev)
 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       u32 ioaddr = ndev->base_addr;
 
        pm_runtime_get_sync(&mdp->pdev->dev);
 
-       mdp->stats.tx_dropped += readl(ioaddr + TROCR);
-       writel(0, ioaddr + TROCR);      /* (write clear) */
-       mdp->stats.collisions += readl(ioaddr + CDCR);
-       writel(0, ioaddr + CDCR);       /* (write clear) */
-       mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
-       writel(0, ioaddr + LCCR);       /* (write clear) */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-       mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
-       writel(0, ioaddr + CERCR);      /* (write clear) */
-       mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
-       writel(0, ioaddr + CEECR);      /* (write clear) */
-#else
-       mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
-       writel(0, ioaddr + CNDCR);      /* (write clear) */
-#endif
+       mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+       sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
+       mdp->stats.collisions += sh_eth_read(ndev, CDCR);
+       sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
+       mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+       sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
+       if (sh_eth_is_gether(mdp)) {
+               mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+               sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
+               mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+               sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
+       } else {
+               mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+               sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
+       }
        pm_runtime_put_sync(&mdp->pdev->dev);
 
        return &mdp->stats;
@@ -1291,48 +1599,46 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
 /* Multicast reception directions set */
 static void sh_eth_set_multicast_list(struct net_device *ndev)
 {
-       u32 ioaddr = ndev->base_addr;
-
        if (ndev->flags & IFF_PROMISC) {
                /* Set promiscuous. */
-               writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
-                         ioaddr + ECMR);
+               sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
+                               ECMR_PRM, ECMR);
        } else {
                /* Normal, unicast/broadcast-only mode. */
-               writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
-                         ioaddr + ECMR);
+               sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
+                               ECMR_MCT, ECMR);
        }
 }
+#endif /* SH_ETH_HAS_TSU */
 
 /* SuperH's TSU register init function */
-static void sh_eth_tsu_init(u32 ioaddr)
-{
-       writel(0, ioaddr + TSU_FWEN0);  /* Disable forward(0->1) */
-       writel(0, ioaddr + TSU_FWEN1);  /* Disable forward(1->0) */
-       writel(0, ioaddr + TSU_FCM);    /* forward fifo 3k-3k */
-       writel(0xc, ioaddr + TSU_BSYSL0);
-       writel(0xc, ioaddr + TSU_BSYSL1);
-       writel(0, ioaddr + TSU_PRISL0);
-       writel(0, ioaddr + TSU_PRISL1);
-       writel(0, ioaddr + TSU_FWSL0);
-       writel(0, ioaddr + TSU_FWSL1);
-       writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-       writel(0, ioaddr + TSU_QTAG0);  /* Disable QTAG(0->1) */
-       writel(0, ioaddr + TSU_QTAG1);  /* Disable QTAG(1->0) */
-#else
-       writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
-       writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
-#endif
-       writel(0, ioaddr + TSU_FWSR);   /* all interrupt status clear */
-       writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
-       writel(0, ioaddr + TSU_TEN);    /* Disable all CAM entry */
-       writel(0, ioaddr + TSU_POST1);  /* Disable CAM entry [ 0- 7] */
-       writel(0, ioaddr + TSU_POST2);  /* Disable CAM entry [ 8-15] */
-       writel(0, ioaddr + TSU_POST3);  /* Disable CAM entry [16-23] */
-       writel(0, ioaddr + TSU_POST4);  /* Disable CAM entry [24-31] */
+static void sh_eth_tsu_init(struct sh_eth_private *mdp)
+{
+       sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
+       sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
+       sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
+       sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
+       sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
+       sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
+       sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
+       sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
+       sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
+       sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
+       if (sh_eth_is_gether(mdp)) {
+               sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
+               sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
+       } else {
+               sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
+               sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
+       }
+       sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
+       sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
+       sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
+       sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
+       sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
+       sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
+       sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
 }
-#endif /* SH_ETH_HAS_TSU */
 
 /* MDIO bus release function */
 static int sh_mdio_release(struct net_device *ndev)
@@ -1355,7 +1661,8 @@ static int sh_mdio_release(struct net_device *ndev)
 }
 
 /* MDIO bus init function */
-static int sh_mdio_init(struct net_device *ndev, int id)
+static int sh_mdio_init(struct net_device *ndev, int id,
+                       struct sh_eth_plat_data *pd)
 {
        int ret, i;
        struct bb_info *bitbang;
@@ -1369,7 +1676,8 @@ static int sh_mdio_init(struct net_device *ndev, int id)
        }
 
        /* bitbang init */
-       bitbang->addr = ndev->base_addr + PIR;
+       bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
+       bitbang->set_gate = pd->set_mdio_gate;
        bitbang->mdi_msk = 0x08;
        bitbang->mdo_msk = 0x04;
        bitbang->mmd_msk = 0x02;/* MMD */
@@ -1420,6 +1728,28 @@ out:
        return ret;
 }
 
+static const u16 *sh_eth_get_register_offset(int register_type)
+{
+       const u16 *reg_offset = NULL;
+
+       switch (register_type) {
+       case SH_ETH_REG_GIGABIT:
+               reg_offset = sh_eth_offset_gigabit;
+               break;
+       case SH_ETH_REG_FAST_SH4:
+               reg_offset = sh_eth_offset_fast_sh4;
+               break;
+       case SH_ETH_REG_FAST_SH3_SH2:
+               reg_offset = sh_eth_offset_fast_sh3_sh2;
+               break;
+       default:
+               printk(KERN_ERR "Unknown register type (%d)\n", register_type);
+               break;
+       }
+
+       return reg_offset;
+}
+
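sh_eth_get_register_offset() is the runtime replacement for the old per-CPU #ifdef register maps: the board selects a layout through platform data and everything else indexes the chosen table. A hedged sketch of the board-side contract, using only the pd-> fields that sh_eth_drv_probe() and sh_mdio_init() read in this patch (the values and the gate callback are illustrative assumptions):

	/* Illustrative platform data for a GETHER-class controller. */
	static struct sh_eth_plat_data my_sh_eth_pdata = {
		.phy            = 0,                        /* assumed PHY address */
		.phy_interface  = PHY_INTERFACE_MODE_MII,
		.register_type  = SH_ETH_REG_GIGABIT,       /* selects sh_eth_offset_gigabit */
		.set_mdio_gate  = my_board_set_mdio_gate,   /* optional; may be NULL */
	};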
 static const struct net_device_ops sh_eth_netdev_ops = {
        .ndo_open               = sh_eth_open,
        .ndo_stop               = sh_eth_close,
@@ -1486,19 +1816,28 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
        /* get PHY ID */
        mdp->phy_id = pd->phy;
+       mdp->phy_interface = pd->phy_interface;
        /* EDMAC endian */
        mdp->edmac_endian = pd->edmac_endian;
        mdp->no_ether_link = pd->no_ether_link;
        mdp->ether_link_active_low = pd->ether_link_active_low;
+       mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
 
        /* set cpu data */
+#if defined(SH_ETH_HAS_BOTH_MODULES)
+       mdp->cd = sh_eth_get_cpu_data(mdp);
+#else
        mdp->cd = &sh_eth_my_cpu_data;
+#endif
        sh_eth_set_default_cpu_data(mdp->cd);
 
        /* set function */
        ndev->netdev_ops = &sh_eth_netdev_ops;
+       SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
        ndev->watchdog_timeo = TX_TIMEOUT;
 
+       /* debug message level */
+       mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
        mdp->post_rx = POST_RX >> (devno << 1);
        mdp->post_fw = POST_FW >> (devno << 1);
 
@@ -1507,13 +1846,23 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
        /* First device only init */
        if (!devno) {
+               if (mdp->cd->tsu) {
+                       struct resource *rtsu;
+                       rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+                       if (!rtsu) {
+                               dev_err(&pdev->dev, "TSU resource not found\n");
+                               ret = -ENODEV;
+                               goto out_release;
+                       }
+                       mdp->tsu_addr = ioremap(rtsu->start,
+                                               resource_size(rtsu));
+               }
                if (mdp->cd->chip_reset)
                        mdp->cd->chip_reset(ndev);
 
-#if defined(SH_ETH_HAS_TSU)
-               /* TSU init (Init only)*/
-               sh_eth_tsu_init(SH_TSU_ADDR);
-#endif
+               if (mdp->cd->tsu) {
+                       /* TSU init (Init only) */
+                       sh_eth_tsu_init(mdp);
+               }
        }
 
        /* network device register */
@@ -1522,7 +1871,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                goto out_release;
 
        /* mdio bus init */
-       ret = sh_mdio_init(ndev, pdev->id);
+       ret = sh_mdio_init(ndev, pdev->id, pd);
        if (ret)
                goto out_unregister;
 
@@ -1539,6 +1888,8 @@ out_unregister:
 
 out_release:
        /* net_dev free */
+       if (mdp->tsu_addr)
+               iounmap(mdp->tsu_addr);
        if (ndev)
                free_netdev(ndev);
 
@@ -1549,7 +1900,9 @@ out:
 static int sh_eth_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct sh_eth_private *mdp = netdev_priv(ndev);
 
+       iounmap(mdp->tsu_addr);
        sh_mdio_release(ndev);
        unregister_netdev(ndev);
        pm_runtime_disable(&pdev->dev);
index efa6422..c3048a6 100644 (file)
@@ -2,7 +2,7 @@
  *  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- *  Copyright (C) 2008-2009 Renesas Solutions Corp.
+ *  Copyright (C) 2008-2011 Renesas Solutions Corp.
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms and conditions of the GNU General Public License,
 #define ETHERSMALL             60
 #define PKT_BUF_SZ             1538
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-/* This CPU register maps is very difference by other SH4 CPU */
-
-/* Chip Base Address */
-# define SH_TSU_ADDR   0xFEE01800
-# define ARSTR         SH_TSU_ADDR
-
-/* Chip Registers */
-/* E-DMAC */
-# define EDSR    0x000
-# define EDMR    0x400
-# define EDTRR   0x408
-# define EDRRR   0x410
-# define EESR    0x428
-# define EESIPR  0x430
-# define TDLAR   0x010
-# define TDFAR   0x014
-# define TDFXR   0x018
-# define TDFFR   0x01C
-# define RDLAR   0x030
-# define RDFAR   0x034
-# define RDFXR   0x038
-# define RDFFR   0x03C
-# define TRSCER  0x438
-# define RMFCR   0x440
-# define TFTR    0x448
-# define FDR     0x450
-# define RMCR    0x458
-# define RPADIR  0x460
-# define FCFTR   0x468
-
-/* Ether Register */
-# define ECMR    0x500
-# define ECSR    0x510
-# define ECSIPR  0x518
-# define PIR     0x520
-# define PSR     0x528
-# define PIPR    0x52C
-# define RFLR    0x508
-# define APR     0x554
-# define MPR     0x558
-# define PFTCR  0x55C
-# define PFRCR  0x560
-# define TPAUSER 0x564
-# define GECMR   0x5B0
-# define BCULR   0x5B4
-# define MAHR    0x5C0
-# define MALR    0x5C8
-# define TROCR   0x700
-# define CDCR    0x708
-# define LCCR    0x710
-# define CEFCR   0x740
-# define FRECR   0x748
-# define TSFRCR  0x750
-# define TLFRCR  0x758
-# define RFCR    0x760
-# define CERCR   0x768
-# define CEECR   0x770
-# define MAFCR   0x778
-
-/* TSU Absolute Address */
-# define TSU_CTRST       0x004
-# define TSU_FWEN0       0x010
-# define TSU_FWEN1       0x014
-# define TSU_FCM         0x18
-# define TSU_BSYSL0      0x20
-# define TSU_BSYSL1      0x24
-# define TSU_PRISL0      0x28
-# define TSU_PRISL1      0x2C
-# define TSU_FWSL0       0x30
-# define TSU_FWSL1       0x34
-# define TSU_FWSLC       0x38
-# define TSU_QTAG0       0x40
-# define TSU_QTAG1       0x44
-# define TSU_FWSR        0x50
-# define TSU_FWINMK      0x54
-# define TSU_ADQT0       0x48
-# define TSU_ADQT1       0x4C
-# define TSU_VTAG0       0x58
-# define TSU_VTAG1       0x5C
-# define TSU_ADSBSY      0x60
-# define TSU_TEN         0x64
-# define TSU_POST1       0x70
-# define TSU_POST2       0x74
-# define TSU_POST3       0x78
-# define TSU_POST4       0x7C
-# define TSU_ADRH0       0x100
-# define TSU_ADRL0       0x104
-# define TSU_ADRH31      0x1F8
-# define TSU_ADRL31      0x1FC
-
-# define TXNLCR0         0x80
-# define TXALCR0         0x84
-# define RXNLCR0         0x88
-# define RXALCR0         0x8C
-# define FWNLCR0         0x90
-# define FWALCR0         0x94
-# define TXNLCR1         0xA0
-# define TXALCR1         0xA4
-# define RXNLCR1         0xA8
-# define RXALCR1         0xAC
-# define FWNLCR1         0xB0
-# define FWALCR1         0x40
-
-#elif defined(CONFIG_CPU_SH4)  /* #if defined(CONFIG_CPU_SUBTYPE_SH7763) */
-/* EtherC */
-#define ECMR           0x100
-#define RFLR           0x108
-#define ECSR           0x110
-#define ECSIPR         0x118
-#define PIR            0x120
-#define PSR            0x128
-#define RDMLR          0x140
-#define IPGR           0x150
-#define APR            0x154
-#define MPR            0x158
-#define TPAUSER                0x164
-#define RFCF           0x160
-#define TPAUSECR       0x168
-#define BCFRR          0x16c
-#define MAHR           0x1c0
-#define MALR           0x1c8
-#define TROCR          0x1d0
-#define CDCR           0x1d4
-#define LCCR           0x1d8
-#define CNDCR          0x1dc
-#define CEFCR          0x1e4
-#define FRECR          0x1e8
-#define TSFRCR         0x1ec
-#define TLFRCR         0x1f0
-#define RFCR           0x1f4
-#define MAFCR          0x1f8
-#define RTRATE         0x1fc
-
-/* E-DMAC */
-#define EDMR           0x000
-#define EDTRR          0x008
-#define EDRRR          0x010
-#define TDLAR          0x018
-#define RDLAR          0x020
-#define EESR           0x028
-#define EESIPR         0x030
-#define TRSCER         0x038
-#define RMFCR          0x040
-#define TFTR           0x048
-#define FDR            0x050
-#define RMCR           0x058
-#define TFUCR          0x064
-#define RFOCR          0x068
-#define FCFTR          0x070
-#define RPADIR         0x078
-#define TRIMD          0x07c
-#define RBWAR          0x0c8
-#define RDFAR          0x0cc
-#define TBRAR          0x0d4
-#define TDFAR          0x0d8
-#else /* #elif defined(CONFIG_CPU_SH4) */
-/* This section is SH3 or SH2 */
-#ifndef CONFIG_CPU_SUBTYPE_SH7619
-/* Chip base address */
-# define SH_TSU_ADDR  0xA7000804
-# define ARSTR           0xA7000800
-#endif
-/* Chip Registers */
-/* E-DMAC */
-# define EDMR  0x0000
-# define EDTRR 0x0004
-# define EDRRR 0x0008
-# define TDLAR 0x000C
-# define RDLAR 0x0010
-# define EESR  0x0014
-# define EESIPR        0x0018
-# define TRSCER        0x001C
-# define RMFCR 0x0020
-# define TFTR  0x0024
-# define FDR   0x0028
-# define RMCR  0x002C
-# define EDOCR 0x0030
-# define FCFTR 0x0034
-# define RPADIR        0x0038
-# define TRIMD 0x003C
-# define RBWAR 0x0040
-# define RDFAR 0x0044
-# define TBRAR 0x004C
-# define TDFAR 0x0050
-
-/* Ether Register */
-# define ECMR  0x0160
-# define ECSR  0x0164
-# define ECSIPR        0x0168
-# define PIR   0x016C
-# define MAHR  0x0170
-# define MALR  0x0174
-# define RFLR  0x0178
-# define PSR   0x017C
-# define TROCR 0x0180
-# define CDCR  0x0184
-# define LCCR  0x0188
-# define CNDCR 0x018C
-# define CEFCR 0x0194
-# define FRECR 0x0198
-# define TSFRCR        0x019C
-# define TLFRCR        0x01A0
-# define RFCR  0x01A4
-# define MAFCR 0x01A8
-# define IPGR  0x01B4
-# if defined(CONFIG_CPU_SUBTYPE_SH7710)
-# define APR   0x01B8
-# define MPR   0x01BC
-# define TPAUSER 0x1C4
-# define BCFR  0x1CC
-# endif /* CONFIG_CPU_SH7710 */
-
-/* TSU */
-# define TSU_CTRST     0x004
-# define TSU_FWEN0     0x010
-# define TSU_FWEN1     0x014
-# define TSU_FCM       0x018
-# define TSU_BSYSL0    0x020
-# define TSU_BSYSL1    0x024
-# define TSU_PRISL0    0x028
-# define TSU_PRISL1    0x02C
-# define TSU_FWSL0     0x030
-# define TSU_FWSL1     0x034
-# define TSU_FWSLC     0x038
-# define TSU_QTAGM0    0x040
-# define TSU_QTAGM1    0x044
-# define TSU_ADQT0     0x048
-# define TSU_ADQT1     0x04C
-# define TSU_FWSR      0x050
-# define TSU_FWINMK    0x054
-# define TSU_ADSBSY    0x060
-# define TSU_TEN       0x064
-# define TSU_POST1     0x070
-# define TSU_POST2     0x074
-# define TSU_POST3     0x078
-# define TSU_POST4     0x07C
-# define TXNLCR0       0x080
-# define TXALCR0       0x084
-# define RXNLCR0       0x088
-# define RXALCR0       0x08C
-# define FWNLCR0       0x090
-# define FWALCR0       0x094
-# define TXNLCR1       0x0A0
-# define TXALCR1       0x0A4
-# define RXNLCR1       0x0A8
-# define RXALCR1       0x0AC
-# define FWNLCR1       0x0B0
-# define FWALCR1       0x0B4
-
-#define TSU_ADRH0      0x0100
-#define TSU_ADRL0      0x0104
-#define TSU_ADRL31     0x01FC
-
-#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
-
-/* There are avoid compile error... */
-#if !defined(BCULR)
-#define BCULR  0x0fc
-#endif
-#if !defined(TRIMD)
-#define TRIMD  0x0fc
-#endif
-#if !defined(APR)
-#define APR    0x0fc
-#endif
-#if !defined(MPR)
-#define MPR    0x0fc
-#endif
-#if !defined(TPAUSER)
-#define TPAUSER        0x0fc
-#endif
+enum {
+       /* E-DMAC registers */
+       EDSR = 0,
+       EDMR,
+       EDTRR,
+       EDRRR,
+       EESR,
+       EESIPR,
+       TDLAR,
+       TDFAR,
+       TDFXR,
+       TDFFR,
+       RDLAR,
+       RDFAR,
+       RDFXR,
+       RDFFR,
+       TRSCER,
+       RMFCR,
+       TFTR,
+       FDR,
+       RMCR,
+       EDOCR,
+       TFUCR,
+       RFOCR,
+       FCFTR,
+       RPADIR,
+       TRIMD,
+       RBWAR,
+       TBRAR,
+
+       /* Ether registers */
+       ECMR,
+       ECSR,
+       ECSIPR,
+       PIR,
+       PSR,
+       RDMLR,
+       PIPR,
+       RFLR,
+       IPGR,
+       APR,
+       MPR,
+       PFTCR,
+       PFRCR,
+       RFCR,
+       RFCF,
+       TPAUSER,
+       TPAUSECR,
+       BCFR,
+       BCFRR,
+       GECMR,
+       BCULR,
+       MAHR,
+       MALR,
+       TROCR,
+       CDCR,
+       LCCR,
+       CNDCR,
+       CEFCR,
+       FRECR,
+       TSFRCR,
+       TLFRCR,
+       CERCR,
+       CEECR,
+       MAFCR,
+       RTRATE,
+
+       /* TSU Absolute address */
+       ARSTR,
+       TSU_CTRST,
+       TSU_FWEN0,
+       TSU_FWEN1,
+       TSU_FCM,
+       TSU_BSYSL0,
+       TSU_BSYSL1,
+       TSU_PRISL0,
+       TSU_PRISL1,
+       TSU_FWSL0,
+       TSU_FWSL1,
+       TSU_FWSLC,
+       TSU_QTAG0,
+       TSU_QTAG1,
+       TSU_QTAGM0,
+       TSU_QTAGM1,
+       TSU_FWSR,
+       TSU_FWINMK,
+       TSU_ADQT0,
+       TSU_ADQT1,
+       TSU_VTAG0,
+       TSU_VTAG1,
+       TSU_ADSBSY,
+       TSU_TEN,
+       TSU_POST1,
+       TSU_POST2,
+       TSU_POST3,
+       TSU_POST4,
+       TSU_ADRH0,
+       TSU_ADRL0,
+       TSU_ADRH31,
+       TSU_ADRL31,
+
+       TXNLCR0,
+       TXALCR0,
+       RXNLCR0,
+       RXALCR0,
+       FWNLCR0,
+       FWALCR0,
+       TXNLCR1,
+       TXALCR1,
+       RXNLCR1,
+       RXALCR1,
+       FWNLCR1,
+       FWALCR1,
+
+       /* This must always be the last entry. */
+       SH_ETH_MAX_REGISTER_OFFSET,
+};
+
+static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+       [EDSR]  = 0x0000,
+       [EDMR]  = 0x0400,
+       [EDTRR] = 0x0408,
+       [EDRRR] = 0x0410,
+       [EESR]  = 0x0428,
+       [EESIPR]        = 0x0430,
+       [TDLAR] = 0x0010,
+       [TDFAR] = 0x0014,
+       [TDFXR] = 0x0018,
+       [TDFFR] = 0x001c,
+       [RDLAR] = 0x0030,
+       [RDFAR] = 0x0034,
+       [RDFXR] = 0x0038,
+       [RDFFR] = 0x003c,
+       [TRSCER]        = 0x0438,
+       [RMFCR] = 0x0440,
+       [TFTR]  = 0x0448,
+       [FDR]   = 0x0450,
+       [RMCR]  = 0x0458,
+       [RPADIR]        = 0x0460,
+       [FCFTR] = 0x0468,
+
+       [ECMR]  = 0x0500,
+       [ECSR]  = 0x0510,
+       [ECSIPR]        = 0x0518,
+       [PIR]   = 0x0520,
+       [PSR]   = 0x0528,
+       [PIPR]  = 0x052c,
+       [RFLR]  = 0x0508,
+       [APR]   = 0x0554,
+       [MPR]   = 0x0558,
+       [PFTCR] = 0x055c,
+       [PFRCR] = 0x0560,
+       [TPAUSER]       = 0x0564,
+       [GECMR] = 0x05b0,
+       [BCULR] = 0x05b4,
+       [MAHR]  = 0x05c0,
+       [MALR]  = 0x05c8,
+       [TROCR] = 0x0700,
+       [CDCR]  = 0x0708,
+       [LCCR]  = 0x0710,
+       [CEFCR] = 0x0740,
+       [FRECR] = 0x0748,
+       [TSFRCR]        = 0x0750,
+       [TLFRCR]        = 0x0758,
+       [RFCR]  = 0x0760,
+       [CERCR] = 0x0768,
+       [CEECR] = 0x0770,
+       [MAFCR] = 0x0778,
+
+       [ARSTR] = 0x0000,
+       [TSU_CTRST]     = 0x0004,
+       [TSU_FWEN0]     = 0x0010,
+       [TSU_FWEN1]     = 0x0014,
+       [TSU_FCM]       = 0x0018,
+       [TSU_BSYSL0]    = 0x0020,
+       [TSU_BSYSL1]    = 0x0024,
+       [TSU_PRISL0]    = 0x0028,
+       [TSU_PRISL1]    = 0x002c,
+       [TSU_FWSL0]     = 0x0030,
+       [TSU_FWSL1]     = 0x0034,
+       [TSU_FWSLC]     = 0x0038,
+       [TSU_QTAG0]     = 0x0040,
+       [TSU_QTAG1]     = 0x0044,
+       [TSU_FWSR]      = 0x0050,
+       [TSU_FWINMK]    = 0x0054,
+       [TSU_ADQT0]     = 0x0048,
+       [TSU_ADQT1]     = 0x004c,
+       [TSU_VTAG0]     = 0x0058,
+       [TSU_VTAG1]     = 0x005c,
+       [TSU_ADSBSY]    = 0x0060,
+       [TSU_TEN]       = 0x0064,
+       [TSU_POST1]     = 0x0070,
+       [TSU_POST2]     = 0x0074,
+       [TSU_POST3]     = 0x0078,
+       [TSU_POST4]     = 0x007c,
+       [TSU_ADRH0]     = 0x0100,
+       [TSU_ADRL0]     = 0x0104,
+       [TSU_ADRH31]    = 0x01f8,
+       [TSU_ADRL31]    = 0x01fc,
+
+       [TXNLCR0]       = 0x0080,
+       [TXALCR0]       = 0x0084,
+       [RXNLCR0]       = 0x0088,
+       [RXALCR0]       = 0x008c,
+       [FWNLCR0]       = 0x0090,
+       [FWALCR0]       = 0x0094,
+       [TXNLCR1]       = 0x00a0,
+       [TXALCR1]       = 0x00a4,
+       [RXNLCR1]       = 0x00a8,
+       [RXALCR1]       = 0x00ac,
+       [FWNLCR1]       = 0x00b0,
+       [FWALCR1]       = 0x00b4,
+};
+
+static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+       [ECMR]  = 0x0100,
+       [RFLR]  = 0x0108,
+       [ECSR]  = 0x0110,
+       [ECSIPR]        = 0x0118,
+       [PIR]   = 0x0120,
+       [PSR]   = 0x0128,
+       [RDMLR] = 0x0140,
+       [IPGR]  = 0x0150,
+       [APR]   = 0x0154,
+       [MPR]   = 0x0158,
+       [TPAUSER]       = 0x0164,
+       [RFCF]  = 0x0160,
+       [TPAUSECR]      = 0x0168,
+       [BCFRR] = 0x016c,
+       [MAHR]  = 0x01c0,
+       [MALR]  = 0x01c8,
+       [TROCR] = 0x01d0,
+       [CDCR]  = 0x01d4,
+       [LCCR]  = 0x01d8,
+       [CNDCR] = 0x01dc,
+       [CEFCR] = 0x01e4,
+       [FRECR] = 0x01e8,
+       [TSFRCR]        = 0x01ec,
+       [TLFRCR]        = 0x01f0,
+       [RFCR]  = 0x01f4,
+       [MAFCR] = 0x01f8,
+       [RTRATE]        = 0x01fc,
+
+       [EDMR]  = 0x0000,
+       [EDTRR] = 0x0008,
+       [EDRRR] = 0x0010,
+       [TDLAR] = 0x0018,
+       [RDLAR] = 0x0020,
+       [EESR]  = 0x0028,
+       [EESIPR]        = 0x0030,
+       [TRSCER]        = 0x0038,
+       [RMFCR] = 0x0040,
+       [TFTR]  = 0x0048,
+       [FDR]   = 0x0050,
+       [RMCR]  = 0x0058,
+       [TFUCR] = 0x0064,
+       [RFOCR] = 0x0068,
+       [FCFTR] = 0x0070,
+       [RPADIR]        = 0x0078,
+       [TRIMD] = 0x007c,
+       [RBWAR] = 0x00c8,
+       [RDFAR] = 0x00cc,
+       [TBRAR] = 0x00d4,
+       [TDFAR] = 0x00d8,
+};
+
+static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+       [ECMR]  = 0x0160,
+       [ECSR]  = 0x0164,
+       [ECSIPR]        = 0x0168,
+       [PIR]   = 0x016c,
+       [MAHR]  = 0x0170,
+       [MALR]  = 0x0174,
+       [RFLR]  = 0x0178,
+       [PSR]   = 0x017c,
+       [TROCR] = 0x0180,
+       [CDCR]  = 0x0184,
+       [LCCR]  = 0x0188,
+       [CNDCR] = 0x018c,
+       [CEFCR] = 0x0194,
+       [FRECR] = 0x0198,
+       [TSFRCR]        = 0x019c,
+       [TLFRCR]        = 0x01a0,
+       [RFCR]  = 0x01a4,
+       [MAFCR] = 0x01a8,
+       [IPGR]  = 0x01b4,
+       [APR]   = 0x01b8,
+       [MPR]   = 0x01bc,
+       [TPAUSER]       = 0x01c4,
+       [BCFR]  = 0x01cc,
+
+       [ARSTR] = 0x0000,
+       [TSU_CTRST]     = 0x0004,
+       [TSU_FWEN0]     = 0x0010,
+       [TSU_FWEN1]     = 0x0014,
+       [TSU_FCM]       = 0x0018,
+       [TSU_BSYSL0]    = 0x0020,
+       [TSU_BSYSL1]    = 0x0024,
+       [TSU_PRISL0]    = 0x0028,
+       [TSU_PRISL1]    = 0x002c,
+       [TSU_FWSL0]     = 0x0030,
+       [TSU_FWSL1]     = 0x0034,
+       [TSU_FWSLC]     = 0x0038,
+       [TSU_QTAGM0]    = 0x0040,
+       [TSU_QTAGM1]    = 0x0044,
+       [TSU_ADQT0]     = 0x0048,
+       [TSU_ADQT1]     = 0x004c,
+       [TSU_FWSR]      = 0x0050,
+       [TSU_FWINMK]    = 0x0054,
+       [TSU_ADSBSY]    = 0x0060,
+       [TSU_TEN]       = 0x0064,
+       [TSU_POST1]     = 0x0070,
+       [TSU_POST2]     = 0x0074,
+       [TSU_POST3]     = 0x0078,
+       [TSU_POST4]     = 0x007c,
+
+       [TXNLCR0]       = 0x0080,
+       [TXALCR0]       = 0x0084,
+       [RXNLCR0]       = 0x0088,
+       [RXALCR0]       = 0x008c,
+       [FWNLCR0]       = 0x0090,
+       [FWALCR0]       = 0x0094,
+       [TXNLCR1]       = 0x00a0,
+       [TXALCR1]       = 0x00a4,
+       [RXNLCR1]       = 0x00a8,
+       [RXALCR1]       = 0x00ac,
+       [FWNLCR1]       = 0x00b0,
+       [FWALCR1]       = 0x00b4,
+
+       [TSU_ADRH0]     = 0x0100,
+       [TSU_ADRL0]     = 0x0104,
+       [TSU_ADRL31]    = 0x01fc,
+
+};
 
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4)
@@ -338,20 +400,14 @@ enum GECMR_BIT {
 enum DMAC_M_BIT {
        EDMR_EL = 0x40, /* Little endian */
        EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-       EDMR_SRST = 0x03,
-#else /* CONFIG_CPU_SUBTYPE_SH7763 */
-       EDMR_SRST = 0x01,
-#endif
+       EDMR_SRST_GETHER = 0x03,
+       EDMR_SRST_ETHER = 0x01,
 };
 
 /* EDTRR */
 enum DMAC_T_BIT {
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-       EDTRR_TRNS = 0x03,
-#else
-       EDTRR_TRNS = 0x01,
-#endif
+       EDTRR_TRNS_GETHER = 0x03,
+       EDTRR_TRNS_ETHER = 0x01,
 };
 
 /* EDRRR*/
@@ -695,6 +751,7 @@ struct sh_eth_cpu_data {
        unsigned mpr:1;                 /* EtherC have MPR */
        unsigned tpauser:1;             /* EtherC have TPAUSER */
        unsigned bculr:1;               /* EtherC have BCULR */
+       unsigned tsu:1;                 /* EtherC have TSU */
        unsigned hw_swap:1;             /* E-DMAC have DE bit in EDMR */
        unsigned rpadir:1;              /* E-DMAC have RPADIR */
        unsigned no_trimd:1;            /* E-DMAC DO NOT have TRIMD */
@@ -704,6 +761,8 @@ struct sh_eth_cpu_data {
 struct sh_eth_private {
        struct platform_device *pdev;
        struct sh_eth_cpu_data *cd;
+       const u16 *reg_offset;
+       void __iomem *tsu_addr;
        dma_addr_t rx_desc_dma;
        dma_addr_t tx_desc_dma;
        struct sh_eth_rxdesc *rx_ring;
@@ -722,6 +781,7 @@ struct sh_eth_private {
        struct mii_bus *mii_bus;        /* MDIO bus control */
        struct phy_device *phydev;      /* PHY device control */
        enum phy_state link;
+       phy_interface_t phy_interface;
        int msg_enable;
        int speed;
        int duplex;
@@ -746,4 +806,32 @@ static inline void sh_eth_soft_swap(char *src, int len)
 #endif
 }
 
+static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
+                               int enum_index)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       writel(data, ndev->base_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_read(struct net_device *ndev,
+                                       int enum_index)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       return readl(ndev->base_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
+                               unsigned long data, int enum_index)
+{
+       writel(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
+                                       int enum_index)
+{
+       return readl(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
 #endif /* #ifndef __SH_ETH_H__ */
index 5976d1d..84d4167 100644 (file)
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        sis_priv->mii_info.reg_num_mask = 0x1f;
 
        /* Get Mac address according to the chip revision */
-       pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
+       sis_priv->chipset_rev = pci_dev->revision;
        if(netif_msg_probe(sis_priv))
                printk(KERN_DEBUG "%s: detected revision %2.2x, "
                                "trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        /* save our host bridge revision */
        dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
        if (dev) {
-               pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev);
+               sis_priv->host_bridge_rev = dev->revision;
                pci_dev_put(dev);
        }
 
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
                                              "cur_rx:%4.4d, dirty_rx:%4.4d\n",
                                              net_dev->name, sis_priv->cur_rx,
                                              sis_priv->dirty_rx);
+                               dev_kfree_skb(skb);
                                break;
                        }
 
index 42daf98..35b28f4 100644 (file)
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       /* device is off until link detection */
-       netif_carrier_off(dev);
-
        return dev;
 }
 
index 7d85a38..2a91868 100644 (file)
@@ -4983,7 +4983,7 @@ static int sky2_suspend(struct device *dev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int sky2_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
index 726df61..43654a3 100644 (file)
@@ -81,6 +81,7 @@ static const char version[] =
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/workqueue.h>
+#include <linux/of.h>
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+       { .compatible = "smsc,lan91c94", },
+       { .compatible = "smsc,lan91c111", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
 static struct dev_pm_ops smc_drv_pm_ops = {
        .suspend        = smc_drv_suspend,
        .resume         = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
                .name   = CARDNAME,
                .owner  = THIS_MODULE,
                .pm     = &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+               .of_match_table = smc91x_match,
+#endif
        },
 };
 
index 64bfdae..d70bde9 100644 (file)
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
        smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
        smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
 
+       /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
+       spin_lock_irq(&pdata->mac_lock);
+       smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
+       spin_unlock_irq(&pdata->mac_lock);
+
        /* Make sure EEPROM has finished loading before setting GPIO_CFG */
        timeout = 50;
        while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
index 34a0af3..0e5f031 100644 (file)
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
        priv->hw = device;
 
-       if (device_can_wakeup(priv->device))
+       if (device_can_wakeup(priv->device)) {
                priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+               enable_irq_wake(dev->irq);
+       }
 
        return 0;
 }
index 1c5408f..c1a3448 100644 (file)
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
 
        if (txmac_stat & MAC_TXSTAT_URUN) {
                netdev_err(dev, "TX MAC xmit underrun\n");
-               gp->net_stats.tx_fifo_errors++;
+               dev->stats.tx_fifo_errors++;
        }
 
        if (txmac_stat & MAC_TXSTAT_MPE) {
                netdev_err(dev, "TX MAC max packet size error\n");
-               gp->net_stats.tx_errors++;
+               dev->stats.tx_errors++;
        }
 
        /* The rest are all cases of one of the 16-bit TX
         * counters expiring.
         */
        if (txmac_stat & MAC_TXSTAT_NCE)
-               gp->net_stats.collisions += 0x10000;
+               dev->stats.collisions += 0x10000;
 
        if (txmac_stat & MAC_TXSTAT_ECE) {
-               gp->net_stats.tx_aborted_errors += 0x10000;
-               gp->net_stats.collisions += 0x10000;
+               dev->stats.tx_aborted_errors += 0x10000;
+               dev->stats.collisions += 0x10000;
        }
 
        if (txmac_stat & MAC_TXSTAT_LCE) {
-               gp->net_stats.tx_aborted_errors += 0x10000;
-               gp->net_stats.collisions += 0x10000;
+               dev->stats.tx_aborted_errors += 0x10000;
+               dev->stats.collisions += 0x10000;
        }
 
        /* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
                u32 smac = readl(gp->regs + MAC_SMACHINE);
 
                netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
-               gp->net_stats.rx_over_errors++;
-               gp->net_stats.rx_fifo_errors++;
+               dev->stats.rx_over_errors++;
+               dev->stats.rx_fifo_errors++;
 
                ret = gem_rxmac_reset(gp);
        }
 
        if (rxmac_stat & MAC_RXSTAT_ACE)
-               gp->net_stats.rx_frame_errors += 0x10000;
+               dev->stats.rx_frame_errors += 0x10000;
 
        if (rxmac_stat & MAC_RXSTAT_CCE)
-               gp->net_stats.rx_crc_errors += 0x10000;
+               dev->stats.rx_crc_errors += 0x10000;
 
        if (rxmac_stat & MAC_RXSTAT_LCE)
-               gp->net_stats.rx_length_errors += 0x10000;
+               dev->stats.rx_length_errors += 0x10000;
 
        /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
         * events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
                if (netif_msg_rx_err(gp))
                        printk(KERN_DEBUG "%s: no buffer for rx frame\n",
                                gp->dev->name);
-               gp->net_stats.rx_dropped++;
+               dev->stats.rx_dropped++;
        }
 
        if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
                if (netif_msg_rx_err(gp))
                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                                gp->dev->name);
-               gp->net_stats.rx_errors++;
+               dev->stats.rx_errors++;
 
                goto do_reset;
        }
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
                                break;
                }
                gp->tx_skbs[entry] = NULL;
-               gp->net_stats.tx_bytes += skb->len;
+               dev->stats.tx_bytes += skb->len;
 
                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
                        entry = NEXT_TX(entry);
                }
 
-               gp->net_stats.tx_packets++;
+               dev->stats.tx_packets++;
                dev_kfree_skb_irq(skb);
        }
        gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 
 static int gem_rx(struct gem *gp, int work_to_do)
 {
+       struct net_device *dev = gp->dev;
        int entry, drops, work_done = 0;
        u32 done;
        __sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
 
                len = (status & RXDCTRL_BUFSZ) >> 16;
                if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
-                       gp->net_stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (len < ETH_ZLEN)
-                               gp->net_stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        if (len & RXDCTRL_BAD)
-                               gp->net_stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
 
                        /* We'll just return it to GEM. */
                drop_it:
-                       gp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        goto next;
                }
 
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
 
                netif_receive_skb(skb);
 
-               gp->net_stats.rx_packets++;
-               gp->net_stats.rx_bytes += len;
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
 
        next:
                entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
 static struct net_device_stats *gem_get_stats(struct net_device *dev)
 {
        struct gem *gp = netdev_priv(dev);
-       struct net_device_stats *stats = &gp->net_stats;
 
        spin_lock_irq(&gp->lock);
        spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
         * so we shield against this
         */
        if (gp->running) {
-               stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+               dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
                writel(0, gp->regs + MAC_FCSERR);
 
-               stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+               dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
                writel(0, gp->regs + MAC_AERR);
 
-               stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+               dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
                writel(0, gp->regs + MAC_LERR);
 
-               stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
-               stats->collisions +=
+               dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+               dev->stats.collisions +=
                        (readl(gp->regs + MAC_ECOLL) +
                         readl(gp->regs + MAC_LCOLL));
                writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
        spin_unlock(&gp->tx_lock);
        spin_unlock_irq(&gp->lock);
 
-       return &gp->net_stats;
+       return &dev->stats;
 }
 
 static int gem_set_mac_address(struct net_device *dev, void *addr)
index 1990546..ede0178 100644
@@ -994,7 +994,6 @@ struct gem {
        u32                     status;
 
        struct napi_struct      napi;
-       struct net_device_stats net_stats;
 
        int                     tx_fifo_sz;
        int                     rx_fifo_sz;
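The sungem hunks above drop the driver-private net_stats copy in favour of the statistics block that is already embedded in struct net_device. A minimal sketch of the same pattern, assuming hypothetical foo_* names rather than the sungem code itself:

/*
 * Illustrative sketch only: counters live in dev->stats, which is part
 * of struct net_device, so no private struct net_device_stats copy is
 * needed and the get_stats hook can simply hand back &dev->stats.
 */
#include <linux/netdevice.h>

struct foo_priv {
        void __iomem    *regs;          /* no private net_device_stats here */
};

static void foo_count_rx_crc_error(struct net_device *dev)
{
        dev->stats.rx_errors++;
        dev->stats.rx_crc_errors++;
}

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
        return &dev->stats;
}
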
index 7841a8f..ebec888 100644
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
 #define BAR_0  0
 #define BAR_2  2
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
 #include "tg3.h"
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    116
+#define TG3_MIN_NUM                    117
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "December 3, 2010"
+#define DRV_MODULE_RELDATE     "January 25, 2011"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
                                 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)             (((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
-#define TG3_RX_DMA_ALIGN               16
-#define TG3_RX_HEADROOM                        ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
 #define TG3_DMA_BYTE_ENAB              64
 
 #define TG3_RX_STD_DMA_SZ              1536
@@ -1785,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
                tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                                  TG3_CL45_D7_EEERES_STAT, &val);
 
-               if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
-                   val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+               switch (val) {
+               case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+                       switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+                       case ASIC_REV_5717:
+                       case ASIC_REV_5719:
+                       case ASIC_REV_57765:
+                               /* Enable SM_DSP clock and tx 6dB coding. */
+                               val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+                                     MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+                                     MII_TG3_AUXCTL_ACTL_TX_6DB;
+                               tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+                               tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+                               /* Turn off SM_DSP clock. */
+                               val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+                                     MII_TG3_AUXCTL_ACTL_TX_6DB;
+                               tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+                       }
+                       /* Fallthrough */
+               case TG3_CL45_D7_EEERES_STAT_LP_100TX:
                        tp->setlpicnt = 2;
+               }
        }
 
        if (!tp->setlpicnt) {
@@ -2109,7 +2120,7 @@ out:
 
 static void tg3_frob_aux_power(struct tg3 *tp)
 {
-       struct tg3 *tp_peer = tp;
+       bool need_vaux = false;
 
        /* The GPIOs do something completely different on 57765. */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
@@ -2117,23 +2128,32 @@ static void tg3_frob_aux_power(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                return;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
+            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
+           tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;
 
                dev_peer = pci_get_drvdata(tp->pdev_peer);
+
                /* remove_one() may have been run on the peer. */
-               if (!dev_peer)
-                       tp_peer = tp;
-               else
-                       tp_peer = netdev_priv(dev_peer);
+               if (dev_peer) {
+                       struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+                       if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
+                               return;
+
+                       if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
+                           (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
+                               need_vaux = true;
+               }
        }
 
-       if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
-           (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-           (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
+       if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
+           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+               need_vaux = true;
+
+       if (need_vaux) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
@@ -2163,10 +2183,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;
 
-                       if (tp_peer != tp &&
-                           (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
-                               return;
-
                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
@@ -2205,10 +2221,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
-                       if (tp_peer != tp &&
-                           (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
-                               return;
-
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
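The tg3_frob_aux_power() rework above stops carrying a tp_peer pointer into every hardware branch and instead computes a single need_vaux flag up front. A generic sketch of that shape of refactor, with hypothetical foo_* names and none of the tg3-specific GPIO programming:

/*
 * Sketch only: gather every "do we need auxiliary power?" condition
 * into one boolean, then branch once, instead of re-checking a peer
 * device inside each hardware-specific branch.
 */
#include <linux/types.h>

struct foo {
        bool            wol_enabled;
        bool            asf_enabled;
        struct foo      *peer;          /* NULL if there is no peer port */
};

static void foo_frob_aux_power(struct foo *fp)
{
        bool need_vaux = false;

        if (fp->peer && fp->peer != fp &&
            (fp->peer->wol_enabled || fp->peer->asf_enabled))
                need_vaux = true;

        if (fp->wol_enabled || fp->asf_enabled)
                need_vaux = true;

        if (need_vaux) {
                /* program the GPIOs that switch Vaux on */
        } else {
                /* leave Vaux off / hand control back */
        }
}
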
@@ -2977,11 +2989,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
                      MII_TG3_AUXCTL_ACTL_TX_6DB;
                tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 
-               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
-                   !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
-                       tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
-                                        val | MII_TG3_DSP_CH34TP2_HIBW01);
+               switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+               case ASIC_REV_5717:
+               case ASIC_REV_57765:
+                       if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+                               tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+                                                MII_TG3_DSP_CH34TP2_HIBW01);
+                       /* Fall through */
+               case ASIC_REV_5719:
+                       val = MII_TG3_DSP_TAP26_ALNOKO |
+                             MII_TG3_DSP_TAP26_RMRXSTO |
+                             MII_TG3_DSP_TAP26_OPCSINPT;
+                       tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+               }
 
                val = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -4722,8 +4742,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;
-               bool hw_vlan __maybe_unused = false;
-               u16 vtag __maybe_unused = 0;
 
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4800,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
 
-                       copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+                       copy_skb = netdev_alloc_skb(tp->dev, len +
                                                    TG3_RAW_IP_ALIGN);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;
 
-                       skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
+                       skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4832,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                }
 
                if (desc->type_flags & RXD_FLAG_VLAN &&
-                   !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
-                       vtag = desc->err_vlan & RXD_VLAN_MASK;
-#if TG3_VLAN_TAG_USED
-                       if (tp->vlgrp)
-                               hw_vlan = true;
-                       else
-#endif
-                       {
-                               struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
-                                                   __skb_push(skb, VLAN_HLEN);
-
-                               memmove(ve, skb->data + VLAN_HLEN,
-                                       ETH_ALEN * 2);
-                               ve->h_vlan_proto = htons(ETH_P_8021Q);
-                               ve->h_vlan_TCI = htons(vtag);
-                       }
-               }
+                   !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+                       __vlan_hwaccel_put_tag(skb,
+                                              desc->err_vlan & RXD_VLAN_MASK);
 
-#if TG3_VLAN_TAG_USED
-               if (hw_vlan)
-                       vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
-               else
-#endif
-                       napi_gro_receive(&tnapi->napi, skb);
+               napi_gro_receive(&tnapi->napi, skb);
 
                received++;
                budget--;
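The receive-path hunk above removes the vlgrp bookkeeping and the manual re-insertion of the 802.1Q header, relying on __vlan_hwaccel_put_tag() instead. A minimal sketch of that receive idiom as it looked in this kernel generation (two-argument __vlan_hwaccel_put_tag; the foo_* names are hypothetical):

/*
 * Sketch only: if the NIC stripped a VLAN tag, record it in the skb
 * and let the stack handle it via napi_gro_receive(); no vlan_group
 * lookup and no manual VLAN header rebuild in the driver.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_rx_one(struct napi_struct *napi, struct sk_buff *skb,
                       bool tag_stripped, u16 vlan_tci)
{
        if (tag_stripped)
                __vlan_hwaccel_put_tag(skb, vlan_tci);

        napi_gro_receive(napi, skb);
}
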
@@ -5740,11 +5739,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        }
 
-#if TG3_VLAN_TAG_USED
        if (vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
-#endif
 
        len = skb_headlen(skb);
 
@@ -5986,11 +5983,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                        }
                }
        }
-#if TG3_VLAN_TAG_USED
+
        if (vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
-#endif
 
        if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
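On the transmit side, the two hunks above read the accelerated tag unconditionally with vlan_tx_tag_present()/vlan_tx_tag_get(), since the tag always travels in the skb once NETIF_F_HW_VLAN_TX is advertised. A small sketch of that idiom; FOO_TXD_FLAG_VLAN and the flag layout are hypothetical, only loosely modelled on the hunk:

/*
 * Sketch only: fold the skb's accelerated VLAN tag into the hardware
 * descriptor flags, with no #ifdef on CONFIG_VLAN_8021Q.
 */
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

#define FOO_TXD_FLAG_VLAN       0x0040  /* hypothetical descriptor bit */

static u32 foo_tx_base_flags(const struct sk_buff *skb)
{
        u32 base_flags = 0;

        if (vlan_tx_tag_present(skb))
                base_flags |= FOO_TXD_FLAG_VLAN |
                              (vlan_tx_tag_get(skb) << 16);

        return base_flags;
}
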
@@ -7834,7 +7830,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
 
                tw32_f(TG3_CPMU_EEE_DBTMR2,
-                      TG3_CPMU_DBTMR1_APE_TX_2047US |
+                      TG3_CPMU_DBTMR2_APE_TX_2047US |
                       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
        }
 
@@ -8108,8 +8104,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Program the jumbo buffer descriptor ring control
         * blocks on those devices that have them.
         */
-       if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
-           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+           ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
                /* Setup replenish threshold. */
                tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
 
@@ -8196,10 +8193,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                              RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
                              RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
 
-       /* If statement applies to 5705 and 5750 PCI devices only */
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+           tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
@@ -8227,8 +8222,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
            (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
                val = tr32(TG3_RDMA_RSRVCTRL_REG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
-                       val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
-                       val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+                       val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+                                TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+                                TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+                       val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+                              TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+                              TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
                }
                tw32(TG3_RDMA_RSRVCTRL_REG,
                     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8350,7 +8349,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
 
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+       if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+               tp->irq_cnt > 1) {
                val = tr32(MSGINT_MODE);
                val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
                tw32(MSGINT_MODE, val);
@@ -8367,17 +8367,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
               WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
               WDMAC_MODE_LNGREAD_ENAB);
 
-       /* If statement applies to 5705 and 5750 PCI devices only */
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+           tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
                    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
                     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
                        /* nothing */
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
-                          !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
-                          !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+                          !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
                        val |= WDMAC_MODE_RX_ACCEL;
                }
        }
@@ -9090,7 +9087,8 @@ static void tg3_ints_init(struct tg3 *tp)
 
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
                u32 msi_mode = tr32(MSGINT_MODE);
-               if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+               if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+                   tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
@@ -9532,17 +9530,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);
 
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
-#if TG3_VLAN_TAG_USED
-       if (!tp->vlgrp &&
-           !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
-               rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#else
-       /* By definition, VLAN is disabled always in this
-        * case.
-        */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
 #endif
@@ -10492,16 +10483,53 @@ static int tg3_test_nvram(struct tg3 *tp)
                goto out;
        }
 
+       err = -EIO;
+
        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
-       if (csum != be32_to_cpu(buf[0x10/4]))
+       if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;
 
        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
-       if (csum != be32_to_cpu(buf[0xfc/4]))
+       if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;
 
+       for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
+               /* The data is in little-endian format in NVRAM.
+                * Use the big-endian read routines to preserve
+                * the byte order as it exists in NVRAM.
+                */
+               if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
+                       goto out;
+       }
+
+       i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
+                            PCI_VPD_LRDT_RO_DATA);
+       if (i > 0) {
+               j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+               if (j < 0)
+                       goto out;
+
+               if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
+                       goto out;
+
+               i += PCI_VPD_LRDT_TAG_SIZE;
+               j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+                                             PCI_VPD_RO_KEYWORD_CHKSUM);
+               if (j > 0) {
+                       u8 csum8 = 0;
+
+                       j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+                       for (i = 0; i <= j; i++)
+                               csum8 += ((u8 *)buf)[i];
+
+                       if (csum8)
+                               goto out;
+               }
+       }
+
        err = 0;
 
 out:
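The new NVRAM-test code above also walks the VPD read-only section: the CHKSUM keyword covers every byte from the start of the VPD image through the checksum byte itself, and those bytes must sum to zero modulo 256. A condensed sketch of the same walk with the pci_vpd_* helpers used in the hunk, assuming vpd/len hold the raw VPD image:

/*
 * Sketch only: locate the large-resource RO data tag, find the CHKSUM
 * ("RV") keyword inside it, and verify that all bytes up to and
 * including the checksum byte sum to zero.
 */
#include <linux/pci.h>

static bool foo_vpd_csum_ok(u8 *vpd, unsigned int len)
{
        int ro, size, chk, i;
        u8 csum8 = 0;

        ro = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (ro < 0)
                return false;

        size = pci_vpd_lrdt_size(&vpd[ro]);
        ro += PCI_VPD_LRDT_TAG_SIZE;

        chk = pci_vpd_find_info_keyword(vpd, ro, size,
                                        PCI_VPD_RO_KEYWORD_CHKSUM);
        if (chk < 0)
                return false;

        chk += PCI_VPD_INFO_FLD_HDR_SIZE;
        for (i = 0; i <= chk; i++)
                csum8 += vpd[i];

        return csum8 == 0;
}
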
@@ -10873,13 +10901,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
-                * errata.
+                * errata.  Also, the MAC loopback test is deprecated for
+                * all newer ASIC revisions.
                 */
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+                   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
                        return 0;
 
-               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-                          MAC_MODE_PORT_INT_LPBACK;
+               mac_mode = tp->mac_mode &
+                          ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+               mac_mode |= MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10901,7 +10932,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);
 
-               mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+               mac_mode = tp->mac_mode &
+                          ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        tg3_writephy(tp, MII_TG3_FET_PTEST,
                                     MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10929,6 +10961,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
+
+               /* Wait for link */
+               for (i = 0; i < 100; i++) {
+                       if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+                               break;
+                       mdelay(1);
+               }
        } else {
                return -EINVAL;
        }
@@ -11035,14 +11074,19 @@ out:
 static int tg3_test_loopback(struct tg3 *tp)
 {
        int err = 0;
-       u32 cpmuctrl = 0;
+       u32 eee_cap, cpmuctrl = 0;
 
        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;
 
+       eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+       tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
        err = tg3_reset_hw(tp, 1);
-       if (err)
-               return TG3_LOOPBACK_FAILED;
+       if (err) {
+               err = TG3_LOOPBACK_FAILED;
+               goto done;
+       }
 
        /* Turn off gphy autopowerdown. */
        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11062,8 +11106,10 @@ static int tg3_test_loopback(struct tg3 *tp)
                        udelay(10);
                }
 
-               if (status != CPMU_MUTEX_GNT_DRIVER)
-                       return TG3_LOOPBACK_FAILED;
+               if (status != CPMU_MUTEX_GNT_DRIVER) {
+                       err = TG3_LOOPBACK_FAILED;
+                       goto done;
+               }
 
                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11092,6 +11138,9 @@ static int tg3_test_loopback(struct tg3 *tp)
        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
 
+done:
+       tp->phy_flags |= eee_cap;
+
        return err;
 }
 
@@ -11198,7 +11247,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+               if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+                   ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+                    !netif_running(dev)))
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
@@ -11214,7 +11265,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+               if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+                   ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+                    !netif_running(dev)))
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
@@ -11230,31 +11283,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return -EOPNOTSUPP;
 }
 
-#if TG3_VLAN_TAG_USED
-static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-       struct tg3 *tp = netdev_priv(dev);
-
-       if (!netif_running(dev)) {
-               tp->vlgrp = grp;
-               return;
-       }
-
-       tg3_netif_stop(tp);
-
-       tg3_full_lock(tp, 0);
-
-       tp->vlgrp = grp;
-
-       /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
-       __tg3_set_rx_mode(dev);
-
-       tg3_netif_start(tp);
-
-       tg3_full_unlock(tp);
-}
-#endif
-
 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 {
        struct tg3 *tp = netdev_priv(dev);
@@ -12468,9 +12496,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
        }
 done:
-       device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
-       device_set_wakeup_enable(&tp->pdev->dev,
+       if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+               device_set_wakeup_enable(&tp->pdev->dev,
                                 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+       else
+               device_set_wakeup_capable(&tp->pdev->dev, false);
 }
 
 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
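The probe-time change above stops re-initialising the wakeup source on every pass and instead only toggles the enable state when the device is actually WOL-capable, explicitly marking it non-capable otherwise. A minimal sketch of that pattern (foo names hypothetical):

/*
 * Sketch only: flip the wakeup "enabled" state when the hardware can
 * wake the system, otherwise mark the device as not wakeup-capable.
 */
#include <linux/device.h>
#include <linux/pm_wakeup.h>

static void foo_setup_wakeup(struct device *dev, bool can_wol, bool wol_on)
{
        if (can_wol)
                device_set_wakeup_enable(dev, wol_on);
        else
                device_set_wakeup_capable(dev, false);
}
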
@@ -12522,12 +12552,45 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
 }
 
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+       u32 adv = ADVERTISED_Autoneg |
+                 ADVERTISED_Pause;
+
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+               adv |= ADVERTISED_1000baseT_Half |
+                      ADVERTISED_1000baseT_Full;
+
+       if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+               adv |= ADVERTISED_100baseT_Half |
+                      ADVERTISED_100baseT_Full |
+                      ADVERTISED_10baseT_Half |
+                      ADVERTISED_10baseT_Full |
+                      ADVERTISED_TP;
+       else
+               adv |= ADVERTISED_FIBRE;
+
+       tp->link_config.advertising = adv;
+       tp->link_config.speed = SPEED_INVALID;
+       tp->link_config.duplex = DUPLEX_INVALID;
+       tp->link_config.autoneg = AUTONEG_ENABLE;
+       tp->link_config.active_speed = SPEED_INVALID;
+       tp->link_config.active_duplex = DUPLEX_INVALID;
+       tp->link_config.orig_speed = SPEED_INVALID;
+       tp->link_config.orig_duplex = DUPLEX_INVALID;
+       tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
 static int __devinit tg3_phy_probe(struct tg3 *tp)
 {
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;
 
+       /* flow control autonegotiation is default behavior */
+       tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+       tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                return tg3_phy_init(tp);
 
@@ -12589,6 +12652,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
              tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 
+       tg3_phy_init_link_config(tp);
+
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12644,17 +12709,6 @@ skip_phy_reset:
                err = tg3_init_5401phy_dsp(tp);
        }
 
-       if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
-               tp->link_config.advertising =
-                       (ADVERTISED_1000baseT_Half |
-                        ADVERTISED_1000baseT_Full |
-                        ADVERTISED_Autoneg |
-                        ADVERTISED_FIBRE);
-       if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-               tp->link_config.advertising &=
-                       ~(ADVERTISED_1000baseT_Half |
-                         ADVERTISED_1000baseT_Full);
-
        return err;
 }
 
@@ -13066,9 +13120,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
 
 static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
 {
-#if TG3_VLAN_TAG_USED
        dev->vlan_features |= flags;
-#endif
 }
 
 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13083,7 +13135,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
                return 512;
 }
 
-DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
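The table above (and dma_wait_state_chipsets further down) gains a tg3_ prefix and static linkage so it no longer leaks into the global namespace; pci_dev_present() only needs a file-local pci_device_id array. A minimal sketch of that pairing, with a hypothetical foo_ prefix and a single example ID:

/*
 * Sketch only: a static ID table consulted with pci_dev_present() to
 * detect a known-problematic host bridge at runtime.
 */
#include <linux/pci.h>
#include <linux/pci_ids.h>

static DEFINE_PCI_DEVICE_TABLE(foo_broken_bridges) = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { }
};

static bool foo_needs_workaround(void)
{
        return pci_dev_present(foo_broken_bridges);
}
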
@@ -13325,7 +13377,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        }
 
        /* Determine TSO capabilities */
-       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+               ; /* Do nothing. HW bug. */
+       else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
                tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
        else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13376,7 +13430,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
        }
 
-       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+       if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
                tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13394,42 +13449,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 
                tp->pcie_readrq = 4096;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
-                       u16 word;
-
-                       pci_read_config_word(tp->pdev,
-                                            tp->pcie_cap + PCI_EXP_LNKSTA,
-                                            &word);
-                       switch (word & PCI_EXP_LNKSTA_CLS) {
-                       case PCI_EXP_LNKSTA_CLS_2_5GB:
-                               word &= PCI_EXP_LNKSTA_NLW;
-                               word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
-                               switch (word) {
-                               case 2:
-                                       tp->pcie_readrq = 2048;
-                                       break;
-                               case 4:
-                                       tp->pcie_readrq = 1024;
-                                       break;
-                               }
-                               break;
-
-                       case PCI_EXP_LNKSTA_CLS_5_0GB:
-                               word &= PCI_EXP_LNKSTA_NLW;
-                               word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
-                               switch (word) {
-                               case 1:
-                                       tp->pcie_readrq = 2048;
-                                       break;
-                               case 2:
-                                       tp->pcie_readrq = 1024;
-                                       break;
-                               case 4:
-                                       tp->pcie_readrq = 512;
-                                       break;
-                               }
-                       }
-               }
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+                       tp->pcie_readrq = 2048;
 
                pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
@@ -13468,7 +13489,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         * every mailbox register write to force the writes to be
         * posted to the chip in order.
         */
-       if (pci_dev_present(write_reorder_chipsets) &&
+       if (pci_dev_present(tg3_write_reorder_chipsets) &&
            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
 
@@ -13861,11 +13882,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        else
                tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 
-       tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+       tp->rx_offset = NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
-               tp->rx_offset -= NET_IP_ALIGN;
+               tp->rx_offset = 0;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -14224,7 +14245,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 
 #define TEST_BUFFER_SIZE       0x2000
 
-DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
 };
@@ -14403,7 +14424,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
-               if (pci_dev_present(dma_wait_state_chipsets)) {
+               if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
@@ -14420,23 +14441,6 @@ out_nofree:
        return ret;
 }
 
-static void __devinit tg3_init_link_config(struct tg3 *tp)
-{
-       tp->link_config.advertising =
-               (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
-                ADVERTISED_Autoneg | ADVERTISED_MII);
-       tp->link_config.speed = SPEED_INVALID;
-       tp->link_config.duplex = DUPLEX_INVALID;
-       tp->link_config.autoneg = AUTONEG_ENABLE;
-       tp->link_config.active_speed = SPEED_INVALID;
-       tp->link_config.active_duplex = DUPLEX_INVALID;
-       tp->link_config.orig_speed = SPEED_INVALID;
-       tp->link_config.orig_duplex = DUPLEX_INVALID;
-       tp->link_config.orig_autoneg = AUTONEG_INVALID;
-}
-
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
        if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
@@ -14629,9 +14633,6 @@ static const struct net_device_ops tg3_netdev_ops = {
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
-       .ndo_vlan_rx_register   = tg3_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
 #endif
@@ -14648,9 +14649,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
-       .ndo_vlan_rx_register   = tg3_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
 #endif
@@ -14700,9 +14698,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-#if TG3_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
 
        tp = netdev_priv(dev);
        tp->pdev = pdev;
@@ -14748,8 +14744,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_free_dev;
        }
 
-       tg3_init_link_config(tp);
-
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
 
@@ -14897,10 +14891,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_apeunmap;
        }
 
-       /* flow control autonegotiation is default behavior */
-       tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
-       tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
-
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
index d62c8d9..73884b6 100644
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
  */
 
 #ifndef _T3_H
 #define  CHIPREV_ID_57780_A1            0x57780001
 #define  CHIPREV_ID_5717_A0             0x05717000
 #define  CHIPREV_ID_57765_A0            0x57785000
+#define  CHIPREV_ID_5719_A0             0x05719000
 #define  GET_ASIC_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 12)
 #define   ASIC_REV_5700                         0x07
 #define   ASIC_REV_5701                         0x00
 #define  TG3_CPMU_DBTMR1_PCIEXIT_2047US         0x07ff0000
 #define  TG3_CPMU_DBTMR1_LNKIDLE_2047US         0x000070ff
 #define TG3_CPMU_EEE_DBTMR2            0x000036b8
-#define  TG3_CPMU_DBTMR1_APE_TX_2047US  0x07ff0000
+#define  TG3_CPMU_DBTMR2_APE_TX_2047US  0x07ff0000
 #define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US         0x000070ff
 #define TG3_CPMU_EEE_LNKIDL_CTRL       0x000036bc
 #define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0   0x01000000
 
 #define TG3_RDMA_RSRVCTRL_REG          0x00004900
 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX         0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K         0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK         0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K         0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK         0x000ff000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_320B   0x28000000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_MASK   0xffe00000
 /* 0x4904 --> 0x4910 unused */
 
 #define MII_TG3_DSP_TAP1               0x0001
 #define  MII_TG3_DSP_TAP1_AGCTGT_DFLT  0x0007
+#define MII_TG3_DSP_TAP26              0x001a
+#define  MII_TG3_DSP_TAP26_ALNOKO      0x0001
+#define  MII_TG3_DSP_TAP26_RMRXSTO     0x0002
+#define  MII_TG3_DSP_TAP26_OPCSINPT    0x0004
 #define MII_TG3_DSP_AADJ1CH0           0x001f
 #define MII_TG3_DSP_CH34TP2            0x4022
 #define MII_TG3_DSP_CH34TP2_HIBW01     0x0010
@@ -2808,9 +2817,6 @@ struct tg3 {
        u32                             rx_std_max_post;
        u32                             rx_offset;
        u32                             rx_pkt_map_sz;
-#if TG3_VLAN_TAG_USED
-       struct vlan_group               *vlgrp;
-#endif
 
 
        /* begin "everything else" cacheline(s) section */
index f8e463c..ace6404 100644
  *             Microchip Technology, 24C01A/02A/04A Data Sheet
  *                     available in PDF format from www.microchip.com
  *
- * Change History
- *
- *     Tigran Aivazian <tigran@sco.com>:       TLan_PciProbe() now uses
- *                                             new PCI BIOS interface.
- *     Alan Cox        <alan@lxorguk.ukuu.org.uk>:
- *                                             Fixed the out of memory
- *                                             handling.
- *
- *     Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
- *
- *     v1.1 Dec 20, 1999    - Removed linux version checking
- *                            Patch from Tigran Aivazian.
- *                          - v1.1 includes Alan's SMP updates.
- *                          - We still have problems on SMP though,
- *                            but I'm looking into that.
- *
- *     v1.2 Jan 02, 2000    - Hopefully fixed the SMP deadlock.
- *                          - Removed dependency of HZ being 100.
- *                          - We now allow higher priority timers to
- *                            overwrite timers like TLAN_TIMER_ACTIVITY
- *                            Patch from John Cagle <john.cagle@compaq.com>.
- *                          - Fixed a few compiler warnings.
- *
- *     v1.3 Feb 04, 2000    - Fixed the remaining HZ issues.
- *                          - Removed call to pci_present().
- *                          - Removed SA_INTERRUPT flag from irq handler.
- *                          - Added __init and __initdata to reduce resisdent
- *                            code size.
- *                          - Driver now uses module_init/module_exit.
- *                          - Rewrote init_module and tlan_probe to
- *                            share a lot more code. We now use tlan_probe
- *                            with builtin and module driver.
- *                          - Driver ported to new net API.
- *                          - tlan.txt has been reworked to reflect current
- *                            driver (almost)
- *                          - Other minor stuff
- *
- *     v1.4 Feb 10, 2000    - Updated with more changes required after Dave's
- *                            network cleanup in 2.3.43pre7 (Tigran & myself)
- *                          - Minor stuff.
- *
- *     v1.5 March 22, 2000  - Fixed another timer bug that would hang the driver
- *                            if no cable/link were present.
- *                          - Cosmetic changes.
- *                          - TODO: Port completely to new PCI/DMA API
- *                                  Auto-Neg fallback.
- *
- *     v1.6 April 04, 2000  - Fixed driver support for kernel-parameters. Haven't
- *                            tested it though, as the kernel support is currently
- *                            broken (2.3.99p4p3).
- *                          - Updated tlan.txt accordingly.
- *                          - Adjusted minimum/maximum frame length.
- *                          - There is now a TLAN website up at
- *                            http://hp.sourceforge.net/ 
- *
- *     v1.7 April 07, 2000  - Started to implement custom ioctls. Driver now
- *                            reports PHY information when used with Donald
- *                            Beckers userspace MII diagnostics utility.
- *
- *     v1.8 April 23, 2000  - Fixed support for forced speed/duplex settings.
- *                          - Added link information to Auto-Neg and forced
- *                            modes. When NIC operates with auto-neg the driver
- *                            will report Link speed & duplex modes as well as
- *                            link partner abilities. When forced link is used,
- *                            the driver will report status of the established
- *                            link.
- *                            Please read tlan.txt for additional information.
- *                          - Removed call to check_region(), and used
- *                            return value of request_region() instead.
- *
- *     v1.8a May 28, 2000   - Minor updates.
- *
- *     v1.9 July 25, 2000   - Fixed a few remaining Full-Duplex issues.
- *                          - Updated with timer fixes from Andrew Morton.
- *                          - Fixed module race in TLan_Open.
- *                          - Added routine to monitor PHY status.
- *                          - Added activity led support for Proliant devices.
- *
- *     v1.10 Aug 30, 2000   - Added support for EISA based tlan controllers
- *                            like the Compaq NetFlex3/E.
- *                          - Rewrote tlan_probe to better handle multiple
- *                            bus probes. Probing and device setup is now
- *                            done through TLan_Probe and TLan_init_one. Actual
- *                            hardware probe is done with kernel API and
- *                            TLan_EisaProbe.
- *                          - Adjusted debug information for probing.
- *                          - Fixed bug that would cause general debug information
- *                            to be printed after driver removal.
- *                          - Added transmit timeout handling.
- *                          - Fixed OOM return values in tlan_probe.
- *                          - Fixed possible mem leak in tlan_exit
- *                            (now tlan_remove_one).
- *                          - Fixed timer bug in TLan_phyMonitor.
- *                          - This driver version is alpha quality, please
- *                            send me any bug issues you may encounter.
- *
- *     v1.11 Aug 31, 2000   - Do not try to register irq 0 if no irq line was
- *                            set for EISA cards.
- *                          - Added support for NetFlex3/E with nibble-rate
- *                            10Base-T PHY. This is untestet as I haven't got
- *                            one of these cards.
- *                          - Fixed timer being added twice.
- *                          - Disabled PhyMonitoring by default as this is
- *                            work in progress. Define MONITOR to enable it.
- *                          - Now we don't display link info with PHYs that
- *                            doesn't support it (level1).
- *                          - Incresed tx_timeout beacuse of auto-neg.
- *                          - Adjusted timers for forced speeds.
- *
- *     v1.12 Oct 12, 2000   - Minor fixes (memleak, init, etc.)
- *
- *     v1.13 Nov 28, 2000   - Stop flooding console with auto-neg issues
- *                            when link can't be established.
- *                          - Added the bbuf option as a kernel parameter.
- *                          - Fixed ioaddr probe bug.
- *                          - Fixed stupid deadlock with MII interrupts.
- *                          - Added support for speed/duplex selection with
- *                            multiple nics.
- *                          - Added partly fix for TX Channel lockup with
- *                            TLAN v1.0 silicon. This needs to be investigated
- *                            further.
- *
- *     v1.14 Dec 16, 2000   - Added support for servicing multiple frames per.
- *                            interrupt. Thanks goes to
- *                            Adam Keys <adam@ti.com>
- *                            Denis Beaudoin <dbeaudoin@ti.com>
- *                            for providing the patch.
- *                          - Fixed auto-neg output when using multiple
- *                            adapters.
- *                          - Converted to use new taskq interface.
- *
- *     v1.14a Jan 6, 2001   - Minor adjustments (spinlocks, etc.)
- *
- *     Samuel Chessman <chessman@tux.org> New Maintainer!
- *
- *     v1.15 Apr 4, 2002    - Correct operation when aui=1 to be
- *                            10T half duplex no loopback
- *                            Thanks to Gunnar Eikman
- *
- *     Sakari Ailus <sakari.ailus@iki.fi>:
- *
- *     v1.15a Dec 15 2008   - Remove bbuf support, it doesn't work anyway.
- *
- *******************************************************************************/
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/init.h>
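The tlan rewrite above defines pr_fmt before the first include so that every pr_err()/pr_info() in the file is automatically prefixed with the module name. A minimal sketch of that convention in a hypothetical module:

/*
 * Sketch only: pr_fmt must be defined before printk.h is pulled in
 * (here, before any include) so the pr_* macros pick it up.
 * KBUILD_MODNAME is supplied by the kernel build system.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init foo_init(void)
{
        pr_info("loaded\n");    /* prints "foo: loaded" for a module named foo */
        return 0;
}
module_init(foo_init);

MODULE_LICENSE("GPL");
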
 
 #include "tlan.h"
 
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
 
 /* For removing EISA devices */
-static struct net_device       *TLan_Eisa_Devices;
+static struct net_device       *tlan_eisa_devices;
 
-static int             TLanDevicesInstalled;
+static int             tlan_devices_installed;
 
 /* Set speed, duplex and aui settings */
 static  int aui[MAX_TLAN_BOARDS];
@@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0);
 module_param_array(duplex, int, NULL, 0);
 module_param_array(speed, int, NULL, 0);
 MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
-MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
+MODULE_PARM_DESC(duplex,
+                "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
 
 MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
 MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@@ -218,139 +76,144 @@ static  int              debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
 
-static const char TLanSignature[] = "TLAN";
-static  const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static const char tlan_signature[] = "TLAN";
+static  const char tlan_banner[] = "ThunderLAN driver v1.17\n";
 static  int tlan_have_pci;
 static  int tlan_have_eisa;
 
-static const char *media[] = {
-       "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
-       "100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+       "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+       "100BaseTx-FD", "100BaseT4", NULL
 };
 
 static struct board {
-       const char      *deviceLabel;
-       u32             flags;
-       u16             addrOfs;
+       const char      *device_label;
+       u32             flags;
+       u16             addr_ofs;
 } board_info[] = {
        { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-       { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+       { "Compaq Netelligent 10/100 TX PCI UTP",
+         TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
        { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/P",
          TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
        { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq Netelligent Integrated 10/100 TX UTP",
          TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-       { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
-       { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+       { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+         TLAN_ADAPTER_NONE, 0x83 },
+       { "Compaq Netelligent 10/100 TX Embedded UTP",
+         TLAN_ADAPTER_NONE, 0x83 },
        { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
-       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
-       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
        { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-       { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+       { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/E",
-         TLAN_ADAPTER_ACTIVITY_LED |   /* EISA card */
+         TLAN_ADAPTER_ACTIVITY_LED |   /* EISA card */
          TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
-       { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+       { "Compaq NetFlex-3/E",
+         TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
 };
 
 static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
        { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
        { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
        { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
        { 0,}
 };
 MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
 
-static void    TLan_EisaProbe( void );
-static void    TLan_Eisa_Cleanup( void );
-static int      TLan_Init( struct net_device * );
-static int     TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int     TLan_Close( struct net_device *);
-static struct  net_device_stats *TLan_GetStats( struct net_device *);
-static void    TLan_SetMulticastList( struct net_device *);
-static int     TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int      TLan_probe1( struct pci_dev *pdev, long ioaddr,
-                            int irq, int rev, const struct pci_device_id *ent);
-static void    TLan_tx_timeout( struct net_device *dev);
-static void    TLan_tx_timeout_work(struct work_struct *work);
-static int     tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
-
-static u32     TLan_HandleTxEOF( struct net_device *, u16 );
-static u32     TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32     TLan_HandleRxEOF( struct net_device *, u16 );
-static u32     TLan_HandleDummy( struct net_device *, u16 );
-static u32     TLan_HandleTxEOC( struct net_device *, u16 );
-static u32     TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32     TLan_HandleRxEOC( struct net_device *, u16 );
-
-static void    TLan_Timer( unsigned long );
-
-static void    TLan_ResetLists( struct net_device * );
-static void    TLan_FreeLists( struct net_device * );
-static void    TLan_PrintDio( u16 );
-static void    TLan_PrintList( TLanList *, char *, int );
-static void    TLan_ReadAndClearStats( struct net_device *, int );
-static void    TLan_ResetAdapter( struct net_device * );
-static void    TLan_FinishReset( struct net_device * );
-static void    TLan_SetMac( struct net_device *, int areg, char *mac );
-
-static void    TLan_PhyPrint( struct net_device * );
-static void    TLan_PhyDetect( struct net_device * );
-static void    TLan_PhyPowerDown( struct net_device * );
-static void    TLan_PhyPowerUp( struct net_device * );
-static void    TLan_PhyReset( struct net_device * );
-static void    TLan_PhyStartLink( struct net_device * );
-static void    TLan_PhyFinishAutoNeg( struct net_device * );
+static void    tlan_eisa_probe(void);
+static void    tlan_eisa_cleanup(void);
+static int      tlan_init(struct net_device *);
+static int     tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int     tlan_close(struct net_device *);
+static struct  net_device_stats *tlan_get_stats(struct net_device *);
+static void    tlan_set_multicast_list(struct net_device *);
+static int     tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int      tlan_probe1(struct pci_dev *pdev, long ioaddr,
+                           int irq, int rev, const struct pci_device_id *ent);
+static void    tlan_tx_timeout(struct net_device *dev);
+static void    tlan_tx_timeout_work(struct work_struct *work);
+static int     tlan_init_one(struct pci_dev *pdev,
+                             const struct pci_device_id *ent);
+
+static u32     tlan_handle_tx_eof(struct net_device *, u16);
+static u32     tlan_handle_stat_overflow(struct net_device *, u16);
+static u32     tlan_handle_rx_eof(struct net_device *, u16);
+static u32     tlan_handle_dummy(struct net_device *, u16);
+static u32     tlan_handle_tx_eoc(struct net_device *, u16);
+static u32     tlan_handle_status_check(struct net_device *, u16);
+static u32     tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void    tlan_timer(unsigned long);
+
+static void    tlan_reset_lists(struct net_device *);
+static void    tlan_free_lists(struct net_device *);
+static void    tlan_print_dio(u16);
+static void    tlan_print_list(struct tlan_list *, char *, int);
+static void    tlan_read_and_clear_stats(struct net_device *, int);
+static void    tlan_reset_adapter(struct net_device *);
+static void    tlan_finish_reset(struct net_device *);
+static void    tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void    tlan_phy_print(struct net_device *);
+static void    tlan_phy_detect(struct net_device *);
+static void    tlan_phy_power_down(struct net_device *);
+static void    tlan_phy_power_up(struct net_device *);
+static void    tlan_phy_reset(struct net_device *);
+static void    tlan_phy_start_link(struct net_device *);
+static void    tlan_phy_finish_auto_neg(struct net_device *);
 #ifdef MONITOR
-static void     TLan_PhyMonitor( struct net_device * );
+static void     tlan_phy_monitor(struct net_device *);
 #endif
 
 /*
-static int     TLan_PhyNop( struct net_device * );
-static int     TLan_PhyInternalCheck( struct net_device * );
-static int     TLan_PhyInternalService( struct net_device * );
-static int     TLan_PhyDp83840aCheck( struct net_device * );
+  static int   tlan_phy_nop(struct net_device *);
+  static int   tlan_phy_internal_check(struct net_device *);
+  static int   tlan_phy_internal_service(struct net_device *);
+  static int   tlan_phy_dp83840a_check(struct net_device *);
 */
 
-static bool    TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void    TLan_MiiSendData( u16, u32, unsigned );
-static void    TLan_MiiSync( u16 );
-static void    TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool    tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void    tlan_mii_send_data(u16, u32, unsigned);
+static void    tlan_mii_sync(u16);
+static void    tlan_mii_write_reg(struct net_device *, u16, u16, u16);
 
-static void    TLan_EeSendStart( u16 );
-static int     TLan_EeSendByte( u16, u8, int );
-static void    TLan_EeReceiveByte( u16, u8 *, int );
-static int     TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void    tlan_ee_send_start(u16);
+static int     tlan_ee_send_byte(u16, u8, int);
+static void    tlan_ee_receive_byte(u16, u8 *, int);
+static int     tlan_ee_read_byte(struct net_device *, u8, u8 *);
 
 
 static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
 {
        unsigned long addr = (unsigned long)skb;
        tag->buffer[9].address = addr;
@@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
 }
 
 static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
 {
        unsigned long addr;
 
@@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
        return (struct sk_buff *) addr;
 }
 
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
        NULL,
-       TLan_HandleTxEOF,
-       TLan_HandleStatOverflow,
-       TLan_HandleRxEOF,
-       TLan_HandleDummy,
-       TLan_HandleTxEOC,
-       TLan_HandleStatusCheck,
-       TLan_HandleRxEOC
+       tlan_handle_tx_eof,
+       tlan_handle_stat_overflow,
+       tlan_handle_rx_eof,
+       tlan_handle_dummy,
+       tlan_handle_tx_eoc,
+       tlan_handle_status_check,
+       tlan_handle_rx_eoc
 };
 
 static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        unsigned long flags = 0;
 
        if (!in_irq())
                spin_lock_irqsave(&priv->lock, flags);
-       if ( priv->timer.function != NULL &&
-               priv->timerType != TLAN_TIMER_ACTIVITY ) {
+       if (priv->timer.function != NULL &&
+           priv->timer_type != TLAN_TIMER_ACTIVITY) {
                if (!in_irq())
                        spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }
-       priv->timer.function = TLan_Timer;
+       priv->timer.function = tlan_timer;
        if (!in_irq())
                spin_unlock_irqrestore(&priv->lock, flags);
 
        priv->timer.data = (unsigned long) dev;
-       priv->timerSetAt = jiffies;
-       priv->timerType = type;
+       priv->timer_set_at = jiffies;
+       priv->timer_type = type;
        mod_timer(&priv->timer, jiffies + ticks);
 
-} /* TLan_SetTimer */
+}
 
 
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
 
-       These functions are more or less common to all Linux network drivers.
+these functions are more or less common to all linux network drivers.
 
 ******************************************************************************
 *****************************************************************************/
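A note on the table further up: tlan_int_vector is a type-indexed dispatch table, and the reworked interrupt handler later in this patch pulls a type field out of the host-interrupt word and calls tlan_int_vector[type](dev, host_int). The idiom, reduced to a self-contained user-space sketch with made-up handler names and an assumed mask value (not the driver's code):

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned short u16;

/* Stand-in handlers; names and return values are illustrative only. */
static u32 handle_tx_eof(void *dev, u16 host_int) { (void)dev; (void)host_int; puts("tx eof"); return 1; }
static u32 handle_rx_eof(void *dev, u16 host_int) { (void)dev; (void)host_int; puts("rx eof"); return 1; }
static u32 handle_dummy(void *dev, u16 host_int)  { (void)dev; (void)host_int; puts("dummy");  return 0; }

/* Slot 0 is unused, mirroring the NULL entry in tlan_int_vector. */
static u32 (*int_vector[])(void *, u16) = {
	NULL, handle_tx_eof, handle_rx_eof, handle_dummy,
};

int main(void)
{
	u16 host_int = 0x0008;               /* pretend interrupt status word */
	u16 type = (host_int & 0x001c) >> 2; /* extract the type field (mask assumed) */

	if (type < sizeof(int_vector)/sizeof(int_vector[0]) && int_vector[type])
		int_vector[type](NULL, host_int);  /* dispatch through the table */
	return 0;
}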
@@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
 
 
 
-       /***************************************************************
-       *      tlan_remove_one
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              None
-       *
-       *      Goes through the TLanDevices list and frees the device
-       *      structs and memory associated with each device (lists
-       *      and buffers).  It also ureserves the IO port regions
-       *      associated with this device.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_remove_one
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             None
+ *
+ *     Goes through the TLanDevices list and frees the device
+ *     structs and memory associated with each device (lists
+ *     and buffers).  It also ureserves the IO port regions
+ *     associated with this device.
+ *
+ **************************************************************/
 
 
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
 {
-       struct net_device *dev = pci_get_drvdata( pdev );
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct tlan_priv        *priv = netdev_priv(dev);
 
-       unregister_netdev( dev );
+       unregister_netdev(dev);
 
-       if ( priv->dmaStorage ) {
-               pci_free_consistent(priv->pciDev,
-                                   priv->dmaSize, priv->dmaStorage,
-                                   priv->dmaStorageDMA );
+       if (priv->dma_storage) {
+               pci_free_consistent(priv->pci_dev,
+                                   priv->dma_size, priv->dma_storage,
+                                   priv->dma_storage_dma);
        }
 
 #ifdef CONFIG_PCI
        pci_release_regions(pdev);
 #endif
 
-       free_netdev( dev );
+       free_netdev(dev);
 
-       pci_set_drvdata( pdev, NULL );
+       pci_set_drvdata(pdev, NULL);
 }
 
+static void tlan_start(struct net_device *dev)
+{
+       tlan_reset_lists(dev);
+       /* NOTE: It might not be necessary to read the stats before a
+          reset if you don't care what the values are.
+       */
+       tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+       tlan_reset_adapter(dev);
+       netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+       struct tlan_priv *priv = netdev_priv(dev);
+
+       tlan_read_and_clear_stats(dev, TLAN_RECORD);
+       outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+       /* Reset and power down phy */
+       tlan_reset_adapter(dev);
+       if (priv->timer.function != NULL) {
+               del_timer_sync(&priv->timer);
+               priv->timer.function = NULL;
+       }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (netif_running(dev))
+               tlan_stop(dev);
+
+       netif_device_detach(dev);
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_wake_from_d3(pdev, false);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_enable_wake(pdev, 0, 0);
+       netif_device_attach(dev);
+
+       if (netif_running(dev))
+               tlan_start(dev);
+
+       return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend   NULL
+#define tlan_resume    NULL
+
+#endif /* CONFIG_PM */
+
+
 static struct pci_driver tlan_driver = {
        .name           = "tlan",
        .id_table       = tlan_pci_tbl,
        .probe          = tlan_init_one,
        .remove         = __devexit_p(tlan_remove_one),
+       .suspend        = tlan_suspend,
+       .resume         = tlan_resume,
 };
 
 static int __init tlan_probe(void)
 {
        int rc = -ENODEV;
 
-       printk(KERN_INFO "%s", tlan_banner);
+       pr_info("%s", tlan_banner);
 
        TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
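One detail worth spelling out about the printk conversions in this hunk and the following ones: pr_info()/pr_err() pick up a per-file prefix through the pr_fmt() macro, which is why the literal "TLAN: " strings disappear from the messages. The definition sits near the top of tlan.c, outside this excerpt; a minimal sketch of the idiom (the exact prefix below is an assumption, the real file may use KBUILD_MODNAME):

/* Must be defined before the first include that pulls in printk.h. */
#define pr_fmt(fmt) "TLAN: " fmt   /* assumed prefix, for illustration */

#include <linux/kernel.h>

/* With the macro in place,
 *     pr_err("Could not register pci driver\n");
 * expands to
 *     printk(KERN_ERR "TLAN: Could not register pci driver\n");
 * so the prefix no longer has to be repeated at every call site. */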
 
@@ -477,18 +408,18 @@ static int __init tlan_probe(void)
        rc = pci_register_driver(&tlan_driver);
 
        if (rc != 0) {
-               printk(KERN_ERR "TLAN: Could not register pci driver.\n");
+               pr_err("Could not register pci driver\n");
                goto err_out_pci_free;
        }
 
        TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
-       TLan_EisaProbe();
+       tlan_eisa_probe();
 
-       printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d  EISA: %d\n",
-                TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
-                tlan_have_pci, tlan_have_eisa);
+       pr_info("%d device%s installed, PCI: %d  EISA: %d\n",
+               tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+               tlan_have_pci, tlan_have_eisa);
 
-       if (TLanDevicesInstalled == 0) {
+       if (tlan_devices_installed == 0) {
                rc = -ENODEV;
                goto  err_out_pci_unreg;
        }
@@ -501,39 +432,39 @@ err_out_pci_free:
 }
 
 
-static int __devinit tlan_init_one( struct pci_dev *pdev,
-                                   const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+                                  const struct pci_device_id *ent)
 {
-       return TLan_probe1( pdev, -1, -1, 0, ent);
+       return tlan_probe1(pdev, -1, -1, 0, ent);
 }
 
 
 /*
-       ***************************************************************
-        *      tlan_probe1
-        *
-        *      Returns:
-        *              0 on success, error code on error
-        *      Parms:
-        *              none
-        *
-        *      The name is lower case to fit in with all the rest of
-        *      the netcard_probe names.  This function looks for
-        *      another TLan based adapter, setting it up with the
-        *      allocated device struct if one is found.
-        *      tlan_probe has been ported to the new net API and
-        *      now allocates its own device structure. This function
-        *      is also used by modules.
-        *
-        **************************************************************/
-
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+***************************************************************
+*      tlan_probe1
+*
+*      Returns:
+*              0 on success, error code on error
+*      Parms:
+*              none
+*
+*      The name is lower case to fit in with all the rest of
+*      the netcard_probe names.  This function looks for
+*      another TLan based adapter, setting it up with the
+*      allocated device struct if one is found.
+*      tlan_probe has been ported to the new net API and
+*      now allocates its own device structure. This function
+*      is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
                                 long ioaddr, int irq, int rev,
-                                const struct pci_device_id *ent )
+                                const struct pci_device_id *ent)
 {
 
        struct net_device  *dev;
-       TLanPrivateInfo    *priv;
+       struct tlan_priv  *priv;
        u16                device_id;
        int                reg, rc = -ENODEV;
 
@@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
                if (rc)
                        return rc;
 
-               rc = pci_request_regions(pdev, TLanSignature);
+               rc = pci_request_regions(pdev, tlan_signature);
                if (rc) {
-                       printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
+                       pr_err("Could not reserve IO regions\n");
                        goto err_out;
                }
        }
 #endif  /*  CONFIG_PCI  */
 
-       dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+       dev = alloc_etherdev(sizeof(struct tlan_priv));
        if (dev == NULL) {
-               printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
+               pr_err("Could not allocate memory for device\n");
                rc = -ENOMEM;
                goto err_out_regions;
        }
@@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
 
        priv = netdev_priv(dev);
 
-       priv->pciDev = pdev;
+       priv->pci_dev = pdev;
        priv->dev = dev;
 
        /* Is this a PCI device? */
        if (pdev) {
-               u32                pci_io_base = 0;
+               u32                pci_io_base = 0;
 
                priv->adapter = &board_info[ent->driver_data];
 
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
-                       printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+                       pr_err("No suitable PCI mapping available\n");
                        goto err_out_free_dev;
                }
 
-               for ( reg= 0; reg <= 5; reg ++ ) {
+               for (reg = 0; reg <= 5; reg++) {
                        if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
                                pci_io_base = pci_resource_start(pdev, reg);
-                               TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
-                                               pci_io_base);
+                               TLAN_DBG(TLAN_DEBUG_GNRL,
+                                        "IO mapping is available at %x.\n",
+                                        pci_io_base);
                                break;
                        }
                }
                if (!pci_io_base) {
-                       printk(KERN_ERR "TLAN: No IO mappings available\n");
+                       pr_err("No IO mappings available\n");
                        rc = -EIO;
                        goto err_out_free_dev;
                }
 
                dev->base_addr = pci_io_base;
                dev->irq = pdev->irq;
-               priv->adapterRev = pdev->revision;
+               priv->adapter_rev = pdev->revision;
                pci_set_master(pdev);
                pci_set_drvdata(pdev, dev);
 
@@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
                device_id = inw(ioaddr + EISA_ID2);
                priv->is_eisa = 1;
                if (device_id == 0x20F1) {
-                       priv->adapter = &board_info[13];        /* NetFlex-3/E */
-                       priv->adapterRev = 23;                  /* TLAN 2.3 */
+                       priv->adapter = &board_info[13]; /* NetFlex-3/E */
+                       priv->adapter_rev = 23;         /* TLAN 2.3 */
                } else {
                        priv->adapter = &board_info[14];
-                       priv->adapterRev = 10;                  /* TLAN 1.0 */
+                       priv->adapter_rev = 10;         /* TLAN 1.0 */
                }
                dev->base_addr = ioaddr;
                dev->irq = irq;
@@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
                priv->speed  = ((dev->mem_start & 0x18) == 0x18) ? 0
                        : (dev->mem_start & 0x18) >> 3;
 
-               if (priv->speed == 0x1) {
+               if (priv->speed == 0x1)
                        priv->speed = TLAN_SPEED_10;
-               } else if (priv->speed == 0x2) {
+               else if (priv->speed == 0x2)
                        priv->speed = TLAN_SPEED_100;
-               }
+
                debug = priv->debug = dev->mem_end;
        } else {
                priv->aui    = aui[boards_found];
@@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
 
        /* This will be used when we get an adapter error from
         * within our irq handler */
-       INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+       INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
 
        spin_lock_init(&priv->lock);
 
-       rc = TLan_Init(dev);
+       rc = tlan_init(dev);
        if (rc) {
-               printk(KERN_ERR "TLAN: Could not set up device.\n");
+               pr_err("Could not set up device\n");
                goto err_out_free_dev;
        }
 
        rc = register_netdev(dev);
        if (rc) {
-               printk(KERN_ERR "TLAN: Could not register device.\n");
+               pr_err("Could not register device\n");
                goto err_out_uninit;
        }
 
 
-       TLanDevicesInstalled++;
+       tlan_devices_installed++;
        boards_found++;
 
        /* pdev is NULL if this is an EISA device */
        if (pdev)
                tlan_have_pci++;
        else {
-               priv->nextDevice = TLan_Eisa_Devices;
-               TLan_Eisa_Devices = dev;
+               priv->next_device = tlan_eisa_devices;
+               tlan_eisa_devices = dev;
                tlan_have_eisa++;
        }
 
-       printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
-                       dev->name,
-                       (int) dev->irq,
-                       (int) dev->base_addr,
-                       priv->adapter->deviceLabel,
-                       priv->adapterRev);
+       netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+                   (int)dev->irq,
+                   (int)dev->base_addr,
+                   priv->adapter->device_label,
+                   priv->adapter_rev);
        return 0;
 
 err_out_uninit:
-       pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
-                           priv->dmaStorageDMA );
+       pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+                           priv->dma_storage_dma);
 err_out_free_dev:
        free_netdev(dev);
 err_out_regions:
@@ -689,22 +620,23 @@ err_out:
 }
 
 
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
 {
        struct net_device *dev;
-       TLanPrivateInfo *priv;
+       struct tlan_priv *priv;
 
-       while( tlan_have_eisa ) {
-               dev = TLan_Eisa_Devices;
+       while (tlan_have_eisa) {
+               dev = tlan_eisa_devices;
                priv = netdev_priv(dev);
-               if (priv->dmaStorage) {
-                       pci_free_consistent(priv->pciDev, priv->dmaSize,
-                                           priv->dmaStorage, priv->dmaStorageDMA );
+               if (priv->dma_storage) {
+                       pci_free_consistent(priv->pci_dev, priv->dma_size,
+                                           priv->dma_storage,
+                                           priv->dma_storage_dma);
                }
-               release_region( dev->base_addr, 0x10);
-               unregister_netdev( dev );
-               TLan_Eisa_Devices = priv->nextDevice;
-               free_netdev( dev );
+               release_region(dev->base_addr, 0x10);
+               unregister_netdev(dev);
+               tlan_eisa_devices = priv->next_device;
+               free_netdev(dev);
                tlan_have_eisa--;
        }
 }
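Both tlan_remove_one() above and tlan_eisa_cleanup() here hand back the coherent DMA block that tlan_init() (further down in this patch) allocates for the RX/TX descriptor lists. Reduced to its skeleton, the pairing looks like this (kernel-style sketch using the renamed fields, not a complete function):

/* allocation side, as in tlan_init() below */
priv->dma_storage = pci_alloc_consistent(priv->pci_dev, dma_size,
					 &priv->dma_storage_dma);

/* release side, as in tlan_remove_one()/tlan_eisa_cleanup(): the same
 * size, CPU pointer and bus address must all be passed back together */
pci_free_consistent(priv->pci_dev, priv->dma_size,
		    priv->dma_storage, priv->dma_storage_dma);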
@@ -715,7 +647,7 @@ static void __exit tlan_exit(void)
        pci_unregister_driver(&tlan_driver);
 
        if (tlan_have_eisa)
-               TLan_Eisa_Cleanup();
+               tlan_eisa_cleanup();
 
 }
 
@@ -726,24 +658,24 @@ module_exit(tlan_exit);
 
 
 
-       /**************************************************************
-       *      TLan_EisaProbe
-       *
-       *      Returns: 0 on success, 1 otherwise
-       *
-       *      Parms:   None
-       *
-       *
-       *      This functions probes for EISA devices and calls
-       *      TLan_probe1 when one is found.
-       *
-       *************************************************************/
+/**************************************************************
+ *     tlan_eisa_probe
+ *
+ *     Returns: 0 on success, 1 otherwise
+ *
+ *     Parms:   None
+ *
+ *
+ *     This functions probes for EISA devices and calls
+ *     TLan_probe1 when one is found.
+ *
+ *************************************************************/
 
-static void  __init TLan_EisaProbe (void)
+static void  __init tlan_eisa_probe(void)
 {
-       long    ioaddr;
-       int     rc = -ENODEV;
-       int     irq;
+       long    ioaddr;
+       int     rc = -ENODEV;
+       int     irq;
        u16     device_id;
 
        if (!EISA_bus) {
@@ -754,15 +686,16 @@ static void  __init TLan_EisaProbe (void)
        /* Loop through all slots of the EISA bus */
        for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
 
-       TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
-                (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
-       TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
-                (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+               TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+                        (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+               TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+                        (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
 
 
-               TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
-                                       (int) ioaddr);
-               if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+               TLAN_DBG(TLAN_DEBUG_PROBE,
+                        "Probing for EISA adapter at IO: 0x%4x : ",
+                        (int) ioaddr);
+               if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
                        goto out;
 
                if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +705,324 @@ static void  __init TLan_EisaProbe (void)
 
                device_id = inw(ioaddr + EISA_ID2);
                if (device_id !=  0x20F1 && device_id != 0x40F1) {
-                       release_region (ioaddr, 0x10);
+                       release_region(ioaddr, 0x10);
                        goto out;
                }
 
-               if (inb(ioaddr + EISA_CR) != 0x1) {     /* Check if adapter is enabled */
-                       release_region (ioaddr, 0x10);
+               /* check if adapter is enabled */
+               if (inb(ioaddr + EISA_CR) != 0x1) {
+                       release_region(ioaddr, 0x10);
                        goto out2;
                }
 
                if (debug == 0x10)
-                       printk("Found one\n");
+                       pr_info("Found one\n");
 
 
                /* Get irq from board */
-               switch (inb(ioaddr + 0xCC0)) {
-                       case(0x10):
-                               irq=5;
-                               break;
-                       case(0x20):
-                               irq=9;
-                               break;
-                       case(0x40):
-                               irq=10;
-                               break;
-                       case(0x80):
-                               irq=11;
-                               break;
-                       default:
-                               goto out;
+               switch (inb(ioaddr + 0xcc0)) {
+               case(0x10):
+                       irq = 5;
+                       break;
+               case(0x20):
+                       irq = 9;
+                       break;
+               case(0x40):
+                       irq = 10;
+                       break;
+               case(0x80):
+                       irq = 11;
+                       break;
+               default:
+                       goto out;
                }
 
 
                /* Setup the newly found eisa adapter */
-               rc = TLan_probe1( NULL, ioaddr, irq,
-                                       12, NULL);
+               rc = tlan_probe1(NULL, ioaddr, irq,
+                                12, NULL);
                continue;
 
-               out:
-                       if (debug == 0x10)
-                               printk("None found\n");
-                       continue;
+out:
+               if (debug == 0x10)
+                       pr_info("None found\n");
+               continue;
 
-               out2:   if (debug == 0x10)
-                               printk("Card found but it is not enabled, skipping\n");
-                       continue;
+out2:
+               if (debug == 0x10)
+                       pr_info("Card found but it is not enabled, skipping\n");
+               continue;
 
        }
 
-} /* TLan_EisaProbe */
+}
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
 {
        disable_irq(dev->irq);
-       TLan_HandleInterrupt(dev->irq, dev);
+       tlan_handle_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
 }
 #endif
 
-static const struct net_device_ops TLan_netdev_ops = {
-       .ndo_open               = TLan_Open,
-       .ndo_stop               = TLan_Close,
-       .ndo_start_xmit         = TLan_StartTx,
-       .ndo_tx_timeout         = TLan_tx_timeout,
-       .ndo_get_stats          = TLan_GetStats,
-       .ndo_set_multicast_list = TLan_SetMulticastList,
-       .ndo_do_ioctl           = TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+       .ndo_open               = tlan_open,
+       .ndo_stop               = tlan_close,
+       .ndo_start_xmit         = tlan_start_tx,
+       .ndo_tx_timeout         = tlan_tx_timeout,
+       .ndo_get_stats          = tlan_get_stats,
+       .ndo_set_multicast_list = tlan_set_multicast_list,
+       .ndo_do_ioctl           = tlan_ioctl,
        .ndo_change_mtu         = eth_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller     = TLan_Poll,
+       .ndo_poll_controller     = tlan_poll,
 #endif
 };
 
 
 
-       /***************************************************************
-       *      TLan_Init
-       *
-       *      Returns:
-       *              0 on success, error code otherwise.
-       *      Parms:
-       *              dev     The structure of the device to be
-       *                      init'ed.
-       *
-       *      This function completes the initialization of the
-       *      device structure and driver.  It reserves the IO
-       *      addresses, allocates memory for the lists and bounce
-       *      buffers, retrieves the MAC address from the eeprom
-       *      and assignes the device's methods.
-       *
-       **************************************************************/
-
-static int TLan_Init( struct net_device *dev )
+/***************************************************************
+ *     tlan_init
+ *
+ *     Returns:
+ *             0 on success, error code otherwise.
+ *     Parms:
+ *             dev     The structure of the device to be
+ *                     init'ed.
+ *
+ *     This function completes the initialization of the
+ *     device structure and driver.  It reserves the IO
+ *     addresses, allocates memory for the lists and bounce
+ *     buffers, retrieves the MAC address from the eeprom
+ *     and assignes the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
 {
        int             dma_size;
-       int             err;
+       int             err;
        int             i;
-       TLanPrivateInfo *priv;
+       struct tlan_priv        *priv;
 
        priv = netdev_priv(dev);
 
-       dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
-               * ( sizeof(TLanList) );
-       priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
-                                               dma_size, &priv->dmaStorageDMA);
-       priv->dmaSize = dma_size;
+       dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+               * (sizeof(struct tlan_list));
+       priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+                                                dma_size,
+                                                &priv->dma_storage_dma);
+       priv->dma_size = dma_size;
 
-       if ( priv->dmaStorage == NULL ) {
-               printk(KERN_ERR "TLAN:  Could not allocate lists and buffers for %s.\n",
-                       dev->name );
+       if (priv->dma_storage == NULL) {
+               pr_err("Could not allocate lists and buffers for %s\n",
+                      dev->name);
                return -ENOMEM;
        }
-       memset( priv->dmaStorage, 0, dma_size );
-       priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
-       priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
-       priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
-       priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+       memset(priv->dma_storage, 0, dma_size);
+       priv->rx_list = (struct tlan_list *)
+               ALIGN((unsigned long)priv->dma_storage, 8);
+       priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+       priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+       priv->tx_list_dma =
+               priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
 
        err = 0;
-       for ( i = 0;  i < 6 ; i++ )
-               err |= TLan_EeReadByte( dev,
-                                       (u8) priv->adapter->addrOfs + i,
-                                       (u8 *) &dev->dev_addr[i] );
-       if ( err ) {
-               printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
-                       dev->name,
-                       err );
+       for (i = 0;  i < 6 ; i++)
+               err |= tlan_ee_read_byte(dev,
+                                        (u8) priv->adapter->addr_ofs + i,
+                                        (u8 *) &dev->dev_addr[i]);
+       if (err) {
+               pr_err("%s: Error reading MAC from eeprom: %d\n",
+                      dev->name, err);
        }
        dev->addr_len = 6;
 
        netif_carrier_off(dev);
 
        /* Device methods */
-       dev->netdev_ops = &TLan_netdev_ops;
+       dev->netdev_ops = &tlan_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        return 0;
 
-} /* TLan_Init */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_Open
-       *
-       *      Returns:
-       *              0 on success, error code otherwise.
-       *      Parms:
-       *              dev     Structure of device to be opened.
-       *
-       *      This routine puts the driver and TLAN adapter in a
-       *      state where it is ready to send and receive packets.
-       *      It allocates the IRQ, resets and brings the adapter
-       *      out of reset, and allows interrupts.  It also delays
-       *      the startup for autonegotiation or sends a Rx GO
-       *      command to the adapter, as appropriate.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_open
+ *
+ *     Returns:
+ *             0 on success, error code otherwise.
+ *     Parms:
+ *             dev     Structure of device to be opened.
+ *
+ *     This routine puts the driver and TLAN adapter in a
+ *     state where it is ready to send and receive packets.
+ *     It allocates the IRQ, resets and brings the adapter
+ *     out of reset, and allows interrupts.  It also delays
+ *     the startup for autonegotiation or sends a Rx GO
+ *     command to the adapter, as appropriate.
+ *
+ **************************************************************/
 
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int             err;
 
-       priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
-       err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
-                          dev->name, dev );
+       priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+       err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+                         dev->name, dev);
 
-       if ( err ) {
-               pr_err("TLAN:  Cannot open %s because IRQ %d is already in use.\n",
-                      dev->name, dev->irq );
+       if (err) {
+               netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+                          dev->irq);
                return err;
        }
 
        init_timer(&priv->timer);
-       netif_start_queue(dev);
 
-       /* NOTE: It might not be necessary to read the stats before a
-                        reset if you don't care what the values are.
-       */
-       TLan_ResetLists( dev );
-       TLan_ReadAndClearStats( dev, TLAN_IGNORE );
-       TLan_ResetAdapter( dev );
+       tlan_start(dev);
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
-                 dev->name, priv->tlanRev );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
+                dev->name, priv->tlan_rev);
 
        return 0;
 
-} /* TLan_Open */
+}
 
 
 
-       /**************************************************************
-       *      TLan_ioctl
-       *
-       *      Returns:
-       *              0 on success, error code otherwise
-       *      Params:
-       *              dev     structure of device to receive ioctl.
-       *
-       *              rq      ifreq structure to hold userspace data.
-       *
-       *              cmd     ioctl command.
-       *
-       *
-       *************************************************************/
+/**************************************************************
+ *     tlan_ioctl
+ *
+ *     Returns:
+ *             0 on success, error code otherwise
+ *     Params:
+ *             dev     structure of device to receive ioctl.
+ *
+ *             rq      ifreq structure to hold userspace data.
+ *
+ *             cmd     ioctl command.
+ *
+ *
+ *************************************************************/
 
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        struct mii_ioctl_data *data = if_mii(rq);
-       u32 phy   = priv->phy[priv->phyNum];
+       u32 phy   = priv->phy[priv->phy_num];
 
-       if (!priv->phyOnline)
+       if (!priv->phy_online)
                return -EAGAIN;
 
-       switch(cmd) {
-       case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
-                       data->phy_id = phy;
+       switch (cmd) {
+       case SIOCGMIIPHY:               /* get address of MII PHY in use. */
+               data->phy_id = phy;
 
 
-       case SIOCGMIIREG:               /* Read MII PHY register. */
-                       TLan_MiiReadReg(dev, data->phy_id & 0x1f,
-                                       data->reg_num & 0x1f, &data->val_out);
-                       return 0;
+       case SIOCGMIIREG:               /* read MII PHY register. */
+               tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+                                 data->reg_num & 0x1f, &data->val_out);
+               return 0;
 
 
-       case SIOCSMIIREG:               /* Write MII PHY register. */
-                       TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
-                                        data->reg_num & 0x1f, data->val_in);
-                       return 0;
-               default:
-                       return -EOPNOTSUPP;
+       case SIOCSMIIREG:               /* write MII PHY register. */
+               tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+                                  data->reg_num & 0x1f, data->val_in);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
        }
-} /* tlan_ioctl */
+}
 
 
-       /***************************************************************
-       *      TLan_tx_timeout
-       *
-       *      Returns: nothing
-       *
-       *      Params:
-       *              dev     structure of device which timed out
-       *                      during transmit.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_tx_timeout
+ *
+ *     Returns: nothing
+ *
+ *     Params:
+ *             dev     structure of device which timed out
+ *                     during transmit.
+ *
+ **************************************************************/
 
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
 {
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
 
        /* Ok so we timed out, lets see what we can do about it...*/
-       TLan_FreeLists( dev );
-       TLan_ResetLists( dev );
-       TLan_ReadAndClearStats( dev, TLAN_IGNORE );
-       TLan_ResetAdapter( dev );
+       tlan_free_lists(dev);
+       tlan_reset_lists(dev);
+       tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+       tlan_reset_adapter(dev);
        dev->trans_start = jiffies; /* prevent tx timeout */
-       netif_wake_queue( dev );
+       netif_wake_queue(dev);
 
 }
 
 
-       /***************************************************************
-       *      TLan_tx_timeout_work
-       *
-       *      Returns: nothing
-       *
-       *      Params:
-       *              work    work item of device which timed out
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_tx_timeout_work
+ *
+ *     Returns: nothing
+ *
+ *     Params:
+ *             work    work item of device which timed out
+ *
+ **************************************************************/
 
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
 {
-       TLanPrivateInfo *priv =
-               container_of(work, TLanPrivateInfo, tlan_tqueue);
+       struct tlan_priv        *priv =
+               container_of(work, struct tlan_priv, tlan_tqueue);
 
-       TLan_tx_timeout(priv->dev);
+       tlan_tx_timeout(priv->dev);
 }
 
 
 
-       /***************************************************************
-       *      TLan_StartTx
-       *
-       *      Returns:
-       *              0 on success, non-zero on failure.
-       *      Parms:
-       *              skb     A pointer to the sk_buff containing the
-       *                      frame to be sent.
-       *              dev     The device to send the data on.
-       *
-       *      This function adds a frame to the Tx list to be sent
-       *      ASAP.  First it verifies that the adapter is ready and
-       *      there is room in the queue.  Then it sets up the next
-       *      available list, copies the frame to the corresponding
-       *      buffer.  If the adapter Tx channel is idle, it gives
-       *      the adapter a Tx Go command on the list, otherwise it
-       *      sets the forward address of the previous list to point
-       *      to this one.  Then it frees the sk_buff.
-       *
-       **************************************************************/
-
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+/***************************************************************
+ *     tlan_start_tx
+ *
+ *     Returns:
+ *             0 on success, non-zero on failure.
+ *     Parms:
+ *             skb     A pointer to the sk_buff containing the
+ *                     frame to be sent.
+ *             dev     The device to send the data on.
+ *
+ *     This function adds a frame to the Tx list to be sent
+ *     ASAP.  First it verifies that the adapter is ready and
+ *     there is room in the queue.  Then it sets up the next
+ *     available list, copies the frame to the corresponding
+ *     buffer.  If the adapter Tx channel is idle, it gives
+ *     the adapter a Tx Go command on the list, otherwise it
+ *     sets the forward address of the previous list to point
+ *     to this one.  Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        dma_addr_t      tail_list_phys;
-       TLanList        *tail_list;
+       struct tlan_list        *tail_list;
        unsigned long   flags;
        unsigned int    txlen;
 
-       if ( ! priv->phyOnline ) {
-               TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
-                         dev->name );
+       if (!priv->phy_online) {
+               TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
+                        dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
@@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
                return NETDEV_TX_OK;
        txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
 
-       tail_list = priv->txList + priv->txTail;
-       tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+       tail_list = priv->tx_list + priv->tx_tail;
+       tail_list_phys =
+               priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
 
-       if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
-                         dev->name, priv->txHead, priv->txTail );
+       if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
+                        dev->name, priv->tx_head, priv->tx_tail);
                netif_stop_queue(dev);
-               priv->txBusyCount++;
+               priv->tx_busy_count++;
                return NETDEV_TX_BUSY;
        }
 
        tail_list->forward = 0;
 
-       tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+       tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
                                                      skb->data, txlen,
                                                      PCI_DMA_TODEVICE);
-       TLan_StoreSKB(tail_list, skb);
+       tlan_store_skb(tail_list, skb);
 
-       tail_list->frameSize = (u16) txlen;
+       tail_list->frame_size = (u16) txlen;
        tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
        tail_list->buffer[1].count = 0;
        tail_list->buffer[1].address = 0;
 
        spin_lock_irqsave(&priv->lock, flags);
-       tail_list->cStat = TLAN_CSTAT_READY;
-       if ( ! priv->txInProgress ) {
-               priv->txInProgress = 1;
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  Starting TX on buffer %d\n", priv->txTail );
-               outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
-               outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+       tail_list->c_stat = TLAN_CSTAT_READY;
+       if (!priv->tx_in_progress) {
+               priv->tx_in_progress = 1;
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  Starting TX on buffer %d\n",
+                        priv->tx_tail);
+               outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+               outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
        } else {
-               TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Adding buffer %d to TX channel\n",
-                         priv->txTail );
-               if ( priv->txTail == 0 ) {
-                       ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  Adding buffer %d to TX channel\n",
+                        priv->tx_tail);
+               if (priv->tx_tail == 0) {
+                       (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
                                = tail_list_phys;
                } else {
-                       ( priv->txList + ( priv->txTail - 1 ) )->forward
+                       (priv->tx_list + (priv->tx_tail - 1))->forward
                                = tail_list_phys;
                }
        }
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+       CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
 
        return NETDEV_TX_OK;
 
-} /* TLan_StartTx */
+}
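tlan_start_tx() advances priv->tx_tail with the driver's CIRC_INC() macro from tlan.h, which is not shown in this patch; its visible effect is a wrap-around ring index over the Tx descriptor list. An illustrative, self-contained version of that behaviour (not the macro's literal definition, and the list count below is assumed):

#include <stdio.h>

#define NUM_TX_LISTS 64   /* stand-in for TLAN_NUM_TX_LISTS; value assumed */

/* Advance a ring index and wrap to 0 at the end of the descriptor list. */
static void circ_inc(unsigned int *idx, unsigned int size)
{
	*idx = (*idx + 1) % size;
}

int main(void)
{
	unsigned int tx_tail = NUM_TX_LISTS - 1;

	circ_inc(&tx_tail, NUM_TX_LISTS);
	printf("next tx slot: %u\n", tx_tail);   /* prints 0: wrapped around */
	return 0;
}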
 
 
 
 
-       /***************************************************************
-       *      TLan_HandleInterrupt
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              irq     The line on which the interrupt
-       *                      occurred.
-       *              dev_id  A pointer to the device assigned to
-       *                      this irq line.
-       *
-       *      This function handles an interrupt generated by its
-       *      assigned TLAN adapter.  The function deactivates
-       *      interrupts on its adapter, records the type of
-       *      interrupt, executes the appropriate subhandler, and
-       *      acknowdges the interrupt to the adapter (thus
-       *      re-enabling adapter interrupts.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_interrupt
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             irq     The line on which the interrupt
+ *                     occurred.
+ *             dev_id  A pointer to the device assigned to
+ *                     this irq line.
+ *
+ *     This function handles an interrupt generated by its
+ *     assigned TLAN adapter.  The function deactivates
+ *     interrupts on its adapter, records the type of
+ *     interrupt, executes the appropriate subhandler, and
+ *     acknowdges the interrupt to the adapter (thus
+ *     re-enabling adapter interrupts.
+ *
+ **************************************************************/
 
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
 {
        struct net_device       *dev = dev_id;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16             host_int;
        u16             type;
 
        spin_lock(&priv->lock);
 
-       host_int = inw( dev->base_addr + TLAN_HOST_INT );
-       type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
-       if ( type ) {
+       host_int = inw(dev->base_addr + TLAN_HOST_INT);
+       type = (host_int & TLAN_HI_IT_MASK) >> 2;
+       if (type) {
                u32     ack;
                u32     host_cmd;
 
-               outw( host_int, dev->base_addr + TLAN_HOST_INT );
-               ack = TLanIntVector[type]( dev, host_int );
+               outw(host_int, dev->base_addr + TLAN_HOST_INT);
+               ack = tlan_int_vector[type](dev, host_int);
 
-               if ( ack ) {
-                       host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
-                       outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+               if (ack) {
+                       host_cmd = TLAN_HC_ACK | ack | (type << 18);
+                       outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
                }
        }
 
        spin_unlock(&priv->lock);
 
        return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_Close
-       *
-       *      Returns:
-       *              An error code.
-       *      Parms:
-       *              dev     The device structure of the device to
-       *                      close.
-       *
-       *      This function shuts down the adapter.  It records any
-       *      stats, puts the adapter into reset state, deactivates
-       *      its time as needed, and frees the irq it is using.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_close
+ *
+ *     Returns:
+ *             An error code.
+ *     Parms:
+ *             dev     The device structure of the device to
+ *                     close.
+ *
+ *     This function shuts down the adapter.  It records any
+ *     stats, puts the adapter into reset state, deactivates
+ *     its time as needed, and frees the irq it is using.
+ *
+ **************************************************************/
 
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
 
-       netif_stop_queue(dev);
        priv->neg_be_verbose = 0;
+       tlan_stop(dev);
 
-       TLan_ReadAndClearStats( dev, TLAN_RECORD );
-       outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
-       if ( priv->timer.function != NULL ) {
-               del_timer_sync( &priv->timer );
-               priv->timer.function = NULL;
-       }
-
-       free_irq( dev->irq, dev );
-       TLan_FreeLists( dev );
-       TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+       free_irq(dev->irq, dev);
+       tlan_free_lists(dev);
+       TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
 
        return 0;
 
-} /* TLan_Close */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_GetStats
-       *
-       *      Returns:
-       *              A pointer to the device's statistics structure.
-       *      Parms:
-       *              dev     The device structure to return the
-       *                      stats for.
-       *
-       *      This function updates the devices statistics by reading
-       *      the TLAN chip's onboard registers.  Then it returns the
-       *      address of the statistics structure.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_get_stats
+ *
+ *     Returns:
+ *             A pointer to the device's statistics structure.
+ *     Parms:
+ *             dev     The device structure to return the
+ *                     stats for.
+ *
+ *     This function updates the devices statistics by reading
+ *     the TLAN chip's onboard registers.  Then it returns the
+ *     address of the statistics structure.
+ *
+ **************************************************************/
 
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int i;
 
        /* Should only read stats if open ? */
-       TLan_ReadAndClearStats( dev, TLAN_RECORD );
+       tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
-       TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
-                 priv->rxEocCount );
-       TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
-                 priv->txBusyCount );
-       if ( debug & TLAN_DEBUG_GNRL ) {
-               TLan_PrintDio( dev->base_addr );
-               TLan_PhyPrint( dev );
+       TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
+                priv->rx_eoc_count);
+       TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
+                priv->tx_busy_count);
+       if (debug & TLAN_DEBUG_GNRL) {
+               tlan_print_dio(dev->base_addr);
+               tlan_phy_print(dev);
        }
-       if ( debug & TLAN_DEBUG_LIST ) {
-               for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
-                       TLan_PrintList( priv->rxList + i, "RX", i );
-               for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
-                       TLan_PrintList( priv->txList + i, "TX", i );
+       if (debug & TLAN_DEBUG_LIST) {
+               for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+                       tlan_print_list(priv->rx_list + i, "RX", i);
+               for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+                       tlan_print_list(priv->tx_list + i, "TX", i);
        }
 
        return &dev->stats;
 
-} /* TLan_GetStats */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_SetMulticastList
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     The device structure to set the
-       *                      multicast list for.
-       *
-       *      This function sets the TLAN adaptor to various receive
-       *      modes.  If the IFF_PROMISC flag is set, promiscuous
-       *      mode is acitviated.  Otherwise, promiscuous mode is
-       *      turned off.  If the IFF_ALLMULTI flag is set, then
-       *      the hash table is set to receive all group addresses.
-       *      Otherwise, the first three multicast addresses are
-       *      stored in AREG_1-3, and the rest are selected via the
-       *      hash table, as necessary.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_set_multicast_list
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     The device structure to set the
+ *                     multicast list for.
+ *
+ *     This function sets the TLAN adaptor to various receive
+ *     modes.  If the IFF_PROMISC flag is set, promiscuous
+ *     mode is activated.  Otherwise, promiscuous mode is
+ *     turned off.  If the IFF_ALLMULTI flag is set, then
+ *     the hash table is set to receive all group addresses.
+ *     Otherwise, the first three multicast addresses are
+ *     stored in AREG_1-3, and the rest are selected via the
+ *     hash table, as necessary.
+ *
+ **************************************************************/
 
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
 {
        struct netdev_hw_addr *ha;
        u32                     hash1 = 0;
@@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
        u32                     offset;
        u8                      tmp;
 
-       if ( dev->flags & IFF_PROMISC ) {
-               tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+       if (dev->flags & IFF_PROMISC) {
+               tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
        } else {
-               tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
-               if ( dev->flags & IFF_ALLMULTI ) {
-                       for ( i = 0; i < 3; i++ )
-                               TLan_SetMac( dev, i + 1, NULL );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+               tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+               if (dev->flags & IFF_ALLMULTI) {
+                       for (i = 0; i < 3; i++)
+                               tlan_set_mac(dev, i + 1, NULL);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+                                        0xffffffff);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+                                        0xffffffff);
                } else {
                        i = 0;
                        netdev_for_each_mc_addr(ha, dev) {
-                               if ( i < 3 ) {
-                                       TLan_SetMac( dev, i + 1,
+                               if (i < 3) {
+                                       tlan_set_mac(dev, i + 1,
                                                     (char *) &ha->addr);
                                } else {
-                                       offset = TLan_HashFunc((u8 *)&ha->addr);
-                                       if ( offset < 32 )
-                                               hash1 |= ( 1 << offset );
+                                       offset =
+                                               tlan_hash_func((u8 *)&ha->addr);
+                                       if (offset < 32)
+                                               hash1 |= (1 << offset);
                                        else
-                                               hash2 |= ( 1 << ( offset - 32 ) );
+                                               hash2 |= (1 << (offset - 32));
                                }
                                i++;
                        }
-                       for ( ; i < 3; i++ )
-                               TLan_SetMac( dev, i + 1, NULL );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+                       for ( ; i < 3; i++)
+                               tlan_set_mac(dev, i + 1, NULL);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
                }
        }
 
-} /* TLan_SetMulticastList */
+}
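
The else-branch above programs a 64-bit multicast filter split across two 32-bit DIO registers: a 6-bit hash of each group address selects one bit, with offsets 0-31 landing in TLAN_HASH_1 and 32-63 in TLAN_HASH_2. A minimal userspace sketch of that bit bookkeeping follows; the xor-fold hash is only a placeholder for the driver's tlan_hash_func, and the addresses are invented.

/* Sketch of the hash1/hash2 bookkeeping; fake_hash_func() is NOT the
 * real ThunderLAN hash, only the bit-selection logic is the point.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int fake_hash_func(const uint8_t *addr)
{
        unsigned int h = 0;
        int i;

        for (i = 0; i < 6; i++)         /* fold the MAC into 6 bits */
                h ^= addr[i];
        return h & 0x3f;
}

int main(void)
{
        const uint8_t groups[][6] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa },
        };
        uint32_t hash1 = 0, hash2 = 0;
        unsigned int i, offset;

        for (i = 0; i < 2; i++) {
                offset = fake_hash_func(groups[i]);
                if (offset < 32)                /* low half -> HASH_1 */
                        hash1 |= 1u << offset;
                else                            /* high half -> HASH_2 */
                        hash2 |= 1u << (offset - 32);
        }
        printf("HASH_1=0x%08x HASH_2=0x%08x\n",
               (unsigned)hash1, (unsigned)hash2);
        return 0;
}
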
 
 
 
 /*****************************************************************************
 ******************************************************************************
 
-        ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
 
-       Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
-       Programmer's Guide" for more informations on handling interrupts
-       generated by TLAN based adapters.
+please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN based adapters.
 
 ******************************************************************************
 *****************************************************************************/
@@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
 
 
 
-       /***************************************************************
-        *      TLan_HandleTxEOF
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles Tx EOF interrupts which are raised
-       *      by the adapter when it has completed sending the
-       *      contents of a buffer.  If detemines which list/buffer
-       *      was completed and resets it.  If the buffer was the last
-       *      in the channel (EOC), then the function checks to see if
-       *      another buffer is ready to send, and if so, sends a Tx
-       *      Go command.  Finally, the driver activates/continues the
-       *      activity LED.
-       *
-       **************************************************************/
-
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+/***************************************************************
+ *     tlan_handle_tx_eof
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles Tx EOF interrupts which are raised
+ *     by the adapter when it has completed sending the
+ *     contents of a buffer.  It determines which list/buffer
+ *     was completed and resets it.  If the buffer was the last
+ *     in the channel (EOC), then the function checks to see if
+ *     another buffer is ready to send, and if so, sends a Tx
+ *     Go command.  Finally, the driver activates/continues the
+ *     activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int             eoc = 0;
-       TLanList        *head_list;
+       struct tlan_list        *head_list;
        dma_addr_t      head_list_phys;
        u32             ack = 0;
-       u16             tmpCStat;
+       u16             tmp_c_stat;
 
-       TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
-                 priv->txHead, priv->txTail );
-       head_list = priv->txList + priv->txHead;
+       TLAN_DBG(TLAN_DEBUG_TX,
+                "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
+                priv->tx_head, priv->tx_tail);
+       head_list = priv->tx_list + priv->tx_head;
 
-       while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-               struct sk_buff *skb = TLan_GetSKB(head_list);
+       while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+              && (ack < 255)) {
+               struct sk_buff *skb = tlan_get_skb(head_list);
 
                ack++;
-               pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+               pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
                                 max(skb->len,
                                     (unsigned int)TLAN_MIN_FRAME_SIZE),
                                 PCI_DMA_TODEVICE);
@@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
                head_list->buffer[8].address = 0;
                head_list->buffer[9].address = 0;
 
-               if ( tmpCStat & TLAN_CSTAT_EOC )
+               if (tmp_c_stat & TLAN_CSTAT_EOC)
                        eoc = 1;
 
-               dev->stats.tx_bytes += head_list->frameSize;
+               dev->stats.tx_bytes += head_list->frame_size;
 
-               head_list->cStat = TLAN_CSTAT_UNUSED;
+               head_list->c_stat = TLAN_CSTAT_UNUSED;
                netif_start_queue(dev);
-               CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
-               head_list = priv->txList + priv->txHead;
+               CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+               head_list = priv->tx_list + priv->tx_head;
        }
 
        if (!ack)
-               printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
-       if ( eoc ) {
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d)\n",
-                         priv->txHead, priv->txTail );
-               head_list = priv->txList + priv->txHead;
-               head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-               if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
-                       outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+               netdev_info(dev,
+                           "Received interrupt for uncompleted TX frame\n");
+
+       if (eoc) {
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  handling TX EOC (Head=%d Tail=%d)\n",
+                        priv->tx_head, priv->tx_tail);
+               head_list = priv->tx_list + priv->tx_head;
+               head_list_phys = priv->tx_list_dma
+                       + sizeof(struct tlan_list)*priv->tx_head;
+               if ((head_list->c_stat & TLAN_CSTAT_READY)
+                   == TLAN_CSTAT_READY) {
+                       outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                        ack |= TLAN_HC_GO;
                } else {
-                       priv->txInProgress = 0;
+                       priv->tx_in_progress = 0;
                }
        }
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-               if ( priv->timer.function == NULL ) {
-                        priv->timer.function = TLan_Timer;
-                        priv->timer.data = (unsigned long) dev;
-                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-                        priv->timerSetAt = jiffies;
-                        priv->timerType = TLAN_TIMER_ACTIVITY;
-                        add_timer(&priv->timer);
-               } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-                       priv->timerSetAt = jiffies;
+       if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+               if (priv->timer.function == NULL) {
+                       priv->timer.function = tlan_timer;
+                       priv->timer.data = (unsigned long) dev;
+                       priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+                       priv->timer_set_at = jiffies;
+                       priv->timer_type = TLAN_TIMER_ACTIVITY;
+                       add_timer(&priv->timer);
+               } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+                       priv->timer_set_at = jiffies;
                }
        }
 
        return ack;
 
-} /* TLan_HandleTxEOF */
+}
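
The completion loop in tlan_handle_tx_eof is a head-chasing ring walk: every list whose CSTAT carries the frame-complete bit is reaped, marked unused, and the head index wraps with CIRC_INC. A self-contained sketch of that pattern, with invented descriptor contents and mask values (only the shape of the loop mirrors the driver):

/* Head-advancing completion walk; all values below are made up. */
#include <stdio.h>

#define NUM_TX_LISTS    8
#define CSTAT_UNUSED    0x8000
#define CSTAT_FRM_CMP   0x4000
#define CIRC_INC(a, b)  do { if (++(a) >= (b)) (a) = 0; } while (0)

struct fake_list {
        unsigned short c_stat;
        unsigned int   frame_size;
};

int main(void)
{
        struct fake_list ring[NUM_TX_LISTS] = {
                { CSTAT_FRM_CMP, 60 }, { CSTAT_FRM_CMP, 1514 }, { 0, 0 },
        };
        int head = 0, completed = 0;
        unsigned long tx_bytes = 0;

        /* Reap every descriptor the "adapter" marked complete. */
        while (ring[head].c_stat & CSTAT_FRM_CMP) {
                tx_bytes += ring[head].frame_size;
                ring[head].c_stat = CSTAT_UNUSED;
                CIRC_INC(head, NUM_TX_LISTS);
                completed++;
        }
        printf("reaped %d frames, %lu bytes, head now %d\n",
               completed, tx_bytes, head);
        return 0;
}
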
 
 
 
 
-       /***************************************************************
-       *      TLan_HandleStatOverflow
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles the Statistics Overflow interrupt
-       *      which means that one or more of the TLAN statistics
-       *      registers has reached 1/2 capacity and needs to be read.
-       *
-       **************************************************************/
+/***************************************************************
+ *     TLan_HandleStatOverflow
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles the Statistics Overflow interrupt
+ *     which means that one or more of the TLAN statistics
+ *     registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
 {
-       TLan_ReadAndClearStats( dev, TLAN_RECORD );
+       tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
        return 1;
 
-} /* TLan_HandleStatOverflow */
-
-
-
-
-       /***************************************************************
-       *      TLan_HandleRxEOF
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles the Rx EOF interrupt which
-       *      indicates a frame has been received by the adapter from
-       *      the net and the frame has been transferred to memory.
-       *      The function determines the bounce buffer the frame has
-       *      been loaded into, creates a new sk_buff big enough to
-       *      hold the frame, and sends it to protocol stack.  It
-       *      then resets the used buffer and appends it to the end
-       *      of the list.  If the frame was the last in the Rx
-       *      channel (EOC), the function restarts the receive channel
-       *      by sending an Rx Go command to the adapter.  Then it
-       *      activates/continues the activity LED.
-       *
-       **************************************************************/
-
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+}
+
+
+
+
+/***************************************************************
+ *     TLan_HandleRxEOF
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles the Rx EOF interrupt which
+ *     indicates a frame has been received by the adapter from
+ *     the net and the frame has been transferred to memory.
+ *     The function determines the bounce buffer the frame has
+ *     been loaded into, creates a new sk_buff big enough to
+ *     hold the frame, and sends it to the protocol stack.  It
+ *     then resets the used buffer and appends it to the end
+ *     of the list.  If the frame was the last in the Rx
+ *     channel (EOC), the function restarts the receive channel
+ *     by sending an Rx Go command to the adapter.  Then it
+ *     activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u32             ack = 0;
        int             eoc = 0;
-       TLanList        *head_list;
+       struct tlan_list        *head_list;
        struct sk_buff  *skb;
-       TLanList        *tail_list;
-       u16             tmpCStat;
+       struct tlan_list        *tail_list;
+       u16             tmp_c_stat;
        dma_addr_t      head_list_phys;
 
-       TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  Handling RX EOF (Head=%d Tail=%d)\n",
-                 priv->rxHead, priv->rxTail );
-       head_list = priv->rxList + priv->rxHead;
-       head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+       TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  handling RX EOF (Head=%d Tail=%d)\n",
+                priv->rx_head, priv->rx_tail);
+       head_list = priv->rx_list + priv->rx_head;
+       head_list_phys =
+               priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
 
-       while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-               dma_addr_t frameDma = head_list->buffer[0].address;
-               u32 frameSize = head_list->frameSize;
+       while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+              && (ack < 255)) {
+               dma_addr_t frame_dma = head_list->buffer[0].address;
+               u32 frame_size = head_list->frame_size;
                struct sk_buff *new_skb;
 
                ack++;
-               if (tmpCStat & TLAN_CSTAT_EOC)
+               if (tmp_c_stat & TLAN_CSTAT_EOC)
                        eoc = 1;
 
                new_skb = netdev_alloc_skb_ip_align(dev,
                                                    TLAN_MAX_FRAME_SIZE + 5);
-               if ( !new_skb )
+               if (!new_skb)
                        goto drop_and_reuse;
 
-               skb = TLan_GetSKB(head_list);
-               pci_unmap_single(priv->pciDev, frameDma,
+               skb = tlan_get_skb(head_list);
+               pci_unmap_single(priv->pci_dev, frame_dma,
                                 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
-               skb_put( skb, frameSize );
+               skb_put(skb, frame_size);
 
-               dev->stats.rx_bytes += frameSize;
+               dev->stats.rx_bytes += frame_size;
 
-               skb->protocol = eth_type_trans( skb, dev );
-               netif_rx( skb );
+               skb->protocol = eth_type_trans(skb, dev);
+               netif_rx(skb);
 
-               head_list->buffer[0].address = pci_map_single(priv->pciDev,
-                                                             new_skb->data,
-                                                             TLAN_MAX_FRAME_SIZE,
-                                                             PCI_DMA_FROMDEVICE);
+               head_list->buffer[0].address =
+                       pci_map_single(priv->pci_dev, new_skb->data,
+                                      TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 
-               TLan_StoreSKB(head_list, new_skb);
+               tlan_store_skb(head_list, new_skb);
 drop_and_reuse:
                head_list->forward = 0;
-               head_list->cStat = 0;
-               tail_list = priv->rxList + priv->rxTail;
+               head_list->c_stat = 0;
+               tail_list = priv->rx_list + priv->rx_tail;
                tail_list->forward = head_list_phys;
 
-               CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
-               CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
-               head_list = priv->rxList + priv->rxHead;
-               head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+               CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+               CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+               head_list = priv->rx_list + priv->rx_head;
+               head_list_phys = priv->rx_list_dma
+                       + sizeof(struct tlan_list)*priv->rx_head;
        }
 
        if (!ack)
-               printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
-       if ( eoc ) {
-               TLAN_DBG( TLAN_DEBUG_RX,
-                         "RECEIVE:  Handling RX EOC (Head=%d Tail=%d)\n",
-                         priv->rxHead, priv->rxTail );
-               head_list = priv->rxList + priv->rxHead;
-               head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-               outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+               netdev_info(dev,
+                           "Received interrupt for uncompleted RX frame\n");
+
+
+       if (eoc) {
+               TLAN_DBG(TLAN_DEBUG_RX,
+                        "RECEIVE:  handling RX EOC (Head=%d Tail=%d)\n",
+                        priv->rx_head, priv->rx_tail);
+               head_list = priv->rx_list + priv->rx_head;
+               head_list_phys = priv->rx_list_dma
+                       + sizeof(struct tlan_list)*priv->rx_head;
+               outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                ack |= TLAN_HC_GO | TLAN_HC_RT;
-               priv->rxEocCount++;
+               priv->rx_eoc_count++;
        }
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-               if ( priv->timer.function == NULL )  {
-                       priv->timer.function = TLan_Timer;
+       if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+               if (priv->timer.function == NULL)  {
+                       priv->timer.function = tlan_timer;
                        priv->timer.data = (unsigned long) dev;
                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-                       priv->timerSetAt = jiffies;
-                       priv->timerType = TLAN_TIMER_ACTIVITY;
+                       priv->timer_set_at = jiffies;
+                       priv->timer_type = TLAN_TIMER_ACTIVITY;
                        add_timer(&priv->timer);
-               } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-                       priv->timerSetAt = jiffies;
+               } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+                       priv->timer_set_at = jiffies;
                }
        }
 
        return ack;
 
-} /* TLan_HandleRxEOF */
+}
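
tlan_handle_rx_eof never copies frame data: the filled buffer is detached for delivery and the descriptor is re-armed with a freshly allocated one, falling back to reusing the old buffer (the drop_and_reuse path) when allocation fails. A rough userspace sketch of that swap, with malloc standing in for skb allocation and no DMA mapping modelled:

/* Buffer-swap sketch: hand the filled buffer up, re-arm the slot
 * with a new one.  malloc() stands in for netdev_alloc_skb().
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_FRAME 1536

struct rx_slot {
        unsigned char *buf;     /* buffer currently owned by the "adapter" */
};

/* Deliver one received frame and re-arm the slot.  Returns the buffer
 * to pass up the stack, or NULL if allocation failed and the old
 * buffer stays attached (the drop-and-reuse case).
 */
static unsigned char *rx_complete(struct rx_slot *slot)
{
        unsigned char *filled = slot->buf;
        unsigned char *fresh = malloc(MAX_FRAME);

        if (!fresh)
                return NULL;    /* keep old buffer attached, drop frame */
        slot->buf = fresh;
        return filled;
}

int main(void)
{
        struct rx_slot slot = { .buf = malloc(MAX_FRAME) };
        unsigned char *frame = rx_complete(&slot);

        printf("frame %p delivered, slot re-armed with %p\n",
               (void *)frame, (void *)slot.buf);
        free(frame);
        free(slot.buf);
        return 0;
}
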
 
 
 
 
-       /***************************************************************
-       *      TLan_HandleDummy
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles the Dummy interrupt, which is
-       *      raised whenever a test interrupt is generated by setting
-       *      the Req_Int bit of HOST_CMD to 1.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_dummy
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles the Dummy interrupt, which is
+ *     raised whenever a test interrupt is generated by setting
+ *     the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
 {
-       printk( "TLAN:  Test interrupt on %s.\n", dev->name );
+       netdev_info(dev, "Test interrupt\n");
        return 1;
 
-} /* TLan_HandleDummy */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_HandleTxEOC
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This driver is structured to determine EOC occurrences by
-       *      reading the CSTAT member of the list structure.  Tx EOC
-       *      interrupts are disabled via the DIO INTDIS register.
-       *      However, TLAN chips before revision 3.0 didn't have this
-       *      functionality, so process EOC events if this is the
-       *      case.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_tx_eoc
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This driver is structured to determine EOC occurrences by
+ *     reading the CSTAT member of the list structure.  Tx EOC
+ *     interrupts are disabled via the DIO INTDIS register.
+ *     However, TLAN chips before revision 3.0 didn't have this
+ *     functionality, so process EOC events if this is the
+ *     case.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
-       TLanList                *head_list;
+       struct tlan_priv        *priv = netdev_priv(dev);
+       struct tlan_list                *head_list;
        dma_addr_t              head_list_phys;
        u32                     ack = 1;
 
        host_int = 0;
-       if ( priv->tlanRev < 0x30 ) {
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
-                         priv->txHead, priv->txTail );
-               head_list = priv->txList + priv->txHead;
-               head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-               if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+       if (priv->tlan_rev < 0x30) {
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+                        priv->tx_head, priv->tx_tail);
+               head_list = priv->tx_list + priv->tx_head;
+               head_list_phys = priv->tx_list_dma
+                       + sizeof(struct tlan_list)*priv->tx_head;
+               if ((head_list->c_stat & TLAN_CSTAT_READY)
+                   == TLAN_CSTAT_READY) {
                        netif_stop_queue(dev);
-                       outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+                       outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                        ack |= TLAN_HC_GO;
                } else {
-                       priv->txInProgress = 0;
+                       priv->tx_in_progress = 0;
                }
        }
 
        return ack;
 
-} /* TLan_HandleTxEOC */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_HandleStatusCheck
-       *
-       *      Returns:
-       *              0 if Adapter check, 1 if Network Status check.
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles Adapter Check/Network Status
-       *      interrupts generated by the adapter.  It checks the
-       *      vector in the HOST_INT register to determine if it is
-       *      an Adapter Check interrupt.  If so, it resets the
-       *      adapter.  Otherwise it clears the status registers
-       *      and services the PHY.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_status_check
+ *
+ *     Returns:
+ *             0 if Adapter check, 1 if Network Status check.
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles Adapter Check/Network Status
+ *     interrupts generated by the adapter.  It checks the
+ *     vector in the HOST_INT register to determine if it is
+ *     an Adapter Check interrupt.  If so, it resets the
+ *     adapter.  Otherwise it clears the status registers
+ *     and services the PHY.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u32             ack;
        u32             error;
        u8              net_sts;
@@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
        u16             tlphy_sts;
 
        ack = 1;
-       if ( host_int & TLAN_HI_IV_MASK ) {
-               netif_stop_queue( dev );
-               error = inl( dev->base_addr + TLAN_CH_PARM );
-               printk( "TLAN:  %s: Adaptor Error = 0x%x\n", dev->name, error );
-               TLan_ReadAndClearStats( dev, TLAN_RECORD );
-               outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+       if (host_int & TLAN_HI_IV_MASK) {
+               netif_stop_queue(dev);
+               error = inl(dev->base_addr + TLAN_CH_PARM);
+               netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+               tlan_read_and_clear_stats(dev, TLAN_RECORD);
+               outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
 
                schedule_work(&priv->tlan_tqueue);
 
                netif_wake_queue(dev);
                ack = 0;
        } else {
-               TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
-               phy = priv->phy[priv->phyNum];
-
-               net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
-               if ( net_sts ) {
-                       TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
-                       TLAN_DBG( TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
-                                 dev->name, (unsigned) net_sts );
+               TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+               phy = priv->phy[priv->phy_num];
+
+               net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+               if (net_sts) {
+                       tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+                       TLAN_DBG(TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
+                                dev->name, (unsigned) net_sts);
                }
-               if ( ( net_sts & TLAN_NET_STS_MIRQ ) &&  ( priv->phyNum == 0 ) ) {
-                       TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
-                       TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-                       if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
-                            ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-                               tlphy_ctl |= TLAN_TC_SWAPOL;
-                               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-                       } else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
-                                   ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-                               tlphy_ctl &= ~TLAN_TC_SWAPOL;
-                               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-                       }
-
-                       if (debug) {
-                               TLan_PhyPrint( dev );
+               if ((net_sts & TLAN_NET_STS_MIRQ) &&  (priv->phy_num == 0)) {
+                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+                       if (!(tlphy_sts & TLAN_TS_POLOK) &&
+                           !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+                               tlphy_ctl |= TLAN_TC_SWAPOL;
+                               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+                                                  tlphy_ctl);
+                       } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+                                  (tlphy_ctl & TLAN_TC_SWAPOL)) {
+                               tlphy_ctl &= ~TLAN_TC_SWAPOL;
+                               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+                                                  tlphy_ctl);
                        }
+
+                       if (debug)
+                               tlan_phy_print(dev);
                }
        }
 
        return ack;
 
-} /* TLan_HandleStatusCheck */
+}
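
The MII branch of tlan_handle_status_check only touches the polarity-swap control bit when it disagrees with the polarity-OK status bit, so the logic reduces to two transitions. A small sketch of just that decision; the mask values below are invented, only the bit names mirror the code above:

/* Polarity-swap decision sketch; TS_POLOK/TC_SWAPOL mirror the names
 * above, but the mask values and register contents are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_POLOK        0x0400  /* status: link polarity OK (made-up mask) */
#define TC_SWAPOL       0x0008  /* control: swap polarity (made-up mask)   */

static uint16_t service_polarity(uint16_t sts, uint16_t ctl)
{
        if (!(sts & TS_POLOK) && !(ctl & TC_SWAPOL))
                ctl |= TC_SWAPOL;       /* bad polarity, not yet swapped */
        else if ((sts & TS_POLOK) && (ctl & TC_SWAPOL))
                ctl &= ~TC_SWAPOL;      /* polarity fine, undo the swap */
        return ctl;
}

int main(void)
{
        printf("0x%04x\n", (unsigned)service_polarity(0, 0));
        printf("0x%04x\n", (unsigned)service_polarity(TS_POLOK, TC_SWAPOL));
        return 0;
}
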
 
 
 
 
-       /***************************************************************
-        *      TLan_HandleRxEOC
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This driver is structured to determine EOC occurrences by
-       *      reading the CSTAT member of the list structure.  Rx EOC
-       *      interrupts are disabled via the DIO INTDIS register.
-       *      However, TLAN chips before revision 3.0 didn't have this
-       *      CSTAT member or a INTDIS register, so if this chip is
-       *      pre-3.0, process EOC interrupts normally.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_rx_eoc
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This driver is structured to determine EOC occurrences by
+ *     reading the CSTAT member of the list structure.  Rx EOC
+ *     interrupts are disabled via the DIO INTDIS register.
+ *     However, TLAN chips before revision 3.0 didn't have this
+ *     CSTAT member or an INTDIS register, so if this chip is
+ *     pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        dma_addr_t      head_list_phys;
        u32             ack = 1;
 
-       if (  priv->tlanRev < 0x30 ) {
-               TLAN_DBG( TLAN_DEBUG_RX,
-                         "RECEIVE:  Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
-                         priv->rxHead, priv->rxTail );
-               head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-               outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+       if (priv->tlan_rev < 0x30) {
+               TLAN_DBG(TLAN_DEBUG_RX,
+                        "RECEIVE:  Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+                        priv->rx_head, priv->rx_tail);
+               head_list_phys = priv->rx_list_dma
+                       + sizeof(struct tlan_list)*priv->rx_head;
+               outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                ack |= TLAN_HC_GO | TLAN_HC_RT;
-               priv->rxEocCount++;
+               priv->rx_eoc_count++;
        }
 
        return ack;
 
-} /* TLan_HandleRxEOC */
+}
 
 
 
@@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-       *      TLan_Timer
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              data    A value given to add timer when
-       *                      add_timer was called.
-       *
-       *      This function handles timed functionality for the
-       *      TLAN driver.  The two current timer uses are for
-       *      delaying for autonegotionation and driving the ACT LED.
-       *      -       Autonegotiation requires being allowed about
-       *              2 1/2 seconds before attempting to transmit a
-       *              packet.  It would be a very bad thing to hang
-       *              the kernel this long, so the driver doesn't
-       *              allow transmission 'til after this time, for
-       *              certain PHYs.  It would be much nicer if all
-       *              PHYs were interrupt-capable like the internal
-       *              PHY.
-       *      -       The ACT LED, which shows adapter activity, is
-       *              driven by the driver, and so must be left on
-       *              for a short period to power up the LED so it
-       *              can be seen.  This delay can be changed by
-       *              changing the TLAN_TIMER_ACT_DELAY in tlan.h,
-       *              if desired.  100 ms  produces a slightly
-       *              sluggish response.
-       *
-       **************************************************************/
-
-static void TLan_Timer( unsigned long data )
+/***************************************************************
+ *     tlan_timer
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             data    A value given to add timer when
+ *                     add_timer was called.
+ *
+ *     This function handles timed functionality for the
+ *     TLAN driver.  The two current timer uses are for
+ *     delaying for autonegotiation and driving the ACT LED.
+ *     -       Autonegotiation requires being allowed about
+ *             2 1/2 seconds before attempting to transmit a
+ *             packet.  It would be a very bad thing to hang
+ *             the kernel this long, so the driver doesn't
+ *             allow transmission 'til after this time, for
+ *             certain PHYs.  It would be much nicer if all
+ *             PHYs were interrupt-capable like the internal
+ *             PHY.
+ *     -       The ACT LED, which shows adapter activity, is
+ *             driven by the driver, and so must be left on
+ *             for a short period to power up the LED so it
+ *             can be seen.  This delay can be changed by
+ *             changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ *             if desired.  100 ms produces a slightly
+ *             sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
 {
        struct net_device       *dev = (struct net_device *) data;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u32             elapsed;
        unsigned long   flags = 0;
 
        priv->timer.function = NULL;
 
-       switch ( priv->timerType ) {
+       switch (priv->timer_type) {
 #ifdef MONITOR
-               case TLAN_TIMER_LINK_BEAT:
-                       TLan_PhyMonitor( dev );
-                       break;
+       case TLAN_TIMER_LINK_BEAT:
+               tlan_phy_monitor(dev);
+               break;
 #endif
-               case TLAN_TIMER_PHY_PDOWN:
-                       TLan_PhyPowerDown( dev );
-                       break;
-               case TLAN_TIMER_PHY_PUP:
-                       TLan_PhyPowerUp( dev );
-                       break;
-               case TLAN_TIMER_PHY_RESET:
-                       TLan_PhyReset( dev );
-                       break;
-               case TLAN_TIMER_PHY_START_LINK:
-                       TLan_PhyStartLink( dev );
-                       break;
-               case TLAN_TIMER_PHY_FINISH_AN:
-                       TLan_PhyFinishAutoNeg( dev );
-                       break;
-               case TLAN_TIMER_FINISH_RESET:
-                       TLan_FinishReset( dev );
-                       break;
-               case TLAN_TIMER_ACTIVITY:
-                       spin_lock_irqsave(&priv->lock, flags);
-                       if ( priv->timer.function == NULL ) {
-                               elapsed = jiffies - priv->timerSetAt;
-                               if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
-                                       TLan_DioWrite8( dev->base_addr,
-                                                       TLAN_LED_REG, TLAN_LED_LINK );
-                               } else  {
-                                       priv->timer.function = TLan_Timer;
-                                       priv->timer.expires = priv->timerSetAt
-                                               + TLAN_TIMER_ACT_DELAY;
-                                       spin_unlock_irqrestore(&priv->lock, flags);
-                                       add_timer( &priv->timer );
-                                       break;
-                               }
+       case TLAN_TIMER_PHY_PDOWN:
+               tlan_phy_power_down(dev);
+               break;
+       case TLAN_TIMER_PHY_PUP:
+               tlan_phy_power_up(dev);
+               break;
+       case TLAN_TIMER_PHY_RESET:
+               tlan_phy_reset(dev);
+               break;
+       case TLAN_TIMER_PHY_START_LINK:
+               tlan_phy_start_link(dev);
+               break;
+       case TLAN_TIMER_PHY_FINISH_AN:
+               tlan_phy_finish_auto_neg(dev);
+               break;
+       case TLAN_TIMER_FINISH_RESET:
+               tlan_finish_reset(dev);
+               break;
+       case TLAN_TIMER_ACTIVITY:
+               spin_lock_irqsave(&priv->lock, flags);
+               if (priv->timer.function == NULL) {
+                       elapsed = jiffies - priv->timer_set_at;
+                       if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+                               tlan_dio_write8(dev->base_addr,
+                                               TLAN_LED_REG, TLAN_LED_LINK);
+                       } else  {
+                               priv->timer.function = tlan_timer;
+                               priv->timer.expires = priv->timer_set_at
+                                       + TLAN_TIMER_ACT_DELAY;
+                               spin_unlock_irqrestore(&priv->lock, flags);
+                               add_timer(&priv->timer);
+                               break;
                        }
-                       spin_unlock_irqrestore(&priv->lock, flags);
-                       break;
-               default:
-                       break;
+               }
+               spin_unlock_irqrestore(&priv->lock, flags);
+               break;
+       default:
+               break;
        }
 
-} /* TLan_Timer */
+}
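
The TLAN_TIMER_ACTIVITY case above keeps the ACT LED lit for at least TLAN_TIMER_ACT_DELAY after the last activity, re-arming the timer when it fires too early. A sketch of just that elapsed-time decision, with plain integers standing in for jiffies and no real timer API; the delay value is made up:

/* Activity-LED timeout sketch: integers stand in for jiffies, a
 * bool for the LED; nothing is actually armed.
 */
#include <stdbool.h>
#include <stdio.h>

#define ACT_DELAY 10    /* "jiffies" the LED must stay on (made-up value) */

/* Returns true if the caller should re-arm the timer for
 * set_at + ACT_DELAY, false once the LED may drop back to link-only.
 */
static bool activity_timer_fired(unsigned long now, unsigned long set_at,
                                 bool *act_led)
{
        if (now - set_at >= ACT_DELAY) {
                *act_led = false;       /* long enough: LED back to link */
                return false;
        }
        return true;                    /* fired early: run again later */
}

int main(void)
{
        bool led = true;

        printf("re-arm=%d led=%d\n", activity_timer_fired(5, 0, &led), led);
        printf("re-arm=%d led=%d\n", activity_timer_fired(12, 0, &led), led);
        return 0;
}
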
 
 
 
@@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-       *      TLan_ResetLists
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     The device structure with the list
-       *                      stuctures to be reset.
-       *
-       *      This routine sets the variables associated with managing
-       *      the TLAN lists to their initial values.
-       *
-       **************************************************************/
-
-static void TLan_ResetLists( struct net_device *dev )
+/***************************************************************
+ *     tlan_reset_lists
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     The device structure with the list
+ *                     structures to be reset.
+ *
+ *     This routine sets the variables associated with managing
+ *     the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        int             i;
-       TLanList        *list;
+       struct tlan_list        *list;
        dma_addr_t      list_phys;
        struct sk_buff  *skb;
 
-       priv->txHead = 0;
-       priv->txTail = 0;
-       for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-               list = priv->txList + i;
-               list->cStat = TLAN_CSTAT_UNUSED;
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+       for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+               list = priv->tx_list + i;
+               list->c_stat = TLAN_CSTAT_UNUSED;
                list->buffer[0].address = 0;
                list->buffer[2].count = 0;
                list->buffer[2].address = 0;
@@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev )
                list->buffer[9].address = 0;
        }
 
-       priv->rxHead = 0;
-       priv->rxTail = TLAN_NUM_RX_LISTS - 1;
-       for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-               list = priv->rxList + i;
-               list_phys = priv->rxListDMA + sizeof(TLanList) * i;
-               list->cStat = TLAN_CSTAT_READY;
-               list->frameSize = TLAN_MAX_FRAME_SIZE;
+       priv->rx_head = 0;
+       priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+       for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+               list = priv->rx_list + i;
+               list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+               list->c_stat = TLAN_CSTAT_READY;
+               list->frame_size = TLAN_MAX_FRAME_SIZE;
                list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
                skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
-               if ( !skb ) {
-                       pr_err("TLAN: out of memory for received data.\n" );
+               if (!skb) {
+                       netdev_err(dev, "Out of memory for received data\n");
                        break;
                }
 
-               list->buffer[0].address = pci_map_single(priv->pciDev,
+               list->buffer[0].address = pci_map_single(priv->pci_dev,
                                                         skb->data,
                                                         TLAN_MAX_FRAME_SIZE,
                                                         PCI_DMA_FROMDEVICE);
-               TLan_StoreSKB(list, skb);
+               tlan_store_skb(list, skb);
                list->buffer[1].count = 0;
                list->buffer[1].address = 0;
-               list->forward = list_phys + sizeof(TLanList);
+               list->forward = list_phys + sizeof(struct tlan_list);
        }
 
        /* in case ran out of memory early, clear bits */
        while (i < TLAN_NUM_RX_LISTS) {
-               TLan_StoreSKB(priv->rxList + i, NULL);
+               tlan_store_skb(priv->rx_list + i, NULL);
                ++i;
        }
        list->forward = 0;
 
-} /* TLan_ResetLists */
+}
 
 
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        int             i;
-       TLanList        *list;
+       struct tlan_list        *list;
        struct sk_buff  *skb;
 
-       for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-               list = priv->txList + i;
-               skb = TLan_GetSKB(list);
-               if ( skb ) {
+       for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+               list = priv->tx_list + i;
+               skb = tlan_get_skb(list);
+               if (skb) {
                        pci_unmap_single(
-                               priv->pciDev,
+                               priv->pci_dev,
                                list->buffer[0].address,
                                max(skb->len,
                                    (unsigned int)TLAN_MIN_FRAME_SIZE),
                                PCI_DMA_TODEVICE);
-                       dev_kfree_skb_any( skb );
+                       dev_kfree_skb_any(skb);
                        list->buffer[8].address = 0;
                        list->buffer[9].address = 0;
                }
        }
 
-       for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-               list = priv->rxList + i;
-               skb = TLan_GetSKB(list);
-               if ( skb ) {
-                       pci_unmap_single(priv->pciDev,
+       for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+               list = priv->rx_list + i;
+               skb = tlan_get_skb(list);
+               if (skb) {
+                       pci_unmap_single(priv->pci_dev,
                                         list->buffer[0].address,
                                         TLAN_MAX_FRAME_SIZE,
                                         PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any( skb );
+                       dev_kfree_skb_any(skb);
                        list->buffer[8].address = 0;
                        list->buffer[9].address = 0;
                }
        }
-} /* TLan_FreeLists */
+}
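
tlan_reset_lists above chains the receive descriptors through their forward members, each holding the bus address of the next list, with the final forward left at zero so the adapter stops there. A hedged sketch of that chaining, with small integers playing the role of DMA addresses and an invented "ready" marker:

/* Forward-chaining sketch: pseudo-addresses stand in for the DMA
 * addresses the real driver stores in list->forward.
 */
#include <stdio.h>

#define NUM_RX_LISTS 4

struct fake_list {
        int forward;            /* pseudo-address of next list, 0 = end */
        unsigned short c_stat;
};

int main(void)
{
        struct fake_list ring[NUM_RX_LISTS];
        int i;

        for (i = 0; i < NUM_RX_LISTS; i++) {
                ring[i].c_stat = 0x3000;        /* "ready" marker, invented */
                /* chain to the next slot; the last forward terminates at 0 */
                ring[i].forward = (i + 1 < NUM_RX_LISTS) ? i + 1 : 0;
        }

        for (i = 0; i < NUM_RX_LISTS; i++)
                printf("list %d -> forward %d\n", i, ring[i].forward);
        return 0;
}
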
 
 
 
 
-       /***************************************************************
-       *      TLan_PrintDio
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              io_base         Base IO port of the device of
-       *                              which to print DIO registers.
-       *
-       *      This function prints out all the internal (DIO)
-       *      registers of a TLAN chip.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_print_dio
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             io_base         Base IO port of the device of
+ *                             which to print DIO registers.
+ *
+ *     This function prints out all the internal (DIO)
+ *     registers of a TLAN chip.
+ *
+ **************************************************************/
 
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
 {
        u32 data0, data1;
        int     i;
 
-       printk( "TLAN:   Contents of internal registers for io base 0x%04hx.\n",
-               io_base );
-       printk( "TLAN:      Off.  +0         +4\n" );
-       for ( i = 0; i < 0x4C; i+= 8 ) {
-               data0 = TLan_DioRead32( io_base, i );
-               data1 = TLan_DioRead32( io_base, i + 0x4 );
-               printk( "TLAN:      0x%02x  0x%08x 0x%08x\n", i, data0, data1 );
+       pr_info("Contents of internal registers for io base 0x%04hx\n",
+               io_base);
+       pr_info("Off.  +0        +4\n");
+       for (i = 0; i < 0x4C; i += 8) {
+               data0 = tlan_dio_read32(io_base, i);
+               data1 = tlan_dio_read32(io_base, i + 0x4);
+               pr_info("0x%02x  0x%08x 0x%08x\n", i, data0, data1);
        }
 
-} /* TLan_PrintDio */
+}
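
tlan_print_dio, like the statistics reads further down, goes through the chip's windowed DIO interface: a register offset is first written to the DIO address port, then the value is read back through the DIO data port, byte by byte for the multi-byte counters. A userspace sketch of that indirection against a fake register file; the 0x30 offset and counter layout here are illustrative, not taken from tlan.h:

/* Windowed register access sketch: an array plays the chip's internal
 * register file, two helpers play outw()/inb().
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t fake_regs[0x50];         /* pretend internal registers */
static unsigned int dio_adr;            /* last value "written" to DIO_ADR */

static void fake_outw_dio_adr(unsigned int offset)
{
        dio_adr = offset;               /* select the window */
}

static uint8_t fake_inb_dio_data(unsigned int byte)
{
        return fake_regs[dio_adr + byte];       /* read within the window */
}

int main(void)
{
        uint32_t tx_good;

        /* pretend the chip counted 0x000123 good frames */
        fake_regs[0x30] = 0x23;
        fake_regs[0x31] = 0x01;
        fake_regs[0x32] = 0x00;

        /* same shape as the statistics reads: 24-bit counter, LSB first */
        fake_outw_dio_adr(0x30);
        tx_good  = fake_inb_dio_data(0);
        tx_good += fake_inb_dio_data(1) << 8;
        tx_good += fake_inb_dio_data(2) << 16;
        printf("good tx frames: %u\n", (unsigned)tx_good);
        return 0;
}
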
 
 
 
 
-       /***************************************************************
-       *      TLan_PrintList
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              list    A pointer to the TLanList structure to
-       *                      be printed.
-       *              type    A string to designate type of list,
-       *                      "Rx" or "Tx".
-       *              num     The index of the list.
-       *
-       *      This function prints out the contents of the list
-       *      pointed to by the list parameter.
-       *
-       **************************************************************/
+/***************************************************************
+ *     TLan_PrintList
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             list    A pointer to the struct tlan_list structure to
+ *                     be printed.
+ *             type    A string to designate type of list,
+ *                     "Rx" or "Tx".
+ *             num     The index of the list.
+ *
+ *     This function prints out the contents of the list
+ *     pointed to by the list parameter.
+ *
+ **************************************************************/
 
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
 {
        int i;
 
-       printk( "TLAN:   %s List %d at %p\n", type, num, list );
-       printk( "TLAN:      Forward    = 0x%08x\n",  list->forward );
-       printk( "TLAN:      CSTAT      = 0x%04hx\n", list->cStat );
-       printk( "TLAN:      Frame Size = 0x%04hx\n", list->frameSize );
-       /* for ( i = 0; i < 10; i++ ) { */
-       for ( i = 0; i < 2; i++ ) {
-               printk( "TLAN:      Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
-                       i, list->buffer[i].count, list->buffer[i].address );
+       pr_info("%s List %d at %p\n", type, num, list);
+       pr_info("   Forward    = 0x%08x\n",  list->forward);
+       pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
+       pr_info("   Frame Size = 0x%04hx\n", list->frame_size);
+       /* for (i = 0; i < 10; i++) { */
+       for (i = 0; i < 2; i++) {
+               pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+                       i, list->buffer[i].count, list->buffer[i].address);
        }
 
-} /* TLan_PrintList */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_ReadAndClearStats
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     Pointer to device structure of adapter
-       *                      to which to read stats.
-       *              record  Flag indicating whether to add
-       *
-       *      This functions reads all the internal status registers
-       *      of the TLAN chip, which clears them as a side effect.
-       *      It then either adds the values to the device's status
-       *      struct, or discards them, depending on whether record
-       *      is TLAN_RECORD (!=0)  or TLAN_IGNORE (==0).
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_read_and_clear_stats
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     Pointer to device structure of adapter
+ *                     to which to read stats.
+ *             record  Flag indicating whether to add
+ *
+ *     This functions reads all the internal status registers
+ *     of the TLAN chip, which clears them as a side effect.
+ *     It then either adds the values to the device's status
+ *     struct, or discards them, depending on whether record
+ *     is TLAN_RECORD (!=0)  or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
 
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
 {
        u32             tx_good, tx_under;
        u32             rx_good, rx_over;
@@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
        u32             multi_col, single_col;
        u32             excess_col, late_col, loss;
 
-       outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       tx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-       tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-       tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
-       outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       rx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-       rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-       rx_over  = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
-       outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
-       def_tx  = inb( dev->base_addr + TLAN_DIO_DATA );
-       def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       crc     = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-       code    = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
-       outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       multi_col   = inb( dev->base_addr + TLAN_DIO_DATA );
-       multi_col  += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       single_col  = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-       single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
-
-       outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
-       late_col   = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
-       loss       = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-
-       if ( record ) {
+       outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       tx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+       tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+       tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+       outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       rx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+       rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+       rx_over  = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+       outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+       def_tx  = inb(dev->base_addr + TLAN_DIO_DATA);
+       def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       crc     = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+       code    = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+       outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       multi_col   = inb(dev->base_addr + TLAN_DIO_DATA);
+       multi_col  += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       single_col  = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+       single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+       outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+       late_col   = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+       loss       = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+       if (record) {
                dev->stats.rx_packets += rx_good;
                dev->stats.rx_errors  += rx_over + crc + code;
                dev->stats.tx_packets += tx_good;
                dev->stats.tx_errors  += tx_under + loss;
-               dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+               dev->stats.collisions += multi_col
+                       + single_col + excess_col + late_col;
 
                dev->stats.rx_over_errors    += rx_over;
                dev->stats.rx_crc_errors     += crc;
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
                dev->stats.tx_carrier_errors += loss;
        }
 
-} /* TLan_ReadAndClearStats */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_Reset
-       *
-       *      Returns:
-       *              0
-       *      Parms:
-       *              dev     Pointer to device structure of adapter
-       *                      to be reset.
-       *
-       *      This function resets the adapter and it's physical
-       *      device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
-       *      Programmer's Guide" for details.  The routine tries to
-       *      implement what is detailed there, though adjustments
-       *      have been made.
-       *
-       **************************************************************/
+/***************************************************************
+ *     TLan_Reset
+ *
+ *     Returns:
+ *             0
+ *     Parms:
+ *             dev     Pointer to device structure of adapter
+ *                     to be reset.
+ *
+ *     This function resets the adapter and it's physical
+ *     device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
+ *     Programmer's Guide" for details.  The routine tries to
+ *     implement what is detailed there, though adjustments
+ *     have been made.
+ *
+ **************************************************************/
 
 static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int             i;
        u32             addr;
        u32             data;
        u8              data8;
 
-       priv->tlanFullDuplex = false;
-       priv->phyOnline=0;
+       priv->tlan_full_duplex = false;
+       priv->phy_online = 0;
        netif_carrier_off(dev);
 
 /*  1. Assert reset bit. */
@@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev )
 
        udelay(1000);
 
-/*  2. Turn off interrupts. ( Probably isn't necessary ) */
+/*  2. Turn off interrupts. (Probably isn't necessary) */
 
        data = inl(dev->base_addr + TLAN_HOST_CMD);
        data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev )
 
 /*  3. Clear AREGs and HASHs. */
 
-       for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
-               TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
-       }
+       for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+               tlan_dio_write32(dev->base_addr, (u16) i, 0);
 
 /*  4. Setup NetConfig register. */
 
        data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-       TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+       tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
 /*  5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
 
-       outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
-       outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+       outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+       outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
 
 /*  6. Unreset the MII by setting NMRST (in NetSio) to 1. */
 
-       outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
        addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
-       TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+       tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
 
 /*  7. Setup the remaining registers. */
 
-       if ( priv->tlanRev >= 0x30 ) {
+       if (priv->tlan_rev >= 0x30) {
                data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
-               TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+               tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
        }
-       TLan_PhyDetect( dev );
+       tlan_phy_detect(dev);
        data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+       if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
                data |= TLAN_NET_CFG_BIT;
-               if ( priv->aui == 1 ) {
-                       TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
-               } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
-                       TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
-                       priv->tlanFullDuplex = true;
+               if (priv->aui == 1) {
+                       tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+               } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+                       tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+                       priv->tlan_full_duplex = true;
                } else {
-                       TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+                       tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
                }
        }
 
-       if ( priv->phyNum == 0 ) {
+       if (priv->phy_num == 0)
                data |= TLAN_NET_CFG_PHY_EN;
-       }
-       TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+       tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-               TLan_FinishReset( dev );
-       } else {
-               TLan_PhyPowerDown( dev );
-       }
+       if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+               tlan_finish_reset(dev);
+       else
+               tlan_phy_power_down(dev);
 
-} /* TLan_ResetAdapter */
+}
 
 
 
 
 static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u8              data;
        u32             phy;
        u8              sio;
        u16             status;
        u16             partner;
        u16             tlphy_ctl;
-       u16             tlphy_par;
+       u16             tlphy_par;
        u16             tlphy_id1, tlphy_id2;
-       int             i;
+       int             i;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
        data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
-       if ( priv->tlanFullDuplex ) {
+       if (priv->tlan_full_duplex)
                data |= TLAN_NET_CMD_DUPLEX;
-       }
-       TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+       tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
        data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
-       if ( priv->phyNum == 0 ) {
+       if (priv->phy_num == 0)
                data |= TLAN_NET_MASK_MASK7;
-       }
-       TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
-       TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
-       TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
-       TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+       tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+       tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+       tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+       tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
 
-       if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
-            ( priv->aui ) ) {
+       if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+           (priv->aui)) {
                status = MII_GS_LINK;
-               printk( "TLAN:  %s: Link forced.\n", dev->name );
+               netdev_info(dev, "Link forced\n");
        } else {
-               TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-               udelay( 1000 );
-               TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-               if ( (status & MII_GS_LINK) &&
-                    /* We only support link info on Nat.Sem. PHY's */
-                       (tlphy_id1 == NAT_SEM_ID1) &&
-                       (tlphy_id2 == NAT_SEM_ID2) ) {
-                       TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
-                       TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
-
-                       printk( "TLAN: %s: Link active with ", dev->name );
-                       if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
-                                printk( "forced 10%sMbps %s-Duplex\n",
-                                        tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-                                        tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
-                       } else {
-                               printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
-                                       tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-                                       tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
-                               printk("TLAN: Partner capability: ");
-                                       for (i = 5; i <= 10; i++)
-                                               if (partner & (1<<i))
-                                                       printk("%s",media[i-5]);
-                               printk("\n");
+               tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+               udelay(1000);
+               tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+               if ((status & MII_GS_LINK) &&
+                   /* We only support link info on Nat.Sem. PHY's */
+                   (tlphy_id1 == NAT_SEM_ID1) &&
+                   (tlphy_id2 == NAT_SEM_ID2)) {
+                       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+                       netdev_info(dev,
+                                   "Link active with %s %uMbps %s-Duplex\n",
+                                   !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+                                   ? "forced" : "Autonegotiation enabled,",
+                                   tlphy_par & TLAN_PHY_SPEED_100
+                                   ? 100 : 10,
+                                   tlphy_par & TLAN_PHY_DUPLEX_FULL
+                                   ? "Full" : "Half");
+
+                       if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+                               netdev_info(dev, "Partner capability:");
+                               for (i = 5; i < 10; i++)
+                                       if (partner & (1 << i))
+                                               pr_cont(" %s", media[i-5]);
+                               pr_cont("\n");
                        }
 
-                       TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+                                       TLAN_LED_LINK);
 #ifdef MONITOR
                        /* We have link beat..for now anyway */
-                       priv->link = 1;
-                       /*Enabling link beat monitoring */
-                       TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+                       priv->link = 1;
+                       /*Enabling link beat monitoring */
+                       tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
 #endif
                } else if (status & MII_GS_LINK)  {
-                       printk( "TLAN: %s: Link active\n", dev->name );
-                       TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+                       netdev_info(dev, "Link active\n");
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+                                       TLAN_LED_LINK);
                }
        }
 
-       if ( priv->phyNum == 0 ) {
-               TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-               tlphy_ctl |= TLAN_TC_INTEN;
-               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
-               sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
-               sio |= TLAN_NET_SIO_MINTEN;
-               TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
-       }
-
-       if ( status & MII_GS_LINK ) {
-               TLan_SetMac( dev, 0, dev->dev_addr );
-               priv->phyOnline = 1;
-               outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
-               if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
-                       outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
-               }
-               outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
-               outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+       if (priv->phy_num == 0) {
+               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+               tlphy_ctl |= TLAN_TC_INTEN;
+               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+               sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+               sio |= TLAN_NET_SIO_MINTEN;
+               tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+       }
+
+       if (status & MII_GS_LINK) {
+               tlan_set_mac(dev, 0, dev->dev_addr);
+               priv->phy_online = 1;
+               outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+               if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+                       outb((TLAN_HC_REQ_INT >> 8),
+                            dev->base_addr + TLAN_HOST_CMD + 1);
+               outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+               outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
                netif_carrier_on(dev);
        } else {
-               printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
-                       dev->name );
-               TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+               netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+               tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
                return;
        }
-       TLan_SetMulticastList(dev);
+       tlan_set_multicast_list(dev);
 
-} /* TLan_FinishReset */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_SetMac
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     Pointer to device structure of adapter
-       *                      on which to change the AREG.
-       *              areg    The AREG to set the address in (0 - 3).
-       *              mac     A pointer to an array of chars.  Each
-       *                      element stores one byte of the address.
-       *                      IE, it isn't in ascii.
-       *
-       *      This function transfers a MAC address to one of the
-       *      TLAN AREGs (address registers).  The TLAN chip locks
-       *      the register on writing to offset 0 and unlocks the
-       *      register after writing to offset 5.  If NULL is passed
-       *      in mac, then the AREG is filled with 0's.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_set_mac
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     Pointer to device structure of adapter
+ *                     on which to change the AREG.
+ *             areg    The AREG to set the address in (0 - 3).
+ *             mac     A pointer to an array of chars.  Each
+ *                     element stores one byte of the address.
+ *                     IE, it isn't in ascii.
+ *
+ *     This function transfers a MAC address to one of the
+ *     TLAN AREGs (address registers).  The TLAN chip locks
+ *     the register on writing to offset 0 and unlocks the
+ *     register after writing to offset 5.  If NULL is passed
+ *     in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
 
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
 {
        int i;
 
        areg *= 6;
 
-       if ( mac != NULL ) {
-               for ( i = 0; i < 6; i++ )
-                       TLan_DioWrite8( dev->base_addr,
-                                       TLAN_AREG_0 + areg + i, mac[i] );
+       if (mac != NULL) {
+               for (i = 0; i < 6; i++)
+                       tlan_dio_write8(dev->base_addr,
+                                       TLAN_AREG_0 + areg + i, mac[i]);
        } else {
-               for ( i = 0; i < 6; i++ )
-                       TLan_DioWrite8( dev->base_addr,
-                                       TLAN_AREG_0 + areg + i, 0 );
+               for (i = 0; i < 6; i++)
+                       tlan_dio_write8(dev->base_addr,
+                                       TLAN_AREG_0 + areg + i, 0);
        }
 
-} /* TLan_SetMac */
+}
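
Illustration only (not from this patch): the areg *= 6 above works because the four address registers occupy six consecutive DIO bytes each, starting at TLAN_AREG_0. A hypothetical helper expressing that layout:

/* illustrative only; name is an assumption */
static inline u16 example_areg_offset(int areg)
{
	return TLAN_AREG_0 + areg * 6;	/* AREGs 0-3, six bytes apart */
}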
 
 
 
@@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
 
-       /*********************************************************************
-       *      TLan_PhyPrint
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     A pointer to the device structure of the
-       *                      TLAN device having the PHYs to be detailed.
-       *
-       *      This function prints the registers a PHY (aka transceiver).
-       *
-       ********************************************************************/
+/*********************************************************************
+ *     tlan_phy_print
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     A pointer to the device structure of the
+ *                     TLAN device having the PHYs to be detailed.
+ *
+ *     This function prints the registers a PHY (aka transceiver).
+ *
+ ********************************************************************/
 
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16 i, data0, data1, data2, data3, phy;
 
-       phy = priv->phy[priv->phyNum];
-
-       if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-               printk( "TLAN:   Device %s, Unmanaged PHY.\n", dev->name );
-       } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
-               printk( "TLAN:   Device %s, PHY 0x%02x.\n", dev->name, phy );
-               printk( "TLAN:      Off.  +0     +1     +2     +3\n" );
-                for ( i = 0; i < 0x20; i+= 4 ) {
-                       printk( "TLAN:      0x%02x", i );
-                       TLan_MiiReadReg( dev, phy, i, &data0 );
-                       printk( " 0x%04hx", data0 );
-                       TLan_MiiReadReg( dev, phy, i + 1, &data1 );
-                       printk( " 0x%04hx", data1 );
-                       TLan_MiiReadReg( dev, phy, i + 2, &data2 );
-                       printk( " 0x%04hx", data2 );
-                       TLan_MiiReadReg( dev, phy, i + 3, &data3 );
-                       printk( " 0x%04hx\n", data3 );
+       phy = priv->phy[priv->phy_num];
+
+       if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+               netdev_info(dev, "Unmanaged PHY\n");
+       } else if (phy <= TLAN_PHY_MAX_ADDR) {
+               netdev_info(dev, "PHY 0x%02x\n", phy);
+               pr_info("   Off.  +0     +1     +2     +3\n");
+               for (i = 0; i < 0x20; i += 4) {
+                       tlan_mii_read_reg(dev, phy, i, &data0);
+                       tlan_mii_read_reg(dev, phy, i + 1, &data1);
+                       tlan_mii_read_reg(dev, phy, i + 2, &data2);
+                       tlan_mii_read_reg(dev, phy, i + 3, &data3);
+                       pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+                               i, data0, data1, data2, data3);
                }
        } else {
-               printk( "TLAN:   Device %s, Invalid PHY.\n", dev->name );
+               netdev_info(dev, "Invalid PHY\n");
        }
 
-} /* TLan_PhyPrint */
+}
 
 
 
 
-       /*********************************************************************
-       *      TLan_PhyDetect
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     A pointer to the device structure of the adapter
-       *                      for which the PHY needs determined.
-       *
-       *      So far I've found that adapters which have external PHYs
-       *      may also use the internal PHY for part of the functionality.
-       *      (eg, AUI/Thinnet).  This function finds out if this TLAN
-       *      chip has an internal PHY, and then finds the first external
-       *      PHY (starting from address 0) if it exists).
-       *
-       ********************************************************************/
+/*********************************************************************
+ *     tlan_phy_detect
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     A pointer to the device structure of the adapter
+ *                     for which the PHY needs determined.
+ *
+ *     So far I've found that adapters which have external PHYs
+ *     may also use the internal PHY for part of the functionality.
+ *     (eg, AUI/Thinnet).  This function finds out if this TLAN
+ *     chip has an internal PHY, and then finds the first external
+ *     PHY (starting from address 0) if it exists).
+ *
+ ********************************************************************/
 
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16             control;
        u16             hi;
        u16             lo;
        u32             phy;
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-               priv->phyNum = 0xFFFF;
+       if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+               priv->phy_num = 0xffff;
                return;
        }
 
-       TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+       tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
 
-       if ( hi != 0xFFFF ) {
+       if (hi != 0xffff)
                priv->phy[0] = TLAN_PHY_MAX_ADDR;
-       } else {
+       else
                priv->phy[0] = TLAN_PHY_NONE;
-       }
 
        priv->phy[1] = TLAN_PHY_NONE;
-       for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
-               TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
-               TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
-               TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
-               if ( ( control != 0xFFFF ) ||
-                    ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
-                       TLAN_DBG( TLAN_DEBUG_GNRL,
-                                 "PHY found at %02x %04x %04x %04x\n",
-                                 phy, control, hi, lo );
-                       if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
-                            ( phy != TLAN_PHY_MAX_ADDR ) ) {
+       for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+               tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+               tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+               tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+               if ((control != 0xffff) ||
+                   (hi != 0xffff) || (lo != 0xffff)) {
+                       TLAN_DBG(TLAN_DEBUG_GNRL,
+                                "PHY found at %02x %04x %04x %04x\n",
+                                phy, control, hi, lo);
+                       if ((priv->phy[1] == TLAN_PHY_NONE) &&
+                           (phy != TLAN_PHY_MAX_ADDR)) {
                                priv->phy[1] = phy;
                        }
                }
        }
 
-       if ( priv->phy[1] != TLAN_PHY_NONE ) {
-               priv->phyNum = 1;
-       } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
-               priv->phyNum = 0;
-       } else {
-               printk( "TLAN:  Cannot initialize device, no PHY was found!\n" );
-       }
+       if (priv->phy[1] != TLAN_PHY_NONE)
+               priv->phy_num = 1;
+       else if (priv->phy[0] != TLAN_PHY_NONE)
+               priv->phy_num = 0;
+       else
+               netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
 
-} /* TLan_PhyDetect */
+}
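
For reference only, outside the patch: the scan above treats an MII address as populated when any of the probed registers reads back something other than the all-ones value an absent PHY returns. Restated as a hypothetical predicate:

/* illustrative only */
static inline bool example_phy_present(u16 control, u16 id_hi, u16 id_lo)
{
	/* an absent PHY leaves MDIO floating, so every read returns 0xffff */
	return control != 0xffff || id_hi != 0xffff || id_lo != 0xffff;
}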
 
 
 
 
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             value;
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
        value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
-       TLan_MiiSync( dev->base_addr );
-       TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-       if ( ( priv->phyNum == 0 ) &&
-            ( priv->phy[1] != TLAN_PHY_NONE ) &&
-            ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
-               TLan_MiiSync( dev->base_addr );
-               TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+       tlan_mii_sync(dev->base_addr);
+       tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+       if ((priv->phy_num == 0) &&
+           (priv->phy[1] != TLAN_PHY_NONE) &&
+           (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+               tlan_mii_sync(dev->base_addr);
+               tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
        }
 
        /* Wait for 50 ms and powerup
         * This is abitrary.  It is intended to make sure the
         * transceiver settles.
         */
-       TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
 
-} /* TLan_PhyPowerDown */
+}
 
 
 
 
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             value;
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
-       TLan_MiiSync( dev->base_addr );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+       tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK;
-       TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-       TLan_MiiSync(dev->base_addr);
+       tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+       tlan_mii_sync(dev->base_addr);
        /* Wait for 500 ms and reset the
         * transceiver.  The TLAN docs say both 50 ms and
         * 500 ms, so do the longer, just in case.
         */
-       TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
 
-} /* TLan_PhyPowerUp */
+}
 
 
 
 
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             phy;
        u16             value;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
-       TLan_MiiSync( dev->base_addr );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name);
+       tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK | MII_GC_RESET;
-       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
-       TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
-       while ( value & MII_GC_RESET ) {
-               TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
-       }
+       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+       tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+       while (value & MII_GC_RESET)
+               tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
 
        /* Wait for 500 ms and initialize.
         * I don't remember why I wait this long.
         * I've changed this to 50ms, as it seems long enough.
         */
-       TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
 
-} /* TLan_PhyReset */
+}
 
 
 
 
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             ability;
        u16             control;
        u16             data;
@@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev )
        u16             status;
        u16             tctl;
 
-       phy = priv->phy[priv->phyNum];
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+       phy = priv->phy[priv->phy_num];
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
 
-       if ( ( status & MII_GS_AUTONEG ) &&
-            ( ! priv->aui ) ) {
+       if ((status & MII_GS_AUTONEG) &&
+           (!priv->aui)) {
                ability = status >> 11;
-               if ( priv->speed  == TLAN_SPEED_10 &&
-                    priv->duplex == TLAN_DUPLEX_HALF) {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
-               } else if ( priv->speed == TLAN_SPEED_10 &&
-                           priv->duplex == TLAN_DUPLEX_FULL) {
-                       priv->tlanFullDuplex = true;
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
-               } else if ( priv->speed == TLAN_SPEED_100 &&
-                           priv->duplex == TLAN_DUPLEX_HALF) {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
-               } else if ( priv->speed == TLAN_SPEED_100 &&
-                           priv->duplex == TLAN_DUPLEX_FULL) {
-                       priv->tlanFullDuplex = true;
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+               if (priv->speed  == TLAN_SPEED_10 &&
+                   priv->duplex == TLAN_DUPLEX_HALF) {
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+               } else if (priv->speed == TLAN_SPEED_10 &&
+                          priv->duplex == TLAN_DUPLEX_FULL) {
+                       priv->tlan_full_duplex = true;
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+               } else if (priv->speed == TLAN_SPEED_100 &&
+                          priv->duplex == TLAN_DUPLEX_HALF) {
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+               } else if (priv->speed == TLAN_SPEED_100 &&
+                          priv->duplex == TLAN_DUPLEX_FULL) {
+                       priv->tlan_full_duplex = true;
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
                } else {
 
                        /* Set Auto-Neg advertisement */
-                       TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+                       tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+                                          (ability << 5) | 1);
                        /* Enablee Auto-Neg */
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
                        /* Restart Auto-Neg */
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
                        /* Wait for 4 sec for autonegotiation
-                       * to complete.  The max spec time is less than this
-                       * but the card need additional time to start AN.
-                       * .5 sec should be plenty extra.
-                       */
-                       printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
-                       TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+                        * to complete.  The max spec time is less than this
+                        * but the card need additional time to start AN.
+                        * .5 sec should be plenty extra.
+                        */
+                       netdev_info(dev, "Starting autonegotiation\n");
+                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
                        return;
                }
 
        }
 
-       if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
-               priv->phyNum = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-               TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
-               TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+       if ((priv->aui) && (priv->phy_num != 0)) {
+               priv->phy_num = 0;
+               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+                       | TLAN_NET_CFG_PHY_EN;
+               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+               tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
-       }  else if ( priv->phyNum == 0 ) {
+       } else if (priv->phy_num == 0) {
                control = 0;
-               TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
-               if ( priv->aui ) {
-                       tctl |= TLAN_TC_AUISEL;
+               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+               if (priv->aui) {
+                       tctl |= TLAN_TC_AUISEL;
                } else {
-                       tctl &= ~TLAN_TC_AUISEL;
-                       if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+                       tctl &= ~TLAN_TC_AUISEL;
+                       if (priv->duplex == TLAN_DUPLEX_FULL) {
                                control |= MII_GC_DUPLEX;
-                               priv->tlanFullDuplex = true;
+                               priv->tlan_full_duplex = true;
                        }
-                       if ( priv->speed == TLAN_SPEED_100 ) {
+                       if (priv->speed == TLAN_SPEED_100)
                                control |= MII_GC_SPEEDSEL;
-                       }
                }
-               TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
-               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+               tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
        }
 
        /* Wait for 2 sec to give the transceiver time
         * to establish link.
         */
-       TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+       tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
 
-} /* TLan_PhyStartLink */
+}
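
As background (not part of the patch): the bare MII_GEN_CTL values written above map onto the standard BMCR bit layout; the names below are illustrative assumptions, not defines from this driver.

/* illustrative names only */
#define EXAMPLE_MII_BMCR_FULLDPLX	0x0100	/* full duplex             */
#define EXAMPLE_MII_BMCR_ANRESTART	0x0200	/* restart autonegotiation */
#define EXAMPLE_MII_BMCR_ANENABLE	0x1000	/* enable autonegotiation  */
#define EXAMPLE_MII_BMCR_SPEED100	0x2000	/* select 100 Mbit/s       */
/* hence 0x1200 = enable + restart autoneg, 0x2100 = forced 100 Mbit/s full duplex */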
 
 
 
 
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             an_adv;
        u16             an_lpa;
        u16             data;
@@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
        u16             phy;
        u16             status;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-       udelay( 1000 );
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+       udelay(1000);
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
 
-       if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+       if (!(status & MII_GS_AUTOCMPLT)) {
                /* Wait for 8 sec to give the process
                 * more time.  Perhaps we should fail after a while.
                 */
-                if (!priv->neg_be_verbose++) {
-                        pr_info("TLAN:  Giving autonegotiation more time.\n");
-                        pr_info("TLAN:  Please check that your adapter has\n");
-                        pr_info("TLAN:  been properly connected to a HUB or Switch.\n");
-                        pr_info("TLAN:  Trying to establish link in the background...\n");
-                }
-               TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+               if (!priv->neg_be_verbose++) {
+                       pr_info("Giving autonegotiation more time.\n");
+                       pr_info("Please check that your adapter has\n");
+                       pr_info("been properly connected to a HUB or Switch.\n");
+                       pr_info("Trying to establish link in the background...\n");
+               }
+               tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
                return;
        }
 
-       printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
-       TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
-       TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+       netdev_info(dev, "Autonegotiation complete\n");
+       tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
        mode = an_adv & an_lpa & 0x03E0;
-       if ( mode & 0x0100 ) {
-               priv->tlanFullDuplex = true;
-       } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
-               priv->tlanFullDuplex = true;
-       }
-
-       if ( ( ! ( mode & 0x0180 ) ) &&
-            ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
-            ( priv->phyNum != 0 ) ) {
-               priv->phyNum = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-               TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
-               TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+       if (mode & 0x0100)
+               priv->tlan_full_duplex = true;
+       else if (!(mode & 0x0080) && (mode & 0x0040))
+               priv->tlan_full_duplex = true;
+
+       if ((!(mode & 0x0180)) &&
+           (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+           (priv->phy_num != 0)) {
+               priv->phy_num = 0;
+               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+                       | TLAN_NET_CFG_PHY_EN;
+               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+               tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
        }
 
-       if ( priv->phyNum == 0 ) {
-               if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
-                    ( an_adv & an_lpa & 0x0040 ) ) {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
-                                         MII_GC_AUTOENB | MII_GC_DUPLEX );
-                       pr_info("TLAN:  Starting internal PHY with FULL-DUPLEX\n" );
+       if (priv->phy_num == 0) {
+               if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+                   (an_adv & an_lpa & 0x0040)) {
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+                                          MII_GC_AUTOENB | MII_GC_DUPLEX);
+                       netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
                } else {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
-                       pr_info( "TLAN:  Starting internal PHY with HALF-DUPLEX\n" );
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+                                          MII_GC_AUTOENB);
+                       netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
                }
        }
 
        /* Wait for 100 ms.  No reason in partiticular.
         */
-       TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+       tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
 
-} /* TLan_PhyFinishAutoNeg */
+}
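
For context only (not part of this patch): the duplex decision above masks the advertised and link-partner abilities with 0x03e0, i.e. ANAR/ANLPA bits 5-9. The same logic, restated as a hypothetical helper:

/* illustrative restatement of the decision made above */
static inline bool example_negotiated_full_duplex(u16 an_adv, u16 an_lpa)
{
	u16 mode = an_adv & an_lpa & 0x03e0;	/* abilities common to both ends */

	if (mode & 0x0100)			/* 100BASE-TX full duplex */
		return true;
	return !(mode & 0x0080) && (mode & 0x0040);	/* no 100HD, but 10FD */
}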
 
 #ifdef MONITOR
 
-        /*********************************************************************
-       *
-       *      TLan_phyMonitor
-       *
-       *      Returns:
-       *              None
-       *
-       *      Params:
-       *              dev             The device structure of this device.
-       *
-       *
-       *      This function monitors PHY condition by reading the status
-       *      register via the MII bus. This can be used to give info
-       *      about link changes (up/down), and possible switch to alternate
-       *      media.
-       *
-       * ******************************************************************/
-
-void TLan_PhyMonitor( struct net_device *dev )
+/*********************************************************************
+ *
+ *     tlan_phy_monitor
+ *
+ *     Returns:
+ *           None
+ *
+ *     Params:
+ *           dev            The device structure of this device.
+ *
+ *
+ *     This function monitors PHY condition by reading the status
+ *     register via the MII bus. This can be used to give info
+ *     about link changes (up/down), and possible switch to alternate
+ *     media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16     phy;
        u16     phy_status;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
-        /* Get PHY status register */
-        TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+       /* Get PHY status register */
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
 
-        /* Check if link has been lost */
-        if (!(phy_status & MII_GS_LINK)) {
-              if (priv->link) {
-                     priv->link = 0;
-                     printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
-                     netif_carrier_off(dev);
-                     TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
-                     return;
+       /* Check if link has been lost */
+       if (!(phy_status & MII_GS_LINK)) {
+               if (priv->link) {
+                       priv->link = 0;
+                       printk(KERN_DEBUG "TLAN: %s has lost link\n",
+                              dev->name);
+                       netif_carrier_off(dev);
+                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+                       return;
                }
        }
 
-        /* Link restablished? */
-        if ((phy_status & MII_GS_LINK) && !priv->link) {
-               priv->link = 1;
-               printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+       /* Link restablished? */
+       if ((phy_status & MII_GS_LINK) && !priv->link) {
+               priv->link = 1;
+               printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+                      dev->name);
                netif_carrier_on(dev);
-        }
+       }
 
        /* Setup a new monitor */
-       TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
 }
 
 #endif /* MONITOR */
@@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
 
-       These routines are based on the information in Chap. 2 of the
-       "ThunderLAN Programmer's Guide", pp. 15-24.
+these routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-        *      TLan_MiiReadReg
-        *
-        *      Returns:
-        *              false   if ack received ok
-        *              true    if no ack received or other error
-        *
-        *      Parms:
-        *              dev             The device structure containing
-        *                              The io address and interrupt count
-        *                              for this device.
-        *              phy             The address of the PHY to be queried.
-        *              reg             The register whose contents are to be
-        *                              retrieved.
-        *              val             A pointer to a variable to store the
-        *                              retrieved value.
-        *
-        *      This function uses the TLAN's MII bus to retrieve the contents
-        *      of a given register on a PHY.  It sends the appropriate info
-        *      and then reads the 16-bit register value from the MII bus via
-        *      the TLAN SIO register.
-        *
-        **************************************************************/
-
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+/***************************************************************
+ *     tlan_mii_read_reg
+ *
+ *     Returns:
+ *             false   if ack received ok
+ *             true    if no ack received or other error
+ *
+ *     Parms:
+ *             dev             The device structure containing
+ *                             The io address and interrupt count
+ *                             for this device.
+ *             phy             The address of the PHY to be queried.
+ *             reg             The register whose contents are to be
+ *                             retrieved.
+ *             val             A pointer to a variable to store the
+ *                             retrieved value.
+ *
+ *     This function uses the TLAN's MII bus to retrieve the contents
+ *     of a given register on a PHY.  It sends the appropriate info
+ *     and then reads the 16-bit register value from the MII bus via
+ *     the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
 {
        u8      nack;
        u16     sio, tmp;
-       u32     i;
+       u32     i;
        bool    err;
        int     minten;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        unsigned long flags = 0;
 
        err = false;
@@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
        if (!in_irq())
                spin_lock_irqsave(&priv->lock, flags);
 
-       TLan_MiiSync(dev->base_addr);
+       tlan_mii_sync(dev->base_addr);
 
-       minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
-       if ( minten )
-               TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+       minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+       if (minten)
+               tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
 
-       TLan_MiiSendData( dev->base_addr, 0x1, 2 );     /* Start ( 01b ) */
-       TLan_MiiSendData( dev->base_addr, 0x2, 2 );     /* Read  ( 10b ) */
-       TLan_MiiSendData( dev->base_addr, phy, 5 );     /* Device #      */
-       TLan_MiiSendData( dev->base_addr, reg, 5 );     /* Register #    */
+       tlan_mii_send_data(dev->base_addr, 0x1, 2);     /* start (01b) */
+       tlan_mii_send_data(dev->base_addr, 0x2, 2);     /* read  (10b) */
+       tlan_mii_send_data(dev->base_addr, phy, 5);     /* device #      */
+       tlan_mii_send_data(dev->base_addr, reg, 5);     /* register #    */
 
 
-       TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio);         /* Change direction */
+       tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);        /* change direction */
 
-       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);          /* Clock Idle bit */
-       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
-       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);          /* Wait 300ns */
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);         /* clock idle bit */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);         /* wait 300ns */
 
-       nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio);    /* Check for ACK */
-       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);            /* Finish ACK */
-       if (nack) {                                     /* No ACK, so fake it */
+       nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);   /* check for ACK */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);           /* finish ACK */
+       if (nack) {                                     /* no ACK, so fake it */
                for (i = 0; i < 16; i++) {
-                       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
-                       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+                       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+                       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
                }
                tmp = 0xffff;
                err = true;
        } else {                                        /* ACK, so read data */
                for (tmp = 0, i = 0x8000; i; i >>= 1) {
-                       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
-                       if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+                       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+                       if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
                                tmp |= i;
-                       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+                       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
                }
        }
 
 
-       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);          /* Idle cycle */
-       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);         /* idle cycle */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 
-       if ( minten )
-               TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+       if (minten)
+               tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
 
        *val = tmp;
 
@@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
 
        return err;
 
-} /* TLan_MiiReadReg */
+}
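
For reference, the routine above bit-bangs a standard IEEE 802.3 clause-22 management read: 32 sync clocks, a 01b start, a 10b read opcode, five bits of PHY address, five bits of register address, a turnaround where the PHY takes over the data line, and finally 16 data bits sampled MSB first. The following is a minimal, driver-independent sketch of that same sequence written against stubbed pin helpers instead of the TLAN SIO register; the helper names are illustrative and not part of this driver.

#include <stdint.h>
#include <stdio.h>

/* Stubbed pin helpers; a real implementation would use the SIO bit
 * accessors (tlan_set_bit/tlan_clear_bit/tlan_get_bit) shown in tlan.h. */
static void mdc_low(void)      { }
static void mdc_high(void)     { }
static void mdio_release(void) { }              /* stop driving MDIO */
static void mdio_out(int bit)  { (void)bit; }   /* drive MDIO        */
static int  mdio_in(void)      { return 0; }    /* sample MDIO       */

/* clock out num_bits of data, MSB first (cf. tlan_mii_send_data()) */
static void mii_clock_out(uint32_t data, unsigned int num_bits)
{
	uint32_t i;

	for (i = 1u << (num_bits - 1); i; i >>= 1) {
		mdc_low();
		mdio_out(!!(data & i));
		mdc_high();
	}
}

/* clause-22 read; returns 0 on success, 1 if the PHY did not ACK */
static int mii_read(uint8_t phy, uint8_t reg, uint16_t *val)
{
	uint16_t tmp = 0;
	uint16_t bit;
	int n;

	mdio_release();                 /* sync: 32 clocks, bus released */
	for (n = 0; n < 32; n++) {
		mdc_low();
		mdc_high();
	}

	mii_clock_out(0x1, 2);          /* start    (01b) */
	mii_clock_out(0x2, 2);          /* read     (10b) */
	mii_clock_out(phy, 5);          /* PHY address    */
	mii_clock_out(reg, 5);          /* register       */

	mdio_release();                 /* turnaround: PHY drives MDIO */
	mdc_low();
	mdc_high();
	mdc_low();
	if (mdio_in()) {                /* no ACK, fake an all-ones read */
		*val = 0xffff;
		return 1;
	}
	mdc_high();

	for (bit = 0x8000; bit; bit >>= 1) {    /* 16 data bits */
		mdc_low();
		if (mdio_in())
			tmp |= bit;
		mdc_high();
	}

	*val = tmp;
	return 0;
}

int main(void)
{
	uint16_t val;
	int err = mii_read(0x1f, 0x01, &val);   /* illustrative addresses */

	printf("err=%d val=0x%04x\n", err, val);
	return 0;
}
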
 
 
 
 
-       /***************************************************************
-        *      TLan_MiiSendData
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              base_port       The base IO port of the adapter in
-        *                              question.
-        *              dev             The address of the PHY to be queried.
-        *              data            The value to be placed on the MII bus.
-        *              num_bits        The number of bits in data that are to
-        *                              be placed on the MII bus.
-        *
-        *      This function sends on sequence of bits on the MII
-        *      configuration bus.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_mii_send_data
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             base_port       The base IO port of the adapter in
+ *                             question.
+ *             dev             The address of the PHY to be queried.
+ *             data            The value to be placed on the MII bus.
+ *             num_bits        The number of bits in data that are to
+ *                             be placed on the MII bus.
+ *
+ *     This function sends on sequence of bits on the MII
+ *     configuration bus.
+ *
+ **************************************************************/
 
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
 {
        u16 sio;
        u32 i;
 
-       if ( num_bits == 0 )
+       if (num_bits == 0)
                return;
 
-       outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
        sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
-       TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+       tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
 
-       for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
-               TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
-               (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
-               if ( data & i )
-                       TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+       for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+               tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+               (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+               if (data & i)
+                       tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
                else
-                       TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
-               (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+                       tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+               (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
        }
 
-} /* TLan_MiiSendData */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_MiiSync
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              base_port       The base IO port of the adapter in
-        *                              question.
-        *
-        *      This functions syncs all PHYs in terms of the MII configuration
-        *      bus.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_mii_sync
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             base_port       The base IO port of the adapter in
+ *                             question.
+ *
+ *     This functions syncs all PHYs in terms of the MII configuration
+ *     bus.
+ *
+ **************************************************************/
 
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
 {
        int i;
        u16 sio;
 
-       outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
        sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
 
-       TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
-       for ( i = 0; i < 32; i++ ) {
-               TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
-               TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+       tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+       for (i = 0; i < 32; i++) {
+               tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+               tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
        }
 
-} /* TLan_MiiSync */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_MiiWriteReg
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              dev             The device structure for the device
-        *                              to write to.
-        *              phy             The address of the PHY to be written to.
-        *              reg             The register whose contents are to be
-        *                              written.
-        *              val             The value to be written to the register.
-        *
-        *      This function uses the TLAN's MII bus to write the contents of a
-        *      given register on a PHY.  It sends the appropriate info and then
-        *      writes the 16-bit register value from the MII configuration bus
-        *      via the TLAN SIO register.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_mii_write_reg
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev             The device structure for the device
+ *                             to write to.
+ *             phy             The address of the PHY to be written to.
+ *             reg             The register whose contents are to be
+ *                             written.
+ *             val             The value to be written to the register.
+ *
+ *     This function uses the TLAN's MII bus to write the contents of a
+ *     given register on a PHY.  It sends the appropriate info and then
+ *     writes the 16-bit register value from the MII configuration bus
+ *     via the TLAN SIO register.
+ *
+ **************************************************************/
 
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
 {
        u16     sio;
        int     minten;
        unsigned long flags = 0;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
 
        outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
        sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
        if (!in_irq())
                spin_lock_irqsave(&priv->lock, flags);
 
-       TLan_MiiSync( dev->base_addr );
+       tlan_mii_sync(dev->base_addr);
 
-       minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
-       if ( minten )
-               TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+       minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+       if (minten)
+               tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
 
-       TLan_MiiSendData( dev->base_addr, 0x1, 2 );     /* Start ( 01b ) */
-       TLan_MiiSendData( dev->base_addr, 0x1, 2 );     /* Write ( 01b ) */
-       TLan_MiiSendData( dev->base_addr, phy, 5 );     /* Device #      */
-       TLan_MiiSendData( dev->base_addr, reg, 5 );     /* Register #    */
+       tlan_mii_send_data(dev->base_addr, 0x1, 2);     /* start (01b) */
+       tlan_mii_send_data(dev->base_addr, 0x1, 2);     /* write (01b) */
+       tlan_mii_send_data(dev->base_addr, phy, 5);     /* device #      */
+       tlan_mii_send_data(dev->base_addr, reg, 5);     /* register #    */
 
-       TLan_MiiSendData( dev->base_addr, 0x2, 2 );     /* Send ACK */
-       TLan_MiiSendData( dev->base_addr, val, 16 );    /* Send Data */
+       tlan_mii_send_data(dev->base_addr, 0x2, 2);     /* send ACK */
+       tlan_mii_send_data(dev->base_addr, val, 16);    /* send data */
 
-       TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );        /* Idle cycle */
-       TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 
-       if ( minten )
-               TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+       if (minten)
+               tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
 
        if (!in_irq())
                spin_unlock_irqrestore(&priv->lock, flags);
 
-} /* TLan_MiiWriteReg */
+}
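
Together, tlan_mii_read_reg() and tlan_mii_write_reg() give the driver a read-modify-write primitive for PHY registers. A hypothetical helper built on them might look like the sketch below; the function name and error handling are illustrative only, and the normal kernel build environment for this driver is assumed.

static void tlan_mii_set_bits_sketch(struct net_device *dev, u16 phy,
				     u16 reg, u16 bits)
{
	u16 val;

	/* tlan_mii_read_reg() returns true when the PHY did not ACK */
	if (tlan_mii_read_reg(dev, phy, reg, &val))
		return;
	tlan_mii_write_reg(dev, phy, reg, val | bits);
}

Enabling autonegotiation on the currently selected PHY could then be written as tlan_mii_set_bits_sketch(dev, priv->phy[priv->phy_num], MII_GEN_CTL, MII_GC_AUTOENB), using the generic MII constants defined in tlan.h further below.
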
 
 
 
@@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Eeprom routines
+ThunderLAN driver eeprom routines
 
-       The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
-       EEPROM.  These functions are based on information in Microchip's
-       data sheet.  I don't know how well this functions will work with
-       other EEPROMs.
+the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A
+EEPROM.  these functions are based on information in microchip's
+data sheet.  I don't know how well this functions will work with
+other Eeproms.
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-        *      TLan_EeSendStart
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *
-        *      This function sends a start cycle to an EEPROM attached
-        *      to a TLAN chip.
-        *
-        **************************************************************/
-
-static void TLan_EeSendStart( u16 io_base )
+/***************************************************************
+ *     tlan_ee_send_start
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *
+ *     This function sends a start cycle to an EEPROM attached
+ *     to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
 {
        u16     sio;
 
-       outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
        sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 
-       TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-       TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
-       TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
-       TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-       TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-
-} /* TLan_EeSendStart */
-
-
-
-
-       /***************************************************************
-        *      TLan_EeSendByte
-        *
-        *      Returns:
-        *              If the correct ack was received, 0, otherwise 1
-        *      Parms:  io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *              data            The 8 bits of information to
-        *                              send to the EEPROM.
-        *              stop            If TLAN_EEPROM_STOP is passed, a
-        *                              stop cycle is sent after the
-        *                              byte is sent after the ack is
-        *                              read.
-        *
-        *      This function sends a byte on the serial EEPROM line,
-        *      driving the clock to send each bit. The function then
-        *      reverses transmission direction and reads an acknowledge
-        *      bit.
-        *
-        **************************************************************/
-
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+       tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+       tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+       tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+       tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+       tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
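
The five bit operations moved into tlan_ee_send_start() above form the usual I2C-style start condition: with the clock held high and the data output enabled, the data line is pulled low before the clock drops. The same sequence, restated with per-line comments (sketch only, same helpers as above):

	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);   /* clock high */
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);   /* data high */
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);   /* drive the data line */
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* data falls while clock is high: START */
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); /* clock low, ready for the first bit */
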
+
+
+
+
+/***************************************************************
+ *     tlan_ee_send_byte
+ *
+ *     Returns:
+ *             If the correct ack was received, 0, otherwise 1
+ *     Parms:  io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *             data            The 8 bits of information to
+ *                             send to the EEPROM.
+ *             stop            If TLAN_EEPROM_STOP is passed, a
+ *                             stop cycle is sent after the
+ *                             byte is sent after the ack is
+ *                             read.
+ *
+ *     This function sends a byte on the serial EEPROM line,
+ *     driving the clock to send each bit. The function then
+ *     reverses transmission direction and reads an acknowledge
+ *     bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
 {
        int     err;
        u8      place;
        u16     sio;
 
-       outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
        sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 
        /* Assume clock is low, tx is enabled; */
-       for ( place = 0x80; place != 0; place >>= 1 ) {
-               if ( place & data )
-                       TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+       for (place = 0x80; place != 0; place >>= 1) {
+               if (place & data)
+                       tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
                else
-                       TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+                       tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
        }
-       TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
-       TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-       err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
-       TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-       TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+       tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+       tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+       err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+       tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+       tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
 
-       if ( ( ! err ) && stop ) {
+       if ((!err) && stop) {
                /* STOP, raise data while clock is high */
-               TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+               tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
        }
 
        return err;
 
-} /* TLan_EeSendByte */
-
-
-
-
-       /***************************************************************
-        *      TLan_EeReceiveByte
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *              data            An address to a char to hold the
-        *                              data sent from the EEPROM.
-        *              stop            If TLAN_EEPROM_STOP is passed, a
-        *                              stop cycle is sent after the
-        *                              byte is received, and no ack is
-        *                              sent.
-        *
-        *      This function receives 8 bits of data from the EEPROM
-        *      over the serial link.  It then sends and ack bit, or no
-        *      ack and a stop bit.  This function is used to retrieve
-        *      data after the address of a byte in the EEPROM has been
-        *      sent.
-        *
-        **************************************************************/
-
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+}
+
+
+
+
+/***************************************************************
+ *     tlan_ee_receive_byte
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *             data            An address to a char to hold the
+ *                             data sent from the EEPROM.
+ *             stop            If TLAN_EEPROM_STOP is passed, a
+ *                             stop cycle is sent after the
+ *                             byte is received, and no ack is
+ *                             sent.
+ *
+ *     This function receives 8 bits of data from the EEPROM
+ *     over the serial link.  It then sends and ack bit, or no
+ *     ack and a stop bit.  This function is used to retrieve
+ *     data after the address of a byte in the EEPROM has been
+ *     sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
 {
        u8  place;
        u16 sio;
 
-       outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
        sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
        *data = 0;
 
        /* Assume clock is low, tx is enabled; */
-       TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
-       for ( place = 0x80; place; place >>= 1 ) {
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+       tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+       for (place = 0x80; place; place >>= 1) {
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
                        *data |= place;
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
        }
 
-       TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
-       if ( ! stop ) {
-               TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );       /* Ack = 0 */
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+       tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+       if (!stop) {
+               tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
        } else {
-               TLan_SetBit( TLAN_NET_SIO_EDATA, sio );         /* No ack = 1 (?) */
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+               tlan_set_bit(TLAN_NET_SIO_EDATA, sio);  /* no ack = 1 (?) */
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
                /* STOP, raise data while clock is high */
-               TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
-       }
-
-} /* TLan_EeReceiveByte */
-
-
-
-
-       /***************************************************************
-        *      TLan_EeReadByte
-        *
-        *      Returns:
-        *              No error = 0, else, the stage at which the error
-        *              occurred.
-        *      Parms:
-        *              io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *              ee_addr         The address of the byte in the
-        *                              EEPROM whose contents are to be
-        *                              retrieved.
-        *              data            An address to a char to hold the
-        *                              data obtained from the EEPROM.
-        *
-        *      This function reads a byte of information from an byte
-        *      cell in the EEPROM.
-        *
-        **************************************************************/
-
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+               tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+       }
+
+}
+
+
+
+
+/***************************************************************
+ *     tlan_ee_read_byte
+ *
+ *     Returns:
+ *             No error = 0, else, the stage at which the error
+ *             occurred.
+ *     Parms:
+ *             io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *             ee_addr         The address of the byte in the
+ *                             EEPROM whose contents are to be
+ *                             retrieved.
+ *             data            An address to a char to hold the
+ *                             data obtained from the EEPROM.
+ *
+ *     This function reads a byte of information from an byte
+ *     cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
 {
        int err;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        unsigned long flags = 0;
-       int ret=0;
+       int ret = 0;
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       TLan_EeSendStart( dev->base_addr );
-       err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
-       if (err)
-       {
-               ret=1;
+       tlan_ee_send_start(dev->base_addr);
+       err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+       if (err) {
+               ret = 1;
                goto fail;
        }
-       err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
-       if (err)
-       {
-               ret=2;
+       err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+       if (err) {
+               ret = 2;
                goto fail;
        }
-       TLan_EeSendStart( dev->base_addr );
-       err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
-       if (err)
-       {
-               ret=3;
+       tlan_ee_send_start(dev->base_addr);
+       err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+       if (err) {
+               ret = 3;
                goto fail;
        }
-       TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+       tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
 fail:
        spin_unlock_irqrestore(&priv->lock, flags);
 
        return ret;
 
-} /* TLan_EeReadByte */
+}
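
The sequence in tlan_ee_read_byte() is the standard 24C02 random read: start, device write address (0xa0), byte address, repeated start, device read address (0xa1), then one byte clocked in with a stop. A hypothetical usage sketch for pulling a 6-byte station address out of the EEPROM follows; the helper name is made up, the real probe path may differ, and the kernel build context is assumed.

static int tlan_read_mac_sketch(struct net_device *dev, u8 ee_base, u8 *mac)
{
	int i, err;

	for (i = 0; i < 6; i++) {
		err = tlan_ee_read_byte(dev, ee_base + i, &mac[i]);
		if (err)
			return err;     /* 1, 2 or 3: the stage that failed */
	}
	return 0;
}
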
 
 
 
index 3315ced..5fc98a8 100644 (file)
@@ -20,8 +20,8 @@
  ********************************************************************/
 
 
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
 #include <linux/netdevice.h>
 
 
 #define TLAN_IGNORE            0
 #define TLAN_RECORD            1
 
-#define TLAN_DBG(lvl, format, args...) \
-       do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...)                                 \
+       do {                                                            \
+               if (debug&lvl)                                          \
+                       printk(KERN_DEBUG "TLAN: " format, ##args);     \
+       } while (0)
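
Usage of the reworked macro is unchanged; an illustrative call site would be:

	TLAN_DBG(TLAN_DEBUG_TX, "%s: queuing %d byte frame\n",
		 dev->name, skb->len);

The message is printed only when the corresponding bit is set in the driver's debug mask.
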
 
 #define TLAN_DEBUG_GNRL                0x0001
 #define TLAN_DEBUG_TX          0x0002
@@ -50,7 +53,8 @@
 #define TLAN_DEBUG_PROBE       0x0010
 
 #define TX_TIMEOUT             (10*HZ)  /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS                8        /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS                8        /* Max number of boards installed
+                                           at a time */
 
 
        /*****************************************************************
 #define PCI_DEVICE_ID_OLICOM_OC2326                    0x0014
 #endif
 
-typedef struct tlan_adapter_entry {
-       u16     vendorId;
-       u16     deviceId;
-       char    *deviceLabel;
+struct tlan_adapter_entry {
+       u16     vendor_id;
+       u16     device_id;
+       char    *device_label;
        u32     flags;
-       u16     addrOfs;
-} TLanAdapterEntry;
+       u16     addr_ofs;
+};
 
 #define TLAN_ADAPTER_NONE              0x00000000
 #define TLAN_ADAPTER_UNMANAGED_PHY     0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
 #define TLAN_CSTAT_DP_PR       0x0100
 
 
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
        u32     count;
        u32     address;
-} TLanBufferRef;
+};
 
 
-typedef struct tlan_list_tag {
+struct tlan_list {
        u32             forward;
-       u16             cStat;
-       u16             frameSize;
-       TLanBufferRef   buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+       u16             c_stat;
+       u16             frame_size;
+       struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
 
 
 typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
         *
         ****************************************************************/
 
-typedef struct tlan_private_tag {
-       struct net_device       *nextDevice;
-       struct pci_dev          *pciDev;
+struct tlan_priv {
+       struct net_device       *next_device;
+       struct pci_dev          *pci_dev;
        struct net_device       *dev;
-       void                    *dmaStorage;
-       dma_addr_t              dmaStorageDMA;
-       unsigned int            dmaSize;
-       u8                      *padBuffer;
-       TLanList                *rxList;
-       dma_addr_t              rxListDMA;
-       u8                      *rxBuffer;
-       dma_addr_t              rxBufferDMA;
-       u32                     rxHead;
-       u32                     rxTail;
-       u32                     rxEocCount;
-       TLanList                *txList;
-       dma_addr_t              txListDMA;
-       u8                      *txBuffer;
-       dma_addr_t              txBufferDMA;
-       u32                     txHead;
-       u32                     txInProgress;
-       u32                     txTail;
-       u32                     txBusyCount;
-       u32                     phyOnline;
-       u32                     timerSetAt;
-       u32                     timerType;
+       void                    *dma_storage;
+       dma_addr_t              dma_storage_dma;
+       unsigned int            dma_size;
+       u8                      *pad_buffer;
+       struct tlan_list        *rx_list;
+       dma_addr_t              rx_list_dma;
+       u8                      *rx_buffer;
+       dma_addr_t              rx_buffer_dma;
+       u32                     rx_head;
+       u32                     rx_tail;
+       u32                     rx_eoc_count;
+       struct tlan_list        *tx_list;
+       dma_addr_t              tx_list_dma;
+       u8                      *tx_buffer;
+       dma_addr_t              tx_buffer_dma;
+       u32                     tx_head;
+       u32                     tx_in_progress;
+       u32                     tx_tail;
+       u32                     tx_busy_count;
+       u32                     phy_online;
+       u32                     timer_set_at;
+       u32                     timer_type;
        struct timer_list       timer;
        struct board            *adapter;
-       u32                     adapterRev;
+       u32                     adapter_rev;
        u32                     aui;
        u32                     debug;
        u32                     duplex;
        u32                     phy[2];
-       u32                     phyNum;
+       u32                     phy_num;
        u32                     speed;
-       u8                      tlanRev;
-       u8                      tlanFullDuplex;
+       u8                      tlan_rev;
+       u8                      tlan_full_duplex;
        spinlock_t              lock;
        u8                      link;
        u8                      is_eisa;
        struct work_struct                      tlan_tqueue;
        u8                      neg_be_verbose;
-} TLanPrivateInfo;
+};
 
 
 
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
         ****************************************************************/
 
 #define TLAN_HOST_CMD                  0x00
-#define        TLAN_HC_GO              0x80000000
+#define        TLAN_HC_GO              0x80000000
 #define                TLAN_HC_STOP            0x40000000
 #define                TLAN_HC_ACK             0x20000000
 #define                TLAN_HC_CS_MASK         0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
 #define                TLAN_NET_CMD_TRFRAM     0x02
 #define                TLAN_NET_CMD_TXPACE     0x01
 #define TLAN_NET_SIO                   0x01
-#define        TLAN_NET_SIO_MINTEN     0x80
+#define        TLAN_NET_SIO_MINTEN     0x80
 #define                TLAN_NET_SIO_ECLOK      0x40
 #define                TLAN_NET_SIO_ETXEN      0x20
 #define                TLAN_NET_SIO_EDATA      0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
 #define                TLAN_NET_MASK_MASK4     0x10
 #define                TLAN_NET_MASK_RSRVD     0x0F
 #define TLAN_NET_CONFIG                        0x04
-#define        TLAN_NET_CFG_RCLK       0x8000
+#define        TLAN_NET_CFG_RCLK       0x8000
 #define                TLAN_NET_CFG_TCLK       0x4000
 #define                TLAN_NET_CFG_BIT        0x2000
 #define                TLAN_NET_CFG_RXCRC      0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
 /* Generic MII/PHY Registers */
 
 #define MII_GEN_CTL                    0x00
-#define        MII_GC_RESET            0x8000
+#define        MII_GC_RESET            0x8000
 #define                MII_GC_LOOPBK           0x4000
 #define                MII_GC_SPEEDSEL         0x2000
 #define                MII_GC_AUTOENB          0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
 #define                MII_GS_EXTCAP           0x0001
 #define MII_GEN_ID_HI                  0x02
 #define MII_GEN_ID_LO                  0x03
-#define        MII_GIL_OUI             0xFC00
-#define        MII_GIL_MODEL           0x03F0
-#define        MII_GIL_REVISION        0x000F
+#define        MII_GIL_OUI             0xFC00
+#define        MII_GIL_MODEL           0x03F0
+#define        MII_GIL_REVISION        0x000F
 #define MII_AN_ADV                     0x04
 #define MII_AN_LPA                     0x05
 #define MII_AN_EXP                     0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
 
 #define TLAN_TLPHY_ID                  0x10
 #define TLAN_TLPHY_CTL                 0x11
-#define        TLAN_TC_IGLINK          0x8000
+#define        TLAN_TC_IGLINK          0x8000
 #define                TLAN_TC_SWAPOL          0x4000
 #define                TLAN_TC_AUISEL          0x2000
 #define                TLAN_TC_SQEEN           0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
 #define LEVEL1_ID1                     0x7810
 #define LEVEL1_ID2                     0x0000
 
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
 
 /* Routines to access internal registers. */
 
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
 
-} /* TLan_DioRead8 */
+}
 
 
 
 
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
 
-} /* TLan_DioRead16 */
+}
 
 
 
 
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        return inl(base_addr + TLAN_DIO_DATA);
 
-} /* TLan_DioRead32 */
+}
 
 
 
 
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
 
 
 
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
 
 
 
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
 
 }
 
-#define TLan_ClearBit( bit, port )     outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port )       ((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port )       outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port)      outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port)        ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port)        outb_p(inb_p(port) | bit, port)
 
 /*
  * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
  *
  * The original code was:
  *
- * u32 xor( u32 a, u32 b ) {   return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32 xor(u32 a, u32 b) {     return ((a && !b ) || (! a && b )); }
  *
- * #define XOR8( a, b, c, d, e, f, g, h )      \
- *     xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit )                ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h)        \
+ *     xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit)          (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
  *
- *     hash  = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- *                   DA(a,30), DA(a,36), DA(a,42) );
- *     hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- *                   DA(a,31), DA(a,37), DA(a,43) ) << 1;
- *     hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- *                   DA(a,32), DA(a,38), DA(a,44) ) << 2;
- *     hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- *                   DA(a,33), DA(a,39), DA(a,45) ) << 3;
- *     hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- *                   DA(a,34), DA(a,40), DA(a,46) ) << 4;
- *     hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- *                   DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ *     hash  = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ *                   DA(a,30), DA(a,36), DA(a,42));
+ *     hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ *                   DA(a,31), DA(a,37), DA(a,43)) << 1;
+ *     hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ *                   DA(a,32), DA(a,38), DA(a,44)) << 2;
+ *     hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ *                   DA(a,33), DA(a,39), DA(a,45)) << 3;
+ *     hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ *                   DA(a,34), DA(a,40), DA(a,46)) << 4;
+ *     hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ *                   DA(a,35), DA(a,41), DA(a,47)) << 5;
  *
  */
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
 {
-        u8     hash;
+       u8     hash;
 
-        hash = (a[0]^a[3]);             /* & 077 */
-        hash ^= ((a[0]^a[3])>>6);       /* & 003 */
-        hash ^= ((a[1]^a[4])<<2);       /* & 074 */
-        hash ^= ((a[1]^a[4])>>4);       /* & 017 */
-        hash ^= ((a[2]^a[5])<<4);       /* & 060 */
-        hash ^= ((a[2]^a[5])>>2);       /* & 077 */
+       hash = (a[0]^a[3]);             /* & 077 */
+       hash ^= ((a[0]^a[3])>>6);       /* & 003 */
+       hash ^= ((a[1]^a[4])<<2);       /* & 074 */
+       hash ^= ((a[1]^a[4])>>4);       /* & 017 */
+       hash ^= ((a[2]^a[5])<<4);       /* & 060 */
+       hash ^= ((a[2]^a[5])>>2);       /* & 077 */
 
-        return hash & 077;
+       return hash & 077;
 }
 #endif
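
The folded implementation of tlan_hash_func() above is equivalent to the bit-wise definition quoted in the comment (the XOR of the eight 6-bit groups of the address). A small stand-alone check, in userspace C and not part of the driver, that computes the hash both ways for a sample address:

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_folded(const uint8_t *a)
{
	uint8_t hash;

	hash  = (a[0] ^ a[3]);
	hash ^= ((a[0] ^ a[3]) >> 6);
	hash ^= ((a[1] ^ a[4]) << 2);
	hash ^= ((a[1] ^ a[4]) >> 4);
	hash ^= ((a[2] ^ a[5]) << 4);
	hash ^= ((a[2] ^ a[5]) >> 2);
	return hash & 077;
}

static uint32_t hash_reference(const uint8_t *a)
{
	uint32_t hash = 0;
	int bit, k, x;

	for (k = 0; k < 6; k++) {                       /* output bit k */
		x = 0;
		for (bit = k; bit < 48; bit += 6)       /* 8 source bits */
			x ^= (a[bit / 8] >> (bit % 8)) & 1;
		hash |= (uint32_t)x << k;
	}
	return hash;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x50, 0x8b, 0x12, 0x34, 0x56 };

	printf("folded=%02o reference=%02o\n",
	       hash_folded(mac), hash_reference(mac));
	return hash_folded(mac) == hash_reference(mac) ? 0 : 1;
}
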
index b100bd5..f5e9ac0 100644 (file)
@@ -34,6 +34,8 @@
  *    Modifications for 2.3.99-pre5 kernel.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME       "tun"
 #define DRV_VERSION    "1.6"
 #define DRV_DESCRIPTION        "Universal TUN/TAP device driver"
 #ifdef TUN_DEBUG
 static int debug;
 
-#define DBG  if(tun->debug)printk
-#define DBG1 if(debug==2)printk
+#define tun_debug(level, tun, fmt, args...)                    \
+do {                                                           \
+       if (tun->debug)                                         \
+               netdev_printk(level, tun->dev, fmt, ##args);    \
+} while (0)
+#define DBG1(level, fmt, args...)                              \
+do {                                                           \
+       if (debug == 2)                                         \
+               printk(level fmt, ##args);                      \
+} while (0)
 #else
-#define DBG( a... )
-#define DBG1( a... )
+#define tun_debug(level, tun, fmt, args...)                    \
+do {                                                           \
+       if (0)                                                  \
+               netdev_printk(level, tun->dev, fmt, ##args);    \
+} while (0)
+#define DBG1(level, fmt, args...)                              \
+do {                                                           \
+       if (0)                                                  \
+               printk(level fmt, ##args);                      \
+} while (0)
 #endif
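
The conversions later in this patch show the intended call style: per-device messages go through tun_debug() with an explicit log level and the tun pointer, while DBG1() remains for the few paths that have no tun context yet, such as tun_chr_open(). Two examples taken from the hunks below:

	tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
		  ifr.ifr_hwaddr.sa_data);
	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
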
 
 #define FLT_EXACT_COUNT 8
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
                tun_detach(tfile->tun);
 }
 
-/* TAP filterting */
+/* TAP filtering */
 static void addr_hash_set(u32 *mask, const u8 *addr)
 {
        int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
-       DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
+       tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
        /* Drop packet if interface is not attached */
        if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 
        sk = tun->socket.sk;
 
-       DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 
        poll_wait(file, &tun->wq.wait, wait);
 
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
        if (!tun)
                return -EBADFD;
 
-       DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
+       tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
 
        result = tun_get_user(tun, iv, iov_length(iv, count),
                              file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
                        else if (sinfo->gso_type & SKB_GSO_UDP)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
-                               printk(KERN_ERR "tun: unexpected GSO type: "
+                               pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
                                       sinfo->gso_type, gso.gso_size,
                                       gso.hdr_len);
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
        struct sk_buff *skb;
        ssize_t ret = 0;
 
-       DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 
        add_wait_queue(&tun->wq.wait, &wait);
        while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
-                       printk(KERN_ERR "Failed to create tun sysfs files\n");
+                       pr_err("Failed to create tun sysfs files\n");
 
                sk->sk_destruct = tun_sock_destruct;
 
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        goto failed;
        }
 
-       DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
        if (ifr->ifr_flags & IFF_NO_PI)
                tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 static int tun_get_iff(struct net *net, struct tun_struct *tun,
                       struct ifreq *ifr)
 {
-       DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_get_iff\n");
 
        strcpy(ifr->ifr_name, tun->dev->name);
 
@@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
  * privs required. */
 static int set_offload(struct net_device *dev, unsigned long arg)
 {
-       unsigned int old_features, features;
+       u32 old_features, features;
 
        old_features = dev->features;
        /* Unset features, set them as we chew on the arg. */
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        if (!tun)
                goto unlock;
 
-       DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
+       tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
 
        ret = 0;
        switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                else
                        tun->flags &= ~TUN_NOCHECKSUM;
 
-               DBG(KERN_INFO "%s: checksum %s\n",
-                   tun->dev->name, arg ? "disabled" : "enabled");
+               tun_debug(KERN_INFO, tun, "checksum %s\n",
+                         arg ? "disabled" : "enabled");
                break;
 
        case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                else
                        tun->flags &= ~TUN_PERSIST;
 
-               DBG(KERN_INFO "%s: persist %s\n",
-                   tun->dev->name, arg ? "enabled" : "disabled");
+               tun_debug(KERN_INFO, tun, "persist %s\n",
+                         arg ? "enabled" : "disabled");
                break;
 
        case TUNSETOWNER:
                /* Set owner of the device */
                tun->owner = (uid_t) arg;
 
-               DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
+               tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
                break;
 
        case TUNSETGROUP:
                /* Set group of the device */
                tun->group= (gid_t) arg;
 
-               DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
+               tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
                break;
 
        case TUNSETLINK:
                /* Only allow setting the type when the interface is down */
                if (tun->dev->flags & IFF_UP) {
-                       DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
-                               tun->dev->name);
+                       tun_debug(KERN_INFO, tun,
+                                 "Linktype set failed because interface is up\n");
                        ret = -EBUSY;
                } else {
                        tun->dev->type = (int) arg;
-                       DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
+                       tun_debug(KERN_INFO, tun, "linktype set to %d\n",
+                                 tun->dev->type);
                        ret = 0;
                }
                break;
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 
        case SIOCSIFHWADDR:
                /* Set hw address */
-               DBG(KERN_DEBUG "%s: set hw address: %pM\n",
-                       tun->dev->name, ifr.ifr_hwaddr.sa_data);
+               tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
+                         ifr.ifr_hwaddr.sa_data);
 
                ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
                break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
        if (!tun)
                return -EBADFD;
 
-       DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
+       tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
 
        if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
                goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 {
        struct tun_file *tfile;
 
-       DBG1(KERN_INFO "tunX: tun_chr_open\n");
+       DBG1(KERN_INFO, "tunX: tun_chr_open\n");
 
        tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
        if (tun) {
                struct net_device *dev = tun->dev;
 
-               DBG(KERN_INFO "%s: tun_chr_close\n", dev->name);
+               tun_debug(KERN_INFO, tun, "tun_chr_close\n");
 
                __tun_detach(tun);
 
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
 {
        int ret = 0;
 
-       printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-       printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
+       pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+       pr_info("%s\n", DRV_COPYRIGHT);
 
        ret = rtnl_link_register(&tun_link_ops);
        if (ret) {
-               printk(KERN_ERR "tun: Can't register link_ops\n");
+               pr_err("Can't register link_ops\n");
                goto err_linkops;
        }
 
        ret = misc_register(&tun_miscdev);
        if (ret) {
-               printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
+               pr_err("Can't register misc device %d\n", TUN_MINOR);
                goto err_misc;
        }
        return  0;
index a3c46f6..7fa5ec2 100644 (file)
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
 #include <linux/in6.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
-#include <generated/utsrelease.h>
 
 #include "typhoon.h"
 
 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_NAME);
 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
index 04e8ce1..7113168 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * cdc_ncm.c
  *
- * Copyright (C) ST-Ericsson 2010
+ * Copyright (C) ST-Ericsson 2010-2011
  * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
  * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
  *
@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define        DRIVER_VERSION                          "17-Jan-2011"
+#define        DRIVER_VERSION                          "7-Feb-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN           0x10
@@ -77,6 +77,9 @@
  */
 #define        CDC_NCM_DPT_DATAGRAMS_MAX               32
 
+/* Maximum amount of IN datagrams in NTB */
+#define        CDC_NCM_DPT_DATAGRAMS_IN_MAX            0 /* unlimited */
+
 /* Restart the timer, if amount of datagrams is less than given value */
 #define        CDC_NCM_RESTART_TIMER_DATAGRAM_CNT      3
 
        (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
        (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
 
-struct connection_speed_change {
-       __le32  USBitRate; /* holds 3GPP downlink value, bits per second */
-       __le32  DSBitRate; /* holds 3GPP uplink value, bits per second */
-} __attribute__ ((packed));
-
 struct cdc_ncm_data {
        struct usb_cdc_ncm_nth16 nth16;
        struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 {
        struct usb_cdc_notification req;
        u32 val;
-       __le16 max_datagram_size;
        u8 flags;
        u8 iface_no;
        int err;
+       u16 ntb_fmt_supported;
 
        iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
 
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
        ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
        ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
        ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+       /* devices prior to NCM Errata shall set this field to zero */
+       ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+       ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
        if (ctx->func_desc != NULL)
                flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 
        pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
                 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
-                "wNdpOutAlignment=%u flags=0x%x\n",
+                "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
                 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-                ctx->tx_ndp_modulus, flags);
+                ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
 
-       /* max count of tx datagrams without terminating NULL entry */
-       ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+       /* max count of tx datagrams */
+       if ((ctx->tx_max_datagrams == 0) ||
+                       (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
+               ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
 
        /* verify maximum size of received NTB in bytes */
-       if ((ctx->rx_max <
-           (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
-           (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+       if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
+               pr_debug("Using min receive length=%d\n",
+                                               USB_CDC_NCM_NTB_MIN_IN_SIZE);
+               ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+       }
+
+       if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
                pr_debug("Using default maximum receive length=%d\n",
                                                CDC_NCM_NTB_MAX_SIZE_RX);
                ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
        }
 
+       /* inform device about NTB input size changes */
+       if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+               req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+                                                       USB_RECIP_INTERFACE;
+               req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
+               req.wValue = 0;
+               req.wIndex = cpu_to_le16(iface_no);
+
+               if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
+                       struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+
+                       req.wLength = 8;
+                       ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+                       ndp_in_sz.wNtbInMaxDatagrams =
+                                       cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
+                       ndp_in_sz.wReserved = 0;
+                       err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
+                                                                       1000);
+               } else {
+                       __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+                       req.wLength = 4;
+                       err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
+                                                               NULL, 1000);
+               }
+
+               if (err)
+                       pr_debug("Setting NTB Input Size failed\n");
+       }
+
        /* verify maximum size of transmitted NTB in bytes */
        if ((ctx->tx_max <
            (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
        /* additional configuration */
 
        /* set CRC Mode */
-       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-       req.bNotificationType = USB_CDC_SET_CRC_MODE;
-       req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
-       req.wIndex = cpu_to_le16(iface_no);
-       req.wLength = 0;
-
-       err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-       if (err)
-               pr_debug("Setting CRC mode off failed\n");
+       if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
+               req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+                                                       USB_RECIP_INTERFACE;
+               req.bNotificationType = USB_CDC_SET_CRC_MODE;
+               req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+               req.wIndex = cpu_to_le16(iface_no);
+               req.wLength = 0;
+
+               err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+               if (err)
+                       pr_debug("Setting CRC mode off failed\n");
+       }
 
-       /* set NTB format */
-       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-       req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
-       req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
-       req.wIndex = cpu_to_le16(iface_no);
-       req.wLength = 0;
+       /* set NTB format, if both formats are supported */
+       if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
+               req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+                                                       USB_RECIP_INTERFACE;
+               req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+               req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+               req.wIndex = cpu_to_le16(iface_no);
+               req.wLength = 0;
+
+               err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+               if (err)
+                       pr_debug("Setting NTB format to 16-bit failed\n");
+       }
 
-       err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-       if (err)
-               pr_debug("Setting NTB format to 16-bit failed\n");
+       ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
 
        /* set Max Datagram Size (MTU) */
-       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
-       req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
-       req.wValue = 0;
-       req.wIndex = cpu_to_le16(iface_no);
-       req.wLength = cpu_to_le16(2);
+       if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
+               __le16 max_datagram_size;
+               u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+               req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
+                                                       USB_RECIP_INTERFACE;
+               req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+               req.wValue = 0;
+               req.wIndex = cpu_to_le16(iface_no);
+               req.wLength = cpu_to_le16(2);
+
+               err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
+                                                                       1000);
+               if (err) {
+                       pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
+                                               CDC_NCM_MIN_DATAGRAM_SIZE);
+               } else {
+                       ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+                       /* Check Eth descriptor value */
+                       if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
+                               if (ctx->max_datagram_size > eth_max_sz)
+                                       ctx->max_datagram_size = eth_max_sz;
+                       } else {
+                               if (ctx->max_datagram_size >
+                                               CDC_NCM_MAX_DATAGRAM_SIZE)
+                                       ctx->max_datagram_size =
+                                               CDC_NCM_MAX_DATAGRAM_SIZE;
+                       }
 
-       err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
-       if (err) {
-               pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
-                        CDC_NCM_MIN_DATAGRAM_SIZE);
-               /* use default */
-               ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-       } else {
-               ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+                       if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+                               ctx->max_datagram_size =
+                                       CDC_NCM_MIN_DATAGRAM_SIZE;
+
+                       /* if value changed, update device */
+                       req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+                                                       USB_RECIP_INTERFACE;
+                       req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
+                       req.wValue = 0;
+                       req.wIndex = cpu_to_le16(iface_no);
+                       req.wLength = 2;
+                       max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+
+                       err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
+                                                               0, NULL, 1000);
+                       if (err)
+                               pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+               }
 
-               if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-                       ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-               else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-                       ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
        }
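/*
 * Summary of the clamping above: the device-reported value is limited by the
 * Ethernet descriptor's wMaxSegmentSize (when that is below
 * CDC_NCM_MAX_DATAGRAM_SIZE) or by CDC_NCM_MAX_DATAGRAM_SIZE itself, and is
 * never allowed below CDC_NCM_MIN_DATAGRAM_SIZE; the final value is pushed
 * back to the device with SET_MAX_DATAGRAM_SIZE.
 */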
 
        if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 
                        ctx->ether_desc =
                                        (const struct usb_cdc_ether_desc *)buf;
-
                        dev->hard_mtu =
                                le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
 
-                       if (dev->hard_mtu <
-                           (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
-                               dev->hard_mtu =
-                                       CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
-
-                       else if (dev->hard_mtu >
-                                (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
-                               dev->hard_mtu =
-                                       CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+                       if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
+                               dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
+                       else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
+                               dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
                        break;
 
                case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
        u32 offset;
        u32 last_offset;
        u16 n = 0;
-       u8 timeout = 0;
+       u8 ready2send = 0;
 
        /* if there is a remaining skb, it gets priority */
        if (skb != NULL)
                swap(skb, ctx->tx_rem_skb);
        else
-               timeout = 1;
+               ready2send = 1;
 
        /*
         * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 
        for (; n < ctx->tx_max_datagrams; n++) {
                /* check if end of transmit buffer is reached */
-               if (offset >= ctx->tx_max)
+               if (offset >= ctx->tx_max) {
+                       ready2send = 1;
                        break;
-
+               }
                /* compute maximum buffer size */
                rem = ctx->tx_max - offset;
 
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
                                }
                                ctx->tx_rem_skb = skb;
                                skb = NULL;
-
-                               /* loop one more time */
-                               timeout = 1;
+                               ready2send = 1;
                        }
                        break;
                }
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
                ctx->tx_curr_last_offset = last_offset;
                goto exit_no_skb;
 
-       } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+       } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
                /* wait for more frames */
                /* push variables */
                ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
                                        cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
        ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
        ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
-       ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+       ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
                                                        ctx->tx_ndp_modulus);
 
        memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
        rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
                                        sizeof(struct usb_cdc_ncm_dpe16));
        ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
-       ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+       ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
 
-       memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
+       memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
                                                &(ctx->tx_ncm.ndp16),
                                                sizeof(ctx->tx_ncm.ndp16));
 
-       memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
+       memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
                                        sizeof(ctx->tx_ncm.ndp16),
                                        &(ctx->tx_ncm.dpe16),
                                        (ctx->tx_curr_frame_num + 1) *
@@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
                goto error;
        }
 
-       temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+       temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
        if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
                pr_debug("invalid DPT16 index\n");
                goto error;
@@ -1048,10 +1115,10 @@ error:
 
 static void
 cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
-                    struct connection_speed_change *data)
+                    struct usb_cdc_speed_change *data)
 {
-       uint32_t rx_speed = le32_to_cpu(data->USBitRate);
-       uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+       uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+       uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
        /*
         * Currently the USB-NET API does not support reporting the actual
@@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
        /* test for split data in 8-byte chunks */
        if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
                cdc_ncm_speed_change(ctx,
-                     (struct connection_speed_change *)urb->transfer_buffer);
+                     (struct usb_cdc_speed_change *)urb->transfer_buffer);
                return;
        }
 
@@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
                break;
 
        case USB_CDC_NOTIFY_SPEED_CHANGE:
-               if (urb->actual_length <
-                   (sizeof(*event) + sizeof(struct connection_speed_change)))
+               if (urb->actual_length < (sizeof(*event) +
+                                       sizeof(struct usb_cdc_speed_change)))
                        set_bit(EVENT_STS_SPLIT, &dev->flags);
                else
                        cdc_ncm_speed_change(ctx,
-                               (struct connection_speed_change *) &event[1]);
+                               (struct usb_cdc_speed_change *) &event[1]);
                break;
 
        default:
index 02b622e..5002f5b 100644 (file)
@@ -650,6 +650,10 @@ static const struct usb_device_id products[] = {
        USB_DEVICE(0x0fe6, 0x8101),     /* DM9601 USB to Fast Ethernet Adapter */
        .driver_info = (unsigned long)&dm9601_info,
         },
+       {
+        USB_DEVICE(0x0fe6, 0x9700),    /* DM9601 USB to Fast Ethernet Adapter */
+        .driver_info = (unsigned long)&dm9601_info,
+        },
        {
         USB_DEVICE(0x0a46, 0x9000),    /* DM9000E */
         .driver_info = (unsigned long)&dm9601_info,
index bed8fce..6d83812 100644 (file)
@@ -2628,15 +2628,15 @@ exit:
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-       struct hso_tiocmget *tiocmget = serial->tiocmget;
+       struct hso_tiocmget *tiocmget;
+       if (!serial)
+               return;
+       tiocmget = serial->tiocmget;
        if (tiocmget) {
-               if (tiocmget->urb) {
-                       usb_free_urb(tiocmget->urb);
-                       tiocmget->urb = NULL;
-               }
+               usb_free_urb(tiocmget->urb);
+               tiocmget->urb = NULL;
                serial->tiocmget = NULL;
                kfree(tiocmget);
-
        }
 }
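/*
 * Note on the simplification above: usb_free_urb() accepts a NULL urb, so the
 * extra NULL test around it was unnecessary; the new early return guards
 * callers that may pass a NULL serial pointer.
 */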
 
index 5e98643..7dc8497 100644 (file)
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
 
        if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
                err("Firmware too big: %zu", fw->size);
+               release_firmware(fw);
                return -ENOSPC;
        }
        data_len = fw->size;
index ed9a416..95c41d5 100644 (file)
@@ -931,8 +931,10 @@ fail_halt:
                if (urb != NULL) {
                        clear_bit (EVENT_RX_MEMORY, &dev->flags);
                        status = usb_autopm_get_interface(dev->intf);
-                       if (status < 0)
+                       if (status < 0) {
+                               usb_free_urb(urb);
                                goto fail_lowmem;
+                       }
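/*
 * The usb_free_urb() added above appears to plug a leak: the freshly
 * allocated urb was never submitted, and the fail_lowmem path does not free
 * it when usb_autopm_get_interface() fails.
 */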
                        if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
                                resched = 0;
                        usb_autopm_put_interface(dev->intf);
index cc83fa7..105d7f0 100644 (file)
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        if (tb[IFLA_ADDRESS] == NULL)
                random_ether_addr(dev->dev_addr);
 
-       if (tb[IFLA_IFNAME])
-               nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
-       else
-               snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
-       if (strchr(dev->name, '%')) {
-               err = dev_alloc_name(dev, dev->name);
-               if (err < 0)
-                       goto err_alloc_name;
-       }
-
        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 
 err_register_dev:
        /* nothing to do */
-err_alloc_name:
 err_configure_peer:
        unregister_netdevice(peer);
        return err;
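/*
 * The IFLA_IFNAME/dev_alloc_name() handling removed above appears to
 * duplicate name handling already done by the rtnetlink core before
 * veth_newlink() is called, which is presumably why the err_alloc_name
 * label goes away as well (inference from the structure of this hunk).
 */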
index 09cac70..0d6fec6 100644 (file)
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
 static int velocity_set_wol(struct velocity_info *vptr)
 {
        struct mac_regs __iomem *regs = vptr->mac_regs;
+       enum speed_opt spd_dpx = vptr->options.spd_dpx;
        static u8 buf[256];
        int i;
 
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
 
        writew(0x0FFF, &regs->WOLSRClr);
 
+       if (spd_dpx == SPD_DPX_1000_FULL)
+               goto mac_done;
+
+       if (spd_dpx != SPD_DPX_AUTO)
+               goto advertise_done;
+
        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
        if (vptr->mii_status & VELOCITY_SPEED_1000)
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
 
+advertise_done:
        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 
        {
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
                writeb(GCR, &regs->CHIPGCR);
        }
 
+mac_done:
        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
        /* Turn on SWPTAG just before entering power mode */
        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
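/*
 * Effect of the new gotos: with a forced 1000M-full setting (mac_done) both
 * the MII re-advertisement and the forced-mode CHIPGCR setup are skipped;
 * with any other forced speed/duplex (advertise_done) only the advertisement
 * block is skipped, while auto-negotiation keeps the original path.
 */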
index aa2e69b..d722753 100644 (file)
@@ -361,7 +361,7 @@ enum  velocity_owner {
 #define MAC_REG_CHIPGSR     0x9C
 #define MAC_REG_TESTCFG     0x9D
 #define MAC_REG_DEBUG       0x9E
-#define MAC_REG_CHIPGCR     0x9F
+#define MAC_REG_CHIPGCR     0x9F       /* Chip Operation and Diagnostic Control */
 #define MAC_REG_WOLCR0_SET  0xA0
 #define MAC_REG_WOLCR1_SET  0xA1
 #define MAC_REG_PWCFG_SET   0xA2
@@ -848,10 +848,10 @@ enum  velocity_owner {
  *     Bits in CHIPGCR register
  */
 
-#define CHIPGCR_FCGMII      0x80       /* enable GMII mode */
-#define CHIPGCR_FCFDX       0x40
+#define CHIPGCR_FCGMII      0x80       /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX       0x40       /* force full duplex */
 #define CHIPGCR_FCRESV      0x20
-#define CHIPGCR_FCMODE      0x10
+#define CHIPGCR_FCMODE      0x10       /* enable MAC forced mode */
 #define CHIPGCR_LPSOPT      0x08
 #define CHIPGCR_TM1US       0x04
 #define CHIPGCR_TM0US       0x02
index 90a23e4..82dba5a 100644 (file)
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
        }
 }
 
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+       napi_enable(&vi->napi);
+
+       /* If all buffers were filled by other side before we napi_enabled, we
+        * won't get another interrupt, so process any outstanding packets
+        * now.  virtnet_poll wants to re-enable the queue, so we disable here.
+        * We synchronize against interrupts via NAPI_STATE_SCHED */
+       if (napi_schedule_prep(&vi->napi)) {
+               virtqueue_disable_cb(vi->rvq);
+               __napi_schedule(&vi->napi);
+       }
+}
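/*
 * virtnet_napi_enable() factors the enable-and-kick sequence out of
 * virtnet_open() so that refill_work() (below) handles the "all buffers
 * already filled by the host" race the same way, instead of a bare
 * napi_enable().
 */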
+
 static void refill_work(struct work_struct *work)
 {
        struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        still_empty = !try_fill_recv(vi, GFP_KERNEL);
-       napi_enable(&vi->napi);
+       virtnet_napi_enable(vi);
 
        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
 
-       napi_enable(&vi->napi);
-
-       /* If all buffers were filled by other side before we napi_enabled, we
-        * won't get another interrupt, so process any outstanding packets
-        * now.  virtnet_poll wants re-enable the queue, so we disable here.
-        * We synchronize against interrupts via NAPI_STATE_SCHED */
-       if (napi_schedule_prep(&vi->napi)) {
-               virtqueue_disable_cb(vi->rvq);
-               __napi_schedule(&vi->napi);
-       }
+       virtnet_napi_enable(vi);
        return 0;
 }
 
index 01c05f5..e74e4b4 100644 (file)
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
                data1 = steer_ctrl = 0;
 
                status = vxge_hw_vpath_fw_api(vpath,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        VXGE_HW_FW_API_GET_EPROM_REV,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
+       ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+       ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
        ring->rxds_limit = config->rxds_limit;
 
        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 
        /* apply "interrupts per txdl" attribute */
        fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+       fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+       fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
 
        if (fifo->config->intr)
                fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -3690,7 +3694,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
        if (status != VXGE_HW_OK)
                goto exit;
 
-       if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
+       if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
            (rts_table !=
             VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
                *data1 = 0;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+               vpath->tim_tti_cfg1_saved = val64;
+
                val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
 
                if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+               vpath->tim_tti_cfg3_saved = val64;
        }
 
        if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+               vpath->tim_rti_cfg1_saved = val64;
+
                val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
 
                if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+               vpath->tim_rti_cfg3_saved = val64;
        }
 
        val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
        return status;
 }
 
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-       struct __vxge_hw_virtualpath *vpath;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-       struct vxge_hw_vp_config *config;
-       u64 val64;
-
-       vpath = &hldev->virtual_paths[vp_id];
-       vp_reg = vpath->vp_reg;
-       config = vpath->vp_config;
-
-       if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
-           config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-               config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-       }
-}
-
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
index e249e28..3c53aa7 100644 (file)
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
        u32                             vsport_number;
        u32                             max_kdfc_db;
        u32                             max_nofl_db;
+       u64                             tim_tti_cfg1_saved;
+       u64                             tim_tti_cfg3_saved;
+       u64                             tim_rti_cfg1_saved;
+       u64                             tim_rti_cfg3_saved;
 
        struct __vxge_hw_ring *____cacheline_aligned ringh;
        struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
        u32                                     doorbell_cnt;
        u32                                     total_db_cnt;
        u64                                     rxds_limit;
+       u32                                     rtimer;
+       u64                                     tim_rti_cfg1_saved;
+       u64                                     tim_rti_cfg3_saved;
 
        enum vxge_hw_status (*callback)(
                        struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
        u32                                     per_txdl_space;
        u32                                     vp_id;
        u32                                     tx_intr_num;
+       u32                                     rtimer;
+       u64                                     tim_tti_cfg1_saved;
+       u64                                     tim_tti_cfg3_saved;
 
        enum vxge_hw_status (*callback)(
                        struct __vxge_hw_fifo *fifo_handle,
index c81a651..395423a 100644 (file)
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
        struct vxge_hw_ring_rxd_info ext_info;
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
-       ring->pkts_processed = 0;
-
-       vxge_hw_ring_replenish(ringh);
 
        do {
                prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
        return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+       int i = 0;
+
+       /* Enable CI for RTI */
+       if (vdev->config.intr_type == MSI_X) {
+               for (i = 0; i < vdev->no_of_vpath; i++) {
+                       struct __vxge_hw_ring *hw_ring;
+
+                       hw_ring = vdev->vpaths[i].ring.handle;
+                       vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+               }
+       }
+
+       /* Enable CI for TTI */
+       for (i = 0; i < vdev->no_of_vpath; i++) {
+               struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+               vxge_hw_vpath_tti_ci_set(hw_fifo);
+               /*
+                * For INTA (with or without napi), set CI ON for only one
+                * vpath (there is only one free-running timer).
+                */
+               if ((vdev->config.intr_type == INTA) && (i == 0))
+                       break;
+       }
+
+       return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
        enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                netif_tx_wake_all_queues(vdev->ndev);
        }
 
+       /* configure CI */
+       vxge_config_ci_for_tti_rti(vdev);
+
 out:
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d  Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-       struct vxge_ring *ring =
-               container_of(napi, struct vxge_ring, napi);
+       struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+       int pkts_processed;
        int budget_org = budget;
-       ring->budget = budget;
 
+       ring->budget = budget;
+       ring->pkts_processed = 0;
        vxge_hw_vpath_poll_rx(ring->handle);
+       pkts_processed = ring->pkts_processed;
 
        if (ring->pkts_processed < budget_org) {
                napi_complete(napi);
+
                /* Re enable the Rx interrupts for the vpath */
                vxge_hw_channel_msix_unmask(
                                (struct __vxge_hw_channel *)ring->handle,
                                ring->rx_vector_no);
+               mmiowb();
        }
 
-       return ring->pkts_processed;
+       /* Return the local copy because, after the msix interrupt is cleared
+        * above, it may fire again right away and preempt this NAPI thread,
+        * updating ring->pkts_processed under us */
+       return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
        for (i = 0; i < vdev->no_of_vpath; i++) {
                ring = &vdev->vpaths[i].ring;
                ring->budget = budget;
+               ring->pkts_processed = 0;
                vxge_hw_vpath_poll_rx(ring->handle);
                pkts_processed += ring->pkts_processed;
                budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                                        netdev_get_tx_queue(vdev->ndev, 0);
                        vpath->fifo.indicate_max_pkts =
                                vdev->config.fifo_indicate_max_pkts;
+                       vpath->fifo.tx_vector_no = 0;
                        vpath->ring.rx_vector_no = 0;
                        vpath->ring.rx_csum = vdev->rx_csum;
                        vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
        return VXGE_HW_OK;
 }
 
+/**
+ *  adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ *  if the interrupts are not within a range
+ *  @fifo: pointer to transmit fifo structure
+ *  Description: The function changes the boundary timer and restriction timer
+ *  values depending on the traffic
+ *  Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+       fifo->interrupt_count++;
+       if (jiffies > fifo->jiffies + HZ / 100) {
+               struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+               fifo->jiffies = jiffies;
+               if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+                   hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+                       hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+                       vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+               } else if (hw_fifo->rtimer != 0) {
+                       hw_fifo->rtimer = 0;
+                       vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+               }
+               fifo->interrupt_count = 0;
+       }
+}
+
+/**
+ *  adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ *  if the interrupts are not within a range
+ *  @ring: pointer to receive ring structure
+ *  Description: The function increases or decreases the packet counts within
+ *  the ranges of traffic utilization, if the interrupts due to this ring are
+ *  not within a fixed range.
+ *  Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+       ring->interrupt_count++;
+       if (jiffies > ring->jiffies + HZ / 100) {
+               struct __vxge_hw_ring *hw_ring = ring->handle;
+
+               ring->jiffies = jiffies;
+               if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+                   hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+                       hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+                       vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+               } else if (hw_ring->rtimer != 0) {
+                       hw_ring->rtimer = 0;
+                       vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+               }
+               ring->interrupt_count = 0;
+       }
+}
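/*
 * Sketch of the adaptive scheme implemented above: the interrupt count is
 * sampled over 10 ms windows (HZ / 100); when it exceeds the Titan-1A
 * threshold the restriction timer is raised to the *_RTIMER_ADAPT_VAL, and
 * once the rate drops back the timer is returned to 0. The counter restarts
 * every window.
 */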
+
 /*
  *  vxge_isr_napi
  *  @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
        struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+       adaptive_coalesce_tx_interrupts(fifo);
+
+       vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+                                 fifo->tx_vector_no);
+
+       vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+                                  fifo->tx_vector_no);
+
        VXGE_COMPLETE_VPATH_TX(fifo);
 
+       vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+                                   fifo->tx_vector_no);
+
+       mmiowb();
+
        return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
        struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-       /* MSIX_IDX for Rx is 1 */
+       adaptive_coalesce_rx_interrupts(ring);
+
        vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
-                                       ring->rx_vector_no);
+                                 ring->rx_vector_no);
+
+       vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+                                  ring->rx_vector_no);
 
        napi_schedule(&ring->napi);
        return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
                VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
+               /* Reduce the chance of losing alarm interrupts by masking
+                * the vector. A pending bit will be set if an alarm is
+                * generated and on unmask the interrupt will be fired.
+                */
                vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+               vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+               mmiowb();
 
                status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
                        vdev->exec_mode);
                if (status == VXGE_HW_OK) {
-
                        vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-                                       msix_id);
+                                                 msix_id);
+                       mmiowb();
                        continue;
                }
                vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
                        vpath->ring.rx_vector_no = (vpath->device_id *
                                                VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
+                       vpath->fifo.tx_vector_no = (vpath->device_id *
+                                               VXGE_HW_VPATH_MSIX_ACTIVE);
+
                        vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
                                               VXGE_ALARM_MSIX_ID);
                }
@@ -2474,8 +2592,9 @@ INTA_MODE:
                        "%s:vxge:INTA", vdev->ndev->name);
                vxge_hw_device_set_intr_type(vdev->devh,
                        VXGE_HW_INTR_MODE_IRQLINE);
-               vxge_hw_vpath_tti_ci_set(vdev->devh,
-                       vdev->vpaths[0].device_id);
+
+               vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
                ret = request_irq((int) vdev->pdev->irq,
                        vxge_isr_napi,
                        IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
        }
 
        netif_tx_start_all_queues(vdev->ndev);
+
+       /* configure CI */
+       vxge_config_ci_for_tti_rti(vdev);
+
        goto out0;
 
 out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
-static int __devinit vxge_device_revision(struct vxgedev *vdev)
-{
-       int ret;
-       u8 revision;
-
-       ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
-       if (ret)
-               return -EIO;
-
-       vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
-       return 0;
-}
-
 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                                          struct vxge_config *config,
                                          int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        memcpy(&vdev->config, config, sizeof(struct vxge_config));
        vdev->rx_csum = 1;      /* Enable Rx CSUM by default. */
        vdev->rx_hwts = 0;
-
-       ret = vxge_device_revision(vdev);
-       if (ret < 0)
-               goto _out1;
+       vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
 
        SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                vxge_debug_init(VXGE_ERR,
                        "%s: vpath memory allocation failed",
                        vdev->ndev->name);
-               ret = -ENODEV;
+               ret = -ENOMEM;
                goto _out1;
        }
 
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        if (vdev->config.gro_enable)
                ndev->features |= NETIF_F_GRO;
 
-       if (register_netdev(ndev)) {
+       ret = register_netdev(ndev);
+       if (ret) {
                vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                        "%s: %s : device registration failed!",
                        ndev->name, __func__);
-               ret = -ENODEV;
                goto _out2;
        }
 
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
 
+       kfree(vdev->vpaths);
+
+       /* we are safe to free it now */
+       free_netdev(dev);
+
        vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
                        buf);
        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
                break;
 
        case MSI_X:
-               device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+               device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
                break;
        }
 
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit1;
        }
 
-       if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+       ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+       if (ret) {
                vxge_debug_init(VXGE_ERR,
                        "%s : request regions failed", __func__);
-               ret = -ENODEV;
                goto _exit1;
        }
 
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                        if (!img[i].is_valid)
                                break;
                        vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
-                                       "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+                                       "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
                                        VXGE_EPROM_IMG_MAJOR(img[i].version),
                                        VXGE_EPROM_IMG_MINOR(img[i].version),
                                        VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
 _exit5:
        vxge_device_unregister(hldev);
 _exit4:
-       pci_disable_sriov(pdev);
+       pci_set_drvdata(pdev, NULL);
        vxge_hw_device_terminate(hldev);
+       pci_disable_sriov(pdev);
 _exit3:
        iounmap(attr.bar0);
 _exit2:
@@ -4655,7 +4768,7 @@ _exit0:
        kfree(ll_config);
        kfree(device_config);
        driver_config->config_dev_cnt--;
-       pci_set_drvdata(pdev, NULL);
+       driver_config->total_dev_cnt--;
        return ret;
 }
 
@@ -4668,45 +4781,34 @@ _exit0:
 static void __devexit vxge_remove(struct pci_dev *pdev)
 {
        struct __vxge_hw_device *hldev;
-       struct vxgedev *vdev = NULL;
-       struct net_device *dev;
-       int i = 0;
+       struct vxgedev *vdev;
+       int i;
 
        hldev = pci_get_drvdata(pdev);
-
        if (hldev == NULL)
                return;
 
-       dev = hldev->ndev;
-       vdev = netdev_priv(dev);
+       vdev = netdev_priv(hldev->ndev);
 
        vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
-
        vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
                        __func__);
-       vxge_device_unregister(hldev);
 
-       for (i = 0; i < vdev->no_of_vpath; i++) {
+       for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
-               vdev->vpaths[i].mcast_addr_cnt = 0;
-               vdev->vpaths[i].mac_addr_cnt = 0;
-       }
-
-       kfree(vdev->vpaths);
 
+       vxge_device_unregister(hldev);
+       pci_set_drvdata(pdev, NULL);
+       /* Do not call pci_disable_sriov here, as it will break child devices */
+       vxge_hw_device_terminate(hldev);
        iounmap(vdev->bar0);
-
-       /* we are safe to free it now */
-       free_netdev(dev);
+       pci_release_region(pdev, 0);
+       pci_disable_device(pdev);
+       driver_config->config_dev_cnt--;
+       driver_config->total_dev_cnt--;
 
        vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
                        __func__, __LINE__);
-
-       vxge_hw_device_terminate(hldev);
-
-       pci_disable_device(pdev);
-       pci_release_region(pdev, 0);
-       pci_set_drvdata(pdev, NULL);
        vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
                             __LINE__);
 }
index 5746fed..40474f0 100644 (file)
 #define VXGE_TTI_LTIMER_VAL    1000
 #define VXGE_T1A_TTI_LTIMER_VAL        80
 #define VXGE_TTI_RTIMER_VAL    0
+#define VXGE_TTI_RTIMER_ADAPT_VAL      10
 #define VXGE_T1A_TTI_RTIMER_VAL        400
 #define VXGE_RTI_BTIMER_VAL    250
 #define VXGE_RTI_LTIMER_VAL    100
 #define VXGE_RTI_RTIMER_VAL    0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL      15
+#define VXGE_FIFO_INDICATE_MAX_PKTS    VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT   8
 #define VXGE_MAX_CONFIG_DEV    0xFF
 #define VXGE_EXEC_MODE_DISABLE 0
 #define RTI_T1A_RX_UFC_C       50
 #define RTI_T1A_RX_UFC_D       60
 
+/*
+ * With the moderation parameters the interrupt rate is maintained at 3k per
+ * second for most, but not all, traffic. The values below are the maximum
+ * interrupt count allowed in a 10 millisecond period, per function with INTA
+ * or per vector with MSI-X. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT   100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT        200
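/*
 * Worked out: 100 interrupts per 10 ms window is roughly 10,000 Rx
 * interrupts/s and 200 per window roughly 20,000 Tx interrupts/s; only above
 * these rates do adaptive_coalesce_rx/tx_interrupts() switch to the
 * *_RTIMER_ADAPT_VAL restriction timers.
 */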
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY               10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
        int tx_steering_type;
        int indicate_max_pkts;
 
+       /* Adaptive interrupt moderation parameters used in T1A */
+       unsigned long interrupt_count;
+       unsigned long jiffies;
+
+       u32 tx_vector_no;
        /* Tx stats */
        struct vxge_fifo_stats stats;
 } ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
         */
        int driver_id;
 
+       /* Adaptive interrupt moderation parameters used in T1A */
+       unsigned long interrupt_count;
+       unsigned long jiffies;
+
        /* copy of the flag indicating whether rx_csum is to be used */
        u32 rx_csum:1,
            rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
 
        int vlan_tag_strip;
        struct vlan_group *vlgrp;
-       int rx_vector_no;
+       u32 rx_vector_no;
        enum vxge_hw_status last_status;
 
        /* Rx stats */
index 4c10d6c..8674f33 100644 (file)
@@ -218,6 +218,68 @@ exit:
        return status;
 }
 
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct vxge_hw_vp_config *config;
+       u64 val64;
+
+       if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+               return;
+
+       vp_reg = fifo->vp_reg;
+       config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+       if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+               config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+               fifo->tim_tti_cfg1_saved = val64;
+               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+       }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+       u64 val64 = ring->tim_rti_cfg1_saved;
+
+       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+       ring->tim_rti_cfg1_saved = val64;
+       writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+       u64 val64 = fifo->tim_tti_cfg3_saved;
+       u64 timer = (fifo->rtimer * 1000) / 272;
+
+       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+       if (timer)
+               val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+                       VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+       writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+       /* tti_cfg3_saved is not updated again because it is
+        * initialized at one place only - init time.
+        */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+       u64 val64 = ring->tim_rti_cfg3_saved;
+       u64 timer = (ring->rtimer * 1000) / 272;
+
+       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+       if (timer)
+               val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+                       VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+       writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+       /* rti_cfg3_saved is not updated again because it is
+        * initialized at one place only - init time.
+        */
+}
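/*
 * The (rtimer * 1000) / 272 conversion above presumably maps a value given in
 * microseconds onto the TIM block's timer ticks of roughly 272 ns each; this
 * is an inference from the constant, not something stated in this hunk.
 */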
+
 /**
  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  * @channeh: Channel for rx or tx handle
@@ -253,6 +315,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
                &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
 }
 
+/**
+ * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id:  MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id by writing
+ * the one-shot clear register (used when configured in MSIX one-shot mode)
+ *
+ * Returns: None
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+       __vxge_hw_pio_mem_write32_upper(
+               (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+               &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
 /**
  * vxge_hw_device_set_intr_type - Updates the configuration
  *             with new interrupt type.
@@ -2190,20 +2269,15 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
 
        if (vpath->hldev->config.intr_mode ==
                                        VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+                               VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+                               0, 32), &vp_reg->one_shot_vect0_en);
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
                                0, 32), &vp_reg->one_shot_vect1_en);
-       }
-
-       if (vpath->hldev->config.intr_mode ==
-               VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
                                0, 32), &vp_reg->one_shot_vect2_en);
-
-               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
-                               VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
-                               0, 32), &vp_reg->one_shot_vect3_en);
        }
 }
 
@@ -2228,6 +2302,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
                &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
 }
 
+/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id:  MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: None
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+       if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+               __vxge_hw_pio_mem_write32_upper(
+                       (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+                       &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+       else
+               __vxge_hw_pio_mem_write32_upper(
+                       (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+                       &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
 /**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
index d48486d..9d9dfda 100644 (file)
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
  *  Virtual Paths
  */
 
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
 u32 vxge_hw_vpath_id(
        struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2245,6 +2249,8 @@ void
 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
                        int msix_id);
 
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
 void
@@ -2269,6 +2275,9 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
 void
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
+void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
 void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
                                 void **dtrh);
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
 
 #endif
index ad2f99b..581e215 100644 (file)
@@ -16,8 +16,8 @@
 
 #define VXGE_VERSION_MAJOR     "2"
 #define VXGE_VERSION_MINOR     "5"
-#define VXGE_VERSION_FIX       "1"
-#define VXGE_VERSION_BUILD     "22082"
+#define VXGE_VERSION_FIX       "2"
+#define VXGE_VERSION_BUILD     "22259"
 #define VXGE_VERSION_FOR       "k"
 
 #define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
index b4338f3..7aeb113 100644 (file)
@@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
 source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/ipw2x00/Kconfig"
 source "drivers/net/wireless/iwlwifi/Kconfig"
+source "drivers/net/wireless/iwlegacy/Kconfig"
 source "drivers/net/wireless/iwmc3200wifi/Kconfig"
 source "drivers/net/wireless/libertas/Kconfig"
 source "drivers/net/wireless/orinoco/Kconfig"
index 9760561..ddd3fb6 100644 (file)
@@ -24,7 +24,7 @@ obj-$(CONFIG_B43LEGACY)               += b43legacy/
 obj-$(CONFIG_ZD1211RW)         += zd1211rw/
 obj-$(CONFIG_RTL8180)          += rtl818x/
 obj-$(CONFIG_RTL8187)          += rtl818x/
-obj-$(CONFIG_RTL8192CE)                += rtlwifi/
+obj-$(CONFIG_RTLWIFI)          += rtlwifi/
 
 # 16-bit wireless PCMCIA client drivers
 obj-$(CONFIG_PCMCIA_RAYCS)     += ray_cs.o
@@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
 
 obj-$(CONFIG_MWL8K)    += mwl8k.o
 
-obj-$(CONFIG_IWLWIFI)  += iwlwifi/
+obj-$(CONFIG_IWLAGN)   += iwlwifi/
+obj-$(CONFIG_IWLWIFI_LEGACY)   += iwlegacy/
 obj-$(CONFIG_RT2X00)   += rt2x00/
 
 obj-$(CONFIG_P54_COMMON)       += p54/
index f9aa1bc..afe2cbc 100644 (file)
@@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct adm8211_tx_hdr *txhdr;
        size_t payload_len, hdrlen;
@@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        txhdr->retry_limit = info->control.rates[0].count;
 
        adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
-
-       return NETDEV_TX_OK;
 }
 
 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
index 1476314..2986014 100644 (file)
@@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb)
        ieee80211_wake_queues(priv->hw);
 }
 
-static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct at76_priv *priv = hw->priv;
        struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
@@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (priv->tx_urb->status == -EINPROGRESS) {
                wiphy_err(priv->hw->wiphy,
                          "%s called while tx urb is pending\n", __func__);
-               return NETDEV_TX_BUSY;
+               dev_kfree_skb_any(skb);
+               return;
        }
 
        /* The following code lines are important when the device is going to
@@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
                        memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
                        ieee80211_queue_work(hw, &priv->work_join_bssid);
-                       return NETDEV_TX_BUSY;
+                       dev_kfree_skb_any(skb);
+                       return;
                }
        }
 
@@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                                  priv->tx_urb,
                                  priv->tx_urb->hcpriv, priv->tx_urb->complete);
        }
-
-       return 0;
 }
 
 static int at76_mac80211_start(struct ieee80211_hw *hw)
index 4a37447..f14a654 100644 (file)
@@ -290,7 +290,7 @@ struct mib_mac_mgmt {
        u8 res;
        u8 multi_domain_capability_implemented;
        u8 multi_domain_capability_enabled;
-       u8 country_string[3];
+       u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
        u8 reserved[3];
 } __packed;
 
index d7a4799..7b9672b 100644 (file)
@@ -1,8 +1,10 @@
 config AR9170_USB
-       tristate "Atheros AR9170 802.11n USB support"
+       tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
        depends on USB && MAC80211
        select FW_LOADER
        help
+         This driver is going to get replaced by carl9170.
+
          This is a driver for the Atheros "otus" 802.11n USB devices.
 
          These devices require additional firmware (2 files).
index 4f845f8..371e4ce 100644 (file)
@@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 int ar9170_nag_limiter(struct ar9170 *ar);
 
 /* MAC */
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 int ar9170_init_mac(struct ar9170 *ar);
 int ar9170_set_qos(struct ar9170 *ar);
 int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
index 32bf79e..b761fec 100644 (file)
@@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar)
                                     msecs_to_jiffies(AR9170_JANITOR_DELAY));
 }
 
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
@@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        skb_queue_tail(&ar->tx_pending[queue], skb);
 
        ar9170_tx(ar);
-       return NETDEV_TX_OK;
+       return;
 
 err_free:
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 static int ar9170_op_add_interface(struct ieee80211_hw *hw,
@@ -1945,7 +1944,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
 static int ar9170_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                              u8 buf_size)
 {
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
index e43210c..a6c6a46 100644 (file)
@@ -108,12 +108,14 @@ enum ath_cipher {
  * struct ath_ops - Register read/write operations
  *
  * @read: Register read
+ * @multi_read: Multiple register read
  * @write: Register write
  * @enable_write_buffer: Enable multiple register writes
  * @write_flush: flush buffered register writes and disable buffering
  */
 struct ath_ops {
        unsigned int (*read)(void *, u32 reg_offset);
+       void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
        void (*write)(void *, u32 val, u32 reg_offset);
        void (*enable_write_buffer)(void *);
        void (*write_flush) (void *);
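The new multi_read callback gives bus back-ends a hook for burst register reads. A trivial fallback that a back-end without native burst support could register looks roughly like the following (illustrative only; example_single_read() stands in for the back-end's existing single-register read routine):

static void example_multi_read(void *hw_priv, u32 *addr, u32 *val, u16 count)
{
	u16 i;

	/* no burst primitive: issue one single-register read per address */
	for (i = 0; i < count; i++)
		val[i] = example_single_read(hw_priv, addr[i]);
}
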
index e079331..e18a9aa 100644 (file)
@@ -40,6 +40,17 @@ config ATH5K_DEBUG
 
          modprobe ath5k debug=0x00000400
 
+config ATH5K_TRACER
+       bool "Atheros 5xxx tracer"
+       depends on ATH5K
+       depends on EVENT_TRACING
+       ---help---
+         Say Y here to enable tracepoints for the ath5k driver
+         using the kernel tracing infrastructure.  Select this
+         option if you are interested in debugging the driver.
+
+         If unsure, say N.
+
 config ATH5K_AHB
        bool "Atheros 5xxx AHB bus support"
        depends on (ATHEROS_AR231X && !PCI)
index 707cde1..82324e9 100644 (file)
@@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
        *csz = L1_CACHE_BYTES >> 2;
 }
 
-bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 {
        struct ath5k_softc *sc = common->priv;
        struct platform_device *pdev = to_platform_device(sc->dev);
@@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 
        eeprom += off;
        if (eeprom > eeprom_end)
-               return -EINVAL;
+               return false;
 
        *data = *eeprom;
-       return 0;
+       return true;
 }
 
 int ath5k_hw_read_srev(struct ath5k_hw *ah)
@@ -92,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
                goto err_out;
        }
 
-       mem = ioremap_nocache(res->start, res->end - res->start + 1);
+       mem = ioremap_nocache(res->start, resource_size(res));
        if (mem == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
index 407e39c..8a06dbd 100644 (file)
 /* Initial values */
 #define        AR5K_INIT_CYCRSSI_THR1                  2
 
-/* Tx retry limits */
-#define AR5K_INIT_SH_RETRY                     10
-#define AR5K_INIT_LG_RETRY                     AR5K_INIT_SH_RETRY
-/* For station mode */
-#define AR5K_INIT_SSH_RETRY                    32
-#define AR5K_INIT_SLG_RETRY                    AR5K_INIT_SSH_RETRY
-#define AR5K_INIT_TX_RETRY                     10
-
+/* Tx retry limit defaults from standard */
+#define AR5K_INIT_RETRY_SHORT                  7
+#define AR5K_INIT_RETRY_LONG                   4
 
 /* Slot time */
 #define AR5K_INIT_SLOT_TIME_TURBO              6
@@ -518,7 +513,7 @@ enum ath5k_tx_queue_id {
        AR5K_TX_QUEUE_ID_NOQCU_DATA     = 0,
        AR5K_TX_QUEUE_ID_NOQCU_BEACON   = 1,
        AR5K_TX_QUEUE_ID_DATA_MIN       = 0, /*IEEE80211_TX_QUEUE_DATA0*/
-       AR5K_TX_QUEUE_ID_DATA_MAX       = 4, /*IEEE80211_TX_QUEUE_DATA4*/
+       AR5K_TX_QUEUE_ID_DATA_MAX       = 3, /*IEEE80211_TX_QUEUE_DATA3*/
        AR5K_TX_QUEUE_ID_DATA_SVP       = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
        AR5K_TX_QUEUE_ID_CAB            = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
        AR5K_TX_QUEUE_ID_BEACON         = 7, /*IEEE80211_TX_QUEUE_BEACON*/
@@ -1057,7 +1052,9 @@ struct ath5k_hw {
 #define ah_modes               ah_capabilities.cap_mode
 #define ah_ee_version          ah_capabilities.cap_eeprom.ee_version
 
-       u32                     ah_limit_tx_retries;
+       u8                      ah_retry_long;
+       u8                      ah_retry_short;
+
        u8                      ah_coverage_class;
        bool                    ah_ack_bitrate_high;
        u8                      ah_bwmode;
@@ -1067,7 +1064,6 @@ struct ath5k_hw {
        u8                      ah_ant_mode;
        u8                      ah_tx_ant;
        u8                      ah_def_ant;
-       bool                    ah_software_retry;
 
        struct ath5k_capabilities ah_capabilities;
 
@@ -1162,6 +1158,26 @@ void ath5k_hw_deinit(struct ath5k_hw *ah);
 int ath5k_sysfs_register(struct ath5k_softc *sc);
 void ath5k_sysfs_unregister(struct ath5k_softc *sc);
 
+/* base.c */
+struct ath5k_buf;
+struct ath5k_txq;
+
+void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+bool ath_any_vif_assoc(struct ath5k_softc *sc);
+void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+                   struct ath5k_txq *txq);
+int ath5k_init_hw(struct ath5k_softc *sc);
+int ath5k_stop_hw(struct ath5k_softc *sc);
+void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+                                       struct ieee80211_vif *vif);
+int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
+void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_config(struct ath5k_softc *sc);
+void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+
 /*Chip id helper functions */
 const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
 int ath5k_hw_read_srev(struct ath5k_hw *ah);
@@ -1250,6 +1266,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
 int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
                            enum ath5k_tx_queue queue_type,
                            struct ath5k_txq_info *queue_info);
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+                                 unsigned int queue);
 u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
 void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
 int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
index cdac5cf..bc82405 100644 (file)
@@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
        ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
        ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
        ah->ah_imr = 0;
-       ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
-       ah->ah_software_retry = false;
+       ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
+       ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
        ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
        ah->ah_noise_floor = -95;       /* until first NF calibration is run */
        sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
@@ -220,7 +220,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
                        ah->ah_radio = AR5K_RF5112;
                        ah->ah_single_chip = false;
                        ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
-               } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
+               } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
+                       ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
                        ah->ah_radio = AR5K_RF2316;
                        ah->ah_single_chip = true;
                        ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
index 09ae4ef..4d7f21e 100644 (file)
@@ -61,6 +61,9 @@
 #include "debug.h"
 #include "ani.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 int ath5k_modparam_nohwcrypt;
 module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
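Defining CREATE_TRACE_POINTS before including trace.h follows the standard kernel tracing convention: exactly one translation unit instantiates the tracepoint bodies, and every other caller just includes the header and calls the trace_* helpers. A sketch of a call site, with an illustrative function name (trace_ath5k_rx() collapses to an empty inline when CONFIG_ATH5K_TRACER is not set, per the trace.h stub near the end of this diff):

#define CREATE_TRACE_POINTS	/* in one .c file only */
#include "trace.h"

static void example_note_rx(struct ath5k_softc *sc, struct sk_buff *skb)
{
	trace_ath5k_rx(sc, skb);
}
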
@@ -241,74 +244,69 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
 * Channel/mode setup *
 \********************/
 
-/*
- * Convert IEEE channel number to MHz frequency.
- */
-static inline short
-ath5k_ieee2mhz(short chan)
-{
-       if (chan <= 14 || chan >= 27)
-               return ieee80211chan2mhz(chan);
-       else
-               return 2212 + chan * 20;
-}
-
 /*
  * Returns true for the channel numbers used without all_channels modparam.
  */
-static bool ath5k_is_standard_channel(short chan)
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
 {
-       return ((chan <= 14) ||
-               /* UNII 1,2 */
-               ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+       if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+               return true;
+
+       return  /* UNII 1,2 */
+               (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
                /* midband */
                ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
                /* UNII-3 */
-               ((chan & 3) == 1 && chan >= 149 && chan <= 165));
+               ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
+               /* 802.11j 5.030-5.080 GHz (20MHz) */
+               (chan == 8 || chan == 12 || chan == 16) ||
+               /* 802.11j 4.9GHz (20MHz) */
+               (chan == 184 || chan == 188 || chan == 192 || chan == 196));
 }
 
 static unsigned int
-ath5k_copy_channels(struct ath5k_hw *ah,
-               struct ieee80211_channel *channels,
-               unsigned int mode,
-               unsigned int max)
+ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
+               unsigned int mode, unsigned int max)
 {
-       unsigned int i, count, size, chfreq, freq, ch;
-
-       if (!test_bit(mode, ah->ah_modes))
-               return 0;
+       unsigned int count, size, chfreq, freq, ch;
+       enum ieee80211_band band;
 
        switch (mode) {
        case AR5K_MODE_11A:
                /* 1..220, but 2GHz frequencies are filtered by check_channel */
-               size = 220 ;
+               size = 220;
                chfreq = CHANNEL_5GHZ;
+               band = IEEE80211_BAND_5GHZ;
                break;
        case AR5K_MODE_11B:
        case AR5K_MODE_11G:
                size = 26;
                chfreq = CHANNEL_2GHZ;
+               band = IEEE80211_BAND_2GHZ;
                break;
        default:
                ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
                return 0;
        }
 
-       for (i = 0, count = 0; i < size && max > 0; i++) {
-               ch = i + 1 ;
-               freq = ath5k_ieee2mhz(ch);
+       count = 0;
+       for (ch = 1; ch <= size && count < max; ch++) {
+               freq = ieee80211_channel_to_frequency(ch, band);
+
+               if (freq == 0) /* mapping failed - not a standard channel */
+                       continue;
 
                /* Check if channel is supported by the chipset */
                if (!ath5k_channel_ok(ah, freq, chfreq))
                        continue;
 
-               if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
+               if (!modparam_all_channels &&
+                   !ath5k_is_standard_channel(ch, band))
                        continue;
 
                /* Write channel info and increment counter */
                channels[count].center_freq = freq;
-               channels[count].band = (chfreq == CHANNEL_2GHZ) ?
-                       IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+               channels[count].band = band;
                switch (mode) {
                case AR5K_MODE_11A:
                case AR5K_MODE_11G:
@@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
                }
 
                count++;
-               max--;
        }
 
        return count;
@@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_bitrates = 12;
 
                sband->channels = sc->channels;
-               sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+               sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11G, max_c);
 
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                }
 
                sband->channels = sc->channels;
-               sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+               sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11B, max_c);
 
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_bitrates = 8;
 
                sband->channels = &sc->channels[count_c];
-               sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+               sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11A, max_c);
 
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -445,31 +442,9 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
        return ath5k_reset(sc, chan, true);
 }
 
-static void
-ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
-{
-       sc->curmode = mode;
-
-       if (mode == AR5K_MODE_11A) {
-               sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
-       } else {
-               sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
-       }
-}
-
-struct ath_vif_iter_data {
-       const u8        *hw_macaddr;
-       u8              mask[ETH_ALEN];
-       u8              active_mac[ETH_ALEN]; /* first active MAC */
-       bool            need_set_hw_addr;
-       bool            found_active;
-       bool            any_assoc;
-       enum nl80211_iftype opmode;
-};
-
-static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 {
-       struct ath_vif_iter_data *iter_data = data;
+       struct ath5k_vif_iter_data *iter_data = data;
        int i;
        struct ath5k_vif *avf = (void *)vif->drv_priv;
 
@@ -499,9 +474,12 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
         */
        if (avf->opmode == NL80211_IFTYPE_AP)
                iter_data->opmode = NL80211_IFTYPE_AP;
-       else
+       else {
+               if (avf->opmode == NL80211_IFTYPE_STATION)
+                       iter_data->n_stas++;
                if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
                        iter_data->opmode = avf->opmode;
+       }
 }
 
 void
@@ -509,7 +487,8 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
                                   struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath5k_hw_common(sc->ah);
-       struct ath_vif_iter_data iter_data;
+       struct ath5k_vif_iter_data iter_data;
+       u32 rfilt;
 
        /*
         * Use the hardware MAC address as reference, the hardware uses it
@@ -520,12 +499,13 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
        iter_data.found_active = false;
        iter_data.need_set_hw_addr = true;
        iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
+       iter_data.n_stas = 0;
 
        if (vif)
-               ath_vif_iter(&iter_data, vif->addr, vif);
+               ath5k_vif_iter(&iter_data, vif->addr, vif);
 
        /* Get list of all active MAC addresses */
-       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
+       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
                                                   &iter_data);
        memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
 
@@ -543,20 +523,19 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
 
        if (ath5k_hw_hasbssidmask(sc->ah))
                ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
-}
 
-void
-ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
-{
-       struct ath5k_hw *ah = sc->ah;
-       u32 rfilt;
+       /* Set up RX Filter */
+       if (iter_data.n_stas > 1) {
+               /* If you have multiple STA interfaces connected to
+                * different APs, ARPs are not received (most of the time?)
+                * Enabling PROMISC appears to fix that problem.
+                */
+               sc->filter_flags |= AR5K_RX_FILTER_PROM;
+       }
 
-       /* configure rx filter */
        rfilt = sc->filter_flags;
-       ath5k_hw_set_rx_filter(ah, rfilt);
+       ath5k_hw_set_rx_filter(sc->ah, rfilt);
        ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
-
-       ath5k_update_bssid_mask_and_opmode(sc, vif);
 }
 
 static inline int
@@ -569,7 +548,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
                        "hw_rix out of bounds: %x\n", hw_rix))
                return 0;
 
-       rix = sc->rate_idx[sc->curband->band][hw_rix];
+       rix = sc->rate_idx[sc->curchan->band][hw_rix];
        if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
                rix = 0;
 
@@ -964,6 +943,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
                spin_lock_init(&txq->lock);
                txq->setup = true;
                txq->txq_len = 0;
+               txq->txq_max = ATH5K_TXQ_LEN_MAX;
                txq->txq_poll_mark = false;
                txq->txq_stuck = 0;
        }
@@ -1132,7 +1112,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
        spin_unlock_bh(&sc->rxbuflock);
 
        ath5k_hw_start_rx_dma(ah);      /* enable recv descriptors */
-       ath5k_mode_setup(sc, NULL);             /* set filters, etc. */
+       ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
        ath5k_hw_start_rx_pcu(ah);      /* re-enable PCU/DMA engine */
 
        return 0;
@@ -1376,10 +1356,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
         * right now, so it's not too bad...
         */
        rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
-       rxs->flag |= RX_FLAG_TSFT;
+       rxs->flag |= RX_FLAG_MACTIME_MPDU;
 
        rxs->freq = sc->curchan->center_freq;
-       rxs->band = sc->curband->band;
+       rxs->band = sc->curchan->band;
 
        rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
 
@@ -1394,10 +1374,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
        rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
 
        if (rxs->rate_idx >= 0 && rs->rs_rate ==
-           sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+           sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
                rxs->flag |= RX_FLAG_SHORTPRE;
 
-       ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
+       trace_ath5k_rx(sc, skb);
 
        ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
 
@@ -1533,7 +1513,7 @@ unlock:
 * TX Handling *
 \*************/
 
-int
+void
 ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
               struct ath5k_txq *txq)
 {
@@ -1542,7 +1522,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
        unsigned long flags;
        int padsize;
 
-       ath5k_debug_dump_skb(sc, skb, "TX  ", 1);
+       trace_ath5k_tx(sc, skb, txq);
 
        /*
         * The hardware expects the header padded to 4 byte boundaries.
@@ -1555,7 +1535,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
                goto drop_packet;
        }
 
-       if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
+       if (txq->txq_len >= txq->txq_max)
                ieee80211_stop_queue(hw, txq->qnum);
 
        spin_lock_irqsave(&sc->txbuflock, flags);
@@ -1582,16 +1562,15 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
                spin_unlock_irqrestore(&sc->txbuflock, flags);
                goto drop_packet;
        }
-       return NETDEV_TX_OK;
+       return;
 
 drop_packet:
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 static void
 ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
-                        struct ath5k_tx_status *ts)
+                        struct ath5k_txq *txq, struct ath5k_tx_status *ts)
 {
        struct ieee80211_tx_info *info;
        int i;
@@ -1643,6 +1622,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
        else
                sc->stats.antenna_tx[0]++; /* invalid */
 
+       trace_ath5k_tx_complete(sc, skb, txq, ts);
        ieee80211_tx_status(sc->hw, skb);
 }
 
@@ -1679,7 +1659,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 
                        dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
                                        DMA_TO_DEVICE);
-                       ath5k_tx_frame_completed(sc, skb, &ts);
+                       ath5k_tx_frame_completed(sc, skb, txq, &ts);
                }
 
                /*
@@ -1821,8 +1801,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
                goto out;
        }
 
-       ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
-
        ath5k_txbuf_free_skb(sc, avf->bbuf);
        avf->bbuf->skb = skb;
        ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,6 +1895,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
                        sc->opmode == NL80211_IFTYPE_MESH_POINT)
                ath5k_beacon_update(sc->hw, vif);
 
+       trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
+
        ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
        ath5k_hw_start_tx_dma(ah, sc->bhalq);
        ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2417,7 +2397,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
        /* set up multi-rate retry capabilities */
        if (sc->ah->ah_version == AR5K_AR5212) {
                hw->max_rates = 4;
-               hw->max_rate_tries = 11;
+               hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
+                                        AR5K_INIT_RETRY_LONG);
        }
 
        hw->vif_data_size = sizeof(struct ath5k_vif);
@@ -2554,7 +2535,6 @@ ath5k_init_hw(struct ath5k_softc *sc)
         * and then setup of the interrupt mask.
         */
        sc->curchan = sc->hw->conf.channel;
-       sc->curband = &sc->sbands[sc->curchan->band];
        sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
                AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
                AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
@@ -2681,10 +2661,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
         * so we should also free any remaining
         * tx buffers */
        ath5k_drain_tx_buffs(sc);
-       if (chan) {
+       if (chan)
                sc->curchan = chan;
-               sc->curband = &sc->sbands[chan->band];
-       }
        ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
                                                                skip_pcu);
        if (ret) {
@@ -2782,12 +2760,6 @@ ath5k_init(struct ieee80211_hw *hw)
                goto err;
        }
 
-       /* NB: setup here so ath5k_rate_update is happy */
-       if (test_bit(AR5K_MODE_11A, ah->ah_modes))
-               ath5k_setcurmode(sc, AR5K_MODE_11A);
-       else
-               ath5k_setcurmode(sc, AR5K_MODE_11B);
-
        /*
         * Allocate tx+rx descriptors and populate the lists.
         */
@@ -2946,13 +2918,13 @@ ath5k_deinit_softc(struct ath5k_softc *sc)
 bool
 ath_any_vif_assoc(struct ath5k_softc *sc)
 {
-       struct ath_vif_iter_data iter_data;
+       struct ath5k_vif_iter_data iter_data;
        iter_data.hw_macaddr = NULL;
        iter_data.any_assoc = false;
        iter_data.need_set_hw_addr = false;
        iter_data.found_active = true;
 
-       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
+       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
                                                   &iter_data);
        return iter_data.any_assoc;
 }
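The channel setup above now relies on mac80211's ieee80211_channel_to_frequency(), which takes the band and returns 0 when the channel number has no standard mapping, so the loop simply skips such channels rather than synthesizing a frequency the way the removed ath5k_ieee2mhz() helper did. A small illustration of that contract (hypothetical helper, assuming <net/mac80211.h>):

#include <net/mac80211.h>

/* channel 36 on the 5 GHz band maps to 5180 MHz, for example, while an
 * unmapped channel number yields 0 and should be skipped */
static bool example_channel_has_mapping(int chan, enum ieee80211_band band)
{
	return ieee80211_channel_to_frequency(chan, band) != 0;
}
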
index 6d51147..978f1f4 100644 (file)
@@ -86,6 +86,7 @@ struct ath5k_txq {
        spinlock_t              lock;   /* lock on q and link */
        bool                    setup;
        int                     txq_len; /* number of queued buffers */
+       int                     txq_max; /* max allowed num of queued buffers */
        bool                    txq_poll_mark;
        unsigned int            txq_stuck;      /* informational counter */
 };
@@ -183,8 +184,6 @@ struct ath5k_softc {
        enum nl80211_iftype     opmode;
        struct ath5k_hw         *ah;            /* Atheros HW */
 
-       struct ieee80211_supported_band         *curband;
-
 #ifdef CONFIG_ATH5K_DEBUG
        struct ath5k_dbg_info   debug;          /* debug info */
 #endif /* CONFIG_ATH5K_DEBUG */
@@ -202,7 +201,6 @@ struct ath5k_softc {
 #define ATH_STAT_STARTED       4               /* opened & irqs enabled */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
-       unsigned int            curmode;        /* current phy mode */
        struct ieee80211_channel *curchan;      /* current h/w channel */
 
        u16                     nvifs;
@@ -262,6 +260,19 @@ struct ath5k_softc {
        struct survey_info      survey;         /* collected survey info */
 };
 
+struct ath5k_vif_iter_data {
+       const u8        *hw_macaddr;
+       u8              mask[ETH_ALEN];
+       u8              active_mac[ETH_ALEN]; /* first active MAC */
+       bool            need_set_hw_addr;
+       bool            found_active;
+       bool            any_assoc;
+       enum nl80211_iftype opmode;
+       int n_stas;
+};
+void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
+
+
 #define ath5k_hw_hasbssidmask(_ah) \
        (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
 #define ath5k_hw_hasveol(_ah) \
index 31cad80..f77e8a7 100644 (file)
  */
 int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
 {
+       struct ath5k_capabilities *caps = &ah->ah_capabilities;
        u16 ee_header;
 
        /* Capabilities stored in the EEPROM */
-       ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
+       ee_header = caps->cap_eeprom.ee_header;
 
        if (ah->ah_version == AR5K_AR5210) {
                /*
                 * Set radio capabilities
                 * (The AR5110 only supports the middle 5GHz band)
                 */
-               ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
-               ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
-               ah->ah_capabilities.cap_range.range_2ghz_min = 0;
-               ah->ah_capabilities.cap_range.range_2ghz_max = 0;
+               caps->cap_range.range_5ghz_min = 5120;
+               caps->cap_range.range_5ghz_max = 5430;
+               caps->cap_range.range_2ghz_min = 0;
+               caps->cap_range.range_2ghz_max = 0;
 
                /* Set supported modes */
-               __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
+               __set_bit(AR5K_MODE_11A, caps->cap_mode);
        } else {
                /*
                 * XXX The transceiver supports frequencies from 4920 to 6100MHz
@@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                 * XXX current ieee80211 implementation because the IEEE
                 * XXX channel mapping does not support negative channel
                 * XXX numbers (2312MHz is channel -19). Of course, this
-                * XXX doesn't matter because these channels are out of range
-                * XXX but some regulation domains like MKK (Japan) will
-                * XXX support frequencies somewhere around 4.8GHz.
+                * XXX doesn't matter because these channels are out of the
+                * XXX legal range.
                 */
 
                /*
@@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                 */
 
                if (AR5K_EEPROM_HDR_11A(ee_header)) {
-                       /* 4920 */
-                       ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
-                       ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
+                       if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
+                               caps->cap_range.range_5ghz_min = 4920;
+                       else
+                               caps->cap_range.range_5ghz_min = 5005;
+                       caps->cap_range.range_5ghz_max = 6100;
 
                        /* Set supported modes */
-                       __set_bit(AR5K_MODE_11A,
-                                       ah->ah_capabilities.cap_mode);
+                       __set_bit(AR5K_MODE_11A, caps->cap_mode);
                }
 
                /* Enable  802.11b if a 2GHz capable radio (2111/5112) is
@@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                    (AR5K_EEPROM_HDR_11G(ee_header) &&
                     ah->ah_version != AR5K_AR5211)) {
                        /* 2312 */
-                       ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
-                       ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
+                       caps->cap_range.range_2ghz_min = 2412;
+                       caps->cap_range.range_2ghz_max = 2732;
 
                        if (AR5K_EEPROM_HDR_11B(ee_header))
-                               __set_bit(AR5K_MODE_11B,
-                                               ah->ah_capabilities.cap_mode);
+                               __set_bit(AR5K_MODE_11B, caps->cap_mode);
 
                        if (AR5K_EEPROM_HDR_11G(ee_header) &&
                            ah->ah_version != AR5K_AR5211)
-                               __set_bit(AR5K_MODE_11G,
-                                               ah->ah_capabilities.cap_mode);
+                               __set_bit(AR5K_MODE_11G, caps->cap_mode);
                }
        }
 
        /* Set number of supported TX queues */
        if (ah->ah_version == AR5K_AR5210)
-               ah->ah_capabilities.cap_queues.q_tx_num =
-                       AR5K_NUM_TX_QUEUES_NOQCU;
+               caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
        else
-               ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+               caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
 
        /* newer hardware has PHY error counters */
        if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
-               ah->ah_capabilities.cap_has_phyerr_counters = true;
+               caps->cap_has_phyerr_counters = true;
        else
-               ah->ah_capabilities.cap_has_phyerr_counters = false;
+               caps->cap_has_phyerr_counters = false;
 
        return 0;
 }
index d2f84d7..0230f30 100644 (file)
@@ -308,8 +308,6 @@ static const struct {
        { ATH5K_DEBUG_CALIBRATE, "calib",       "periodic calibration" },
        { ATH5K_DEBUG_TXPOWER,  "txpower",      "transmit power setting" },
        { ATH5K_DEBUG_LED,      "led",          "LED management" },
-       { ATH5K_DEBUG_DUMP_RX,  "dumprx",       "print received skb content" },
-       { ATH5K_DEBUG_DUMP_TX,  "dumptx",       "print transmit skb content" },
        { ATH5K_DEBUG_DUMPBANDS, "dumpbands",   "dump bands" },
        { ATH5K_DEBUG_DMA,      "dma",          "dma start/stop" },
        { ATH5K_DEBUG_ANI,      "ani",          "adaptive noise immunity" },
@@ -1035,24 +1033,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
        spin_unlock_bh(&sc->rxbuflock);
 }
 
-void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
-                       struct sk_buff *skb, const char *prefix, int tx)
-{
-       char buf[16];
-
-       if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
-                    (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
-               return;
-
-       snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
-
-       print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
-               min(200U, skb->len));
-
-       printk(KERN_DEBUG "\n");
-}
-
 void
 ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
 {
index 3e34428..b0355ae 100644 (file)
@@ -116,8 +116,6 @@ enum ath5k_debug_level {
        ATH5K_DEBUG_CALIBRATE   = 0x00000020,
        ATH5K_DEBUG_TXPOWER     = 0x00000040,
        ATH5K_DEBUG_LED         = 0x00000080,
-       ATH5K_DEBUG_DUMP_RX     = 0x00000100,
-       ATH5K_DEBUG_DUMP_TX     = 0x00000200,
        ATH5K_DEBUG_DUMPBANDS   = 0x00000400,
        ATH5K_DEBUG_DMA         = 0x00000800,
        ATH5K_DEBUG_ANI         = 0x00002000,
@@ -151,10 +149,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
 void
 ath5k_debug_dump_bands(struct ath5k_softc *sc);
 
-void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
-                       struct sk_buff *skb, const char *prefix, int tx);
-
 void
 ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
 
@@ -181,10 +175,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
 static inline void
 ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
 
-static inline void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
-                       struct sk_buff *skb, const char *prefix, int tx) {}
-
 static inline void
 ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
 
index 0064be7..21091c2 100644 (file)
@@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah)
        for (i = 0; i < qmax; i++) {
                err = ath5k_hw_stop_tx_dma(ah, i);
                /* -EINVAL -> queue inactive */
-               if (err != -EINVAL)
+               if (err && err != -EINVAL)
                        return err;
        }
 
-       return err;
+       return 0;
 }
index 80e6256..b6561f7 100644 (file)
@@ -72,7 +72,6 @@ static int
 ath5k_eeprom_init_header(struct ath5k_hw *ah)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-       int ret;
        u16 val;
        u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
 
@@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 o = *offset;
        u16 val;
-       int ret, i = 0;
+       int i = 0;
 
        AR5K_EEPROM_READ(o++, val);
        ee->ee_switch_settling[mode]    = (val >> 8) & 0x7f;
@@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 o = *offset;
        u16 val;
-       int ret;
 
        ee->ee_n_piers[mode] = 0;
        AR5K_EEPROM_READ(o++, val);
@@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
        int o = *offset;
        int i = 0;
        u8 freq1, freq2;
-       int ret;
        u16 val;
 
        ee->ee_n_piers[mode] = 0;
@@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
-       int i, ret;
+       int i;
        u16 val;
        u8 mask;
 
@@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
        u32 offset;
        u8 i, c;
        u16 val;
-       int ret;
        u8 pd_gains = 0;
 
        /* Count how many curves we have and
@@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
        struct ath5k_chan_pcal_info *chinfo;
        u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
        u32 offset;
-       int idx, i, ret;
+       int idx, i;
        u16 val;
        u8 pd_gains = 0;
 
@@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
        u8 *rate_target_pwr_num;
        u32 offset;
        u16 val;
-       int ret, i;
+       int i;
 
        offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
        rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
        struct ath5k_edge_power *rep;
        unsigned int fmask, pmask;
        unsigned int ctl_mode;
-       int ret, i, j;
+       int i, j;
        u32 offset;
        u16 val;
 
@@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
        u8 mac_d[ETH_ALEN] = {};
        u32 total, offset;
        u16 data;
-       int octet, ret;
+       int octet;
 
-       ret = ath5k_hw_nvram_read(ah, 0x20, &data);
-       if (ret)
-               return ret;
+       AR5K_EEPROM_READ(0x20, data);
 
        for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
-               ret = ath5k_hw_nvram_read(ah, offset, &data);
-               if (ret)
-                       return ret;
+               AR5K_EEPROM_READ(offset, data);
 
                total += data;
                mac_d[octet + 1] = data & 0xff;
index 7c09e15..6511c27 100644 (file)
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
 #define        AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz      6250
 
 #define AR5K_EEPROM_READ(_o, _v) do {                  \
-       ret = ath5k_hw_nvram_read(ah, (_o), &(_v));     \
-       if (ret)                                        \
-               return ret;                             \
+       if (!ath5k_hw_nvram_read(ah, (_o), &(_v)))      \
+               return -EIO;                            \
 } while (0)
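With the reworked macro the nvram read helper reports success as a bool and the macro itself returns -EIO from the enclosing function on failure, which is why the various 'int ret' locals disappear from eeprom.c in the hunks above. A usage sketch (illustrative only; example_read_word() is not part of the patch, and 'ah' must be in scope because the macro references it):

static int example_read_word(struct ath5k_hw *ah, u32 offset, u16 *out)
{
	u16 val;

	AR5K_EEPROM_READ(offset, val);	/* bails out with -EIO on a failed read */
	*out = val;
	return 0;
}
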
 
 #define AR5K_EEPROM_READ_HDR(_o, _v)                                   \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
        AR5K_CTL_MODE_M = 15,
 };
 
-/* Default CTL ids for the 3 main reg domains.
- * Atheros only uses these by default but vendors
- * can have up to 32 different CTLs for different
- * scenarios. Note that theese values are ORed with
- * the mode id (above) so we can have up to 24 CTL
- * datasets out of these 3 main regdomains. That leaves
- * 8 ids that can be used by vendors and since 0x20 is
- * missing from HAL sources i guess this is the set of
- * custom CTLs vendors can use. */
-#define        AR5K_CTL_FCC    0x10
-#define        AR5K_CTL_CUSTOM 0x20
-#define        AR5K_CTL_ETSI   0x30
-#define        AR5K_CTL_MKK    0x40
-
-/* Indicates a CTL with only mode set and
- * no reg domain mapping, such CTLs are used
- * for world roaming domains or simply when
- * a reg domain is not set */
-#define        AR5K_CTL_NO_REGDOMAIN   0xf0
-
-/* Indicates an empty (invalid) CTL */
-#define AR5K_CTL_NO_CTL                0xff
-
 /* Per channel calibration data, used for power table setup */
 struct ath5k_chan_pcal_info_rf5111 {
        /* Power levels in half dbm units
index d76d68c..9be29b7 100644 (file)
 
 extern int ath5k_modparam_nohwcrypt;
 
-/* functions used from base.c */
-void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
-bool ath_any_vif_assoc(struct ath5k_softc *sc);
-int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
-                  struct ath5k_txq *txq);
-int ath5k_init_hw(struct ath5k_softc *sc);
-int ath5k_stop_hw(struct ath5k_softc *sc);
-void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
-                                       struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
-void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
-int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void ath5k_beacon_config(struct ath5k_softc *sc);
-void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-
 /********************\
 * Mac80211 functions *
 \********************/
 
-static int
+static void
 ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ath5k_softc *sc = hw->priv;
@@ -77,10 +60,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
                dev_kfree_skb_any(skb);
-               return 0;
+               return;
        }
 
-       return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
+       ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
 }
 
 
@@ -175,8 +158,7 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
 
-       ath5k_mode_setup(sc, vif);
-
+       ath5k_update_bssid_mask_and_opmode(sc, vif);
        ret = 0;
 end:
        mutex_unlock(&sc->lock);
@@ -226,6 +208,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
        struct ath5k_hw *ah = sc->ah;
        struct ieee80211_conf *conf = &hw->conf;
        int ret = 0;
+       int i;
 
        mutex_lock(&sc->lock);
 
@@ -243,6 +226,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
                ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
        }
 
+       if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+               ah->ah_retry_long = conf->long_frame_max_tx_count;
+               ah->ah_retry_short = conf->short_frame_max_tx_count;
+
+               for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
+                       ath5k_hw_set_tx_retry_limits(ah, i);
+       }
+
        /* TODO:
         * 1) Move this on config_interface and handle each case
         * separately eg. when we have only one STA vif, use
@@ -389,6 +380,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
        struct ath5k_softc *sc = hw->priv;
        struct ath5k_hw *ah = sc->ah;
        u32 mfilt[2], rfilt;
+       struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
 
        mutex_lock(&sc->lock);
 
@@ -462,6 +454,21 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
                break;
        }
 
+       iter_data.hw_macaddr = NULL;
+       iter_data.n_stas = 0;
+       iter_data.need_set_hw_addr = false;
+       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
+                                                  &iter_data);
+
+       /* Set up RX Filter */
+       if (iter_data.n_stas > 1) {
+               /* If you have multiple STA interfaces connected to
+                * different APs, ARPs are not received (most of the time?)
+                * Enabling PROMISC appears to fix that problem.
+                */
+               rfilt |= AR5K_RX_FILTER_PROM;
+       }
+
        /* Set filters */
        ath5k_hw_set_rx_filter(ah, rfilt);
 
@@ -733,6 +740,47 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
 }
 
 
+static void ath5k_get_ringparam(struct ieee80211_hw *hw,
+                               u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+       struct ath5k_softc *sc = hw->priv;
+
+       *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
+
+       *tx_max = ATH5K_TXQ_LEN_MAX;
+       *rx = *rx_max = ATH_RXBUF;
+}
+
+
+static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
+{
+       struct ath5k_softc *sc = hw->priv;
+       u16 qnum;
+
+       /* only support setting tx ring size for now */
+       if (rx != ATH_RXBUF)
+               return -EINVAL;
+
+       /* restrict tx ring size min/max */
+       if (!tx || tx > ATH5K_TXQ_LEN_MAX)
+               return -EINVAL;
+
+       for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
+               if (!sc->txqs[qnum].setup)
+                       continue;
+               if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
+                   sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
+                       continue;
+
+               sc->txqs[qnum].txq_max = tx;
+               if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
+                       ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
+       }
+
+       return 0;
+}
+
+
 const struct ieee80211_ops ath5k_hw_ops = {
        .tx                     = ath5k_tx,
        .start                  = ath5k_start,
@@ -771,4 +819,6 @@ const struct ieee80211_ops ath5k_hw_ops = {
        /* .napi_poll           = not implemented */
        .set_antenna            = ath5k_set_antenna,
        .get_antenna            = ath5k_get_antenna,
+       .set_ringparam          = ath5k_set_ringparam,
+       .get_ringparam          = ath5k_get_ringparam,
 };
index 7f8c5b0..66598a0 100644 (file)
@@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
 /*
  * Read from eeprom
  */
-bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
+static bool
+ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
 {
        struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
        u32 status, timeout;
@@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
                status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
                if (status & AR5K_EEPROM_STAT_RDDONE) {
                        if (status & AR5K_EEPROM_STAT_RDERR)
-                               return -EIO;
+                               return false;
                        *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
                                        0xffff);
-                       return 0;
+                       return true;
                }
                udelay(15);
        }
 
-       return -ETIMEDOUT;
+       return false;
 }
 
 int ath5k_hw_read_srev(struct ath5k_hw *ah)
index e5f2b96..a702817 100644 (file)
@@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
        if (!ah->ah_bwmode) {
                dur = ieee80211_generic_frame_duration(sc->hw,
                                                NULL, len, rate);
-               return dur;
+               return le16_to_cpu(dur);
        }
 
        bitrate = rate->bitrate;
@@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
                 * what rate we should choose to TX ACKs. */
                tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
 
-               tx_time = le16_to_cpu(tx_time);
-
                ath5k_hw_reg_write(ah, tx_time, reg);
 
                if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
index 78c26fd..62ce2f4 100644 (file)
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
        return 0;
 }
 
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+                       struct ieee80211_channel *channel)
+{
+       /*
+        * On 5211+ read activation -> rx delay
+        * and use it (100ns steps).
+        */
+       if (ah->ah_version != AR5K_AR5210) {
+               u32 delay;
+               delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+                       AR5K_PHY_RX_DELAY_M;
+               delay = (channel->hw_value & CHANNEL_CCK) ?
+                       ((delay << 2) / 22) : (delay / 10);
+               if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+                       delay = delay << 1;
+               if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+                       delay = delay << 2;
+               /* XXX: /2 on turbo ? Let's be safe
+                * for now */
+               udelay(100 + delay);
+       } else {
+               mdelay(1);
+       }
+}
+
 
 /**********************\
 * RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
        case AR5K_RF5111:
                ret = ath5k_hw_rf5111_channel(ah, channel);
                break;
+       case AR5K_RF2317:
        case AR5K_RF2425:
                ret = ath5k_hw_rf2425_channel(ah, channel);
                break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                /* Failed */
                if (i >= 100)
                        return -EIO;
+
+               /* Set channel and wait for synth */
+               ret = ath5k_hw_channel(ah, channel);
+               if (ret)
+                       return ret;
+
+               ath5k_hw_wait_for_synth(ah, channel);
        }
 
        /*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        if (ret)
                return ret;
 
+       /* Write OFDM timings on 5212*/
+       if (ah->ah_version == AR5K_AR5212 &&
+               channel->hw_value & CHANNEL_OFDM) {
+
+               ret = ath5k_hw_write_ofdm_timings(ah, channel);
+               if (ret)
+                       return ret;
+
+               /* Spur info is available only from EEPROM versions
+                * greater than 5.3, but the EEPROM routines will use
+                * static values for older versions */
+               if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+                       ath5k_hw_set_spur_mitigation_filter(ah,
+                                                           channel);
+       }
+
+       /* If we used fast channel switching
+        * we are done, release RF bus and
+        * fire up NF calibration.
+        *
+        * Note: Only NF calibration due to
+        * channel change, not AGC calibration
+        * since AGC is still running !
+        */
+       if (fast) {
+               /*
+                * Release RF Bus grant
+                */
+               AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+                                   AR5K_PHY_RFBUS_REQ_REQUEST);
+
+               /*
+                * Start NF calibration
+                */
+               AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+                                       AR5K_PHY_AGCCTL_NF);
+
+               return ret;
+       }
+
        /*
         * For 5210 we do all initialization using
         * initvals, so we don't have to modify
         * any settings (5210 also only supports
         * a/aturbo modes)
         */
-       if ((ah->ah_version != AR5K_AR5210) && !fast) {
+       if (ah->ah_version != AR5K_AR5210) {
 
                /*
                 * Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                if (ret)
                        return ret;
 
-               /* Write OFDM timings on 5212*/
-               if (ah->ah_version == AR5K_AR5212 &&
-                       channel->hw_value & CHANNEL_OFDM) {
-
-                       ret = ath5k_hw_write_ofdm_timings(ah, channel);
-                       if (ret)
-                               return ret;
-
-                       /* Spur info is available only from EEPROM versions
-                        * greater than 5.3, but the EEPROM routines will use
-                        * static values for older versions */
-                       if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
-                               ath5k_hw_set_spur_mitigation_filter(ah,
-                                                                   channel);
-               }
-
                /*Enable/disable 802.11b mode on 5111
                (enable 2111 frequency converter + CCK)*/
                if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
         */
        ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
 
+       ath5k_hw_wait_for_synth(ah, channel);
+
        /*
-        * On 5211+ read activation -> rx delay
-        * and use it.
+        * Perform ADC test to see if baseband is ready
+        * Set tx hold and check adc test register
         */
-       if (ah->ah_version != AR5K_AR5210) {
-               u32 delay;
-               delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
-                       AR5K_PHY_RX_DELAY_M;
-               delay = (channel->hw_value & CHANNEL_CCK) ?
-                       ((delay << 2) / 22) : (delay / 10);
-               if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
-                       delay = delay << 1;
-               if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
-                       delay = delay << 2;
-               /* XXX: /2 on turbo ? Let's be safe
-                * for now */
-               udelay(100 + delay);
-       } else {
-               mdelay(1);
-       }
-
-       if (fast)
-               /*
-                * Release RF Bus grant
-                */
-               AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
-                                   AR5K_PHY_RFBUS_REQ_REQUEST);
-       else {
-               /*
-                * Perform ADC test to see if baseband is ready
-                * Set tx hold and check adc test register
-                */
-               phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
-               ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
-               for (i = 0; i <= 20; i++) {
-                       if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
-                               break;
-                       udelay(200);
-               }
-               ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+       phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+       ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+       for (i = 0; i <= 20; i++) {
+               if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+                       break;
+               udelay(200);
        }
+       ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
        /*
         * Start automatic gain control calibration
index 2c9c9e7..3343fb9 100644 (file)
@@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
 /*
  * Set tx retry limits on DCU
  */
-static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
-                                       unsigned int queue)
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+                                 unsigned int queue)
 {
-       u32 retry_lg, retry_sh;
-
-       /*
-        * Calculate and set retry limits
-        */
-       if (ah->ah_software_retry) {
-               /* XXX Need to test this */
-               retry_lg = ah->ah_limit_tx_retries;
-               retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
-                       AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
-       } else {
-               retry_lg = AR5K_INIT_LG_RETRY;
-               retry_sh = AR5K_INIT_SH_RETRY;
-       }
-
        /* Single data queue on AR5210 */
        if (ah->ah_version == AR5K_AR5210) {
                struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
 
                ath5k_hw_reg_write(ah,
                        (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
-                       | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
-                               AR5K_NODCU_RETRY_LMT_SLG_RETRY)
-                       | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
-                               AR5K_NODCU_RETRY_LMT_SSH_RETRY)
-                       | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
-                       | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
+                       | AR5K_REG_SM(ah->ah_retry_long,
+                                     AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+                       | AR5K_REG_SM(ah->ah_retry_short,
+                                     AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+                       | AR5K_REG_SM(ah->ah_retry_long,
+                                     AR5K_NODCU_RETRY_LMT_LG_RETRY)
+                       | AR5K_REG_SM(ah->ah_retry_short,
+                                     AR5K_NODCU_RETRY_LMT_SH_RETRY),
                        AR5K_NODCU_RETRY_LMT);
        /* DCU on AR5211+ */
        } else {
                ath5k_hw_reg_write(ah,
-                       AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
-                               AR5K_DCU_RETRY_LMT_SLG_RETRY) |
-                       AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
-                               AR5K_DCU_RETRY_LMT_SSH_RETRY) |
-                       AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
-                       AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
+                       AR5K_REG_SM(ah->ah_retry_long,
+                                   AR5K_DCU_RETRY_LMT_RTS)
+                       | AR5K_REG_SM(ah->ah_retry_long,
+                                     AR5K_DCU_RETRY_LMT_STA_RTS)
+                       | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+                                     AR5K_DCU_RETRY_LMT_STA_DATA),
                        AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
        }
-       return;
 }
 
 /**
index fd14b91..e1c9abd 100644 (file)
 
 /*
  * DCU retry limit registers
+ * None of these fields allow zero values
  */
 #define AR5K_DCU_RETRY_LMT_BASE                0x1080                  /* Register Address -Queue0 DCU_RETRY_LMT */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY    0x0000000f      /* Short retry limit mask */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY_S  0
-#define AR5K_DCU_RETRY_LMT_LG_RETRY    0x000000f0      /* Long retry limit mask */
-#define AR5K_DCU_RETRY_LMT_LG_RETRY_S  4
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY   0x00003f00      /* Station short retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY   0x000fc000      /* Station long retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
+#define AR5K_DCU_RETRY_LMT_RTS         0x0000000f      /* RTS failure limit. Transmission fails if no CTS is received after this many attempts */
+#define AR5K_DCU_RETRY_LMT_RTS_S       0
+#define AR5K_DCU_RETRY_LMT_STA_RTS     0x00003f00      /* STA RTS failure limit. If exceeded, the CW is reset */
+#define AR5K_DCU_RETRY_LMT_STA_RTS_S   8
+#define AR5K_DCU_RETRY_LMT_STA_DATA    0x000fc000      /* STA data failure limit. If exceeded, the CW is reset */
+#define AR5K_DCU_RETRY_LMT_STA_DATA_S  14
 #define        AR5K_QUEUE_DFS_RETRY_LIMIT(_q)  AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
 
 /*
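The retry-limit rework above drops the driver-computed retry_lg/retry_sh pair and writes ah->ah_retry_long / ah->ah_retry_short straight into the renamed DCU fields. A minimal user-space sketch of that packing follows, assuming AR5K_REG_SM() is the usual ath5k shift-and-mask helper; the masks and shifts are copied from the hunk above, and the register value printed is illustration only, not driver output.

/*
 * Sketch of the DCU_RETRY_LMT packing done by the rewritten
 * ath5k_hw_set_tx_retry_limits(); standalone, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define AR5K_DCU_RETRY_LMT_RTS         0x0000000f
#define AR5K_DCU_RETRY_LMT_RTS_S       0
#define AR5K_DCU_RETRY_LMT_STA_RTS     0x00003f00
#define AR5K_DCU_RETRY_LMT_STA_RTS_S   8
#define AR5K_DCU_RETRY_LMT_STA_DATA    0x000fc000
#define AR5K_DCU_RETRY_LMT_STA_DATA_S  14

/* Assumed semantics: shift the value into place, then mask it to the field. */
#define AR5K_REG_SM(val, field)  (((uint32_t)(val) << field##_S) & (field))

int main(void)
{
	uint8_t retry_long = 4, retry_short = 7;	/* typical 802.11 defaults */
	uint32_t reg;

	reg = AR5K_REG_SM(retry_long, AR5K_DCU_RETRY_LMT_RTS)
	    | AR5K_REG_SM(retry_long, AR5K_DCU_RETRY_LMT_STA_RTS)
	    | AR5K_REG_SM(retry_short > retry_long ? retry_short : retry_long,
			  AR5K_DCU_RETRY_LMT_STA_DATA);

	printf("DCU_RETRY_LMT = 0x%08x\n", (unsigned)reg);	/* 0x0001c404 */
	return 0;
}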
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644 (file)
index 0000000..2de68ad
--- /dev/null
@@ -0,0 +1,107 @@
+#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_ATH5K_H
+
+#include <linux/tracepoint.h>
+#include "base.h"
+
+#ifndef CONFIG_ATH5K_TRACER
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+struct sk_buff;
+
+#define PRIV_ENTRY  __field(struct ath5k_softc *, priv)
+#define PRIV_ASSIGN __entry->priv = priv
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath5k
+
+TRACE_EVENT(ath5k_rx,
+       TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
+       TP_ARGS(priv, skb),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(unsigned long, skbaddr)
+               __dynamic_array(u8, frame, skb->len)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->skbaddr = (unsigned long) skb;
+               memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+       ),
+       TP_printk(
+               "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
+       )
+);
+
+TRACE_EVENT(ath5k_tx,
+       TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+                struct ath5k_txq *q),
+
+       TP_ARGS(priv, skb, q),
+
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(unsigned long, skbaddr)
+               __field(u8, qnum)
+               __dynamic_array(u8, frame, skb->len)
+       ),
+
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->skbaddr = (unsigned long) skb;
+               __entry->qnum = (u8) q->qnum;
+               memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+       ),
+
+       TP_printk(
+               "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
+               __entry->qnum
+       )
+);
+
+TRACE_EVENT(ath5k_tx_complete,
+       TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+                struct ath5k_txq *q, struct ath5k_tx_status *ts),
+
+       TP_ARGS(priv, skb, q, ts),
+
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(unsigned long, skbaddr)
+               __field(u8, qnum)
+               __field(u8, ts_status)
+               __field(s8, ts_rssi)
+               __field(u8, ts_antenna)
+       ),
+
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->skbaddr = (unsigned long) skb;
+               __entry->qnum = (u8) q->qnum;
+               __entry->ts_status = ts->ts_status;
+               __entry->ts_rssi =  ts->ts_rssi;
+               __entry->ts_antenna = ts->ts_antenna;
+       ),
+
+       TP_printk(
+               "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
+               __entry->priv, __entry->skbaddr, __entry->qnum,
+               __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
+       )
+);
+
+#endif /* __TRACE_ATH5K_H */
+
+#ifdef CONFIG_ATH5K_TRACER
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
+
+#endif
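The new trace.h above defines three ath5k tracepoints (ath5k_rx, ath5k_tx, ath5k_tx_complete); when CONFIG_ATH5K_TRACER is not set, the TRACE_EVENT override turns each trace_<name>() into an empty inline stub, so call sites in the driver compile away. A standalone sketch of that compile-out pattern, using dummy opaque types rather than the real tracepoint machinery:

#include <stdio.h>

struct ath5k_softc;	/* opaque stand-ins for the real driver structs */
struct sk_buff;

#define TP_PROTO(...) __VA_ARGS__
#define TP_ARGS(...)  __VA_ARGS__

/* Tracer disabled: each TRACE_EVENT collapses to an empty inline stub. */
#define TRACE_EVENT(name, proto, ...) \
	static inline void trace_ ## name(proto) {}

TRACE_EVENT(ath5k_rx,
	TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
	TP_ARGS(priv, skb));

int main(void)
{
	trace_ath5k_rx(NULL, NULL);	/* a no-op when the tracer is compiled out */
	printf("tracepoint stub called, no cost at the call site\n");
	return 0;
}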
index aca0162..4d66ca8 100644 (file)
@@ -4,7 +4,6 @@ ath9k-y +=      beacon.o \
                main.o \
                recv.o \
                xmit.o \
-               virtual.o \
 
 ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_PCI) += pci.o
index 25a6e44..9cb0efa 100644 (file)
@@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops  = {
 static int ath_ahb_probe(struct platform_device *pdev)
 {
        void __iomem *mem;
-       struct ath_wiphy *aphy;
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
        struct resource *res;
@@ -76,7 +75,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
                goto err_out;
        }
 
-       mem = ioremap_nocache(res->start, res->end - res->start + 1);
+       mem = ioremap_nocache(res->start, resource_size(res));
        if (mem == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
@@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
 
        irq = res->start;
 
-       hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
-                               sizeof(struct ath_softc), &ath9k_ops);
+       hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
        if (hw == NULL) {
                dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
                ret = -ENOMEM;
@@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
        SET_IEEE80211_DEV(hw, &pdev->dev);
        platform_set_drvdata(pdev, hw);
 
-       aphy = hw->priv;
-       sc = (struct ath_softc *) (aphy + 1);
-       aphy->sc = sc;
-       aphy->hw = hw;
-       sc->pri_wiphy = aphy;
+       sc = hw->priv;
        sc->hw = hw;
        sc->dev = &pdev->dev;
        sc->mem = mem;
@@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
        struct ieee80211_hw *hw = platform_get_drvdata(pdev);
 
        if (hw) {
-               struct ath_wiphy *aphy = hw->priv;
-               struct ath_softc *sc = aphy->sc;
+               struct ath_softc *sc = hw->priv;
                void __iomem *mem = sc->mem;
 
                ath9k_deinit_device(sc);
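The ioremap_nocache() change earlier in this file swaps the open-coded mapping length for resource_size(); as the removed line shows, that is simply the inclusive size end - start + 1. A trivial standalone restatement (minimal struct stand-in, not the kernel definition):

#include <assert.h>
#include <stdio.h>

struct resource { unsigned long start, end; };	/* stand-in for the kernel struct */

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* inclusive region length */
}

int main(void)
{
	struct resource res = { .start = 0x180c0000, .end = 0x180c00ff };

	assert(resource_size(&res) == 0x100);
	printf("mapping %lu bytes\n", resource_size(&res));
	return 0;
}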
index 5e300bd..76388c6 100644 (file)
@@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 {
        struct ath_common *common = ath9k_hw_common(ah);
 
-       if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+       if (AR_SREV_9271(ah)) {
+               if (!ar9285_hw_cl_cal(ah, chan))
+                       return false;
+       } else if (AR_SREV_9285_12_OR_LATER(ah)) {
                if (!ar9285_hw_clc(ah, chan))
                        return false;
        } else {
index f8a7771..f44c84a 100644 (file)
@@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
                }
 
                /* WAR for ASPM system hang */
-               if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+               if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
                        val |= (AR_WA_BIT6 | AR_WA_BIT7);
-               }
 
                if (AR_SREV_9285E_20(ah))
                        val |= AR_WA_BIT23;
index 4819747..4a92718 100644 (file)
@@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
                                return;
 
                        reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
-                                     (7 << 14) | (6 << 17) | (1 << 20) |
+                                     (2 << 14) | (6 << 17) | (1 << 20) |
                                      (3 << 24) | (1 << 28);
 
                        REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
@@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
 {
 #define POW_SM(_r, _s)     (((_r) & 0x3f) << (_s))
        /* make sure forced gain is not set */
-       REG_WRITE(ah, 0xa458, 0);
+       REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
 
        /* Write the OFDM power per rate set */
 
        /* 6 (LSB), 9, 12, 18 (MSB) */
-       REG_WRITE(ah, 0xa3c0,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
 
        /* 24 (LSB), 36, 48, 54 (MSB) */
-       REG_WRITE(ah, 0xa3c4,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
        /* Write the CCK power per rate set */
 
        /* 1L (LSB), reserved, 2L, 2S (MSB) */
-       REG_WRITE(ah, 0xa3c8,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
                  /* POW_SM(txPowerTimes2,  8) | this is reserved for AR9003 */
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
 
        /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
-       REG_WRITE(ah, 0xa3cc,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
        /* Write the HT20 power per rate set */
 
        /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
-       REG_WRITE(ah, 0xa3d0,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
                  POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 6 (LSB), 7, 12, 13 (MSB) */
-       REG_WRITE(ah, 0xa3d4,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
                  POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 14 (LSB), 15, 20, 21 */
-       REG_WRITE(ah, 0xa3e4,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
                  POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
        /* Mixed HT20 and HT40 rates */
 
        /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
-       REG_WRITE(ah, 0xa3e8,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
         * correct PAR difference between HT40 and HT20/LEGACY
         * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
         */
-       REG_WRITE(ah, 0xa3d8,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 6 (LSB), 7, 12, 13 (MSB) */
-       REG_WRITE(ah, 0xa3dc,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 14 (LSB), 15, 20, 21 */
-       REG_WRITE(ah, 0xa3ec,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
index 06fb2c8..7f5de6e 100644 (file)
  */
 static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 {
-       if (AR_SREV_9485(ah)) {
+       if (AR_SREV_9485_11(ah)) {
+               /* mac */
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+                               ar9485_1_1_mac_core,
+                               ARRAY_SIZE(ar9485_1_1_mac_core), 2);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+                               ar9485_1_1_mac_postamble,
+                               ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
+
+               /* bb */
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
+                               ARRAY_SIZE(ar9485_1_1), 2);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                               ar9485_1_1_baseband_core,
+                               ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                               ar9485_1_1_baseband_postamble,
+                               ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
+
+               /* radio */
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+                               ar9485_1_1_radio_core,
+                               ARRAY_SIZE(ar9485_1_1_radio_core), 2);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+                               ar9485_1_1_radio_postamble,
+                               ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
+
+               /* soc */
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+                               ar9485_1_1_soc_preamble,
+                               ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+
+               /* rx/tx gain */
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                               ar9485_common_rx_gain_1_1,
+                               ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2);
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                               ar9485_modes_lowest_ob_db_tx_gain_1_1,
+                               ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+                               5);
+
+               /* Load PCIE SERDES settings from INI */
+
+               /* Awake Setting */
+
+               INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                               ar9485_1_1_pcie_phy_clkreq_disable_L1,
+                               ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+                               2);
+
+               /* Sleep Setting */
+
+               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                               ar9485_1_1_pcie_phy_clkreq_disable_L1,
+                               ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+                               2);
+       } else if (AR_SREV_9485(ah)) {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -85,8 +145,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                /* Sleep Setting */
 
                INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                               ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1,
-                               ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1),
+                               ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
+                               ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
                                2);
        } else {
                /* mac */
@@ -163,7 +223,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
        switch (ar9003_hw_get_tx_gain_idx(ah)) {
        case 0:
        default:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485_modes_lowest_ob_db_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_lowest_ob_db_tx_gain_1_0,
                                       ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
@@ -175,10 +240,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
                                       5);
                break;
        case 1:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485Modes_high_ob_db_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_high_ob_db_tx_gain_1_0,
-                                      ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+                                      ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
                                       5);
                else
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -187,10 +257,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
                                       5);
                break;
        case 2:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485Modes_low_ob_db_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_low_ob_db_tx_gain_1_0,
-                                      ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+                                      ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
                                       5);
                else
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -199,7 +274,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
                                       5);
                break;
        case 3:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485Modes_high_power_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_high_power_tx_gain_1_0,
                                       ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
@@ -218,7 +298,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
        switch (ar9003_hw_get_rx_gain_idx(ah)) {
        case 0:
        default:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesRxGain,
+                                      ar9485_common_rx_gain_1_1,
+                                      ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+                                      2);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesRxGain,
                                       ar9485Common_rx_gain_1_0,
                                       ARRAY_SIZE(ar9485Common_rx_gain_1_0),
@@ -230,7 +315,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
                                       2);
                break;
        case 1:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesRxGain,
+                                      ar9485Common_wo_xlna_rx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+                                      2);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesRxGain,
                                       ar9485Common_wo_xlna_rx_gain_1_0,
                                       ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
index 4ceddbb..038a0cb 100644 (file)
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                 */
                if (rxsp->status11 & AR_CRCErr)
                        rxs->rs_status |= ATH9K_RXERR_CRC;
-               if (rxsp->status11 & AR_PHYErr) {
+               else if (rxsp->status11 & AR_PHYErr) {
                        phyerr = MS(rxsp->status11, AR_PHYErrCode);
                        /*
                         * If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                                rxs->rs_phyerr = phyerr;
                        }
 
-               }
-               if (rxsp->status11 & AR_DecryptCRCErr)
+               } else if (rxsp->status11 & AR_DecryptCRCErr)
                        rxs->rs_status |= ATH9K_RXERR_DECRYPT;
-               if (rxsp->status11 & AR_MichaelErr)
+               else if (rxsp->status11 & AR_MichaelErr)
                        rxs->rs_status |= ATH9K_RXERR_MIC;
+
                if (rxsp->status11 & AR_KeyMiss)
                        rxs->rs_status |= ATH9K_RXERR_DECRYPT;
        }
index 8d60f4f..eb250d6 100644 (file)
@@ -1020,28 +1020,29 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
 static void ar9003_hw_do_getnf(struct ath_hw *ah,
                              int16_t nfarray[NUM_NF_READINGS])
 {
-       int16_t nf;
-
-       nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
-       nfarray[0] = sign_extend32(nf, 8);
-
-       nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
-       nfarray[1] = sign_extend32(nf, 8);
+#define AR_PHY_CH_MINCCA_PWR   0x1FF00000
+#define AR_PHY_CH_MINCCA_PWR_S 20
+#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
 
-       nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
-       nfarray[2] = sign_extend32(nf, 8);
-
-       if (!IS_CHAN_HT40(ah->curchan))
-               return;
+       int16_t nf;
+       int i;
 
-       nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
-       nfarray[3] = sign_extend32(nf, 8);
+       for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+               if (ah->rxchainmask & BIT(i)) {
+                       nf = MS(REG_READ(ah, ah->nf_regs[i]),
+                                        AR_PHY_CH_MINCCA_PWR);
+                       nfarray[i] = sign_extend32(nf, 8);
 
-       nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
-       nfarray[4] = sign_extend32(nf, 8);
+                       if (IS_CHAN_HT40(ah->curchan)) {
+                               u8 ext_idx = AR9300_MAX_CHAINS + i;
 
-       nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
-       nfarray[5] = sign_extend32(nf, 8);
+                               nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
+                                                AR_PHY_CH_EXT_MINCCA_PWR);
+                               nfarray[ext_idx] = sign_extend32(nf, 8);
+                       }
+               }
+       }
 }
 
 static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
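The getnf rework above walks the chains present in ah->rxchainmask and pulls the 9-bit minCCApwr field out of each chain's (and, on HT40, each extension chain's) CCA register, sign-extending from bit 8. A standalone restatement of that extraction, with sign_extend32() written out the way the kernel helper in <linux/bitops.h> is understood to behave, and a hypothetical register readback for the numbers:

#include <stdint.h>
#include <stdio.h>

#define AR_PHY_CH_MINCCA_PWR	0x1FF00000
#define AR_PHY_CH_MINCCA_PWR_S	20

/* Field extract in the style of the kernel MS() macro: mask, then shift down. */
#define MS(reg, field)	(((reg) & (field)) >> (field##_S))

/* Assumed equivalent of sign_extend32(): treat bit 'index' as the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;
	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t cca = 0x19200000;			/* hypothetical CCA readback */
	uint32_t raw = MS(cca, AR_PHY_CH_MINCCA_PWR);	/* 9-bit field -> 0x192 */
	int16_t nf = sign_extend32(raw, 8);		/* sign bit at bit 8 */

	printf("raw=0x%03x nf=%d dBm\n", (unsigned)raw, nf);	/* nf = -110 */
	return 0;
}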
index 59bab6b..8bdda2c 100644 (file)
 #define AR_PHY_HEAVYCLIP_40      (AR_SM_BASE + 0x1ac)
 #define AR_PHY_ILLEGAL_TXRATE    (AR_SM_BASE + 0x1b0)
 
+#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
+
 #define AR_PHY_PWRTX_MAX         (AR_SM_BASE + 0x1f0)
 #define AR_PHY_POWER_TX_SUB      (AR_SM_BASE + 0x1f4)
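The per-rate power writes in ar9003_hw_tx_power_regwrite() now go through this indexed macro instead of the raw 0xa3c0..0xa3ec constants. Since AR_PHY_POWER_TX_RATE(0) replaces 0xa3c0, AR_SM_BASE works out to 0xa200 and each index steps the address by four bytes. A quick standalone check, with the AR_SM_BASE value inferred from the substitutions rather than taken from the header:

#include <assert.h>
#include <stdio.h>

#define AR_SM_BASE			0xa200	/* inferred from the replaced addresses */
#define AR_PHY_POWER_TX_RATE(_d)	(AR_SM_BASE + 0x1c0 + ((_d) << 2))

int main(void)
{
	assert(AR_PHY_POWER_TX_RATE(0)  == 0xa3c0);	/* legacy 6/9/12/18 */
	assert(AR_PHY_POWER_TX_RATE(3)  == 0xa3cc);	/* CCK 5.5/11 */
	assert(AR_PHY_POWER_TX_RATE(9)  == 0xa3e4);	/* HT20 14/15/20/21 */
	assert(AR_PHY_POWER_TX_RATE(11) == 0xa3ec);	/* HT40 14/15/20/21 */
	printf("indexed power-per-rate addresses match the old constants\n");
	return 0;
}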
 
index 70de3d8..71cc0a3 100644 (file)
@@ -667,6 +667,7 @@ static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
 
 static const u32 ar9485_1_0_soc_preamble[][2] = {
        /*   Addr     allmodes */
+       {0x00004090, 0x00aa10aa},
        {0x000040a4, 0x00a0c9c9},
        {0x00007048, 0x00000004},
 };
@@ -940,4 +941,1146 @@ static const u32 ar9485_1_0_mac_core[][2] = {
        {0x000083cc, 0x00000200},
        {0x000083d0, 0x000301ff},
 };
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+       /*  Addr       allmodes */
+       {0x00000008, 0x00000000},
+       {0x00000030, 0x00020085},
+       {0x00000034, 0x00000005},
+       {0x00000040, 0x00000000},
+       {0x00000044, 0x00000000},
+       {0x00000048, 0x00000008},
+       {0x0000004c, 0x00000010},
+       {0x00000050, 0x00000000},
+       {0x00001040, 0x002ffc0f},
+       {0x00001044, 0x002ffc0f},
+       {0x00001048, 0x002ffc0f},
+       {0x0000104c, 0x002ffc0f},
+       {0x00001050, 0x002ffc0f},
+       {0x00001054, 0x002ffc0f},
+       {0x00001058, 0x002ffc0f},
+       {0x0000105c, 0x002ffc0f},
+       {0x00001060, 0x002ffc0f},
+       {0x00001064, 0x002ffc0f},
+       {0x000010f0, 0x00000100},
+       {0x00001270, 0x00000000},
+       {0x000012b0, 0x00000000},
+       {0x000012f0, 0x00000000},
+       {0x0000143c, 0x00000000},
+       {0x0000147c, 0x00000000},
+       {0x00008000, 0x00000000},
+       {0x00008004, 0x00000000},
+       {0x00008008, 0x00000000},
+       {0x0000800c, 0x00000000},
+       {0x00008018, 0x00000000},
+       {0x00008020, 0x00000000},
+       {0x00008038, 0x00000000},
+       {0x0000803c, 0x00000000},
+       {0x00008040, 0x00000000},
+       {0x00008044, 0x00000000},
+       {0x00008048, 0x00000000},
+       {0x0000804c, 0xffffffff},
+       {0x00008054, 0x00000000},
+       {0x00008058, 0x00000000},
+       {0x0000805c, 0x000fc78f},
+       {0x00008060, 0x0000000f},
+       {0x00008064, 0x00000000},
+       {0x00008070, 0x00000310},
+       {0x00008074, 0x00000020},
+       {0x00008078, 0x00000000},
+       {0x0000809c, 0x0000000f},
+       {0x000080a0, 0x00000000},
+       {0x000080a4, 0x02ff0000},
+       {0x000080a8, 0x0e070605},
+       {0x000080ac, 0x0000000d},
+       {0x000080b0, 0x00000000},
+       {0x000080b4, 0x00000000},
+       {0x000080b8, 0x00000000},
+       {0x000080bc, 0x00000000},
+       {0x000080c0, 0x2a800000},
+       {0x000080c4, 0x06900168},
+       {0x000080c8, 0x13881c22},
+       {0x000080cc, 0x01f40000},
+       {0x000080d0, 0x00252500},
+       {0x000080d4, 0x00a00000},
+       {0x000080d8, 0x00400000},
+       {0x000080dc, 0x00000000},
+       {0x000080e0, 0xffffffff},
+       {0x000080e4, 0x0000ffff},
+       {0x000080e8, 0x3f3f3f3f},
+       {0x000080ec, 0x00000000},
+       {0x000080f0, 0x00000000},
+       {0x000080f4, 0x00000000},
+       {0x000080fc, 0x00020000},
+       {0x00008100, 0x00000000},
+       {0x00008108, 0x00000052},
+       {0x0000810c, 0x00000000},
+       {0x00008110, 0x00000000},
+       {0x00008114, 0x000007ff},
+       {0x00008118, 0x000000aa},
+       {0x0000811c, 0x00003210},
+       {0x00008124, 0x00000000},
+       {0x00008128, 0x00000000},
+       {0x0000812c, 0x00000000},
+       {0x00008130, 0x00000000},
+       {0x00008134, 0x00000000},
+       {0x00008138, 0x00000000},
+       {0x0000813c, 0x0000ffff},
+       {0x00008144, 0xffffffff},
+       {0x00008168, 0x00000000},
+       {0x0000816c, 0x00000000},
+       {0x00008170, 0x18486200},
+       {0x00008174, 0x33332210},
+       {0x00008178, 0x00000000},
+       {0x0000817c, 0x00020000},
+       {0x000081c0, 0x00000000},
+       {0x000081c4, 0x33332210},
+       {0x000081d4, 0x00000000},
+       {0x000081ec, 0x00000000},
+       {0x000081f0, 0x00000000},
+       {0x000081f4, 0x00000000},
+       {0x000081f8, 0x00000000},
+       {0x000081fc, 0x00000000},
+       {0x00008240, 0x00100000},
+       {0x00008244, 0x0010f400},
+       {0x00008248, 0x00000800},
+       {0x0000824c, 0x0001e800},
+       {0x00008250, 0x00000000},
+       {0x00008254, 0x00000000},
+       {0x00008258, 0x00000000},
+       {0x0000825c, 0x40000000},
+       {0x00008260, 0x00080922},
+       {0x00008264, 0x9ca00010},
+       {0x00008268, 0xffffffff},
+       {0x0000826c, 0x0000ffff},
+       {0x00008270, 0x00000000},
+       {0x00008274, 0x40000000},
+       {0x00008278, 0x003e4180},
+       {0x0000827c, 0x00000004},
+       {0x00008284, 0x0000002c},
+       {0x00008288, 0x0000002c},
+       {0x0000828c, 0x000000ff},
+       {0x00008294, 0x00000000},
+       {0x00008298, 0x00000000},
+       {0x0000829c, 0x00000000},
+       {0x00008300, 0x00000140},
+       {0x00008314, 0x00000000},
+       {0x0000831c, 0x0000010d},
+       {0x00008328, 0x00000000},
+       {0x0000832c, 0x00000007},
+       {0x00008330, 0x00000302},
+       {0x00008334, 0x00000700},
+       {0x00008338, 0x00ff0000},
+       {0x0000833c, 0x02400000},
+       {0x00008340, 0x000107ff},
+       {0x00008344, 0xa248105b},
+       {0x00008348, 0x008f0000},
+       {0x0000835c, 0x00000000},
+       {0x00008360, 0xffffffff},
+       {0x00008364, 0xffffffff},
+       {0x00008368, 0x00000000},
+       {0x00008370, 0x00000000},
+       {0x00008374, 0x000000ff},
+       {0x00008378, 0x00000000},
+       {0x0000837c, 0x00000000},
+       {0x00008380, 0xffffffff},
+       {0x00008384, 0xffffffff},
+       {0x00008390, 0xffffffff},
+       {0x00008394, 0xffffffff},
+       {0x00008398, 0x00000000},
+       {0x0000839c, 0x00000000},
+       {0x000083a0, 0x00000000},
+       {0x000083a4, 0x0000fa14},
+       {0x000083a8, 0x000f0c00},
+       {0x000083ac, 0x33332210},
+       {0x000083b0, 0x33332210},
+       {0x000083b4, 0x33332210},
+       {0x000083b8, 0x33332210},
+       {0x000083bc, 0x00000000},
+       {0x000083c0, 0x00000000},
+       {0x000083c4, 0x00000000},
+       {0x000083c8, 0x00000000},
+       {0x000083cc, 0x00000200},
+       {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9485_1_1_baseband_core[][2] = {
+       /* Addr       allmodes */
+       {0x00009800, 0xafe68e30},
+       {0x00009804, 0xfd14e000},
+       {0x00009808, 0x9c0a8f6b},
+       {0x0000980c, 0x04800000},
+       {0x00009814, 0x9280c00a},
+       {0x00009818, 0x00000000},
+       {0x0000981c, 0x00020028},
+       {0x00009834, 0x5f3ca3de},
+       {0x00009838, 0x0108ecff},
+       {0x0000983c, 0x14750600},
+       {0x00009880, 0x201fff00},
+       {0x00009884, 0x00001042},
+       {0x000098a4, 0x00200400},
+       {0x000098b0, 0x52440bbe},
+       {0x000098d0, 0x004b6a8e},
+       {0x000098d4, 0x00000820},
+       {0x000098dc, 0x00000000},
+       {0x000098f0, 0x00000000},
+       {0x000098f4, 0x00000000},
+       {0x00009c04, 0x00000000},
+       {0x00009c08, 0x03200000},
+       {0x00009c0c, 0x00000000},
+       {0x00009c10, 0x00000000},
+       {0x00009c14, 0x00046384},
+       {0x00009c18, 0x05b6b440},
+       {0x00009c1c, 0x00b6b440},
+       {0x00009d00, 0xc080a333},
+       {0x00009d04, 0x40206c10},
+       {0x00009d08, 0x009c4060},
+       {0x00009d0c, 0x1883800a},
+       {0x00009d10, 0x01834061},
+       {0x00009d14, 0x00c00400},
+       {0x00009d18, 0x00000000},
+       {0x00009d1c, 0x00000000},
+       {0x00009e08, 0x0038233c},
+       {0x00009e24, 0x9927b515},
+       {0x00009e28, 0x12ef0200},
+       {0x00009e30, 0x06336f77},
+       {0x00009e34, 0x6af6532f},
+       {0x00009e38, 0x0cc80c00},
+       {0x00009e40, 0x0d261820},
+       {0x00009e4c, 0x00001004},
+       {0x00009e50, 0x00ff03f1},
+       {0x00009fc0, 0x80be4788},
+       {0x00009fc4, 0x0001efb5},
+       {0x00009fcc, 0x40000014},
+       {0x0000a20c, 0x00000000},
+       {0x0000a210, 0x00000000},
+       {0x0000a220, 0x00000000},
+       {0x0000a224, 0x00000000},
+       {0x0000a228, 0x10002310},
+       {0x0000a23c, 0x00000000},
+       {0x0000a244, 0x0c000000},
+       {0x0000a2a0, 0x00000001},
+       {0x0000a2c0, 0x00000001},
+       {0x0000a2c8, 0x00000000},
+       {0x0000a2cc, 0x18c43433},
+       {0x0000a2d4, 0x00000000},
+       {0x0000a2dc, 0x00000000},
+       {0x0000a2e0, 0x00000000},
+       {0x0000a2e4, 0x00000000},
+       {0x0000a2e8, 0x00000000},
+       {0x0000a2ec, 0x00000000},
+       {0x0000a2f0, 0x00000000},
+       {0x0000a2f4, 0x00000000},
+       {0x0000a2f8, 0x00000000},
+       {0x0000a344, 0x00000000},
+       {0x0000a34c, 0x00000000},
+       {0x0000a350, 0x0000a000},
+       {0x0000a364, 0x00000000},
+       {0x0000a370, 0x00000000},
+       {0x0000a390, 0x00000001},
+       {0x0000a394, 0x00000444},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
+       {0x0000a3a4, 0x000000ff},
+       {0x0000a3a8, 0x3b3b3b3b},
+       {0x0000a3ac, 0x2f2f2f2f},
+       {0x0000a3c0, 0x20202020},
+       {0x0000a3c4, 0x22222220},
+       {0x0000a3c8, 0x20200020},
+       {0x0000a3cc, 0x20202020},
+       {0x0000a3d0, 0x20202020},
+       {0x0000a3d4, 0x20202020},
+       {0x0000a3d8, 0x20202020},
+       {0x0000a3dc, 0x20202020},
+       {0x0000a3e0, 0x20202020},
+       {0x0000a3e4, 0x20202020},
+       {0x0000a3e8, 0x20202020},
+       {0x0000a3ec, 0x20202020},
+       {0x0000a3f0, 0x00000000},
+       {0x0000a3f4, 0x00000006},
+       {0x0000a3f8, 0x0cdbd380},
+       {0x0000a3fc, 0x000f0f01},
+       {0x0000a400, 0x8fa91f01},
+       {0x0000a404, 0x00000000},
+       {0x0000a408, 0x0e79e5c6},
+       {0x0000a40c, 0x00820820},
+       {0x0000a414, 0x1ce739cf},
+       {0x0000a418, 0x2d0019ce},
+       {0x0000a41c, 0x1ce739ce},
+       {0x0000a420, 0x000001ce},
+       {0x0000a424, 0x1ce739ce},
+       {0x0000a428, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce},
+       {0x0000a434, 0x00000000},
+       {0x0000a438, 0x00001801},
+       {0x0000a43c, 0x00000000},
+       {0x0000a440, 0x00000000},
+       {0x0000a444, 0x00000000},
+       {0x0000a448, 0x04000000},
+       {0x0000a44c, 0x00000001},
+       {0x0000a450, 0x00010000},
+       {0x0000a5c4, 0xbfad9d74},
+       {0x0000a5c8, 0x0048060a},
+       {0x0000a5cc, 0x00000637},
+       {0x0000a760, 0x03020100},
+       {0x0000a764, 0x09080504},
+       {0x0000a768, 0x0d0c0b0a},
+       {0x0000a76c, 0x13121110},
+       {0x0000a770, 0x31301514},
+       {0x0000a774, 0x35343332},
+       {0x0000a778, 0x00000036},
+       {0x0000a780, 0x00000838},
+       {0x0000a7c0, 0x00000000},
+       {0x0000a7c4, 0xfffffffc},
+       {0x0000a7c8, 0x00000000},
+       {0x0000a7cc, 0x00000000},
+       {0x0000a7d0, 0x00000000},
+       {0x0000a7d4, 0x00000004},
+       {0x0000a7dc, 0x00000000},
+};
+
+static const u32 ar9485Common_1_1[][2] = {
+       /*  Addr      allmodes */
+       {0x00007010, 0x00000022},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+       /* Addr       5G_HT20        5G_HT40       2G_HT40       2G_HT20 */
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+       {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+       {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+       {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+       {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+       {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+       {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+       {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+       {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+       {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+       {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+       {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+       {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+       {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+       {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+       {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+       {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+       {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+       {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+       {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+       {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+       {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+       {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+       {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+       {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+       {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+       {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+       {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+       {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+       {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20  */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+       /* Addr        allmodes */
+       {0x0001609c, 0x0b283f31},
+       {0x000160ac, 0x24611800},
+       {0x000160b0, 0x03284f3e},
+       {0x0001610c, 0x00170000},
+       {0x00016140, 0x10804008},
+};
+
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+       {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+       {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+       {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+       {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+       {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+       {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+       {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9485_1_1_radio_core[][2] = {
+       /* Addr        allmodes */
+       {0x00016000, 0x36db6db6},
+       {0x00016004, 0x6db6db40},
+       {0x00016008, 0x73800000},
+       {0x0001600c, 0x00000000},
+       {0x00016040, 0x7f80fff8},
+       {0x0001604c, 0x000f0278},
+       {0x00016050, 0x4db6db8c},
+       {0x00016054, 0x6db60000},
+       {0x00016080, 0x00080000},
+       {0x00016084, 0x0e48048c},
+       {0x00016088, 0x14214514},
+       {0x0001608c, 0x119f081e},
+       {0x00016090, 0x24926490},
+       {0x00016098, 0xd28b3330},
+       {0x000160a0, 0xc2108ffe},
+       {0x000160a4, 0x812fc370},
+       {0x000160a8, 0x423c8000},
+       {0x000160b4, 0x92480040},
+       {0x000160c0, 0x006db6db},
+       {0x000160c4, 0x0186db60},
+       {0x000160c8, 0x6db6db6c},
+       {0x000160cc, 0x6de6fbe0},
+       {0x000160d0, 0xf7dfcf3c},
+       {0x00016100, 0x04cb0001},
+       {0x00016104, 0xfff80015},
+       {0x00016108, 0x00080010},
+       {0x00016144, 0x01884080},
+       {0x00016148, 0x00008040},
+       {0x00016240, 0x08400000},
+       {0x00016244, 0x1bf90f00},
+       {0x00016248, 0x00000000},
+       {0x0001624c, 0x00000000},
+       {0x00016280, 0x01000015},
+       {0x00016284, 0x00d30000},
+       {0x00016288, 0x00318000},
+       {0x0001628c, 0x50000000},
+       {0x00016290, 0x4b96210f},
+       {0x00016380, 0x00000000},
+       {0x00016384, 0x00000000},
+       {0x00016388, 0x00800700},
+       {0x0001638c, 0x00800700},
+       {0x00016390, 0x00800700},
+       {0x00016394, 0x00000000},
+       {0x00016398, 0x00000000},
+       {0x0001639c, 0x00000000},
+       {0x000163a0, 0x00000001},
+       {0x000163a4, 0x00000001},
+       {0x000163a8, 0x00000000},
+       {0x000163ac, 0x00000000},
+       {0x000163b0, 0x00000000},
+       {0x000163b4, 0x00000000},
+       {0x000163b8, 0x00000000},
+       {0x000163bc, 0x00000000},
+       {0x000163c0, 0x000000a0},
+       {0x000163c4, 0x000c0000},
+       {0x000163c8, 0x14021402},
+       {0x000163cc, 0x00001402},
+       {0x000163d0, 0x00000000},
+       {0x000163d4, 0x00000000},
+       {0x00016c40, 0x13188278},
+       {0x00016c44, 0x12000000},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+       /* Addr        allmodes */
+       {0x00018c00, 0x10052e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1[][2] = {
+       /* Addr        allmodes */
+       {0x0000a580, 0x00000000},
+       {0x0000a584, 0x00000000},
+       {0x0000a588, 0x00000000},
+       {0x0000a58c, 0x00000000},
+       {0x0000a590, 0x00000000},
+       {0x0000a594, 0x00000000},
+       {0x0000a598, 0x00000000},
+       {0x0000a59c, 0x00000000},
+       {0x0000a5a0, 0x00000000},
+       {0x0000a5a4, 0x00000000},
+       {0x0000a5a8, 0x00000000},
+       {0x0000a5ac, 0x00000000},
+       {0x0000a5b0, 0x00000000},
+       {0x0000a5b4, 0x00000000},
+       {0x0000a5b8, 0x00000000},
+       {0x0000a5bc, 0x00000000},
+};
+
+static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+       {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+       {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+       {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+       {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+       /* Addr        allmodes */
+       {0x00018c00, 0x10013e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+       /* Addr        allmodes */
+       {0x00004014, 0xba280400},
+       {0x00004090, 0x00aa10aa},
+       {0x000040a4, 0x00a0c9c9},
+       {0x00007010, 0x00000022},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+       {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+       /* Addr        allmodes */
+       {0x0000a398, 0x00000000},
+       {0x0000a39c, 0x6f7f0301},
+       {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20  */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+       /* Addr        5G_HT20       5G_HT40  */
+       {0x00009e00, 0x03721821, 0x03721821},
+       {0x0000a230, 0x0000400b, 0x00004016},
+       {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+       /* Addr        allmodes  */
+       {0x00018c00, 0x10012e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_common_rx_gain_1_1[][2] = {
+       /* Addr        allmodes */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x01800082},
+       {0x0000a014, 0x01820181},
+       {0x0000a018, 0x01840183},
+       {0x0000a01c, 0x01880185},
+       {0x0000a020, 0x018a0189},
+       {0x0000a024, 0x02850284},
+       {0x0000a028, 0x02890288},
+       {0x0000a02c, 0x03850384},
+       {0x0000a030, 0x03890388},
+       {0x0000a034, 0x038b038a},
+       {0x0000a038, 0x038d038c},
+       {0x0000a03c, 0x03910390},
+       {0x0000a040, 0x03930392},
+       {0x0000a044, 0x03950394},
+       {0x0000a048, 0x00000396},
+       {0x0000a04c, 0x00000000},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x28282828},
+       {0x0000a084, 0x28282828},
+       {0x0000a088, 0x28282828},
+       {0x0000a08c, 0x28282828},
+       {0x0000a090, 0x28282828},
+       {0x0000a094, 0x21212128},
+       {0x0000a098, 0x171c1c1c},
+       {0x0000a09c, 0x02020212},
+       {0x0000a0a0, 0x00000202},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x111f1100},
+       {0x0000a0c8, 0x111d111e},
+       {0x0000a0cc, 0x111b111c},
+       {0x0000a0d0, 0x22032204},
+       {0x0000a0d4, 0x22012202},
+       {0x0000a0d8, 0x221f2200},
+       {0x0000a0dc, 0x221d221e},
+       {0x0000a0e0, 0x33013302},
+       {0x0000a0e4, 0x331f3300},
+       {0x0000a0e8, 0x4402331e},
+       {0x0000a0ec, 0x44004401},
+       {0x0000a0f0, 0x441e441f},
+       {0x0000a0f4, 0x55015502},
+       {0x0000a0f8, 0x551f5500},
+       {0x0000a0fc, 0x6602551e},
+       {0x0000a100, 0x66006601},
+       {0x0000a104, 0x661e661f},
+       {0x0000a108, 0x7703661d},
+       {0x0000a10c, 0x77017702},
+       {0x0000a110, 0x00007700},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x111f1100},
+       {0x0000a148, 0x111d111e},
+       {0x0000a14c, 0x111b111c},
+       {0x0000a150, 0x22032204},
+       {0x0000a154, 0x22012202},
+       {0x0000a158, 0x221f2200},
+       {0x0000a15c, 0x221d221e},
+       {0x0000a160, 0x33013302},
+       {0x0000a164, 0x331f3300},
+       {0x0000a168, 0x4402331e},
+       {0x0000a16c, 0x44004401},
+       {0x0000a170, 0x441e441f},
+       {0x0000a174, 0x55015502},
+       {0x0000a178, 0x551f5500},
+       {0x0000a17c, 0x6602551e},
+       {0x0000a180, 0x66006601},
+       {0x0000a184, 0x661e661f},
+       {0x0000a188, 0x7703661d},
+       {0x0000a18c, 0x77017702},
+       {0x0000a190, 0x00007700},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
+       /* Addr        allmodes */
+       {0x00018c00, 0x10053e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+       /* Addr        allmodes */
+       {0x0000a000, 0x00060005},
+       {0x0000a004, 0x00810080},
+       {0x0000a008, 0x00830082},
+       {0x0000a00c, 0x00850084},
+       {0x0000a010, 0x01820181},
+       {0x0000a014, 0x01840183},
+       {0x0000a018, 0x01880185},
+       {0x0000a01c, 0x018a0189},
+       {0x0000a020, 0x02850284},
+       {0x0000a024, 0x02890288},
+       {0x0000a028, 0x028b028a},
+       {0x0000a02c, 0x03850384},
+       {0x0000a030, 0x03890388},
+       {0x0000a034, 0x038b038a},
+       {0x0000a038, 0x038d038c},
+       {0x0000a03c, 0x03910390},
+       {0x0000a040, 0x03930392},
+       {0x0000a044, 0x03950394},
+       {0x0000a048, 0x00000396},
+       {0x0000a04c, 0x00000000},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x28282828},
+       {0x0000a084, 0x28282828},
+       {0x0000a088, 0x28282828},
+       {0x0000a08c, 0x28282828},
+       {0x0000a090, 0x28282828},
+       {0x0000a094, 0x24242428},
+       {0x0000a098, 0x171e1e1e},
+       {0x0000a09c, 0x02020b0b},
+       {0x0000a0a0, 0x02020202},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x22072208},
+       {0x0000a0c4, 0x22052206},
+       {0x0000a0c8, 0x22032204},
+       {0x0000a0cc, 0x22012202},
+       {0x0000a0d0, 0x221f2200},
+       {0x0000a0d4, 0x221d221e},
+       {0x0000a0d8, 0x33023303},
+       {0x0000a0dc, 0x33003301},
+       {0x0000a0e0, 0x331e331f},
+       {0x0000a0e4, 0x4402331d},
+       {0x0000a0e8, 0x44004401},
+       {0x0000a0ec, 0x441e441f},
+       {0x0000a0f0, 0x55025503},
+       {0x0000a0f4, 0x55005501},
+       {0x0000a0f8, 0x551e551f},
+       {0x0000a0fc, 0x6602551d},
+       {0x0000a100, 0x66006601},
+       {0x0000a104, 0x661e661f},
+       {0x0000a108, 0x7703661d},
+       {0x0000a10c, 0x77017702},
+       {0x0000a110, 0x00007700},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x111f1100},
+       {0x0000a148, 0x111d111e},
+       {0x0000a14c, 0x111b111c},
+       {0x0000a150, 0x22032204},
+       {0x0000a154, 0x22012202},
+       {0x0000a158, 0x221f2200},
+       {0x0000a15c, 0x221d221e},
+       {0x0000a160, 0x33013302},
+       {0x0000a164, 0x331f3300},
+       {0x0000a168, 0x4402331e},
+       {0x0000a16c, 0x44004401},
+       {0x0000a170, 0x441e441f},
+       {0x0000a174, 0x55015502},
+       {0x0000a178, 0x551f5500},
+       {0x0000a17c, 0x6602551e},
+       {0x0000a180, 0x66006601},
+       {0x0000a184, 0x661e661f},
+       {0x0000a188, 0x7703661d},
+       {0x0000a18c, 0x77017702},
+       {0x0000a190, 0x00007700},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000296},
+};
+
 #endif
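
The initval tables above pair a register address with one value per channel mode (the columns named in each table's header comment). The following is a minimal, illustrative sketch of how such a table could be programmed, not the driver's actual loader; the column enum and write_reg() helper are hypothetical names introduced only for the example.

    #include <linux/types.h>

    /*
     * Illustrative sketch only (not the driver's real loader): program a
     * {address, per-mode value} table by picking one value column for the
     * current channel mode. write_reg() is a hypothetical helper.
     */
    enum ini_column {
            INI_5G_HT20 = 1,        /* column 0 holds the register address */
            INI_5G_HT40,
            INI_2G_HT40,
            INI_2G_HT20,
    };

    static void write_ini_table(const u32 tbl[][5], unsigned int rows,
                                enum ini_column col,
                                void (*write_reg)(u32 addr, u32 val))
    {
            unsigned int i;

            for (i = 0; i < rows; i++)
                    write_reg(tbl[i][0], tbl[i][col]);
    }

For the two-column "allmodes" tables the same loop applies, with a single value column instead of four.
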
index 3681caf..099bd41 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 #include <linux/completion.h>
-#include <linux/pm_qos_params.h>
 
 #include "debug.h"
 #include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
 
 #define A_MAX(a, b) ((a) > (b) ? (a) : (b))
 
-#define ATH9K_PM_QOS_DEFAULT_VALUE     55
-
 #define TSF_TO_TU(_h,_l) \
        ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
@@ -95,9 +92,9 @@ struct ath_config {
  * @BUF_XRETRY: To denote excessive retries of the buffer
  */
 enum buffer_type {
-       BUF_AMPDU               = BIT(2),
-       BUF_AGGR                = BIT(3),
-       BUF_XRETRY              = BIT(5),
+       BUF_AMPDU               = BIT(0),
+       BUF_AGGR                = BIT(1),
+       BUF_XRETRY              = BIT(2),
 };
 
 #define bf_isampdu(bf)         (bf->bf_state.bf_type & BUF_AMPDU)
@@ -137,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
         (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
         WME_AC_VO)
 
-#define ADDBA_EXCHANGE_ATTEMPTS    10
 #define ATH_AGGR_DELIM_SZ          4
 #define ATH_AGGR_MINPLEN           256 /* in bytes, minimum packet length */
 /* number of delimiters for encryption padding */
@@ -184,7 +180,8 @@ enum ATH_AGGR_STATUS {
 
 #define ATH_TXFIFO_DEPTH 8
 struct ath_txq {
-       u32 axq_qnum;
+       int mac80211_qnum; /* mac80211 queue number, -1 if not a mac80211 queue */
+       u32 axq_qnum; /* ath9k hardware queue number */
        u32 *axq_link;
        struct list_head axq_q;
        spinlock_t axq_lock;
@@ -218,6 +215,7 @@ struct ath_frame_info {
 struct ath_buf_state {
        u8 bf_type;
        u8 bfs_paprd;
+       unsigned long bfs_paprd_timestamp;
        enum ath9k_internal_frame_type bfs_ftype;
 };
 
@@ -233,7 +231,6 @@ struct ath_buf {
        bool bf_stale;
        u16 bf_flags;
        struct ath_buf_state bf_state;
-       struct ath_wiphy *aphy;
 };
 
 struct ath_atx_tid {
@@ -254,7 +251,10 @@ struct ath_atx_tid {
 };
 
 struct ath_node {
-       struct ath_common *common;
+#ifdef CONFIG_ATH9K_DEBUGFS
+       struct list_head list; /* for sc->nodes */
+       struct ieee80211_sta *sta; /* station struct we're part of */
+#endif
        struct ath_atx_tid tid[WME_NUM_TID];
        struct ath_atx_ac ac[WME_NUM_AC];
        u16 maxampdu;
@@ -277,6 +277,11 @@ struct ath_tx_control {
 #define ATH_TX_XRETRY       0x02
 #define ATH_TX_BAR          0x04
 
+/**
+ * @txq_map:  Index is mac80211 queue number.  This is
+ *  not necessarily the same as the hardware queue number
+ *  (axq_qnum).
+ */
 struct ath_tx {
        u16 seq_no;
        u32 txqsetup;
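
The txq_map comment above distinguishes the mac80211 queue index from the hardware queue number (axq_qnum). A minimal illustration of the lookup follows; skb_get_queue_mapping() is the usual mac80211 accessor, while the wrapper function itself is hypothetical and only shows the indexing.

    /* Illustrative lookup only: mac80211 queue index -> driver tx queue. */
    static struct ath_txq *example_skb_to_txq(struct ath_softc *sc,
                                              struct sk_buff *skb)
    {
            return sc->tx.txq_map[skb_get_queue_mapping(skb)];
    }
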
@@ -303,6 +308,8 @@ struct ath_rx {
        struct ath_descdma rxdma;
        struct ath_buf *rx_bufptr;
        struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
+       struct sk_buff *frag;
 };
 
 int ath_startrecv(struct ath_softc *sc);
@@ -339,10 +346,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
 
 struct ath_vif {
        int av_bslot;
+       bool is_bslot_active;
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
        enum nl80211_iftype av_opmode;
        struct ath_buf *av_bcbuf;
-       struct ath_tx_control av_btxctl;
        u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
 };
 
@@ -362,7 +369,7 @@ struct ath_vif {
 #define IEEE80211_MS_TO_TU(x)           (((x) * 1000) / 1024)
 
 struct ath_beacon_config {
-       u16 beacon_interval;
+       int beacon_interval;
        u16 listen_interval;
        u16 dtim_period;
        u16 bmiss_timeout;
@@ -381,7 +388,6 @@ struct ath_beacon {
        u32 ast_be_xmit;
        u64 bc_tstamp;
        struct ieee80211_vif *bslot[ATH_BCBUF];
-       struct ath_wiphy *bslot_aphy[ATH_BCBUF];
        int slottime;
        int slotupdate;
        struct ath9k_tx_queue_info beacon_qi;
@@ -392,9 +398,10 @@ struct ath_beacon {
 
 void ath_beacon_tasklet(unsigned long data);
 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
 int ath_beaconq_config(struct ath_softc *sc);
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
 
 /*******/
 /* ANI */
@@ -441,26 +448,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
 
 #define ATH_LED_PIN_DEF                1
 #define ATH_LED_PIN_9287               8
-#define ATH_LED_ON_DURATION_IDLE       350     /* in msecs */
-#define ATH_LED_OFF_DURATION_IDLE      250     /* in msecs */
-
-enum ath_led_type {
-       ATH_LED_RADIO,
-       ATH_LED_ASSOC,
-       ATH_LED_TX,
-       ATH_LED_RX
-};
-
-struct ath_led {
-       struct ath_softc *sc;
-       struct led_classdev led_cdev;
-       enum ath_led_type led_type;
-       char name[32];
-       bool registered;
-};
+#define ATH_LED_PIN_9485               6
 
+#ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
 void ath_deinit_leds(struct ath_softc *sc);
+#else
+static inline void ath_init_leds(struct ath_softc *sc)
+{
+}
+
+static inline void ath_deinit_leds(struct ath_softc *sc)
+{
+}
+#endif
+
 
 /* Antenna diversity/combining */
 #define ATH_ANT_RX_CURRENT_SHIFT 4
@@ -529,7 +531,6 @@ struct ath_ant_comb {
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
 #define ATH_MAX_SW_RETRIES      10
 #define ATH_CHAN_MAX            255
-#define IEEE80211_WEP_NKID      4       /* number of key ids */
 
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
 #define ATH_RATE_DUMMY_MARKER   0
@@ -557,27 +558,28 @@ struct ath_ant_comb {
 #define PS_WAIT_FOR_TX_ACK        BIT(3)
 #define PS_BEACON_SYNC            BIT(4)
 
-struct ath_wiphy;
 struct ath_rate_table;
 
+struct ath9k_vif_iter_data {
+       const u8 *hw_macaddr; /* PHY's hardware address; set
+                              * before starting iteration so a
+                              * valid bssid mask can be built.
+                              */
+       u8 mask[ETH_ALEN]; /* bssid mask */
+       int naps;      /* number of AP vifs */
+       int nmeshes;   /* number of mesh vifs */
+       int nstations; /* number of station vifs */
+       int nwds;      /* number of WDS vifs */
+       int nadhocs;   /* number of adhoc vifs */
+       int nothers;   /* number of vifs not specified above. */
+};
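
The mask field above is the BSSID mask the hardware uses to match frames when several vifs are active. A common way to derive it is to clear every bit in which a vif address differs from the hardware MAC; the sketch below assumes the iteration hands over one vif address at a time and is illustrative rather than the driver's exact iterator.

    #include <linux/if_ether.h>

    /*
     * Illustrative sketch: accumulate the BSSID mask from each vif address.
     * A bit stays set only where every vif address agrees with hw_macaddr,
     * so the mask must be initialised to all-ones before the first call.
     */
    static void vif_iter_update_mask(struct ath9k_vif_iter_data *iter_data,
                                     const u8 *vif_addr)
    {
            int i;

            for (i = 0; i < ETH_ALEN; i++)
                    iter_data->mask[i] &=
                            ~(iter_data->hw_macaddr[i] ^ vif_addr[i]);
    }
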
+
 struct ath_softc {
        struct ieee80211_hw *hw;
        struct device *dev;
 
-       spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
-       struct ath_wiphy *pri_wiphy;
-       struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
-                                      * have NULL entries */
-       int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
        int chan_idx;
        int chan_is_ht;
-       struct ath_wiphy *next_wiphy;
-       struct work_struct chan_work;
-       int wiphy_select_failures;
-       unsigned long wiphy_select_first_fail;
-       struct delayed_work wiphy_work;
-       unsigned long wiphy_scheduler_int;
-       int wiphy_scheduler_index;
        struct survey_info *cur_survey;
        struct survey_info survey[ATH9K_NUM_CHANNELS];
 
@@ -593,16 +595,17 @@ struct ath_softc {
        struct work_struct paprd_work;
        struct work_struct hw_check_work;
        struct completion paprd_complete;
-       bool paprd_pending;
+
+       unsigned int hw_busy_count;
 
        u32 intrstatus;
        u32 sc_flags; /* SC_OP_* */
        u16 ps_flags; /* PS_* */
        u16 curtxpow;
-       u8 nbcnvifs;
-       u16 nvifs;
        bool ps_enabled;
        bool ps_idle;
+       short nbcnvifs;
+       short nvifs;
        unsigned long ps_usecount;
 
        struct ath_config config;
@@ -611,47 +614,29 @@ struct ath_softc {
        struct ath_beacon beacon;
        struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 
-       struct ath_led radio_led;
-       struct ath_led assoc_led;
-       struct ath_led tx_led;
-       struct ath_led rx_led;
-       struct delayed_work ath_led_blink_work;
-       int led_on_duration;
-       int led_off_duration;
-       int led_on_cnt;
-       int led_off_cnt;
+#ifdef CONFIG_MAC80211_LEDS
+       bool led_registered;
+       char led_name[32];
+       struct led_classdev led_cdev;
+#endif
 
-       int beacon_interval;
+       struct ath9k_hw_cal_data caldata;
+       int last_rssi;
 
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath9k_debug debug;
+       spinlock_t nodes_lock;
+       struct list_head nodes; /* basically, stations */
+       unsigned int tx_complete_poll_work_seen;
 #endif
        struct ath_beacon_config cur_beacon_conf;
        struct delayed_work tx_complete_work;
+       struct delayed_work hw_pll_work;
        struct ath_btcoex btcoex;
 
        struct ath_descdma txsdma;
 
        struct ath_ant_comb ant_comb;
-
-       struct pm_qos_request_list pm_qos_req;
-};
-
-struct ath_wiphy {
-       struct ath_softc *sc; /* shared for all virtual wiphys */
-       struct ieee80211_hw *hw;
-       struct ath9k_hw_cal_data caldata;
-       enum ath_wiphy_state {
-               ATH_WIPHY_INACTIVE,
-               ATH_WIPHY_ACTIVE,
-               ATH_WIPHY_PAUSING,
-               ATH_WIPHY_PAUSED,
-               ATH_WIPHY_SCAN,
-       } state;
-       bool idle;
-       int chan_idx;
-       int chan_is_ht;
-       int last_rssi;
 };
 
 void ath9k_tasklet(unsigned long data);
@@ -666,7 +651,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 extern struct ieee80211_ops ath9k_ops;
 extern int ath9k_modparam_nohwcrypt;
 extern int led_blink;
-extern int ath9k_pm_qos_value;
 extern bool is_ath9k_unloaded;
 
 irqreturn_t ath_isr(int irq, void *dev);
@@ -675,14 +659,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                    const struct ath_bus_ops *bus_ops);
 void ath9k_deinit_device(struct ath_softc *sc);
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
-                          struct ath9k_channel *ichan);
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan);
 
 void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
 bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
+bool ath9k_uses_beacons(int type);
 
 #ifdef CONFIG_PCI
 int ath_pci_init(void);
@@ -706,26 +689,12 @@ void ath9k_ps_restore(struct ath_softc *sc);
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
 
 void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-int ath9k_wiphy_add(struct ath_softc *sc);
-int ath9k_wiphy_del(struct ath_wiphy *aphy);
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
-int ath9k_wiphy_pause(struct ath_wiphy *aphy);
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-int ath9k_wiphy_select(struct ath_wiphy *aphy);
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
-void ath9k_wiphy_chan_work(struct work_struct *work);
-bool ath9k_wiphy_started(struct ath_softc *sc);
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
-                                 struct ath_wiphy *selected);
-bool ath9k_wiphy_scanning(struct ath_softc *sc);
-void ath9k_wiphy_work(struct work_struct *work);
-bool ath9k_all_wiphys_idle(struct ath_softc *sc);
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
-
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
 
 void ath_start_rfkill_poll(struct ath_softc *sc);
 extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ath9k_vif_iter_data *iter_data);
+
 
 #endif /* ATH9K_H */
index 385ba03..6d2a545 100644 (file)
@@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
 
 static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_tx_control txctl;
 
@@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
                                           struct ieee80211_vif *vif)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_buf *bf;
        struct ath_vif *avp;
@@ -142,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *info;
        int cabq_depth;
 
-       if (aphy->state != ATH_WIPHY_ACTIVE)
-               return NULL;
-
        avp = (void *)vif->drv_priv;
        cabq = sc->beacon.cabq;
 
-       if (avp->av_bcbuf == NULL)
+       if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
                return NULL;
 
        /* Release the old beacon first */
@@ -225,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
        return bf;
 }
 
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
 {
-       struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp;
        struct ath_buf *bf;
        struct sk_buff *skb;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        __le64 tstamp;
 
        avp = (void *)vif->drv_priv;
@@ -244,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                                                 struct ath_buf, list);
                list_del(&avp->av_bcbuf->list);
 
-               if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
-                   sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
-                   sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+               if (ath9k_uses_beacons(vif->type)) {
                        int slot;
                        /*
                         * Assign the vif to a beacon xmit slot. As
@@ -256,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                        for (slot = 0; slot < ATH_BCBUF; slot++)
                                if (sc->beacon.bslot[slot] == NULL) {
                                        avp->av_bslot = slot;
+                                       avp->is_bslot_active = false;
 
                                        /* NB: keep looking for a double slot */
                                        if (slot == 0 || !sc->beacon.bslot[slot-1])
@@ -263,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                                }
                        BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
                        sc->beacon.bslot[avp->av_bslot] = vif;
-                       sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
                        sc->nbcnvifs++;
                }
        }
@@ -281,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
 
        /* NB: the beacon data buffer must be 32-bit aligned. */
        skb = ieee80211_beacon_get(sc->hw, vif);
-       if (skb == NULL) {
-               ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
+       if (skb == NULL)
                return -ENOMEM;
-       }
 
        tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
        sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -293,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                u64 tsfadjust;
                int intval;
 
-               intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+               intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
 
                /*
                 * Calculate the TSF offset for this beacon slot, i.e., the
@@ -325,6 +316,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                ath_err(common, "dma_mapping_error on beacon alloc\n");
                return -ENOMEM;
        }
+       avp->is_bslot_active = true;
 
        return 0;
 }
@@ -336,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
 
                if (avp->av_bslot != -1) {
                        sc->beacon.bslot[avp->av_bslot] = NULL;
-                       sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
                        sc->nbcnvifs--;
                }
 
@@ -358,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
 void ath_beacon_tasklet(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf = NULL;
        struct ieee80211_vif *vif;
-       struct ath_wiphy *aphy;
        int slot;
        u32 bfaddr, bc = 0, tsftu;
        u64 tsf;
@@ -382,6 +373,7 @@ void ath_beacon_tasklet(unsigned long data)
                        ath_dbg(common, ATH_DBG_BSTUCK,
                                "missed %u consecutive beacons\n",
                                sc->beacon.bmisscnt);
+                       ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
                        ath9k_hw_bstuck_nfcal(ah);
                } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
                        ath_dbg(common, ATH_DBG_BSTUCK,
@@ -406,7 +398,7 @@ void ath_beacon_tasklet(unsigned long data)
         * on the tsf to safeguard against missing an swba.
         */
 
-       intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+       intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
 
        tsf = ath9k_hw_gettsf64(ah);
        tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -420,7 +412,6 @@ void ath_beacon_tasklet(unsigned long data)
         */
        slot = ATH_BCBUF - slot - 1;
        vif = sc->beacon.bslot[slot];
-       aphy = sc->beacon.bslot_aphy[slot];
 
        ath_dbg(common, ATH_DBG_BEACON,
                "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -428,7 +419,7 @@ void ath_beacon_tasklet(unsigned long data)
 
        bfaddr = 0;
        if (vif) {
-               bf = ath_beacon_generate(aphy->hw, vif);
+               bf = ath_beacon_generate(sc->hw, vif);
                if (bf != NULL) {
                        bfaddr = bf->bf_daddr;
                        bc = 1;
@@ -460,16 +451,6 @@ void ath_beacon_tasklet(unsigned long data)
                sc->beacon.updateslot = OK;
        }
        if (bfaddr != 0) {
-               /*
-                * Stop any current dma and put the new frame(s) on the queue.
-                * This should never fail since we check above that no frames
-                * are still pending on the queue.
-                */
-               if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
-                       ath_err(common, "beacon queue %u did not stop?\n",
-                               sc->beacon.beaconq);
-               }
-
                /* NB: cabq traffic should already be queued and primed */
                ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
                ath9k_hw_txstart(ah, sc->beacon.beaconq);
@@ -720,10 +701,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
                iftype = sc->sc_ah->opmode;
        }
 
-               cur_conf->listen_interval = 1;
-               cur_conf->dtim_count = 1;
-               cur_conf->bmiss_timeout =
-                       ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+       cur_conf->listen_interval = 1;
+       cur_conf->dtim_count = 1;
+       cur_conf->bmiss_timeout =
+               ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
 
        /*
         * It looks like mac80211 may end up using beacon interval of zero in
@@ -735,8 +716,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
                cur_conf->beacon_interval = 100;
 
        /*
-        * Some times we dont parse dtim period from mac80211, in that case
-        * use a default value
+        * We don't parse the dtim period from mac80211 during driver
+        * initialization, since doing so breaks association with
+        * hidden-SSID APs and adds latency when roaming.
         */
        if (cur_conf->dtim_period == 0)
                cur_conf->dtim_period = 1;
@@ -760,3 +742,36 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
 
        sc->sc_flags |= SC_OP_BEACONS;
 }
+
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_vif *avp;
+       int slot;
+       bool found = false;
+
+       ath9k_ps_wakeup(sc);
+       if (status) {
+               for (slot = 0; slot < ATH_BCBUF; slot++) {
+                       if (sc->beacon.bslot[slot]) {
+                               avp = (void *)sc->beacon.bslot[slot]->drv_priv;
+                               if (avp->is_bslot_active) {
+                                       found = true;
+                                       break;
+                               }
+                       }
+               }
+               if (found) {
+                       /* Re-enable beaconing */
+                       ah->imask |= ATH9K_INT_SWBA;
+                       ath9k_hw_set_interrupts(ah, ah->imask);
+               }
+       } else {
+               /* Disable SWBA interrupt */
+               ah->imask &= ~ATH9K_INT_SWBA;
+               ath9k_hw_set_interrupts(ah, ah->imask);
+               tasklet_kill(&sc->bcon_tasklet);
+               ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
+       }
+       ath9k_ps_restore(sc);
+}
index b68a1ac..8649581 100644 (file)
@@ -262,7 +262,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
         * since 250us often results in NF load timeout and causes deaf
         * condition during stress testing 12/12/2009
         */
-       for (j = 0; j < 1000; j++) {
+       for (j = 0; j < 10000; j++) {
                if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
                     AR_PHY_AGC_CONTROL_NF) == 0)
                        break;
@@ -278,7 +278,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
         * here, the baseband nf cal will just be capped by our present
         * noisefloor until the next calibration timer.
         */
-       if (j == 1000) {
+       if (j == 10000) {
                ath_dbg(common, ATH_DBG_ANY,
                        "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
                        REG_READ(ah, AR_PHY_AGC_CONTROL));
@@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
        s16 default_nf;
        int i, j;
 
-       if (!ah->caldata)
-               return;
-
+       ah->caldata->channel = chan->channel;
+       ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
        h = ah->caldata->nfCalHist;
        default_nf = ath9k_hw_get_default_nf(ah, chan);
        for (i = 0; i < NUM_NF_READINGS; i++) {
index df1998d..615e682 100644 (file)
@@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
 }
 EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
 
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+                           u16 new_txpow, u16 *txpower)
+{
+       if (cur_txpow != new_txpow) {
+               ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
+               /* read back in case value is clamped */
+               *txpower = ath9k_hw_regulatory(ah)->power_limit;
+       }
+}
+EXPORT_SYMBOL(ath9k_cmn_update_txpow);
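
A plausible call site for the helper above, shown only to make the read-back semantics concrete: the caller passes its cached power level and a pointer to the same field, so a value clamped by the regulatory limit is reflected back. The wrapper function is hypothetical; the field names are taken from the softc definition earlier in this patch.

    /*
     * Illustrative call site only: apply a limit in 0.5 dBm units and let
     * the helper write back the possibly-clamped value.
     */
    static void example_set_txpower(struct ath_softc *sc, u16 half_dbm)
    {
            ath9k_cmn_update_txpow(sc->sc_ah, sc->curtxpow,
                                   half_dbm, &sc->curtxpow);
    }
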
+
 static int __init ath9k_cmn_init(void)
 {
        return 0;
index a126bdd..b2f7b5f 100644 (file)
@@ -23,8 +23,6 @@
 
 /* Common header for Atheros 802.11n base driver cores */
 
-#define IEEE80211_WEP_NKID 4
-
 #define WME_NUM_TID             16
 #define WME_BA_BMP_SIZE         64
 #define WME_MAX_BA              WME_BA_BMP_SIZE
@@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
 int ath9k_cmn_count_streams(unsigned int chainmask, int max);
 void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
                                  enum ath_stomp_type stomp_type);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+                           u16 new_txpow, u16 *txpower);
index 3586c43..8df5a92 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <asm/unaligned.h>
 
 #include "ath9k.h"
@@ -30,6 +31,19 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       u8 *buf = file->private_data;
+       return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
+{
+       vfree(file->private_data);
+       return 0;
+}
+
 #ifdef CONFIG_ATH_DEBUG
 
 static ssize_t read_file_debug(struct file *file, char __user *user_buf,
@@ -381,41 +395,40 @@ static const struct file_operations fops_interrupt = {
        .llseek = default_llseek,
 };
 
-static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
+static const char *channel_type_str(enum nl80211_channel_type t)
 {
-       switch (state) {
-       case ATH_WIPHY_INACTIVE:
-               return "INACTIVE";
-       case ATH_WIPHY_ACTIVE:
-               return "ACTIVE";
-       case ATH_WIPHY_PAUSING:
-               return "PAUSING";
-       case ATH_WIPHY_PAUSED:
-               return "PAUSED";
-       case ATH_WIPHY_SCAN:
-               return "SCAN";
+       switch (t) {
+       case NL80211_CHAN_NO_HT:
+               return "no ht";
+       case NL80211_CHAN_HT20:
+               return "ht20";
+       case NL80211_CHAN_HT40MINUS:
+               return "ht40-";
+       case NL80211_CHAN_HT40PLUS:
+               return "ht40+";
+       default:
+               return "???";
        }
-       return "?";
 }
 
 static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
-       struct ath_wiphy *aphy = sc->pri_wiphy;
-       struct ieee80211_channel *chan = aphy->hw->conf.channel;
+       struct ieee80211_channel *chan = sc->hw->conf.channel;
+       struct ieee80211_conf *conf = &(sc->hw->conf);
        char buf[512];
        unsigned int len = 0;
-       int i;
        u8 addr[ETH_ALEN];
        u32 tmp;
 
        len += snprintf(buf + len, sizeof(buf) - len,
-                       "primary: %s (%s chan=%d ht=%d)\n",
-                       wiphy_name(sc->pri_wiphy->hw->wiphy),
-                       ath_wiphy_state_str(sc->pri_wiphy->state),
+                       "%s (chan=%d  center-freq: %d MHz  channel-type: %d (%s))\n",
+                       wiphy_name(sc->hw->wiphy),
                        ieee80211_frequency_to_channel(chan->center_freq),
-                       aphy->chan_is_ht);
+                       chan->center_freq,
+                       conf->channel_type,
+                       channel_type_str(conf->channel_type));
 
        put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
        put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -457,156 +470,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
        else
                len += snprintf(buf + len, sizeof(buf) - len, "\n");
 
-       /* Put variable-length stuff down here, and check for overflows. */
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
-               if (aphy_tmp == NULL)
-                       continue;
-               chan = aphy_tmp->hw->conf.channel;
-               len += snprintf(buf + len, sizeof(buf) - len,
-                       "secondary: %s (%s chan=%d ht=%d)\n",
-                       wiphy_name(aphy_tmp->hw->wiphy),
-                       ath_wiphy_state_str(aphy_tmp->state),
-                       ieee80211_frequency_to_channel(chan->center_freq),
-                                                      aphy_tmp->chan_is_ht);
-       }
        if (len > sizeof(buf))
                len = sizeof(buf);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
-static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
-{
-       int i;
-       if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
-               return sc->pri_wiphy;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
-                       return aphy;
-       }
-       return NULL;
-}
-
-static int del_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_del(aphy);
-}
-
-static int pause_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_pause(aphy);
-}
-
-static int unpause_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_unpause(aphy);
-}
-
-static int select_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_select(aphy);
-}
-
-static int schedule_wiphy(struct ath_softc *sc, const char *msec)
-{
-       ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
-       return 0;
-}
-
-static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
-                               size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       char buf[50];
-       size_t len;
-
-       len = min(count, sizeof(buf) - 1);
-       if (copy_from_user(buf, user_buf, len))
-               return -EFAULT;
-       buf[len] = '\0';
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = '\0';
-
-       if (strncmp(buf, "add", 3) == 0) {
-               int res = ath9k_wiphy_add(sc);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "del=", 4) == 0) {
-               int res = del_wiphy(sc, buf + 4);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "pause=", 6) == 0) {
-               int res = pause_wiphy(sc, buf + 6);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "unpause=", 8) == 0) {
-               int res = unpause_wiphy(sc, buf + 8);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "select=", 7) == 0) {
-               int res = select_wiphy(sc, buf + 7);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "schedule=", 9) == 0) {
-               int res = schedule_wiphy(sc, buf + 9);
-               if (res < 0)
-                       return res;
-       } else
-               return -EOPNOTSUPP;
-
-       return count;
-}
-
 static const struct file_operations fops_wiphy = {
        .read = read_file_wiphy,
-       .write = write_file_wiphy,
        .open = ath9k_debugfs_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
 };
 
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
 #define PR(str, elem)                                                  \
        do {                                                            \
                len += snprintf(buf + len, size - len,                  \
                                "%s%13u%11u%10u%10u\n", str,            \
-               sc->debug.stats.txstats[WME_AC_BE].elem, \
-               sc->debug.stats.txstats[WME_AC_BK].elem, \
-               sc->debug.stats.txstats[WME_AC_VI].elem, \
-               sc->debug.stats.txstats[WME_AC_VO].elem); \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
+               if (len >= size)                          \
+                       goto done;                        \
+} while(0)
+
+#define PRX(str, elem)                                                 \
+do {                                                                   \
+       len += snprintf(buf + len, size - len,                          \
+                       "%s%13u%11u%10u%10u\n", str,                    \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem),        \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem),        \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem),        \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem));       \
+       if (len >= size)                                                \
+               goto done;                                              \
 } while(0)
 
+#define PRQLE(str, elem)                                               \
+do {                                                                   \
+       len += snprintf(buf + len, size - len,                          \
+                       "%s%13i%11i%10i%10i\n", str,                    \
+                       list_empty(&sc->tx.txq_map[WME_AC_BE]->elem),   \
+                       list_empty(&sc->tx.txq_map[WME_AC_BK]->elem),   \
+                       list_empty(&sc->tx.txq_map[WME_AC_VI]->elem),   \
+                       list_empty(&sc->tx.txq_map[WME_AC_VO]->elem));  \
+       if (len >= size)                                                \
+               goto done;                                              \
+} while (0)
+
 static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
        char *buf;
-       unsigned int len = 0, size = 2048;
+       unsigned int len = 0, size = 8000;
+       int i;
        ssize_t retval = 0;
+       char tmp[32];
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
 
-       len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
+       len += sprintf(buf, "Num-Tx-Queues: %i  tx-queues-setup: 0x%x"
+                      " poll-work-seen: %u\n"
+                      "%30s %10s%10s%10s\n\n",
+                      ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
+                      sc->tx_complete_poll_work_seen,
+                      "BE", "BK", "VI", "VO");
 
        PR("MPDUs Queued:    ", queued);
        PR("MPDUs Completed: ", completed);
        PR("Aggregates:      ", a_aggr);
-       PR("AMPDUs Queued:   ", a_queued);
+       PR("AMPDUs Queued HW:", a_queued_hw);
+       PR("AMPDUs Queued SW:", a_queued_sw);
        PR("AMPDUs Completed:", a_completed);
        PR("AMPDUs Retried:  ", a_retries);
        PR("AMPDUs XRetried: ", a_xretries);
@@ -618,6 +557,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
        PR("DELIM Underrun:  ", delim_underrun);
        PR("TX-Pkts-All:     ", tx_pkts_all);
        PR("TX-Bytes-All:    ", tx_bytes_all);
+       PR("hw-put-tx-buf:   ", puttxbuf);
+       PR("hw-tx-start:     ", txstart);
+       PR("hw-tx-proc-desc: ", txprocdesc);
+       len += snprintf(buf + len, size - len,
+                       "%s%11p%11p%10p%10p\n", "txq-memory-address:",
+                       sc->tx.txq_map[WME_AC_BE],
+                       sc->tx.txq_map[WME_AC_BK],
+                       sc->tx.txq_map[WME_AC_VI],
+                       sc->tx.txq_map[WME_AC_VO]);
+       if (len >= size)
+               goto done;
+
+       PRX("axq-qnum:        ", axq_qnum);
+       PRX("axq-depth:       ", axq_depth);
+       PRX("axq-ampdu_depth: ", axq_ampdu_depth);
+       PRX("axq-stopped:     ", stopped);
+       PRX("tx-in-progress:  ", axq_tx_inprogress);
+       PRX("pending-frames:  ", pending_frames);
+       PRX("txq_headidx:     ", txq_headidx);
+       PRX("txq_tailidx:     ", txq_tailidx);
+
+       PRQLE("axq_q empty:       ", axq_q);
+       PRQLE("axq_acq empty:     ", axq_acq);
+       PRQLE("txq_fifo_pending:  ", txq_fifo_pending);
+       for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
+               snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
+               PRQLE(tmp, txq_fifo[i]);
+       }
+
+       /* Print out more detailed queue-info */
+       for (i = 0; i <= WME_AC_BK; i++) {
+               struct ath_txq *txq = &(sc->tx.txq[i]);
+               struct ath_atx_ac *ac;
+               struct ath_atx_tid *tid;
+               if (len >= size)
+                       goto done;
+               spin_lock_bh(&txq->axq_lock);
+               if (!list_empty(&txq->axq_acq)) {
+                       ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
+                                             list);
+                       len += snprintf(buf + len, size - len,
+                                       "txq[%i] first-ac: %p sched: %i\n",
+                                       i, ac, ac->sched);
+                       if (list_empty(&ac->tid_q) || (len >= size))
+                               goto done_for;
+                       tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+                                              list);
+                       len += snprintf(buf + len, size - len,
+                                       " first-tid: %p sched: %i paused: %i\n",
+                                       tid, tid->sched, tid->paused);
+               }
+       done_for:
+               spin_unlock_bh(&txq->axq_lock);
+       }
+
+done:
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static ssize_t read_file_stations(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char *buf;
+       unsigned int len = 0, size = 64000;
+       struct ath_node *an = NULL;
+       ssize_t retval = 0;
+       int q;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       len += snprintf(buf + len, size - len,
+                       "Stations:\n"
+                       " tid: addr sched paused buf_q-empty an ac\n"
+                       " ac: addr sched tid_q-empty txq\n");
+
+       spin_lock(&sc->nodes_lock);
+       list_for_each_entry(an, &sc->nodes, list) {
+               len += snprintf(buf + len, size - len,
+                               "%pM\n", an->sta->addr);
+               if (len >= size)
+                       goto done;
+
+               for (q = 0; q < WME_NUM_TID; q++) {
+                       struct ath_atx_tid *tid = &(an->tid[q]);
+                       len += snprintf(buf + len, size - len,
+                                       " tid: %p %s %s %i %p %p\n",
+                                       tid, tid->sched ? "sched" : "idle",
+                                       tid->paused ? "paused" : "running",
+                                       list_empty(&tid->buf_q),
+                                       tid->an, tid->ac);
+                       if (len >= size)
+                               goto done;
+               }
+
+               for (q = 0; q < WME_NUM_AC; q++) {
+                       struct ath_atx_ac *ac = &(an->ac[q]);
+                       len += snprintf(buf + len, size - len,
+                                       " ac: %p %s %i %p\n",
+                                       ac, ac->sched ? "sched" : "idle",
+                                       list_empty(&ac->tid_q), ac->txq);
+                       if (len >= size)
+                               goto done;
+               }
+       }
+
+done:
+       spin_unlock(&sc->nodes_lock);
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static ssize_t read_file_misc(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_hw *ah = sc->sc_ah;
+       struct ieee80211_hw *hw = sc->hw;
+       char *buf;
+       unsigned int len = 0, size = 8000;
+       ssize_t retval = 0;
+       const char *tmp;
+       unsigned int reg;
+       struct ath9k_vif_iter_data iter_data;
+
+       ath9k_calculate_iter_data(hw, NULL, &iter_data);
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       switch (sc->sc_ah->opmode) {
+       case NL80211_IFTYPE_ADHOC:
+               tmp = "ADHOC";
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+               tmp = "MESH";
+               break;
+       case NL80211_IFTYPE_AP:
+               tmp = "AP";
+               break;
+       case NL80211_IFTYPE_STATION:
+               tmp = "STATION";
+               break;
+       default:
+               tmp = "???";
+               break;
+       }
+
+       len += snprintf(buf + len, size - len,
+                       "curbssid: %pM\n"
+                       "OP-Mode: %s(%i)\n"
+                       "Beacon-Timer-Register: 0x%x\n",
+                       common->curbssid,
+                       tmp, (int)(sc->sc_ah->opmode),
+                       REG_READ(ah, AR_BEACON_PERIOD));
+
+       reg = REG_READ(ah, AR_TIMER_MODE);
+       len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
+                       reg);
+       if (reg & AR_TBTT_TIMER_EN)
+               len += snprintf(buf + len, size - len, "TBTT ");
+       if (reg & AR_DBA_TIMER_EN)
+               len += snprintf(buf + len, size - len, "DBA ");
+       if (reg & AR_SWBA_TIMER_EN)
+               len += snprintf(buf + len, size - len, "SWBA ");
+       if (reg & AR_HCF_TIMER_EN)
+               len += snprintf(buf + len, size - len, "HCF ");
+       if (reg & AR_TIM_TIMER_EN)
+               len += snprintf(buf + len, size - len, "TIM ");
+       if (reg & AR_DTIM_TIMER_EN)
+               len += snprintf(buf + len, size - len, "DTIM ");
+       len += snprintf(buf + len, size - len, ")\n");
+
+       reg = sc->sc_ah->imask;
+       len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
+       if (reg & ATH9K_INT_SWBA)
+               len += snprintf(buf + len, size - len, "SWBA ");
+       if (reg & ATH9K_INT_BMISS)
+               len += snprintf(buf + len, size - len, "BMISS ");
+       if (reg & ATH9K_INT_CST)
+               len += snprintf(buf + len, size - len, "CST ");
+       if (reg & ATH9K_INT_RX)
+               len += snprintf(buf + len, size - len, "RX ");
+       if (reg & ATH9K_INT_RXHP)
+               len += snprintf(buf + len, size - len, "RXHP ");
+       if (reg & ATH9K_INT_RXLP)
+               len += snprintf(buf + len, size - len, "RXLP ");
+       if (reg & ATH9K_INT_BB_WATCHDOG)
+               len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
+       /* Other interrupt bits could be decoded here if needed. */
+       len += snprintf(buf + len, size - len, ")\n");
+
+       len += snprintf(buf + len, size - len,
+                       "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
+                       " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
+                       iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+                       iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
+                       sc->nvifs, sc->nbcnvifs);
+
+       len += snprintf(buf + len, size - len,
+                       "Calculated-BSSID-Mask: %pM\n",
+                       iter_data.mask);
 
        if (len > size)
                len = size;
@@ -629,9 +785,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
 }
 
 void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
-                      struct ath_tx_status *ts)
+                      struct ath_tx_status *ts, struct ath_txq *txq)
 {
-       int qnum = skb_get_queue_mapping(bf->bf_mpdu);
+       int qnum = txq->axq_qnum;
 
        TX_STAT_INC(qnum, tx_pkts_all);
        sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
@@ -666,6 +822,20 @@ static const struct file_operations fops_xmit = {
        .llseek = default_llseek,
 };
 
+static const struct file_operations fops_stations = {
+       .read = read_file_stations,
+       .open = ath9k_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static const struct file_operations fops_misc = {
+       .read = read_file_misc,
+       .open = ath9k_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static ssize_t read_file_recv(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
@@ -871,6 +1041,42 @@ static const struct file_operations fops_regval = {
        .llseek = default_llseek,
 };
 
+#define REGDUMP_LINE_SIZE      20
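+/* Each dump line is "0x%06x 0x%08x\n": 8 + 1 + 10 + 1 = 20 bytes. */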
+
+static int open_file_regdump(struct inode *inode, struct file *file)
+{
+       struct ath_softc *sc = inode->i_private;
+       unsigned int len = 0;
+       u8 *buf;
+       int i;
+       unsigned long num_regs, regdump_len, max_reg_offset;
+
+       max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+       num_regs = max_reg_offset / 4 + 1;
+       regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
+       buf = vmalloc(regdump_len);
+       if (!buf)
+               return -ENOMEM;
+
+       ath9k_ps_wakeup(sc);
+       for (i = 0; i < num_regs; i++)
+               len += scnprintf(buf + len, regdump_len - len,
+                       "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+       ath9k_ps_restore(sc);
+
+       file->private_data = buf;
+
+       return 0;
+}
+
+static const struct file_operations fops_regdump = {
+       .open = open_file_regdump,
+       .read = ath9k_debugfs_read_buf,
+       .release = ath9k_debugfs_release_buf,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek, /* read accesses f_pos */
+};
+
 int ath9k_init_debug(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
@@ -903,6 +1109,14 @@ int ath9k_init_debug(struct ath_hw *ah)
                        sc, &fops_xmit))
                goto err;
 
+       if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
+                       sc, &fops_stations))
+               goto err;
+
+       if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
+                       sc, &fops_misc))
+               goto err;
+
        if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
                        sc, &fops_recv))
                goto err;
@@ -927,6 +1141,10 @@ int ath9k_init_debug(struct ath_hw *ah)
                        sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
                goto err;
 
+       if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
+                       sc, &fops_regdump))
+               goto err;
+
        sc->debug.regidx = 0;
        return 0;
 err:
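The debugfs readers added above all follow the same bounded-append pattern: snprintf() into a fixed kzalloc'd buffer, bail out to a done label once len reaches size, clamp len, and hand the result to simple_read_from_buffer(). A minimal standalone sketch of that pattern, assuming the usual linux/fs.h, linux/slab.h and linux/uaccess.h includes; the function name and loop body here are illustrative only, not part of the patch:

	static ssize_t read_file_example(struct file *file, char __user *user_buf,
					 size_t count, loff_t *ppos)
	{
		char *buf;
		unsigned int len = 0, size = 4096;
		ssize_t retval;
		int i;

		buf = kzalloc(size, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		for (i = 0; i < 128; i++) {
			len += snprintf(buf + len, size - len, "item %d\n", i);
			if (len >= size)
				goto done;	/* buffer full, stop appending */
		}

	done:
		if (len > size)
			len = size;
		retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
		kfree(buf);
		return retval;
	}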
index 1e5078b..59338de 100644 (file)
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
  * @queued: Total MPDUs (non-aggr) queued
  * @completed: Total MPDUs (non-aggr) completed
  * @a_aggr: Total no. of aggregates queued
- * @a_queued: Total AMPDUs queued
+ * @a_queued_hw: Total AMPDUs queued to hardware
+ * @a_queued_sw: Total AMPDUs queued to software queues
  * @a_completed: Total AMPDUs completed
  * @a_retries: No. of AMPDUs retried (SW)
  * @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
  * @desc_cfg_err: Descriptor configuration errors
  * @data_urn: TX data underrun errors
  * @delim_urn: TX delimiter underrun errors
+ * @puttxbuf: Number of times the hardware was given a tx buffer to write
+ * @txstart: Number of times the hardware was told to start tx
+ * @txprocdesc: Number of times a tx descriptor was processed
  */
 struct ath_tx_stats {
        u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
        u32 queued;
        u32 completed;
        u32 a_aggr;
-       u32 a_queued;
+       u32 a_queued_hw;
+       u32 a_queued_sw;
        u32 a_completed;
        u32 a_retries;
        u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
        u32 desc_cfg_err;
        u32 data_underrun;
        u32 delim_underrun;
+       u32 puttxbuf;
+       u32 txstart;
+       u32 txprocdesc;
 };
 
 /**
@@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah);
 
 void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
 void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
-                      struct ath_tx_status *ts);
+                      struct ath_tx_status *ts, struct ath_txq *txq);
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
 
 #else
@@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
 
 static inline void ath_debug_stat_tx(struct ath_softc *sc,
                                     struct ath_buf *bf,
-                                    struct ath_tx_status *ts)
+                                    struct ath_tx_status *ts,
+                                    struct ath_txq *txq)
 {
 }
 
index d051631..8c18bed 100644 (file)
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
        return false;
 }
 
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+                                 int eep_start_loc, int size)
+{
+       int i = 0, j, addr;
+       u32 addrdata[8];
+       u32 data[8];
+
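+       /* Collect EEPROM word offsets and read them back eight at a time. */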
+       for (addr = 0; addr < size; addr++) {
+               addrdata[i] = AR5416_EEPROM_OFFSET +
+                       ((addr + eep_start_loc) << AR5416_EEPROM_S);
+               i++;
+               if (i == 8) {
+                       REG_READ_MULTI(ah, addrdata, data, i);
+
+                       for (j = 0; j < i; j++) {
+                               *eep_data = data[j];
+                               eep_data++;
+                       }
+                       i = 0;
+               }
+       }
+
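+       /* Flush any remaining, partially filled batch of reads. */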
+       if (i != 0) {
+               REG_READ_MULTI(ah, addrdata, data, i);
+
+               for (j = 0; j < i; j++) {
+                       *eep_data = data[j];
+                       eep_data++;
+               }
+       }
+}
+
 bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
 {
        return common->bus_ops->eeprom_read(common, off, data);
index 58e2ddc..bd82447 100644 (file)
@@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
 bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
                                    u16 *indexL, u16 *indexR);
 bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+                                 int eep_start_loc, int size);
 void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
                             u8 *pVpdList, u16 numIntercepts,
                             u8 *pRetVpdList);
index fbdff7e..bc77a30 100644 (file)
@@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
        return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
 }
 
-static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
-{
 #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+
+static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
        struct ath_common *common = ath9k_hw_common(ah);
        u16 *eep_data = (u16 *)&ah->eeprom.map4k;
-       int addr, eep_start_loc = 0;
-
-       eep_start_loc = 64;
-
-       if (!ath9k_hw_use_flash(ah)) {
-               ath_dbg(common, ATH_DBG_EEPROM,
-                       "Reading from EEPROM, not flash\n");
-       }
+       int addr, eep_start_loc = 64;
 
        for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
                if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
@@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
        }
 
        return true;
-#undef SIZE_EEPROM_4K
 }
 
+static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
+{
+       u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+
+       ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
+
+       return true;
+}
+
+static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_hw_use_flash(ah)) {
+               ath_dbg(common, ATH_DBG_EEPROM,
+                       "Reading from EEPROM, not flash\n");
+       }
+
+       if (common->bus_ops->ath_bus_type == ATH_USB)
+               return __ath9k_hw_usb_4k_fill_eeprom(ah);
+       else
+               return __ath9k_hw_4k_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_4K
+
 static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 {
 #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
index 9b6bc8a..8cd8333 100644 (file)
@@ -17,7 +17,7 @@
 #include "hw.h"
 #include "ar9002_phy.h"
 
-#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
 
 static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
 {
@@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
        return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
 }
 
-static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
 {
        struct ar9287_eeprom *eep = &ah->eeprom.map9287;
        struct ath_common *common = ath9k_hw_common(ah);
        u16 *eep_data;
-       int addr, eep_start_loc;
+       int addr, eep_start_loc = AR9287_EEP_START_LOC;
        eep_data = (u16 *)eep;
 
-       if (common->bus_ops->ath_bus_type == ATH_USB)
-               eep_start_loc = AR9287_HTC_EEP_START_LOC;
-       else
-               eep_start_loc = AR9287_EEP_START_LOC;
-
-       if (!ath9k_hw_use_flash(ah)) {
-               ath_dbg(common, ATH_DBG_EEPROM,
-                       "Reading from EEPROM, not flash\n");
-       }
-
-       for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+       for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
                if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
                                         eep_data)) {
                        ath_dbg(common, ATH_DBG_EEPROM,
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
        return true;
 }
 
+static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+       u16 *eep_data = (u16 *)&ah->eeprom.map9287;
+
+       ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+                                    AR9287_HTC_EEP_START_LOC,
+                                    SIZE_EEPROM_AR9287);
+       return true;
+}
+
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_hw_use_flash(ah)) {
+               ath_dbg(common, ATH_DBG_EEPROM,
+                       "Reading from EEPROM, not flash\n");
+       }
+
+       if (common->bus_ops->ath_bus_type == ATH_USB)
+               return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
+       else
+               return __ath9k_hw_ar9287_fill_eeprom(ah);
+}
+
 static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
 {
        u32 sum = 0, el, integer;
@@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
                                need_swap = true;
                                eepdata = (u16 *)(&ah->eeprom);
 
-                               for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+                               for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
                                        temp = swab16(*eepdata);
                                        *eepdata = temp;
                                        eepdata++;
index 749a936..fccd87d 100644 (file)
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
        return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
 }
 
-static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
-{
 #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+
+static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
        struct ath_common *common = ath9k_hw_common(ah);
        u16 *eep_data = (u16 *)&ah->eeprom.def;
        int addr, ar5416_eep_start_loc = 0x100;
@@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
                eep_data++;
        }
        return true;
-#undef SIZE_EEPROM_DEF
 }
 
+static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
+{
+       u16 *eep_data = (u16 *)&ah->eeprom.def;
+
+       ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+                                    0x100, SIZE_EEPROM_DEF);
+       return true;
+}
+
+static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_hw_use_flash(ah)) {
+               ath_dbg(common, ATH_DBG_EEPROM,
+                       "Reading from EEPROM, not flash\n");
+       }
+
+       if (common->bus_ops->ath_bus_type == ATH_USB)
+               return __ath9k_hw_usb_def_fill_eeprom(ah);
+       else
+               return __ath9k_hw_def_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_DEF
+
 static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 {
        struct ar5416_eeprom_def *eep =
@@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
        }
 
        /* Enable fixup for AR_AN_TOP2 if necessary */
-       if (AR_SREV_9280_20_OR_LATER(ah) &&
-           (eep->baseEepHeader.version & 0xff) > 0x0a &&
-           eep->baseEepHeader.pwdclkind == 0)
+       if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
+           ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+           (eep->baseEepHeader.pwdclkind == 0))
                ah->need_an_top2_fixup = 1;
 
        if ((common->bus_ops->ath_bus_type == ATH_USB) &&
index 1337640..0fb8f8a 100644 (file)
 /*      LED functions          */
 /********************************/
 
-static void ath_led_blink_work(struct work_struct *work)
-{
-       struct ath_softc *sc = container_of(work, struct ath_softc,
-                                           ath_led_blink_work.work);
-
-       if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
-               return;
-
-       if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
-           (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
-               ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-       else
-               ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
-                                 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
-       ieee80211_queue_delayed_work(sc->hw,
-                                    &sc->ath_led_blink_work,
-                                    (sc->sc_flags & SC_OP_LED_ON) ?
-                                       msecs_to_jiffies(sc->led_off_duration) :
-                                       msecs_to_jiffies(sc->led_on_duration));
-
-       sc->led_on_duration = sc->led_on_cnt ?
-                       max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
-                       ATH_LED_ON_DURATION_IDLE;
-       sc->led_off_duration = sc->led_off_cnt ?
-                       max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
-                       ATH_LED_OFF_DURATION_IDLE;
-       sc->led_on_cnt = sc->led_off_cnt = 0;
-       if (sc->sc_flags & SC_OP_LED_ON)
-               sc->sc_flags &= ~SC_OP_LED_ON;
-       else
-               sc->sc_flags |= SC_OP_LED_ON;
-}
-
+#ifdef CONFIG_MAC80211_LEDS
 static void ath_led_brightness(struct led_classdev *led_cdev,
                               enum led_brightness brightness)
 {
-       struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
-       struct ath_softc *sc = led->sc;
-
-       switch (brightness) {
-       case LED_OFF:
-               if (led->led_type == ATH_LED_ASSOC ||
-                   led->led_type == ATH_LED_RADIO) {
-                       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
-                               (led->led_type == ATH_LED_RADIO));
-                       sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
-                       if (led->led_type == ATH_LED_RADIO)
-                               sc->sc_flags &= ~SC_OP_LED_ON;
-               } else {
-                       sc->led_off_cnt++;
-               }
-               break;
-       case LED_FULL:
-               if (led->led_type == ATH_LED_ASSOC) {
-                       sc->sc_flags |= SC_OP_LED_ASSOCIATED;
-                       if (led_blink)
-                               ieee80211_queue_delayed_work(sc->hw,
-                                                    &sc->ath_led_blink_work, 0);
-               } else if (led->led_type == ATH_LED_RADIO) {
-                       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-                       sc->sc_flags |= SC_OP_LED_ON;
-               } else {
-                       sc->led_on_cnt++;
-               }
-               break;
-       default:
-               break;
-       }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
-                           char *trigger)
-{
-       int ret;
-
-       led->sc = sc;
-       led->led_cdev.name = led->name;
-       led->led_cdev.default_trigger = trigger;
-       led->led_cdev.brightness_set = ath_led_brightness;
-
-       ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
-       if (ret)
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Failed to register led:%s", led->name);
-       else
-               led->registered = 1;
-       return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
-       if (led->registered) {
-               led_classdev_unregister(&led->led_cdev);
-               led->registered = 0;
-       }
+       struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
+       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
 }
 
 void ath_deinit_leds(struct ath_softc *sc)
 {
-       ath_unregister_led(&sc->assoc_led);
-       sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
-       ath_unregister_led(&sc->tx_led);
-       ath_unregister_led(&sc->rx_led);
-       ath_unregister_led(&sc->radio_led);
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+       if (!sc->led_registered)
+               return;
+
+       ath_led_brightness(&sc->led_cdev, LED_OFF);
+       led_classdev_unregister(&sc->led_cdev);
 }
 
 void ath_init_leds(struct ath_softc *sc)
 {
-       char *trigger;
        int ret;
 
        if (AR_SREV_9287(sc->sc_ah))
                sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+       else if (AR_SREV_9485(sc->sc_ah))
+               sc->sc_ah->led_pin = ATH_LED_PIN_9485;
        else
                sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
 
@@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc)
        /* LED off, active low */
        ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
-       if (led_blink)
-               INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
-       trigger = ieee80211_get_radio_led_name(sc->hw);
-       snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
-               "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->radio_led, trigger);
-       sc->radio_led.led_type = ATH_LED_RADIO;
-       if (ret)
-               goto fail;
-
-       trigger = ieee80211_get_assoc_led_name(sc->hw);
-       snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
-               "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->assoc_led, trigger);
-       sc->assoc_led.led_type = ATH_LED_ASSOC;
-       if (ret)
-               goto fail;
-
-       trigger = ieee80211_get_tx_led_name(sc->hw);
-       snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
-               "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->tx_led, trigger);
-       sc->tx_led.led_type = ATH_LED_TX;
-       if (ret)
-               goto fail;
-
-       trigger = ieee80211_get_rx_led_name(sc->hw);
-       snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
-               "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->rx_led, trigger);
-       sc->rx_led.led_type = ATH_LED_RX;
-       if (ret)
-               goto fail;
-
-       return;
-
-fail:
-       if (led_blink)
-               cancel_delayed_work_sync(&sc->ath_led_blink_work);
-       ath_deinit_leds(sc);
+       if (!led_blink)
+               sc->led_cdev.default_trigger =
+                       ieee80211_get_radio_led_name(sc->hw);
+
+       snprintf(sc->led_name, sizeof(sc->led_name),
+               "ath9k-%s", wiphy_name(sc->hw->wiphy));
+       sc->led_cdev.name = sc->led_name;
+       sc->led_cdev.brightness_set = ath_led_brightness;
+
+       ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
+       if (ret < 0)
+               return;
+
+       sc->led_registered = true;
 }
+#endif
 
 /*******************/
 /*     Rfkill     */
@@ -201,8 +85,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
 
 void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        bool blocked = !!ath_is_rfkill_set(sc);
 
        wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
index 5ab3084..f1b8af6 100644 (file)
@@ -52,6 +52,9 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
        { USB_DEVICE(0x083A, 0xA704),
          .driver_info = AR9280_USB },  /* SMC Networks */
 
+       { USB_DEVICE(0x0cf3, 0x20ff),
+         .driver_info = STORAGE_DEVICE },
+
        { },
 };
 
@@ -219,8 +222,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
        struct tx_buf *tx_buf = NULL;
        struct sk_buff *nskb = NULL;
        int ret = 0, i;
-       u16 *hdr, tx_skb_cnt = 0;
+       u16 tx_skb_cnt = 0;
        u8 *buf;
+       __le16 *hdr;
 
        if (hif_dev->tx.tx_skb_cnt == 0)
                return 0;
@@ -245,9 +249,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
 
                buf = tx_buf->buf;
                buf += tx_buf->offset;
-               hdr = (u16 *)buf;
-               *hdr++ = nskb->len;
-               *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+               hdr = (__le16 *)buf;
+               *hdr++ = cpu_to_le16(nskb->len);
+               *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
                buf += 4;
                memcpy(buf, nskb->data, nskb->len);
                tx_buf->len = nskb->len + 4;
@@ -913,13 +917,11 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
        if (ret) {
                dev_err(&hif_dev->udev->dev,
                        "ath9k_htc: Unable to allocate URBs\n");
-               goto err_urb;
+               goto err_fw_download;
        }
 
        return 0;
 
-err_urb:
-       ath9k_hif_usb_dealloc_urbs(hif_dev);
 err_fw_download:
        release_firmware(hif_dev->firmware);
 err_fw_req:
@@ -934,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
                release_firmware(hif_dev->firmware);
 }
 
+/*
+ * An exact copy of the function from zd1211rw.
+ */
+static int send_eject_command(struct usb_interface *interface)
+{
+       struct usb_device *udev = interface_to_usbdev(interface);
+       struct usb_host_interface *iface_desc = &interface->altsetting[0];
+       struct usb_endpoint_descriptor *endpoint;
+       unsigned char *cmd;
+       u8 bulk_out_ep;
+       int r;
+
+       /* Find bulk out endpoint */
+       for (r = 1; r >= 0; r--) {
+               endpoint = &iface_desc->endpoint[r].desc;
+               if (usb_endpoint_dir_out(endpoint) &&
+                   usb_endpoint_xfer_bulk(endpoint)) {
+                       bulk_out_ep = endpoint->bEndpointAddress;
+                       break;
+               }
+       }
+       if (r == -1) {
+               dev_err(&udev->dev,
+                       "ath9k_htc: Could not find bulk out endpoint\n");
+               return -ENODEV;
+       }
+
+       cmd = kzalloc(31, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENODEV;
+
+       /* USB bulk command block */
+       cmd[0] = 0x55;  /* bulk command signature */
+       cmd[1] = 0x53;  /* bulk command signature */
+       cmd[2] = 0x42;  /* bulk command signature */
+       cmd[3] = 0x43;  /* bulk command signature */
+       cmd[14] = 6;    /* command length */
+
+       cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
+       cmd[19] = 0x2;  /* eject disc */
+
+       dev_info(&udev->dev, "Ejecting storage device...\n");
+       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
+               cmd, 31, NULL, 2000);
+       kfree(cmd);
+       if (r)
+               return r;
+
+       /* At this point, the device disconnects and reconnects with the real
+        * ID numbers. */
+
+       usb_set_intfdata(interface, NULL);
+       return 0;
+}
+
 static int ath9k_hif_usb_probe(struct usb_interface *interface,
                               const struct usb_device_id *id)
 {
@@ -941,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
        struct hif_device_usb *hif_dev;
        int ret = 0;
 
+       if (id->driver_info == STORAGE_DEVICE)
+               return send_eject_command(interface);
+
        hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
        if (!hif_dev) {
                ret = -ENOMEM;
@@ -1027,12 +1087,13 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
        struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
        bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
 
-       if (hif_dev) {
-               ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
-               ath9k_htc_hw_free(hif_dev->htc_handle);
-               ath9k_hif_usb_dev_deinit(hif_dev);
-               usb_set_intfdata(interface, NULL);
-       }
+       if (!hif_dev)
+               return;
+
+       ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+       ath9k_htc_hw_free(hif_dev->htc_handle);
+       ath9k_hif_usb_dev_deinit(hif_dev);
+       usb_set_intfdata(interface, NULL);
 
        if (!unplugged && (hif_dev->flags & HIF_USB_START))
                ath9k_hif_usb_reboot(udev);
index 780ac5e..753a245 100644 (file)
@@ -32,6 +32,7 @@
 #include "wmi.h"
 
 #define ATH_STA_SHORT_CALINTERVAL 1000    /* 1 second */
+#define ATH_AP_SHORT_CALINTERVAL  100     /* 100 ms */
 #define ATH_ANI_POLLINTERVAL      100     /* 100 ms */
 #define ATH_LONG_CALINTERVAL      30000   /* 30 seconds */
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
@@ -204,8 +205,50 @@ struct ath9k_htc_target_stats {
        __be32 ht_tx_xretries;
 } __packed;
 
+#define ATH9K_HTC_MAX_VIF 2
+#define ATH9K_HTC_MAX_BCN_VIF 2
+
+#define INC_VIF(_priv, _type) do {             \
+               switch (_type) {                \
+               case NL80211_IFTYPE_STATION:    \
+                       _priv->num_sta_vif++;   \
+                       break;                  \
+               case NL80211_IFTYPE_ADHOC:      \
+                       _priv->num_ibss_vif++;  \
+                       break;                  \
+               case NL80211_IFTYPE_AP:         \
+                       _priv->num_ap_vif++;    \
+                       break;                  \
+               default:                        \
+                       break;                  \
+               }                               \
+       } while (0)
+
+#define DEC_VIF(_priv, _type) do {             \
+               switch (_type) {                \
+               case NL80211_IFTYPE_STATION:    \
+                       _priv->num_sta_vif--;   \
+                       break;                  \
+               case NL80211_IFTYPE_ADHOC:      \
+                       _priv->num_ibss_vif--;  \
+                       break;                  \
+               case NL80211_IFTYPE_AP:         \
+                       _priv->num_ap_vif--;    \
+                       break;                  \
+               default:                        \
+                       break;                  \
+               }                               \
+       } while (0)
+
 struct ath9k_htc_vif {
        u8 index;
+       u16 seq_no;
+       bool beacon_configured;
+};
+
+struct ath9k_vif_iter_data {
+       const u8 *hw_macaddr;
+       u8 mask[ETH_ALEN];
 };
 
 #define ATH9K_HTC_MAX_STA 8
@@ -310,10 +353,8 @@ struct ath_led {
 
 struct htc_beacon_config {
        u16 beacon_interval;
-       u16 listen_interval;
        u16 dtim_period;
        u16 bmiss_timeout;
-       u8 dtim_count;
 };
 
 struct ath_btcoex {
@@ -333,13 +374,12 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
 #define OP_SCANNING               BIT(1)
 #define OP_LED_ASSOCIATED         BIT(2)
 #define OP_LED_ON                 BIT(3)
-#define OP_PREAMBLE_SHORT         BIT(4)
-#define OP_PROTECT_ENABLE         BIT(5)
-#define OP_ASSOCIATED             BIT(6)
-#define OP_ENABLE_BEACON          BIT(7)
-#define OP_LED_DEINIT             BIT(8)
-#define OP_BT_PRIORITY_DETECTED    BIT(9)
-#define OP_BT_SCAN                 BIT(10)
+#define OP_ENABLE_BEACON          BIT(4)
+#define OP_LED_DEINIT             BIT(5)
+#define OP_BT_PRIORITY_DETECTED    BIT(6)
+#define OP_BT_SCAN                 BIT(7)
+#define OP_ANI_RUNNING             BIT(8)
+#define OP_TSF_RESET               BIT(9)
 
 struct ath9k_htc_priv {
        struct device *dev;
@@ -358,15 +398,24 @@ struct ath9k_htc_priv {
        enum htc_endpoint_id data_vi_ep;
        enum htc_endpoint_id data_vo_ep;
 
+       u8 vif_slot;
+       u8 mon_vif_idx;
+       u8 sta_slot;
+       u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
+       u8 num_ibss_vif;
+       u8 num_sta_vif;
+       u8 num_ap_vif;
+
        u16 op_flags;
        u16 curtxpow;
        u16 txpowlimit;
        u16 nvifs;
        u16 nstations;
-       u16 seq_no;
        u32 bmiss_cnt;
+       bool rearm_ani;
+       bool reconfig_beacon;
 
-       struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS];
+       struct ath9k_hw_cal_data caldata;
 
        spinlock_t beacon_lock;
 
@@ -382,7 +431,7 @@ struct ath9k_htc_priv {
        struct ath9k_htc_rx rx;
        struct tasklet_struct tx_tasklet;
        struct sk_buff_head tx_queue;
-       struct delayed_work ath9k_ani_work;
+       struct delayed_work ani_work;
        struct work_struct ps_work;
        struct work_struct fatal_work;
 
@@ -424,6 +473,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv);
 void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
 void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
                             struct ieee80211_vif *vif);
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
 void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
 
 void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
@@ -436,8 +486,9 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
 int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
 void ath9k_htc_station_work(struct work_struct *work);
 void ath9k_htc_aggr_work(struct work_struct *work);
-void ath9k_ani_work(struct work_struct *work);;
-void ath_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_ani_work(struct work_struct *work);
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
 
 int ath9k_tx_init(struct ath9k_htc_priv *priv);
 void ath9k_tx_tasklet(unsigned long data);
@@ -460,7 +511,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
 void ath9k_ps_work(struct work_struct *work);
 bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
                        enum ath9k_power_mode mode);
-void ath_update_txpow(struct ath9k_htc_priv *priv);
 
 void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
 void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
index 87cc65a..8d1d879 100644 (file)
@@ -123,8 +123,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
        /* TSF out of range threshold fixed at 1 second */
        bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
 
-       ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
-       ath_dbg(common, ATH_DBG_BEACON,
+       ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+               intval, tsf, tsftu);
+       ath_dbg(common, ATH_DBG_CONFIG,
                "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
                bs.bs_bmissthreshold, bs.bs_sleepduration,
                bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
        WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
 }
 
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+                                      struct htc_beacon_config *bss_conf)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       enum ath9k_int imask = 0;
+       u32 nexttbtt, intval, tsftu;
+       __be32 htc_imask = 0;
+       int ret;
+       u8 cmd_rsp;
+       u64 tsf;
+
+       intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+       intval /= ATH9K_HTC_MAX_BCN_VIF;
+       nexttbtt = intval;
+
+       if (priv->op_flags & OP_TSF_RESET) {
+               intval |= ATH9K_BEACON_RESET_TSF;
+               priv->op_flags &= ~OP_TSF_RESET;
+       } else {
+               /*
+                * Pull nexttbtt forward to reflect the current TSF.
+                */
+               tsf = ath9k_hw_gettsf64(priv->ah);
+               tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+               do {
+                       nexttbtt += intval;
+               } while (nexttbtt < tsftu);
+       }
+
+       intval |= ATH9K_BEACON_ENA;
+
+       if (priv->op_flags & OP_ENABLE_BEACON)
+               imask |= ATH9K_INT_SWBA;
+
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n",
+               bss_conf->beacon_interval, nexttbtt, imask);
+
+       WMI_CMD(WMI_DISABLE_INTR_CMDID);
+       ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+       priv->bmiss_cnt = 0;
+       htc_imask = cpu_to_be32(imask);
+       WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
 static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
                                          struct htc_beacon_config *bss_conf)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        enum ath9k_int imask = 0;
-       u32 nexttbtt, intval;
+       u32 nexttbtt, intval, tsftu;
        __be32 htc_imask = 0;
        int ret;
        u8 cmd_rsp;
+       u64 tsf;
 
        intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
        nexttbtt = intval;
+
+       /*
+        * Pull nexttbtt forward to reflect the current TSF.
+        */
+       tsf = ath9k_hw_gettsf64(priv->ah);
+       tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+       do {
+               nexttbtt += intval;
+       } while (nexttbtt < tsftu);
+
        intval |= ATH9K_BEACON_ENA;
        if (priv->op_flags & OP_ENABLE_BEACON)
                imask |= ATH9K_INT_SWBA;
 
-       ath_dbg(common, ATH_DBG_BEACON,
-               "IBSS Beacon config, intval: %d, imask: 0x%x\n",
-               bss_conf->beacon_interval, imask);
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n",
+               bss_conf->beacon_interval, nexttbtt, imask);
 
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
        ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
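The nexttbtt catch-up loops above are plain integer arithmetic; a quick illustrative check with made-up numbers (intval = 100 TU, current tsftu = 523), shown as a sketch rather than driver code:

	u32 intval = 100, tsftu = 523;	/* illustrative values only */
	u32 nexttbtt = intval;

	do {
		nexttbtt += intval;
	} while (nexttbtt < tsftu);
	/* nexttbtt == 600: the first beacon slot strictly after the current TSF */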
@@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                struct ieee80211_hdr *hdr =
                        (struct ieee80211_hdr *) beacon->data;
-               priv->seq_no += 0x10;
+               avp->seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-               hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+               hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
        }
 
        tx_ctl.type = ATH9K_HTC_NORMAL;
@@ -253,30 +310,123 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
        }
 }
 
+static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *beacon_configured = (bool *)data;
+       struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           avp->beacon_configured)
+               *beacon_configured = true;
+}
+
+static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
+                                         struct ieee80211_vif *vif)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       bool beacon_configured;
+
+       /*
+        * Changing the beacon interval when multiple AP interfaces
+        * are configured will affect beacon transmission of all
+        * of them.
+        */
+       if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+           (priv->num_ap_vif > 1) &&
+           (vif->type == NL80211_IFTYPE_AP) &&
+           (cur_conf->beacon_interval != bss_conf->beacon_int)) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Changing beacon interval of multiple AP interfaces!\n");
+               return false;
+       }
+
+       /*
+        * If the HW is operating in AP mode, any new station interfaces that
+        * are added cannot change the beacon parameters.
+        */
+       if (priv->num_ap_vif &&
+           (vif->type != NL80211_IFTYPE_AP)) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "HW in AP mode, cannot set STA beacon parameters\n");
+               return false;
+       }
+
+       /*
+        * The beacon parameters are configured only for the first
+        * station interface.
+        */
+       if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+           (priv->num_sta_vif > 1) &&
+           (vif->type == NL80211_IFTYPE_STATION)) {
+               beacon_configured = false;
+               ieee80211_iterate_active_interfaces_atomic(priv->hw,
+                                                          ath9k_htc_beacon_iter,
+                                                          &beacon_configured);
+
+               if (beacon_configured) {
+                       ath_dbg(common, ATH_DBG_CONFIG,
+                               "Beacon already configured for a station interface\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
                             struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+       if (!ath9k_htc_check_beacon_config(priv, vif))
+               return;
 
        cur_conf->beacon_interval = bss_conf->beacon_int;
        if (cur_conf->beacon_interval == 0)
                cur_conf->beacon_interval = 100;
 
        cur_conf->dtim_period = bss_conf->dtim_period;
-       cur_conf->listen_interval = 1;
-       cur_conf->dtim_count = 1;
        cur_conf->bmiss_timeout =
                ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
 
        switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               ath9k_htc_beacon_config_sta(priv, cur_conf);
+               avp->beacon_configured = true;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+               break;
+       case NL80211_IFTYPE_AP:
+               ath9k_htc_beacon_config_ap(priv, cur_conf);
+               break;
+       default:
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Unsupported beaconing mode\n");
+               return;
+       }
+}
+
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+
+       switch (priv->ah->opmode) {
        case NL80211_IFTYPE_STATION:
                ath9k_htc_beacon_config_sta(priv, cur_conf);
                break;
        case NL80211_IFTYPE_ADHOC:
                ath9k_htc_beacon_config_adhoc(priv, cur_conf);
                break;
+       case NL80211_IFTYPE_AP:
+               ath9k_htc_beacon_config_ap(priv, cur_conf);
+               break;
        default:
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Unsupported beaconing mode\n");
index fe70f67..7e630a8 100644 (file)
@@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
                        ret, ah->curchan->channel);
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        /* Start RX */
        WMI_CMD(WMI_START_RECV_CMDID);
index 38433f9..fc67c93 100644 (file)
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
 {
        ath9k_htc_exit_debug(priv->ah);
        ath9k_hw_deinit(priv->ah);
-       tasklet_kill(&priv->swba_tasklet);
-       tasklet_kill(&priv->rx_tasklet);
-       tasklet_kill(&priv->tx_tasklet);
        kfree(priv->ah);
        priv->ah = NULL;
 }
@@ -297,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
        return be32_to_cpu(val);
 }
 
+static void ath9k_multi_regread(void *hw_priv, u32 *addr,
+                               u32 *val, u16 count)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       __be32 tmpaddr[8];
+       __be32 tmpval[8];
+       int i, ret;
+
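+       /* WMI takes big-endian addresses and returns big-endian values. */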
+       for (i = 0; i < count; i++)
+               tmpaddr[i] = cpu_to_be32(addr[i]);
+
+       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+                          (u8 *)tmpaddr, sizeof(u32) * count,
+                          (u8 *)tmpval, sizeof(u32) * count,
+                          100);
+       if (unlikely(ret)) {
+               ath_dbg(common, ATH_DBG_WMI,
+                       "Multiple REGISTER READ FAILED (count: %d)\n", count);
+       }
+
+       for (i = 0; i < count; i++)
+               val[i] = be32_to_cpu(tmpval[i]);
+}
+
 static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
 {
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -407,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
 
 static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_regread,
+       .multi_read = ath9k_multi_regread,
        .write = ath9k_regwrite,
        .enable_write_buffer = ath9k_enable_regwrite_buffer,
        .write_flush = ath9k_regwrite_flush,
@@ -653,7 +679,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
                     (unsigned long)priv);
        tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
                     (unsigned long)priv);
-       INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+       INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
        INIT_WORK(&priv->ps_work, ath9k_ps_work);
        INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
 
@@ -761,6 +787,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;
+       char hw_name[64];
 
        /* Bring up device */
        error = ath9k_init_priv(priv, devid, product, drv_info);
@@ -801,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
                        goto err_world;
        }
 
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
+               "BE:%d, BK:%d, VI:%d, VO:%d\n",
+               priv->wmi_cmd_ep,
+               priv->beacon_ep,
+               priv->cab_ep,
+               priv->uapsd_ep,
+               priv->mgmt_ep,
+               priv->data_be_ep,
+               priv->data_bk_ep,
+               priv->data_vi_ep,
+               priv->data_vo_ep);
+
+       ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
+       wiphy_info(hw->wiphy, "%s\n", hw_name);
+
        ath9k_init_leds(priv);
        ath9k_start_rfkill_poll(priv);
 
index f4d576b..db8c0c0 100644 (file)
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
 /* Utilities */
 /*************/
 
-void ath_update_txpow(struct ath9k_htc_priv *priv)
-{
-       struct ath_hw *ah = priv->ah;
-
-       if (priv->curtxpow != priv->txpowlimit) {
-               ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
-               /* read back in case value is clamped */
-               priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
-       }
-}
-
 /* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
 static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
                                              struct ath9k_channel *ichan)
@@ -116,12 +105,88 @@ void ath9k_ps_work(struct work_struct *work)
        ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
 }
 
+static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath9k_htc_priv *priv = data;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+       if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
+               priv->reconfig_beacon = true;
+
+       if (bss_conf->assoc) {
+               priv->rearm_ani = true;
+               priv->reconfig_beacon = true;
+       }
+}
+
+static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
+{
+       priv->rearm_ani = false;
+       priv->reconfig_beacon = false;
+
+       ieee80211_iterate_active_interfaces_atomic(priv->hw,
+                                                  ath9k_htc_vif_iter, priv);
+       if (priv->rearm_ani)
+               ath9k_htc_start_ani(priv);
+
+       if (priv->reconfig_beacon) {
+               ath9k_htc_ps_wakeup(priv);
+               ath9k_htc_beacon_reconfig(priv);
+               ath9k_htc_ps_restore(priv);
+       }
+}
+
+static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath9k_vif_iter_data *iter_data = data;
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+}
+
+static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+                                    struct ieee80211_vif *vif)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_vif_iter_data iter_data;
+
+       /*
+        * Use the hardware MAC address as a reference; the hardware uses it
+        * together with the BSSID mask when matching addresses.
+        */
+       iter_data.hw_macaddr = common->macaddr;
+       memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+       if (vif)
+               ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
+
+       /* Get list of all active MAC addresses */
+       ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
+                                                  &iter_data);
+
+       memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+       ath_hw_setbssidmask(common);
+}
+
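The helper above derives the BSSID mask by clearing every bit in which an active interface address differs from the hardware MAC address, so the hardware only compares the bits that all addresses share. A self-contained sketch of that derivation follows; the addresses are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void bssid_mask(const uint8_t hw[ETH_ALEN],
		       const uint8_t (*vifs)[ETH_ALEN], int nvifs,
		       uint8_t mask[ETH_ALEN])
{
	int v, i;

	memset(mask, 0xff, ETH_ALEN);			/* start fully strict */
	for (v = 0; v < nvifs; v++)
		for (i = 0; i < ETH_ALEN; i++)
			mask[i] &= ~(hw[i] ^ vifs[v][i]);	/* clear differing bits */
}

int main(void)
{
	uint8_t hw[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x30 };
	uint8_t vifs[2][ETH_ALEN] = {
		{ 0x00, 0x03, 0x7f, 0x10, 0x20, 0x31 },
		{ 0x00, 0x03, 0x7f, 0x10, 0x20, 0x32 },
	};
	uint8_t mask[ETH_ALEN];
	int i;

	bssid_mask(hw, vifs, 2, mask);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}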
+static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
+{
+       if (priv->num_ibss_vif)
+               priv->ah->opmode = NL80211_IFTYPE_ADHOC;
+       else if (priv->num_ap_vif)
+               priv->ah->opmode = NL80211_IFTYPE_AP;
+       else
+               priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+       ath9k_hw_setopmode(priv->ah);
+}
+
 void ath9k_htc_reset(struct ath9k_htc_priv *priv)
 {
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_channel *channel = priv->hw->conf.channel;
-       struct ath9k_hw_cal_data *caldata;
+       struct ath9k_hw_cal_data *caldata = NULL;
        enum htc_phymode mode;
        __be16 htc_mode;
        u8 cmd_rsp;
@@ -130,16 +195,14 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
 
-       if (priv->op_flags & OP_ASSOCIATED)
-               cancel_delayed_work_sync(&priv->ath9k_ani_work);
-
+       ath9k_htc_stop_ani(priv);
        ieee80211_stop_queues(priv->hw);
        htc_stop(priv->htc);
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
        WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
        WMI_CMD(WMI_STOP_RECV_CMDID);
 
-       caldata = &priv->caldata[channel->hw_value];
+       caldata = &priv->caldata;
        ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
        if (ret) {
                ath_err(common,
@@ -147,7 +210,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
                        channel->center_freq, ret);
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        WMI_CMD(WMI_START_RECV_CMDID);
        ath9k_host_rx_init(priv);
@@ -158,12 +222,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
 
        WMI_CMD(WMI_ENABLE_INTR_CMDID);
        htc_start(priv->htc);
-
-       if (priv->op_flags & OP_ASSOCIATED) {
-               ath9k_htc_beacon_config(priv, priv->vif);
-               ath_start_ani(priv);
-       }
-
+       ath9k_htc_vif_reconfig(priv);
        ieee80211_wake_queues(priv->hw);
 
        ath9k_htc_ps_restore(priv);
@@ -179,7 +238,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
        struct ieee80211_conf *conf = &common->hw->conf;
        bool fastcc;
        struct ieee80211_channel *channel = hw->conf.channel;
-       struct ath9k_hw_cal_data *caldata;
+       struct ath9k_hw_cal_data *caldata = NULL;
        enum htc_phymode mode;
        __be16 htc_mode;
        u8 cmd_rsp;
@@ -202,7 +261,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
                fastcc);
 
-       caldata = &priv->caldata[channel->hw_value];
+       if (!fastcc)
+               caldata = &priv->caldata;
        ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (ret) {
                ath_err(common,
@@ -211,7 +271,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                goto err;
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        WMI_CMD(WMI_START_RECV_CMDID);
        if (ret)
@@ -230,11 +291,23 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                goto err;
 
        htc_start(priv->htc);
+
+       if (!(priv->op_flags & OP_SCANNING) &&
+           !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+               ath9k_htc_vif_reconfig(priv);
+
 err:
        ath9k_htc_ps_restore(priv);
        return ret;
 }
 
+/*
+ * Monitor mode handling is a tad complicated because the firmware requires
+ * a dedicated interface to be created for it, while mac80211 doesn't
+ * associate any interface with the mode.
+ *
+ * So, for now, only one monitor interface can be configured.
+ */
 static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -244,9 +317,10 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 
        memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
        memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
-       hvif.index = 0; /* Should do for now */
+       hvif.index = priv->mon_vif_idx;
        WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
        priv->nvifs--;
+       priv->vif_slot &= ~(1 << priv->mon_vif_idx);
 }
 
 static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
@@ -254,70 +328,87 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct ath9k_htc_target_vif hvif;
        struct ath9k_htc_target_sta tsta;
-       int ret = 0;
+       int ret = 0, sta_idx;
        u8 cmd_rsp;
 
-       if (priv->nvifs > 0)
-               return -ENOBUFS;
+       if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
+           (priv->nstations >= ATH9K_HTC_MAX_STA)) {
+               ret = -ENOBUFS;
+               goto err_vif;
+       }
 
-       if (priv->nstations >= ATH9K_HTC_MAX_STA)
-               return -ENOBUFS;
+       sta_idx = ffz(priv->sta_slot);
+       if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
+               ret = -ENOBUFS;
+               goto err_vif;
+       }
 
        /*
         * Add an interface.
         */
-
        memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
        memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
 
        hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
-       priv->ah->opmode = NL80211_IFTYPE_MONITOR;
-       hvif.index = priv->nvifs;
+       hvif.index = ffz(priv->vif_slot);
 
        WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
        if (ret)
-               return ret;
+               goto err_vif;
+
+       /*
+        * Record the monitor interface index separately; it is needed
+        * later when the interface is brought down.
+        */
+       priv->mon_vif_idx = hvif.index;
+       priv->vif_slot |= (1 << hvif.index);
+
+       /*
+        * Set the hardware mode to monitor only if there are no
+        * other interfaces.
+        */
+       if (!priv->nvifs)
+               priv->ah->opmode = NL80211_IFTYPE_MONITOR;
 
        priv->nvifs++;
 
        /*
         * Associate a station with the interface for packet injection.
         */
-
        memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
 
        memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
 
        tsta.is_vif_sta = 1;
-       tsta.sta_index = priv->nstations;
+       tsta.sta_index = sta_idx;
        tsta.vif_index = hvif.index;
        tsta.maxampdu = 0xffff;
 
        WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
        if (ret) {
                ath_err(common, "Unable to add station entry for monitor mode\n");
-               goto err_vif;
+               goto err_sta;
        }
 
+       priv->sta_slot |= (1 << sta_idx);
        priv->nstations++;
-
-       /*
-        * Set chainmask etc. on the target.
-        */
-       ret = ath9k_htc_update_cap_target(priv);
-       if (ret)
-               ath_dbg(common, ATH_DBG_CONFIG,
-                       "Failed to update capability in target\n");
-
+       priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
        priv->ah->is_monitoring = true;
 
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Attached a monitor interface at idx: %d, sta idx: %d\n",
+               priv->mon_vif_idx, sta_idx);
+
        return 0;
 
-err_vif:
+err_sta:
        /*
         * Remove the interface from the target.
         */
        __ath9k_htc_remove_monitor_interface(priv);
+err_vif:
+       ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+
        return ret;
 }
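Both the monitor path above and the station code further down hand out indices from a bitmap: ffz() picks the first free slot, the bit is set while the slot is in use and cleared on removal, so indices can be reused out of order. A minimal userspace sketch of that pattern, with a local find_first_zero() standing in for the kernel's ffz(); the constants are invented for the example.

#include <stdio.h>

#define MAX_SLOTS 8

static unsigned long slot_map;

static int find_first_zero(unsigned long map)
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++)
		if (!(map & (1UL << i)))
			return i;
	return -1;			/* all slots busy */
}

static int alloc_slot(void)
{
	int idx = find_first_zero(slot_map);

	if (idx >= 0)
		slot_map |= 1UL << idx;	/* mark slot as used */
	return idx;
}

static void free_slot(int idx)
{
	slot_map &= ~(1UL << idx);	/* slot can be reused now */
}

int main(void)
{
	int a = alloc_slot();		/* 0 */
	int b = alloc_slot();		/* 1 */

	free_slot(a);
	printf("%d %d %d\n", a, b, alloc_slot());	/* last alloc reuses slot 0 */
	return 0;
}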
 
@@ -329,7 +420,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 
        __ath9k_htc_remove_monitor_interface(priv);
 
-       sta_idx = 0; /* Only single interface, for now */
+       sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
 
        WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
        if (ret) {
@@ -337,9 +428,14 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
                return ret;
        }
 
+       priv->sta_slot &= ~(1 << sta_idx);
        priv->nstations--;
        priv->ah->is_monitoring = false;
 
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Removed a monitor interface at idx: %d, sta idx: %d\n",
+               priv->mon_vif_idx, sta_idx);
+
        return 0;
 }
 
@@ -351,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
        struct ath9k_htc_target_sta tsta;
        struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
        struct ath9k_htc_sta *ista;
-       int ret;
+       int ret, sta_idx;
        u8 cmd_rsp;
 
        if (priv->nstations >= ATH9K_HTC_MAX_STA)
                return -ENOBUFS;
 
+       sta_idx = ffz(priv->sta_slot);
+       if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA))
+               return -ENOBUFS;
+
        memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
 
        if (sta) {
@@ -366,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                tsta.associd = common->curaid;
                tsta.is_vif_sta = 0;
                tsta.valid = true;
-               ista->index = priv->nstations;
+               ista->index = sta_idx;
        } else {
                memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
                tsta.is_vif_sta = 1;
        }
 
-       tsta.sta_index = priv->nstations;
+       tsta.sta_index = sta_idx;
        tsta.vif_index = avp->index;
        tsta.maxampdu = 0xffff;
        if (sta && sta->ht_cap.ht_supported)
@@ -387,12 +487,21 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                return ret;
        }
 
-       if (sta)
+       if (sta) {
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Added a station entry for: %pM (idx: %d)\n",
                        sta->addr, tsta.sta_index);
+       } else {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Added a station entry for VIF %d (idx: %d)\n",
+                       avp->index, tsta.sta_index);
+       }
 
+       priv->sta_slot |= (1 << sta_idx);
        priv->nstations++;
+       if (!sta)
+               priv->vif_sta_pos[avp->index] = sta_idx;
+
        return 0;
 }
 
@@ -401,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
                                    struct ieee80211_sta *sta)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
        struct ath9k_htc_sta *ista;
        int ret;
        u8 cmd_rsp, sta_idx;
@@ -409,7 +519,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                sta_idx = ista->index;
        } else {
-               sta_idx = 0;
+               sta_idx = priv->vif_sta_pos[avp->index];
        }
 
        WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
@@ -421,12 +531,19 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
                return ret;
        }
 
-       if (sta)
+       if (sta) {
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Removed a station entry for: %pM (idx: %d)\n",
                        sta->addr, sta_idx);
+       } else {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Removed a station entry for VIF %d (idx: %d)\n",
+                       avp->index, sta_idx);
+       }
 
+       priv->sta_slot &= ~(1 << sta_idx);
        priv->nstations--;
+
        return 0;
 }
 
@@ -808,7 +925,7 @@ void ath9k_htc_debug_remove_root(void)
 /* ANI */
 /*******/
 
-void ath_start_ani(struct ath9k_htc_priv *priv)
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -817,15 +934,22 @@ void ath_start_ani(struct ath9k_htc_priv *priv)
        common->ani.shortcal_timer = timestamp;
        common->ani.checkani_timer = timestamp;
 
-       ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+       priv->op_flags |= OP_ANI_RUNNING;
+
+       ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
                                     msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
 }
 
-void ath9k_ani_work(struct work_struct *work)
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
+{
+       cancel_delayed_work_sync(&priv->ani_work);
+       priv->op_flags &= ~OP_ANI_RUNNING;
+}
+
+void ath9k_htc_ani_work(struct work_struct *work)
 {
        struct ath9k_htc_priv *priv =
-               container_of(work, struct ath9k_htc_priv,
-                            ath9k_ani_work.work);
+               container_of(work, struct ath9k_htc_priv, ani_work.work);
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
        bool longcal = false;
@@ -834,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work)
        unsigned int timestamp = jiffies_to_msecs(jiffies);
        u32 cal_interval, short_cal_interval;
 
-       short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+       short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+               ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
 
        /* Only calibrate if awake */
        if (ah->power_mode != ATH9K_PM_AWAKE)
@@ -903,7 +1028,7 @@ set_timer:
        if (!common->ani.caldone)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
 
-       ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+       ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
                                     msecs_to_jiffies(cal_interval));
 }
 
@@ -911,7 +1036,7 @@ set_timer:
 /* mac80211 Callbacks */
 /**********************/
 
-static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct ath9k_htc_priv *priv = hw->priv;
@@ -924,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        padsize = padpos & 3;
        if (padsize && skb->len > padpos) {
                if (skb_headroom(skb) < padsize)
-                       return -1;
+                       goto fail_tx;
                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
        }
@@ -945,11 +1070,10 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto fail_tx;
        }
 
-       return 0;
+       return;
 
 fail_tx:
        dev_kfree_skb_any(skb);
-       return 0;
 }
 
 static int ath9k_htc_start(struct ieee80211_hw *hw)
@@ -987,7 +1111,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
                return ret;
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        mode = ath9k_htc_get_curmode(priv, init_channel);
        htc_mode = cpu_to_be16(mode);
@@ -997,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
 
        ath9k_host_rx_init(priv);
 
+       ret = ath9k_htc_update_cap_target(priv);
+       if (ret)
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Failed to update capability in target\n");
+
        priv->op_flags &= ~OP_INVALID;
        htc_start(priv->htc);
 
@@ -1025,12 +1155,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
        int ret = 0;
        u8 cmd_rsp;
 
-       /* Cancel all the running timers/work .. */
-       cancel_work_sync(&priv->fatal_work);
-       cancel_work_sync(&priv->ps_work);
-       cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
-       ath9k_led_stop_brightness(priv);
-
        mutex_lock(&priv->mutex);
 
        if (priv->op_flags & OP_INVALID) {
@@ -1044,16 +1168,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
        WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
        WMI_CMD(WMI_STOP_RECV_CMDID);
+
+       tasklet_kill(&priv->swba_tasklet);
+       tasklet_kill(&priv->rx_tasklet);
+       tasklet_kill(&priv->tx_tasklet);
+
        skb_queue_purge(&priv->tx_queue);
 
-       /* Remove monitor interface here */
-       if (ah->opmode == NL80211_IFTYPE_MONITOR) {
-               if (ath9k_htc_remove_monitor_interface(priv))
-                       ath_err(common, "Unable to remove monitor interface\n");
-               else
-                       ath_dbg(common, ATH_DBG_CONFIG,
-                               "Monitor interface removed\n");
-       }
+       mutex_unlock(&priv->mutex);
+
+       /* Cancel all the running timers/work .. */
+       cancel_work_sync(&priv->fatal_work);
+       cancel_work_sync(&priv->ps_work);
+       cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+       ath9k_htc_stop_ani(priv);
+       ath9k_led_stop_brightness(priv);
+
+       mutex_lock(&priv->mutex);
 
        if (ah->btcoex_hw.enabled) {
                ath9k_hw_btcoex_disable(ah);
@@ -1061,6 +1192,10 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
                        ath_htc_cancel_btcoex_work(priv);
        }
 
+       /* Remove a monitor interface if it's present. */
+       if (priv->ah->is_monitoring)
+               ath9k_htc_remove_monitor_interface(priv);
+
        ath9k_hw_phy_disable(ah);
        ath9k_hw_disable(ah);
        ath9k_htc_ps_restore(priv);
@@ -1084,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&priv->mutex);
 
-       /* Only one interface for now */
-       if (priv->nvifs > 0) {
-               ret = -ENOBUFS;
-               goto out;
+       if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
+               mutex_unlock(&priv->mutex);
+               return -ENOBUFS;
+       }
+
+       if (priv->num_ibss_vif ||
+           (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+               ath_err(common, "IBSS coexistence with other modes is not allowed\n");
+               mutex_unlock(&priv->mutex);
+               return -ENOBUFS;
+       }
+
+       if (((vif->type == NL80211_IFTYPE_AP) ||
+            (vif->type == NL80211_IFTYPE_ADHOC)) &&
+           ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
+               ath_err(common, "Max. number of beaconing interfaces reached\n");
+               mutex_unlock(&priv->mutex);
+               return -ENOBUFS;
        }
 
        ath9k_htc_ps_wakeup(priv);
@@ -1101,6 +1250,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
        case NL80211_IFTYPE_ADHOC:
                hvif.opmode = cpu_to_be32(HTC_M_IBSS);
                break;
+       case NL80211_IFTYPE_AP:
+               hvif.opmode = cpu_to_be32(HTC_M_HOSTAP);
+               break;
        default:
                ath_err(common,
                        "Interface type %d not yet supported\n", vif->type);
@@ -1108,34 +1260,39 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ath_dbg(common, ATH_DBG_CONFIG,
-               "Attach a VIF of type: %d\n", vif->type);
-
-       priv->ah->opmode = vif->type;
-
        /* Index starts from zero on the target */
-       avp->index = hvif.index = priv->nvifs;
+       avp->index = hvif.index = ffz(priv->vif_slot);
        hvif.rtsthreshold = cpu_to_be16(2304);
        WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
        if (ret)
                goto out;
 
-       priv->nvifs++;
-
        /*
         * We need a node in target to tx mgmt frames
         * before association.
         */
        ret = ath9k_htc_add_station(priv, vif, NULL);
-       if (ret)
+       if (ret) {
+               WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
                goto out;
+       }
 
-       ret = ath9k_htc_update_cap_target(priv);
-       if (ret)
-               ath_dbg(common, ATH_DBG_CONFIG,
-                       "Failed to update capability in target\n");
+       ath9k_htc_set_bssid_mask(priv, vif);
 
+       priv->vif_slot |= (1 << avp->index);
+       priv->nvifs++;
        priv->vif = vif;
+
+       INC_VIF(priv, vif->type);
+       ath9k_htc_set_opmode(priv);
+
+       if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+           !(priv->op_flags & OP_ANI_RUNNING))
+               ath9k_htc_start_ani(priv);
+
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+
 out:
        ath9k_htc_ps_restore(priv);
        mutex_unlock(&priv->mutex);
@@ -1153,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
        int ret = 0;
        u8 cmd_rsp;
 
-       ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
-
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
 
@@ -1163,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
        hvif.index = avp->index;
        WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
        priv->nvifs--;
+       priv->vif_slot &= ~(1 << avp->index);
 
        ath9k_htc_remove_station(priv, vif, NULL);
        priv->vif = NULL;
 
+       DEC_VIF(priv, vif->type);
+       ath9k_htc_set_opmode(priv);
+
+       /*
+        * Stop ANI only if there are no associated station interfaces.
+        */
+       if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
+               priv->rearm_ani = false;
+               ieee80211_iterate_active_interfaces_atomic(priv->hw,
+                                                  ath9k_htc_vif_iter, priv);
+               if (!priv->rearm_ani)
+                       ath9k_htc_stop_ani(priv);
+       }
+
+       ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+
        ath9k_htc_ps_restore(priv);
        mutex_unlock(&priv->mutex);
 }
@@ -1202,13 +1374,11 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
         * IEEE80211_CONF_CHANGE_CHANNEL is handled.
         */
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               if (conf->flags & IEEE80211_CONF_MONITOR) {
-                       if (ath9k_htc_add_monitor_interface(priv))
-                               ath_err(common, "Failed to set monitor mode\n");
-                       else
-                               ath_dbg(common, ATH_DBG_CONFIG,
-                                       "HW opmode set to Monitor mode\n");
-               }
+               if ((conf->flags & IEEE80211_CONF_MONITOR) &&
+                   !priv->ah->is_monitoring)
+                       ath9k_htc_add_monitor_interface(priv);
+               else if (priv->ah->is_monitoring)
+                       ath9k_htc_remove_monitor_interface(priv);
        }
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -1243,7 +1413,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
 
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
                priv->txpowlimit = 2 * conf->power_level;
-               ath_update_txpow(priv);
+               ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
+                                      priv->txpowlimit, &priv->curtxpow);
        }
 
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1430,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
+       bool set_assoc;
 
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
 
+       /*
+        * Set the HW AID/BSSID only for the first station interface
+        * or in IBSS mode.
+        */
+       set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
+                      ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+                       (priv->num_sta_vif == 1)));
+
        if (changed & BSS_CHANGED_ASSOC) {
-               common->curaid = bss_conf->assoc ?
-                                bss_conf->aid : 0;
-               ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
-                       bss_conf->assoc);
-
-               if (bss_conf->assoc) {
-                       priv->op_flags |= OP_ASSOCIATED;
-                       ath_start_ani(priv);
-               } else {
-                       priv->op_flags &= ~OP_ASSOCIATED;
-                       cancel_delayed_work_sync(&priv->ath9k_ani_work);
+               if (set_assoc) {
+                       ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+                               bss_conf->assoc);
+
+                       common->curaid = bss_conf->assoc ?
+                               bss_conf->aid : 0;
+
+                       if (bss_conf->assoc)
+                               ath9k_htc_start_ani(priv);
+                       else
+                               ath9k_htc_stop_ani(priv);
                }
        }
 
        if (changed & BSS_CHANGED_BSSID) {
-               /* Set BSSID */
-               memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
-               ath9k_hw_write_associd(ah);
+               if (set_assoc) {
+                       memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+                       ath9k_hw_write_associd(ah);
 
-               ath_dbg(common, ATH_DBG_CONFIG,
-                       "BSSID: %pM aid: 0x%x\n",
-                       common->curbssid, common->curaid);
+                       ath_dbg(common, ATH_DBG_CONFIG,
+                               "BSSID: %pM aid: 0x%x\n",
+                               common->curbssid, common->curaid);
+               }
        }
 
-       if ((changed & BSS_CHANGED_BEACON_INT) ||
-           (changed & BSS_CHANGED_BEACON) ||
-           ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-           bss_conf->enable_beacon)) {
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
                priv->op_flags |= OP_ENABLE_BEACON;
                ath9k_htc_beacon_config(priv, vif);
        }
 
-       if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-           !bss_conf->enable_beacon) {
-               priv->op_flags &= ~OP_ENABLE_BEACON;
-               ath9k_htc_beacon_config(priv, vif);
-       }
-
-       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-               ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
-                       bss_conf->use_short_preamble);
-               if (bss_conf->use_short_preamble)
-                       priv->op_flags |= OP_PREAMBLE_SHORT;
-               else
-                       priv->op_flags &= ~OP_PREAMBLE_SHORT;
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
+               /*
+                * Disable SWBA interrupt only if there are no
+                * AP/IBSS interfaces.
+                */
+               if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
+                       ath_dbg(common, ATH_DBG_CONFIG,
+                               "Beacon disabled for BSS: %pM\n",
+                               bss_conf->bssid);
+                       priv->op_flags &= ~OP_ENABLE_BEACON;
+                       ath9k_htc_beacon_config(priv, vif);
+               }
        }
 
-       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-               ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
-                       bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot &&
-                   hw->conf.channel->band != IEEE80211_BAND_5GHZ)
-                       priv->op_flags |= OP_PROTECT_ENABLE;
-               else
-                       priv->op_flags &= ~OP_PROTECT_ENABLE;
+       if (changed & BSS_CHANGED_BEACON_INT) {
+               /*
+                * Reset the HW TSF for the first AP interface.
+                */
+               if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+                   (priv->nvifs == 1) &&
+                   (priv->num_ap_vif == 1) &&
+                   (vif->type == NL80211_IFTYPE_AP)) {
+                       priv->op_flags |= OP_TSF_RESET;
+               }
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Beacon interval changed for BSS: %pM\n",
+                       bss_conf->bssid);
+               ath9k_htc_beacon_config(priv, vif);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1548,12 +1734,14 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  enum ieee80211_ampdu_mlme_action action,
                                  struct ieee80211_sta *sta,
-                                 u16 tid, u16 *ssn)
+                                 u16 tid, u16 *ssn, u8 buf_size)
 {
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath9k_htc_sta *ista;
        int ret = 0;
 
+       mutex_lock(&priv->mutex);
+
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                break;
@@ -1578,6 +1766,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
        }
 
+       mutex_unlock(&priv->mutex);
+
        return ret;
 }
 
@@ -1590,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
        priv->op_flags |= OP_SCANNING;
        spin_unlock_bh(&priv->beacon_lock);
        cancel_work_sync(&priv->ps_work);
-       if (priv->op_flags & OP_ASSOCIATED)
-               cancel_delayed_work_sync(&priv->ath9k_ani_work);
+       ath9k_htc_stop_ani(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -1600,14 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
        struct ath9k_htc_priv *priv = hw->priv;
 
        mutex_lock(&priv->mutex);
-       ath9k_htc_ps_wakeup(priv);
        spin_lock_bh(&priv->beacon_lock);
        priv->op_flags &= ~OP_SCANNING;
        spin_unlock_bh(&priv->beacon_lock);
-       if (priv->op_flags & OP_ASSOCIATED) {
-               ath9k_htc_beacon_config(priv, priv->vif);
-               ath_start_ani(priv);
-       }
+       ath9k_htc_ps_wakeup(priv);
+       ath9k_htc_vif_reconfig(priv);
        ath9k_htc_ps_restore(priv);
        mutex_unlock(&priv->mutex);
 }
index 7a5ffca..4a4f27b 100644 (file)
@@ -84,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = tx_info->control.sta;
+       struct ieee80211_vif *vif = tx_info->control.vif;
        struct ath9k_htc_sta *ista;
+       struct ath9k_htc_vif *avp;
        struct ath9k_htc_tx_ctl tx_ctl;
        enum htc_endpoint_id epid;
        u16 qnum;
@@ -95,18 +97,31 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
        hdr = (struct ieee80211_hdr *) skb->data;
        fc = hdr->frame_control;
 
-       if (tx_info->control.vif &&
-                       (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
-               vif_idx = ((struct ath9k_htc_vif *)
-                               tx_info->control.vif->drv_priv)->index;
-       else
-               vif_idx = priv->nvifs;
+       /*
+        * Find out on which interface this packet has to be
+        * sent out.
+        */
+       if (vif) {
+               avp = (struct ath9k_htc_vif *) vif->drv_priv;
+               vif_idx = avp->index;
+       } else {
+               if (!priv->ah->is_monitoring) {
+                       ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+                               "VIF is null, but no monitor interface!\n");
+                       return -EINVAL;
+               }
 
+               vif_idx = priv->mon_vif_idx;
+       }
+
+       /*
+        * Find out which station this packet is destined for.
+        */
        if (sta) {
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                sta_idx = ista->index;
        } else {
-               sta_idx = 0;
+               sta_idx = priv->vif_sta_pos[vif_idx];
        }
 
        memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
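The two lookups above boil down to: take the interface index from the frame's vif when there is one, otherwise fall back to the monitor interface, and take the station index from the sta entry when there is one, otherwise from the interface's bookkeeping station. A compact sketch of that decision, with mon_vif_idx and vif_sta_pos[] mirrored as plain fields on an invented context struct, purely for illustration:

#include <stdio.h>

#define MAX_VIF 2

struct ctx {
	int is_monitoring;
	int mon_vif_idx;
	int vif_sta_pos[MAX_VIF];	/* per-interface "self" station index */
};

/* vif_idx/sta_idx of -1 mean "the frame carried no vif/sta pointer". */
static int route(const struct ctx *c, int vif_idx, int sta_idx,
		 int *out_vif, int *out_sta)
{
	if (vif_idx < 0) {
		if (!c->is_monitoring)
			return -1;	/* no vif and no monitor interface */
		vif_idx = c->mon_vif_idx;
	}

	*out_vif = vif_idx;
	*out_sta = (sta_idx >= 0) ? sta_idx : c->vif_sta_pos[vif_idx];
	return 0;
}

int main(void)
{
	struct ctx c = { .is_monitoring = 1, .mon_vif_idx = 1,
			 .vif_sta_pos = { 3, 4 } };
	int v, s;

	if (!route(&c, -1, -1, &v, &s))
		printf("vif %d, sta %d\n", v, s);	/* monitor path: vif 1, sta 4 */
	return 0;
}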
@@ -141,7 +156,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
 
                /* CTS-to-self */
                if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
-                   (priv->op_flags & OP_PROTECT_ENABLE))
+                   (vif && vif->bss_conf.use_cts_prot))
                        flags |= ATH9K_HTC_TX_CTSONLY;
 
                tx_hdr.flags = cpu_to_be32(flags);
@@ -217,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
 void ath9k_tx_tasklet(unsigned long data)
 {
        struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+       struct ieee80211_vif *vif;
        struct ieee80211_sta *sta;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
@@ -228,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data)
                hdr = (struct ieee80211_hdr *) skb->data;
                fc = hdr->frame_control;
                tx_info = IEEE80211_SKB_CB(skb);
+               vif = tx_info->control.vif;
 
                memset(&tx_info->status, 0, sizeof(tx_info->status));
 
+               if (!vif)
+                       goto send_mac80211;
+
                rcu_read_lock();
 
-               sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+               sta = ieee80211_find_sta(vif, hdr->addr1);
                if (!sta) {
                        rcu_read_unlock();
                        ieee80211_tx_status(priv->hw, skb);
@@ -263,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data)
 
                rcu_read_unlock();
 
+       send_mac80211:
                /* Send status to mac80211 */
                ieee80211_tx_status(priv->hw, skb);
        }
@@ -386,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
         */
        if (((ah->opmode != NL80211_IFTYPE_AP) &&
             (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
-           (ah->opmode == NL80211_IFTYPE_MONITOR))
+           ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;
 
        if (priv->rxfilter & FIF_CONTROL)
@@ -398,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;
 
-       if (conf_is_ht(&priv->hw->conf))
+       if (conf_is_ht(&priv->hw->conf)) {
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+               rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
+       }
+
+       if (priv->rxfilter & FIF_PSPOLL)
+               rfilt |= ATH9K_RX_FILTER_PSPOLL;
 
        return rfilt;
 
@@ -412,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
 static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
 {
        struct ath_hw *ah = priv->ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-
        u32 rfilt, mfilt[2];
 
        /* configure rx filter */
        rfilt = ath9k_htc_calcrxfilter(priv);
        ath9k_hw_setrxfilter(ah, rfilt);
 
-       /* configure bssid mask */
-       ath_hw_setbssidmask(common);
-
-       /* configure operational mode */
-       ath9k_hw_setopmode(ah);
-
        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -576,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
                           rxbuf->rxstatus.rs_flags);
 
-       if (priv->op_flags & OP_ASSOCIATED) {
-               if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
-                   !rxbuf->rxstatus.rs_moreaggr)
-                       ATH_RSSI_LPF(priv->rx.last_rssi,
-                                    rxbuf->rxstatus.rs_rssi);
+       if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+           !rxbuf->rxstatus.rs_moreaggr)
+               ATH_RSSI_LPF(priv->rx.last_rssi,
+                            rxbuf->rxstatus.rs_rssi);
 
-               last_rssi = priv->rx.last_rssi;
+       last_rssi = priv->rx.last_rssi;
 
-               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-                       rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
-                                                            ATH_RSSI_EP_MULTIPLIER);
+       if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+               rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+                                                    ATH_RSSI_EP_MULTIPLIER);
 
-               if (rxbuf->rxstatus.rs_rssi < 0)
-                       rxbuf->rxstatus.rs_rssi = 0;
+       if (rxbuf->rxstatus.rs_rssi < 0)
+               rxbuf->rxstatus.rs_rssi = 0;
 
-               if (ieee80211_is_beacon(fc))
-                       priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
-       }
+       if (ieee80211_is_beacon(fc))
+               priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
 
        rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
        rx_status->band = hw->conf.channel->band;
        rx_status->freq = hw->conf.channel->center_freq;
        rx_status->signal =  rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
        rx_status->antenna = rxbuf->rxstatus.rs_antenna;
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        return true;
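The block above keeps a running RSSI estimate rather than trusting individual samples: each sample is folded into a fixed-point moving average (ATH_RSSI_LPF) and rounded back down when a single representative value is needed (ATH_EP_RND). The sketch below shows the idea only; the weight of 32 and the exact update formula are assumptions for the example, not the driver's constants.

#include <stdio.h>

#define EP_MUL		32			/* fixed-point scale / filter weight */
#define EP_RND(x)	(((x) + (EP_MUL / 2)) / EP_MUL)

static int avg = -1;				/* scaled accumulator, -1 = not primed */

static void rssi_lpf(int sample)
{
	if (avg < 0)
		avg = sample * EP_MUL;		/* first sample primes the filter */
	else
		avg = ((avg * (EP_MUL - 1)) + sample * EP_MUL) / EP_MUL;
}

int main(void)
{
	int samples[] = { 20, 22, 35, 21, 19 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		rssi_lpf(samples[i]);
	printf("smoothed rssi: %d\n", EP_RND(avg));	/* outliers are damped */
	return 0;
}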
 
index 1afb8bb..338b075 100644 (file)
@@ -369,6 +369,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
        else
                ah->config.ht_enable = 0;
 
+       /* PAPRD needs some more work to be enabled */
+       ah->config.paprd_disable = 1;
+
        ah->config.rx_intr_mitigation = true;
        ah->config.pcieSerDesWrite = true;
 
@@ -492,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        if (ah->hw_version.devid == AR5416_AR9100_DEVID)
                ah->hw_version.macVersion = AR_SREV_VERSION_9100;
 
+       ath9k_hw_read_revisions(ah);
+
+       /*
+        * Read back AR_WA into a permanent copy and set bits 14 and 17.
+        * We need to do this to avoid RMW of this register. We cannot
+        * read the reg when chip is asleep.
+        */
+       ah->WARegVal = REG_READ(ah, AR_WA);
+       ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+                        AR_WA_ASPM_TIMER_BASED_DISABLE);
+
        if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
                ath_err(common, "Couldn't reset chip\n");
                return -EIO;
@@ -560,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        ath9k_hw_init_mode_regs(ah);
 
-       /*
-        * Read back AR_WA into a permanent copy and set bits 14 and 17.
-        * We need to do this to avoid RMW of this register. We cannot
-        * read the reg when chip is asleep.
-        */
-       ah->WARegVal = REG_READ(ah, AR_WA);
-       ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
-                        AR_WA_ASPM_TIMER_BASED_DISABLE);
 
        if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -665,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
        REGWRITE_BUFFER_FLUSH(ah);
 }
 
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+{
+       REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
+       udelay(100);
+       REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+
+       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+               udelay(100);
+
+       return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+}
+EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+
+#define DPLL2_KD_VAL            0x3D
+#define DPLL2_KI_VAL            0x06
+#define DPLL3_PHASE_SHIFT_VAL   0x1
+
 static void ath9k_hw_init_pll(struct ath_hw *ah,
                              struct ath9k_channel *chan)
 {
        u32 pll;
 
-       if (AR_SREV_9485(ah))
+       if (AR_SREV_9485(ah)) {
+               REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+               REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
+
+               REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+
+               REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
+               udelay(1000);
+
                REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
 
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
+
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+               REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
+               udelay(1000);
+       }
+
        pll = ath9k_hw_compute_pll_control(ah, chan);
 
        REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -1057,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
                REG_WRITE(ah, AR_RC, AR_RC_AHB);
 
        REG_WRITE(ah, AR_RTC_RESET, 0);
-       udelay(2);
 
        REGWRITE_BUFFER_FLUSH(ah);
 
@@ -1079,8 +1121,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
                return false;
        }
 
-       ath9k_hw_read_revisions(ah);
-
        return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
 }
 
@@ -1345,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ath9k_hw_spur_mitigate_freq(ah, chan);
        ah->eep_ops->set_board_values(ah, chan);
 
-       ath9k_hw_set_operating_mode(ah, ah->opmode);
-
        ENABLE_REGWRITE_BUFFER(ah);
 
        REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1364,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        REGWRITE_BUFFER_FLUSH(ah);
 
+       ath9k_hw_set_operating_mode(ah, ah->opmode);
+
        r = ath9k_hw_rf_set_freq(ah, chan);
        if (r)
                return r;
@@ -1933,7 +1973,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                pCap->rx_status_len = sizeof(struct ar9003_rxs);
                pCap->tx_desc_len = sizeof(struct ar9003_txc);
                pCap->txs_len = sizeof(struct ar9003_txs);
-               if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+               if (!ah->config.paprd_disable &&
+                   ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
                        pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
        } else {
                pCap->tx_desc_len = sizeof(struct ath_desc);
index 5a3dfec..6650fd4 100644 (file)
@@ -70,6 +70,9 @@
 #define REG_READ(_ah, _reg) \
        ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
 
+#define REG_READ_MULTI(_ah, _addr, _val, _cnt)         \
+       ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
+
 #define ENABLE_REGWRITE_BUFFER(_ah)                                    \
        do {                                                            \
                if (ath9k_hw_common(_ah)->ops->enable_write_buffer)     \
@@ -92,9 +95,9 @@
 #define REG_READ_FIELD(_a, _r, _f) \
        (((REG_READ(_a, _r) & _f) >> _f##_S))
 #define REG_SET_BIT(_a, _r, _f) \
-       REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
+       REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
 #define REG_CLR_BIT(_a, _r, _f) \
-       REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
+       REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
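The extra parentheses around _f are the classic macro-hygiene fix: without them, a compound argument such as two OR'd flags expands so that ~ binds to only the first flag. A tiny standalone demonstration with made-up flag values, not taken from the driver:

#include <stdio.h>

#define FLAG_A	0x01u
#define FLAG_B	0x10u

/* Expands to reg & ~FLAG_A | FLAG_B: only FLAG_A ends up cleared. */
#define CLR_BITS_BAD(reg, f)	((reg) & ~f)
/* Expands to reg & ~(FLAG_A | FLAG_B): both flags are cleared. */
#define CLR_BITS_OK(reg, f)	((reg) & ~(f))

int main(void)
{
	unsigned int reg = 0xffu;

	printf("bad: 0x%02x\n", CLR_BITS_BAD(reg, FLAG_A | FLAG_B));	/* 0xfe */
	printf("ok:  0x%02x\n", CLR_BITS_OK(reg, FLAG_A | FLAG_B));	/* 0xee */
	return 0;
}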
 
 #define DO_DELAY(x) do {                       \
                if ((++(x) % 64) == 0)          \
@@ -225,6 +228,7 @@ struct ath9k_ops_config {
        u32 pcie_waen;
        u8 analog_shiftreg;
        u8 ht_enable;
+       u8 paprd_disable;
        u32 ofdm_trig_low;
        u32 ofdm_trig_high;
        u32 cck_trig_high;
@@ -925,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
 void ath9k_hw_set11nmac2040(struct ath_hw *ah);
 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
index 767d8b8..79aec98 100644 (file)
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -144,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(540, 0x0c, 0),
 };
 
+#ifdef CONFIG_MAC80211_LEDS
+static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
+       { .throughput = 0 * 1024, .blink_time = 334 },
+       { .throughput = 1 * 1024, .blink_time = 260 },
+       { .throughput = 5 * 1024, .blink_time = 220 },
+       { .throughput = 10 * 1024, .blink_time = 190 },
+       { .throughput = 20 * 1024, .blink_time = 170 },
+       { .throughput = 50 * 1024, .blink_time = 150 },
+       { .throughput = 70 * 1024, .blink_time = 130 },
+       { .throughput = 100 * 1024, .blink_time = 110 },
+       { .throughput = 200 * 1024, .blink_time = 80 },
+       { .throughput = 300 * 1024, .blink_time = 50 },
+};
+#endif
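The table above maps offered throughput to an LED blink period (faster traffic, faster blink). The sketch below shows how such a sorted table is typically consumed, with a shortened copy of the values; the selection logic is an assumption for illustration, since the real lookup lives in mac80211's LED trigger code.

#include <stdio.h>

struct tpt_blink {
	unsigned int throughput;	/* threshold, in units of 1024 */
	unsigned int blink_time;	/* LED blink period in ms */
};

static const struct tpt_blink table[] = {
	{ .throughput = 0 * 1024,   .blink_time = 334 },
	{ .throughput = 10 * 1024,  .blink_time = 190 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};

static unsigned int pick_blink_time(unsigned int tpt)
{
	unsigned int i, t = table[0].blink_time;

	/* Table is sorted ascending: keep the last threshold we exceed. */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (tpt >= table[i].throughput)
			t = table[i].blink_time;
	return t;
}

int main(void)
{
	printf("%u ms\n", pick_blink_time(42 * 1024));	/* 10K..100K band: 190 ms */
	return 0;
}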
+
 static void ath9k_deinit_softc(struct ath_softc *sc);
 
 /*
@@ -254,8 +265,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
 {
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
 
        return ath_reg_notifier_apply(wiphy, request, reg);
@@ -442,9 +452,10 @@ static int ath9k_init_queues(struct ath_softc *sc)
        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);
 
-       for (i = 0; i < WME_NUM_AC; i++)
+       for (i = 0; i < WME_NUM_AC; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
-
+               sc->tx.txq_map[i]->mac80211_qnum = i;
+       }
        return 0;
 }
 
@@ -516,10 +527,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
 
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;
 
-       for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+       for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
                sc->beacon.bslot[i] = NULL;
-               sc->beacon.bslot_aphy[i] = NULL;
-       }
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -537,6 +546,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        if (!ah)
                return -ENOMEM;
 
+       ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;
@@ -554,10 +564,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        spin_lock_init(&common->cc_lock);
 
-       spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
+#ifdef CONFIG_ATH9K_DEBUGFS
+       spin_lock_init(&sc->nodes_lock);
+       INIT_LIST_HEAD(&sc->nodes);
+#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);
@@ -598,8 +611,6 @@ err_btcoex:
 err_queues:
        ath9k_hw_deinit(ah);
 err_hw:
-       tasklet_kill(&sc->intr_tq);
-       tasklet_kill(&sc->bcon_tasklet);
 
        kfree(ah);
        sc->sc_ah = NULL;
@@ -701,7 +712,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                    const struct ath_bus_ops *bus_ops)
 {
        struct ieee80211_hw *hw = sc->hw;
-       struct ath_wiphy *aphy = hw->priv;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
@@ -736,6 +746,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 
        ath9k_init_txpower_limits(sc);
 
+#ifdef CONFIG_MAC80211_LEDS
+       /* must be initialized before ieee80211_register_hw */
+       sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
+               IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
+               ARRAY_SIZE(ath9k_tpt_blink));
+#endif
+
        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
@@ -756,17 +773,11 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 
        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
-       INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
-       INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
-       sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-       aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+       sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
 
        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);
 
-       pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
-
        return 0;
 
 error_world:
@@ -807,9 +818,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 
        ath9k_hw_deinit(sc->sc_ah);
 
-       tasklet_kill(&sc->intr_tq);
-       tasklet_kill(&sc->bcon_tasklet);
-
        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
 }
@@ -817,28 +825,18 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 void ath9k_deinit_device(struct ath_softc *sc)
 {
        struct ieee80211_hw *hw = sc->hw;
-       int i = 0;
 
        ath9k_ps_wakeup(sc);
 
        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);
 
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy == NULL)
-                       continue;
-               sc->sec_wiphy[i] = NULL;
-               ieee80211_unregister_hw(aphy->hw);
-               ieee80211_free_hw(aphy->hw);
-       }
+       ath9k_ps_restore(sc);
 
        ieee80211_unregister_hw(hw);
-       pm_qos_remove_request(&sc->pm_qos_req);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
-       kfree(sc->sec_wiphy);
 }
 
 void ath_descdma_cleanup(struct ath_softc *sc,
index 180170d..562257a 100644 (file)
@@ -143,84 +143,59 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
 }
 EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
 
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
 {
-#define ATH9K_TX_STOP_DMA_TIMEOUT      4000    /* usec */
-#define ATH9K_TIME_QUANTUM             100     /* usec */
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ath9k_hw_capabilities *pCap = &ah->caps;
-       struct ath9k_tx_queue_info *qi;
-       u32 tsfLow, j, wait;
-       u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+       int i, q;
 
-       if (q >= pCap->total_queues) {
-               ath_dbg(common, ATH_DBG_QUEUE,
-                       "Stopping TX DMA, invalid queue: %u\n", q);
-               return false;
-       }
+       REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
 
-       qi = &ah->txq[q];
-       if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-               ath_dbg(common, ATH_DBG_QUEUE,
-                       "Stopping TX DMA, inactive queue: %u\n", q);
-               return false;
-       }
+       REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+       REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+       REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
 
-       REG_WRITE(ah, AR_Q_TXD, 1 << q);
+       for (q = 0; q < AR_NUM_QCU; q++) {
+               for (i = 0; i < 1000; i++) {
+                       if (i)
+                               udelay(5);
 
-       for (wait = wait_time; wait != 0; wait--) {
-               if (ath9k_hw_numtxpending(ah, q) == 0)
-                       break;
-               udelay(ATH9K_TIME_QUANTUM);
+                       if (!ath9k_hw_numtxpending(ah, q))
+                               break;
+               }
        }
 
-       if (ath9k_hw_numtxpending(ah, q)) {
-               ath_dbg(common, ATH_DBG_QUEUE,
-                       "%s: Num of pending TX Frames %d on Q %d\n",
-                       __func__, ath9k_hw_numtxpending(ah, q), q);
-
-               for (j = 0; j < 2; j++) {
-                       tsfLow = REG_READ(ah, AR_TSF_L32);
-                       REG_WRITE(ah, AR_QUIET2,
-                                 SM(10, AR_QUIET2_QUIET_DUR));
-                       REG_WRITE(ah, AR_QUIET_PERIOD, 100);
-                       REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
-                       REG_SET_BIT(ah, AR_TIMER_MODE,
-                                      AR_QUIET_TIMER_EN);
-
-                       if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
-                               break;
+       REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+       REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+       REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
 
-                       ath_dbg(common, ATH_DBG_QUEUE,
-                               "TSF has moved while trying to set quiet time TSF: 0x%08x\n",
-                               tsfLow);
-               }
+       REG_WRITE(ah, AR_Q_TXD, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
 
-               REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
+{
+#define ATH9K_TX_STOP_DMA_TIMEOUT      1000    /* usec */
+#define ATH9K_TIME_QUANTUM             100     /* usec */
+       int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+       int wait;
 
-               udelay(200);
-               REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
+       REG_WRITE(ah, AR_Q_TXD, 1 << q);
 
-               wait = wait_time;
-               while (ath9k_hw_numtxpending(ah, q)) {
-                       if ((--wait) == 0) {
-                               ath_err(common,
-                                       "Failed to stop TX DMA in 100 msec after killing last frame\n");
-                               break;
-                       }
+       for (wait = wait_time; wait != 0; wait--) {
+               if (wait != wait_time)
                        udelay(ATH9K_TIME_QUANTUM);
-               }
 
-               REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+               if (ath9k_hw_numtxpending(ah, q) == 0)
+                       break;
        }
 
        REG_WRITE(ah, AR_Q_TXD, 0);
+
        return wait != 0;
 
 #undef ATH9K_TX_STOP_DMA_TIMEOUT
 #undef ATH9K_TIME_QUANTUM
 }
-EXPORT_SYMBOL(ath9k_hw_stoptxdma);
+EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
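A hedged usage sketch of the two new helpers: the callers below are hypothetical and not part of this patch; they only illustrate the intended split. ath9k_hw_stop_dma_queue() polls a single QCU for up to 1 ms (10 x 100 us) and reports whether it drained, while ath9k_hw_abort_tx_dma() force-idles the channel and drains every QCU, which is what a full reset path wants.

/* Hypothetical callers, for illustration only. */
static void example_stop_beacon_queue(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;

        /* Stop one QCU; the helper polls ath9k_hw_numtxpending() for up to 1 ms. */
        if (!ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq))
                ath_err(ath9k_hw_common(ah),
                        "beacon queue failed to drain\n");
}

static void example_before_full_reset(struct ath_softc *sc)
{
        /* Drain all QCUs at once; the helper forces channel idle internally. */
        ath9k_hw_abort_tx_dma(sc->sc_ah);
}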
 
 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
 {
@@ -690,17 +665,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
 
        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+               /*
+                * Treat these errors as mutually exclusive to avoid spurious
+                * extra error reports from the hardware. If a CRC error is
+                * reported, then decryption and MIC errors are irrelevant;
+                * the frame is going to be dropped either way.
+                */
                if (ads.ds_rxstatus8 & AR_CRCErr)
                        rs->rs_status |= ATH9K_RXERR_CRC;
-               if (ads.ds_rxstatus8 & AR_PHYErr) {
+               else if (ads.ds_rxstatus8 & AR_PHYErr) {
                        rs->rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        rs->rs_phyerr = phyerr;
-               }
-               if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+               } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
-               if (ads.ds_rxstatus8 & AR_MichaelErr)
+               else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
+
                if (ads.ds_rxstatus8 & AR_KeyMiss)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
        }
@@ -885,7 +866,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
        struct ath_common *common = ath9k_hw_common(ah);
 
        if (!(ints & ATH9K_INT_GLOBAL))
-               ath9k_hw_enable_interrupts(ah);
+               ath9k_hw_disable_interrupts(ah);
 
        ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
@@ -963,7 +944,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }
 
-       ath9k_hw_enable_interrupts(ah);
+       if (ints & ATH9K_INT_GLOBAL)
+               ath9k_hw_enable_interrupts(ah);
 
        return;
 }
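The hunk above turns ATH9K_INT_GLOBAL into a gate in both directions: when the bit is absent, the mask is programmed with interrupts left disabled, and the trailing re-enable only runs when it is present. A minimal caller-side sketch of what that allows, assuming ah->imask already holds the wanted interrupt sources; ath9k_stop() later in this diff relies on the same gating when it clears the bit before shutting interrupts down.

/* Sketch: reprogram the mask but keep interrupts off, e.g. during
 * teardown. With ATH9K_INT_GLOBAL cleared, ath9k_hw_set_interrupts()
 * calls ath9k_hw_disable_interrupts() and skips the final enable. */
ah->imask &= ~ATH9K_INT_GLOBAL;
ath9k_hw_set_interrupts(ah, ah->imask);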
index 7512f97..b2b2ff8 100644 (file)
@@ -639,6 +639,8 @@ enum ath9k_rx_filter {
        ATH9K_RX_FILTER_PHYERR = 0x00000100,
        ATH9K_RX_FILTER_MYBEACON = 0x00000200,
        ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
+       ATH9K_RX_FILTER_COMP_BA = 0x00000800,
+       ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
        ATH9K_RX_FILTER_PSPOLL = 0x00004000,
        ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
        ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
@@ -674,7 +676,8 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
 void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                            const struct ath9k_tx_queue_info *qinfo);
index f90a6ca..115f162 100644 (file)
  */
 
 #include <linux/nl80211.h>
+#include <linux/delay.h>
 #include "ath9k.h"
 #include "btcoex.h"
 
-static void ath_update_txpow(struct ath_softc *sc)
-{
-       struct ath_hw *ah = sc->sc_ah;
-
-       if (sc->curtxpow != sc->config.txpowlimit) {
-               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
-               /* read back in case value is clamped */
-               sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
-       }
-}
-
 static u8 parse_mpdudensity(u8 mpdudensity)
 {
        /*
@@ -64,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity)
        }
 }
 
-static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
-                                               struct ieee80211_hw *hw)
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
 {
-       struct ieee80211_channel *curchan = hw->conf.channel;
-       struct ath9k_channel *channel;
-       u8 chan_idx;
+       bool pending = false;
+
+       spin_lock_bh(&txq->axq_lock);
 
-       chan_idx = curchan->hw_value;
-       channel = &sc->sc_ah->channels[chan_idx];
-       ath9k_update_ichannel(sc, hw, channel);
-       return channel;
+       if (txq->axq_depth || !list_empty(&txq->axq_acq))
+               pending = true;
+       else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+               pending = !list_empty(&txq->txq_fifo_pending);
+
+       spin_unlock_bh(&txq->axq_lock);
+       return pending;
 }
 
 bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
@@ -177,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
        }
 }
 
-static void ath_update_survey_stats(struct ath_softc *sc)
+/*
+ * Updates the survey statistics and returns the busy time since last
+ * update in %, if the measurement duration was long enough for the
+ * result to be useful, -1 otherwise.
+ */
+static int ath_update_survey_stats(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
@@ -185,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
        struct survey_info *survey = &sc->survey[pos];
        struct ath_cycle_counters *cc = &common->cc_survey;
        unsigned int div = common->clockrate * 1000;
+       int ret = 0;
 
        if (!ah->curchan)
-               return;
+               return -1;
 
        if (ah->power_mode == ATH9K_PM_AWAKE)
                ath_hw_cycle_counters_update(common);
@@ -202,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
                survey->channel_time_rx += cc->rx_frame / div;
                survey->channel_time_tx += cc->tx_frame / div;
        }
+
+       if (cc->cycles < div)
+               return -1;
+
+       if (cc->cycles > 0)
+               ret = cc->rx_busy * 100 / cc->cycles;
+
        memset(cc, 0, sizeof(*cc));
 
        ath_update_survey_nf(sc, pos);
+
+       return ret;
 }
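A small self-contained sketch of the percentage calculation behind the new return value. The numbers are invented; div is common->clockrate * 1000, and assuming clockrate is the MAC clock in MHz, div is the number of cycles per millisecond, so the -1 case means the counters covered less than a millisecond of air time.

#include <stdio.h>

int main(void)
{
        /* Illustrative values only. */
        unsigned int clockrate = 44;          /* MHz, assumed */
        unsigned int div = clockrate * 1000;  /* cycles per millisecond */
        unsigned int cycles = 88000;          /* roughly 2 ms of counting */
        unsigned int rx_busy = 61600;
        int busy = -1;

        if (cycles >= div && cycles > 0)
                busy = rx_busy * 100 / cycles;

        printf("busy = %d%%\n", busy);        /* prints: busy = 70% */
        return 0;
}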
 
 /*
@@ -215,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan)
 {
-       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &common->hw->conf;
@@ -227,10 +233,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        if (sc->sc_flags & SC_OP_INVALID)
                return -EIO;
 
+       sc->hw_busy_count = 0;
+
        del_timer_sync(&common->ani.timer);
        cancel_work_sync(&sc->paprd_work);
        cancel_work_sync(&sc->hw_check_work);
        cancel_delayed_work_sync(&sc->tx_complete_work);
+       cancel_delayed_work_sync(&sc->hw_pll_work);
 
        ath9k_ps_wakeup(sc);
 
@@ -251,6 +260,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        if (!ath_stoprecv(sc))
                stopped = false;
 
+       if (!ath9k_hw_check_alive(ah))
+               stopped = false;
+
        /* XXX: do not flush receive queue here. We don't want
         * to flush data frames already in queue because of
         * changing channel. */
@@ -259,7 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                fastcc = false;
 
        if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
-               caldata = &aphy->caldata;
+               caldata = &sc->caldata;
 
        ath_dbg(common, ATH_DBG_CONFIG,
                "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
@@ -281,17 +293,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                goto ps_restore;
        }
 
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                              sc->config.txpowlimit, &sc->curtxpow);
        ath9k_hw_set_interrupts(ah, ah->imask);
 
        if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
                if (sc->sc_flags & SC_OP_BEACONS)
                        ath_beacon_config(sc, NULL);
                ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+               ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
                ath_start_ani(common);
        }
 
  ps_restore:
+       ieee80211_wake_queues(hw);
+
        spin_unlock_bh(&sc->sc_pcu_lock);
 
        ath9k_ps_restore(sc);
@@ -325,6 +341,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
 {
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_tx_control txctl;
        int time_left;
 
@@ -340,14 +358,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
        tx_info->control.rates[1].idx = -1;
 
        init_completion(&sc->paprd_complete);
-       sc->paprd_pending = true;
        txctl.paprd = BIT(chain);
-       if (ath_tx_start(hw, skb, &txctl) != 0)
+
+       if (ath_tx_start(hw, skb, &txctl) != 0) {
+               ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
+               dev_kfree_skb_any(skb);
                return false;
+       }
 
        time_left = wait_for_completion_timeout(&sc->paprd_complete,
                        msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-       sc->paprd_pending = false;
 
        if (!time_left)
                ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -545,6 +565,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
        struct ath_hw *ah = sc->sc_ah;
        an = (struct ath_node *)sta->drv_priv;
 
+#ifdef CONFIG_ATH9K_DEBUGFS
+       spin_lock(&sc->nodes_lock);
+       list_add(&an->list, &sc->nodes);
+       spin_unlock(&sc->nodes_lock);
+       an->sta = sta;
+#endif
        if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
                sc->sc_flags |= SC_OP_ENABLE_APM;
 
@@ -560,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
 
+#ifdef CONFIG_ATH9K_DEBUGFS
+       spin_lock(&sc->nodes_lock);
+       list_del(&an->list);
+       spin_unlock(&sc->nodes_lock);
+       an->sta = NULL;
+#endif
+
        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_cleanup(sc, an);
 }
@@ -567,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 void ath_hw_check(struct work_struct *work)
 {
        struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
-       int i;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       unsigned long flags;
+       int busy;
 
        ath9k_ps_wakeup(sc);
+       if (ath9k_hw_check_alive(sc->sc_ah))
+               goto out;
 
-       for (i = 0; i < 3; i++) {
-               if (ath9k_hw_check_alive(sc->sc_ah))
-                       goto out;
+       spin_lock_irqsave(&common->cc_lock, flags);
+       busy = ath_update_survey_stats(sc);
+       spin_unlock_irqrestore(&common->cc_lock, flags);
 
-               msleep(1);
-       }
-       ath_reset(sc, true);
+       ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
+               "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+       if (busy >= 99) {
+               if (++sc->hw_busy_count >= 3)
+                       ath_reset(sc, true);
+       } else if (busy >= 0)
+               sc->hw_busy_count = 0;
 
 out:
        ath9k_ps_restore(sc);
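A compact sketch of the reset policy this gives, using made-up busy readings: three consecutive valid readings of 99% or more trigger a reset, a valid reading below 99% clears the counter, and an invalid reading (-1, measurement too short) leaves the counter untouched.

#include <stdio.h>

int main(void)
{
        /* Hypothetical readings from ath_update_survey_stats(). */
        int readings[] = { 100, -1, 99, 100 };
        int hw_busy_count = 0;
        int i;

        for (i = 0; i < 4; i++) {
                int busy = readings[i];

                if (busy >= 99) {
                        if (++hw_busy_count >= 3)
                                printf("check %d: would call ath_reset()\n", i);
                } else if (busy >= 0)
                        hw_busy_count = 0;
        }
        return 0;
}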
@@ -592,17 +633,23 @@ void ath9k_tasklet(unsigned long data)
        u32 status = sc->intrstatus;
        u32 rxmask;
 
-       ath9k_ps_wakeup(sc);
-
        if (status & ATH9K_INT_FATAL) {
                ath_reset(sc, true);
-               ath9k_ps_restore(sc);
                return;
        }
 
+       ath9k_ps_wakeup(sc);
        spin_lock(&sc->sc_pcu_lock);
 
-       if (!ath9k_hw_check_alive(ah))
+       /*
+        * Only run the baseband hang check if beacons stop working in AP or
+        * IBSS mode, because it has a high false positive rate. For station
+        * mode it should not be necessary, since the upper layers will detect
+        * this through a beacon miss automatically and the following channel
+        * change will trigger a hardware reset anyway.
+        */
+       if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
+           !ath9k_hw_check_alive(ah))
                ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -781,54 +828,11 @@ chip_reset:
 #undef SCHED_INTR
 }
 
-static u32 ath_get_extchanmode(struct ath_softc *sc,
-                              struct ieee80211_channel *chan,
-                              enum nl80211_channel_type channel_type)
-{
-       u32 chanmode = 0;
-
-       switch (chan->band) {
-       case IEEE80211_BAND_2GHZ:
-               switch(channel_type) {
-               case NL80211_CHAN_NO_HT:
-               case NL80211_CHAN_HT20:
-                       chanmode = CHANNEL_G_HT20;
-                       break;
-               case NL80211_CHAN_HT40PLUS:
-                       chanmode = CHANNEL_G_HT40PLUS;
-                       break;
-               case NL80211_CHAN_HT40MINUS:
-                       chanmode = CHANNEL_G_HT40MINUS;
-                       break;
-               }
-               break;
-       case IEEE80211_BAND_5GHZ:
-               switch(channel_type) {
-               case NL80211_CHAN_NO_HT:
-               case NL80211_CHAN_HT20:
-                       chanmode = CHANNEL_A_HT20;
-                       break;
-               case NL80211_CHAN_HT40PLUS:
-                       chanmode = CHANNEL_A_HT40PLUS;
-                       break;
-               case NL80211_CHAN_HT40MINUS:
-                       chanmode = CHANNEL_A_HT40MINUS;
-                       break;
-               }
-               break;
-       default:
-               break;
-       }
-
-       return chanmode;
-}
-
 static void ath9k_bss_assoc_info(struct ath_softc *sc,
                                 struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_bss_conf *bss_conf)
 {
-       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
 
@@ -852,7 +856,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
                ath_beacon_config(sc, vif);
 
                /* Reset rssi stats */
-               aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+               sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
                sc->sc_flags |= SC_OP_ANI_RUN;
@@ -879,7 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath9k_hw_configpcipowersave(ah, 0, 0);
 
        if (!ah->curchan)
-               ah->curchan = ath_get_curchannel(sc, sc->hw);
+               ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
 
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
@@ -888,7 +892,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
                        channel->center_freq, r);
        }
 
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                              sc->config.txpowlimit, &sc->curtxpow);
        if (ath_startrecv(sc) != 0) {
                ath_err(common, "Unable to restart recv logic\n");
                goto out;
@@ -905,6 +910,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath9k_hw_set_gpio(ah, ah->led_pin, 0);
 
        ieee80211_wake_queues(hw);
+       ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
+
 out:
        spin_unlock_bh(&sc->sc_pcu_lock);
 
@@ -918,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
        int r;
 
        ath9k_ps_wakeup(sc);
+       cancel_delayed_work_sync(&sc->hw_pll_work);
+
        spin_lock_bh(&sc->sc_pcu_lock);
 
        ieee80211_stop_queues(hw);
@@ -940,7 +949,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath_flushrecv(sc);              /* flush recv queue */
 
        if (!ah->curchan)
-               ah->curchan = ath_get_curchannel(sc, hw);
+               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
@@ -955,8 +964,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 
        spin_unlock_bh(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
-
-       ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 }
 
 int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -966,9 +973,12 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        struct ieee80211_hw *hw = sc->hw;
        int r;
 
+       sc->hw_busy_count = 0;
+
        /* Stop ANI */
        del_timer_sync(&common->ani.timer);
 
+       ath9k_ps_wakeup(sc);
        spin_lock_bh(&sc->sc_pcu_lock);
 
        ieee80211_stop_queues(hw);
@@ -992,7 +1002,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
         * that changes the channel so update any state that
         * might change as a result.
         */
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                              sc->config.txpowlimit, &sc->curtxpow);
 
        if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
                ath_beacon_config(sc, NULL);    /* restart beacons */
@@ -1015,42 +1026,18 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 
        /* Start ANI */
        ath_start_ani(common);
+       ath9k_ps_restore(sc);
 
        return r;
 }
 
-/* XXX: Remove me once we don't depend on ath9k_channel for all
- * this redundant data */
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
-                          struct ath9k_channel *ichan)
-{
-       struct ieee80211_channel *chan = hw->conf.channel;
-       struct ieee80211_conf *conf = &hw->conf;
-
-       ichan->channel = chan->center_freq;
-       ichan->chan = chan;
-
-       if (chan->band == IEEE80211_BAND_2GHZ) {
-               ichan->chanmode = CHANNEL_G;
-               ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
-       } else {
-               ichan->chanmode = CHANNEL_A;
-               ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
-       }
-
-       if (conf_is_ht(conf))
-               ichan->chanmode = ath_get_extchanmode(sc, chan,
-                                           conf->channel_type);
-}
-
 /**********************/
 /* mac80211 callbacks */
 /**********************/
 
 static int ath9k_start(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_channel *curchan = hw->conf.channel;
@@ -1063,32 +1050,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
 
        mutex_lock(&sc->mutex);
 
-       if (ath9k_wiphy_started(sc)) {
-               if (sc->chan_idx == curchan->hw_value) {
-                       /*
-                        * Already on the operational channel, the new wiphy
-                        * can be marked active.
-                        */
-                       aphy->state = ATH_WIPHY_ACTIVE;
-                       ieee80211_wake_queues(hw);
-               } else {
-                       /*
-                        * Another wiphy is on another channel, start the new
-                        * wiphy in paused state.
-                        */
-                       aphy->state = ATH_WIPHY_PAUSED;
-                       ieee80211_stop_queues(hw);
-               }
-               mutex_unlock(&sc->mutex);
-               return 0;
-       }
-       aphy->state = ATH_WIPHY_ACTIVE;
-
        /* setup initial channel */
-
        sc->chan_idx = curchan->hw_value;
 
-       init_channel = ath_get_curchannel(sc, hw);
+       init_channel = ath9k_cmn_get_curchannel(hw, ah);
 
        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1114,7 +1079,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
         * This is needed only to setup initial state
         * but it's best done after a reset.
         */
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                       sc->config.txpowlimit, &sc->curtxpow);
 
        /*
         * Setup the hardware after reset:
@@ -1171,12 +1137,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
                        ath9k_btcoex_timer_resume(sc);
        }
 
-       /* User has the option to provide pm-qos value as a module
-        * parameter rather than using the default value of
-        * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
-        */
-       pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
        if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
                common->bus_ops->extn_synch_en(common);
 
@@ -1186,22 +1146,13 @@ mutex_unlock:
        return r;
 }
 
-static int ath9k_tx(struct ieee80211_hw *hw,
-                   struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_tx_control txctl;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
-       if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
-               ath_dbg(common, ATH_DBG_XMIT,
-                       "ath9k: %s: TX in unexpected wiphy state %d\n",
-                       wiphy_name(hw->wiphy), aphy->state);
-               goto exit;
-       }
-
        if (sc->ps_enabled) {
                /*
                 * mac80211 does not set PM field for normal data frames, so we
@@ -1252,52 +1203,30 @@ static int ath9k_tx(struct ieee80211_hw *hw,
                goto exit;
        }
 
-       return 0;
+       return;
 exit:
        dev_kfree_skb_any(skb);
-       return 0;
 }
 
 static void ath9k_stop(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       int i;
 
        mutex_lock(&sc->mutex);
 
-       aphy->state = ATH_WIPHY_INACTIVE;
-
-       if (led_blink)
-               cancel_delayed_work_sync(&sc->ath_led_blink_work);
-
        cancel_delayed_work_sync(&sc->tx_complete_work);
+       cancel_delayed_work_sync(&sc->hw_pll_work);
        cancel_work_sync(&sc->paprd_work);
        cancel_work_sync(&sc->hw_check_work);
 
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i])
-                       break;
-       }
-
-       if (i == sc->num_sec_wiphy) {
-               cancel_delayed_work_sync(&sc->wiphy_work);
-               cancel_work_sync(&sc->chan_work);
-       }
-
        if (sc->sc_flags & SC_OP_INVALID) {
                ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
                return;
        }
 
-       if (ath9k_wiphy_started(sc)) {
-               mutex_unlock(&sc->mutex);
-               return; /* another wiphy still in use */
-       }
-
        /* Ensure HW is awake when we try to shut it down. */
        ath9k_ps_wakeup(sc);
 
@@ -1309,6 +1238,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        spin_lock_bh(&sc->sc_pcu_lock);
 
+       /* prevent tasklets from enabling interrupts once we disable them */
+       ah->imask &= ~ATH9K_INT_GLOBAL;
+
        /* make sure h/w will not generate any interrupt
         * before setting the invalid flag. */
        ath9k_hw_disable_interrupts(ah);
@@ -1320,133 +1252,254 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        } else
                sc->rx.rxlink = NULL;
 
+       if (sc->rx.frag) {
+               dev_kfree_skb_any(sc->rx.frag);
+               sc->rx.frag = NULL;
+       }
+
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(ah);
        ath9k_hw_configpcipowersave(ah, 1, 1);
 
        spin_unlock_bh(&sc->sc_pcu_lock);
 
+       /* we can now sync irq and kill any running tasklets, since we have
+        * already disabled interrupts and are not holding a spin lock */
+       synchronize_irq(sc->irq);
+       tasklet_kill(&sc->intr_tq);
+       tasklet_kill(&sc->bcon_tasklet);
+
        ath9k_ps_restore(sc);
 
        sc->ps_idle = true;
-       ath9k_set_wiphy_idle(aphy, true);
        ath_radio_disable(sc, hw);
 
        sc->sc_flags |= SC_OP_INVALID;
 
-       pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
        mutex_unlock(&sc->mutex);
 
        ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
 }
 
-static int ath9k_add_interface(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif)
+bool ath9k_uses_beacons(int type)
+{
+       switch (type) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_MESH_POINT:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void ath9k_reclaim_beacon(struct ath_softc *sc,
+                                struct ieee80211_vif *vif)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
-       enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
-       int ret = 0;
 
-       mutex_lock(&sc->mutex);
+       ath9k_set_beaconing_status(sc, false);
+       ath_beacon_return(sc, avp);
+       ath9k_set_beaconing_status(sc, true);
+       sc->sc_flags &= ~SC_OP_BEACONS;
+}
+
+static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath9k_vif_iter_data *iter_data = data;
+       int i;
+
+       if (iter_data->hw_macaddr)
+               for (i = 0; i < ETH_ALEN; i++)
+                       iter_data->mask[i] &=
+                               ~(iter_data->hw_macaddr[i] ^ mac[i]);
 
        switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               ic_opmode = NL80211_IFTYPE_STATION;
+       case NL80211_IFTYPE_AP:
+               iter_data->naps++;
                break;
-       case NL80211_IFTYPE_WDS:
-               ic_opmode = NL80211_IFTYPE_WDS;
+       case NL80211_IFTYPE_STATION:
+               iter_data->nstations++;
                break;
        case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_AP:
+               iter_data->nadhocs++;
+               break;
        case NL80211_IFTYPE_MESH_POINT:
-               if (sc->nbcnvifs >= ATH_BCBUF) {
-                       ret = -ENOBUFS;
-                       goto out;
-               }
-               ic_opmode = vif->type;
+               iter_data->nmeshes++;
+               break;
+       case NL80211_IFTYPE_WDS:
+               iter_data->nwds++;
                break;
        default:
-               ath_err(common, "Interface type %d not yet supported\n",
-                       vif->type);
-               ret = -EOPNOTSUPP;
-               goto out;
+               iter_data->nothers++;
+               break;
        }
+}
 
-       ath_dbg(common, ATH_DBG_CONFIG,
-               "Attach a VIF of type: %d\n", ic_opmode);
+/* Called with sc->mutex held. */
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ath9k_vif_iter_data *iter_data)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
 
-       /* Set the VIF opmode */
-       avp->av_opmode = ic_opmode;
-       avp->av_bslot = -1;
+       /*
+        * Use the hardware MAC address as the reference; the hardware uses it
+        * together with the BSSID mask when matching addresses.
+        */
+       memset(iter_data, 0, sizeof(*iter_data));
+       iter_data->hw_macaddr = common->macaddr;
+       memset(&iter_data->mask, 0xff, ETH_ALEN);
 
-       sc->nvifs++;
+       if (vif)
+               ath9k_vif_iter(iter_data, vif->addr, vif);
+
+       /* Get list of all active MAC addresses */
+       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
+                                                  iter_data);
+}
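A worked example of the mask arithmetic in ath9k_vif_iter(): every bit that differs between the hardware MAC address and an active interface address is cleared from the BSSID mask, so the hardware accepts frames addressed to either. The addresses below are hypothetical.

#include <stdio.h>

int main(void)
{
        /* Hypothetical addresses, for illustration only. */
        unsigned char hw_macaddr[6] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };
        unsigned char vif_addr[6]   = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x44 };
        unsigned char mask[6];
        int i;

        for (i = 0; i < 6; i++)
                mask[i] = 0xff;               /* memset(mask, 0xff, ETH_ALEN) */

        for (i = 0; i < 6; i++)
                mask[i] &= ~(hw_macaddr[i] ^ vif_addr[i]);

        for (i = 0; i < 6; i++)
                printf("%02x%s", mask[i], i == 5 ? "\n" : ":");

        /* Prints ff:ff:ff:ff:ff:88 - the last octets 0x33 and 0x44 differ
         * in bits 0-2 and 4-6, so those bits drop out of the mask. */
        return 0;
}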
+
+/* Called with sc->mutex held. */
+static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_vif_iter_data iter_data;
 
-       ath9k_set_bssid_mask(hw, vif);
+       ath9k_calculate_iter_data(hw, vif, &iter_data);
 
-       if (sc->nvifs > 1)
-               goto out; /* skip global settings for secondary vif */
+       ath9k_ps_wakeup(sc);
+       /* Set BSSID mask. */
+       memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+       ath_hw_setbssidmask(common);
 
-       if (ic_opmode == NL80211_IFTYPE_AP) {
+       /* Set op-mode & TSF */
+       if (iter_data.naps > 0) {
                ath9k_hw_set_tsfadjust(ah, 1);
                sc->sc_flags |= SC_OP_TSF_RESET;
-       }
+               ah->opmode = NL80211_IFTYPE_AP;
+       } else {
+               ath9k_hw_set_tsfadjust(ah, 0);
+               sc->sc_flags &= ~SC_OP_TSF_RESET;
 
-       /* Set the device opmode */
-       ah->opmode = ic_opmode;
+               if (iter_data.nwds + iter_data.nmeshes)
+                       ah->opmode = NL80211_IFTYPE_AP;
+               else if (iter_data.nadhocs)
+                       ah->opmode = NL80211_IFTYPE_ADHOC;
+               else
+                       ah->opmode = NL80211_IFTYPE_STATION;
+       }
 
        /*
         * Enable MIB interrupts when there are hardware phy counters.
-        * Note we only do this (at the moment) for station mode.
         */
-       if ((vif->type == NL80211_IFTYPE_STATION) ||
-           (vif->type == NL80211_IFTYPE_ADHOC) ||
-           (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+       if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
                if (ah->config.enable_ani)
                        ah->imask |= ATH9K_INT_MIB;
                ah->imask |= ATH9K_INT_TSFOOR;
+       } else {
+               ah->imask &= ~ATH9K_INT_MIB;
+               ah->imask &= ~ATH9K_INT_TSFOOR;
        }
 
        ath9k_hw_set_interrupts(ah, ah->imask);
+       ath9k_ps_restore(sc);
 
-       if (vif->type == NL80211_IFTYPE_AP    ||
-           vif->type == NL80211_IFTYPE_ADHOC) {
+       /* Set up ANI */
+       if ((iter_data.naps + iter_data.nadhocs) > 0) {
                sc->sc_flags |= SC_OP_ANI_RUN;
                ath_start_ani(common);
+       } else {
+               sc->sc_flags &= ~SC_OP_ANI_RUN;
+               del_timer_sync(&common->ani.timer);
        }
+}
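The device opmode now follows interface counts instead of the type of the last interface added. A short sketch mirroring the selection order above, evaluated for a couple of hypothetical interface mixes:

#include <stdio.h>

/* Mirrors the priority in ath9k_calculate_summary_state(). */
static const char *pick_opmode(int naps, int nwds, int nmeshes, int nadhocs)
{
        if (naps > 0)
                return "NL80211_IFTYPE_AP";     /* TSF adjust enabled in this case */
        if (nwds + nmeshes)
                return "NL80211_IFTYPE_AP";
        if (nadhocs)
                return "NL80211_IFTYPE_ADHOC";
        return "NL80211_IFTYPE_STATION";
}

int main(void)
{
        /* Hypothetical mixes. */
        printf("%s\n", pick_opmode(0, 0, 1, 0)); /* mesh + station -> AP     */
        printf("%s\n", pick_opmode(0, 0, 0, 0)); /* stations only -> STATION */
        return 0;
}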
 
-out:
-       mutex_unlock(&sc->mutex);
-       return ret;
+/* Called with sc->mutex held, vif counts set up properly. */
+static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = hw->priv;
+
+       ath9k_calculate_summary_state(hw, vif);
+
+       if (ath9k_uses_beacons(vif->type)) {
+               int error;
+               /* This may fail because upper levels do not have beacons
+                * properly configured yet.  That's OK; we assume they
+                * will be properly configured and then we will be notified
+                * in the info_changed method and set up beacons properly
+                * there.
+                */
+               ath9k_set_beaconing_status(sc, false);
+               error = ath_beacon_alloc(sc, vif);
+               if (!error)
+                       ath_beacon_config(sc, vif);
+               ath9k_set_beaconing_status(sc, true);
+       }
 }
 
-static void ath9k_reclaim_beacon(struct ath_softc *sc,
-                                struct ieee80211_vif *vif)
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif)
 {
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
+       int ret = 0;
 
-       /* Disable SWBA interrupt */
-       sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
-       ath9k_ps_wakeup(sc);
-       ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
-       ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-       tasklet_kill(&sc->bcon_tasklet);
-       ath9k_ps_restore(sc);
+       mutex_lock(&sc->mutex);
 
-       ath_beacon_return(sc, avp);
-       sc->sc_flags &= ~SC_OP_BEACONS;
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_MESH_POINT:
+               break;
+       default:
+               ath_err(common, "Interface type %d not yet supported\n",
+                       vif->type);
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
 
-       if (sc->nbcnvifs > 0) {
-               /* Re-enable beaconing */
-               sc->sc_ah->imask |= ATH9K_INT_SWBA;
-               ath9k_ps_wakeup(sc);
-               ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
-               ath9k_ps_restore(sc);
+       if (ath9k_uses_beacons(vif->type)) {
+               if (sc->nbcnvifs >= ATH_BCBUF) {
+                       ath_err(common, "Not enough beacon buffers when adding"
+                               " new interface of type: %i\n",
+                               vif->type);
+                       ret = -ENOBUFS;
+                       goto out;
+               }
        }
+
+       if ((vif->type == NL80211_IFTYPE_ADHOC) &&
+           sc->nvifs > 0) {
+               ath_err(common, "Cannot create ADHOC interface when other"
+                       " interfaces already exist.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Attach a VIF of type: %d\n", vif->type);
+
+       /* Set the VIF opmode */
+       avp->av_opmode = vif->type;
+       avp->av_bslot = -1;
+
+       sc->nvifs++;
+
+       ath9k_do_vif_add_setup(hw, vif);
+out:
+       mutex_unlock(&sc->mutex);
+       return ret;
 }
 
 static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1454,40 +1507,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
                                  enum nl80211_iftype new_type,
                                  bool p2p)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
        ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
        mutex_lock(&sc->mutex);
 
-       switch (new_type) {
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_ADHOC:
+       /* See if new interface type is valid. */
+       if ((new_type == NL80211_IFTYPE_ADHOC) &&
+           (sc->nvifs > 1)) {
+               ath_err(common, "When using ADHOC, it must be the only"
+                       " interface.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (ath9k_uses_beacons(new_type) &&
+           !ath9k_uses_beacons(vif->type)) {
                if (sc->nbcnvifs >= ATH_BCBUF) {
                        ath_err(common, "No beacon slot available\n");
                        ret = -ENOBUFS;
                        goto out;
                }
-               break;
-       case NL80211_IFTYPE_STATION:
-               /* Stop ANI */
-               sc->sc_flags &= ~SC_OP_ANI_RUN;
-               del_timer_sync(&common->ani.timer);
-               if ((vif->type == NL80211_IFTYPE_AP) ||
-                   (vif->type == NL80211_IFTYPE_ADHOC))
-                       ath9k_reclaim_beacon(sc, vif);
-               break;
-       default:
-               ath_err(common, "Interface type %d not yet supported\n",
-                               vif->type);
-               ret = -ENOTSUPP;
-               goto out;
        }
+
+       /* Clean up old vif stuff */
+       if (ath9k_uses_beacons(vif->type))
+               ath9k_reclaim_beacon(sc, vif);
+
+       /* Add new settings */
        vif->type = new_type;
        vif->p2p = p2p;
 
+       ath9k_do_vif_add_setup(hw, vif);
 out:
        mutex_unlock(&sc->mutex);
        return ret;
@@ -1496,25 +1549,20 @@ out:
 static void ath9k_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
        ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
 
        mutex_lock(&sc->mutex);
 
-       /* Stop ANI */
-       sc->sc_flags &= ~SC_OP_ANI_RUN;
-       del_timer_sync(&common->ani.timer);
+       sc->nvifs--;
 
        /* Reclaim beacon resources */
-       if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
-           (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
-           (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
+       if (ath9k_uses_beacons(vif->type))
                ath9k_reclaim_beacon(sc, vif);
 
-       sc->nvifs--;
+       ath9k_calculate_summary_state(hw, NULL);
 
        mutex_unlock(&sc->mutex);
 }
@@ -1555,12 +1603,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
 
 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &hw->conf;
-       bool disable_radio;
+       bool disable_radio = false;
 
        mutex_lock(&sc->mutex);
 
@@ -1571,29 +1618,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
         * the end.
         */
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
-               bool enable_radio;
-               bool all_wiphys_idle;
-               bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
-               spin_lock_bh(&sc->wiphy_lock);
-               all_wiphys_idle =  ath9k_all_wiphys_idle(sc);
-               ath9k_set_wiphy_idle(aphy, idle);
-
-               enable_radio = (!idle && all_wiphys_idle);
-
-               /*
-                * After we unlock here its possible another wiphy
-                * can be re-renabled so to account for that we will
-                * only disable the radio toward the end of this routine
-                * if by then all wiphys are still idle.
-                */
-               spin_unlock_bh(&sc->wiphy_lock);
-
-               if (enable_radio) {
-                       sc->ps_idle = false;
+               sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+               if (!sc->ps_idle) {
                        ath_radio_enable(sc, hw);
                        ath_dbg(common, ATH_DBG_CONFIG,
                                "not-idle: enabling radio\n");
+               } else {
+                       disable_radio = true;
                }
        }
 
@@ -1634,29 +1665,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                if (ah->curchan)
                        old_pos = ah->curchan - &ah->channels[0];
 
-               aphy->chan_idx = pos;
-               aphy->chan_is_ht = conf_is_ht(conf);
                if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                        sc->sc_flags |= SC_OP_OFFCHANNEL;
                else
                        sc->sc_flags &= ~SC_OP_OFFCHANNEL;
 
-               if (aphy->state == ATH_WIPHY_SCAN ||
-                   aphy->state == ATH_WIPHY_ACTIVE)
-                       ath9k_wiphy_pause_all_forced(sc, aphy);
-               else {
-                       /*
-                        * Do not change operational channel based on a paused
-                        * wiphy changes.
-                        */
-                       goto skip_chan_change;
-               }
-
-               ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
-                       curchan->center_freq);
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Set channel: %d MHz type: %d\n",
+                       curchan->center_freq, conf->channel_type);
 
-               /* XXX: remove me eventualy */
-               ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
+               ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+                                         curchan, conf->channel_type);
 
                /* update survey stats for the old channel before switching */
                spin_lock_irqsave(&common->cc_lock, flags);
@@ -1698,19 +1717,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                        ath_update_survey_nf(sc, old_pos);
        }
 
-skip_chan_change:
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Set power: %d\n", conf->power_level);
                sc->config.txpowlimit = 2 * conf->power_level;
-               ath_update_txpow(sc);
+               ath9k_ps_wakeup(sc);
+               ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                                      sc->config.txpowlimit, &sc->curtxpow);
+               ath9k_ps_restore(sc);
        }
 
-       spin_lock_bh(&sc->wiphy_lock);
-       disable_radio = ath9k_all_wiphys_idle(sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-
        if (disable_radio) {
                ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
-               sc->ps_idle = true;
                ath_radio_disable(sc, hw);
        }
 
@@ -1735,8 +1753,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
                                   unsigned int *total_flags,
                                   u64 multicast)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        u32 rfilt;
 
        changed_flags &= SUPPORTED_FILTERS;
@@ -1756,8 +1773,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
                         struct ieee80211_sta *sta)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        ath_node_attach(sc, sta);
 
@@ -1768,8 +1784,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        ath_node_detach(sc, sta);
 
@@ -1779,8 +1794,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                         const struct ieee80211_tx_queue_params *params)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        struct ath9k_tx_queue_info qi;
@@ -1824,8 +1838,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
                         struct ieee80211_sta *sta,
                         struct ieee80211_key_conf *key)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
@@ -1869,8 +1882,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                                   struct ieee80211_bss_conf *bss_conf,
                                   u32 changed)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1899,10 +1912,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        /* Enable transmission of beacons (AP, IBSS, MESH) */
        if ((changed & BSS_CHANGED_BEACON) ||
            ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
-               ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-               error = ath_beacon_alloc(aphy, vif);
+               ath9k_set_beaconing_status(sc, false);
+               error = ath_beacon_alloc(sc, vif);
                if (!error)
                        ath_beacon_config(sc, vif);
+               ath9k_set_beaconing_status(sc, true);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1925,21 +1939,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        /* Disable transmission of beacons */
-       if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
-               ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+           !bss_conf->enable_beacon) {
+               ath9k_set_beaconing_status(sc, false);
+               avp->is_bslot_active = false;
+               ath9k_set_beaconing_status(sc, true);
+       }
 
        if (changed & BSS_CHANGED_BEACON_INT) {
-               sc->beacon_interval = bss_conf->beacon_int;
+               cur_conf->beacon_interval = bss_conf->beacon_int;
                /*
                 * In case of AP mode, the HW TSF has to be reset
                 * when the beacon interval changes.
                 */
                if (vif->type == NL80211_IFTYPE_AP) {
                        sc->sc_flags |= SC_OP_TSF_RESET;
-                       ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-                       error = ath_beacon_alloc(aphy, vif);
+                       ath9k_set_beaconing_status(sc, false);
+                       error = ath_beacon_alloc(sc, vif);
                        if (!error)
                                ath_beacon_config(sc, vif);
+                       ath9k_set_beaconing_status(sc, true);
                } else {
                        ath_beacon_config(sc, vif);
                }
@@ -1975,9 +1994,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
 
 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
 {
+       struct ath_softc *sc = hw->priv;
        u64 tsf;
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
 
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
@@ -1990,8 +2008,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
 
 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
@@ -2002,8 +2019,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
 
 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        mutex_lock(&sc->mutex);
 
@@ -2018,10 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
                              enum ieee80211_ampdu_mlme_action action,
                              struct ieee80211_sta *sta,
-                             u16 tid, u16 *ssn)
+                             u16 tid, u16 *ssn, u8 buf_size)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        int ret = 0;
 
        local_bh_disable();
@@ -2066,8 +2081,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
 static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
                             struct survey_info *survey)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
@@ -2101,53 +2115,55 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
        return 0;
 }
 
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
 
        mutex_lock(&sc->mutex);
-       if (ath9k_wiphy_scanning(sc)) {
-               /*
-                * There is a race here in mac80211 but fixing it requires
-                * we revisit how we handle the scan complete callback.
-                * After mac80211 fixes we will not have configured hardware
-                * to the home channel nor would we have configured the RX
-                * filter yet.
-                */
-               mutex_unlock(&sc->mutex);
-               return;
-       }
-
-       aphy->state = ATH_WIPHY_SCAN;
-       ath9k_wiphy_pause_all_forced(sc, aphy);
+       ah->coverage_class = coverage_class;
+       ath9k_hw_init_global_settings(ah);
        mutex_unlock(&sc->mutex);
 }
 
-/*
- * XXX: this requires a revisit after the driver
- * scan_complete gets moved to another place/removed in mac80211.
- */
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
+       int timeout = 200; /* ms */
+       int i, j;
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
-       aphy->state = ATH_WIPHY_ACTIVE;
-       mutex_unlock(&sc->mutex);
-}
 
-static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
-{
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
-       struct ath_hw *ah = sc->sc_ah;
+       cancel_delayed_work_sync(&sc->tx_complete_work);
 
-       mutex_lock(&sc->mutex);
-       ah->coverage_class = coverage_class;
-       ath9k_hw_init_global_settings(ah);
+       if (drop)
+               timeout = 1;
+
+       for (j = 0; j < timeout; j++) {
+               int npend = 0;
+
+               if (j)
+                       usleep_range(1000, 2000);
+
+               for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+                       if (!ATH_TXQ_SETUP(sc, i))
+                               continue;
+
+                       npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+               }
+
+               if (!npend)
+                       goto out;
+       }
+
+       if (!ath_drain_all_txq(sc, false))
+               ath_reset(sc, false);
+
+out:
+       ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
 }
 
 struct ieee80211_ops ath9k_ops = {
@@ -2169,8 +2185,7 @@ struct ieee80211_ops ath9k_ops = {
        .reset_tsf          = ath9k_reset_tsf,
        .ampdu_action       = ath9k_ampdu_action,
        .get_survey         = ath9k_get_survey,
-       .sw_scan_start      = ath9k_sw_scan_start,
-       .sw_scan_complete   = ath9k_sw_scan_complete,
        .rfkill_poll        = ath9k_rfkill_poll_state,
        .set_coverage_class = ath9k_set_coverage_class,
+       .flush              = ath9k_flush,
 };
index 78ef1f1..e83128c 100644 (file)
@@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
 static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        void __iomem *mem;
-       struct ath_wiphy *aphy;
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
        u8 csz;
@@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_iomap;
        }
 
-       hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
-                               sizeof(struct ath_softc), &ath9k_ops);
+       hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
        if (!hw) {
                dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
                ret = -ENOMEM;
@@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        SET_IEEE80211_DEV(hw, &pdev->dev);
        pci_set_drvdata(pdev, hw);
 
-       aphy = hw->priv;
-       sc = (struct ath_softc *) (aphy + 1);
-       aphy->sc = sc;
-       aphy->hw = hw;
-       sc->pri_wiphy = aphy;
+       sc = hw->priv;
        sc->hw = hw;
        sc->dev = &pdev->dev;
        sc->mem = mem;
@@ -260,8 +254,7 @@ err_dma:
 static void ath_pci_remove(struct pci_dev *pdev)
 {
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        void __iomem *mem = sc->mem;
 
        if (!is_ath9k_unloaded)
@@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
@@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        u32 val;
 
        /*
@@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device)
        ath9k_ps_restore(sc);
 
        sc->ps_idle = true;
-       ath9k_set_wiphy_idle(aphy, true);
        ath_radio_disable(sc, hw);
 
        return 0;
index e451478..960d717 100644 (file)
@@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
 
 static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       return aphy->sc;
+       return hw->priv;
 }
 
 static void ath_rate_free(void *priv)
index b2497b8..a9c3f46 100644 (file)
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
               (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
 }
 
-static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
-                                            struct ieee80211_hdr *hdr)
-{
-       struct ieee80211_hw *hw = sc->pri_wiphy->hw;
-       int i;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy == NULL)
-                       continue;
-               if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
-                   == 0) {
-                       hw = aphy->hw;
-                       break;
-               }
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-       return hw;
-}
-
 /*
  * Setup and link descriptors.
  *
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
        int error = 0, i;
        u32 size;
 
-
-       common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
-                                    ah->caps.rx_status_len,
-                                    min(common->cachelsz, (u16)64));
-
        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);
 
@@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);
 
+       common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
+                            sc->sc_ah->caps.rx_status_len;
+
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                return ath_rx_edma_init(sc, nbufs);
        } else {
-               common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-                               min(common->cachelsz, (u16)64));
-
                ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                        common->cachelsz, common->rx_bufsize);
 
@@ -439,9 +413,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
         * mode interface or when in monitor mode. AP mode does not need this
         * since it receives all in-BSS frames anyway.
         */
-       if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
-            (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
-           (sc->sc_ah->is_monitoring))
+       if (sc->sc_ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;
 
        if (sc->rx.rxfilter & FIF_CONTROL)
@@ -463,8 +435,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;
 
-       if (sc->sec_wiphy || (sc->nvifs > 1) ||
-           (sc->rx.rxfilter & FIF_OTHER_BSS)) {
+       if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
@@ -588,8 +559,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
                return;
 
        mgmt = (struct ieee80211_mgmt *)skb->data;
-       if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+       if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
+               /* TODO: This doesn't work well if stations are associated
+                * with two different APs, because curbssid only tracks the
+                * last AP that any of the stations associated with.
+                */
                return; /* not from our current AP */
+       }
 
        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
 
@@ -662,37 +639,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
        }
 }
 
-static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
-                                   struct ath_softc *sc, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-
-       /* Send the frame to mac80211 */
-       if (is_multicast_ether_addr(hdr->addr1)) {
-               int i;
-               /*
-                * Deliver broadcast/multicast frames to all suitable
-                * virtual wiphys.
-                */
-               /* TODO: filter based on channel configuration */
-               for (i = 0; i < sc->num_sec_wiphy; i++) {
-                       struct ath_wiphy *aphy = sc->sec_wiphy[i];
-                       struct sk_buff *nskb;
-                       if (aphy == NULL)
-                               continue;
-                       nskb = skb_copy(skb, GFP_ATOMIC);
-                       if (!nskb)
-                               continue;
-                       ieee80211_rx(aphy->hw, nskb);
-               }
-               ieee80211_rx(sc->hw, skb);
-       } else
-               /* Deliver unicast frames based on receiver address */
-               ieee80211_rx(hw, skb);
-}
-
 static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
 {
@@ -862,15 +808,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
        if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
                return false;
 
-       /*
-        * rs_more indicates chained descriptors which can be used
-        * to link buffers together for a sort of scatter-gather
-        * operation.
-        * reject the frame, we don't support scatter-gather yet and
-        * the frame is probably corrupt anyway
-        */
+       /* Only use error bits from the last fragment */
        if (rx_stats->rs_more)
-               return false;
+               return true;
 
        /*
         * The rx_stats->rs_status will not be set until the end of the
@@ -974,7 +914,7 @@ static void ath9k_process_rssi(struct ath_common *common,
                               struct ieee80211_hdr *hdr,
                               struct ath_rx_status *rx_stats)
 {
-       struct ath_wiphy *aphy = hw->priv;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = common->ah;
        int last_rssi;
        __le16 fc;
@@ -984,13 +924,19 @@ static void ath9k_process_rssi(struct ath_common *common,
 
        fc = hdr->frame_control;
        if (!ieee80211_is_beacon(fc) ||
-           compare_ether_addr(hdr->addr3, common->curbssid))
+           compare_ether_addr(hdr->addr3, common->curbssid)) {
+               /* TODO: This doesn't work well if stations are associated
+                * with two different APs, because curbssid only tracks the
+                * last AP that any of the stations associated with.
+                */
                return;
+       }
 
        if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
-               ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
+               ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
 
-       last_rssi = aphy->last_rssi;
+       last_rssi = sc->last_rssi;
        if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
                rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
                                              ATH_RSSI_EP_MULTIPLIER);
@@ -1022,6 +968,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
        if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
                return -EINVAL;
 
+       /* Only use status info from the last fragment */
+       if (rx_stats->rs_more)
+               return 0;
+
        ath9k_process_rssi(common, hw, hdr, rx_stats);
 
        if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1031,7 +981,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
        rx_status->freq = hw->conf.channel->center_freq;
        rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
        rx_status->antenna = rx_stats->rs_antenna;
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        return 0;
 }
@@ -1623,7 +1573,7 @@ div_comb_done:
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
        struct ath_buf *bf;
-       struct sk_buff *skb = NULL, *requeue_skb;
+       struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
@@ -1632,7 +1582,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
         * virtual wiphy so to account for that we iterate over the active
         * wiphys and find the appropriate wiphy and therefore hw.
         */
-       struct ieee80211_hw *hw = NULL;
+       struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        int retval;
        bool decrypt_error = false;
@@ -1674,10 +1624,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (!skb)
                        continue;
 
-               hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
-               rxs =  IEEE80211_SKB_RXCB(skb);
+               /*
+                * Take frame header from the first fragment and RX status from
+                * the last one.
+                */
+               if (sc->rx.frag)
+                       hdr_skb = sc->rx.frag;
+               else
+                       hdr_skb = skb;
 
-               hw = ath_get_virt_hw(sc, hdr);
+               hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
+               rxs = IEEE80211_SKB_RXCB(hdr_skb);
 
                ath_debug_stat_rx(sc, &rs);
 
@@ -1686,12 +1643,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                 * chain it back at the queue without processing it.
                 */
                if (flush)
-                       goto requeue;
+                       goto requeue_drop_frag;
 
                retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
                                                 rxs, &decrypt_error);
                if (retval)
-                       goto requeue;
+                       goto requeue_drop_frag;
 
                rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
                if (rs.rs_tstamp > tsf_lower &&
@@ -1711,7 +1668,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
-                       goto requeue;
+                       goto requeue_drop_frag;
 
                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1722,8 +1679,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);
 
-               ath9k_rx_skb_postprocess(common, skb, &rs,
-                                        rxs, decrypt_error);
+               if (!rs.rs_more)
+                       ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
+                                                rxs, decrypt_error);
 
                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
@@ -1736,10 +1694,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        bf->bf_mpdu = NULL;
                        bf->bf_buf_addr = 0;
                        ath_err(common, "dma_mapping_error() on RX\n");
-                       ath_rx_send_to_mac80211(hw, sc, skb);
+                       ieee80211_rx(hw, skb);
                        break;
                }
 
+               if (rs.rs_more) {
+                       /*
+                        * rs_more indicates chained descriptors which can be
+                        * used to link buffers together for a sort of
+                        * scatter-gather operation.
+                        */
+                       if (sc->rx.frag) {
+                               /* too many fragments - cannot handle frame */
+                               dev_kfree_skb_any(sc->rx.frag);
+                               dev_kfree_skb_any(skb);
+                               skb = NULL;
+                       }
+                       sc->rx.frag = skb;
+                       goto requeue;
+               }
+
+               if (sc->rx.frag) {
+                       int space = skb->len - skb_tailroom(hdr_skb);
+
+                       sc->rx.frag = NULL;
+
+                       if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
+                               dev_kfree_skb(skb);
+                               goto requeue_drop_frag;
+                       }
+
+                       skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
+                                                 skb->len);
+                       dev_kfree_skb_any(skb);
+                       skb = hdr_skb;
+               }
+
                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
@@ -1763,8 +1753,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                        ath_ant_comb_scan(sc, &rs);
 
-               ath_rx_send_to_mac80211(hw, sc, skb);
+               ieee80211_rx(hw, skb);
 
+requeue_drop_frag:
+               if (sc->rx.frag) {
+                       dev_kfree_skb_any(sc->rx.frag);
+                       sc->rx.frag = NULL;
+               }
 requeue:
                if (edma) {
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
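
The recv.c changes above stop rejecting rs_more frames and instead reassemble them: the 802.11 header comes from the first fragment, the status/error bits from the last, and the payloads are merged into one buffer before being handed to ieee80211_rx(). Editor's sketch (not part of the patch): a user-space model of just the merge step; buf_t and merge_frags() are invented stand-ins for struct sk_buff and the pskb_expand_head() + skb_copy_from_linear_data() pair used in the hunk.

/* Standalone sketch of the two-buffer reassembly done on the rs_more path:
 * keep the first fragment, append the payload of the last one, then hand
 * the merged buffer up. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	unsigned char *data;
	size_t len;
} buf_t;

/* Grow 'first' and copy 'last' behind it. */
static int merge_frags(buf_t *first, const buf_t *last)
{
	unsigned char *p = realloc(first->data, first->len + last->len);

	if (!p)
		return -1;
	memcpy(p + first->len, last->data, last->len);
	first->data = p;
	first->len += last->len;
	return 0;
}

int main(void)
{
	buf_t first = { .data = malloc(4), .len = 4 };
	buf_t last  = { .data = malloc(3), .len = 3 };

	memcpy(first.data, "HDR+", 4);  /* header lives in the first fragment */
	memcpy(last.data,  "END", 3);   /* status bits arrive with the last one */

	if (merge_frags(&first, &last) == 0)
		printf("merged %zu bytes: %.7s\n", first.len, (char *)first.data);

	free(first.data);
	free(last.data);
	return 0;
}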
index 4df5659..8fa8acf 100644 (file)
 #define AR_SREV_REVISION_9300_20       2 /* 2.0 and 2.1 */
 #define AR_SREV_VERSION_9485           0x240
 #define AR_SREV_REVISION_9485_10       0
+#define AR_SREV_REVISION_9485_11        1
 
 #define AR_SREV_5416(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
 #define AR_SREV_9485_10(_ah) \
        (AR_SREV_9485(_ah) && \
         ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
+#define AR_SREV_9485_11(_ah) \
+       (AR_SREV_9485(_ah) && \
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
 
 #define AR_SREV_9285E_20(_ah) \
     (AR_SREV_9285_12_OR_LATER(_ah) && \
 enum ath_usb_dev {
        AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
        AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
+       STORAGE_DEVICE = 3,
 };
 
 #define AR_DEVID_7010(_ah) \
@@ -1083,6 +1088,17 @@ enum {
 #define AR_ENT_OTP               0x40d8
 #define AR_ENT_OTP_CHAIN2_DISABLE               0x00020000
 #define AR_ENT_OTP_MPSD                0x00800000
+#define AR_CH0_BB_DPLL2          0x16184
+#define AR_CH0_BB_DPLL3          0x16188
+#define AR_CH0_DDR_DPLL2         0x16244
+#define AR_CH0_DDR_DPLL3         0x16248
+#define AR_CH0_DPLL2_KD              0x03F80000
+#define AR_CH0_DPLL2_KD_S            19
+#define AR_CH0_DPLL2_KI              0x3C000000
+#define AR_CH0_DPLL2_KI_S            26
+#define AR_CH0_DPLL3_PHASE_SHIFT     0x3F800000
+#define AR_CH0_DPLL3_PHASE_SHIFT_S   23
+#define AR_PHY_CCA_NOM_VAL_2GHZ      -118
 
 #define AR_RTC_9300_PLL_DIV          0x000003ff
 #define AR_RTC_9300_PLL_DIV_S        0
@@ -1129,6 +1145,12 @@ enum {
 #define AR_RTC_PLL_CLKSEL       0x00000300
 #define AR_RTC_PLL_CLKSEL_S     8
 
+#define PLL3 0x16188
+#define PLL3_DO_MEAS_MASK 0x40000000
+#define PLL4 0x1618c
+#define PLL4_MEAS_DONE    0x8
+#define SQSUM_DVC_MASK 0x007ffff8
+
 #define AR_RTC_RESET \
        ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
 #define AR_RTC_RESET_EN                (0x00000001)
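
The new AR_CH0_*DPLL* and PLL3/PLL4 additions follow the file's usual convention: each field gets a contiguous mask plus a matching _S shift constant, and a field update is a read-modify-write. Editor's sketch (not part of the patch): that convention in isolation; rmw_field() and the sample register value are made up, only the DPLL2_KD mask/shift pair is copied from the hunk.

/* Standalone sketch of the mask/_S-shift convention used by the new defines. */
#include <stdio.h>
#include <stdint.h>

#define DPLL2_KD    0x03F80000u   /* 7-bit field, mirrors AR_CH0_DPLL2_KD */
#define DPLL2_KD_S  19            /* mirrors AR_CH0_DPLL2_KD_S */

static uint32_t rmw_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0x12345678u;   /* pretend register readback */
	uint32_t out = rmw_field(reg, DPLL2_KD, DPLL2_KD_S, 0x40);

	printf("KD before=0x%02x after=0x%02x\n",
	       (unsigned)((reg & DPLL2_KD) >> DPLL2_KD_S),
	       (unsigned)((out & DPLL2_KD) >> DPLL2_KD_S));
	return 0;
}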
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644 (file)
index 2dc7095..0000000
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "ath9k.h"
-
-struct ath9k_vif_iter_data {
-       const u8 *hw_macaddr;
-       u8 mask[ETH_ALEN];
-};
-
-static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath9k_vif_iter_data *iter_data = data;
-       int i;
-
-       for (i = 0; i < ETH_ALEN; i++)
-               iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
-}
-
-void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath9k_vif_iter_data iter_data;
-       int i;
-
-       /*
-        * Use the hardware MAC address as reference, the hardware uses it
-        * together with the BSSID mask when matching addresses.
-        */
-       iter_data.hw_macaddr = common->macaddr;
-       memset(&iter_data.mask, 0xff, ETH_ALEN);
-
-       if (vif)
-               ath9k_vif_iter(&iter_data, vif->addr, vif);
-
-       /* Get list of all active MAC addresses */
-       spin_lock_bh(&sc->wiphy_lock);
-       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
-                                                  &iter_data);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] == NULL)
-                       continue;
-               ieee80211_iterate_active_interfaces_atomic(
-                       sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
-       ath_hw_setbssidmask(common);
-}
-
-int ath9k_wiphy_add(struct ath_softc *sc)
-{
-       int i, error;
-       struct ath_wiphy *aphy;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ieee80211_hw *hw;
-       u8 addr[ETH_ALEN];
-
-       hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
-       if (hw == NULL)
-               return -ENOMEM;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] == NULL)
-                       break;
-       }
-
-       if (i == sc->num_sec_wiphy) {
-               /* No empty slot available; increase array length */
-               struct ath_wiphy **n;
-               n = krealloc(sc->sec_wiphy,
-                            (sc->num_sec_wiphy + 1) *
-                            sizeof(struct ath_wiphy *),
-                            GFP_ATOMIC);
-               if (n == NULL) {
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       ieee80211_free_hw(hw);
-                       return -ENOMEM;
-               }
-               n[i] = NULL;
-               sc->sec_wiphy = n;
-               sc->num_sec_wiphy++;
-       }
-
-       SET_IEEE80211_DEV(hw, sc->dev);
-
-       aphy = hw->priv;
-       aphy->sc = sc;
-       aphy->hw = hw;
-       sc->sec_wiphy[i] = aphy;
-       aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       memcpy(addr, common->macaddr, ETH_ALEN);
-       addr[0] |= 0x02; /* Locally managed address */
-       /*
-        * XOR virtual wiphy index into the least significant bits to generate
-        * a different MAC address for each virtual wiphy.
-        */
-       addr[5] ^= i & 0xff;
-       addr[4] ^= (i & 0xff00) >> 8;
-       addr[3] ^= (i & 0xff0000) >> 16;
-
-       SET_IEEE80211_PERM_ADDR(hw, addr);
-
-       ath9k_set_hw_capab(sc, hw);
-
-       error = ieee80211_register_hw(hw);
-
-       if (error == 0) {
-               /* Make sure wiphy scheduler is started (if enabled) */
-               ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
-       }
-
-       return error;
-}
-
-int ath9k_wiphy_del(struct ath_wiphy *aphy)
-{
-       struct ath_softc *sc = aphy->sc;
-       int i;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (aphy == sc->sec_wiphy[i]) {
-                       sc->sec_wiphy[i] = NULL;
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       ieee80211_unregister_hw(aphy->hw);
-                       ieee80211_free_hw(aphy->hw);
-                       return 0;
-               }
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-       return -ENOENT;
-}
-
-static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
-                              struct ieee80211_vif *vif, const u8 *bssid,
-                              int ps)
-{
-       struct ath_softc *sc = aphy->sc;
-       struct ath_tx_control txctl;
-       struct sk_buff *skb;
-       struct ieee80211_hdr *hdr;
-       __le16 fc;
-       struct ieee80211_tx_info *info;
-
-       skb = dev_alloc_skb(24);
-       if (skb == NULL)
-               return -ENOMEM;
-       hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
-       memset(hdr, 0, 24);
-       fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
-                        IEEE80211_FCTL_TODS);
-       if (ps)
-               fc |= cpu_to_le16(IEEE80211_FCTL_PM);
-       hdr->frame_control = fc;
-       memcpy(hdr->addr1, bssid, ETH_ALEN);
-       memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
-       memcpy(hdr->addr3, bssid, ETH_ALEN);
-
-       info = IEEE80211_SKB_CB(skb);
-       memset(info, 0, sizeof(*info));
-       info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
-       info->control.vif = vif;
-       info->control.rates[0].idx = 0;
-       info->control.rates[0].count = 4;
-       info->control.rates[1].idx = -1;
-
-       memset(&txctl, 0, sizeof(struct ath_tx_control));
-       txctl.txq = sc->tx.txq_map[WME_AC_VO];
-       txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
-
-       if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
-               goto exit;
-
-       return 0;
-exit:
-       dev_kfree_skb_any(skb);
-       return -1;
-}
-
-static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
-               return true;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
-                       return true;
-       }
-       return false;
-}
-
-static bool ath9k_wiphy_pausing(struct ath_softc *sc)
-{
-       bool ret;
-       spin_lock_bh(&sc->wiphy_lock);
-       ret = __ath9k_wiphy_pausing(sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-       return ret;
-}
-
-static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
-               return true;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
-                       return true;
-       }
-       return false;
-}
-
-bool ath9k_wiphy_scanning(struct ath_softc *sc)
-{
-       bool ret;
-       spin_lock_bh(&sc->wiphy_lock);
-       ret = __ath9k_wiphy_scanning(sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-       return ret;
-}
-
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
-{
-       if (aphy == NULL)
-               return;
-       if (aphy->chan_idx != aphy->sc->chan_idx)
-               return; /* wiphy not on the selected channel */
-       __ath9k_wiphy_unpause(aphy);
-}
-
-static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
-{
-       int i;
-       spin_lock_bh(&sc->wiphy_lock);
-       __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
-       for (i = 0; i < sc->num_sec_wiphy; i++)
-               __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
-       spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_chan_work(struct work_struct *work)
-{
-       struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_wiphy *aphy = sc->next_wiphy;
-
-       if (aphy == NULL)
-               return;
-
-       /*
-        * All pending interfaces paused; ready to change
-        * channels.
-        */
-
-       /* Change channels */
-       mutex_lock(&sc->mutex);
-       /* XXX: remove me eventually */
-       ath9k_update_ichannel(sc, aphy->hw,
-                             &sc->sc_ah->channels[sc->chan_idx]);
-
-       /* sync hw configuration for hw code */
-       common->hw = aphy->hw;
-
-       if (ath_set_channel(sc, aphy->hw,
-                           &sc->sc_ah->channels[sc->chan_idx]) < 0) {
-               printk(KERN_DEBUG "ath9k: Failed to set channel for new "
-                      "virtual wiphy\n");
-               mutex_unlock(&sc->mutex);
-               return;
-       }
-       mutex_unlock(&sc->mutex);
-
-       ath9k_wiphy_unpause_channel(sc);
-}
-
-/*
- * ath9k version of ieee80211_tx_status() for TX frames that are generated
- * internally in the driver.
- */
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
-{
-       struct ath_wiphy *aphy = hw->priv;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-
-       if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
-               if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
-                       printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
-                              "frame\n", wiphy_name(hw->wiphy));
-                       /*
-                        * The AP did not reply; ignore this to allow us to
-                        * continue.
-                        */
-               }
-               aphy->state = ATH_WIPHY_PAUSED;
-               if (!ath9k_wiphy_pausing(aphy->sc)) {
-                       /*
-                        * Drop from tasklet to work to allow mutex for channel
-                        * change.
-                        */
-                       ieee80211_queue_work(aphy->sc->hw,
-                                  &aphy->sc->chan_work);
-               }
-       }
-
-       dev_kfree_skb(skb);
-}
-
-static void ath9k_mark_paused(struct ath_wiphy *aphy)
-{
-       struct ath_softc *sc = aphy->sc;
-       aphy->state = ATH_WIPHY_PAUSED;
-       if (!__ath9k_wiphy_pausing(sc))
-               ieee80211_queue_work(sc->hw, &sc->chan_work);
-}
-
-static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath_wiphy *aphy = data;
-       struct ath_vif *avp = (void *) vif->drv_priv;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               if (!vif->bss_conf.assoc) {
-                       ath9k_mark_paused(aphy);
-                       break;
-               }
-               /* TODO: could avoid this if already in PS mode */
-               if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
-                       printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
-                              __func__);
-                       ath9k_mark_paused(aphy);
-               }
-               break;
-       case NL80211_IFTYPE_AP:
-               /* Beacon transmission is paused by aphy->state change */
-               ath9k_mark_paused(aphy);
-               break;
-       default:
-               break;
-       }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
-       ieee80211_stop_queues(aphy->hw);
-       aphy->state = ATH_WIPHY_PAUSING;
-       /*
-        * TODO: handle PAUSING->PAUSED for the case where there are multiple
-        * active vifs (now we do it on the first vif getting ready; should be
-        * on the last)
-        */
-       ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
-                                                  aphy);
-       return 0;
-}
-
-int ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
-       int ret;
-       spin_lock_bh(&aphy->sc->wiphy_lock);
-       ret = __ath9k_wiphy_pause(aphy);
-       spin_unlock_bh(&aphy->sc->wiphy_lock);
-       return ret;
-}
-
-static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath_wiphy *aphy = data;
-       struct ath_vif *avp = (void *) vif->drv_priv;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               if (!vif->bss_conf.assoc)
-                       break;
-               ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
-               break;
-       case NL80211_IFTYPE_AP:
-               /* Beacon transmission is re-enabled by aphy->state change */
-               break;
-       default:
-               break;
-       }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
-       ieee80211_iterate_active_interfaces_atomic(aphy->hw,
-                                                  ath9k_unpause_iter, aphy);
-       aphy->state = ATH_WIPHY_ACTIVE;
-       ieee80211_wake_queues(aphy->hw);
-       return 0;
-}
-
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
-       int ret;
-       spin_lock_bh(&aphy->sc->wiphy_lock);
-       ret = __ath9k_wiphy_unpause(aphy);
-       spin_unlock_bh(&aphy->sc->wiphy_lock);
-       return ret;
-}
-
-static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
-               sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
-                       sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
-       }
-}
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
-               __ath9k_wiphy_pause(sc->pri_wiphy);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
-                       __ath9k_wiphy_pause(sc->sec_wiphy[i]);
-       }
-}
-
-int ath9k_wiphy_select(struct ath_wiphy *aphy)
-{
-       struct ath_softc *sc = aphy->sc;
-       bool now;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       if (__ath9k_wiphy_scanning(sc)) {
-               /*
-                * For now, we are using mac80211 sw scan and it expects to
-                * have full control over channel changes, so avoid wiphy
-                * scheduling during a scan. This could be optimized if the
-                * scanning control were moved into the driver.
-                */
-               spin_unlock_bh(&sc->wiphy_lock);
-               return -EBUSY;
-       }
-       if (__ath9k_wiphy_pausing(sc)) {
-               if (sc->wiphy_select_failures == 0)
-                       sc->wiphy_select_first_fail = jiffies;
-               sc->wiphy_select_failures++;
-               if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
-               {
-                       printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
-                              "out; disable/enable hw to recover\n");
-                       __ath9k_wiphy_mark_all_paused(sc);
-                       /*
-                        * TODO: this workaround to fix hardware is unlikely to
-                        * be specific to virtual wiphy changes. It can happen
-                        * on normal channel change, too, and as such, this
-                        * should really be made more generic. For example,
-                        * trigger radio disable/enable on GTT interrupt burst
-                        * (say, 10 GTT interrupts received without any TX
-                        * frame being completed)
-                        */
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       ath_radio_disable(sc, aphy->hw);
-                       ath_radio_enable(sc, aphy->hw);
-                       /* Only the primary wiphy hw is used for queuing work */
-                       ieee80211_queue_work(aphy->sc->hw,
-                                  &aphy->sc->chan_work);
-                       return -EBUSY; /* previous select still in progress */
-               }
-               spin_unlock_bh(&sc->wiphy_lock);
-               return -EBUSY; /* previous select still in progress */
-       }
-       sc->wiphy_select_failures = 0;
-
-       /* Store the new channel */
-       sc->chan_idx = aphy->chan_idx;
-       sc->chan_is_ht = aphy->chan_is_ht;
-       sc->next_wiphy = aphy;
-
-       __ath9k_wiphy_pause_all(sc);
-       now = !__ath9k_wiphy_pausing(aphy->sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       if (now) {
-               /* Ready to request channel change immediately */
-               ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
-       }
-
-       /*
-        * wiphys will be unpaused in ath9k_tx_status() once channel has been
-        * changed if any wiphy needs time to become paused.
-        */
-
-       return 0;
-}
-
-bool ath9k_wiphy_started(struct ath_softc *sc)
-{
-       int i;
-       spin_lock_bh(&sc->wiphy_lock);
-       if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
-               spin_unlock_bh(&sc->wiphy_lock);
-               return true;
-       }
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       return true;
-               }
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-       return false;
-}
-
-static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
-                                  struct ath_wiphy *selected)
-{
-       if (selected->state == ATH_WIPHY_SCAN) {
-               if (aphy == selected)
-                       return;
-               /*
-                * Pause all other wiphys for the duration of the scan even if
-                * they are on the current channel now.
-                */
-       } else if (aphy->chan_idx == selected->chan_idx)
-               return;
-       aphy->state = ATH_WIPHY_PAUSED;
-       ieee80211_stop_queues(aphy->hw);
-}
-
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
-                                 struct ath_wiphy *selected)
-{
-       int i;
-       spin_lock_bh(&sc->wiphy_lock);
-       if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
-               ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
-                       ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_work(struct work_struct *work)
-{
-       struct ath_softc *sc = container_of(work, struct ath_softc,
-                                           wiphy_work.work);
-       struct ath_wiphy *aphy = NULL;
-       bool first = true;
-
-       spin_lock_bh(&sc->wiphy_lock);
-
-       if (sc->wiphy_scheduler_int == 0) {
-               /* wiphy scheduler is disabled */
-               spin_unlock_bh(&sc->wiphy_lock);
-               return;
-       }
-
-try_again:
-       sc->wiphy_scheduler_index++;
-       while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
-               aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
-               if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
-                       break;
-
-               sc->wiphy_scheduler_index++;
-               aphy = NULL;
-       }
-       if (aphy == NULL) {
-               sc->wiphy_scheduler_index = 0;
-               if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
-                       if (first) {
-                               first = false;
-                               goto try_again;
-                       }
-                       /* No wiphy is ready to be scheduled */
-               } else
-                       aphy = sc->pri_wiphy;
-       }
-
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       if (aphy &&
-           aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
-           ath9k_wiphy_select(aphy)) {
-               printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
-                      "change\n");
-       }
-
-       ieee80211_queue_delayed_work(sc->hw,
-                                    &sc->wiphy_work,
-                                    sc->wiphy_scheduler_int);
-}
-
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
-{
-       cancel_delayed_work_sync(&sc->wiphy_work);
-       sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
-       if (sc->wiphy_scheduler_int)
-               ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
-                                            sc->wiphy_scheduler_int);
-}
-
-/* caller must hold wiphy_lock */
-bool ath9k_all_wiphys_idle(struct ath_softc *sc)
-{
-       unsigned int i;
-       if (!sc->pri_wiphy->idle)
-               return false;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (!aphy)
-                       continue;
-               if (!aphy->idle)
-                       return false;
-       }
-       return true;
-}
-
-/* caller must hold wiphy_lock */
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
-{
-       struct ath_softc *sc = aphy->sc;
-
-       aphy->idle = idle;
-       ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
-               "Marking %s as %sidle\n",
-               wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
-}
-/* Only bother starting a queue on an active virtual wiphy */
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
-{
-       struct ieee80211_hw *hw = sc->pri_wiphy->hw;
-       unsigned int i;
-       bool txq_started = false;
-
-       spin_lock_bh(&sc->wiphy_lock);
-
-       /* Start the primary wiphy */
-       if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
-               ieee80211_wake_queue(hw, skb_queue);
-               txq_started = true;
-               goto unlock;
-       }
-
-       /* Now start the secondary wiphy queues */
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (!aphy)
-                       continue;
-               if (aphy->state != ATH_WIPHY_ACTIVE)
-                       continue;
-
-               hw = aphy->hw;
-               ieee80211_wake_queue(hw, skb_queue);
-               txq_started = true;
-               break;
-       }
-
-unlock:
-       spin_unlock_bh(&sc->wiphy_lock);
-       return txq_started;
-}
-
-/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
-{
-       struct ieee80211_hw *hw = sc->pri_wiphy->hw;
-       unsigned int i;
-
-       spin_lock_bh(&sc->wiphy_lock);
-
-       /* Stop the primary wiphy */
-       ieee80211_stop_queue(hw, skb_queue);
-
-       /* Now stop the secondary wiphy queues */
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (!aphy)
-                       continue;
-               hw = aphy->hw;
-               ieee80211_stop_queue(hw, skb_queue);
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-}
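
One technique from the deleted virtual.c is worth calling out, since the rest of the driver still depends on the idea: ath9k_set_bssid_mask() derived the hardware BSSID mask by clearing every bit on which an active interface address differs from the hardware MAC, so the hardware keeps matching all of them. Editor's sketch (not part of the patch): just that derivation in user-space C; the addresses are made up and build_bssid_mask() is an invented name.

/* Standalone sketch of the BSSID-mask derivation the deleted
 * ath9k_vif_iter()/ath9k_set_bssid_mask() pair performed: a mask bit stays
 * set only where every interface address agrees with the hardware address. */
#include <stdio.h>

#define ETH_ALEN 6

static void build_bssid_mask(const unsigned char *hw_addr,
			     const unsigned char addrs[][ETH_ALEN], int n,
			     unsigned char *mask)
{
	int i, j;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] = 0xff;

	for (j = 0; j < n; j++)
		for (i = 0; i < ETH_ALEN; i++)
			mask[i] &= ~(hw_addr[i] ^ addrs[j][i]);
}

int main(void)
{
	const unsigned char hw[ETH_ALEN]      = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x00 };
	const unsigned char vifs[2][ETH_ALEN] = {
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x01 },
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x02 },
	};
	unsigned char mask[ETH_ALEN];
	int i;

	build_bssid_mask(hw, vifs, 2, mask);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}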
index dc862f5..d3d2490 100644 (file)
@@ -123,12 +123,8 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
 void ath9k_swba_tasklet(unsigned long data)
 {
        struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-
-       ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n");
 
        ath9k_htc_swba(priv, priv->wmi->beacon_pending);
-
 }
 
 void ath9k_fatal_work(struct work_struct *work)
index 332d1fe..ef22096 100644 (file)
@@ -19,7 +19,6 @@
 
 #define BITS_PER_BYTE           8
 #define OFDM_PLCP_BITS          22
-#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
 #define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
 #define L_STF                   8
 #define L_LTF                   8
@@ -32,7 +31,6 @@
 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
 
-#define OFDM_SIFS_TIME             16
 
 static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
@@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
-                            int nframes, int nbad, int txok, bool update_rc);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+                            struct ath_tx_status *ts, int nframes, int nbad,
+                            int txok, bool update_rc);
 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
 
@@ -167,9 +166,9 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
                fi = get_frame_info(bf->bf_mpdu);
                if (fi->retries) {
                        ath_tx_update_baw(sc, tid, fi->seqno);
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
                } else {
-                       ath_tx_send_normal(sc, txq, tid, &bf_head);
+                       ath_tx_send_normal(sc, txq, NULL, &bf_head);
                }
                spin_lock_bh(&txq->axq_lock);
        }
@@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 
        ATH_TXBUF_RESET(tbf);
 
-       tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
-       struct ieee80211_hw *hw;
+       struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
@@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        hdr = (struct ieee80211_hdr *)skb->data;
 
        tx_info = IEEE80211_SKB_CB(skb);
-       hw = bf->aphy->hw;
 
        memcpy(rates, tx_info->control.rates, sizeof(rates));
 
@@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);
 
-                       ath_tx_rc_status(bf, ts, 1, 1, 0, false);
+                       ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                0, 0);
 
@@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
-               txfail = txpending = 0;
+               txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;
 
                skb = bf->bf_mpdu;
@@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
-                               ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
+                               ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
-                               ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
+                               ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
                        }
 
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
-                                               ath_tx_rc_status(bf, ts, nframes,
+                                               ath_tx_rc_status(sc, bf, ts, nframes,
                                                                nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
@@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
        rcu_read_unlock();
 
-       if (needreset)
+       if (needreset) {
+               spin_unlock_bh(&sc->sc_pcu_lock);
                ath_reset(sc, false);
+               spin_lock_bh(&sc->sc_pcu_lock);
+       }
 }
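
The needreset hunk above now drops sc_pcu_lock around ath_reset() and retakes it afterwards, presumably because the reset path is too heavyweight (or needs that lock itself) to be called with the completion lock held. Editor's sketch (not part of the patch): the unlock-around-the-call shape with a pthread mutex standing in for the spinlock; complete_aggr() and heavy_reset() are invented names.

/* Standalone sketch of the unlock-around-reset pattern added to the
 * needreset path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pcu_lock = PTHREAD_MUTEX_INITIALIZER;

static void heavy_reset(void)
{
	/* stands in for ath_reset(); must not run under pcu_lock */
	puts("resetting hardware");
}

static void complete_aggr(int needreset)
{
	pthread_mutex_lock(&pcu_lock);
	/* ... per-frame completion work happens under the lock ... */
	if (needreset) {
		pthread_mutex_unlock(&pcu_lock);
		heavy_reset();
		pthread_mutex_lock(&pcu_lock);
	}
	pthread_mutex_unlock(&pcu_lock);
}

int main(void)
{
	complete_aggr(1);
	return 0;
}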
 
 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 
        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
-       *ssn = txtid->seq_start;
+       *ssn = txtid->seq_start = txtid->seq_next;
+
+       memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+       txtid->baw_head = txtid->baw_tail = 0;
 
        return 0;
 }
@@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
-       int qnum, i;
+       int axq_qnum, i;
 
        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
@@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
-       qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
-       if (qnum == -1) {
+       axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+       if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
-       if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
+       if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_err(common, "qnum %u out of range, max %zu!\n",
-                       qnum, ARRAY_SIZE(sc->tx.txq));
-               ath9k_hw_releasetxqueue(ah, qnum);
+                       axq_qnum, ARRAY_SIZE(sc->tx.txq));
+               ath9k_hw_releasetxqueue(ah, axq_qnum);
                return NULL;
        }
-       if (!ATH_TXQ_SETUP(sc, qnum)) {
-               struct ath_txq *txq = &sc->tx.txq[qnum];
+       if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+               struct ath_txq *txq = &sc->tx.txq[axq_qnum];
 
-               txq->axq_qnum = qnum;
+               txq->axq_qnum = axq_qnum;
+               txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
@@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
-               sc->tx.txqsetup |= 1<<qnum;
+               sc->tx.txqsetup |= 1<<axq_qnum;
 
                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
-       return &sc->tx.txq[qnum];
+       return &sc->tx.txq[axq_qnum];
 }
 
 int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -1051,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
        struct ath9k_tx_queue_info qi;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        int qnum = sc->beacon.cabq->axq_qnum;
 
        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1062,7 +1067,7 @@ int ath_cabq_update(struct ath_softc *sc)
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
 
-       qi.tqi_readyTime = (sc->beacon_interval *
+       qi.tqi_readyTime = (cur_conf->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);
 
@@ -1189,24 +1194,31 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
        if (sc->sc_flags & SC_OP_INVALID)
                return true;
 
-       /* Stop beacon queue */
-       ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+       ath9k_hw_abort_tx_dma(ah);
 
-       /* Stop data queues */
+       /* Check if any queue remains active */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (ATH_TXQ_SETUP(sc, i)) {
-                       txq = &sc->tx.txq[i];
-                       ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-                       npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
-               }
+               if (!ATH_TXQ_SETUP(sc, i))
+                       continue;
+
+               npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
        }
 
        if (npend)
                ath_err(common, "Failed to stop TX DMA!\n");
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (ATH_TXQ_SETUP(sc, i))
-                       ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
+               if (!ATH_TXQ_SETUP(sc, i))
+                       continue;
+
+               /*
+                * The caller will resume queues with ieee80211_wake_queues.
+                * Mark the queue as not stopped to prevent ath_tx_complete
+                * from waking the queue too early.
+                */
+               txq = &sc->tx.txq[i];
+               txq->stopped = false;
+               ath_draintxq(sc, txq, retry_tx);
        }
 
        return !npend;
@@ -1218,46 +1230,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
+/* For each entry on axq_acq, and for each of its TIDs, keep scheduling
+ * packets for transmit until the AMPDU queue depth reaches the minimum
+ * fill level (ATH_AGGR_MIN_QDEPTH).
+ */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
-       struct ath_atx_ac *ac;
-       struct ath_atx_tid *tid;
+       struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+       struct ath_atx_tid *tid, *last_tid;
 
-       if (list_empty(&txq->axq_acq))
+       if (list_empty(&txq->axq_acq) ||
+           txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;
 
        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
-       list_del(&ac->list);
-       ac->sched = false;
+       last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
 
-       do {
-               if (list_empty(&ac->tid_q))
-                       return;
+       list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+               last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+               list_del(&ac->list);
+               ac->sched = false;
 
-               tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
-               list_del(&tid->list);
-               tid->sched = false;
+               while (!list_empty(&ac->tid_q)) {
+                       tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+                                              list);
+                       list_del(&tid->list);
+                       tid->sched = false;
 
-               if (tid->paused)
-                       continue;
+                       if (tid->paused)
+                               continue;
 
-               ath_tx_sched_aggr(sc, txq, tid);
+                       ath_tx_sched_aggr(sc, txq, tid);
 
-               /*
-                * add tid to round-robin queue if more frames
-                * are pending for the tid
-                */
-               if (!list_empty(&tid->buf_q))
-                       ath_tx_queue_tid(txq, tid);
+                       /*
+                        * add tid to round-robin queue if more frames
+                        * are pending for the tid
+                        */
+                       if (!list_empty(&tid->buf_q))
+                               ath_tx_queue_tid(txq, tid);
 
-               break;
-       } while (!list_empty(&ac->tid_q));
+                       if (tid == last_tid ||
+                           txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+                               break;
+               }
 
-       if (!list_empty(&ac->tid_q)) {
-               if (!ac->sched) {
-                       ac->sched = true;
-                       list_add_tail(&ac->list, &txq->axq_acq);
+               if (!list_empty(&ac->tid_q)) {
+                       if (!ac->sched) {
+                               ac->sched = true;
+                               list_add_tail(&ac->list, &txq->axq_acq);
+                       }
                }
+
+               if (ac == last_ac ||
+                   txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+                       return;
        }
 }
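
The rewritten ath_txq_schedule() above no longer stops after servicing a single TID: it walks the access-category list and each AC's TIDs round-robin, stopping once the hardware queue already holds ATH_AGGR_MIN_QDEPTH aggregates. Editor's sketch (not part of the patch): the bounded round-robin shape with plain integers; the per-TID counts and the MIN_QDEPTH value are made up.

/* Standalone sketch of bounded round-robin scheduling across TIDs. */
#include <stdio.h>

#define NUM_TIDS    4
#define MIN_QDEPTH  2   /* stand-in for ATH_AGGR_MIN_QDEPTH */

int main(void)
{
	int pending[NUM_TIDS] = { 3, 1, 0, 2 };  /* frames queued per TID */
	int hw_depth = 0;
	int tid = 0;

	while (hw_depth < MIN_QDEPTH) {
		int scanned;

		/* find the next TID that still has traffic */
		for (scanned = 0; scanned < NUM_TIDS && !pending[tid]; scanned++)
			tid = (tid + 1) % NUM_TIDS;
		if (!pending[tid])
			break;                   /* nothing left to schedule */

		pending[tid]--;                  /* one "ath_tx_sched_aggr" pass */
		hw_depth++;
		printf("scheduled tid %d, hw queue depth now %d\n", tid, hw_depth);
		tid = (tid + 1) % NUM_TIDS;
	}
	return 0;
}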
 
@@ -1301,6 +1326,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+               TX_STAT_INC(txq->axq_qnum, puttxbuf);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
@@ -1308,6 +1334,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                list_splice_tail_init(head, &txq->axq_q);
 
                if (txq->axq_link == NULL) {
+                       TX_STAT_INC(txq->axq_qnum, puttxbuf);
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                                txq->axq_qnum, ito64(bf->bf_daddr),
@@ -1321,6 +1348,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
+               TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
@@ -1335,7 +1363,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
        struct list_head bf_head;
 
        bf->bf_state.bf_type |= BUF_AMPDU;
-       TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
 
        /*
         * Do not queue to h/w when any of the following conditions is true:
@@ -1351,6 +1378,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
+               TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
                list_add_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
@@ -1364,6 +1392,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                ath_tx_addto_baw(sc, tid, fi->seqno);
 
        /* Queue to h/w without aggregation */
+       TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf, fi->framelen);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@ -1416,8 +1445,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                             int framelen)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@ -1635,8 +1663,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
                                           struct ath_txq *txq,
                                           struct sk_buff *skb)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
@@ -1652,7 +1679,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
 
        ATH_TXBUF_RESET(bf);
 
-       bf->aphy = aphy;
        bf->bf_flags = setup_tx_flags(skb);
        bf->bf_mpdu = skb;
 
@@ -1725,6 +1751,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                        ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
                                                   bf->bf_state.bfs_paprd);
 
+               if (txctl->paprd)
+                       bf->bf_state.bfs_paprd_timestamp = jiffies;
+
                ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
        }
 
@@ -1738,8 +1767,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = info->control.sta;
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
        struct ath_buf *bf;
        int padpos, padsize;
@@ -1791,7 +1819,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        spin_lock_bh(&txq->axq_lock);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
-               ath_mac80211_stop_queue(sc, q);
+               ieee80211_stop_queue(sc->hw, q);
                txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);
@@ -1806,8 +1834,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 /*****************/
 
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-                           struct ath_wiphy *aphy, int tx_flags, int ftype,
-                           struct ath_txq *txq)
+                           int tx_flags, int ftype, struct ath_txq *txq)
 {
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1817,9 +1844,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
        ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
 
-       if (aphy)
-               hw = aphy->hw;
-
        if (tx_flags & ATH_TX_BAR)
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
@@ -1849,19 +1873,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                                        PS_WAIT_FOR_TX_ACK));
        }
 
-       if (unlikely(ftype))
-               ath9k_tx_status(hw, skb, ftype);
-       else {
-               q = skb_get_queue_mapping(skb);
-               if (txq == sc->tx.txq_map[q]) {
-                       spin_lock_bh(&txq->axq_lock);
-                       if (WARN_ON(--txq->pending_frames < 0))
-                               txq->pending_frames = 0;
-                       spin_unlock_bh(&txq->axq_lock);
-               }
+       q = skb_get_queue_mapping(skb);
+       if (txq == sc->tx.txq_map[q]) {
+               spin_lock_bh(&txq->axq_lock);
+               if (WARN_ON(--txq->pending_frames < 0))
+                       txq->pending_frames = 0;
 
-               ieee80211_tx_status(hw, skb);
+               if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+                       ieee80211_wake_queue(sc->hw, q);
+                       txq->stopped = 0;
+               }
+               spin_unlock_bh(&txq->axq_lock);
        }
+
+       ieee80211_tx_status(hw, skb);
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1886,13 +1911,15 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
        bf->bf_buf_addr = 0;
 
        if (bf->bf_state.bfs_paprd) {
-               if (!sc->paprd_pending)
+               if (time_after(jiffies,
+                               bf->bf_state.bfs_paprd_timestamp +
+                               msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
                        dev_kfree_skb_any(skb);
                else
                        complete(&sc->paprd_complete);
        } else {
-               ath_debug_stat_tx(sc, bf, ts);
-               ath_tx_complete(sc, skb, bf->aphy, tx_flags,
+               ath_debug_stat_tx(sc, bf, ts, txq);
+               ath_tx_complete(sc, skb, tx_flags,
                                bf->bf_state.bfs_ftype, txq);
        }
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -1908,14 +1935,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
-                            int nframes, int nbad, int txok, bool update_rc)
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+                            struct ath_tx_status *ts, int nframes, int nbad,
+                            int txok, bool update_rc)
 {
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hw *hw = bf->aphy->hw;
-       struct ath_softc *sc = bf->aphy->sc;
+       struct ieee80211_hw *hw = sc->hw;
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;
 
@@ -1966,19 +1993,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
 }
 
-static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
-{
-       struct ath_txq *txq;
-
-       txq = sc->tx.txq_map[qnum];
-       spin_lock_bh(&txq->axq_lock);
-       if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
-               if (ath_mac80211_start_queue(sc, qnum))
-                       txq->stopped = 0;
-       }
-       spin_unlock_bh(&txq->axq_lock);
-}
-
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -1989,7 +2003,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
        struct ath_tx_status ts;
        int txok;
        int status;
-       int qnum;
 
        ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -1999,6 +2012,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                spin_lock_bh(&txq->axq_lock);
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
+                       if (sc->sc_flags & SC_OP_TXAGGR)
+                               ath_txq_schedule(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
@@ -2033,6 +2048,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
+               TX_STAT_INC(txq->axq_qnum, txprocdesc);
 
                /*
                 * Remove ath_buf's of the same transmit unit from txq,
@@ -2053,6 +2069,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;
+
                spin_unlock_bh(&txq->axq_lock);
 
                if (bf_held)
@@ -2065,27 +2082,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                         */
                        if (ts.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
-                       ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
+                       ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
                }
 
-               qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
                                             true);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
 
-               if (txq == sc->tx.txq_map[qnum])
-                       ath_wake_mac80211_queue(sc, qnum);
-
                spin_lock_bh(&txq->axq_lock);
+
                if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
                spin_unlock_bh(&txq->axq_lock);
        }
 }
 
+static void ath_hw_pll_work(struct work_struct *work)
+{
+       struct ath_softc *sc = container_of(work, struct ath_softc,
+                                           hw_pll_work.work);
+       static int count;
+
+       if (AR_SREV_9485(sc->sc_ah)) {
+               if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
+                       count++;
+
+                       if (count == 3) {
+                               /* Rx is hung for more than 500ms. Reset it */
+                               ath_reset(sc, true);
+                               count = 0;
+                       }
+               } else
+                       count = 0;
+
+               ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
+       }
+}
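
The watchdog above re-arms itself every HZ/5 while it runs on an AR9485, so three consecutive over-threshold readings of the PLL sqsum correspond to roughly half a second of a hung receiver before the chip is reset. As a minimal, hedged sketch of how such a delayed work is typically armed and torn down elsewhere in the driver (the two wrapper names below are hypothetical and not part of this patch; ieee80211_queue_delayed_work() and cancel_delayed_work_sync() are the standard mac80211/workqueue APIs):

static void example_start_pll_watchdog(struct ath_softc *sc)
{
	/* ath_hw_pll_work() re-queues itself every HZ/5 for as long as
	 * the AR9485 PLL needs monitoring. */
	ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ / 5);
}

static void example_stop_pll_watchdog(struct ath_softc *sc)
{
	/* Ensure no instance is queued or still running before the
	 * hardware is torn down. */
	cancel_delayed_work_sync(&sc->hw_pll_work);
}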
+
 static void ath_tx_complete_poll_work(struct work_struct *work)
 {
        struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2093,6 +2128,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
        struct ath_txq *txq;
        int i;
        bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+       sc->tx_complete_poll_work_seen++;
+#endif
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i)) {
@@ -2106,6 +2144,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
                                } else {
                                        txq->axq_tx_inprogress = true;
                                }
+                       } else {
+                               /* If the queue has pending buffers, then it
+                                * should be doing tx work (and have axq_depth).
+                                * We should never reach this state, but in
+                                * practice we do.
+                                */
+                               if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
+                                   (txq->pending_frames > 0 ||
+                                    !list_empty(&txq->axq_acq) ||
+                                    txq->stopped)) {
+                                       ath_err(ath9k_hw_common(sc->sc_ah),
+                                               "txq: %p axq_qnum: %u,"
+                                               " mac80211_qnum: %i"
+                                               " axq_link: %p"
+                                               " pending frames: %i"
+                                               " axq_acq empty: %i"
+                                               " stopped: %i"
+                                               " axq_depth: 0  Attempting to"
+                                               " restart tx logic.\n",
+                                               txq, txq->axq_qnum,
+                                               txq->mac80211_qnum,
+                                               txq->axq_link,
+                                               txq->pending_frames,
+                                               list_empty(&txq->axq_acq),
+                                               txq->stopped);
+                                       ath_txq_schedule(sc, txq);
+                               }
                        }
                        spin_unlock_bh(&txq->axq_lock);
                }
@@ -2113,9 +2178,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
        if (needreset) {
                ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
                        "tx hung, resetting the chip\n");
-               ath9k_ps_wakeup(sc);
                ath_reset(sc, true);
-               ath9k_ps_restore(sc);
        }
 
        ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2147,7 +2210,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
        struct list_head bf_head;
        int status;
        int txok;
-       int qnum;
 
        for (;;) {
                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2190,11 +2252,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                if (!bf_isampdu(bf)) {
                        if (txs.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
-                       ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
+                       ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
                }
 
-               qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
                                             txok, true);
@@ -2202,19 +2262,19 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                        ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                            &txs, txok, 0);
 
-               if (txq == sc->tx.txq_map[qnum])
-                       ath_wake_mac80211_queue(sc, qnum);
-
                spin_lock_bh(&txq->axq_lock);
+
                if (!list_empty(&txq->txq_fifo_pending)) {
                        INIT_LIST_HEAD(&bf_head);
                        bf = list_first_entry(&txq->txq_fifo_pending,
-                               struct ath_buf, list);
-                       list_cut_position(&bf_head, &txq->txq_fifo_pending,
-                               &bf->bf_lastbf->list);
+                                             struct ath_buf, list);
+                       list_cut_position(&bf_head,
+                                         &txq->txq_fifo_pending,
+                                         &bf->bf_lastbf->list);
                        ath_tx_txqaddbuf(sc, txq, &bf_head);
                } else if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
+
                spin_unlock_bh(&txq->axq_lock);
        }
 }
@@ -2282,6 +2342,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
        }
 
        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+       INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                error = ath_tx_edma_init(sc);
index d07ff7f..c6a5fae 100644 (file)
@@ -283,6 +283,7 @@ struct ar9170 {
                unsigned int mem_blocks;
                unsigned int mem_block_size;
                unsigned int rx_size;
+               unsigned int tx_seq_table;
        } fw;
 
        /* reset / stuck frames/queue detection */
@@ -533,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 
 /* TX */
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 void carl9170_tx_janitor(struct work_struct *work);
 void carl9170_tx_process_status(struct ar9170 *ar,
                                const struct carl9170_rsp *cmd);
index 546b4e4..9517ede 100644 (file)
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
        const struct carl9170fw_otus_desc *otus_desc;
        const struct carl9170fw_chk_desc *chk_desc;
        const struct carl9170fw_last_desc *last_desc;
+       const struct carl9170fw_txsq_desc *txsq_desc;
 
        last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
                sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                        FIF_PROMISC_IN_BSS;
        }
 
+       if (SUPP(CARL9170FW_WOL))
+               device_set_wakeup_enable(&ar->udev->dev, true);
+
        ar->fw.vif_num = otus_desc->vif_num;
        ar->fw.cmd_bufs = otus_desc->cmd_bufs;
        ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                }
        }
 
+       txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
+               sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
+
+       if (txsq_desc) {
+               ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+               if (!valid_cpu_addr(ar->fw.tx_seq_table))
+                       return -EINVAL;
+       } else {
+               ar->fw.tx_seq_table = 0;
+       }
+
 #undef SUPPORTED
        return 0;
 }
index 3680dfc..30449d2 100644 (file)
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
 #define CARL9170_RX_FILTER_CTL_BACKR   0x20
 #define CARL9170_RX_FILTER_MGMT                0x40
 #define CARL9170_RX_FILTER_DATA                0x80
+#define CARL9170_RX_FILTER_EVERYTHING  (~0)
 
 struct carl9170_bcn_ctrl_cmd {
        __le32          vif_id;
index 71f3821..9210668 100644 (file)
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
        /* Firmware RX filter | CARL9170_CMD_RX_FILTER */
        CARL9170FW_RX_FILTER,
 
+       /* Wake up on WLAN */
+       CARL9170FW_WOL,
+
        /* KEEP LAST */
        __CARL9170FW_FEATURE_NUM
 };
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
 #define FIX_MAGIC      "FIX\0"
 #define DBG_MAGIC      "DBG\0"
 #define CHK_MAGIC      "CHK\0"
+#define TXSQ_MAGIC     "TXSQ"
 #define LAST_MAGIC     "LAST"
 
 #define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
 #define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
 #define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
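
The SET_*/GET_* macros above pack a release date into a single integer; the divisors visible in CARL9170FW_GET_MONTH() and CARL9170FW_GET_YEAR() imply the encoding (year - 10) * 372 + (month - 1) * 31 + (day - 1). As a hedged worked example (the helper name below is purely illustrative and not part of the firmware ABI): year code 11, month 1, day 22 packs to 1 * 372 + 0 * 31 + 21 = 393, from which GET_YEAR() recovers 11 and GET_MONTH() recovers 1.

/* Illustrative only: pack a date the way the GET_* macros above unpack it. */
static inline unsigned int example_pack_fw_date(unsigned int year,
						unsigned int month,
						unsigned int day)
{
	return (year - 10) * 372 + (month - 1) * 31 + (day - 1);
}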
 
+#define CARL9170FW_MAGIC_SIZE                  4
+
 struct carl9170fw_desc_head {
-       u8      magic[4];
+       u8      magic[CARL9170FW_MAGIC_SIZE];
        __le16 length;
        u8 min_ver;
        u8 cur_ver;
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
 #define CARL9170FW_CHK_DESC_SIZE                       \
        (sizeof(struct carl9170fw_chk_desc))
 
+#define CARL9170FW_TXSQ_DESC_MIN_VER                   1
+#define CARL9170FW_TXSQ_DESC_CUR_VER                   1
+struct carl9170fw_txsq_desc {
+       struct carl9170fw_desc_head head;
+
+       __le32 seq_table_addr;
+} __packed;
+#define CARL9170FW_TXSQ_DESC_SIZE                      \
+       (sizeof(struct carl9170fw_txsq_desc))
+
 #define CARL9170FW_LAST_DESC_MIN_VER                   1
 #define CARL9170FW_LAST_DESC_CUR_VER                   2
 struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
        }
 
 static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
-                                        u8 magic[4], __le16 length,
-                                        u8 min_ver, u8 cur_ver)
+                                        u8 magic[CARL9170FW_MAGIC_SIZE],
+                                        __le16 length, u8 min_ver, u8 cur_ver)
 {
        head->magic[0] = magic[0];
        head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
 
 #define carl9170fw_for_each_hdr(desc, fw_desc)                         \
        for (desc = fw_desc;                                            \
-            memcmp(desc->magic, LAST_MAGIC, 4) &&                      \
+            memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) &&  \
             le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE &&  \
             le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH;    \
             desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
 }
 
 static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
-                                      const u8 descid[4], u16 min_len,
-                                      u8 compatible_revision)
+                                      const u8 descid[CARL9170FW_MAGIC_SIZE],
+                                      u16 min_len, u8 compatible_revision)
 {
        if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
            descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
index e85df6e..4e30762 100644 (file)
 
 #define        AR9170_PWR_REG_CHIP_REVISION            (AR9170_PWR_REG_BASE + 0x010)
 #define AR9170_PWR_REG_PLL_ADDAC               (AR9170_PWR_REG_BASE + 0x014)
+#define                AR9170_PWR_PLL_ADDAC_DIV_S              2
+#define                AR9170_PWR_PLL_ADDAC_DIV                0xffc
 #define        AR9170_PWR_REG_WATCH_DOG_MAGIC          (AR9170_PWR_REG_BASE + 0x020)
 
 /* Faraday USB Controller */
 #define        AR9170_USB_REG_MAIN_CTRL                (AR9170_USB_REG_BASE + 0x000)
 #define                AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP      BIT(0)
 #define                AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT  BIT(2)
+#define                AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND      BIT(3)
+#define                AR9170_USB_MAIN_CTRL_RESET              BIT(4)
+#define                AR9170_USB_MAIN_CTRL_CHIP_ENABLE        BIT(5)
 #define                AR9170_USB_MAIN_CTRL_HIGHSPEED          BIT(6)
 
 #define        AR9170_USB_REG_DEVICE_ADDRESS           (AR9170_USB_REG_BASE + 0x001)
 #define        AR9170_USB_REG_INTR_GROUP               (AR9170_USB_REG_BASE + 0x020)
 
 #define        AR9170_USB_REG_INTR_SOURCE_0            (AR9170_USB_REG_BASE + 0x021)
+#define                AR9170_USB_INTR_SRC0_SETUP              BIT(0)
+#define                AR9170_USB_INTR_SRC0_IN                 BIT(1)
+#define                AR9170_USB_INTR_SRC0_OUT                BIT(2)
+#define                AR9170_USB_INTR_SRC0_FAIL               BIT(3) /* ??? */
+#define                AR9170_USB_INTR_SRC0_END                BIT(4) /* ??? */
+#define                AR9170_USB_INTR_SRC0_ABORT              BIT(7)
+
 #define        AR9170_USB_REG_INTR_SOURCE_1            (AR9170_USB_REG_BASE + 0x022)
 #define        AR9170_USB_REG_INTR_SOURCE_2            (AR9170_USB_REG_BASE + 0x023)
 #define        AR9170_USB_REG_INTR_SOURCE_3            (AR9170_USB_REG_BASE + 0x024)
 #define        AR9170_USB_REG_INTR_SOURCE_5            (AR9170_USB_REG_BASE + 0x026)
 #define        AR9170_USB_REG_INTR_SOURCE_6            (AR9170_USB_REG_BASE + 0x027)
 #define        AR9170_USB_REG_INTR_SOURCE_7            (AR9170_USB_REG_BASE + 0x028)
+#define                AR9170_USB_INTR_SRC7_USB_RESET          BIT(1)
+#define                AR9170_USB_INTR_SRC7_USB_SUSPEND        BIT(2)
+#define                AR9170_USB_INTR_SRC7_USB_RESUME         BIT(3)
+#define                AR9170_USB_INTR_SRC7_ISO_SEQ_ERR        BIT(4)
+#define                AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT      BIT(5)
+#define                AR9170_USB_INTR_SRC7_TX0BYTE            BIT(6)
+#define                AR9170_USB_INTR_SRC7_RX0BYTE            BIT(7)
+
+#define        AR9170_USB_REG_IDLE_COUNT               (AR9170_USB_REG_BASE + 0x02f)
 
 #define        AR9170_USB_REG_EP_MAP                   (AR9170_USB_REG_BASE + 0x030)
 #define        AR9170_USB_REG_EP1_MAP                  (AR9170_USB_REG_BASE + 0x030)
 
 #define        AR9170_USB_REG_MAX_AGG_UPLOAD           (AR9170_USB_REG_BASE + 0x110)
 #define        AR9170_USB_REG_UPLOAD_TIME_CTL          (AR9170_USB_REG_BASE + 0x114)
+
+#define AR9170_USB_REG_WAKE_UP                 (AR9170_USB_REG_BASE + 0x120)
+#define                AR9170_USB_WAKE_UP_WAKE                 BIT(0)
+
 #define        AR9170_USB_REG_CBUS_CTRL                (AR9170_USB_REG_BASE + 0x1f0)
 #define                AR9170_USB_CBUS_CTRL_BUFFER_END         (BIT(1))
 
index 870df8c..ede3d7e 100644 (file)
@@ -662,6 +662,13 @@ init:
                        goto unlock;
        }
 
+       if (ar->fw.tx_seq_table) {
+               err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
+                                        0);
+               if (err)
+                       goto unlock;
+       }
+
 unlock:
        if (err && (vif_id >= 0)) {
                vif_priv->active = false;
@@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    enum ieee80211_ampdu_mlme_action action,
                                    struct ieee80211_sta *sta,
-                                   u16 tid, u16 *ssn)
+                                   u16 tid, u16 *ssn, u8 buf_size)
 {
        struct ar9170 *ar = hw->priv;
        struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
index 939a0e9..84866a4 100644 (file)
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
        cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
 
        /* 2. Maybe the AP wants to send multicast/broadcast data? */
-       cam = !!(tim_ie->bitmap_ctrl & 0x01);
+       cam |= !!(tim_ie->bitmap_ctrl & 0x01);
 
        if (!cam) {
                /* back to low-power land. */
index 6cc58e0..0ef70b6 100644 (file)
@@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
        if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
 
+       if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+               txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
+
        if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
 
@@ -1336,7 +1339,7 @@ err_unlock_rcu:
        return false;
 }
 
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
@@ -1370,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        carl9170_tx(ar);
-       return NETDEV_TX_OK;
+       return;
 
 err_free:
        ar->tx_dropped++;
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 void carl9170_tx_scheduler(struct ar9170 *ar)
index 537732e..f82c400 100644 (file)
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
        { USB_DEVICE(0x057c, 0x8402) },
        /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
        { USB_DEVICE(0x1668, 0x1200) },
+       /* Airlive X.USB a/b/g/n */
+       { USB_DEVICE(0x1b75, 0x9170) },
 
        /* terminate */
        {}
index ee0f84f..15095c0 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __CARL9170_SHARED_VERSION_H
 #define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 10
-#define CARL9170FW_VERSION_MONTH 10
-#define CARL9170FW_VERSION_DAY 29
-#define CARL9170FW_VERSION_GIT "1.9.0"
+#define CARL9170FW_VERSION_YEAR 11
+#define CARL9170FW_VERSION_MONTH 1
+#define CARL9170FW_VERSION_DAY 22
+#define CARL9170FW_VERSION_GIT "1.9.2"
 #endif /* __CARL9170_SHARED_VERSION_H */
index 24d63b5..9e1324b 100644 (file)
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
        u8 ampdu_commit_factor:1;
        u8 ampdu_unused_bit:1;
        u8 queue:2;
-       u8 reserved:1;
+       u8 assign_seq:1;
        u8 vif_id:3;
        u8 fill_in_tsf:1;
        u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
 
 #define CARL9170_TX_SUPER_MISC_QUEUE                   0x3
 #define CARL9170_TX_SUPER_MISC_QUEUE_S                 0
+#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ              0x4
 #define        CARL9170_TX_SUPER_MISC_VIF_ID                   0x38
 #define        CARL9170_TX_SUPER_MISC_VIF_ID_S                 3
 #define        CARL9170_TX_SUPER_MISC_FILL_IN_TSF              0x40
@@ -413,6 +414,23 @@ enum ar9170_txq {
        __AR9170_NUM_TXQ,
 };
 
+/*
+ * This is a workaround for several undocumented bugs.
+ * Don't mess with the QoS/AC <-> HW Queue map unless you
+ * know what you are doing.
+ *
+ * Known problems [hardware]:
+ *  * The MAC does not aggregate frames on anything other
+ *    than the first HW queue.
+ *  * When an AMPDU is placed [in the first HW queue] and
+ *    additional frames are already queued on a different
+ *    HW queue, the MAC will ALWAYS freeze.
+ *
+ * In a nutshell: the hardware can either do QoS or
+ * aggregation, but not both at the same time. As a
+ * result, the device is pretty much useless for any
+ * serious 802.11n setup.
+ */
 static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
 
 #define        AR9170_TXQ_DEPTH                        32
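
For illustration, a hedged sketch of how the fixed map above is typically consulted when a QoS frame is handed to the hardware (the helper name is hypothetical; skb_get_queue_mapping() is the standard mac80211/networking accessor):

/* Translate the mac80211 AC index carried in the skb into the AR9170
 * hardware queue via the fixed ar9170_qmap table above. */
static inline unsigned int example_ar9170_hw_queue(struct sk_buff *skb)
{
	return ar9170_qmap[skb_get_queue_mapping(skb)];
}
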
index 5d465e5..37b8e11 100644 (file)
@@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
                REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
                REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
                REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
-               if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)
+               if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
                        REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+                       REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+                                 AR_KEYTABLE_TYPE_CLR);
+               }
 
        }
 
index 2b14775..f828f29 100644 (file)
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
        }
 }
 
+bool ath_is_49ghz_allowed(u16 regdomain)
+{
+       /* possibly more */
+       return regdomain == MKK9_MKKC;
+}
+EXPORT_SYMBOL(ath_is_49ghz_allowed);
+
 /* Frequency is one where radar detection is required */
 static bool ath_is_radar_freq(u16 center_freq)
 {
index 345dd97..172f63f 100644 (file)
@@ -250,6 +250,7 @@ enum CountryCode {
 };
 
 bool ath_is_world_regd(struct ath_regulatory *reg);
+bool ath_is_49ghz_allowed(u16 regdomain);
 int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
                  int (*reg_notifier)(struct wiphy *wiphy,
                  struct regulatory_request *request));
index 47033f6..480595f 100644 (file)
@@ -92,7 +92,7 @@ config B43_PHY_N
        ---help---
          Support for the N-PHY.
 
-         This enables support for devices with N-PHY revision up to 2.
+         This enables support for devices with N-PHY.
 
          Say N if you expect high stability and performance. Saying Y will not
          affect other devices support and may provide support for basic needs.
index 22bc9f1..57eb5b6 100644 (file)
@@ -3203,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work)
        mutex_unlock(&wl->mutex);
 }
 
-static int b43_op_tx(struct ieee80211_hw *hw,
+static void b43_op_tx(struct ieee80211_hw *hw,
                     struct sk_buff *skb)
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3211,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
        if (unlikely(skb->len < 2 + 2 + 6)) {
                /* Too short, this can't be a valid frame. */
                dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
+               return;
        }
        B43_WARN_ON(skb_shinfo(skb)->nr_frags);
 
        skb_queue_tail(&wl->tx_queue, skb);
        ieee80211_queue_work(wl->hw, &wl->tx_work);
-
-       return NETDEV_TX_OK;
 }
 
 static void b43_qos_params_upload(struct b43_wldev *dev,
index ab81ed8..8a00f9a 100644 (file)
@@ -430,9 +430,9 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
        bool workaround = false;
 
        if (sprom->revision < 4)
-               workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
-                               binfo->type != 0x46D ||
-                               binfo->rev < 0x41);
+               workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
+                               binfo->type == 0x46D &&
+                               binfo->rev >= 0x41);
        else
                workaround =
                        !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -1168,23 +1168,98 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
 static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
+       struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
+
+       /* PHY rev 0, 1, 2 */
        u8 i, j;
        u8 code;
        u16 tmp;
+       u8 rfseq_events[3] = { 6, 8, 7 };
+       u8 rfseq_delays[3] = { 10, 30, 1 };
 
-       /* TODO: for PHY >= 3
-       s8 *lna1_gain, *lna2_gain;
-       u8 *gain_db, *gain_bits;
-       u16 *rfseq_init;
+       /* PHY rev >= 3 */
+       bool ghz5;
+       bool ext_lna;
+       u16 rssi_gain;
+       struct nphy_gain_ctl_workaround_entry *e;
        u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
        u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
-       */
-
-       u8 rfseq_events[3] = { 6, 8, 7 };
-       u8 rfseq_delays[3] = { 10, 30, 1 };
 
        if (dev->phy.rev >= 3) {
-               /* TODO */
+               /* Prepare values */
+               ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
+                       & B43_NPHY_BANDCTL_5GHZ;
+               ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
+               e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
+               if (ghz5 && dev->phy.rev >= 5)
+                       rssi_gain = 0x90;
+               else
+                       rssi_gain = 0x50;
+
+               b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
+
+               /* Set Clip 2 detect */
+               b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+                               B43_NPHY_C1_CGAINI_CL2DETECT);
+               b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+                               B43_NPHY_C2_CGAINI_CL2DETECT);
+
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+                               0x17);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+                               0x17);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
+                               rssi_gain);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
+                               rssi_gain);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+                               0x17);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+                               0x17);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
+
+               b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
+               b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
+               b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
+               b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
+               b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
+               b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
+
+               b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+               b43_phy_write(dev, 0x2A7, e->init_gain);
+               b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
+                                       e->rfseq_init);
+               b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+
+               /* TODO: check the defines; they do not match the variable names */
+               b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
+               b43_phy_write(dev, 0x2A9, e->cliphi_gain);
+               b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
+               b43_phy_write(dev, 0x2AB, e->clipmd_gain);
+               b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
+               b43_phy_write(dev, 0x2AD, e->cliplo_gain);
+
+               b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
+               b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
+               b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
+               b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
+               b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
+               b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+                               ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
+               b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+                               ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
+               b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
        } else {
                /* Set Clip 2 detect */
                b43_phy_set(dev, B43_NPHY_C1_CGAINI,
@@ -1281,17 +1356,17 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
                                                B43_NPHY_TABLE_DATALO, tmp);
                                }
                        }
+               }
 
-                       b43_nphy_set_rf_sequence(dev, 5,
-                                       rfseq_events, rfseq_delays, 3);
-                       b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
-                               ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
-                               0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+               b43_nphy_set_rf_sequence(dev, 5,
+                               rfseq_events, rfseq_delays, 3);
+               b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+                       ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+                       0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
 
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
-                               b43_phy_maskset(dev, B43_PHY_N(0xC5D),
-                                               0xFF80, 4);
-               }
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                       b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+                                       0xFF80, 4);
        }
 }
 
@@ -1308,6 +1383,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
        u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
        u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
 
+       u16 tmp16;
+       u32 tmp32;
+
        if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
                b43_nphy_classifier(dev, 1, 0);
        else
@@ -1320,7 +1398,82 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
                    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
 
        if (dev->phy.rev >= 3) {
+               tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+               tmp32 &= 0xffffff;
+               b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+               b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+               b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+               b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+               b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+               b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+               b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+               b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+               b43_phy_write(dev, 0x2AE, 0x000C);
+
                /* TODO */
+
+               tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+                       0x2 : 0x9C40;
+               b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+
+               b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+
+               b43_nphy_gain_ctrl_workarounds(dev);
+
+               b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
+               b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
+
+               /* TODO */
+
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+               b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+               b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+               /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+               if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
+                   b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+                   (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
+                   b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+                       tmp32 = 0x00088888;
+               else
+                       tmp32 = 0x88888888;
+               b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+               b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+               b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+               if (dev->phy.rev == 4 &&
+                   b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+                                       0x70);
+                       b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+                                       0x70);
+               }
+
+               b43_phy_write(dev, 0x224, 0x039C);
+               b43_phy_write(dev, 0x225, 0x0357);
+               b43_phy_write(dev, 0x226, 0x0317);
+               b43_phy_write(dev, 0x227, 0x02D7);
+               b43_phy_write(dev, 0x228, 0x039C);
+               b43_phy_write(dev, 0x229, 0x0357);
+               b43_phy_write(dev, 0x22A, 0x0317);
+               b43_phy_write(dev, 0x22B, 0x02D7);
+               b43_phy_write(dev, 0x22C, 0x039C);
+               b43_phy_write(dev, 0x22D, 0x0357);
+               b43_phy_write(dev, 0x22E, 0x0317);
+               b43_phy_write(dev, 0x22F, 0x02D7);
        } else {
                if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
                    nphy->band5g_pwrgain) {
@@ -2128,7 +2281,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
                save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
                save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
                save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
-       } else if (dev->phy.rev == 2) {
+       } else {
                save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
                save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
                save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
@@ -2179,7 +2332,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
                b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
                b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
                b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
-       } else if (dev->phy.rev == 2) {
+       } else {
                b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
                b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
                b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
@@ -3878,10 +4031,14 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
        }
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
 static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
 {
-       b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
-                     on ? 0 : 0x7FFF);
+       u16 val = on ? 0 : 0x7FFF;
+
+       if (dev->phy.rev >= 3)
+               b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val);
+       b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val);
 }
 
 static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
index dc8ef09..2de483b 100644 (file)
@@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 };
 
+/* static tables, PHY revision >= 3 */
+static const u32 b43_ntab_framestruct_r3[] = {
+       0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+       0x09804506, 0x00100030, 0x09804507, 0x00100030,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024,
+       0x0980450e, 0x00100034, 0x0980450f, 0x00100034,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+       0x1980c506, 0x00100030, 0x21810506, 0x00100030,
+       0x21810506, 0x00100030, 0x01800504, 0x00100030,
+       0x11808505, 0x00100030, 0x29814507, 0x01100030,
+       0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+       0x21810506, 0x00100030, 0x21810506, 0x00100030,
+       0x29814507, 0x01100030, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+       0x1980c50e, 0x00100038, 0x2181050e, 0x00100038,
+       0x2181050e, 0x00100038, 0x0180050c, 0x00100038,
+       0x1180850d, 0x00100038, 0x2981450f, 0x01100038,
+       0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+       0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+       0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+       0x1980c506, 0x00100030, 0x1980c506, 0x00100030,
+       0x11808504, 0x00100030, 0x3981ca05, 0x00100030,
+       0x29814507, 0x01100030, 0x00000000, 0x00000000,
+       0x10008a04, 0x00100000, 0x3981ca05, 0x00100030,
+       0x1980c506, 0x00100030, 0x29814507, 0x01100030,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028,
+       0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038,
+       0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038,
+       0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+       0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038,
+       0x1980c50e, 0x00100038, 0x2981450f, 0x01100038,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x00100000, 0x02001405, 0x00100040,
+       0x0b004a06, 0x01900060, 0x13008a06, 0x01900060,
+       0x13008a06, 0x01900060, 0x43020a04, 0x00100060,
+       0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060,
+       0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+       0x13008a06, 0x01900060, 0x13008a06, 0x01900060,
+       0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x0200140d, 0x00100050,
+       0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070,
+       0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070,
+       0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+       0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x50029404, 0x00100000, 0x32019405, 0x00100040,
+       0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060,
+       0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060,
+       0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+       0x5802d404, 0x00100000, 0x3b01d405, 0x00100060,
+       0x0b004a06, 0x01900060, 0x23010a07, 0x01500060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x5002940c, 0x00100010, 0x3201940d, 0x00100050,
+       0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070,
+       0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070,
+       0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+       0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070,
+       0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x000f4800, 0x62031405, 0x00100040,
+       0x53028a06, 0x01900060, 0x53028a07, 0x01900060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x000f4808, 0x6203140d, 0x00100048,
+       0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024,
+       0x1980c50e, 0x00100034, 0x2181050e, 0x00100034,
+       0x2181050e, 0x00100034, 0x0180050c, 0x00100038,
+       0x1180850d, 0x00100038, 0x1181850d, 0x00100038,
+       0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+       0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+       0x1181850d, 0x00100038, 0x2981450f, 0x01100038,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+       0x0180c506, 0x00100030, 0x0180c506, 0x00100030,
+       0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130,
+       0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130,
+       0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130,
+       0x2981450f, 0x01100030, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100008, 0x0200140d, 0x00100048,
+       0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068,
+       0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070,
+       0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070,
+       0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+       0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x50029404, 0x00100000, 0x32019405, 0x00100040,
+       0x03004a06, 0x01900060, 0x03004a06, 0x01900060,
+       0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160,
+       0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160,
+       0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160,
+       0x23010a0f, 0x01500060, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+       0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060,
+       0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060,
+       0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+       0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070,
+       0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+       0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060,
+       0x53028a07, 0x0190c060, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+       0x53028a0f, 0x0190c070, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_pilot_r3[] = {
+       0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08,
+       0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5,
+       0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82,
+       0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff,
+       0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff,
+       0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5,
+       0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815,
+       0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff,
+       0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02,
+       0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295,
+       0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a,
+       0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff,
+       0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008,
+       0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280,
+       0xf0a0, 0xf028, 0xffff, 0xffff,
+};
+
+static const u32 b43_ntab_tmap_r3[] = {
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+       0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
+       0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+       0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
+       0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
+       0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
+       0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
+       0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
+       0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
+       0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
+       0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
+       0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+       0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
+       0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
+       0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
+       0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+       0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+       0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+       0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
+       0x22222222, 0x22222222, 0x22f22222, 0x00000222,
+       0x11000000, 0x1111f111, 0x11111111, 0x11111111,
+       0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
+       0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
+       0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
+       0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
+       0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
+       0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
+       0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
+       0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
+       0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
+       0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
+       0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
+       0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
+       0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
+       0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
+       0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
+       0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
+       0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+       0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+       0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+       0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
+       0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
+       0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+       0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+       0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+       0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_intlevel_r3[] = {
+       0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46,
+       0x00c1188d, 0x080024d2, 0x00000070,
+};
+
+static const u32 b43_ntab_tdtrn_r3[] = {
+       0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+       0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+       0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+       0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+       0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+       0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+       0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+       0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+       0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+       0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+       0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+       0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+       0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+       0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+       0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+       0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+       0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8,
+       0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6,
+       0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7,
+       0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd,
+       0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9,
+       0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7,
+       0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798,
+       0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895,
+       0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94,
+       0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8,
+       0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87,
+       0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8,
+       0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8,
+       0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a,
+       0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4,
+       0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de,
+       0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a,
+       0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014,
+       0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991,
+       0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498,
+       0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960,
+       0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c,
+       0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d,
+       0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+       0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+       0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+       0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+       0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+       0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+       0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+       0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+       0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+       0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+       0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+       0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+       0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+       0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+       0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+       0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+       0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54,
+       0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6,
+       0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2,
+       0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3,
+       0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da,
+       0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6,
+       0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12,
+       0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0,
+       0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac,
+       0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a,
+       0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e,
+       0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d,
+       0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826,
+       0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a,
+       0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee,
+       0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30,
+       0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+       0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+       0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+       0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+       0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+       0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+       0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+       0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+       0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+       0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+       0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+       0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+       0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+       0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+       0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+       0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+       0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46,
+       0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0,
+       0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1,
+       0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693,
+       0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341,
+       0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0,
+       0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d,
+       0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6,
+       0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90,
+       0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a,
+       0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039,
+       0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433,
+       0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94,
+       0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a,
+       0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d,
+       0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0,
+       0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157,
+       0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277,
+       0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c,
+       0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8,
+       0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9,
+       0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157,
+       0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52,
+       0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b,
+       0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69,
+       0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374,
+       0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328,
+       0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9,
+       0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457,
+       0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022,
+       0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a,
+       0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0,
+       0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68,
+       0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d,
+       0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44,
+       0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d,
+       0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125,
+       0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25,
+       0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c,
+       0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31,
+       0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5,
+       0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438,
+       0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b,
+       0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2,
+       0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313,
+       0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927,
+       0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21,
+       0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500,
+       0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+       0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+       0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+       0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+       0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+       0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+       0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+       0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+       0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+       0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+       0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+       0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+       0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+       0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+       0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+       0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+};
+
+static const u32 b43_ntab_noisevar0_r3[] = {
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u32 b43_ntab_noisevar1_r3[] = {
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u16 b43_ntab_mcs_r3[] = {
+       0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
+       0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
+       0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
+       0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
+       0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
+       0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
+       0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
+       0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
+       0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
+       0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
+       0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
+       0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
+       0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
+       0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
+       0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
+       0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
+       0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007,
+};
+
+static const u32 b43_ntab_tdi20a0_r3[] = {
+       0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0,
+       0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d,
+       0x00020301, 0x00030504, 0x00040708, 0x0005090b,
+       0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718,
+       0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31,
+       0x000101b4, 0x000243b7, 0x000345bb, 0x000447be,
+       0x00058982, 0x00068c05, 0x00099309, 0x000a950c,
+       0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99,
+       0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632,
+       0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf,
+       0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d,
+       0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a,
+       0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00,
+       0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi20a1_r3[] = {
+       0x00014b26, 0x00028d29, 0x000393ad, 0x00049630,
+       0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d,
+       0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b,
+       0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418,
+       0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1,
+       0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e,
+       0x000f4702, 0x00008905, 0x00020c09, 0x0003128c,
+       0x0004148f, 0x00051712, 0x00065916, 0x00091b19,
+       0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2,
+       0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf,
+       0x00035303, 0x00045506, 0x0005978a, 0x0006998d,
+       0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a,
+       0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00,
+       0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a0_r3[] = {
+       0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2,
+       0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c,
+       0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2,
+       0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3,
+       0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d,
+       0x00153717, 0x00168320, 0x00180ca9, 0x00199633,
+       0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4,
+       0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f,
+       0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734,
+       0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5,
+       0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010,
+       0x001ef999, 0x00010522, 0x00028eac, 0x00045835,
+       0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6,
+       0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111,
+       0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936,
+       0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7,
+       0x00031070, 0x000499fa, 0x00062888, 0x0007f212,
+       0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37,
+       0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868,
+       0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313,
+       0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38,
+       0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969,
+       0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414,
+       0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39,
+       0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a,
+       0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515,
+       0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a,
+       0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a1_r3[] = {
+       0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd,
+       0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07,
+       0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d,
+       0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde,
+       0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88,
+       0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e,
+       0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf,
+       0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89,
+       0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f,
+       0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260,
+       0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a,
+       0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0,
+       0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361,
+       0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b,
+       0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1,
+       0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462,
+       0x00130deb, 0x00149775, 0x00162603, 0x0017af8c,
+       0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2,
+       0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563,
+       0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d,
+       0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3,
+       0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664,
+       0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f,
+       0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4,
+       0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5,
+       0x001390ee, 0x00151a78, 0x0016a906, 0x00183290,
+       0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5,
+       0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_pilotlt_r3[] = {
+       0x76540213, 0x62407351, 0x76543210, 0x76540213,
+       0x76540213, 0x76430521,
+};
+
+static const u32 b43_ntab_channelest_r3[] = {
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+};
+
+static const u8 b43_ntab_framelookup_r3[] = {
+       0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
+       0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e,
+       0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a,
+       0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a,
+};
+
+static const u8 b43_ntab_estimatepowerlt0_r3[] = {
+       0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+       0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+       0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+       0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+       0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+       0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+       0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+       0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_estimatepowerlt1_r3[] = {
+       0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+       0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+       0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+       0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+       0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+       0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+       0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+       0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_adjustpower0_r3[] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 b43_ntab_adjustpower1_r3[] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u32 b43_ntab_gainctl0_r3[] = {
+       0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+       0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+       0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+       0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+       0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+       0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+       0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+       0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+       0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+       0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+       0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+       0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+       0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+       0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+       0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+       0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+       0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+       0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+       0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+       0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+       0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+       0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+       0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+       0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+       0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+       0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+       0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+       0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+       0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+       0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+       0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+       0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_gainctl1_r3[] = {
+       0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+       0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+       0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+       0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+       0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+       0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+       0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+       0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+       0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+       0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+       0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+       0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+       0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+       0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+       0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+       0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+       0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+       0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+       0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+       0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+       0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+       0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+       0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+       0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+       0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+       0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+       0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+       0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+       0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+       0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+       0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+       0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_iqlt0_r3[] = {
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_iqlt1_r3[] = {
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_loftlt0_r3[] = {
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000,
+};
+
+static const u16 b43_ntab_loftlt1_r3[] = {
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000,
+};
+
+/* TX gain tables */
 const u32 b43_ntab_tx_gain_rev0_1_2[] = {
        0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
        0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -1635,6 +2709,79 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
        { 0x00C0,  6, 0xE7, 0xF9, 0xEC, 0xFB }  /* field == 0x4000 (fls 15) */
 };
 
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
+       { /* 2GHz */
+               { /* PHY rev 3 */
+                       { 7, 11, 16, 23 },
+                       { -5, 6, 10, 14 },
+                       { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+                       { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+                       0x627E,
+                       { 0x613F, 0x613F, 0x613F, 0x613F },
+                       0x107E, 0x0066, 0x0074,
+                       0x18, 0x18, 0x18,
+                       0x020D, 0x5,
+               },
+               { /* PHY rev 4 */
+                       { 8, 12, 17, 25 },
+                       { -5, 6, 10, 14 },
+                       { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+                       { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+                       0x527E,
+                       { 0x513F, 0x513F, 0x513F, 0x513F },
+                       0x007E, 0x0066, 0x0074,
+                       0x18, 0x18, 0x18,
+                       0x01A1, 0x5,
+               },
+               { /* PHY rev 5+ */
+                       { 9, 13, 18, 26 },
+                       { -3, 7, 11, 16 },
+                       { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+                       { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+                       0x427E, /* invalid for external LNA! */
+                       { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
+                       0x1076, 0x0066, 0x106A,
+                       0xC, 0xC, 0xC,
+                       0x01D0, 0x5,
+               },
+       },
+       { /* 5GHz */
+               { /* PHY rev 3 */
+                       { 7, 11, 17, 23 },
+                       { -6, 2, 6, 10 },
+                       { 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 },
+                       { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 },
+                       0x52DE,
+                       { 0x516F, 0x516F, 0x516F, 0x516F },
+                       0x00DE, 0x00CA, 0x00CC,
+                       0x1E, 0x1E, 0x1E,
+                       0x01A1, 25,
+               },
+               { /* PHY rev 4 */
+                       { 8, 12, 18, 23 },
+                       { -5, 2, 6, 10 },
+                       { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+                       { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+                       0x629E,
+                       { 0x614F, 0x614F, 0x614F, 0x614F },
+                       0x029E, 0x1084, 0x0086,
+                       0x24, 0x24, 0x24,
+                       0x0107, 25,
+               },
+               { /* PHY rev 5+ */
+                       { 6, 10, 16, 21 },
+                       { -7, 0, 4, 8 },
+                       { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+                       { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+                       0x729E,
+                       { 0x714F, 0x714F, 0x714F, 0x714F },
+                       0x029E, 0x2084, 0x2086,
+                       0x24, 0x24, 0x24,
+                       0x00A9, 25,
+               },
+       },
+};
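For readability, the positional initializers above correspond, in order, to the fields of struct nphy_gain_ctl_workaround_entry introduced in tables_nphy.h later in this patch. A designated-initializer rendering of the first (2 GHz, PHY rev 3) entry is shown here purely as an illustration; it is not part of the patch:

        {       /* 2GHz, PHY rev 3 -- annotated form, for reference only */
                .lna1_gain   = { 7, 11, 16, 23 },
                .lna2_gain   = { -5, 6, 10, 14 },
                .gain_db     = { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
                .gain_bits   = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
                .init_gain   = 0x627E,
                .rfseq_init  = { 0x613F, 0x613F, 0x613F, 0x613F },
                .cliphi_gain = 0x107E,
                .clipmd_gain = 0x0066,
                .cliplo_gain = 0x0074,
                .crsmin      = 0x18,
                .crsminl     = 0x18,
                .crsminu     = 0x18,
                .nbclip      = 0x020D,
                .wlclip      = 0x5,
        },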
+
 static inline void assert_ntab_array_sizes(void)
 {
 #undef check
@@ -1813,7 +2960,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
 #define ntab_upload(dev, offset, data) do { \
                b43_ntab_write_bulk(dev, offset, offset##_SIZE, data);  \
        } while (0)
-
 void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
 {
        /* Static tables */
@@ -1847,11 +2993,70 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
        ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
 }
 
+#define ntab_upload_r3(dev, offset, data) do { \
+               b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
+       } while (0)
 void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
 {
        /* Static tables */
-       /* TODO */
+       ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+       ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+       ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+       ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+       ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
+       ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+       ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+       ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+       ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+       ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
+                      b43_ntab_estimatepowerlt0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
+                      b43_ntab_estimatepowerlt1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
 
        /* Volatile tables */
        /* TODO */
 }
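A brief aside on the upload path used above: ntab_upload_r3() differs from ntab_upload() in that it takes the transfer length from the array itself via ARRAY_SIZE() rather than from a per-table *_SIZE constant, so the rev3+ defines need no size macros. For example, the MCS line above expands to roughly:

        b43_ntab_write_bulk(dev, B43_NTAB_MCS_R3,
                            ARRAY_SIZE(b43_ntab_mcs_r3), b43_ntab_mcs_r3);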
+
+struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
+       struct b43_wldev *dev, bool ghz5, bool ext_lna)
+{
+       struct nphy_gain_ctl_workaround_entry *e;
+       u8 phy_idx;
+
+       B43_WARN_ON(dev->phy.rev < 3);
+       if (dev->phy.rev >= 5)
+               phy_idx = 2;
+       else if (dev->phy.rev == 4)
+               phy_idx = 1;
+       else
+               phy_idx = 0;
+
+       e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
+
+       /* Only one entry differs for the external-LNA case, so rather than
+        * doubling the whole table, adjust the affected fields in place here.
+        */
+       if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
+               e->rfseq_init[0] &= 0x0FFF;
+               e->rfseq_init[1] &= 0x0FFF;
+               e->rfseq_init[2] &= 0x0FFF;
+               e->rfseq_init[3] &= 0x0FFF;
+               e->init_gain &= 0x0FFF;
+       }
+
+       return e;
+}
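A minimal caller sketch for the helper above (illustrative only; how ghz5 and ext_lna are determined is left out, and the register programming is only summarized in a comment):

        struct nphy_gain_ctl_workaround_entry *e;

        /* ghz5/ext_lna come from the current band and board configuration
         * in a real caller; the helper never returns NULL. */
        e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
        /* The rev3+ gain-control workaround code would then program values
         * such as e->init_gain, e->rfseq_init[0..3] and the clip gains
         * into the PHY. */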
index 4ec593b..1856936 100644 (file)
@@ -35,6 +35,31 @@ struct nphy_rf_control_override_rev3 {
        u8 val_addr1;
 };
 
+struct nphy_gain_ctl_workaround_entry {
+       s8 lna1_gain[4];
+       s8 lna2_gain[4];
+       u8 gain_db[10];
+       u8 gain_bits[10];
+
+       u16 init_gain;
+       u16 rfseq_init[4];
+
+       u16 cliphi_gain;
+       u16 clipmd_gain;
+       u16 cliplo_gain;
+
+       u16 crsmin;
+       u16 crsminl;
+       u16 crsminu;
+
+       u16 nbclip;
+       u16 wlclip;
+};
+
+/* Get entry with workaround values for gain ctl. Does not return NULL. */
+struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
+       struct b43_wldev *dev, bool ghz5, bool ext_lna);
+
 /* Get the NPHY Channel Switch Table entry for a channel.
  * Returns NULL on failure to find an entry. */
 const struct b43_nphy_channeltab_entry_rev2 *
@@ -109,6 +134,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
 #define B43_NTAB_C1_LOFEEDTH           B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
 #define B43_NTAB_C1_LOFEEDTH_SIZE      128
 
+/* Static N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_FRAMESTRUCT_R3                B43_NTAB32(10, 000) /* frame struct  */
+#define B43_NTAB_PILOT_R3              B43_NTAB16(11, 000) /* pilot  */
+#define B43_NTAB_TMAP_R3               B43_NTAB32(12, 000) /* TM AP  */
+#define B43_NTAB_INTLEVEL_R3           B43_NTAB32(13, 000) /* INT LV  */
+#define B43_NTAB_TDTRN_R3              B43_NTAB32(14, 000) /* TD TRN  */
+#define B43_NTAB_NOISEVAR0_R3          B43_NTAB32(16, 000) /* noise variance 0  */
+#define B43_NTAB_NOISEVAR1_R3          B43_NTAB32(16, 128) /* noise variance 1  */
+#define B43_NTAB_MCS_R3                        B43_NTAB16(18, 000) /* MCS  */
+#define B43_NTAB_TDI20A0_R3            B43_NTAB32(19, 128) /* TDI 20/0  */
+#define B43_NTAB_TDI20A1_R3            B43_NTAB32(19, 256) /* TDI 20/1  */
+#define B43_NTAB_TDI40A0_R3            B43_NTAB32(19, 640) /* TDI 40/0  */
+#define B43_NTAB_TDI40A1_R3            B43_NTAB32(19, 768) /* TDI 40/1  */
+#define B43_NTAB_PILOTLT_R3            B43_NTAB32(20, 000) /* PLT lookup  */
+#define B43_NTAB_CHANEST_R3            B43_NTAB32(22, 000) /* channel estimate  */
+#define B43_NTAB_FRAMELT_R3            B43_NTAB8 (24, 000) /* frame lookup  */
+#define B43_NTAB_C0_ESTPLT_R3          B43_NTAB8 (26, 000) /* estimated power lookup 0  */
+#define B43_NTAB_C1_ESTPLT_R3          B43_NTAB8 (27, 000) /* estimated power lookup 1  */
+#define B43_NTAB_C0_ADJPLT_R3          B43_NTAB8 (26, 064) /* adjusted power lookup 0  */
+#define B43_NTAB_C1_ADJPLT_R3          B43_NTAB8 (27, 064) /* adjusted power lookup 1  */
+#define B43_NTAB_C0_GAINCTL_R3         B43_NTAB32(26, 192) /* gain control lookup 0  */
+#define B43_NTAB_C1_GAINCTL_R3         B43_NTAB32(27, 192) /* gain control lookup 1  */
+#define B43_NTAB_C0_IQLT_R3            B43_NTAB32(26, 320) /* I/Q lookup 0  */
+#define B43_NTAB_C1_IQLT_R3            B43_NTAB32(27, 320) /* I/Q lookup 1  */
+#define B43_NTAB_C0_LOFEEDTH_R3                B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0  */
+#define B43_NTAB_C1_LOFEEDTH_R3                B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
+
 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE       18
 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE       18
 #define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE      18
index e6b0528..e5be381 100644 (file)
 #include "dma.h"
 #include "pio.h"
 
+static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = {
+       { B43_CCK_RATE_1MB,     0x0,                    0x0 },
+       { B43_CCK_RATE_2MB,     0x0,                    0x1 },
+       { B43_CCK_RATE_5MB,     0x0,                    0x2 },
+       { B43_CCK_RATE_11MB,    0x0,                    0x3 },
+       { B43_OFDM_RATE_6MB,    B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK },
+       { B43_OFDM_RATE_9MB,    B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK },
+       { B43_OFDM_RATE_12MB,   B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK },
+       { B43_OFDM_RATE_18MB,   B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK },
+       { B43_OFDM_RATE_24MB,   B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 },
+       { B43_OFDM_RATE_36MB,   B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 },
+       { B43_OFDM_RATE_48MB,   B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 },
+       { B43_OFDM_RATE_54MB,   B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 },
+};
+
+static const struct b43_tx_legacy_rate_phy_ctl_entry *
+b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate)
+{
+       const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) {
+               e = &(b43_tx_legacy_rate_phy_ctl[i]);
+               if (e->bitrate == bitrate)
+                       return e;
+       }
+
+       B43_WARN_ON(1);
+       return NULL;
+}
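Sketch only, not part of the patch: a trivial use of the lookup above. The helper name example_coding_rate is hypothetical; it mirrors the NULL check that b43_generate_tx_phy_ctl1() performs further down in this file.

static u16 example_coding_rate(u8 bitrate)
{
	const struct b43_tx_legacy_rate_phy_ctl_entry *e;

	/* b43_tx_legacy_rate_phy_ctl_ent() warns and returns NULL for an
	 * unknown legacy bitrate, so guard before dereferencing. */
	e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
	return e ? e->coding_rate : 0;
}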
 
 /* Extract the bitrate index out of a CCK PLCP header. */
 static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
@@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
        }
 }
 
+static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
+{
+       const struct b43_phy *phy = &dev->phy;
+       const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+       u16 control = 0;
+       u16 bw;
+
+       if (phy->type == B43_PHYTYPE_LP)
+               bw = B43_TXH_PHY1_BW_20;
+       else /* FIXME */
+               bw = B43_TXH_PHY1_BW_20;
+
+       if (0) { /* FIXME: MIMO */
+       } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) {
+               control = bw;
+       } else {
+               control = bw;
+               e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
+               if (e) {
+                       control |= e->coding_rate;
+                       control |= e->modulation;
+               }
+               control |= B43_TXH_PHY1_MODE_SISO;
+       }
+
+       return control;
+}
+
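For clarity, and not part of the patch, a worked example of the PHY control word 1 that the helper above composes for 24 Mb/s OFDM on a non-LP PHY: the table entry's coding rate and modulation are OR'ed with the bandwidth and SISO mode bits. The function name is hypothetical; the constants are the ones used in the table and helper.

static u16 example_phy_ctl1_ofdm_24mb(void)
{
	/* Matches the B43_OFDM_RATE_24MB row above: 20 MHz bandwidth,
	 * coding rate 1/2, 16-QAM, single-stream (SISO) mode. */
	return B43_TXH_PHY1_BW_20 |
	       B43_TXH_PHY1_CRATE_1_2 |
	       B43_TXH_PHY1_MODUL_QAM16 |
	       B43_TXH_PHY1_MODE_SISO;
}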
 static u8 b43_calc_fallback_rate(u8 bitrate)
 {
        switch (bitrate) {
@@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
                        extra_ft |= B43_TXH_EFT_RTSFB_OFDM;
                else
                        extra_ft |= B43_TXH_EFT_RTSFB_CCK;
+
+               if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
+                   phy->type == B43_PHYTYPE_N) {
+                       txhdr->phy_ctl1_rts = cpu_to_le16(
+                               b43_generate_tx_phy_ctl1(dev, rts_rate));
+                       txhdr->phy_ctl1_rts_fb = cpu_to_le16(
+                               b43_generate_tx_phy_ctl1(dev, rts_rate_fb));
+               }
        }
 
        /* Magic cookie */
@@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
        else
                txhdr->new_format.cookie = cpu_to_le16(cookie);
 
+       if (phy->type == B43_PHYTYPE_N) {
+               txhdr->phy_ctl1 =
+                       cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
+               txhdr->phy_ctl1_fb =
+                       cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb));
+       }
+
        /* Apply the bitfields */
        txhdr->mac_ctl = cpu_to_le32(mac_ctl);
        txhdr->phy_ctl = cpu_to_le16(phy_ctl);
@@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
                status.mactime += mactime;
                if (low_mactime_now <= mactime)
                        status.mactime -= 0x10000;
-               status.flag |= RX_FLAG_TSFT;
+               status.flag |= RX_FLAG_MACTIME_MPDU;
        }
 
        chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
index d4cf9b3..42debb5 100644 (file)
@@ -73,6 +73,12 @@ struct b43_txhdr {
        } __packed;
 } __packed;
 
+struct b43_tx_legacy_rate_phy_ctl_entry {
+       u8 bitrate;
+       u16 coding_rate;
+       u16 modulation;
+};
+
 /* MAC TX control */
 #define B43_TXH_MAC_USEFBR             0x10000000 /* Use fallback rate for this AMPDU */
 #define B43_TXH_MAC_KEYIDX             0x0FF00000 /* Security key index */
index 1f11e16..c7fd73e 100644 (file)
@@ -2442,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
        return err;
 }
 
-static int b43legacy_op_tx(struct ieee80211_hw *hw,
-                          struct sk_buff *skb)
+static void b43legacy_op_tx(struct ieee80211_hw *hw,
+                           struct sk_buff *skb)
 {
        struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
        struct b43legacy_wldev *dev = wl->current_dev;
@@ -2466,7 +2466,6 @@ out:
                /* Drop the packet. */
                dev_kfree_skb_any(skb);
        }
-       return NETDEV_TX_OK;
 }
 
 static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
index 7d177d9..3a95541 100644 (file)
@@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
                status.mactime += mactime;
                if (low_mactime_now <= mactime)
                        status.mactime -= 0x10000;
-               status.flag |= RX_FLAG_TSFT;
+               status.flag |= RX_FLAG_MACTIME_MPDU;
        }
 
        chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
index 61915f3..da60fae 100644 (file)
@@ -1397,7 +1397,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
 }
 
 /*
- * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
  *
  * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
  *
index d7d049c..d9e1d9b 100644 (file)
@@ -961,7 +961,7 @@ struct ipw_country_channel_info {
 struct ipw_country_info {
        u8 id;
        u8 length;
-       u8 country_str[3];
+       u8 country_str[IEEE80211_COUNTRY_STRING_LEN];
        struct ipw_country_channel_info groups[7];
 } __packed;
 
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644 (file)
index 0000000..2a45dd4
--- /dev/null
@@ -0,0 +1,116 @@
+config IWLWIFI_LEGACY
+       tristate "Intel Wireless Wifi legacy devices"
+       depends on PCI && MAC80211
+       select FW_LOADER
+       select NEW_LEDS
+       select LEDS_CLASS
+       select LEDS_TRIGGERS
+       select MAC80211_LEDS
+
+menu "Debugging Options"
+       depends on IWLWIFI_LEGACY
+
+config IWLWIFI_LEGACY_DEBUG
+       bool "Enable full debugging output in 4965 and 3945 drivers"
+       depends on IWLWIFI_LEGACY
+       ---help---
+         This option will enable debug tracing output for the iwlwifilegacy
+         drivers.
+
+         This will result in the kernel module being ~100k larger.  You can
+         control which debug output is sent to the kernel log by setting the
+         value in
+
+               /sys/class/net/wlan0/device/debug_level
+
+         This entry will only exist if this option is enabled.
+
+         To set a value, simply echo a hex debug mask value to the same file:
+
+                 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+         You can find the list of debug mask values in:
+                 drivers/net/wireless/iwlegacy/iwl-debug.h
+
+         If this is your first time using this driver, you should say Y here
+         as the debug information can assist others in helping you resolve
+         any problems you may encounter.
+
+config IWLWIFI_LEGACY_DEBUGFS
+        bool "4965 and 3945 debugfs support"
+        depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+        ---help---
+         Enable creation of debugfs files for the iwlwifilegacy drivers. This
+         is a low-impact option that allows getting insight into the
+         driver's state at runtime.
+
+config IWLWIFI_LEGACY_DEVICE_TRACING
+       bool "iwlwifilegacy legacy device access tracing"
+       depends on IWLWIFI_LEGACY
+       depends on EVENT_TRACING
+       help
+         Say Y here to trace all commands, including TX frames and IO
+         accesses, sent to the device. If you say yes, iwlwifilegacy will
+         register with the ftrace framework for event tracing and dump
+         all this information to the ringbuffer; you may need to
+         increase the ringbuffer size. See the ftrace documentation
+         for more information.
+
+         When tracing is not enabled, this option still has some
+         (though rather small) overhead.
+
+         If unsure, say Y so we can help you better when problems
+         occur.
+endmenu
+
+config IWL4965
+       tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
+       depends on IWLWIFI_LEGACY
+       ---help---
+         Select to build the driver supporting the:
+
+         Intel Wireless WiFi Link 4965AGN
+
+         This driver uses the kernel's mac80211 subsystem.
+
+         In order to use this driver, you will need a microcode (uCode)
+         image for it. You can obtain the microcode from:
+
+                 <http://intellinuxwireless.org/>.
+
+         The microcode is typically installed in /lib/firmware. You can
+         look in the hotplug script /etc/hotplug/firmware.agent to
+         determine which directory FIRMWARE_DIR is set to when the script
+         runs.
+
+         If you want to compile the driver as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/kbuild/modules.txt>.  The
+         module will be called iwl4965.
+
+config IWL3945
+       tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
+       depends on IWLWIFI_LEGACY
+       ---help---
+         Select to build the driver supporting the:
+
+         Intel PRO/Wireless 3945ABG/BG Network Connection
+
+         This driver uses the kernel's mac80211 subsystem.
+
+         In order to use this driver, you will need a microcode (uCode)
+         image for it. You can obtain the microcode from:
+
+                 <http://intellinuxwireless.org/>.
+
+         The microcode is typically installed in /lib/firmware. You can
+         look in the hotplug script /etc/hotplug/firmware.agent to
+         determine which directory FIRMWARE_DIR is set to when the script
+         runs.
+
+         If you want to compile the driver as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/kbuild/modules.txt>.  The
+         module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644 (file)
index 0000000..d56aeb3
--- /dev/null
@@ -0,0 +1,25 @@
+obj-$(CONFIG_IWLWIFI_LEGACY)   += iwl-legacy.o
+iwl-legacy-objs                := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwl-legacy-objs                += iwl-rx.o iwl-tx.o iwl-sta.o
+iwl-legacy-objs                += iwl-scan.o iwl-led.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+
+iwl-legacy-objs += $(iwl-legacy-m)
+
+CFLAGS_iwl-devtrace.o := -I$(src)
+
+# 4965
+obj-$(CONFIG_IWL4965)  += iwl4965.o
+iwl4965-objs           := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
+iwl4965-objs           += iwl-4965-ucode.o iwl-4965-tx.o
+iwl4965-objs           += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
+iwl4965-objs           += iwl-4965-sta.o iwl-4965-eeprom.o
+iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+
+# 3945
+obj-$(CONFIG_IWL3945)  += iwl3945.o
+iwl3945-objs           := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+
+ccflags-y += -D__CHECK_ENDIAN__
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
        int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
                    sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
        ssize_t ret;
-       struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+       struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
+                                       *max_ofdm;
        struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
        struct iwl39_statistics_rx_non_phy *general, *accum_general;
        struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EAGAIN;
 
        buf = kzalloc(bufsz, GFP_KERNEL);
@@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
        ssize_t ret;
        struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EAGAIN;
 
        buf = kzalloc(bufsz, GFP_KERNEL);
@@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
        struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
        struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EAGAIN;
 
        buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
 #include "iwl-core.h"
 #include "iwl-debug.h"
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
 ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
                                    size_t count, loff_t *ppos);
 ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
similarity index 98%
rename from drivers/net/wireless/iwlwifi/iwl-3945-fh.h
rename to drivers/net/wireless/iwlegacy/iwl-3945-fh.h
index 2c9ed2b..836c991 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -185,4 +185,3 @@ struct iwl3945_tfd {
 
 
 #endif /* __iwl_3945_fh_h__ */
-
similarity index 96%
rename from drivers/net/wireless/iwlwifi/iwl-3945-hw.h
rename to drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 65b5834..779d3cb 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -164,12 +164,11 @@ struct iwl3945_eeprom {
 /*
  * Per-channel regulatory data.
  *
- * Each channel that *might* be supported by 3945 or 4965 has a fixed location
+ * Each channel that *might* be supported by 3945 has a fixed location
  * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
  * txpower (MSB).
  *
- * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ * Entries immediately below are for 20 MHz channel width.
  *
  * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
  */
similarity index 73%
rename from drivers/net/wireless/iwlwifi/iwl-3945-led.c
rename to drivers/net/wireless/iwlegacy/iwl-3945-led.c
index abe2b73..abd9235 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -56,36 +56,9 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
                .callback = NULL,
        };
 
-       return iwl_send_cmd(priv, &cmd);
-}
-
-/* Set led on command */
-static int iwl3945_led_on(struct iwl_priv *priv)
-{
-       struct iwl_led_cmd led_cmd = {
-               .id = IWL_LED_LINK,
-               .on = IWL_LED_SOLID,
-               .off = 0,
-               .interval = IWL_DEF_LED_INTRVL
-       };
-       return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
-/* Set led off command */
-static int iwl3945_led_off(struct iwl_priv *priv)
-{
-       struct iwl_led_cmd led_cmd = {
-               .id = IWL_LED_LINK,
-               .on = 0,
-               .off = 0,
-               .interval = IWL_DEF_LED_INTRVL
-       };
-       IWL_DEBUG_LED(priv, "led off\n");
-       return iwl3945_send_led_cmd(priv, &led_cmd);
+       return iwl_legacy_send_cmd(priv, &cmd);
 }
 
 const struct iwl_led_ops iwl3945_led_ops = {
        .cmd = iwl3945_send_led_cmd,
-       .on = iwl3945_led_on,
-       .off = iwl3945_led_off,
 };
similarity index 95%
rename from drivers/net/wireless/iwlwifi/iwl-3945-led.h
rename to drivers/net/wireless/iwlegacy/iwl-3945-led.h
index ce990ad..9671627 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
similarity index 96%
rename from drivers/net/wireless/iwlwifi/iwl-3945-rs.c
rename to drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 1f3e7e3..977bd24 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
 };
 
 #define IWL_RATE_MAX_WINDOW          62
-#define IWL_RATE_FLUSH          (3*HZ)
+#define IWL_RATE_FLUSH         (3*HZ)
 #define IWL_RATE_WIN_FLUSH       (HZ/2)
 #define IWL39_RATE_HIGH_TH          11520
 #define IWL_SUCCESS_UP_TH         8960
@@ -394,18 +394,18 @@ out:
        IWL_DEBUG_INFO(priv, "leave\n");
 }
 
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
        return hw->priv;
 }
 
 /* rate scale requires free function to be implemented */
-static void rs_free(void *priv)
+static void iwl3945_rs_free(void *priv)
 {
        return;
 }
 
-static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
+static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
 {
        struct iwl3945_rs_sta *rs_sta;
        struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
@@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
        return rs_sta;
 }
 
-static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
+static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
                        void *priv_sta)
 {
        struct iwl3945_rs_sta *rs_sta = priv_sta;
@@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
 
 
 /**
- * rs_tx_status - Update rate control values based on Tx results
+ * iwl3945_rs_tx_status - Update rate control values based on Tx results
  *
  * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
  * the hardware for each rate.
  */
-static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
+static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
                         struct ieee80211_sta *sta, void *priv_sta,
                         struct sk_buff *skb)
 {
@@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
 }
 
 /**
- * rs_get_rate - find the rate for the requested packet
+ * iwl3945_rs_get_rate - find the rate for the requested packet
  *
  * Returns the ieee80211_rate structure allocated by the driver.
  *
@@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
  * rate table and must reference the driver allocated rate table
  *
  */
-static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
+static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
                        void *priv_sta, struct ieee80211_tx_rate_control *txrc)
 {
        struct ieee80211_supported_band *sband = txrc->sband;
@@ -644,7 +644,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
        u32 fail_count;
        s8 scale_action = 0;
        unsigned long flags;
-       u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
+       u16 rate_mask;
        s8 max_rate_idx = -1;
        struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
  * the station is added. Since mac80211 calls this function before a
  * station is added we ignore it.
  */
-static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+static void iwl3945_rs_rate_init_stub(void *priv_r,
+                               struct ieee80211_supported_band *sband,
                              struct ieee80211_sta *sta, void *priv_sta)
 {
 }
@@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
 static struct rate_control_ops rs_ops = {
        .module = NULL,
        .name = RS_NAME,
-       .tx_status = rs_tx_status,
-       .get_rate = rs_get_rate,
-       .rate_init = rs_rate_init_stub,
-       .alloc = rs_alloc,
-       .free = rs_free,
-       .alloc_sta = rs_alloc_sta,
-       .free_sta = rs_free_sta,
+       .tx_status = iwl3945_rs_tx_status,
+       .get_rate = iwl3945_rs_get_rate,
+       .rate_init = iwl3945_rs_rate_init_stub,
+       .alloc = iwl3945_rs_alloc,
+       .free = iwl3945_rs_free,
+       .alloc_sta = iwl3945_rs_alloc_sta,
+       .free_sta = iwl3945_rs_free_sta,
 #ifdef CONFIG_MAC80211_DEBUGFS
        .add_sta_debugfs = iwl3945_add_debugfs,
        .remove_sta_debugfs = iwl3945_remove_debugfs,
@@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void)
 {
        ieee80211_rate_control_unregister(&rs_ops);
 }
-
-
similarity index 90%
rename from drivers/net/wireless/iwlwifi/iwl-3945.c
rename to drivers/net/wireless/iwlegacy/iwl-3945.c
index a9b852b..d096dc2 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -51,7 +51,6 @@
 #include "iwl-led.h"
 #include "iwl-3945-led.h"
 #include "iwl-3945-debugfs.h"
-#include "iwl-legacy.h"
 
 #define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
        [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
@@ -172,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv)
                return;
        }
 
-       disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
-       array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
+       disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
+       array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
 
        if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
                IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
                               disable_ptr);
                for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
-                       iwl_write_targ_mem(priv,
+                       iwl_legacy_write_targ_mem(priv,
                                           disable_ptr + (i * sizeof(u32)),
                                           evt_disable[i]);
 
@@ -202,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
        return -1;
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
 #define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
 
 static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -255,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
                break;
        case IEEE80211_BAND_2GHZ:
                if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-                   iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+                   iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
                        if (rate == IWL_RATE_11M_INDEX)
                                next_rate = IWL_RATE_5M_INDEX;
                }
@@ -285,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
 
        BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
 
-       for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
-               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+       for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+               q->read_ptr != index;
+               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
                tx_info = &txq->txb[txq->q.read_ptr];
                ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
@@ -294,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
        }
 
-       if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
+       if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
                        (txq_id != IWL39_CMD_QUEUE_NUM) &&
                        priv->mac80211_registered)
-               iwl_wake_queue(priv, txq);
+               iwl_legacy_wake_queue(priv, txq);
 }
 
 /**
@@ -317,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
        int rate_idx;
        int fail;
 
-       if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+       if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
                          "is out of range [0-%d] %d %d\n", txq_id,
                          index, txq->q.n_bd, txq->q.write_ptr,
@@ -363,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
  *  RX handler implementations
  *
  *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- *  based on the assumption of all statistics counter are in DWORD
- *  FIXME: This function is for debugging, do not deal with
- *  the case of counters roll-over.
- */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
 static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
                                            __le32 *stats)
 {
@@ -402,72 +397,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
-{
-       bool rc = true;
-       struct iwl3945_notif_statistics current_stat;
-       int combined_plcp_delta;
-       unsigned int plcp_msec;
-       unsigned long plcp_received_jiffies;
-
-       if (priv->cfg->base_params->plcp_delta_threshold ==
-           IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-               IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-               return rc;
-       }
-       memcpy(&current_stat, pkt->u.raw, sizeof(struct
-                       iwl3945_notif_statistics));
-       /*
-        * check for plcp_err and trigger radio reset if it exceeds
-        * the plcp error threshold plcp_delta.
-        */
-       plcp_received_jiffies = jiffies;
-       plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-                                       (long) priv->plcp_jiffies);
-       priv->plcp_jiffies = plcp_received_jiffies;
-       /*
-        * check to make sure plcp_msec is not 0 to prevent division
-        * by zero.
-        */
-       if (plcp_msec) {
-               combined_plcp_delta =
-                       (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-                       le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-               if ((combined_plcp_delta > 0) &&
-                       ((combined_plcp_delta * 100) / plcp_msec) >
-                       priv->cfg->base_params->plcp_delta_threshold) {
-                       /*
-                        * if plcp_err exceed the threshold, the following
-                        * data is printed in csv format:
-                        *    Text: plcp_err exceeded %d,
-                        *    Received ofdm.plcp_err,
-                        *    Current ofdm.plcp_err,
-                        *    combined_plcp_delta,
-                        *    plcp_msec
-                        */
-                       IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-                               "%u, %d, %u mSecs\n",
-                               priv->cfg->base_params->plcp_delta_threshold,
-                               le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-                               combined_plcp_delta, plcp_msec);
-                       /*
-                        * Reset the RF radio due to the high plcp
-                        * error rate
-                        */
-                       rc = false;
-               }
-       }
-       return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
                struct iwl_rx_mem_buffer *rxb)
 {
@@ -476,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
        IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
                     (int)sizeof(struct iwl3945_notif_statistics),
                     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
        iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
 #endif
-       iwl_recover_from_statistics(priv, pkt);
+       iwl_legacy_recover_from_statistics(priv, pkt);
 
        memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
 }
@@ -491,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
        __le32 *flag = (__le32 *)&pkt->u.raw;
 
        if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
                memset(&priv->_3945.accum_statistics, 0,
                        sizeof(struct iwl3945_notif_statistics));
                memset(&priv->_3945.delta_statistics, 0,
@@ -562,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
        }
 
        if (!iwl3945_mod_params.sw_crypto)
-               iwl_set_decrypted_flag(priv,
+               iwl_legacy_set_decrypted_flag(priv,
                                       (struct ieee80211_hdr *)rxb_addr(rxb),
                                       le32_to_cpu(rx_end->status), stats);
 
        skb_add_rx_frag(skb, 0, rxb->page,
                        (void *)rx_hdr->payload - (void *)pkt, len);
 
-       iwl_update_stats(priv, false, fc, len);
+       iwl_legacy_update_stats(priv, false, fc, len);
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
        ieee80211_rx(priv->hw, skb);
@@ -594,10 +523,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
 
        rx_status.flag = 0;
        rx_status.mactime = le64_to_cpu(rx_end->timestamp);
-       rx_status.freq =
-               ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
        rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
                                IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+                                              rx_status.band);
 
        rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
        if (rx_status.band == IEEE80211_BAND_5GHZ)
@@ -641,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
                              rx_status.signal, rx_status.signal,
                              rx_status.rate_idx);
 
-       iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
+       iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
+                                               header);
 
        if (network_packet) {
                priv->_3945.last_beacon_time =
@@ -761,8 +692,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
 
        /* We need to figure out how to get the sta->supp_rates while
         * in this running context */
-       rate_mask = IWL_RATES_MASK;
-
+       rate_mask = IWL_RATES_MASK_3945;
 
        /* Set retry limit on DATA packets and Probe Responses*/
        if (ieee80211_is_probe_resp(fc))
@@ -810,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
        station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
        station->sta.rate_n_flags = cpu_to_le16(tx_rate);
        station->sta.mode = STA_CONTROL_MODIFY_MSK;
-       iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
+       iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
        spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 
        IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
@@ -825,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
  * to set power to V_AUX, do
 
                if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
-                       iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
                                        APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                        ~APMG_PS_CTRL_MSK_PWR_SRC);
 
@@ -835,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
                }
  */
 
-       iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
                        APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                        ~APMG_PS_CTRL_MSK_PWR_SRC);
 
@@ -845,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
 
 static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
-       iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
-       iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
-       iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
-       iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
+       iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
+                                       rxq->rb_stts_dma);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
                FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
                FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
                FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
@@ -859,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
                FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
 
        /* fake read to flush all prev I/O */
-       iwl_read_direct32(priv, FH39_RSSR_CTRL);
+       iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
 
        return 0;
 }
@@ -868,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
 {
 
        /* bypass mode */
-       iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
+       iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
 
        /* RA 0 is active */
-       iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
+       iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
 
        /* all 6 fifo are active */
-       iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
 
-       iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
-       iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
-       iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
-       iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
+       iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+       iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
 
-       iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
+       iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
                             priv->_3945.shared_phys);
 
-       iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
+       iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
                FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
                FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
                FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
@@ -910,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
        iwl3945_hw_txq_ctx_free(priv);
 
        /* allocate tx queue structure */
-       rc = iwl_alloc_txq_mem(priv);
+       rc = iwl_legacy_alloc_txq_mem(priv);
        if (rc)
                return rc;
 
@@ -923,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
                                TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
-                                      txq_id);
+               rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
+                                               slots_num, txq_id);
                if (rc) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
@@ -941,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
 
 /*
  * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
  * NOTE:  This does not load uCode nor start the embedded processor
  */
 static int iwl3945_apm_init(struct iwl_priv *priv)
 {
-       int ret = iwl_apm_init(priv);
+       int ret = iwl_legacy_apm_init(priv);
 
        /* Clear APMG (NIC's internal power management) interrupts */
-       iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-       iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+       iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+       iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
 
        /* Reset radio chip */
-       iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+       iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
+                               APMG_PS_CTRL_VAL_RESET_REQ);
        udelay(5);
-       iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+       iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+                               APMG_PS_CTRL_VAL_RESET_REQ);
 
        return ret;
 }
@@ -964,30 +897,28 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
 {
        struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
        unsigned long flags;
-       u8 rev_id = 0;
+       u8 rev_id = priv->pci_dev->revision;
 
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Determine HW type */
-       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
-
        IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
 
        if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
                IWL_DEBUG_INFO(priv, "RTP type\n");
        else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
                IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
        } else {
                IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
        }
 
        if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
                IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
        } else
                IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
@@ -995,24 +926,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
        if ((eeprom->board_revision & 0xF0) == 0xD0) {
                IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
                               eeprom->board_revision);
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
        } else {
                IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
                               eeprom->board_revision);
-               iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
                              CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
        }
 
        if (eeprom->almgor_m_version <= 1) {
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
                IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
                               eeprom->almgor_m_version);
        } else {
                IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
                               eeprom->almgor_m_version);
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -1040,7 +971,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
 
        /* Allocate the RX queue, or reset if it is already allocated */
        if (!rxq->bd) {
-               rc = iwl_rx_queue_alloc(priv);
+               rc = iwl_legacy_rx_queue_alloc(priv);
                if (rc) {
                        IWL_ERR(priv, "Unable to initialize Rx queue\n");
                        return -ENOMEM;
@@ -1055,10 +986,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
 
        /* Look at using this instead:
        rxq->need_update = 1;
-       iwl_rx_queue_update_write_ptr(priv, rxq);
+       iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
        */
 
-       iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
 
        rc = iwl3945_txq_ctx_reset(priv);
        if (rc)
@@ -1083,12 +1014,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
                for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
                     txq_id++)
                        if (txq_id == IWL39_CMD_QUEUE_NUM)
-                               iwl_cmd_queue_free(priv);
+                               iwl_legacy_cmd_queue_free(priv);
                        else
-                               iwl_tx_queue_free(priv, txq_id);
+                               iwl_legacy_tx_queue_free(priv, txq_id);
 
        /* free tx queue structure */
-       iwl_free_txq_mem(priv);
+       iwl_legacy_txq_mem(priv);
 }
 
 void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1096,12 +1027,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
        int txq_id;
 
        /* stop SCD */
-       iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
-       iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
+       iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
 
        /* reset TFD queues */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
+               iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
                iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
                                FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
                                1000);
@@ -1168,12 +1099,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
 #define IWL_TEMPERATURE_LIMIT_TIMER   6
 
 /**
- * is_temp_calib_needed - determines if new calibration is needed
+ * iwl3945_is_temp_calib_needed - determines if new calibration is needed
  *
  * records new temperature in tx_mgr->temperature.
  * replaces tx_mgr->last_temperature *only* if calib needed
  *    (assumes caller will actually do the calibration!). */
-static int is_temp_calib_needed(struct iwl_priv *priv)
+static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
 {
        int temp_diff;
 
@@ -1404,9 +1335,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in
         *   based on eeprom channel data) for this channel.  */
        power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
 
-       /* further limit to user's max power preference.
-        * FIXME:  Other spectrum management power limitations do not
-        *   seem to apply?? */
        power = min(power, priv->tx_power_user_lmt);
        scan_power_info->requested_power = power;
 
@@ -1460,7 +1388,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
        chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
 
        txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
-       ch_info = iwl_get_channel_info(priv, priv->band, chan);
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
        if (!ch_info) {
                IWL_ERR(priv,
                        "Failed to get channel info for channel %d [%d]\n",
@@ -1468,7 +1396,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
                return -EINVAL;
        }
 
-       if (!is_channel_valid(ch_info)) {
+       if (!iwl_legacy_is_channel_valid(ch_info)) {
                IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
                                "non-Tx channel.\n");
                return 0;
@@ -1503,7 +1431,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
                                txpower.power[i].rate);
        }
 
-       return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
+       return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
                                sizeof(struct iwl3945_txpowertable_cmd),
                                &txpower);
 
@@ -1637,7 +1565,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
        /* set up new Tx power info for each and every channel, 2.4 and 5.x */
        for (i = 0; i < priv->channel_count; i++) {
                ch_info = &priv->channel_info[i];
-               a_band = is_channel_a_band(ch_info);
+               a_band = iwl_legacy_is_channel_a_band(ch_info);
 
                /* Get this chnlgrp's factory calibration temperature */
                ref_temp = (s16)eeprom->groups[ch_info->group_index].
@@ -1649,7 +1577,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
                                                              ref_temp);
 
                /* set tx power value for all rates, OFDM and CCK */
-               for (rate_index = 0; rate_index < IWL_RATE_COUNT;
+               for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
                     rate_index++) {
                        int power_idx =
                            ch_info->power_info[rate_index].base_power_index;
@@ -1703,7 +1631,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
 
        for (i = 0; i < priv->channel_count; i++) {
                ch_info = &priv->channel_info[i];
-               a_band = is_channel_a_band(ch_info);
+               a_band = iwl_legacy_is_channel_a_band(ch_info);
 
                /* find minimum power of all user and regulatory constraints
                 *    (does not consider h/w clipping limitations) */
@@ -1719,7 +1647,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
 
        /* update txpower settings for all channels,
         *   send to NIC if associated. */
-       is_temp_calib_needed(priv);
+       iwl3945_is_temp_calib_needed(priv);
        iwl3945_hw_reg_comp_txpower_temp(priv);
 
        return 0;
@@ -1737,8 +1665,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
                .flags = CMD_WANT_SKB,
                .data = &rxon_assoc,
        };
-       const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
-       const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+       const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+       const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
 
        if ((rxon1->flags == rxon2->flags) &&
            (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1754,7 +1682,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
        rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
        rxon_assoc.reserved = 0;
 
-       rc = iwl_send_cmd_sync(priv, &cmd);
+       rc = iwl_legacy_send_cmd_sync(priv, &cmd);
        if (rc)
                return rc;
 
@@ -1764,7 +1692,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
                rc = -EIO;
        }
 
-       iwl_free_pages(priv, cmd.reply_page);
+       iwl_legacy_free_pages(priv, cmd.reply_page);
 
        return rc;
 }
@@ -1788,7 +1716,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return -EINVAL;
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -1;
 
        /* always get timestamp with Rx frame */
@@ -1799,7 +1727,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
            ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
        staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
 
-       rc = iwl_check_rxon_cmd(priv, ctx);
+       rc = iwl_legacy_check_rxon_cmd(priv, ctx);
        if (rc) {
                IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
                return -EINVAL;
@@ -1808,8 +1736,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* If we don't need to send a full RXON, we can use
         * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
         * and other flags for the current radio configuration. */
-       if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
-               rc = iwl_send_rxon_assoc(priv,
+       if (!iwl_legacy_full_rxon_required(priv,
+                       &priv->contexts[IWL_RXON_CTX_BSS])) {
+               rc = iwl_legacy_send_rxon_assoc(priv,
                                         &priv->contexts[IWL_RXON_CTX_BSS]);
                if (rc) {
                        IWL_ERR(priv, "Error setting RXON_ASSOC "
@@ -1826,7 +1755,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
         * an RXON_ASSOC and the new config wants the associated mask enabled,
         * we must clear the associated from the active configuration
         * before we apply the new config */
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
                IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
                active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
@@ -1836,7 +1765,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                 */
                active_rxon->reserved4 = 0;
                active_rxon->reserved5 = 0;
-               rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+               rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
                                      sizeof(struct iwl3945_rxon_cmd),
                                      &priv->contexts[IWL_RXON_CTX_BSS].active);
 
@@ -1848,9 +1777,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                                  "configuration (%d).\n", rc);
                        return rc;
                }
-               iwl_clear_ucode_stations(priv,
+               iwl_legacy_clear_ucode_stations(priv,
+                                        &priv->contexts[IWL_RXON_CTX_BSS]);
+               iwl_legacy_restore_stations(priv,
                                         &priv->contexts[IWL_RXON_CTX_BSS]);
-               iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
        }
 
        IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1868,10 +1798,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        staging_rxon->reserved4 = 0;
        staging_rxon->reserved5 = 0;
 
-       iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
+       iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
 
        /* Apply the new configuration */
-       rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
                              sizeof(struct iwl3945_rxon_cmd),
                              staging_rxon);
        if (rc) {
@@ -1882,14 +1812,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
 
        if (!new_assoc) {
-               iwl_clear_ucode_stations(priv,
+               iwl_legacy_clear_ucode_stations(priv,
                                         &priv->contexts[IWL_RXON_CTX_BSS]);
-               iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
+               iwl_legacy_restore_stations(priv,
+                                       &priv->contexts[IWL_RXON_CTX_BSS]);
        }
 
        /* If we issue a new RXON command which required a tune then we must
         * send a new TXPOWER command or we won't be able to Tx any frames */
-       rc = priv->cfg->ops->lib->send_tx_power(priv);
+       rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
        if (rc) {
                IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
                return rc;
@@ -1919,7 +1850,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
 {
        /* This will kick in the "brute force"
         * iwl3945_hw_reg_comp_txpower_temp() below */
-       if (!is_temp_calib_needed(priv))
+       if (!iwl3945_is_temp_calib_needed(priv))
                goto reschedule;
 
        /* Set up a new set of temp-adjusted TxPowers, send to NIC.
@@ -1966,7 +1897,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
        u8 grp_channel;
 
        /* Find the group index for the channel ... don't use index 1(?) */
-       if (is_channel_a_band(ch_info)) {
+       if (iwl_legacy_is_channel_a_band(ch_info)) {
                for (group = 1; group < 5; group++) {
                        grp_channel = ch_grp[group].group_channel;
                        if (ch_info->channel <= grp_channel) {
@@ -2146,8 +2077,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
        /* initialize Tx power info for each and every channel, 2.4 and 5.x */
        for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
             i++, ch_info++) {
-               a_band = is_channel_a_band(ch_info);
-               if (!is_channel_valid(ch_info))
+               a_band = iwl_legacy_is_channel_a_band(ch_info);
+               if (!iwl_legacy_is_channel_valid(ch_info))
                        continue;
 
                /* find this channel's channel group (*not* "band") index */
@@ -2250,7 +2181,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
 {
        int rc;
 
-       iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
        rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
                        FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
        if (rc < 0)
@@ -2267,10 +2198,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 
        shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
 
-       iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
-       iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
+       iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
+       iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
 
-       iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
+       iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
                FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
                FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
                FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
@@ -2299,7 +2230,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
 }
 
 
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+                                                               u8 *data)
 {
        struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
        addsta->mode = cmd->mode;
@@ -2327,7 +2259,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv,
        if (sta_id_r)
                *sta_id_r = IWL_INVALID_STATION;
 
-       ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+       ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
        if (ret) {
                IWL_ERR(priv, "Unable to add station %pM\n", addr);
                return ret;
@@ -2362,7 +2294,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
                return 0;
        }
 
-       return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+       return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
                                  vif->bss_conf.bssid);
 }
 
@@ -2413,7 +2345,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
                 * 1M CCK rates */
 
                if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-                   iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+                   iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
 
                        index = IWL_FIRST_CCK_RATE;
                        for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2434,14 +2366,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
 
        /* Update the rate scaling for control frame Tx */
        rate_cmd.table_id = 0;
-       rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
                              &rate_cmd);
        if (rc)
                return rc;
 
        /* Update the rate scaling for data frame Tx */
        rate_cmd.table_id = 1;
-       return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+       return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
                                &rate_cmd);
 }
 
@@ -2541,11 +2473,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
        IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
 
        /* verify BSM SRAM contents */
-       val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
+       val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
        for (reg = BSM_SRAM_LOWER_BOUND;
             reg < BSM_SRAM_LOWER_BOUND + len;
             reg += sizeof(u32), image++) {
-               val = iwl_read_prph(priv, reg);
+               val = iwl_legacy_read_prph(priv, reg);
                if (val != le32_to_cpu(*image)) {
                        IWL_ERR(priv, "BSM uCode verification failed at "
                                  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -2578,7 +2510,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
  */
 static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
 {
-       _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+       _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
        return 0;
 }
 
@@ -2649,16 +2581,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
        inst_len = priv->ucode_init.len;
        data_len = priv->ucode_init_data.len;
 
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
 
        /* Fill BSM memory with bootstrap instructions */
        for (reg_offset = BSM_SRAM_LOWER_BOUND;
             reg_offset < BSM_SRAM_LOWER_BOUND + len;
             reg_offset += sizeof(u32), image++)
-               _iwl_write_prph(priv, reg_offset,
+               _iwl_legacy_write_prph(priv, reg_offset,
                                          le32_to_cpu(*image));
 
        rc = iwl3945_verify_bsm(priv);
@@ -2666,19 +2598,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
                return rc;
 
        /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-       iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-       iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
                                 IWL39_RTC_INST_LOWER_BOUND);
-       iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
 
        /* Load bootstrap code into instruction SRAM now,
         *   to prepare to load "initialize" uCode */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG,
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
                BSM_WR_CTRL_REG_BIT_START);
 
        /* Wait for load of bootstrap uCode to finish */
        for (i = 0; i < 100; i++) {
-               done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
+               done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
                if (!(done & BSM_WR_CTRL_REG_BIT_START))
                        break;
                udelay(10);
@@ -2692,7 +2624,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
 
        /* Enable future boot loads whenever power management unit triggers it
         *   (e.g. when powering back up after power-save shutdown) */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG,
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
                BSM_WR_CTRL_REG_BIT_START_EN);
 
        return 0;
@@ -2701,7 +2633,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
 static struct iwl_hcmd_ops iwl3945_hcmd = {
        .rxon_assoc = iwl3945_send_rxon_assoc,
        .commit_rxon = iwl3945_commit_rxon,
-       .send_bt_config = iwl_send_bt_config,
 };
 
 static struct iwl_lib_ops iwl3945_lib = {
@@ -2727,14 +2658,9 @@ static struct iwl_lib_ops iwl3945_lib = {
                },
                .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
                .release_semaphore = iwl3945_eeprom_release_semaphore,
-               .query_addr = iwlcore_eeprom_query_addr,
        },
        .send_tx_power  = iwl3945_send_tx_power,
        .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-       .isr_ops = {
-               .isr = iwl_isr_legacy,
-       },
-       .check_plcp_health = iwl3945_good_plcp_health,
 
        .debugfs_ops = {
                .rx_stats_read = iwl3945_ucode_rx_stats_read,
@@ -2752,7 +2678,6 @@ static const struct iwl_legacy_ops iwl3945_legacy_ops = {
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
        .get_hcmd_size = iwl3945_get_hcmd_size,
        .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-       .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
        .request_scan = iwl3945_request_scan,
        .post_scan = iwl3945_post_scan,
 };
@@ -2772,13 +2697,10 @@ static struct iwl_base_params iwl3945_base_params = {
        .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
        .set_l0s = false,
        .use_bsm = true,
-       .use_isr_legacy = true,
        .led_compensation = 64,
-       .broken_powersave = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .tx_power_by_driver = true,
 };
 
 static struct iwl_cfg iwl3945_bg_cfg = {
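
The hunks above keep the original iwl3945_commit_rxon() decision intact while switching it to the iwl_legacy_* helpers: a change that only touches filters and flags goes out as the lightweight RXON_ASSOC command, while anything else needs a full RXON, preceded (when currently associated) by de-associating the active config, and followed by clearing and restoring the station table plus a fresh TXPOWER command. A self-contained toy model of just that decision, with hypothetical names, compiled and run on its own:

#include <stdbool.h>
#include <stdio.h>

enum rxon_plan {
        PLAN_ASSOC_ONLY,        /* RXON_ASSOC: filters/flags only           */
        PLAN_FULL,              /* full RXON, stations rebuilt, new TXPOWER */
        PLAN_DEASSOC_THEN_FULL, /* drop ASSOC from the active config first  */
};

static enum rxon_plan pick_plan(bool full_required, bool currently_associated,
                                bool new_config_wants_assoc)
{
        if (!full_required)
                return PLAN_ASSOC_ONLY;
        if (currently_associated && new_config_wants_assoc)
                return PLAN_DEASSOC_THEN_FULL;
        return PLAN_FULL;
}

int main(void)
{
        printf("%d\n", pick_plan(false, true, true));   /* 0: assoc-only update   */
        printf("%d\n", pick_plan(true, true, true));    /* 2: de-assoc, then full */
        printf("%d\n", pick_plan(true, false, true));   /* 1: plain full RXON     */
        return 0;
}
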
similarity index 97%
rename from drivers/net/wireless/iwlwifi/iwl-3945.h
rename to drivers/net/wireless/iwlegacy/iwl-3945.h
index 3eef1eb..b118b59 100644
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ struct iwl3945_rs_sta {
 
 /*
  * The common struct MUST be first because it is shared between
- * 3945 and agn!
+ * 3945 and 4965!
  */
 struct iwl3945_sta_priv {
        struct iwl_station_priv_common common;
@@ -201,7 +201,7 @@ struct iwl3945_ibss_seq {
 
 /******************************************************************************
  *
- * Functions implemented in iwl-base.c which are forward declared here
+ * Functions implemented in iwl3945-base.c which are forward declared here
  * for use by iwl-*.c
  *
  *****************************************************************************/
@@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio);
 extern void iwl3945_rx_replenish(void *data);
 extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-                                       struct ieee80211_hdr *hdr,int left);
+                                       struct ieee80211_hdr *hdr, int left);
 extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
                                       char **buf, bool display);
 extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
@@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
 /******************************************************************************
  *
  * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl-base.c
+ * for use by iwl3945-base.c
  *
  * NOTE:  The implementation of these functions are hardware specific
  * which is why they are in the hardware specific files (vs. iwl-base.c)
@@ -283,7 +283,7 @@ extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
 extern struct ieee80211_ops iwl3945_hw_ops;
 
 /*
- * Forward declare iwl-3945.c functions for iwl-base.c
+ * Forward declare iwl-3945.c functions for iwl3945-base.c
  */
 extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
 extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 0000000..81d6a25
--- /dev/null
@@ -0,0 +1,967 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-4965-calib.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+struct statistics_general_data {
+       u32 beacon_silence_rssi_a;
+       u32 beacon_silence_rssi_b;
+       u32 beacon_silence_rssi_c;
+       u32 beacon_energy_a;
+       u32 beacon_energy_b;
+       u32 beacon_energy_c;
+};
+
+void iwl4965_calib_free_results(struct iwl_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < IWL_CALIB_MAX; i++) {
+               kfree(priv->calib_results[i].buf);
+               priv->calib_results[i].buf = NULL;
+               priv->calib_results[i].buf_len = 0;
+       }
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ *   but then determines that they are either noise, or transmissions
+ *   from a distant wireless network (also "noise", really) that get
+ *   "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ *   enough to receive all of our own network traffic, but not so
+ *   high that our DSP gets too busy trying to lock onto non-network
+ *   activity/noise. */
+static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
+                                  u32 norm_fa,
+                                  u32 rx_enable_time,
+                                  struct statistics_general_data *rx_info)
+{
+       u32 max_nrg_cck = 0;
+       int i = 0;
+       u8 max_silence_rssi = 0;
+       u32 silence_ref = 0;
+       u8 silence_rssi_a = 0;
+       u8 silence_rssi_b = 0;
+       u8 silence_rssi_c = 0;
+       u32 val;
+
+       /* "false_alarms" values below are cross-multiplications to assess the
+        *   numbers of false alarms within the measured period of actual Rx
+        *   (Rx is off when we're txing), vs the min/max expected false alarms
+        *   (some should be expected if rx is sensitive enough) in a
+        *   hypothetical listening period of 200 time units (TU), 204.8 msec:
+        *
+        * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+        *
+        * */
+       u32 false_alarms = norm_fa * 200 * 1024;
+       u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+       u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
+       struct iwl_sensitivity_data *data = NULL;
+       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+       data = &(priv->sensitivity_data);
+
+       data->nrg_auto_corr_silence_diff = 0;
+
+       /* Find max silence rssi among all 3 receivers.
+        * This is background noise, which may include transmissions from other
+        *    networks, measured during silence before our network's beacon */
+       silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
+                           ALL_BAND_FILTER) >> 8);
+       silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
+                           ALL_BAND_FILTER) >> 8);
+       silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
+                           ALL_BAND_FILTER) >> 8);
+
+       val = max(silence_rssi_b, silence_rssi_c);
+       max_silence_rssi = max(silence_rssi_a, (u8) val);
+
+       /* Store silence rssi in 20-beacon history table */
+       data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+       data->nrg_silence_idx++;
+       if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+               data->nrg_silence_idx = 0;
+
+       /* Find max silence rssi across 20 beacon history */
+       for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+               val = data->nrg_silence_rssi[i];
+               silence_ref = max(silence_ref, val);
+       }
+       IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
+                       silence_rssi_a, silence_rssi_b, silence_rssi_c,
+                       silence_ref);
+
+       /* Find max rx energy (min value!) among all 3 receivers,
+        *   measured during beacon frame.
+        * Save it in 10-beacon history table. */
+       i = data->nrg_energy_idx;
+       val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+       data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+       data->nrg_energy_idx++;
+       if (data->nrg_energy_idx >= 10)
+               data->nrg_energy_idx = 0;
+
+       /* Find min rx energy (max value) across 10 beacon history.
+        * This is the minimum signal level that we want to receive well.
+        * Add backoff (margin so we don't miss slightly lower energy frames).
+        * This establishes an upper bound (min value) for energy threshold. */
+       max_nrg_cck = data->nrg_value[0];
+       for (i = 1; i < 10; i++)
+               max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+       max_nrg_cck += 6;
+
+       IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+                       rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+                       rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+       /* Count number of consecutive beacons with fewer-than-desired
+        *   false alarms. */
+       if (false_alarms < min_false_alarms)
+               data->num_in_cck_no_fa++;
+       else
+               data->num_in_cck_no_fa = 0;
+       IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
+                       data->num_in_cck_no_fa);
+
+       /* If we got too many false alarms this time, reduce sensitivity */
+       if ((false_alarms > max_false_alarms) &&
+               (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
+               IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
+                    false_alarms, max_false_alarms);
+               IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
+               data->nrg_curr_state = IWL_FA_TOO_MANY;
+               /* Store for "fewer than desired" on later beacon */
+               data->nrg_silence_ref = silence_ref;
+
+               /* increase energy threshold (reduce nrg value)
+                *   to decrease sensitivity */
+               data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+       /* Else if we got fewer than desired, increase sensitivity */
+       } else if (false_alarms < min_false_alarms) {
+               data->nrg_curr_state = IWL_FA_TOO_FEW;
+
+               /* Compare silence level with silence level for most recent
+                *   healthy number or too many false alarms */
+               data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
+                                                  (s32)silence_ref;
+
+               IWL_DEBUG_CALIB(priv,
+                        "norm FA %u < min FA %u, silence diff %d\n",
+                        false_alarms, min_false_alarms,
+                        data->nrg_auto_corr_silence_diff);
+
+               /* Increase value to increase sensitivity, but only if:
+                * 1a) previous beacon did *not* have *too many* false alarms
+                * 1b) AND there's a significant difference in Rx levels
+                *      from a previous beacon with too many, or healthy # FAs
+                * OR 2) We've seen a lot of beacons (100) with too few
+                *       false alarms */
+               if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
+                       ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+                       (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+                       IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+                       /* Increase nrg value to increase sensitivity */
+                       val = data->nrg_th_cck + NRG_STEP_CCK;
+                       data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+               } else {
+                       IWL_DEBUG_CALIB(priv,
+                                        "... but not changing sensitivity\n");
+               }
+
+       /* Else we got a healthy number of false alarms, keep status quo */
+       } else {
+               IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
+               data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+
+               /* Store for use in "fewer than desired" with later beacon */
+               data->nrg_silence_ref = silence_ref;
+
+               /* If previous beacon had too many false alarms,
+                *   give it some extra margin by reducing sensitivity again
+                *   (but don't go below measured energy of desired Rx) */
+               if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
+                       IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+                       if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+                               data->nrg_th_cck -= NRG_MARGIN;
+                       else
+                               data->nrg_th_cck = max_nrg_cck;
+               }
+       }
+
+       /* Make sure the energy threshold does not go above the measured
+        * energy of the desired Rx signals (reduced by backoff margin),
+        * or else we might start missing Rx frames.
+        * Lower value is higher energy, so we use max()!
+        */
+       data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+       IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+
+       data->nrg_prev_state = data->nrg_curr_state;
+
+       /* Auto-correlation CCK algorithm */
+       if (false_alarms > min_false_alarms) {
+
+               /* increase auto_corr values to decrease sensitivity
+                * so the DSP won't be disturbed by the noise
+                */
+               if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
+                       data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+               else {
+                       val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+                       data->auto_corr_cck =
+                               min((u32)ranges->auto_corr_max_cck, val);
+               }
+               val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+               data->auto_corr_cck_mrc =
+                       min((u32)ranges->auto_corr_max_cck_mrc, val);
+       } else if ((false_alarms < min_false_alarms) &&
+          ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+          (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+               /* Decrease auto_corr values to increase sensitivity */
+               val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+               data->auto_corr_cck =
+                       max((u32)ranges->auto_corr_min_cck, val);
+               val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+               data->auto_corr_cck_mrc =
+                       max((u32)ranges->auto_corr_min_cck_mrc, val);
+       }
+
+       return 0;
+}
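
To make the cross-multiplication in iwl4965_sens_energy_cck() concrete: norm_fa is scaled to a nominal 200 TU listening window (200 * 1024 usec = 204.8 msec) and compared against the MIN/MAX false-alarm rates multiplied by the actual Rx-enable time in usec. A standalone sketch with illustrative bounds (the driver's real MIN_FA_CCK/MAX_FA_CCK come from its headers, not from this hunk):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bounds only; the driver takes these from its own headers. */
#define MIN_FA_CCK 5
#define MAX_FA_CCK 50

/* Returns <0 for "too few", 0 for "in range", >0 for "too many". */
static int classify_cck_fa(uint32_t norm_fa, uint32_t rx_enable_time_us)
{
        uint64_t false_alarms = (uint64_t)norm_fa * 200 * 1024; /* scale to 200 TU */
        uint64_t max_fa = (uint64_t)MAX_FA_CCK * rx_enable_time_us;
        uint64_t min_fa = (uint64_t)MIN_FA_CCK * rx_enable_time_us;

        if (false_alarms > max_fa)
                return 1;       /* reduce sensitivity */
        if (false_alarms < min_fa)
                return -1;      /* consider increasing sensitivity */
        return 0;               /* healthy range, keep status quo */
}

int main(void)
{
        /* 60 normalized false alarms during 100 ms of actual Rx:
         * 60*200*1024 = 12,288,000 > 50*100,000 = 5,000,000 -> too many. */
        printf("%d\n", classify_cck_fa(60, 100000));
        return 0;
}
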
+
+
+static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
+                                      u32 norm_fa,
+                                      u32 rx_enable_time)
+{
+       u32 val;
+       u32 false_alarms = norm_fa * 200 * 1024;
+       u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+       u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+       struct iwl_sensitivity_data *data = NULL;
+       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+       data = &(priv->sensitivity_data);
+
+       /* If we got too many false alarms this time, reduce sensitivity */
+       if (false_alarms > max_false_alarms) {
+
+               IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
+                            false_alarms, max_false_alarms);
+
+               val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm =
+                       min((u32)ranges->auto_corr_max_ofdm, val);
+
+               val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc =
+                       min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+
+               val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_x1 =
+                       min((u32)ranges->auto_corr_max_ofdm_x1, val);
+
+               val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc_x1 =
+                       min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+       }
+
+       /* Else if we got fewer than desired, increase sensitivity */
+       else if (false_alarms < min_false_alarms) {
+
+               IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
+                            false_alarms, min_false_alarms);
+
+               val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm =
+                       max((u32)ranges->auto_corr_min_ofdm, val);
+
+               val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc =
+                       max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+
+               val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_x1 =
+                       max((u32)ranges->auto_corr_min_ofdm_x1, val);
+
+               val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc_x1 =
+                       max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+       } else {
+               IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
+                        min_false_alarms, false_alarms, max_false_alarms);
+       }
+       return 0;
+}
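
The OFDM branch above is the same idea applied to four linked thresholds: on every beacon each auto-correlation value is nudged one step toward less or more sensitivity and clamped to its per-field range. A generic standalone form of that step, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Move cur by one step in the requested direction, staying inside [lo, hi]. */
static uint32_t nudge_clamped(uint32_t cur, int toward_less_sensitive,
                              uint32_t step, uint32_t lo, uint32_t hi)
{
        int64_t v = (int64_t)cur + (toward_less_sensitive ? (int64_t)step
                                                          : -(int64_t)step);
        if (v < (int64_t)lo)
                return lo;
        if (v > (int64_t)hi)
                return hi;
        return (uint32_t)v;
}

int main(void)
{
        /* Too many false alarms: raise the threshold (less sensitive).   */
        printf("%u\n", (unsigned)nudge_clamped(200, 1, 20, 80, 240));  /* 220 */
        /* Too few: lower it, but never below the per-field minimum.      */
        printf("%u\n", (unsigned)nudge_clamped(85, 0, 20, 80, 240));   /* 80  */
        return 0;
}
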
+
+static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
+                               struct iwl_sensitivity_data *data,
+                               __le16 *tbl)
+{
+       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm);
+       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
+       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm_x1);
+       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
+
+       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_cck);
+       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_cck_mrc);
+
+       tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
+                               cpu_to_le16((u16)data->nrg_th_cck);
+       tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
+                               cpu_to_le16((u16)data->nrg_th_ofdm);
+
+       tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16(data->barker_corr_th_min);
+       tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16(data->barker_corr_th_min_mrc);
+       tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
+                               cpu_to_le16(data->nrg_th_cca);
+
+       IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+                       data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+                       data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+                       data->nrg_th_ofdm);
+
+       IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
+                       data->auto_corr_cck, data->auto_corr_cck_mrc,
+                       data->nrg_th_cck);
+}
+
+/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
+static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+{
+       struct iwl_sensitivity_cmd cmd;
+       struct iwl_sensitivity_data *data = NULL;
+       struct iwl_host_cmd cmd_out = {
+               .id = SENSITIVITY_CMD,
+               .len = sizeof(struct iwl_sensitivity_cmd),
+               .flags = CMD_ASYNC,
+               .data = &cmd,
+       };
+
+       data = &(priv->sensitivity_data);
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+
+       /* Update uCode's "work" table, and copy it to DSP */
+       cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+
+       /* Don't send command to uCode if nothing has changed */
+       if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
+                   sizeof(u16)*HD_TABLE_SIZE)) {
+               IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+               return 0;
+       }
+
+       /* Copy table for comparison next time */
+       memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
+              sizeof(u16)*HD_TABLE_SIZE);
+
+       return iwl_legacy_send_cmd(priv, &cmd_out);
+}
+
+void iwl4965_init_sensitivity(struct iwl_priv *priv)
+{
+       int ret = 0;
+       int i;
+       struct iwl_sensitivity_data *data = NULL;
+       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+       if (priv->disable_sens_cal)
+               return;
+
+       IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+
+       /* Clear driver's sensitivity algo data */
+       data = &(priv->sensitivity_data);
+
+       if (ranges == NULL)
+               return;
+
+       memset(data, 0, sizeof(struct iwl_sensitivity_data));
+
+       data->num_in_cck_no_fa = 0;
+       data->nrg_curr_state = IWL_FA_TOO_MANY;
+       data->nrg_prev_state = IWL_FA_TOO_MANY;
+       data->nrg_silence_ref = 0;
+       data->nrg_silence_idx = 0;
+       data->nrg_energy_idx = 0;
+
+       for (i = 0; i < 10; i++)
+               data->nrg_value[i] = 0;
+
+       for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+               data->nrg_silence_rssi[i] = 0;
+
+       data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
+       data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+       data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
+       data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+       data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+       data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+       data->nrg_th_cck = ranges->nrg_th_cck;
+       data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+       data->barker_corr_th_min = ranges->barker_corr_th_min;
+       data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+       data->nrg_th_cca = ranges->nrg_th_cca;
+
+       data->last_bad_plcp_cnt_ofdm = 0;
+       data->last_fa_cnt_ofdm = 0;
+       data->last_bad_plcp_cnt_cck = 0;
+       data->last_fa_cnt_cck = 0;
+
+       ret |= iwl4965_sensitivity_write(priv);
+       IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+}
+
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+{
+       u32 rx_enable_time;
+       u32 fa_cck;
+       u32 fa_ofdm;
+       u32 bad_plcp_cck;
+       u32 bad_plcp_ofdm;
+       u32 norm_fa_ofdm;
+       u32 norm_fa_cck;
+       struct iwl_sensitivity_data *data = NULL;
+       struct statistics_rx_non_phy *rx_info;
+       struct statistics_rx_phy *ofdm, *cck;
+       unsigned long flags;
+       struct statistics_general_data statis;
+
+       if (priv->disable_sens_cal)
+               return;
+
+       data = &(priv->sensitivity_data);
+
+       if (!iwl_legacy_is_any_associated(priv)) {
+               IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
+       ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
+       cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+
+       if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+               IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return;
+       }
+
+       /* Extract Statistics: */
+       rx_enable_time = le32_to_cpu(rx_info->channel_load);
+       fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+       fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+       bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+       bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+       statis.beacon_silence_rssi_a =
+                       le32_to_cpu(rx_info->beacon_silence_rssi_a);
+       statis.beacon_silence_rssi_b =
+                       le32_to_cpu(rx_info->beacon_silence_rssi_b);
+       statis.beacon_silence_rssi_c =
+                       le32_to_cpu(rx_info->beacon_silence_rssi_c);
+       statis.beacon_energy_a =
+                       le32_to_cpu(rx_info->beacon_energy_a);
+       statis.beacon_energy_b =
+                       le32_to_cpu(rx_info->beacon_energy_b);
+       statis.beacon_energy_c =
+                       le32_to_cpu(rx_info->beacon_energy_c);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+
+       if (!rx_enable_time) {
+               IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+               return;
+       }
+
+       /* These statistics increase monotonically, and do not reset
+        *   at each beacon.  Calculate difference from last value, or just
+        *   use the new statistics value if it has reset or wrapped around. */
+       if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
+               data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+       else {
+               bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+               data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+       }
+
+       if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
+               data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+       else {
+               bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+               data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+       }
+
+       if (data->last_fa_cnt_ofdm > fa_ofdm)
+               data->last_fa_cnt_ofdm = fa_ofdm;
+       else {
+               fa_ofdm -= data->last_fa_cnt_ofdm;
+               data->last_fa_cnt_ofdm += fa_ofdm;
+       }
+
+       if (data->last_fa_cnt_cck > fa_cck)
+               data->last_fa_cnt_cck = fa_cck;
+       else {
+               fa_cck -= data->last_fa_cnt_cck;
+               data->last_fa_cnt_cck += fa_cck;
+       }
+
+       /* Total aborted signal locks */
+       norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+       norm_fa_cck = fa_cck + bad_plcp_cck;
+
+       IWL_DEBUG_CALIB(priv,
+                        "cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
+                       bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+       iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
+       iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+
+       iwl4965_sensitivity_write(priv);
+}
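
The "difference from the last value, resync on reset or wrap" bookkeeping above is repeated four times (CCK/OFDM false alarms and PLCP errors). A minimal standalone equivalent of one instance, under a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Return the increment since the previous sample; if the counter went
 * backwards (uCode reset or wrap), resynchronize and fall back to the raw
 * value, exactly as the per-counter blocks above do. */
static uint32_t counter_delta(uint32_t *last, uint32_t now)
{
        uint32_t delta;

        if (*last > now) {              /* counter reset or wrapped */
                *last = now;
                return now;
        }
        delta = now - *last;
        *last = now;
        return delta;
}

int main(void)
{
        uint32_t last = 0;

        printf("%u\n", (unsigned)counter_delta(&last, 120));    /* 120 (first sample) */
        printf("%u\n", (unsigned)counter_delta(&last, 150));    /* 30  (increment)    */
        printf("%u\n", (unsigned)counter_delta(&last, 10));     /* 10  (reset seen)   */
        return 0;
}
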
+
+static inline u8 iwl4965_find_first_chain(u8 mask)
+{
+       if (mask & ANT_A)
+               return CHAIN_A;
+       if (mask & ANT_B)
+               return CHAIN_B;
+       return CHAIN_C;
+}
+
+/**
+ * Run disconnected antenna algorithm to find out which antennas are
+ * disconnected.
+ */
+static void
+iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
+                                    struct iwl_chain_noise_data *data)
+{
+       u32 active_chains = 0;
+       u32 max_average_sig;
+       u16 max_average_sig_antenna_i;
+       u8 num_tx_chains;
+       u8 first_chain;
+       u16 i = 0;
+
+       average_sig[0] = data->chain_signal_a /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[1] = data->chain_signal_b /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[2] = data->chain_signal_c /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+
+       if (average_sig[0] >= average_sig[1]) {
+               max_average_sig = average_sig[0];
+               max_average_sig_antenna_i = 0;
+               active_chains = (1 << max_average_sig_antenna_i);
+       } else {
+               max_average_sig = average_sig[1];
+               max_average_sig_antenna_i = 1;
+               active_chains = (1 << max_average_sig_antenna_i);
+       }
+
+       if (average_sig[2] >= max_average_sig) {
+               max_average_sig = average_sig[2];
+               max_average_sig_antenna_i = 2;
+               active_chains = (1 << max_average_sig_antenna_i);
+       }
+
+       IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+                    average_sig[0], average_sig[1], average_sig[2]);
+       IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+                    max_average_sig, max_average_sig_antenna_i);
+
+       /* Compare signal strengths for all 3 receivers. */
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               if (i != max_average_sig_antenna_i) {
+                       s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+                       /* If signal is very weak, compared with
+                        * strongest, mark it as disconnected. */
+                       if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+                               data->disconn_array[i] = 1;
+                       else
+                               active_chains |= (1 << i);
+                       IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
+                            "disconn_array[i] = %d\n",
+                            i, rssi_delta, data->disconn_array[i]);
+               }
+       }
+
+       /*
+        * The above algorithm sometimes fails when the ucode
+        * reports 0 for all chains. It's not clear why that
+        * happens to start with, but it is then causing trouble
+        * because this can make us enable more chains than the
+        * hardware really has.
+        *
+        * To be safe, simply mask out any chains that we know
+        * are not on the device.
+        */
+       active_chains &= priv->hw_params.valid_rx_ant;
+
+       num_tx_chains = 0;
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               /* loops on all the bits of
+                * priv->hw_setting.valid_tx_ant */
+               u8 ant_msk = (1 << i);
+               if (!(priv->hw_params.valid_tx_ant & ant_msk))
+                       continue;
+
+               num_tx_chains++;
+               if (data->disconn_array[i] == 0)
+                       /* there is a Tx antenna connected */
+                       break;
+               if (num_tx_chains == priv->hw_params.tx_chains_num &&
+                   data->disconn_array[i]) {
+                       /*
+                        * If all chains are disconnected
+                        * connect the first valid tx chain
+                        */
+                       first_chain =
+                       iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+                       data->disconn_array[first_chain] = 0;
+                       active_chains |= BIT(first_chain);
+                       IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
+                                       W/A - declare %d as connected\n",
+                                       first_chain);
+                       break;
+               }
+       }
+
+       if (active_chains != priv->hw_params.valid_rx_ant &&
+           active_chains != priv->chain_noise_data.active_chains)
+               IWL_DEBUG_CALIB(priv,
+                               "Detected that not all antennas are connected! "
+                               "Connected: %#x, valid: %#x.\n",
+                               active_chains, priv->hw_params.valid_rx_ant);
+
+       /* Save for use within RXON, TX, SCAN commands, etc. */
+       data->active_chains = active_chains;
+       IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+                       active_chains);
+}
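
The disconnection test above reduces to: average each chain's beacon RSSI, take the strongest chain as the reference, mark any chain that falls more than MAXIMUM_ALLOWED_PATHLOSS below it as disconnected, and mask the result by the antennas the hardware really has. A self-contained sketch with an illustrative threshold (the driver's constant lives in its own headers); the work-around that keeps at least one valid Tx chain connected is omitted here:

#include <stdint.h>
#include <stdio.h>

#define NUM_CHAINS      3
#define MAX_PATHLOSS    15      /* illustrative, not the driver's constant */

/* Bitmask of chains considered connected, restricted to valid_rx_mask. */
static unsigned connected_chains(const uint32_t avg_sig[NUM_CHAINS],
                                 unsigned valid_rx_mask)
{
        uint32_t best = 0;
        unsigned mask = 0;
        int i;

        for (i = 0; i < NUM_CHAINS; i++)
                if (avg_sig[i] > best)
                        best = avg_sig[i];

        for (i = 0; i < NUM_CHAINS; i++)
                if (best - avg_sig[i] <= MAX_PATHLOSS)
                        mask |= 1u << i;        /* close enough to the strongest */

        return mask & valid_rx_mask;    /* never enable chains the HW lacks */
}

int main(void)
{
        uint32_t sig[NUM_CHAINS] = { 52, 48, 20 };      /* chain C far below A/B */

        printf("0x%x\n", connected_chains(sig, 0x7));   /* prints 0x3 */
        return 0;
}
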
+
+static void iwl4965_gain_computation(struct iwl_priv *priv,
+               u32 *average_noise,
+               u16 min_average_noise_antenna_i,
+               u32 min_average_noise,
+               u8 default_chain)
+{
+       int i, ret;
+       struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+       data->delta_gain_code[min_average_noise_antenna_i] = 0;
+
+       for (i = default_chain; i < NUM_RX_CHAINS; i++) {
+               s32 delta_g = 0;
+
+               if (!(data->disconn_array[i]) &&
+                   (data->delta_gain_code[i] ==
+                            CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+                       delta_g = average_noise[i] - min_average_noise;
+                       data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+                       data->delta_gain_code[i] =
+                               min(data->delta_gain_code[i],
+                               (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+                       data->delta_gain_code[i] =
+                               (data->delta_gain_code[i] | (1 << 2));
+               } else {
+                       data->delta_gain_code[i] = 0;
+               }
+       }
+       IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
+                    data->delta_gain_code[0],
+                    data->delta_gain_code[1],
+                    data->delta_gain_code[2]);
+
+       /* Differential gain gets sent to uCode only once */
+       if (!data->radio_write) {
+               struct iwl_calib_diff_gain_cmd cmd;
+               data->radio_write = 1;
+
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+               cmd.diff_gain_a = data->delta_gain_code[0];
+               cmd.diff_gain_b = data->delta_gain_code[1];
+               cmd.diff_gain_c = data->delta_gain_code[2];
+               ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+                                     sizeof(cmd), &cmd);
+               if (ret)
+                       IWL_DEBUG_CALIB(priv, "fail sending cmd "
+                                    "REPLY_PHY_CALIBRATION_CMD\n");
+
+               /* TODO we might want recalculate
+                * rx_chain in rxon cmd */
+
+               /* Mark so we run this algo only once! */
+               data->state = IWL_CHAIN_NOISE_CALIBRATED;
+       }
+}
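
Numerically, iwl4965_gain_computation() gives each remaining connected chain a code of (average_noise - min_average_noise) * 10 / 15, clamps it to a hardware maximum and sets bit 2, while the quietest chain is forced to 0; averages of 80/95/110 against a minimum of 80 yield raw codes of 0, 10 and 20 before clamping. A hypothetical single-chain helper, with the clamp passed in rather than assumed:

#include <stdint.h>
#include <stdio.h>

/* Differential gain code for one connected, non-reference chain. */
static uint8_t delta_gain_code(uint32_t avg_noise, uint32_t min_noise,
                               uint8_t hw_max_code)
{
        uint32_t code = ((avg_noise - min_noise) * 10) / 15;

        if (code > hw_max_code)
                code = hw_max_code;

        return (uint8_t)(code | (1u << 2));     /* bit 2 set, as in the routine above */
}

int main(void)
{
        /* With an assumed hardware cap of 3, both noisier chains saturate. */
        printf("%u %u\n", (unsigned)delta_gain_code(95, 80, 3),
               (unsigned)delta_gain_code(110, 80, 3));  /* prints "7 7" */
        return 0;
}
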
+
+
+
+/*
+ * Accumulate 16 beacons of signal and noise statistics for each of
+ *   3 receivers/antennas/rx-chains, then figure out:
+ * 1)  Which antennas are connected.
+ * 2)  Differential rx gain settings to balance the 3 receivers.
+ */
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+{
+       struct iwl_chain_noise_data *data = NULL;
+
+       u32 chain_noise_a;
+       u32 chain_noise_b;
+       u32 chain_noise_c;
+       u32 chain_sig_a;
+       u32 chain_sig_b;
+       u32 chain_sig_c;
+       u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+       u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+       u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+       u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+       u16 i = 0;
+       u16 rxon_chnum = INITIALIZATION_VALUE;
+       u16 stat_chnum = INITIALIZATION_VALUE;
+       u8 rxon_band24;
+       u8 stat_band24;
+       unsigned long flags;
+       struct statistics_rx_non_phy *rx_info;
+
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (priv->disable_chain_noise_cal)
+               return;
+
+       data = &(priv->chain_noise_data);
+
+       /*
+        * Accumulate just the first "chain_noise_num_beacons" after
+        * the first association, then we're done forever.
+        */
+       if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
+               if (data->state == IWL_CHAIN_NOISE_ALIVE)
+                       IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
+                     rx.general);
+
+       if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+               IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return;
+       }
+
+       rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+       rxon_chnum = le16_to_cpu(ctx->staging.channel);
+
+       stat_band24 = !!(((struct iwl_notif_statistics *)
+                        stat_resp)->flag &
+                        STATISTICS_REPLY_FLG_BAND_24G_MSK);
+       stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
+                                stat_resp)->flag) >> 16;
+
+       /* Make sure we accumulate data for just the associated channel
+        *   (even if scanning). */
+       if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
+               IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
+                               rxon_chnum, rxon_band24);
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return;
+       }
+
+       /*
+        *  Accumulate beacon statistics values across
+        * "chain_noise_num_beacons"
+        */
+       chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
+                               IN_BAND_FILTER;
+       chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
+                               IN_BAND_FILTER;
+       chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
+                               IN_BAND_FILTER;
+
+       chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+       chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+       chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       data->beacon_count++;
+
+       data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+       data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+       data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+       data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+       data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+       data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+       IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
+                       rxon_chnum, rxon_band24, data->beacon_count);
+       IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
+                       chain_sig_a, chain_sig_b, chain_sig_c);
+       IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
+                       chain_noise_a, chain_noise_b, chain_noise_c);
+
+       /* If this is the "chain_noise_num_beacons", determine:
+        * 1)  Disconnected antennas (using signal strengths)
+        * 2)  Differential gain (using silence noise) to balance receivers */
+       if (data->beacon_count !=
+               priv->cfg->base_params->chain_noise_num_beacons)
+               return;
+
+       /* Analyze signal for disconnected antenna */
+       iwl4965_find_disconn_antenna(priv, average_sig, data);
+
+       /* Analyze noise for rx balance */
+       average_noise[0] = data->chain_noise_a /
+                          priv->cfg->base_params->chain_noise_num_beacons;
+       average_noise[1] = data->chain_noise_b /
+                          priv->cfg->base_params->chain_noise_num_beacons;
+       average_noise[2] = data->chain_noise_c /
+                          priv->cfg->base_params->chain_noise_num_beacons;
+
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               if (!(data->disconn_array[i]) &&
+                  (average_noise[i] <= min_average_noise)) {
+                       /* This means that chain i is active and has
+                        * lower noise values so far: */
+                       min_average_noise = average_noise[i];
+                       min_average_noise_antenna_i = i;
+               }
+       }
+
+       IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
+                       average_noise[0], average_noise[1],
+                       average_noise[2]);
+
+       IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
+                       min_average_noise, min_average_noise_antenna_i);
+
+       iwl4965_gain_computation(priv, average_noise,
+                       min_average_noise_antenna_i, min_average_noise,
+                       iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+
+       /* Some power changes may have been made during the calibration.
+        * Update and commit the RXON
+        */
+       if (priv->cfg->ops->lib->update_chain_flags)
+               priv->cfg->ops->lib->update_chain_flags(priv);
+
+       data->state = IWL_CHAIN_NOISE_DONE;
+       iwl_legacy_power_update_mode(priv, false);
+}
+
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+{
+       int i;
+       memset(&(priv->sensitivity_data), 0,
+              sizeof(struct iwl_sensitivity_data));
+       memset(&(priv->chain_noise_data), 0,
+              sizeof(struct iwl_chain_noise_data));
+       for (i = 0; i < NUM_RX_CHAINS; i++)
+               priv->chain_noise_data.delta_gain_code[i] =
+                               CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+       /* Ask for statistics now, the uCode will send notification
+        * periodically after association */
+       iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+}
similarity index 80%
rename from drivers/net/wireless/iwlwifi/iwl-legacy.h
rename to drivers/net/wireless/iwlegacy/iwl-4965-calib.h
index 9f7b2f9..f46c80e 100644
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#ifndef __iwl_4965_calib_h__
+#define __iwl_4965_calib_h__
 
-#ifndef __iwl_legacy_h__
-#define __iwl_legacy_h__
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-commands.h"
 
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-                               struct ieee80211_tx_info *info,
-                               __le16 fc, __le32 *tx_flags);
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
+void iwl4965_init_sensitivity(struct iwl_priv *priv);
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
+void iwl4965_calib_free_results(struct iwl_priv *priv);
 
-irqreturn_t iwl_isr_legacy(int irq, void *data);
-
-#endif /* __iwl_legacy_h__ */
+#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644 (file)
index 0000000..1c93665
--- /dev/null
@@ -0,0 +1,774 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+*  Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
+
+static const char *fmt_value = "  %-30s %10u\n";
+static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char *fmt_header =
+       "%-32s    current  cumulative       delta         max\n";
+
+static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+       int p = 0;
+       u32 flag;
+
+       flag = le32_to_cpu(priv->_4965.statistics.flag);
+
+       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+       if (flag & UCODE_STATISTICS_CLEAR_MSK)
+               p += scnprintf(buf + p, bufsz - p,
+               "\tStatistics have been cleared\n");
+       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+               (flag & UCODE_STATISTICS_FREQUENCY_MSK)
+               ? "2.4 GHz" : "5.2 GHz");
+       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+               (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
+                ? "enabled" : "disabled");
+
+       return p;
+}
+
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+                   sizeof(struct statistics_rx_non_phy) * 40 +
+                   sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+       ssize_t ret;
+       struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+       struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+       struct statistics_rx_non_phy *general, *accum_general;
+       struct statistics_rx_non_phy *delta_general, *max_general;
+       struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics information displayed here is based on
+        * the last statistics notification from the uCode and
+        * might not reflect the current uCode activity.
+        */
+       ofdm = &priv->_4965.statistics.rx.ofdm;
+       cck = &priv->_4965.statistics.rx.cck;
+       general = &priv->_4965.statistics.rx.general;
+       ht = &priv->_4965.statistics.rx.ofdm_ht;
+       accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
+       accum_cck = &priv->_4965.accum_statistics.rx.cck;
+       accum_general = &priv->_4965.accum_statistics.rx.general;
+       accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
+       delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
+       delta_cck = &priv->_4965.delta_statistics.rx.cck;
+       delta_general = &priv->_4965.delta_statistics.rx.general;
+       delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
+       max_ofdm = &priv->_4965.max_delta.rx.ofdm;
+       max_cck = &priv->_4965.max_delta.rx.cck;
+       max_general = &priv->_4965.max_delta.rx.general;
+       max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
+
+       pos += iwl4965_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - OFDM:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ina_cnt:",
+                        le32_to_cpu(ofdm->ina_cnt),
+                        accum_ofdm->ina_cnt,
+                        delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_cnt:",
+                        le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+                        delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "plcp_err:",
+                        le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+                        delta_ofdm->plcp_err, max_ofdm->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_err:",
+                        le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+                        delta_ofdm->crc32_err, max_ofdm->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "overrun_err:",
+                        le32_to_cpu(ofdm->overrun_err),
+                        accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+                        max_ofdm->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "early_overrun_err:",
+                        le32_to_cpu(ofdm->early_overrun_err),
+                        accum_ofdm->early_overrun_err,
+                        delta_ofdm->early_overrun_err,
+                        max_ofdm->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_good:",
+                        le32_to_cpu(ofdm->crc32_good),
+                        accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+                        max_ofdm->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "false_alarm_cnt:",
+                        le32_to_cpu(ofdm->false_alarm_cnt),
+                        accum_ofdm->false_alarm_cnt,
+                        delta_ofdm->false_alarm_cnt,
+                        max_ofdm->false_alarm_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_sync_err_cnt:",
+                        le32_to_cpu(ofdm->fina_sync_err_cnt),
+                        accum_ofdm->fina_sync_err_cnt,
+                        delta_ofdm->fina_sync_err_cnt,
+                        max_ofdm->fina_sync_err_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sfd_timeout:",
+                        le32_to_cpu(ofdm->sfd_timeout),
+                        accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+                        max_ofdm->sfd_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_timeout:",
+                        le32_to_cpu(ofdm->fina_timeout),
+                        accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+                        max_ofdm->fina_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "unresponded_rts:",
+                        le32_to_cpu(ofdm->unresponded_rts),
+                        accum_ofdm->unresponded_rts,
+                        delta_ofdm->unresponded_rts,
+                        max_ofdm->unresponded_rts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rxe_frame_lmt_ovrun:",
+                        le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+                        accum_ofdm->rxe_frame_limit_overrun,
+                        delta_ofdm->rxe_frame_limit_overrun,
+                        max_ofdm->rxe_frame_limit_overrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ack_cnt:",
+                        le32_to_cpu(ofdm->sent_ack_cnt),
+                        accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+                        max_ofdm->sent_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_cts_cnt:",
+                        le32_to_cpu(ofdm->sent_cts_cnt),
+                        accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+                        max_ofdm->sent_cts_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ba_rsp_cnt:",
+                        le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+                        accum_ofdm->sent_ba_rsp_cnt,
+                        delta_ofdm->sent_ba_rsp_cnt,
+                        max_ofdm->sent_ba_rsp_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dsp_self_kill:",
+                        le32_to_cpu(ofdm->dsp_self_kill),
+                        accum_ofdm->dsp_self_kill,
+                        delta_ofdm->dsp_self_kill,
+                        max_ofdm->dsp_self_kill);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "mh_format_err:",
+                        le32_to_cpu(ofdm->mh_format_err),
+                        accum_ofdm->mh_format_err,
+                        delta_ofdm->mh_format_err,
+                        max_ofdm->mh_format_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "re_acq_main_rssi_sum:",
+                        le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+                        accum_ofdm->re_acq_main_rssi_sum,
+                        delta_ofdm->re_acq_main_rssi_sum,
+                        max_ofdm->re_acq_main_rssi_sum);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - CCK:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ina_cnt:",
+                        le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+                        delta_cck->ina_cnt, max_cck->ina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_cnt:",
+                        le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+                        delta_cck->fina_cnt, max_cck->fina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "plcp_err:",
+                        le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+                        delta_cck->plcp_err, max_cck->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_err:",
+                        le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+                        delta_cck->crc32_err, max_cck->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "overrun_err:",
+                        le32_to_cpu(cck->overrun_err),
+                        accum_cck->overrun_err, delta_cck->overrun_err,
+                        max_cck->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "early_overrun_err:",
+                        le32_to_cpu(cck->early_overrun_err),
+                        accum_cck->early_overrun_err,
+                        delta_cck->early_overrun_err,
+                        max_cck->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_good:",
+                        le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+                        delta_cck->crc32_good, max_cck->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "false_alarm_cnt:",
+                        le32_to_cpu(cck->false_alarm_cnt),
+                        accum_cck->false_alarm_cnt,
+                        delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_sync_err_cnt:",
+                        le32_to_cpu(cck->fina_sync_err_cnt),
+                        accum_cck->fina_sync_err_cnt,
+                        delta_cck->fina_sync_err_cnt,
+                        max_cck->fina_sync_err_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sfd_timeout:",
+                        le32_to_cpu(cck->sfd_timeout),
+                        accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+                        max_cck->sfd_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_timeout:",
+                        le32_to_cpu(cck->fina_timeout),
+                        accum_cck->fina_timeout, delta_cck->fina_timeout,
+                        max_cck->fina_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "unresponded_rts:",
+                        le32_to_cpu(cck->unresponded_rts),
+                        accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+                        max_cck->unresponded_rts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rxe_frame_lmt_ovrun:",
+                        le32_to_cpu(cck->rxe_frame_limit_overrun),
+                        accum_cck->rxe_frame_limit_overrun,
+                        delta_cck->rxe_frame_limit_overrun,
+                        max_cck->rxe_frame_limit_overrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ack_cnt:",
+                        le32_to_cpu(cck->sent_ack_cnt),
+                        accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+                        max_cck->sent_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_cts_cnt:",
+                        le32_to_cpu(cck->sent_cts_cnt),
+                        accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+                        max_cck->sent_cts_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ba_rsp_cnt:",
+                        le32_to_cpu(cck->sent_ba_rsp_cnt),
+                        accum_cck->sent_ba_rsp_cnt,
+                        delta_cck->sent_ba_rsp_cnt,
+                        max_cck->sent_ba_rsp_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dsp_self_kill:",
+                        le32_to_cpu(cck->dsp_self_kill),
+                        accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+                        max_cck->dsp_self_kill);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "mh_format_err:",
+                        le32_to_cpu(cck->mh_format_err),
+                        accum_cck->mh_format_err, delta_cck->mh_format_err,
+                        max_cck->mh_format_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "re_acq_main_rssi_sum:",
+                        le32_to_cpu(cck->re_acq_main_rssi_sum),
+                        accum_cck->re_acq_main_rssi_sum,
+                        delta_cck->re_acq_main_rssi_sum,
+                        max_cck->re_acq_main_rssi_sum);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - GENERAL:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bogus_cts:",
+                        le32_to_cpu(general->bogus_cts),
+                        accum_general->bogus_cts, delta_general->bogus_cts,
+                        max_general->bogus_cts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bogus_ack:",
+                        le32_to_cpu(general->bogus_ack),
+                        accum_general->bogus_ack, delta_general->bogus_ack,
+                        max_general->bogus_ack);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "non_bssid_frames:",
+                        le32_to_cpu(general->non_bssid_frames),
+                        accum_general->non_bssid_frames,
+                        delta_general->non_bssid_frames,
+                        max_general->non_bssid_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "filtered_frames:",
+                        le32_to_cpu(general->filtered_frames),
+                        accum_general->filtered_frames,
+                        delta_general->filtered_frames,
+                        max_general->filtered_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "non_channel_beacons:",
+                        le32_to_cpu(general->non_channel_beacons),
+                        accum_general->non_channel_beacons,
+                        delta_general->non_channel_beacons,
+                        max_general->non_channel_beacons);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "channel_beacons:",
+                        le32_to_cpu(general->channel_beacons),
+                        accum_general->channel_beacons,
+                        delta_general->channel_beacons,
+                        max_general->channel_beacons);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "num_missed_bcon:",
+                        le32_to_cpu(general->num_missed_bcon),
+                        accum_general->num_missed_bcon,
+                        delta_general->num_missed_bcon,
+                        max_general->num_missed_bcon);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "adc_rx_saturation_time:",
+                        le32_to_cpu(general->adc_rx_saturation_time),
+                        accum_general->adc_rx_saturation_time,
+                        delta_general->adc_rx_saturation_time,
+                        max_general->adc_rx_saturation_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ina_detect_search_tm:",
+                        le32_to_cpu(general->ina_detection_search_time),
+                        accum_general->ina_detection_search_time,
+                        delta_general->ina_detection_search_time,
+                        max_general->ina_detection_search_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_silence_rssi_a:",
+                        le32_to_cpu(general->beacon_silence_rssi_a),
+                        accum_general->beacon_silence_rssi_a,
+                        delta_general->beacon_silence_rssi_a,
+                        max_general->beacon_silence_rssi_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_silence_rssi_b:",
+                        le32_to_cpu(general->beacon_silence_rssi_b),
+                        accum_general->beacon_silence_rssi_b,
+                        delta_general->beacon_silence_rssi_b,
+                        max_general->beacon_silence_rssi_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_silence_rssi_c:",
+                        le32_to_cpu(general->beacon_silence_rssi_c),
+                        accum_general->beacon_silence_rssi_c,
+                        delta_general->beacon_silence_rssi_c,
+                        max_general->beacon_silence_rssi_c);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "interference_data_flag:",
+                        le32_to_cpu(general->interference_data_flag),
+                        accum_general->interference_data_flag,
+                        delta_general->interference_data_flag,
+                        max_general->interference_data_flag);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "channel_load:",
+                        le32_to_cpu(general->channel_load),
+                        accum_general->channel_load,
+                        delta_general->channel_load,
+                        max_general->channel_load);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dsp_false_alarms:",
+                        le32_to_cpu(general->dsp_false_alarms),
+                        accum_general->dsp_false_alarms,
+                        delta_general->dsp_false_alarms,
+                        max_general->dsp_false_alarms);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_rssi_a:",
+                        le32_to_cpu(general->beacon_rssi_a),
+                        accum_general->beacon_rssi_a,
+                        delta_general->beacon_rssi_a,
+                        max_general->beacon_rssi_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_rssi_b:",
+                        le32_to_cpu(general->beacon_rssi_b),
+                        accum_general->beacon_rssi_b,
+                        delta_general->beacon_rssi_b,
+                        max_general->beacon_rssi_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_rssi_c:",
+                        le32_to_cpu(general->beacon_rssi_c),
+                        accum_general->beacon_rssi_c,
+                        delta_general->beacon_rssi_c,
+                        max_general->beacon_rssi_c);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_energy_a:",
+                        le32_to_cpu(general->beacon_energy_a),
+                        accum_general->beacon_energy_a,
+                        delta_general->beacon_energy_a,
+                        max_general->beacon_energy_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_energy_b:",
+                        le32_to_cpu(general->beacon_energy_b),
+                        accum_general->beacon_energy_b,
+                        delta_general->beacon_energy_b,
+                        max_general->beacon_energy_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_energy_c:",
+                        le32_to_cpu(general->beacon_energy_c),
+                        accum_general->beacon_energy_c,
+                        delta_general->beacon_energy_c,
+                        max_general->beacon_energy_c);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - OFDM_HT:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "plcp_err:",
+                        le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+                        delta_ht->plcp_err, max_ht->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "overrun_err:",
+                        le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+                        delta_ht->overrun_err, max_ht->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "early_overrun_err:",
+                        le32_to_cpu(ht->early_overrun_err),
+                        accum_ht->early_overrun_err,
+                        delta_ht->early_overrun_err,
+                        max_ht->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_good:",
+                        le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+                        delta_ht->crc32_good, max_ht->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_err:",
+                        le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+                        delta_ht->crc32_err, max_ht->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "mh_format_err:",
+                        le32_to_cpu(ht->mh_format_err),
+                        accum_ht->mh_format_err,
+                        delta_ht->mh_format_err, max_ht->mh_format_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg_crc32_good:",
+                        le32_to_cpu(ht->agg_crc32_good),
+                        accum_ht->agg_crc32_good,
+                        delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg_mpdu_cnt:",
+                        le32_to_cpu(ht->agg_mpdu_cnt),
+                        accum_ht->agg_mpdu_cnt,
+                        delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg_cnt:",
+                        le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+                        delta_ht->agg_cnt, max_ht->agg_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "unsupport_mcs:",
+                        le32_to_cpu(ht->unsupport_mcs),
+                        accum_ht->unsupport_mcs,
+                        delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
+                               char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+       ssize_t ret;
+       struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /* The statistics information displayed here is based on
+        * the last statistics notification from the uCode and
+        * might not reflect the current uCode activity.
+        */
+       tx = &priv->_4965.statistics.tx;
+       accum_tx = &priv->_4965.accum_statistics.tx;
+       delta_tx = &priv->_4965.delta_statistics.tx;
+       max_tx = &priv->_4965.max_delta.tx;
+
+       pos += iwl4965_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Tx:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "preamble:",
+                        le32_to_cpu(tx->preamble_cnt),
+                        accum_tx->preamble_cnt,
+                        delta_tx->preamble_cnt, max_tx->preamble_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rx_detected_cnt:",
+                        le32_to_cpu(tx->rx_detected_cnt),
+                        accum_tx->rx_detected_cnt,
+                        delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bt_prio_defer_cnt:",
+                        le32_to_cpu(tx->bt_prio_defer_cnt),
+                        accum_tx->bt_prio_defer_cnt,
+                        delta_tx->bt_prio_defer_cnt,
+                        max_tx->bt_prio_defer_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bt_prio_kill_cnt:",
+                        le32_to_cpu(tx->bt_prio_kill_cnt),
+                        accum_tx->bt_prio_kill_cnt,
+                        delta_tx->bt_prio_kill_cnt,
+                        max_tx->bt_prio_kill_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "few_bytes_cnt:",
+                        le32_to_cpu(tx->few_bytes_cnt),
+                        accum_tx->few_bytes_cnt,
+                        delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "cts_timeout:",
+                        le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+                        delta_tx->cts_timeout, max_tx->cts_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ack_timeout:",
+                        le32_to_cpu(tx->ack_timeout),
+                        accum_tx->ack_timeout,
+                        delta_tx->ack_timeout, max_tx->ack_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "expected_ack_cnt:",
+                        le32_to_cpu(tx->expected_ack_cnt),
+                        accum_tx->expected_ack_cnt,
+                        delta_tx->expected_ack_cnt,
+                        max_tx->expected_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "actual_ack_cnt:",
+                        le32_to_cpu(tx->actual_ack_cnt),
+                        accum_tx->actual_ack_cnt,
+                        delta_tx->actual_ack_cnt,
+                        max_tx->actual_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dump_msdu_cnt:",
+                        le32_to_cpu(tx->dump_msdu_cnt),
+                        accum_tx->dump_msdu_cnt,
+                        delta_tx->dump_msdu_cnt,
+                        max_tx->dump_msdu_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "abort_nxt_frame_mismatch:",
+                        le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+                        accum_tx->burst_abort_next_frame_mismatch_cnt,
+                        delta_tx->burst_abort_next_frame_mismatch_cnt,
+                        max_tx->burst_abort_next_frame_mismatch_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "abort_missing_nxt_frame:",
+                        le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+                        accum_tx->burst_abort_missing_next_frame_cnt,
+                        delta_tx->burst_abort_missing_next_frame_cnt,
+                        max_tx->burst_abort_missing_next_frame_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "cts_timeout_collision:",
+                        le32_to_cpu(tx->cts_timeout_collision),
+                        accum_tx->cts_timeout_collision,
+                        delta_tx->cts_timeout_collision,
+                        max_tx->cts_timeout_collision);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ack_ba_timeout_collision:",
+                        le32_to_cpu(tx->ack_or_ba_timeout_collision),
+                        accum_tx->ack_or_ba_timeout_collision,
+                        delta_tx->ack_or_ba_timeout_collision,
+                        max_tx->ack_or_ba_timeout_collision);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg ba_timeout:",
+                        le32_to_cpu(tx->agg.ba_timeout),
+                        accum_tx->agg.ba_timeout,
+                        delta_tx->agg.ba_timeout,
+                        max_tx->agg.ba_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg ba_resched_frames:",
+                        le32_to_cpu(tx->agg.ba_reschedule_frames),
+                        accum_tx->agg.ba_reschedule_frames,
+                        delta_tx->agg.ba_reschedule_frames,
+                        max_tx->agg.ba_reschedule_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_agg_frame:",
+                        le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+                        accum_tx->agg.scd_query_agg_frame_cnt,
+                        delta_tx->agg.scd_query_agg_frame_cnt,
+                        max_tx->agg.scd_query_agg_frame_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_no_agg:",
+                        le32_to_cpu(tx->agg.scd_query_no_agg),
+                        accum_tx->agg.scd_query_no_agg,
+                        delta_tx->agg.scd_query_no_agg,
+                        max_tx->agg.scd_query_no_agg);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_agg:",
+                        le32_to_cpu(tx->agg.scd_query_agg),
+                        accum_tx->agg.scd_query_agg,
+                        delta_tx->agg.scd_query_agg,
+                        max_tx->agg.scd_query_agg);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_mismatch:",
+                        le32_to_cpu(tx->agg.scd_query_mismatch),
+                        accum_tx->agg.scd_query_mismatch,
+                        delta_tx->agg.scd_query_mismatch,
+                        max_tx->agg.scd_query_mismatch);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg frame_not_ready:",
+                        le32_to_cpu(tx->agg.frame_not_ready),
+                        accum_tx->agg.frame_not_ready,
+                        delta_tx->agg.frame_not_ready,
+                        max_tx->agg.frame_not_ready);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg underrun:",
+                        le32_to_cpu(tx->agg.underrun),
+                        accum_tx->agg.underrun,
+                        delta_tx->agg.underrun, max_tx->agg.underrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg bt_prio_kill:",
+                        le32_to_cpu(tx->agg.bt_prio_kill),
+                        accum_tx->agg.bt_prio_kill,
+                        delta_tx->agg.bt_prio_kill,
+                        max_tx->agg.bt_prio_kill);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg rx_ba_rsp_cnt:",
+                        le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+                        accum_tx->agg.rx_ba_rsp_cnt,
+                        delta_tx->agg.rx_ba_rsp_cnt,
+                        max_tx->agg.rx_ba_rsp_cnt);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct statistics_general) * 10 + 300;
+       ssize_t ret;
+       struct statistics_general_common *general, *accum_general;
+       struct statistics_general_common *delta_general, *max_general;
+       struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+       struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /* The statistics information displayed here is based on
+        * the last statistics notification from the uCode and
+        * might not reflect the current uCode activity.
+        */
+       general = &priv->_4965.statistics.general.common;
+       dbg = &priv->_4965.statistics.general.common.dbg;
+       div = &priv->_4965.statistics.general.common.div;
+       accum_general = &priv->_4965.accum_statistics.general.common;
+       accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
+       accum_div = &priv->_4965.accum_statistics.general.common.div;
+       delta_general = &priv->_4965.delta_statistics.general.common;
+       max_general = &priv->_4965.max_delta.general.common;
+       delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
+       max_dbg = &priv->_4965.max_delta.general.common.dbg;
+       delta_div = &priv->_4965.delta_statistics.general.common.div;
+       max_div = &priv->_4965.max_delta.general.common.div;
+
+       pos += iwl4965_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_General:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_value, "temperature:",
+                        le32_to_cpu(general->temperature));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_value, "ttl_timestamp:",
+                        le32_to_cpu(general->ttl_timestamp));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "burst_check:",
+                        le32_to_cpu(dbg->burst_check),
+                        accum_dbg->burst_check,
+                        delta_dbg->burst_check, max_dbg->burst_check);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "burst_count:",
+                        le32_to_cpu(dbg->burst_count),
+                        accum_dbg->burst_count,
+                        delta_dbg->burst_count, max_dbg->burst_count);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "wait_for_silence_timeout_count:",
+                        le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+                        accum_dbg->wait_for_silence_timeout_cnt,
+                        delta_dbg->wait_for_silence_timeout_cnt,
+                        max_dbg->wait_for_silence_timeout_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sleep_time:",
+                        le32_to_cpu(general->sleep_time),
+                        accum_general->sleep_time,
+                        delta_general->sleep_time, max_general->sleep_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "slots_out:",
+                        le32_to_cpu(general->slots_out),
+                        accum_general->slots_out,
+                        delta_general->slots_out, max_general->slots_out);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "slots_idle:",
+                        le32_to_cpu(general->slots_idle),
+                        accum_general->slots_idle,
+                        delta_general->slots_idle, max_general->slots_idle);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "tx_on_a:",
+                        le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+                        delta_div->tx_on_a, max_div->tx_on_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "tx_on_b:",
+                        le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+                        delta_div->tx_on_b, max_div->tx_on_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "exec_time:",
+                        le32_to_cpu(div->exec_time), accum_div->exec_time,
+                        delta_div->exec_time, max_div->exec_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "probe_time:",
+                        le32_to_cpu(div->probe_time), accum_div->probe_time,
+                        delta_div->probe_time, max_div->probe_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rx_enable_counter:",
+                        le32_to_cpu(general->rx_enable_counter),
+                        accum_general->rx_enable_counter,
+                        delta_general->rx_enable_counter,
+                        max_general->rx_enable_counter);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "num_of_sos_states:",
+                        le32_to_cpu(general->num_of_sos_states),
+                        accum_general->num_of_sos_states,
+                        delta_general->num_of_sos_states,
+                        max_general->num_of_sos_states);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
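All three *_stats_read handlers above follow the same debugfs pattern: format a text report into a kzalloc'd buffer and hand it back through simple_read_from_buffer(). Their registration is not part of this hunk; below is only a hedged sketch of how such a handler is typically wired up, in which the open callback, the file name and the sketch_* identifiers are assumptions rather than code from this patch.

    /* Hypothetical debugfs wiring for the rx-stats handler above. */
    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>

    #include "iwl-dev.h"
    #include "iwl-4965-debugfs.h"

    static int sketch_stats_open(struct inode *inode, struct file *file)
    {
            /* Stash the iwl_priv pointer where the read handler expects it */
            file->private_data = inode->i_private;
            return 0;
    }

    static const struct file_operations sketch_rx_stats_ops = {
            .read  = iwl4965_ucode_rx_stats_read,
            .open  = sketch_stats_open,
            .owner = THIS_MODULE,
    };

    /* Called from the driver's debugfs registration with its dentry */
    static void sketch_register_rx_stats(struct iwl_priv *priv, struct dentry *dir)
    {
            debugfs_create_file("ucode_rx_stats", S_IRUSR, dir, priv,
                                &sketch_rx_stats_ops);
    }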
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644 (file)
index 0000000..6c8e353
--- /dev/null
@@ -0,0 +1,59 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_general_stats_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos);
+#else
+static ssize_t
+iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       return 0;
+}
+static ssize_t
+iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       return 0;
+}
+static ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644 (file)
index 0000000..cb9baab
--- /dev/null
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-4965.h"
+#include "iwl-io.h"
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
+{
+       u16 count;
+       int ret;
+
+       for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+               /* Request semaphore */
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+               /* See if we got it */
+               ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+                               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                               EEPROM_SEM_TIMEOUT);
+               if (ret >= 0) {
+                       IWL_DEBUG_IO(priv,
+                               "Acquired semaphore after %d tries.\n",
+                               count+1);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
+{
+       iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
+
+int iwl4965_eeprom_check_version(struct iwl_priv *priv)
+{
+       u16 eeprom_ver;
+       u16 calib_ver;
+
+       eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+       calib_ver = iwl_legacy_eeprom_query16(priv,
+                       EEPROM_4965_CALIB_VERSION_OFFSET);
+
+       if (eeprom_ver < priv->cfg->eeprom_ver ||
+           calib_ver < priv->cfg->eeprom_calib_ver)
+               goto err;
+
+       IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+                eeprom_ver, calib_ver);
+
+       return 0;
+err:
+       IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+                 "CALIB=0x%x < 0x%x\n",
+                 eeprom_ver, priv->cfg->eeprom_ver,
+                 calib_ver,  priv->cfg->eeprom_calib_ver);
+       return -EINVAL;
+}
+
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+{
+       const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
+                                       EEPROM_MAC_ADDRESS);
+       memcpy(mac, addr, ETH_ALEN);
+}
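The semaphore helpers above only arbitrate EEPROM ownership between the driver and the uCode; the read path itself is not in this hunk. A hedged sketch of the acquire/read/release pattern the semaphore comment describes follows; the sketch_* name and the placeholder read are assumptions, not the driver's implementation.

    /* Hypothetical caller of the EEPROM semaphore helpers above. */
    static int sketch_read_eeprom_word(struct iwl_priv *priv, u32 offset, u16 *word)
    {
            int ret;

            ret = iwl4965_eeprom_acquire_semaphore(priv);
            if (ret < 0)
                    return ret;     /* uCode kept the semaphore; retry later */

            /* ...issue the EEPROM read pulses for @offset here... */
            *word = 0;              /* placeholder: the real read is not shown */

            iwl4965_eeprom_release_semaphore(priv);
            return 0;
    }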
similarity index 97%
rename from drivers/net/wireless/iwlwifi/iwl-4965-hw.h
rename to drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 9166794..08b189c 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl {
        u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
 } __packed;
 
+
+#define IWL4965_RTC_INST_LOWER_BOUND           (0x000000)
+
+/* RSSI to dBm */
+#define IWL4965_RSSI_OFFSET    44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT  0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN   0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN    0x02
+
+#define IWL4965_DEFAULT_TX_RETRY  15
+
+/* Limit range of txpower output target to be between these values */
+#define IWL4965_TX_POWER_TARGET_POWER_MIN      (0)     /* 0 dBm: 1 milliwatt */
+
+/* EEPROM */
+#define IWL4965_FIRST_AMPDU_QUEUE      10
+
+
 #endif /* !__iwl_4965_hw_h__ */
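PCI_CFG_RETRY_TIMEOUT added above is the config-space offset the driver typically clears during probe so the PCI retry timeout cannot stall the bus; the actual call site is not in this hunk, so the helper below is only an illustrative sketch and its name and the pdev argument are assumptions.

    #include <linux/pci.h>

    /* Hypothetical probe-time helper, not part of this patch. */
    static void sketch_disable_pci_retry_timeout(struct pci_dev *pdev)
    {
            /* Clear the retry timeout so a stalled completion cannot wedge the bus */
            pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
    }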
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644 (file)
index 0000000..26d324e
--- /dev/null
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-4965-led.h"
+
+/* Send led command */
+static int
+iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_LEDS_CMD,
+               .len = sizeof(struct iwl_led_cmd),
+               .data = led_cmd,
+               .flags = CMD_ASYNC,
+               .callback = NULL,
+       };
+       u32 reg;
+
+       reg = iwl_read32(priv, CSR_LED_REG);
+       if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+               iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+       return iwl_legacy_send_cmd(priv, &cmd);
+}
+
+/* Set led register off */
+void iwl4965_led_enable(struct iwl_priv *priv)
+{
+       iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct iwl_led_ops iwl4965_led_ops = {
+       .cmd = iwl4965_send_led_cmd,
+};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644 (file)
index 0000000..5ed3615
--- /dev/null
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_4965_led_h__
+#define __iwl_4965_led_h__
+
+extern const struct iwl_led_ops iwl4965_led_ops;
+void iwl4965_led_enable(struct iwl_priv *priv);
+
+#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644 (file)
index 0000000..5a8a3cc
--- /dev/null
@@ -0,0 +1,1260 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-sta.h"
+
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+                           u8 frame_count, u32 status)
+{
+       if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+               IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+               if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       queue_work(priv->workqueue, &priv->tx_flush);
+       }
+}
+
+/*
+ * EEPROM
+ */
+struct iwl_mod_params iwl4965_mod_params = {
+       .amsdu_size_8K = 1,
+       .restart_fw = 1,
+       /* the rest are 0 by default */
+};
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       unsigned long flags;
+       int i;
+       spin_lock_irqsave(&rxq->lock, flags);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+               /* In the reset function, these buffers may have been allocated
+                * to an SKB, so we need to unmap and free potential storage */
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+       }
+
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
+               rxq->queue[i] = NULL;
+
+       /* Set us so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       u32 rb_size;
+       const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+       u32 rb_timeout = 0;
+
+       if (priv->cfg->mod_params->amsdu_size_8K)
+               rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+       else
+               rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+       /* Stop Rx DMA */
+       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+       /* Reset driver's Rx queue write index */
+       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+       /* Tell device where to find RBD circular buffer in DRAM */
+       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+                          (u32)(rxq->bd_dma >> 8));
+
+       /* Tell device where in DRAM to update its Rx status */
+       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+                          rxq->rb_stts_dma >> 4);
+
+       /* Enable Rx DMA
+        * Direct rx interrupts to hosts
+        * Rx buffer size 4 or 8k
+        * RB timeout 0x10
+        * 256 RBDs
+        */
+       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+                          FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+                          FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+                          FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+                          rb_size |
+                          (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+                          (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+       return 0;
+}
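+
+/*
+ * A note for readers (an inference from the shifts above, not something
+ * stated elsewhere in this file): the RBD base is programmed as
+ * (bd_dma >> 8) and the status write-back address as (rb_stts_dma >> 4),
+ * which presumes 256-byte and 16-byte aligned allocations respectively.
+ */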
+
+static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+               if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+                       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                                              APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+                                              ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                              APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+                              ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int iwl4965_hw_nic_init(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       int ret;
+
+       /* nic_init */
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->cfg->ops->lib->apm_ops.init(priv);
+
+       /* Set interrupt coalescing calibration timer to default (512 usecs) */
+       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl4965_set_pwr_vmain(priv);
+
+       priv->cfg->ops->lib->apm_ops.config(priv);
+
+       /* Allocate the RX queue, or reset if it is already allocated */
+       if (!rxq->bd) {
+               ret = iwl_legacy_rx_queue_alloc(priv);
+               if (ret) {
+                       IWL_ERR(priv, "Unable to initialize Rx queue\n");
+                       return -ENOMEM;
+               }
+       } else
+               iwl4965_rx_queue_reset(priv, rxq);
+
+       iwl4965_rx_replenish(priv);
+
+       iwl4965_rx_init(priv, rxq);
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       rxq->need_update = 1;
+       iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Allocate or reset and init all Tx and Command queues */
+       if (!priv->txq) {
+               ret = iwl4965_txq_ctx_alloc(priv);
+               if (ret)
+                       return ret;
+       } else
+               iwl4965_txq_ctx_reset(priv);
+
+       set_bit(STATUS_INIT, &priv->status);
+
+       return 0;
+}
+
+/**
+ * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
+                                         dma_addr_t dma_addr)
+{
+       return cpu_to_le32((u32)(dma_addr >> 8));
+}
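+
+/*
+ * Example: a 256-byte aligned DMA address of 0x12345600 is stored in the
+ * RBD as 0x00123456 after the >> 8 above; the alignment itself is checked
+ * by the BUG_ON()s in iwl4965_rx_allocate() below.
+ */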
+
+/**
+ * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+               /* The overwritten rxb must be a used one */
+               rxb = rxq->queue[rxq->write];
+               BUG_ON(rxb && rxb->page);
+
+               /* Get next free Rx buffer, remove from free list */
+               element = rxq->rx_free.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+               list_del(element);
+
+               /* Point to Rx buffer via next RBD in circular buffer */
+               rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
+                                                             rxb->page_dma);
+               rxq->queue[rxq->write] = rxb;
+               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+               rxq->free_count--;
+       }
+       spin_unlock_irqrestore(&rxq->lock, flags);
+       /* If the pre-allocated buffer pool is dropping low, schedule to
+        * refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+       /* If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8. */
+       if (rxq->write_actual != (rxq->write & ~0x7)) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               rxq->need_update = 1;
+               spin_unlock_irqrestore(&rxq->lock, flags);
+               iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+       }
+}
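+
+/*
+ * Illustration of the write-pointer rule above (assuming write_actual
+ * tracks the last value handed to the hardware, as the legacy
+ * update_write_ptr helper maintains it): with write = 13, the compared
+ * value is 13 & ~0x7 = 8, so nothing new is written to the device until
+ * three more buffers have been restocked and write reaches 16.
+ */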
+
+/**
+ * iwl4965_rx_allocate - Move all used buffers from rx_used to rx_free
+ *
+ * A receive page is allocated and DMA-mapped for each buffer moved to
+ * rx_free.  The callers (iwl4965_rx_replenish and iwl4965_rx_replenish_now)
+ * then restock the Rx queue via iwl4965_rx_queue_restock(); replenish runs
+ * as a scheduled work item (except during initialization).
+ */
+static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       struct page *page;
+       unsigned long flags;
+       gfp_t gfp_mask = priority;
+
+       while (1) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (priv->hw_params.rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
+               /* Alloc a new receive buffer */
+               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+                                              "order: %d\n",
+                                              priv->hw_params.rx_page_order);
+
+                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                           net_ratelimit())
+                               IWL_CRIT(priv,
+                                       "Failed to alloc_pages with %s. "
+                                       "Only %u free buffers remaining.\n",
+                                        priority == GFP_ATOMIC ?
+                                                "GFP_ATOMIC" : "GFP_KERNEL",
+                                        rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
+                       return;
+               }
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       __free_pages(page, priv->hw_params.rx_page_order);
+                       return;
+               }
+               element = rxq->rx_used.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+               list_del(element);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               BUG_ON(rxb->page);
+               rxb->page = page;
+               /* Get physical address of the RB */
+               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+               /* dma address must be no more than 36 bits */
+               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+               /* and also 256 byte aligned! */
+               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               list_add_tail(&rxb->list, &rxq->rx_free);
+               rxq->free_count++;
+               priv->alloc_rxb_page++;
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+       }
+}
+
+void iwl4965_rx_replenish(struct iwl_priv *priv)
+{
+       unsigned long flags;
+
+       iwl4965_rx_allocate(priv, GFP_KERNEL);
+
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl4965_rx_queue_restock(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwl4965_rx_replenish_now(struct iwl_priv *priv)
+{
+       iwl4965_rx_allocate(priv, GFP_ATOMIC);
+
+       iwl4965_rx_queue_restock(priv);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to
+ * NULL.  This free routine walks the pool entries and, for every entry
+ * whose page is non-NULL, unmaps and frees it.
+ */
+ */
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       int i;
+       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+       }
+
+       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+       dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+                         rxq->rb_stts, rxq->rb_stts_dma);
+       rxq->bd = NULL;
+       rxq->rb_stts  = NULL;
+}
+
+int iwl4965_rxq_stop(struct iwl_priv *priv)
+{
+
+       /* stop Rx DMA */
+       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+       iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+                           FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+       return 0;
+}
+
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+       int idx = 0;
+       int band_offset = 0;
+
+       /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = (rate_n_flags & 0xff);
+               return idx;
+       /* Legacy rate format, search for match in table */
+       } else {
+               if (band == IEEE80211_BAND_5GHZ)
+                       band_offset = IWL_FIRST_OFDM_RATE;
+               for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+                       if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+                               return idx - band_offset;
+       }
+
+       return -1;
+}
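+
+/*
+ * Example of the mapping above: an HT rate reports its MCS number (the
+ * low byte of rate_n_flags) directly, while a legacy 5 GHz rate reports
+ * its table index relative to IWL_FIRST_OFDM_RATE, so mac80211 sees the
+ * OFDM rates starting at index 0 on that band.
+ */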
+
+static int iwl4965_calc_rssi(struct iwl_priv *priv,
+                            struct iwl_rx_phy_res *rx_resp)
+{
+       /* data from PHY/DSP regarding signal strength, etc.,
+        *   contents are always there, not configurable by host.  */
+       struct iwl4965_rx_non_cfg_phy *ncphy =
+           (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+       u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
+                       >> IWL49_AGC_DB_POS;
+
+       u32 valid_antennae =
+           (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+                       >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+       u8 max_rssi = 0;
+       u32 i;
+
+       /* Find max rssi among 3 possible receivers.
+        * These values are measured by the digital signal processor (DSP).
+        * They should stay fairly constant even as the signal strength varies,
+        *   if the radio's automatic gain control (AGC) is working right.
+        * AGC value (see below) will provide the "interesting" info. */
+       for (i = 0; i < 3; i++)
+               if (valid_antennae & (1 << i))
+                       max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+       IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+               ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+               max_rssi, agc);
+
+       /* dBm = max_rssi dB - agc dB - constant.
+        * Higher AGC (higher radio gain) means lower signal. */
+       return max_rssi - agc - IWL4965_RSSI_OFFSET;
+}
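+
+/*
+ * Worked example for the return above (register values are made up, only
+ * the arithmetic is real): max_rssi = 80 dB, agc = 90 dB and taking
+ * IWL4965_RSSI_OFFSET as 44 gives 80 - 90 - 44 = -54 dBm.
+ */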
+
+
+static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+       u32 decrypt_out = 0;
+
+       if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+                                       RX_RES_STATUS_STATION_FOUND)
+               decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+                               RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+       decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+       /* packet was not encrypted */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+                                       RX_RES_STATUS_SEC_TYPE_NONE)
+               return decrypt_out;
+
+       /* packet was encrypted with unknown alg */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+                                       RX_RES_STATUS_SEC_TYPE_ERR)
+               return decrypt_out;
+
+       /* decryption was not done in HW */
+       if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+                                       RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+               return decrypt_out;
+
+       switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               /* alg is CCM: check MIC only */
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+                       /* Bad MIC */
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+               break;
+
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+                       /* Bad TTAK */
+                       decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+                       break;
+               }
+               /* fall through if TTAK OK */
+       default:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+               break;
+       }
+
+       IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
+                                       decrypt_in, decrypt_out);
+
+       return decrypt_out;
+}
+
+static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
+                                       struct ieee80211_hdr *hdr,
+                                       u16 len,
+                                       u32 ampdu_status,
+                                       struct iwl_rx_mem_buffer *rxb,
+                                       struct ieee80211_rx_status *stats)
+{
+       struct sk_buff *skb;
+       __le16 fc = hdr->frame_control;
+
+       /* We only process data packets if the interface is open */
+       if (unlikely(!priv->is_open)) {
+               IWL_DEBUG_DROP_LIMIT(priv,
+                   "Dropping packet while interface is not open.\n");
+               return;
+       }
+
+       /* In case of HW accelerated crypto and bad decryption, drop */
+       if (!priv->cfg->mod_params->sw_crypto &&
+           iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+               return;
+
+       skb = dev_alloc_skb(128);
+       if (!skb) {
+               IWL_ERR(priv, "dev_alloc_skb failed\n");
+               return;
+       }
+
+       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+       iwl_legacy_update_stats(priv, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(priv->hw, skb);
+       priv->alloc_rxb_page--;
+       rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct ieee80211_hdr *header;
+       struct ieee80211_rx_status rx_status;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_rx_phy_res *phy_res;
+       __le32 rx_pkt_status;
+       struct iwl_rx_mpdu_res_start *amsdu;
+       u32 len;
+       u32 ampdu_status;
+       u32 rate_n_flags;
+
+       /**
+        * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+        *      REPLY_RX: physical layer info is in this buffer
+        *      REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+        *              command and cached in priv->last_phy_res
+        *
+        * Here we set up local variables depending on which command is
+        * received.
+        */
+       if (pkt->hdr.cmd == REPLY_RX) {
+               phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+                               + phy_res->cfg_phy_cnt);
+
+               len = le16_to_cpu(phy_res->byte_count);
+               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+                               phy_res->cfg_phy_cnt + len);
+               ampdu_status = le32_to_cpu(rx_pkt_status);
+       } else {
+               if (!priv->_4965.last_phy_res_valid) {
+                       IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+                       return;
+               }
+               phy_res = &priv->_4965.last_phy_res;
+               amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+               len = le16_to_cpu(amsdu->byte_count);
+               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+               ampdu_status = iwl4965_translate_rx_status(priv,
+                               le32_to_cpu(rx_pkt_status));
+       }
+
+       if (unlikely(phy_res->cfg_phy_cnt > 20)) {
+               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+                               phy_res->cfg_phy_cnt);
+               return;
+       }
+
+       if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+           !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+                               le32_to_cpu(rx_pkt_status));
+               return;
+       }
+
+       /* This will be used in several places later */
+       rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+       /* rx_status carries information about the packet to mac80211 */
+       rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+                                                       rx_status.band);
+       rx_status.rate_idx =
+               iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+       rx_status.flag = 0;
+
+       /* The TSF value isn't reliable; as a workaround we don't propagate
+        * it to mac80211 */
+       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+       priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+       rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
+
+       iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
+       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+               rx_status.signal, (unsigned long long)rx_status.mactime);
+
+       /*
+        * "antenna number"
+        *
+        * It seems that the antenna field in the phy flags value
+        * is actually a bit field. This is undefined by radiotap,
+        * it wants an actual antenna number but I always get "7"
+        * for most legacy frames I receive indicating that the
+        * same frame was received on all three RX chains.
+        *
+        * I think this field should be removed in favor of a
+        * new 802.11n radiotap field "RX chains" that is defined
+        * as a bitmask.
+        */
+       rx_status.antenna =
+               (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+       /* set the preamble flag if appropriate */
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       /* Set up the HT phy flags */
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               rx_status.flag |= RX_FLAG_HT;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               rx_status.flag |= RX_FLAG_40MHZ;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               rx_status.flag |= RX_FLAG_SHORT_GI;
+
+       iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+                                   rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+                           struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       priv->_4965.last_phy_res_valid = true;
+       memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
+              sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
+                                          struct ieee80211_vif *vif,
+                                          enum ieee80211_band band,
+                                          struct iwl_scan_channel *scan_ch)
+{
+       const struct ieee80211_supported_band *sband;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added = 0;
+       u16 channel = 0;
+
+       sband = iwl_get_hw_mode(priv, band);
+       if (!sband) {
+               IWL_ERR(priv, "invalid band\n");
+               return added;
+       }
+
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       channel = iwl_legacy_get_single_channel_number(priv, band);
+       if (channel) {
+               scan_ch->channel = cpu_to_le16(channel);
+               scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+               /* Set txpower levels to defaults */
+               scan_ch->dsp_atten = 110;
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+               added++;
+       } else
+               IWL_ERR(priv, "no valid channel found\n");
+       return added;
+}
+
+static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
+                                    struct ieee80211_vif *vif,
+                                    enum ieee80211_band band,
+                                    u8 is_active, u8 n_probes,
+                                    struct iwl_scan_channel *scan_ch)
+{
+       struct ieee80211_channel *chan;
+       const struct ieee80211_supported_band *sband;
+       const struct iwl_channel_info *ch_info;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added, i;
+       u16 channel;
+
+       sband = iwl_get_hw_mode(priv, band);
+       if (!sband)
+               return 0;
+
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+               chan = priv->scan_request->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               channel = chan->hw_value;
+               scan_ch->channel = cpu_to_le16(channel);
+
+               ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+               if (!iwl_legacy_is_channel_valid(ch_info)) {
+                       IWL_DEBUG_SCAN(priv,
+                                "Channel %d is INVALID for this band.\n",
+                                       channel);
+                       continue;
+               }
+
+               if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
+                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+                       scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+               else
+                       scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+               if (n_probes)
+                       scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+               /* Set txpower levels to defaults */
+               scan_ch->dsp_atten = 110;
+
+               /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+                * power level:
+                * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+                */
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+               IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+                              channel, le32_to_cpu(scan_ch->type),
+                              (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+                               "ACTIVE" : "PASSIVE",
+                              (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+                              active_dwell : passive_dwell);
+
+               scan_ch++;
+               added++;
+       }
+
+       IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+       return added;
+}
+
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_SCAN_CMD,
+               .len = sizeof(struct iwl_scan_cmd),
+               .flags = CMD_SIZE_HUGE,
+       };
+       struct iwl_scan_cmd *scan;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       u32 rate_flags = 0;
+       u16 cmd_len;
+       u16 rx_chain = 0;
+       enum ieee80211_band band;
+       u8 n_probes = 0;
+       u8 rx_ant = priv->hw_params.valid_rx_ant;
+       u8 rate;
+       bool is_active = false;
+       int  chan_mod;
+       u8 active_chains;
+       u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (vif)
+               ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       if (!priv->scan_cmd) {
+               priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+                                        IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+               if (!priv->scan_cmd) {
+                       IWL_DEBUG_SCAN(priv,
+                                      "fail to allocate memory for scan\n");
+                       return -ENOMEM;
+               }
+       }
+       scan = priv->scan_cmd;
+       memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+       scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+       scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+       if (iwl_legacy_is_any_associated(priv)) {
+               u16 interval = 0;
+               u32 extra;
+               u32 suspend_time = 100;
+               u32 scan_suspend_time = 100;
+
+               IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+               if (priv->is_internal_short_scan)
+                       interval = 0;
+               else
+                       interval = vif->bss_conf.beacon_int;
+
+               scan->suspend_time = 0;
+               scan->max_out_time = cpu_to_le32(200 * 1024);
+               if (!interval)
+                       interval = suspend_time;
+
+               extra = (suspend_time / interval) << 22;
+               scan_suspend_time = (extra |
+                   ((suspend_time % interval) * 1024));
+               scan->suspend_time = cpu_to_le32(scan_suspend_time);
+               IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+                              scan_suspend_time, interval);
+       }
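+
+       /*
+        * Packing example for scan_suspend_time above (illustrative only;
+        * the units are whatever the firmware expects): suspend_time = 100
+        * with a beacon interval of 75 gives extra = (100 / 75) << 22 =
+        * 0x400000 and a remainder term of (100 % 75) * 1024 = 0x6400, so
+        * scan_suspend_time = 0x406400: quotient in the high bits, scaled
+        * remainder in the low bits.
+        */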
+
+       if (priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+       } else if (priv->scan_request->n_ssids) {
+               int i, p = 0;
+               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+               for (i = 0; i < priv->scan_request->n_ssids; i++) {
+                       /* always does wildcard anyway */
+                       if (!priv->scan_request->ssids[i].ssid_len)
+                               continue;
+                       scan->direct_scan[p].id = WLAN_EID_SSID;
+                       scan->direct_scan[p].len =
+                               priv->scan_request->ssids[i].ssid_len;
+                       memcpy(scan->direct_scan[p].ssid,
+                              priv->scan_request->ssids[i].ssid,
+                              priv->scan_request->ssids[i].ssid_len);
+                       n_probes++;
+                       p++;
+               }
+               is_active = true;
+       } else
+               IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+       scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       switch (priv->scan_band) {
+       case IEEE80211_BAND_2GHZ:
+               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+               chan_mod = le32_to_cpu(
+                       priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+                                               RXON_FLG_CHANNEL_MODE_MSK)
+                                      >> RXON_FLG_CHANNEL_MODE_POS;
+               if (chan_mod == CHANNEL_MODE_PURE_40) {
+                       rate = IWL_RATE_6M_PLCP;
+               } else {
+                       rate = IWL_RATE_1M_PLCP;
+                       rate_flags = RATE_MCS_CCK_MSK;
+               }
+               break;
+       case IEEE80211_BAND_5GHZ:
+               rate = IWL_RATE_6M_PLCP;
+               break;
+       default:
+               IWL_WARN(priv, "Invalid scan band\n");
+               return -EIO;
+       }
+
+       /*
+        * If active scanning is requested but a certain channel is
+        * marked passive, we can do active scanning if we detect
+        * transmissions.
+        *
+        * There is an issue with some firmware versions that triggers
+        * a sysassert on a "good CRC threshold" of zero (== disabled),
+        * on a radar channel even though this means that we should NOT
+        * send probes.
+        *
+        * The "good CRC threshold" is the number of frames that we
+        * need to receive during our dwell time on a channel before
+        * sending out probes -- setting this to a huge value will
+        * mean we never reach it, but at the same time work around
+        * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+        * here instead of IWL_GOOD_CRC_TH_DISABLED.
+        */
+       scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+                                       IWL_GOOD_CRC_TH_NEVER;
+
+       band = priv->scan_band;
+
+       if (priv->cfg->scan_rx_antennas[band])
+               rx_ant = priv->cfg->scan_rx_antennas[band];
+
+       if (priv->cfg->scan_tx_antennas[band])
+               scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
+
+       priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
+                                               priv->scan_tx_ant[band],
+                                                   scan_tx_antennas);
+       rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
+       scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
+
+       /* In power save mode use one chain, otherwise use all chains */
+       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+               /* rx_ant has been set to all valid chains previously */
+               active_chains = rx_ant &
+                               ((u8)(priv->chain_noise_data.active_chains));
+               if (!active_chains)
+                       active_chains = rx_ant;
+
+               IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+                               priv->chain_noise_data.active_chains);
+
+               rx_ant = iwl4965_first_antenna(active_chains);
+       }
+
+       /* MIMO is not used here, but value is required */
+       rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+       rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+       scan->rx_chain = cpu_to_le16(rx_chain);
+       if (!priv->is_internal_short_scan) {
+               cmd_len = iwl_legacy_fill_probe_req(priv,
+                                       (struct ieee80211_mgmt *)scan->data,
+                                       vif->addr,
+                                       priv->scan_request->ie,
+                                       priv->scan_request->ie_len,
+                                       IWL_MAX_SCAN_SIZE - sizeof(*scan));
+       } else {
+               /* use bcast addr, will not be transmitted but must be valid */
+               cmd_len = iwl_legacy_fill_probe_req(priv,
+                                       (struct ieee80211_mgmt *)scan->data,
+                                       iwlegacy_bcast_addr, NULL, 0,
+                                       IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+       }
+       scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+       scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+                              RXON_FILTER_BCON_AWARE_MSK);
+
+       if (priv->is_internal_short_scan) {
+               scan->channel_count =
+                       iwl4965_get_single_channel_for_scan(priv, vif, band,
+                               (void *)&scan->data[le16_to_cpu(
+                               scan->tx_cmd.len)]);
+       } else {
+               scan->channel_count =
+                       iwl4965_get_channels_for_scan(priv, vif, band,
+                               is_active, n_probes,
+                               (void *)&scan->data[le16_to_cpu(
+                               scan->tx_cmd.len)]);
+       }
+       if (scan->channel_count == 0) {
+               IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+               return -EIO;
+       }
+
+       cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+           scan->channel_count * sizeof(struct iwl_scan_channel);
+       cmd.data = scan;
+       scan->len = cpu_to_le16(cmd.len);
+
+       set_bit(STATUS_SCAN_HW, &priv->status);
+
+       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (ret)
+               clear_bit(STATUS_SCAN_HW, &priv->status);
+
+       return ret;
+}
+
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+                              struct ieee80211_vif *vif, bool add)
+{
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       if (add)
+               return iwl4965_add_bssid_station(priv, vif_priv->ctx,
+                                               vif->bss_conf.bssid,
+                                               &vif_priv->ibss_bssid_sta_id);
+       return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+                                 vif->bss_conf.bssid);
+}
+
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+                           int sta_id, int tid, int freed)
+{
+       lockdep_assert_held(&priv->sta_lock);
+
+       if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+               priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+       else {
+               IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+                       priv->stations[sta_id].tid[tid].tfds_in_queue,
+                       freed);
+               priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+       }
+}
+
+#define IWL_TX_QUEUE_MSK       0xfffff
+
+static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
+{
+       return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+              priv->current_ht_config.single_chain_sufficient;
+}
+
+#define IWL_NUM_RX_CHAINS_MULTIPLE     3
+#define IWL_NUM_RX_CHAINS_SINGLE       2
+#define IWL_NUM_IDLE_CHAINS_DUAL       2
+#define IWL_NUM_IDLE_CHAINS_SINGLE     1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity.  Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
+{
+       /* # of Rx chains to use when expecting MIMO. */
+       if (iwl4965_is_single_rx_stream(priv))
+               return IWL_NUM_RX_CHAINS_SINGLE;
+       else
+               return IWL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless device support spatial
+ * multiplexing power save, use the active count for rx chain count.
+ */
+static int
+iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+{
+       /* # Rx chains when idling, depending on SMPS mode */
+       switch (priv->current_ht_config.smps) {
+       case IEEE80211_SMPS_STATIC:
+       case IEEE80211_SMPS_DYNAMIC:
+               return IWL_NUM_IDLE_CHAINS_SINGLE;
+       case IEEE80211_SMPS_OFF:
+               return active_cnt;
+       default:
+               WARN(1, "invalid SMPS mode %d",
+                    priv->current_ht_config.smps);
+               return active_cnt;
+       }
+}
+
+/* up to 4 chains */
+static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
+{
+       u8 res;
+       res = (chain_bitmap & BIT(0)) >> 0;
+       res += (chain_bitmap & BIT(1)) >> 1;
+       res += (chain_bitmap & BIT(2)) >> 2;
+       res += (chain_bitmap & BIT(3)) >> 3;
+       return res;
+}
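+
+/*
+ * Example: a chain bitmap of 0x5 (bits 0 and 2 set) counts as 2 chains
+ * and 0x7 as 3; the helper is simply a popcount over the low four bits.
+ */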
+
+/**
+ * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for scan command ... it puts data in wrong place.
+ */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       bool is_single = iwl4965_is_single_rx_stream(priv);
+       bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+       u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+       u32 active_chains;
+       u16 rx_chain;
+
+       /* Tell uCode which antennas are actually connected.
+        * Before first association, we assume all antennas are connected.
+        * Just after first association, iwl4965_chain_noise_calibration()
+        *    checks which antennas actually *are* connected. */
+       if (priv->chain_noise_data.active_chains)
+               active_chains = priv->chain_noise_data.active_chains;
+       else
+               active_chains = priv->hw_params.valid_rx_ant;
+
+       rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+       /* How many receivers should we use? */
+       active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
+       idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
+
+
+       /* correct rx chain count according hw settings
+        * and chain noise calibration
+        */
+       valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
+       if (valid_rx_cnt < active_rx_cnt)
+               active_rx_cnt = valid_rx_cnt;
+
+       if (valid_rx_cnt < idle_rx_cnt)
+               idle_rx_cnt = valid_rx_cnt;
+
+       rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+       rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
+
+       ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+       if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
+               ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+       else
+               ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+       IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
+                       ctx->staging.rx_chain,
+                       active_rx_cnt, idle_rx_cnt);
+
+       WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+               active_rx_cnt < idle_rx_cnt);
+}
+
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
+{
+       int i;
+       u8 ind = ant;
+
+       for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+               ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
+               if (valid & BIT(ind))
+                       return ind;
+       }
+       return ant;
+}
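+
+/*
+ * Example: with ant = 0 and valid = BIT(1), the first loop iteration
+ * advances ind to 1 and returns it; if 'valid' has no bits set at all,
+ * the original 'ant' value is returned unchanged.
+ */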
+
+static const char *iwl4965_get_fh_string(int cmd)
+{
+       switch (cmd) {
+       IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+       IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+       IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+       IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+       IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+       IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+       IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+       IWL_CMD(FH_TSSR_TX_STATUS_REG);
+       IWL_CMD(FH_TSSR_TX_ERROR_REG);
+       default:
+               return "UNKNOWN";
+       }
+}
+
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+       int i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       int pos = 0;
+       size_t bufsz = 0;
+#endif
+       static const u32 fh_tbl[] = {
+               FH_RSCSR_CHNL0_STTS_WPTR_REG,
+               FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+               FH_RSCSR_CHNL0_WPTR,
+               FH_MEM_RCSR_CHNL0_CONFIG_REG,
+               FH_MEM_RSSR_SHARED_CTRL_REG,
+               FH_MEM_RSSR_RX_STATUS_REG,
+               FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+               FH_TSSR_TX_STATUS_REG,
+               FH_TSSR_TX_ERROR_REG
+       };
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (display) {
+               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+               pos += scnprintf(*buf + pos, bufsz - pos,
+                               "FH register values:\n");
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+                       pos += scnprintf(*buf + pos, bufsz - pos,
+                               "  %34s: 0X%08x\n",
+                               iwl4965_get_fh_string(fh_tbl[i]),
+                               iwl_legacy_read_direct32(priv, fh_tbl[i]));
+               }
+               return pos;
+       }
+#endif
+       IWL_ERR(priv, "FH register values:\n");
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+               IWL_ERR(priv, "  %34s: 0X%08x\n",
+                       iwl4965_get_fh_string(fh_tbl[i]),
+                       iwl_legacy_read_direct32(priv, fh_tbl[i]));
+       }
+       return 0;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644 (file)
index 0000000..31ac672
--- /dev/null
@@ -0,0 +1,2870 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "iwl-dev.h"
+#include "iwl-sta.h"
+#include "iwl-core.h"
+#include "iwl-4965.h"
+
+#define IWL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY      1
+#define IWL_HT_NUMBER_TRY   3
+
+#define IWL_RATE_MAX_WINDOW            62      /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH                6       /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH                8       /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX            15
+/* max time to accum history: 3 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+       IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+       IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+       IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+       /*ANT_NONE -> */ ANT_NONE,
+       /*ANT_A    -> */ ANT_B,
+       /*ANT_B    -> */ ANT_C,
+       /*ANT_AB   -> */ ANT_BC,
+       /*ANT_C    -> */ ANT_A,
+       /*ANT_AC   -> */ ANT_AB,
+       /*ANT_BC   -> */ ANT_AC,
+       /*ANT_ABC  -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
+                                   IWL_RATE_SISO_##s##M_PLCP, \
+                                   IWL_RATE_MIMO2_##s##M_PLCP,\
+                                   IWL_RATE_##r##M_IEEE,      \
+                                   IWL_RATE_##ip##M_INDEX,    \
+                                   IWL_RATE_##in##M_INDEX,    \
+                                   IWL_RATE_##rp##M_INDEX,    \
+                                   IWL_RATE_##rn##M_INDEX,    \
+                                   IWL_RATE_##pp##M_INDEX,    \
+                                   IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
+       IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
+       IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
+       IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
+       IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
+       IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
+       IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
+       IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
+       IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
+       IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
+       IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
+       IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
+       IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+       IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
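+
+/*
+ * For reference, the 12 Mbps line above expands to an entry whose legacy,
+ * SISO and MIMO2 PLCP values are the 12M ones, with every prev/next index
+ * parameter pointing at the 11M and 18M entries respectively, which is a
+ * quick way to sanity-check the parameter ordering documented above.
+ */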
+
+static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+       int idx = 0;
+
+       /* HT rate format */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = (rate_n_flags & 0xff);
+
+               if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+                       idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+               idx += IWL_FIRST_OFDM_RATE;
+               /* skip 9M, not supported in HT */
+               if (idx >= IWL_RATE_9M_INDEX)
+                       idx += 1;
+               if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+                       return idx;
+
+       /* legacy rate format, search for match in table */
+       } else {
+               for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
+                       if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+                               return idx;
+       }
+
+       return -1;
+}
+
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+                                  struct sk_buff *skb,
+                                  struct ieee80211_sta *sta,
+                                  struct iwl_lq_sta *lq_sta);
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
+                                       bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index);
+#else
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *     1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
+       {0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
+       {0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
+       {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+       {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+       {0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+       {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+       {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+       {0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+       {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+       {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+       {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+       {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
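+
+/*
+ * Example of reading these tables (editor's note, illustrative only):
+ * with the rate order listed above (1, 2, 5.5, 11, 6, 9, 12, ...),
+ * expected_tpt_legacy[6] == 72 is the metric for legacy 12 Mbps, and
+ * expected_tpt_siso40MHz[1][6] == 135 is the metric for the same rate
+ * index with SISO, 40 MHz and short guard interval (row 1 = SGI).
+ */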
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+       {  "1", "BPSK DSSS"},
+       {  "2", "QPSK DSSS"},
+       {"5.5", "BPSK CCK"},
+       { "11", "QPSK CCK"},
+       {  "6", "BPSK 1/2"},
+       {  "9", "BPSK 1/2"},
+       { "12", "QPSK 1/2"},
+       { "18", "QPSK 3/4"},
+       { "24", "16QAM 1/2"},
+       { "36", "16QAM 3/4"},
+       { "48", "64QAM 2/3"},
+       { "54", "64QAM 3/4"},
+       { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM   (8)
+
+static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
+{
+       return (u8)(rate_n_flags & 0xFF);
+}
+
+static void
+iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+       window->data = 0;
+       window->success_counter = 0;
+       window->success_ratio = IWL_INVALID_VALUE;
+       window->counter = 0;
+       window->average_tpt = IWL_INVALID_VALUE;
+       window->stamp = 0;
+}
+
+static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+       return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ *     Remove old data from the statistics: all data older than
+ *     TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+       /* The oldest age we want to keep */
+       u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+       while (tl->queue_count &&
+              (tl->time_stamp < oldest_time)) {
+               tl->total -= tl->packet_count[tl->head];
+               tl->packet_count[tl->head] = 0;
+               tl->time_stamp += TID_QUEUE_CELL_SPACING;
+               tl->queue_count--;
+               tl->head++;
+               if (tl->head >= TID_QUEUE_MAX_SIZE)
+                       tl->head = 0;
+       }
+}
+
+/*
+ *     Increment the traffic load value for this tid, and remove
+ *     any old values that have aged beyond the tracked time window.
+ */
+static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+                          struct ieee80211_hdr *hdr)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 index;
+       struct iwl_traffic_load *tl = NULL;
+       u8 tid;
+
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       } else
+               return MAX_TID_COUNT;
+
+       if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+               return MAX_TID_COUNT;
+
+       tl = &lq_data->load[tid];
+
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       /* Happens only for the first packet. Initialize the data */
+       if (!(tl->queue_count)) {
+               tl->total = 1;
+               tl->time_stamp = curr_time;
+               tl->queue_count = 1;
+               tl->head = 0;
+               tl->packet_count[0] = 1;
+               return MAX_TID_COUNT;
+       }
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       index = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+       if (index >= TID_QUEUE_MAX_SIZE)
+               iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+       index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+       tl->packet_count[index] = tl->packet_count[index] + 1;
+       tl->total = tl->total + 1;
+
+       if ((index + 1) > tl->queue_count)
+               tl->queue_count = index + 1;
+
+       return tid;
+}
+
+/*
+ *     Get the traffic load value for tid.
+ */
+static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 index;
+       struct iwl_traffic_load *tl = NULL;
+
+       if (tid >= TID_MAX_LOAD_COUNT)
+               return 0;
+
+       tl = &(lq_data->load[tid]);
+
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       if (!(tl->queue_count))
+               return 0;
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       index = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+       if (index >= TID_QUEUE_MAX_SIZE)
+               iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+       return tl->total;
+}
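+
+/*
+ * Editor's note, sketch of the bookkeeping above: the traffic load is
+ * kept in a circular buffer of TID_QUEUE_MAX_SIZE cells, each covering
+ * TID_QUEUE_CELL_SPACING milliseconds.  A packet arriving time_diff ms
+ * after tl->time_stamp lands in cell
+ *
+ *     index = (tl->head + time_diff / TID_QUEUE_CELL_SPACING)
+ *                                             % TID_QUEUE_MAX_SIZE;
+ *
+ * and tl->total, the sum over all cells, is the recent packet count
+ * that iwl4965_rs_tl_get_load() reports as the aggregation-load estimate.
+ */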
+
+static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+                                     struct iwl_lq_sta *lq_data, u8 tid,
+                                     struct ieee80211_sta *sta)
+{
+       int ret = -EAGAIN;
+       u32 load;
+
+       load = iwl4965_rs_tl_get_load(lq_data, tid);
+
+       if (load > IWL_AGG_LOAD_THRESHOLD) {
+               IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
+                               sta->addr, tid);
+               ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+               if (ret == -EAGAIN) {
+                       /*
+                        * The driver and mac80211 are out of sync; this
+                        * might be caused by reloading the firmware.
+                        * Stop the Tx BA session here.
+                        */
+                       IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
+                               tid);
+                       ieee80211_stop_tx_ba_session(sta, tid);
+               }
+       } else {
+               IWL_ERR(priv, "Aggregation not enabled for tid %d "
+                       "because load = %u\n", tid, load);
+       }
+       return ret;
+}
+
+static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
+                             struct iwl_lq_sta *lq_data,
+                             struct ieee80211_sta *sta)
+{
+       if (tid < TID_MAX_LOAD_COUNT)
+               iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+       else
+               IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
+                       tid, TID_MAX_LOAD_COUNT);
+}
+
+static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+       return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+              !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+              !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an iwl_scale_tbl_info, wrapping a
+ * NULL pointer check on its expected_tpt table.
+ */
+static s32
+iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+       if (tbl->expected_tpt)
+               return tbl->expected_tpt[rs_index];
+       return 0;
+}
+
+/**
+ * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate.  window->data contains the bitmask of successful
+ * packets.
+ */
+static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes)
+{
+       struct iwl_rate_scale_data *window = NULL;
+       static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+       s32 fail_count, tpt;
+
+       if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+               return -EINVAL;
+
+       /* Select window for current tx bit rate */
+       window = &(tbl->win[scale_index]);
+
+       /* Get expected throughput */
+       tpt = iwl4965_get_expected_tpt(tbl, scale_index);
+
+       /*
+        * Keep track of only the latest 62 tx frame attempts in this rate's
+        * history window; anything older isn't really relevant any more.
+        * If we have filled up the sliding window, drop the oldest attempt;
+        * if the oldest attempt (highest bit in bitmap) shows "success",
+        * subtract "1" from the success counter (this is the main reason
+        * we keep these bitmaps!).
+        */
+       while (attempts > 0) {
+               if (window->counter >= IWL_RATE_MAX_WINDOW) {
+
+                       /* remove earliest */
+                       window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+                       if (window->data & mask) {
+                               window->data &= ~mask;
+                               window->success_counter--;
+                       }
+               }
+
+               /* Increment frames-attempted counter */
+               window->counter++;
+
+               /* Shift bitmap by one frame to throw away oldest history */
+               window->data <<= 1;
+
+               /* Mark the most recent #successes attempts as successful */
+               if (successes > 0) {
+                       window->success_counter++;
+                       window->data |= 0x1;
+                       successes--;
+               }
+
+               attempts--;
+       }
+
+       /* Calculate current success ratio, avoid divide-by-0! */
+       if (window->counter > 0)
+               window->success_ratio = 128 * (100 * window->success_counter)
+                                       / window->counter;
+       else
+               window->success_ratio = IWL_INVALID_VALUE;
+
+       fail_count = window->counter - window->success_counter;
+
+       /* Calculate average throughput, if we have enough history. */
+       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+               window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+       else
+               window->average_tpt = IWL_INVALID_VALUE;
+
+       /* Tag this window as having been updated */
+       window->stamp = jiffies;
+
+       return 0;
+}
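+
+/*
+ * Worked example of the fixed-point scaling above (editor's note,
+ * illustrative only): with 40 successes out of counter = 50 attempts
+ * and an expected throughput tpt = 72 for this rate,
+ *
+ *     success_ratio = 128 * (100 * 40) / 50    = 10240  (80% * 128)
+ *     average_tpt   = (10240 * 72 + 64) / 128  =  5760  (80% of 72,
+ *                                                        scaled by 100)
+ *
+ * i.e. success_ratio is "percent * 128" and average_tpt is
+ * "expected throughput * 100".
+ */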
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
+                                struct iwl_scale_tbl_info *tbl,
+                                int index, u8 use_green)
+{
+       u32 rate_n_flags = 0;
+
+       if (is_legacy(tbl->lq_type)) {
+               rate_n_flags = iwlegacy_rates[index].plcp;
+               if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+                       rate_n_flags |= RATE_MCS_CCK_MSK;
+
+       } else if (is_Ht(tbl->lq_type)) {
+               if (index > IWL_LAST_OFDM_RATE) {
+                       IWL_ERR(priv, "Invalid HT rate index %d\n", index);
+                       index = IWL_LAST_OFDM_RATE;
+               }
+               rate_n_flags = RATE_MCS_HT_MSK;
+
+               if (is_siso(tbl->lq_type))
+                       rate_n_flags |= iwlegacy_rates[index].plcp_siso;
+               else
+                       rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
+       } else {
+               IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+       }
+
+       rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+                                                    RATE_MCS_ANT_ABC_MSK);
+
+       if (is_Ht(tbl->lq_type)) {
+               if (tbl->is_ht40) {
+                       if (tbl->is_dup)
+                               rate_n_flags |= RATE_MCS_DUP_MSK;
+                       else
+                               rate_n_flags |= RATE_MCS_HT40_MSK;
+               }
+               if (tbl->is_SGI)
+                       rate_n_flags |= RATE_MCS_SGI_MSK;
+
+               if (use_green) {
+                       rate_n_flags |= RATE_MCS_GF_MSK;
+                       if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+                               rate_n_flags &= ~RATE_MCS_SGI_MSK;
+                               IWL_ERR(priv, "GF was set with SGI:SISO\n");
+                       }
+               }
+       }
+       return rate_n_flags;
+}
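+
+/*
+ * Example of the resulting layout (editor's note, illustrative only):
+ * a legacy CCK entry ends up as
+ *
+ *     plcp | RATE_MCS_CCK_MSK | (ant_type << RATE_MCS_ANT_POS)
+ *
+ * while an HT40 SISO entry with short guard interval ends up as
+ *
+ *     RATE_MCS_HT_MSK | plcp_siso | RATE_MCS_HT40_MSK |
+ *     RATE_MCS_SGI_MSK | (ant_type << RATE_MCS_ANT_POS)
+ *
+ * which is what iwl4965_rs_get_tbl_info_from_mcs() below decodes again.
+ */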
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+                                   enum ieee80211_band band,
+                                   struct iwl_scale_tbl_info *tbl,
+                                   int *rate_idx)
+{
+       u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+       u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
+       u8 mcs;
+
+       memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+       *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
+
+       if (*rate_idx  == IWL_RATE_INVALID) {
+               *rate_idx = -1;
+               return -EINVAL;
+       }
+       tbl->is_SGI = 0;        /* default legacy setup */
+       tbl->is_ht40 = 0;
+       tbl->is_dup = 0;
+       tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+       tbl->lq_type = LQ_NONE;
+       tbl->max_search = IWL_MAX_SEARCH;
+
+       /* legacy rate format */
+       if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+               if (iwl4965_num_of_ant == 1) {
+                       if (band == IEEE80211_BAND_5GHZ)
+                               tbl->lq_type = LQ_A;
+                       else
+                               tbl->lq_type = LQ_G;
+               }
+       /* HT rate format */
+       } else {
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       tbl->is_SGI = 1;
+
+               if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+                   (rate_n_flags & RATE_MCS_DUP_MSK))
+                       tbl->is_ht40 = 1;
+
+               if (rate_n_flags & RATE_MCS_DUP_MSK)
+                       tbl->is_dup = 1;
+
+               mcs = iwl4965_rs_extract_rate(rate_n_flags);
+
+               /* SISO */
+               if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+                       if (iwl4965_num_of_ant == 1)
+                               tbl->lq_type = LQ_SISO; /*else NONE*/
+               /* MIMO2 */
+               } else {
+                       if (iwl4965_num_of_ant == 2)
+                               tbl->lq_type = LQ_MIMO2;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Switch to another antenna/antennas and return 1;
+ * if no other valid antenna is found, return 0.
+ */
+static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+                            struct iwl_scale_tbl_info *tbl)
+{
+       u8 new_ant_type;
+
+       if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+               return 0;
+
+       if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+               return 0;
+
+       new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+       while ((new_ant_type != tbl->ant_type) &&
+              !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
+               new_ant_type = ant_toggle_lookup[new_ant_type];
+
+       if (new_ant_type == tbl->ant_type)
+               return 0;
+
+       tbl->ant_type = new_ant_type;
+       *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+       *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+       return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
+{
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+               !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * iwl4965_rs_get_supported_rates - get the available rates
+ *
+ * If this is a management or broadcast frame, only return the
+ * basic available rates.
+ */
+static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+                                 struct ieee80211_hdr *hdr,
+                                 enum iwl_table_type rate_type)
+{
+       if (is_legacy(rate_type)) {
+               return lq_sta->active_legacy_rate;
+       } else {
+               if (is_siso(rate_type))
+                       return lq_sta->active_siso_rate;
+               else
+                       return lq_sta->active_mimo2_rate;
+       }
+}
+
+static u16
+iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
+                               int rate_type)
+{
+       u8 high = IWL_RATE_INVALID;
+       u8 low = IWL_RATE_INVALID;
+
+       /* For 802.11a or HT, walk to the next literally adjacent rate in
+        * the rate table */
+       if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+               int i;
+               u32 mask;
+
+               /* Find the previous rate that is in the rate mask */
+               i = index - 1;
+               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+                       if (rate_mask & mask) {
+                               low = i;
+                               break;
+                       }
+               }
+
+               /* Find the next rate that is in the rate mask */
+               i = index + 1;
+               for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+                       if (rate_mask & mask) {
+                               high = i;
+                               break;
+                       }
+               }
+
+               return (high << 8) | low;
+       }
+
+       low = index;
+       while (low != IWL_RATE_INVALID) {
+               low = iwlegacy_rates[low].prev_rs;
+               if (low == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << low))
+                       break;
+               IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
+       }
+
+       high = index;
+       while (high != IWL_RATE_INVALID) {
+               high = iwlegacy_rates[high].next_rs;
+               if (high == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << high))
+                       break;
+               IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
+       }
+
+       return (high << 8) | low;
+}
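+
+/*
+ * Editor's note: callers unpack the packed return value as
+ *
+ *     low  = high_low & 0xff;
+ *     high = (high_low >> 8) & 0xff;
+ *
+ * where IWL_RATE_INVALID in either byte means that no adjacent rate
+ * exists in that direction for the given rate mask.
+ */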
+
+static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+                            struct iwl_scale_tbl_info *tbl,
+                            u8 scale_index, u8 ht_possible)
+{
+       s32 low;
+       u16 rate_mask;
+       u16 high_low;
+       u8 switch_to_legacy = 0;
+       u8 is_green = lq_sta->is_green;
+       struct iwl_priv *priv = lq_sta->drv;
+
+       /* Check if we need to switch from HT to legacy rates.
+        * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
+        * are always supported (the spec demands it). */
+       if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+               switch_to_legacy = 1;
+               scale_index = rs_ht_to_legacy[scale_index];
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       tbl->lq_type = LQ_A;
+               else
+                       tbl->lq_type = LQ_G;
+
+               if (iwl4965_num_of_ant(tbl->ant_type) > 1)
+                       tbl->ant_type =
+                               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+               tbl->is_ht40 = 0;
+               tbl->is_SGI = 0;
+               tbl->max_search = IWL_MAX_SEARCH;
+       }
+
+       rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+       /* Mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               /* supp_rates has no CCK bits in A mode */
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       rate_mask  = (u16)(rate_mask &
+                          (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+               else
+                       rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+       }
+
+       /* If we switched from HT to legacy, check current rate */
+       if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+               low = scale_index;
+               goto out;
+       }
+
+       high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
+                                       scale_index, rate_mask,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+
+       if (low == IWL_RATE_INVALID)
+               low = scale_index;
+
+out:
+       return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
+                              struct iwl_scale_tbl_info *b)
+{
+       return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+               (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta,
+                        struct sk_buff *skb)
+{
+       int legacy_success;
+       int retries;
+       int rs_index, mac_index, i;
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       struct iwl_link_quality_cmd *table;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       enum mac80211_rate_control_flags mac_flags;
+       u32 tx_rate;
+       struct iwl_scale_tbl_info tbl_type;
+       struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       IWL_DEBUG_RATE_LIMIT(priv,
+               "get frame ack response, update rate scale window\n");
+
+       /* Treat uninitialized rate scaling data the same as non-existent. */
+       if (!lq_sta) {
+               IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+               return;
+       } else if (!lq_sta->drv) {
+               IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+               return;
+       }
+
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
+       /* This packet was aggregated but doesn't carry status info */
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+           !(info->flags & IEEE80211_TX_STAT_AMPDU))
+               return;
+
+       /*
+        * Ignore this Tx frame response if its initial rate doesn't match
+        * that of latest Link Quality command.  There may be stragglers
+        * from a previous Link Quality command, but we're no longer interested
+        * in those; they're either from the "active" mode while we're trying
+        * to check "search" mode, or a prior "search" mode after we've moved
+        * to a new "search" mode (which might become the new "active" mode).
+        */
+       table = &lq_sta->lq;
+       tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+       iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
+                        priv->band, &tbl_type, &rs_index);
+       if (priv->band == IEEE80211_BAND_5GHZ)
+               rs_index -= IWL_FIRST_OFDM_RATE;
+       mac_flags = info->status.rates[0].flags;
+       mac_index = info->status.rates[0].idx;
+       /* For HT packets, map MCS to PLCP */
+       if (mac_flags & IEEE80211_TX_RC_MCS) {
+               mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+                       mac_index++;
+               /*
+                * mac80211 HT index is always zero-indexed; we need to move
+                * HT OFDM rates after CCK rates in 2.4 GHz band
+                */
+               if (priv->band == IEEE80211_BAND_2GHZ)
+                       mac_index += IWL_FIRST_OFDM_RATE;
+       }
+       /* Here we actually compare this rate to the latest LQ command */
+       if ((mac_index < 0) ||
+           (tbl_type.is_SGI !=
+                       !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+           (tbl_type.is_ht40 !=
+                       !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+           (tbl_type.is_dup !=
+                       !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
+           (tbl_type.ant_type != info->antenna_sel_tx) ||
+           (!!(tx_rate & RATE_MCS_HT_MSK) !=
+                       !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+           (!!(tx_rate & RATE_MCS_GF_MSK) !=
+                       !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+           (rs_index != mac_index)) {
+               IWL_DEBUG_RATE(priv,
+               "initial rate %d does not match %d (0x%x)\n",
+                        mac_index, rs_index, tx_rate);
+               /*
+                * Since the rates mismatch, the last LQ command may have
+                * failed.  After IWL_MISSED_RATE_MAX mismatches, resync
+                * the uCode with the driver.
+                */
+               lq_sta->missed_rate_counter++;
+               if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+                       lq_sta->missed_rate_counter = 0;
+                       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
+                                                       CMD_ASYNC, false);
+               }
+               /* Regardless, ignore this status info for outdated rate */
+               return;
+       } else
+               /* Rate did match, so reset the missed_rate_counter */
+               lq_sta->missed_rate_counter = 0;
+
+       /* Figure out if rate scale algorithm is in active or search table */
+       if (iwl4965_table_type_matches(&tbl_type,
+                               &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+               curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+       } else if (iwl4965_table_type_matches(&tbl_type,
+                               &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+               curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       } else {
+               IWL_DEBUG_RATE(priv,
+                       "Neither active nor search matches tx rate\n");
+               tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
+                       tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+               tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
+                       tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+               IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
+                       tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
+               /*
+                * No matching table found; bypass data collection and
+                * continue rate scaling to find the right rate table.
+                */
+               iwl4965_rs_stay_in_table(lq_sta, true);
+               goto done;
+       }
+
+       /*
+        * Updating the frame history depends on whether packets were
+        * aggregated.
+        *
+        * For aggregation, all packets were transmitted at the same rate, the
+        * first index into rate scale table.
+        */
+       if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+               tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+               iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
+                               &rs_index);
+               iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
+                                  info->status.ampdu_len,
+                                  info->status.ampdu_ack_len);
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += info->status.ampdu_ack_len;
+                       lq_sta->total_failed += (info->status.ampdu_len -
+                                       info->status.ampdu_ack_len);
+               }
+       } else {
+       /*
+        * For legacy, update the frame history for each Tx retry.
+        */
+               retries = info->status.rates[0].count - 1;
+               /* HW doesn't send more than 15 retries */
+               retries = min(retries, 15);
+
+               /* The last transmission may have been successful */
+               legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+               /* Collect data for each rate used during failed TX attempts */
+               for (i = 0; i <= retries; ++i) {
+                       tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+                       iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
+                                       &tbl_type, &rs_index);
+                       /*
+                        * Only collect stats if retried rate is in the same RS
+                        * table as active/search.
+                        */
+                       if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
+                               tmp_tbl = curr_tbl;
+                       else if (iwl4965_table_type_matches(&tbl_type,
+                                                                other_tbl))
+                               tmp_tbl = other_tbl;
+                       else
+                               continue;
+                       iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
+                                          i < retries ? 0 : legacy_success);
+               }
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += legacy_success;
+                       lq_sta->total_failed += retries + (1 - legacy_success);
+               }
+       }
+       /* The last TX rate is cached in lq_sta; it's set in if/else above */
+       lq_sta->last_rate_n_flags = tx_rate;
+done:
+       /* See if there's a better rate or modulation mode to try. */
+       if (sta && sta->supp_rates[sband->band])
+               iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using the same modulation mode before
+ * searching for a new mode.
+ */
+static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
+                                struct iwl_lq_sta *lq_sta)
+{
+       IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
+       lq_sta->stay_in_tbl = 1;        /* only place this gets set */
+       if (is_legacy) {
+               lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+               lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+       } else {
+               lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+               lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+       }
+       lq_sta->table_count = 0;
+       lq_sta->total_failed = 0;
+       lq_sta->total_success = 0;
+       lq_sta->flush_timer = jiffies;
+       lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+                                     struct iwl_scale_tbl_info *tbl)
+{
+       /* Used to choose among HT tables */
+       s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+       /* Check for invalid LQ type */
+       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Legacy rates have only one table */
+       if (is_legacy(tbl->lq_type)) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Choose among many HT tables depending on number of streams
+        * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+        * status */
+       if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+               ht_tbl_pointer = expected_tpt_siso20MHz;
+       else if (is_siso(tbl->lq_type))
+               ht_tbl_pointer = expected_tpt_siso40MHz;
+       else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+               ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+       else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+               ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+       if (!tbl->is_SGI && !lq_sta->is_agg)            /* Normal */
+               tbl->expected_tpt = ht_tbl_pointer[0];
+       else if (tbl->is_SGI && !lq_sta->is_agg)        /* SGI */
+               tbl->expected_tpt = ht_tbl_pointer[1];
+       else if (!tbl->is_SGI && lq_sta->is_agg)        /* AGG */
+               tbl->expected_tpt = ht_tbl_pointer[2];
+       else                                            /* AGG+SGI */
+               tbl->expected_tpt = ht_tbl_pointer[3];
+}
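+
+/*
+ * Selection example (editor's note, illustrative only): a SISO, 40 MHz
+ * table with SGI enabled while aggregation is active picks
+ * expected_tpt_siso40MHz[3] (the AGG+SGI row); the same table without
+ * aggregation picks expected_tpt_siso40MHz[1] (the SGI row).
+ */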
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
+                           struct iwl_lq_sta *lq_sta,
+                           struct iwl_scale_tbl_info *tbl,     /* "search" */
+                           u16 rate_mask, s8 index)
+{
+       /* "active" values */
+       struct iwl_scale_tbl_info *active_tbl =
+           &(lq_sta->lq_info[lq_sta->active_tbl]);
+       s32 active_sr = active_tbl->win[index].success_ratio;
+       s32 active_tpt = active_tbl->expected_tpt[index];
+
+       /* expected "search" throughput */
+       s32 *tpt_tbl = tbl->expected_tpt;
+
+       s32 new_rate, high, low, start_hi;
+       u16 high_low;
+       s8 rate = index;
+
+       new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
+       for (; ;) {
+               high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
+                                               tbl->lq_type);
+
+               low = high_low & 0xff;
+               high = (high_low >> 8) & 0xff;
+
+               /*
+                * Lower the "search" bit rate, to give new "search" mode
+                * approximately the same throughput as "active" if:
+                *
+                * 1) "Active" mode has been working modestly well (but not
+                *    great), and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above the actual
+                *    measured "active" throughput (but less than expected
+                *    "active" throughput under perfect conditions).
+                * OR
+                * 2) "Active" mode has been working perfectly or very well
+                *    and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above expected
+                *    "active" throughput (under perfect conditions).
+                */
+               if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+                    ((active_sr > IWL_RATE_DECREASE_TH) &&
+                     (active_sr <= IWL_RATE_HIGH_TH) &&
+                     (tpt_tbl[rate] <= active_tpt))) ||
+                   ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+                    (tpt_tbl[rate] > active_tpt))) {
+
+                       /* (2nd or later pass)
+                        * If we've already tried to raise the rate, and are
+                        * now trying to lower it, use the higher rate. */
+                       if (start_hi != IWL_RATE_INVALID) {
+                               new_rate = start_hi;
+                               break;
+                       }
+
+                       new_rate = rate;
+
+                       /* Loop again with lower rate */
+                       if (low != IWL_RATE_INVALID)
+                               rate = low;
+
+                       /* Lower rate not available, use the original */
+                       else
+                               break;
+
+               /* Else try to raise the "search" rate to match "active" */
+               } else {
+                       /* (2nd or later pass)
+                        * If we've already tried to lower the rate, and are
+                        * now trying to raise it, use the lower rate. */
+                       if (new_rate != IWL_RATE_INVALID)
+                               break;
+
+                       /* Loop again with higher rate */
+                       else if (high != IWL_RATE_INVALID) {
+                               start_hi = high;
+                               rate = high;
+
+                       /* Higher rate not available, use the original */
+                       } else {
+                               new_rate = rate;
+                               break;
+                       }
+               }
+       }
+
+       return new_rate;
+}
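+
+/*
+ * Editor's note, illustrative walk-through: with a modestly successful
+ * "active" mode that measured lq_sta->last_tpt = 5760 (57.6 in
+ * expected-tpt units, see iwl4965_rs_collect_tx_data()) and
+ * active_tpt = 72, the loop above steps down from the start index and
+ * settles on the lowest candidate rate whose expected throughput is
+ * still above 57.6 but not above 72, so the new "search" mode starts
+ * roughly where "active" actually performs.
+ */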
+
+/*
+ * Set up search table for MIMO2
+ */
+static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       u16 rate_mask;
+       s32 rate;
+       s8 is_green = lq_sta->is_green;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+               return -1;
+
+       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
+                                               == WLAN_HT_CAP_SM_PS_STATIC)
+               return -1;
+
+       /* Need both Tx chains/antennas to support MIMO */
+       if (priv->hw_params.tx_chains_num < 2)
+               return -1;
+
+       IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
+
+       tbl->lq_type = LQ_MIMO2;
+       tbl->is_dup = lq_sta->is_dup;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_SEARCH;
+       rate_mask = lq_sta->active_mimo2_rate;
+
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+               tbl->is_ht40 = 1;
+       else
+               tbl->is_ht40 = 0;
+
+       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+       rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
+                               rate, rate_mask);
+       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(priv,
+                               "Can't switch with index %d rate mask %x\n",
+                                               rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+                                                tbl, rate, is_green);
+
+       IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+                    tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       u16 rate_mask;
+       u8 is_green = lq_sta->is_green;
+       s32 rate;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+               return -1;
+
+       IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
+
+       tbl->is_dup = lq_sta->is_dup;
+       tbl->lq_type = LQ_SISO;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_SEARCH;
+       rate_mask = lq_sta->active_siso_rate;
+
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+               tbl->is_ht40 = 1;
+       else
+               tbl->is_ht40 = 0;
+
+       if (is_green)
+               tbl->is_SGI = 0; /* 11n spec: no SGI in SISO+Greenfield */
+
+       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+       rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(priv,
+                       "can not switch with index %d rate mask %x\n",
+                            rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+                                               tbl, rate, is_green);
+       IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+                    tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
+                               struct iwl_lq_sta *lq_sta,
+                               struct ieee80211_conf *conf,
+                               struct ieee80211_sta *sta,
+                               int index)
+{
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+       int ret = 0;
+       u8 update_search_tbl_counter = 0;
+
+       tbl->action = IWL_LEGACY_SWITCH_SISO;
+
+       start_action = tbl->action;
+       for (; ;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_LEGACY_SWITCH_ANTENNA1:
+               case IWL_LEGACY_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
+
+                       if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+                                                       tx_chains_num <= 1) ||
+                           (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+                                                       tx_chains_num <= 2))
+                               break;
+
+                       /* Don't change antenna if success has been great */
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       /* Set up search table to try other antenna */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+                               &search_tbl->current_rate, search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               iwl4965_rs_set_expected_tpt_table(lq_sta,
+                                                               search_tbl);
+                               goto out;
+                       }
+                       break;
+               case IWL_LEGACY_SWITCH_SISO:
+                       IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
+
+                       /* Set up search table to try SISO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+
+                       break;
+               case IWL_LEGACY_SWITCH_MIMO2_AB:
+               case IWL_LEGACY_SWITCH_MIMO2_AC:
+               case IWL_LEGACY_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
+
+                       /* Set up search table to try MIMO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+                                               search_tbl->ant_type))
+                               break;
+
+                       ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+                                               conf, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+                       break;
+               }
+               tbl->action++;
+               if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+                       tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+               tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+       return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta, int index)
+{
+       u8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       start_action = tbl->action;
+
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_SISO_SWITCH_ANTENNA1:
+               case IWL_SISO_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
+                       if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+                                               tx_chains_num <= 1) ||
+                           (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+                                               tx_chains_num <= 2))
+                               break;
+
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                      &search_tbl->current_rate, search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IWL_SISO_SWITCH_MIMO2_AB:
+               case IWL_SISO_SWITCH_MIMO2_AC:
+               case IWL_SISO_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+                                                search_tbl->ant_type))
+                               break;
+
+                       ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+                                               conf, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+                       break;
+               case IWL_SISO_SWITCH_GI:
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (is_green) {
+                               if (!tbl->is_SGI)
+                                       break;
+                               else
+                                       IWL_ERR(priv,
+                                               "SGI was set in GF+SISO\n");
+                       }
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+               }
+               tbl->action++;
+               if (tbl->action > IWL_SISO_SWITCH_GI)
+                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+ out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_SISO_SWITCH_GI)
+               tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta, int index)
+{
+       s8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       start_action = tbl->action;
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_MIMO2_SWITCH_ANTENNA1:
+               case IWL_MIMO2_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
+
+                       if (tx_chains_num <= 2)
+                               break;
+
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                      &search_tbl->current_rate, search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IWL_MIMO2_SWITCH_SISO_A:
+               case IWL_MIMO2_SWITCH_SISO_B:
+               case IWL_MIMO2_SWITCH_SISO_C:
+                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
+
+                       /* Set up new search table for SISO */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+                               search_tbl->ant_type = ANT_A;
+                       else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+                               search_tbl->ant_type = ANT_B;
+                       else
+                               search_tbl->ant_type = ANT_C;
+
+                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+                                               search_tbl->ant_type))
+                               break;
+
+                       ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
+                                               conf, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+
+                       break;
+
+               case IWL_MIMO2_SWITCH_GI:
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
+
+                       /* Set up new search table for MIMO2 */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       /*
+                        * If active table already uses the fastest possible
+                        * modulation (dual stream with short guard interval),
+                        * and it's working well, there's no need to look
+                        * for a better type of modulation!
+                        */
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+
+               }
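+               /* Try the next action; stop after one full cycle. */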
+               tbl->action++;
+               if (tbl->action > IWL_MIMO2_SWITCH_GI)
+                       tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+ out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_MIMO2_SWITCH_GI)
+               tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+
+}
+
+/*
+ * Check whether we should continue using the same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+       struct iwl_scale_tbl_info *tbl;
+       int i;
+       int active_tbl;
+       int flush_interval_passed = 0;
+       struct iwl_priv *priv;
+
+       priv = lq_sta->drv;
+       active_tbl = lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       /* If we've been disallowing search, see if we should now allow it */
+       if (lq_sta->stay_in_tbl) {
+
+               /* Elapsed time using current modulation mode */
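+               /* A flush_timer of zero means the timer is not running;
+                * it is cleared below once a new search is allowed. */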
+               if (lq_sta->flush_timer)
+                       flush_interval_passed =
+                       time_after(jiffies,
+                                       (unsigned long)(lq_sta->flush_timer +
+                                       IWL_RATE_SCALE_FLUSH_INTVL));
+
+               /*
+                * Check if we should allow search for new modulation mode.
+                * If many frames have failed or succeeded, or we've used
+                * this same modulation for a long time, allow search, and
+                * reset history stats that keep track of whether we should
+                * allow a new search.  Also (below) reset all bitmaps and
+                * stats in active history.
+                */
+               if (force_search ||
+                   (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+                   (lq_sta->total_success > lq_sta->max_success_limit) ||
+                   ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
+                    && (flush_interval_passed))) {
+                       IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
+                                    lq_sta->total_failed,
+                                    lq_sta->total_success,
+                                    flush_interval_passed);
+
+                       /* Allow search for new mode */
+                       lq_sta->stay_in_tbl = 0;        /* only place reset */
+                       lq_sta->total_failed = 0;
+                       lq_sta->total_success = 0;
+                       lq_sta->flush_timer = 0;
+
+               /*
+                * Else if we've used this modulation mode enough repetitions
+                * (regardless of elapsed time or success/failure), reset
+                * history bitmaps and rate-specific stats for all rates in
+                * active table.
+                */
+               } else {
+                       lq_sta->table_count++;
+                       if (lq_sta->table_count >=
+                           lq_sta->table_count_limit) {
+                               lq_sta->table_count = 0;
+
+                               IWL_DEBUG_RATE(priv,
+                                       "LQ: stay in table clear win\n");
+                               for (i = 0; i < IWL_RATE_COUNT; i++)
+                                       iwl4965_rs_rate_scale_clear_window(
+                                               &(tbl->win[i]));
+                       }
+               }
+
+               /* If transitioning to allow "search", reset all history
+                * bitmaps and stats in active table (this will become the new
+                * "search" table). */
+               if (!lq_sta->stay_in_tbl) {
+                       for (i = 0; i < IWL_RATE_COUNT; i++)
+                               iwl4965_rs_rate_scale_clear_window(
+                                                       &(tbl->win[i]));
+               }
+       }
+}
+
+/*
+ * Set up the rate table in uCode and return the rate_n_flags value
+ * written to it.
+ */
+static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx,
+                               struct iwl_lq_sta *lq_sta,
+                               struct iwl_scale_tbl_info *tbl,
+                               int index, u8 is_green)
+{
+       u32 rate;
+
+       /* Update uCode's rate table. */
+       rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
+       iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
+       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+
+       return rate;
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+                                 struct sk_buff *skb,
+                                 struct ieee80211_sta *sta,
+                                 struct iwl_lq_sta *lq_sta)
+{
+       struct ieee80211_hw *hw = priv->hw;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int low = IWL_RATE_INVALID;
+       int high = IWL_RATE_INVALID;
+       int index;
+       int i;
+       struct iwl_rate_scale_data *window = NULL;
+       int current_tpt = IWL_INVALID_VALUE;
+       int low_tpt = IWL_INVALID_VALUE;
+       int high_tpt = IWL_INVALID_VALUE;
+       u32 fail_count;
+       s8 scale_action = 0;
+       u16 rate_mask;
+       u8 update_lq = 0;
+       struct iwl_scale_tbl_info *tbl, *tbl1;
+       u16 rate_scale_index_msk = 0;
+       u32 rate;
+       u8 is_green = 0;
+       u8 active_tbl = 0;
+       u8 done_search = 0;
+       u16 high_low;
+       s32 sr;
+       u8 tid = MAX_TID_COUNT;
+       struct iwl_tid_data *tid_data;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       /* TODO: this could probably be improved. */
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
+       if (!sta || !lq_sta)
+               return;
+
+       lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+       tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
+       if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+               tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+               if (tid_data->agg.state == IWL_AGG_OFF)
+                       lq_sta->is_agg = 0;
+               else
+                       lq_sta->is_agg = 1;
+       } else
+               lq_sta->is_agg = 0;
+
+       /*
+        * Select rate-scale / modulation-mode table to work with in
+        * the rest of this function:  "search" if searching for better
+        * modulation mode, or "active" if doing rate scaling within a mode.
+        */
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+       if (is_legacy(tbl->lq_type))
+               lq_sta->is_green = 0;
+       else
+               lq_sta->is_green = iwl4965_rs_use_green(sta);
+       is_green = lq_sta->is_green;
+
+       /* current tx rate */
+       index = lq_sta->last_txrate_idx;
+
+       IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
+                      tbl->lq_type);
+
+       /* rates available for this association, and for modulation mode */
+       rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+       IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
+
+       /* mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       /* supp_rates has no CCK bits in A mode */
+                       rate_scale_index_msk = (u16) (rate_mask &
+                               (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+               else
+                       rate_scale_index_msk = (u16) (rate_mask &
+                                                     lq_sta->supp_rates);
+
+       } else
+               rate_scale_index_msk = rate_mask;
+
+       if (!rate_scale_index_msk)
+               rate_scale_index_msk = rate_mask;
+
+       if (!((1 << index) & rate_scale_index_msk)) {
+               IWL_ERR(priv, "Current Rate is not valid\n");
+               if (lq_sta->search_better_tbl) {
+                       /* revert to active table if search table is not valid */
+                       tbl->lq_type = LQ_NONE;
+                       lq_sta->search_better_tbl = 0;
+                       tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+                       /* get "active" rate info */
+                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+                       rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+                                                 tbl, index, is_green);
+               }
+               return;
+       }
+
+       /* Get expected throughput table and history window for current rate */
+       if (!tbl->expected_tpt) {
+               IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
+               return;
+       }
+
+       /* force user max rate if set by user */
+       if ((lq_sta->max_rate_idx != -1) &&
+           (lq_sta->max_rate_idx < index)) {
+               index = lq_sta->max_rate_idx;
+               update_lq = 1;
+               window = &(tbl->win[index]);
+               goto lq_update;
+       }
+
+       window = &(tbl->win[index]);
+
+       /*
+        * If there is not enough history to calculate actual average
+        * throughput, keep analyzing results of more tx frames, without
+        * changing rate or mode (bypass most of the rest of this function).
+        * Set up new rate table in uCode only if old rate is not supported
+        * in current association (use new rate found above).
+        */
+       fail_count = window->counter - window->success_counter;
+       if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+                       (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+               IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
+                              "for index %d\n",
+                              window->success_counter, window->counter, index);
+
+               /* Can't calculate this yet; not enough history */
+               window->average_tpt = IWL_INVALID_VALUE;
+
+               /* Should we stay with this modulation mode,
+                * or search for a new one? */
+               iwl4965_rs_stay_in_table(lq_sta, false);
+
+               goto out;
+       }
+       /* Else we have enough samples; calculate estimate of
+        * actual average throughput */
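+       /* success_ratio appears to be a 128-scaled ratio, so dividing by 128
+        * recovers the throughput estimate; the "+ 64" rounds to nearest. */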
+       if (window->average_tpt != ((window->success_ratio *
+                       tbl->expected_tpt[index] + 64) / 128)) {
+               IWL_ERR(priv,
+                        "expected_tpt should have been calculated by now\n");
+               window->average_tpt = ((window->success_ratio *
+                                       tbl->expected_tpt[index] + 64) / 128);
+       }
+
+       /* If we are searching for better modulation mode, check success. */
+       if (lq_sta->search_better_tbl) {
+               /* If good success, continue using the "search" mode;
+                * no need to send new link quality command, since we're
+                * continuing to use the setup that we've been trying. */
+               if (window->average_tpt > lq_sta->last_tpt) {
+
+                       IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
+                                       "suc=%d cur-tpt=%d old-tpt=%d\n",
+                                       window->success_ratio,
+                                       window->average_tpt,
+                                       lq_sta->last_tpt);
+
+                       if (!is_legacy(tbl->lq_type))
+                               lq_sta->enable_counter = 1;
+
+                       /* Swap tables; "search" becomes "active" */
+                       lq_sta->active_tbl = active_tbl;
+                       current_tpt = window->average_tpt;
+
+               /* Else poor success; go back to mode in "active" table */
+               } else {
+
+                       IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
+                                       "suc=%d cur-tpt=%d old-tpt=%d\n",
+                                       window->success_ratio,
+                                       window->average_tpt,
+                                       lq_sta->last_tpt);
+
+                       /* Nullify "search" table */
+                       tbl->lq_type = LQ_NONE;
+
+                       /* Revert to "active" table */
+                       active_tbl = lq_sta->active_tbl;
+                       tbl = &(lq_sta->lq_info[active_tbl]);
+
+                       /* Revert to "active" rate and throughput info */
+                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+                       current_tpt = lq_sta->last_tpt;
+
+                       /* Need to set up a new rate table in uCode */
+                       update_lq = 1;
+               }
+
+               /* Either way, we've made a decision; modulation mode
+                * search is done, allow rate adjustment next time. */
+               lq_sta->search_better_tbl = 0;
+               done_search = 1;        /* Don't switch modes below! */
+               goto lq_update;
+       }
+
+       /* (Else) not in search of better modulation mode, try for better
+        * starting rate, while staying in this mode. */
+       high_low = iwl4965_rs_get_adjacent_rate(priv, index,
+                                       rate_scale_index_msk,
+                                       tbl->lq_type);
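+       /* The adjacent rate indices come back packed as
+        * (high << 8) | low. */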
+       low = high_low & 0xff;
+       high = (high_low >> 8) & 0xff;
+
+       /* If the user set a max rate, don't allow anything higher */
+       if ((lq_sta->max_rate_idx != -1) &&
+           (lq_sta->max_rate_idx < high))
+               high = IWL_RATE_INVALID;
+
+       sr = window->success_ratio;
+
+       /* Collect measured throughputs for current and adjacent rates */
+       current_tpt = window->average_tpt;
+       if (low != IWL_RATE_INVALID)
+               low_tpt = tbl->win[low].average_tpt;
+       if (high != IWL_RATE_INVALID)
+               high_tpt = tbl->win[high].average_tpt;
+
+       scale_action = 0;
+
+       /* Too many failures, decrease rate */
+       if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+               IWL_DEBUG_RATE(priv,
+                       "decrease rate because of low success_ratio\n");
+               scale_action = -1;
+
+       /* No throughput measured yet for adjacent rates; try increase. */
+       } else if ((low_tpt == IWL_INVALID_VALUE) &&
+                  (high_tpt == IWL_INVALID_VALUE)) {
+
+               if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+                       scale_action = 1;
+               else if (low != IWL_RATE_INVALID)
+                       scale_action = 0;
+       }
+
+       /* Both adjacent throughputs are measured, but neither one has better
+        * throughput; we're using the best rate, don't change it! */
+       else if ((low_tpt != IWL_INVALID_VALUE) &&
+                (high_tpt != IWL_INVALID_VALUE) &&
+                (low_tpt < current_tpt) &&
+                (high_tpt < current_tpt))
+               scale_action = 0;
+
+       /* At least one adjacent rate's throughput is measured,
+        * and may have better performance. */
+       else {
+               /* Higher adjacent rate's throughput is measured */
+               if (high_tpt != IWL_INVALID_VALUE) {
+                       /* Higher rate has better throughput */
+                       if (high_tpt > current_tpt &&
+                                       sr >= IWL_RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       } else {
+                               scale_action = 0;
+                       }
+
+               /* Lower adjacent rate's throughput is measured */
+               } else if (low_tpt != IWL_INVALID_VALUE) {
+                       /* Lower rate has better throughput */
+                       if (low_tpt > current_tpt) {
+                               IWL_DEBUG_RATE(priv,
+                                   "decrease rate because of low tpt\n");
+                               scale_action = -1;
+                       } else if (sr >= IWL_RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       }
+               }
+       }
+
+       /* Sanity check; asked for decrease, but success rate or throughput
+        * has been good at old rate.  Don't change it. */
+       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+                   ((sr > IWL_RATE_HIGH_TH) ||
+                    (current_tpt > (100 * tbl->expected_tpt[low]))))
+               scale_action = 0;
+
+       switch (scale_action) {
+       case -1:
+               /* Decrease starting rate, update uCode's rate table */
+               if (low != IWL_RATE_INVALID) {
+                       update_lq = 1;
+                       index = low;
+               }
+
+               break;
+       case 1:
+               /* Increase starting rate, update uCode's rate table */
+               if (high != IWL_RATE_INVALID) {
+                       update_lq = 1;
+                       index = high;
+               }
+
+               break;
+       case 0:
+               /* No change */
+       default:
+               break;
+       }
+
+       IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
+                   "high %d type %d\n",
+                    index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+       /* Replace uCode's rate table for the destination station. */
+       if (update_lq)
+               rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+                                         tbl, index, is_green);
+
+       /* Should we stay with this modulation mode,
+        * or search for a new one? */
+       iwl4965_rs_stay_in_table(lq_sta, false);
+
+       /*
+        * Search for new modulation mode if we're:
+        * 1)  Not changing rates right now
+        * 2)  Not just finishing up a search
+        * 3)  Allowing a new search
+        */
+       if (!update_lq && !done_search &&
+               !lq_sta->stay_in_tbl && window->counter) {
+               /* Save current throughput to compare with "search" throughput */
+               lq_sta->last_tpt = current_tpt;
+
+               /* Select a new "search" modulation mode to try.
+                * If one is found, set up the new "search" table. */
+               if (is_legacy(tbl->lq_type))
+                       iwl4965_rs_move_legacy_other(priv, lq_sta,
+                                                       conf, sta, index);
+               else if (is_siso(tbl->lq_type))
+                       iwl4965_rs_move_siso_to_other(priv, lq_sta,
+                                                       conf, sta, index);
+               else /* (is_mimo2(tbl->lq_type)) */
+                       iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
+                                                       conf, sta, index);
+
+               /* If new "search" mode was selected, set up in uCode table */
+               if (lq_sta->search_better_tbl) {
+                       /* Access the "search" table, clear its history. */
+                       tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+                       for (i = 0; i < IWL_RATE_COUNT; i++)
+                               iwl4965_rs_rate_scale_clear_window(
+                                                       &(tbl->win[i]));
+
+                       /* Use new "search" start rate */
+                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+                       IWL_DEBUG_RATE(priv,
+                               "Switch current mcs: %X index: %d\n",
+                                    tbl->current_rate, index);
+                       iwl4965_rs_fill_link_cmd(priv, lq_sta,
+                                               tbl->current_rate);
+                       iwl_legacy_send_lq_cmd(priv, ctx,
+                                               &lq_sta->lq, CMD_ASYNC, false);
+               } else
+                       done_search = 1;
+       }
+
+       if (done_search && !lq_sta->stay_in_tbl) {
+               /* If the "active" (non-search) mode was legacy,
+                * and we've tried switching antennas,
+                * but we haven't been able to try HT modes (not available),
+                * stay with best antenna legacy modulation for a while
+                * before next round of mode comparisons. */
+               tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+                   lq_sta->action_counter > tbl1->max_search) {
+                       IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
+                       iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
+               }
+
+               /* If we're in an HT mode, and all 3 mode switch actions
+                * have been tried and compared, stay in this best modulation
+                * mode for a while before next round of mode comparisons. */
+               if (lq_sta->enable_counter &&
+                   (lq_sta->action_counter >= tbl1->max_search)) {
+                       if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+                           (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+                           (tid != MAX_TID_COUNT)) {
+                               tid_data =
+                                  &priv->stations[lq_sta->lq.sta_id].tid[tid];
+                               if (tid_data->agg.state == IWL_AGG_OFF) {
+                                       IWL_DEBUG_RATE(priv,
+                                                      "try to aggregate tid %d\n",
+                                                      tid);
+                                       iwl4965_rs_tl_turn_on_agg(priv, tid,
+                                                         lq_sta, sta);
+                               }
+                       }
+                       iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
+               }
+       }
+
+out:
+       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
+                                                       index, is_green);
+       lq_sta->last_txrate_idx = index;
+}
+
+/**
+ * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta,
+                            struct iwl_lq_sta *lq_sta)
+{
+       struct iwl_scale_tbl_info *tbl;
+       int rate_idx;
+       int i;
+       u32 rate;
+       u8 use_green = iwl4965_rs_use_green(sta);
+       u8 active_tbl = 0;
+       u8 valid_tx_ant;
+       struct iwl_station_priv *sta_priv;
+       struct iwl_rxon_context *ctx;
+
+       if (!sta || !lq_sta)
+               return;
+
+       sta_priv = (void *)sta->drv_priv;
+       ctx = sta_priv->common.ctx;
+
+       i = lq_sta->last_txrate_idx;
+
+       valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       if ((i < 0) || (i >= IWL_RATE_COUNT))
+               i = 0;
+
+       rate = iwlegacy_rates[i].plcp;
+       tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
+       rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+       if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+               rate |= RATE_MCS_CCK_MSK;
+
+       iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
+       if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+               iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+       rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
+       tbl->current_rate = rate;
+       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
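+       /* priv is passed as NULL so iwl4965_rs_fill_link_cmd() skips the
+        * antenna-toggling steps that need priv->hw_params. */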
+       iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+       priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
+                       struct ieee80211_tx_rate_control *txrc)
+{
+
+       struct sk_buff *skb = txrc->skb;
+       struct ieee80211_supported_band *sband = txrc->sband;
+       struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       int rate_idx;
+
+       IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
+
+       /* Get max rate if user set max rate */
+       if (lq_sta) {
+               lq_sta->max_rate_idx = txrc->max_rate_idx;
+               if ((sband->band == IEEE80211_BAND_5GHZ) &&
+                   (lq_sta->max_rate_idx != -1))
+                       lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+               if ((lq_sta->max_rate_idx < 0) ||
+                   (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+                       lq_sta->max_rate_idx = -1;
+       }
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (lq_sta && !lq_sta->drv) {
+               IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+               priv_sta = NULL;
+       }
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       if (rate_control_send_low(sta, priv_sta, txrc))
+               return;
+
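+       /* rate_control_send_low() returns true (and handles the frame) when
+        * priv_sta is NULL, so lq_sta is valid from here on. */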
+       rate_idx  = lq_sta->last_txrate_idx;
+
+       if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+               rate_idx -= IWL_FIRST_OFDM_RATE;
+               /* 6M and 9M share the same MCS index */
+               rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
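+               /* MIMO2 PLCP values map to the next per-stream block of
+                * MCS indices (MCS_INDEX_PER_STREAM entries per stream). */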
+               if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+                        IWL_RATE_MIMO2_6M_PLCP)
+                       rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+               info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_SHORT_GI;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_DUP_DATA;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_40_MHZ_WIDTH;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_GREEN_FIELD;
+       } else {
+               /* Check for invalid rates */
+               if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+                               ((sband->band == IEEE80211_BAND_5GHZ) &&
+                                (rate_idx < IWL_FIRST_OFDM_RATE)))
+                       rate_idx = rate_lowest_index(sband, sta);
+               /* On valid 5 GHz rate, adjust index */
+               else if (sband->band == IEEE80211_BAND_5GHZ)
+                       rate_idx -= IWL_FIRST_OFDM_RATE;
+               info->control.rates[0].flags = 0;
+       }
+       info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
+                         gfp_t gfp)
+{
+       struct iwl_lq_sta *lq_sta;
+       struct iwl_station_priv *sta_priv =
+                               (struct iwl_station_priv *) sta->drv_priv;
+       struct iwl_priv *priv;
+
+       priv = (struct iwl_priv *)priv_rate;
+       IWL_DEBUG_RATE(priv, "create station rate scale window\n");
+
+       lq_sta = &sta_priv->lq_sta;
+
+       return lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+iwl4965_rs_rate_init(struct iwl_priv *priv,
+                       struct ieee80211_sta *sta,
+                       u8 sta_id)
+{
+       int i, j;
+       struct ieee80211_hw *hw = priv->hw;
+       struct ieee80211_conf *conf = &priv->hw->conf;
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct iwl_station_priv *sta_priv;
+       struct iwl_lq_sta *lq_sta;
+       struct ieee80211_supported_band *sband;
+
+       sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+       lq_sta = &sta_priv->lq_sta;
+       sband = hw->wiphy->bands[conf->channel->band];
+
+
+       lq_sta->lq.sta_id = sta_id;
+
+       for (j = 0; j < LQ_SIZE; j++)
+               for (i = 0; i < IWL_RATE_COUNT; i++)
+                       iwl4965_rs_rate_scale_clear_window(
+                                       &lq_sta->lq_info[j].win[i]);
+
+       lq_sta->flush_timer = 0;
+       lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+       IWL_DEBUG_RATE(priv, "LQ:"
+                       "*** rate scale station global init for station %d ***\n",
+                      sta_id);
+       /* TODO: what is a good starting rate for STA? About middle? Maybe not
+        * the lowest or the highest rate.. Could consider using RSSI from
+        * previous packets? Need to have IEEE 802.1X auth succeed immediately
+        * after assoc.. */
+
+       lq_sta->is_dup = 0;
+       lq_sta->max_rate_idx = -1;
+       lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+       lq_sta->is_green = iwl4965_rs_use_green(sta);
+       lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
+       lq_sta->band = priv->band;
+       /*
+        * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+        * supp_rates[] does not; shift to convert format, force 9 MBits off.
+        */
+       lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+       lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+       lq_sta->active_siso_rate &= ~((u16)0x2);
+       lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+       /* Same here */
+       lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+       lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+       lq_sta->active_mimo2_rate &= ~((u16)0x2);
+       lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+       /* These values will be overridden later */
+       lq_sta->lq.general_params.single_stream_ant_msk =
+               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+       lq_sta->lq.general_params.dual_stream_ant_msk =
+               priv->hw_params.valid_tx_ant &
+               ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+       if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+               lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+       } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+               lq_sta->lq.general_params.dual_stream_ant_msk =
+                       priv->hw_params.valid_tx_ant;
+       }
+
+       /* as default allow aggregation for all tids */
+       lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+       lq_sta->drv = priv;
+
+       /* Set last_txrate_idx to lowest rate */
+       lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
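+       /* rate_lowest_index() is band-relative; the 5 GHz band has no CCK
+        * rates, so shift to the driver's global rate index. */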
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+       lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       lq_sta->dbg_fixed_rate = 0;
+#endif
+
+       iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
+}
+
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+       struct iwl_scale_tbl_info tbl_type;
+       int index = 0;
+       int rate_idx;
+       int repeat_rate = 0;
+       u8 ant_toggle_cnt = 0;
+       u8 use_ht_possible = 1;
+       u8 valid_tx_ant = 0;
+       struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+       /* Override starting rate (index 0) if needed for debug purposes */
+       iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+       /* Interpret new_rate (rate_n_flags) */
+       iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+                                 &tbl_type, &rate_idx);
+
+       /* How many times should we repeat the initial rate? */
+       if (is_legacy(tbl_type.lq_type)) {
+               ant_toggle_cnt = 1;
+               repeat_rate = IWL_NUMBER_TRY;
+       } else {
+               repeat_rate = IWL_HT_NUMBER_TRY;
+       }
+
+       lq_cmd->general_params.mimo_delimiter =
+                       is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+       /* Fill 1st table entry (index 0) */
+       lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+       if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
+               lq_cmd->general_params.single_stream_ant_msk =
+                                               tbl_type.ant_type;
+       } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
+               lq_cmd->general_params.dual_stream_ant_msk =
+                                               tbl_type.ant_type;
+       } /* otherwise we don't modify the existing value */
+
+       index++;
+       repeat_rate--;
+       if (priv)
+               valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+       /* Fill rest of rate table */
+       while (index < LINK_QUAL_MAX_RETRY_NUM) {
+               /* Repeat initial/next rate.
+                * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+                * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+               while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+                       if (is_legacy(tbl_type.lq_type)) {
+                               if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                                       ant_toggle_cnt++;
+                               else if (priv &&
+                                        iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                                       &new_rate, &tbl_type))
+                                       ant_toggle_cnt = 1;
+                       }
+
+                       /* Override next rate if needed for debug purposes */
+                       iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+                       /* Fill next table entry */
+                       lq_cmd->rs_table[index].rate_n_flags =
+                                       cpu_to_le32(new_rate);
+                       repeat_rate--;
+                       index++;
+               }
+
+               iwl4965_rs_get_tbl_info_from_mcs(new_rate,
+                                               lq_sta->band, &tbl_type,
+                                               &rate_idx);
+
+               /* Indicate to uCode which entries might be MIMO.
+                * If initial rate was MIMO, this will finally end up
+                * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+               if (is_mimo(tbl_type.lq_type))
+                       lq_cmd->general_params.mimo_delimiter = index;
+
+               /* Get next rate */
+               new_rate = iwl4965_rs_get_lower_rate(lq_sta,
+                                       &tbl_type, rate_idx,
+                                            use_ht_possible);
+
+               /* How many times should we repeat the next rate? */
+               if (is_legacy(tbl_type.lq_type)) {
+                       if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                               ant_toggle_cnt++;
+                       else if (priv &&
+                                iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                                  &new_rate, &tbl_type))
+                               ant_toggle_cnt = 1;
+
+                       repeat_rate = IWL_NUMBER_TRY;
+               } else {
+                       repeat_rate = IWL_HT_NUMBER_TRY;
+               }
+
+               /* Don't allow HT rates after next pass.
+                * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+               use_ht_possible = 0;
+
+               /* Override next rate if needed for debug purposes */
+               iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+               /* Fill next table entry */
+               lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+               index++;
+               repeat_rate--;
+       }
+
+       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+       lq_cmd->agg_params.agg_time_limit =
+               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *iwl4965_rs_alloc(struct ieee80211_hw *hw,
+                             struct dentry *debugfsdir)
+{
+       return hw->priv;
+}
+
+/* The rate scaling framework requires a free function to be implemented. */
+static void iwl4965_rs_free(void *priv_rate)
+{
+       return;
+}
+
+static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
+                       void *priv_sta)
+{
+       struct iwl_priv *priv __maybe_unused = priv_r;
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+       IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index)
+{
+       struct iwl_priv *priv;
+       u8 valid_tx_ant;
+       u8 ant_sel_tx;
+
+       priv = lq_sta->drv;
+       valid_tx_ant = priv->hw_params.valid_tx_ant;
+       if (lq_sta->dbg_fixed_rate) {
+               ant_sel_tx =
+                 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+                 >> RATE_MCS_ANT_POS);
+               if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+                       *rate_n_flags = lq_sta->dbg_fixed_rate;
+                       IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
+               } else {
+                       lq_sta->dbg_fixed_rate = 0;
+                       IWL_ERR(priv,
+                           "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+                           ant_sel_tx, valid_tx_ant);
+                       IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+               }
+       } else {
+               IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+       }
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+                       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_priv *priv;
+       char buf[64];
+       int buf_size;
+       u32 parsed_rate;
+       struct iwl_station_priv *sta_priv =
+               container_of(lq_sta, struct iwl_station_priv, lq_sta);
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       priv = lq_sta->drv;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x", &parsed_rate) == 1)
+               lq_sta->dbg_fixed_rate = parsed_rate;
+       else
+               lq_sta->dbg_fixed_rate = 0;
+
+       lq_sta->active_legacy_rate = 0x0FFF;    /* 1 - 54 MBits, includes CCK */
+       lq_sta->active_siso_rate   = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+       lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+
+       IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
+               lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+       if (lq_sta->dbg_fixed_rate) {
+               iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+               iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
+                               false);
+       }
+
+       return count;
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i = 0;
+       int index = 0;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_priv *priv;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+       priv = lq_sta->drv;
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+       desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
+                       lq_sta->total_failed, lq_sta->total_success,
+                       lq_sta->active_legacy_rate);
+       desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+                       lq_sta->dbg_fixed_rate);
+       desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+           (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+           (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+           (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+       desc += sprintf(buff+desc, "lq type %s\n",
+          (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+       if (is_Ht(tbl->lq_type)) {
+               desc += sprintf(buff+desc, " %s",
+                               (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+               desc += sprintf(buff+desc, " %s",
+                               (tbl->is_ht40) ? "40MHz" : "20MHz");
+               desc += sprintf(buff+desc, " %s %s %s\n",
+                               (tbl->is_SGI) ? "SGI" : "",
+                               (lq_sta->is_green) ? "GF enabled" : "",
+                               (lq_sta->is_agg) ? "AGG on" : "");
+       }
+       desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+               lq_sta->last_rate_n_flags);
+       desc += sprintf(buff+desc, "general:"
+               "flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+               lq_sta->lq.general_params.flags,
+               lq_sta->lq.general_params.mimo_delimiter,
+               lq_sta->lq.general_params.single_stream_ant_msk,
+               lq_sta->lq.general_params.dual_stream_ant_msk);
+
+       desc += sprintf(buff+desc, "agg:"
+                       "time_limit=%d dis_start_th=%d frame_cnt_limit=%d\n",
+                       le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+                       lq_sta->lq.agg_params.agg_dis_start_th,
+                       lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+       desc += sprintf(buff+desc,
+                       "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+                       lq_sta->lq.general_params.start_rate_index[0],
+                       lq_sta->lq.general_params.start_rate_index[1],
+                       lq_sta->lq.general_params.start_rate_index[2],
+                       lq_sta->lq.general_params.start_rate_index[3]);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               index = iwl4965_hwrate_to_plcp_idx(
+                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+               if (is_legacy(tbl->lq_type)) {
+                       desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+                               i,
+                               le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+                               iwl_rate_mcs[index].mbps);
+               } else {
+                       desc += sprintf(buff+desc,
+                               " rate[%d] 0x%X %smbps (%s)\n",
+                               i,
+                               le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+                               iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+       .write = iwl4965_rs_sta_dbgfs_scale_table_write,
+       .read = iwl4965_rs_sta_dbgfs_scale_table_read,
+       .open = iwl4965_open_file_generic,
+       .llseek = default_llseek,
+};
+static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i, j;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       for (i = 0; i < LQ_SIZE; i++) {
+               desc += sprintf(buff+desc,
+                               "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+                               "rate=0x%X\n",
+                               lq_sta->active_tbl == i ? "*" : "x",
+                               lq_sta->lq_info[i].lq_type,
+                               lq_sta->lq_info[i].is_SGI,
+                               lq_sta->lq_info[i].is_ht40,
+                               lq_sta->lq_info[i].is_dup,
+                               lq_sta->is_green,
+                               lq_sta->lq_info[i].current_rate);
+               for (j = 0; j < IWL_RATE_COUNT; j++) {
+                       desc += sprintf(buff+desc,
+                               "counter=%d success=%d %%=%d\n",
+                               lq_sta->lq_info[i].win[j].counter,
+                               lq_sta->lq_info[i].win[j].success_counter,
+                               lq_sta->lq_info[i].win[j].success_ratio);
+               }
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+       .read = iwl4965_rs_sta_dbgfs_stats_table_read,
+       .open = iwl4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char buff[120];
+       int desc = 0;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_priv *priv;
+       struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+       priv = lq_sta->drv;
+
+       if (is_Ht(tbl->lq_type))
+               desc += sprintf(buff+desc,
+                               "Bit Rate= %d Mb/s\n",
+                               tbl->expected_tpt[lq_sta->last_txrate_idx]);
+       else
+               desc += sprintf(buff+desc,
+                               "Bit Rate= %d Mb/s\n",
+                               iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+       .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
+       .open = iwl4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
+                                       struct dentry *dir)
+{
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       lq_sta->rs_sta_dbgfs_scale_table_file =
+               debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+                               lq_sta, &rs_sta_dbgfs_scale_table_ops);
+       lq_sta->rs_sta_dbgfs_stats_table_file =
+               debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+                       lq_sta, &rs_sta_dbgfs_stats_table_ops);
+       lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+               debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+                       lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+       lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+               debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+               &lq_sta->tx_agg_tid_en);
+
+}
+
+static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
+{
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+static struct rate_control_ops rs_4965_ops = {
+       .module = NULL,
+       .name = IWL4965_RS_NAME,
+       .tx_status = iwl4965_rs_tx_status,
+       .get_rate = iwl4965_rs_get_rate,
+       .rate_init = iwl4965_rs_rate_init_stub,
+       .alloc = iwl4965_rs_alloc,
+       .free = iwl4965_rs_free,
+       .alloc_sta = iwl4965_rs_alloc_sta,
+       .free_sta = iwl4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+       .add_sta_debugfs = iwl4965_rs_add_debugfs,
+       .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
+#endif
+};
+
+int iwl4965_rate_control_register(void)
+{
+       pr_info("Registering 4965 rate control operations\n");
+       return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void iwl4965_rate_control_unregister(void)
+{
+       ieee80211_rate_control_unregister(&rs_4965_ops);
+}
similarity index 59%
rename from drivers/net/wireless/iwlwifi/iwl-agn-rx.c
rename to drivers/net/wireless/iwlegacy/iwl-4965-rx.c
index bbd40b7..b9fa2f6 100644 (file)
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
 
 #include "iwl-dev.h"
 #include "iwl-core.h"
-#include "iwl-agn-calib.h"
+#include "iwl-4965-calib.h"
 #include "iwl-sta.h"
 #include "iwl-io.h"
 #include "iwl-helpers.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
 
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
 
 {
@@ -58,14 +58,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
                    le32_to_cpu(missed_beacon->num_recvd_beacons),
                    le32_to_cpu(missed_beacon->num_expected_beacons));
                if (!test_bit(STATUS_SCANNING, &priv->status))
-                       iwl_init_sensitivity(priv);
+                       iwl4965_init_sensitivity(priv);
        }
 }
 
 /* Calculate noise level, based on measurements during network silence just
  *   before arriving beacon.  This measurement can be done only if we know
  *   exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
+static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
 {
        struct statistics_rx_non_phy *rx_info;
        int num_active_rx = 0;
@@ -73,11 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
        int bcn_silence_a, bcn_silence_b, bcn_silence_c;
        int last_rx_noise;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics)
-               rx_info = &(priv->_agn.statistics_bt.rx.general.common);
-       else
-               rx_info = &(priv->_agn.statistics.rx.general);
+       rx_info = &(priv->_4965.statistics.rx.general);
        bcn_silence_a =
                le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
        bcn_silence_b =
@@ -109,13 +105,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
                        last_rx_noise);
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
 /*
  *  based on the assumption of all statistics counter are in DWORD
  *  FIXME: This function is for debugging, do not deal with
  *  the case of counters roll-over.
  */
-static void iwl_accumulative_statistics(struct iwl_priv *priv,
+static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
                                        __le32 *stats)
 {
        int i, size;
@@ -125,28 +121,16 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
        struct statistics_general_common *general, *accum_general;
        struct statistics_tx *tx, *accum_tx;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
-               prev_stats = (__le32 *)&priv->_agn.statistics_bt;
-               accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
-               size = sizeof(struct iwl_bt_notif_statistics);
-               general = &priv->_agn.statistics_bt.general.common;
-               accum_general = &priv->_agn.accum_statistics_bt.general.common;
-               tx = &priv->_agn.statistics_bt.tx;
-               accum_tx = &priv->_agn.accum_statistics_bt.tx;
-               delta = (u32 *)&priv->_agn.delta_statistics_bt;
-               max_delta = (u32 *)&priv->_agn.max_delta_bt;
-       } else {
-               prev_stats = (__le32 *)&priv->_agn.statistics;
-               accum_stats = (u32 *)&priv->_agn.accum_statistics;
-               size = sizeof(struct iwl_notif_statistics);
-               general = &priv->_agn.statistics.general.common;
-               accum_general = &priv->_agn.accum_statistics.general.common;
-               tx = &priv->_agn.statistics.tx;
-               accum_tx = &priv->_agn.accum_statistics.tx;
-               delta = (u32 *)&priv->_agn.delta_statistics;
-               max_delta = (u32 *)&priv->_agn.max_delta;
-       }
+       prev_stats = (__le32 *)&priv->_4965.statistics;
+       accum_stats = (u32 *)&priv->_4965.accum_statistics;
+       size = sizeof(struct iwl_notif_statistics);
+       general = &priv->_4965.statistics.general.common;
+       accum_general = &priv->_4965.accum_statistics.general.common;
+       tx = &priv->_4965.statistics.tx;
+       accum_tx = &priv->_4965.accum_statistics.tx;
+       delta = (u32 *)&priv->_4965.delta_statistics;
+       max_delta = (u32 *)&priv->_4965.max_delta;
+
        for (i = sizeof(__le32); i < size;
             i += sizeof(__le32), stats++, prev_stats++, delta++,
             max_delta++, accum_stats++) {
@@ -161,23 +145,19 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
 
        /* reset accumulative statistics for "no-counter" type statistics */
        accum_general->temperature = general->temperature;
-       accum_general->temperature_m = general->temperature_m;
        accum_general->ttl_timestamp = general->ttl_timestamp;
-       accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
-       accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
-       accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
 }
 #endif
 
 #define REG_RECALIB_PERIOD (60)
 
 /**
- * iwl_good_plcp_health - checks for plcp error.
+ * iwl4965_good_plcp_health - checks for plcp error.
  *
  * When the plcp error exceeds the thresholds, reset the radio
  * to improve the throughput.
  */
-bool iwl_good_plcp_health(struct iwl_priv *priv,
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
                                struct iwl_rx_packet *pkt)
 {
        bool rc = true;
@@ -207,28 +187,15 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
                struct statistics_rx_phy *ofdm;
                struct statistics_rx_ht_phy *ofdm_ht;
 
-               if (priv->cfg->bt_params &&
-                   priv->cfg->bt_params->bt_statistics) {
-                       ofdm = &pkt->u.stats_bt.rx.ofdm;
-                       ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
-                       combined_plcp_delta =
-                          (le32_to_cpu(ofdm->plcp_err) -
-                          le32_to_cpu(priv->_agn.statistics_bt.
-                                      rx.ofdm.plcp_err)) +
-                          (le32_to_cpu(ofdm_ht->plcp_err) -
-                          le32_to_cpu(priv->_agn.statistics_bt.
-                                      rx.ofdm_ht.plcp_err));
-               } else {
-                       ofdm = &pkt->u.stats.rx.ofdm;
-                       ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
-                       combined_plcp_delta =
-                           (le32_to_cpu(ofdm->plcp_err) -
-                           le32_to_cpu(priv->_agn.statistics.
-                                       rx.ofdm.plcp_err)) +
-                           (le32_to_cpu(ofdm_ht->plcp_err) -
-                           le32_to_cpu(priv->_agn.statistics.
-                                       rx.ofdm_ht.plcp_err));
-               }
+               ofdm = &pkt->u.stats.rx.ofdm;
+               ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
+               combined_plcp_delta =
+                   (le32_to_cpu(ofdm->plcp_err) -
+                   le32_to_cpu(priv->_4965.statistics.
+                               rx.ofdm.plcp_err)) +
+                   (le32_to_cpu(ofdm_ht->plcp_err) -
+                   le32_to_cpu(priv->_4965.statistics.
+                               rx.ofdm_ht.plcp_err));
 
                if ((combined_plcp_delta > 0) &&
                    ((combined_plcp_delta * 100) / plcp_msec) >
@@ -259,58 +226,32 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
        return rc;
 }
 
-void iwl_rx_statistics(struct iwl_priv *priv,
+void iwl4965_rx_statistics(struct iwl_priv *priv,
                              struct iwl_rx_mem_buffer *rxb)
 {
        int change;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
-               IWL_DEBUG_RX(priv,
-                            "Statistics notification received (%d vs %d).\n",
-                            (int)sizeof(struct iwl_bt_notif_statistics),
-                            le32_to_cpu(pkt->len_n_flags) &
-                            FH_RSCSR_FRAME_SIZE_MSK);
-
-               change = ((priv->_agn.statistics_bt.general.common.temperature !=
-                          pkt->u.stats_bt.general.common.temperature) ||
-                          ((priv->_agn.statistics_bt.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-                          (pkt->u.stats_bt.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+       IWL_DEBUG_RX(priv,
+                    "Statistics notification received (%d vs %d).\n",
+                    (int)sizeof(struct iwl_notif_statistics),
+                    le32_to_cpu(pkt->len_n_flags) &
+                    FH_RSCSR_FRAME_SIZE_MSK);
+
+       change = ((priv->_4965.statistics.general.common.temperature !=
+                  pkt->u.stats.general.common.temperature) ||
+                  ((priv->_4965.statistics.flag &
+                  STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+                  (pkt->u.stats.flag &
+                  STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
 #endif
 
-       } else {
-               IWL_DEBUG_RX(priv,
-                            "Statistics notification received (%d vs %d).\n",
-                            (int)sizeof(struct iwl_notif_statistics),
-                            le32_to_cpu(pkt->len_n_flags) &
-                            FH_RSCSR_FRAME_SIZE_MSK);
+       iwl_legacy_recover_from_statistics(priv, pkt);
 
-               change = ((priv->_agn.statistics.general.common.temperature !=
-                          pkt->u.stats.general.common.temperature) ||
-                          ((priv->_agn.statistics.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-                          (pkt->u.stats.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
-       }
-
-       iwl_recover_from_statistics(priv, pkt);
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics)
-               memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
-                       sizeof(priv->_agn.statistics_bt));
-       else
-               memcpy(&priv->_agn.statistics, &pkt->u.stats,
-                       sizeof(priv->_agn.statistics));
+       memcpy(&priv->_4965.statistics, &pkt->u.stats,
+               sizeof(priv->_4965.statistics));
 
        set_bit(STATUS_STATISTICS, &priv->status);
 
@@ -323,34 +264,28 @@ void iwl_rx_statistics(struct iwl_priv *priv,
 
        if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
            (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
-               iwl_rx_calc_noise(priv);
+               iwl4965_rx_calc_noise(priv);
                queue_work(priv->workqueue, &priv->run_time_calib_work);
        }
        if (priv->cfg->ops->lib->temp_ops.temperature && change)
                priv->cfg->ops->lib->temp_ops.temperature(priv);
 }
 
-void iwl_reply_statistics(struct iwl_priv *priv,
+void iwl4965_reply_statistics(struct iwl_priv *priv,
                              struct iwl_rx_mem_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
        if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               memset(&priv->_agn.accum_statistics, 0,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+               memset(&priv->_4965.accum_statistics, 0,
                        sizeof(struct iwl_notif_statistics));
-               memset(&priv->_agn.delta_statistics, 0,
+               memset(&priv->_4965.delta_statistics, 0,
                        sizeof(struct iwl_notif_statistics));
-               memset(&priv->_agn.max_delta, 0,
+               memset(&priv->_4965.max_delta, 0,
                        sizeof(struct iwl_notif_statistics));
-               memset(&priv->_agn.accum_statistics_bt, 0,
-                       sizeof(struct iwl_bt_notif_statistics));
-               memset(&priv->_agn.delta_statistics_bt, 0,
-                       sizeof(struct iwl_bt_notif_statistics));
-               memset(&priv->_agn.max_delta_bt, 0,
-                       sizeof(struct iwl_bt_notif_statistics));
 #endif
                IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
        }
-       iwl_rx_statistics(priv, rxb);
+       iwl4965_rx_statistics(priv, rxb);
 }
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644 (file)
index 0000000..a262c23
--- /dev/null
@@ -0,0 +1,721 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-4965.h"
+
+static struct iwl_link_quality_cmd *
+iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
+{
+       int i, r;
+       struct iwl_link_quality_cmd *link_cmd;
+       u32 rate_flags = 0;
+       __le32 rate_n_flags;
+
+       link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+       if (!link_cmd) {
+               IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+               return NULL;
+       }
+       /* Set up the rate scaling to start at selected rate, fall back
+        * all the way down to 1M in IEEE order, and then spin on 1M */
+       if (priv->band == IEEE80211_BAND_5GHZ)
+               r = IWL_RATE_6M_INDEX;
+       else
+               r = IWL_RATE_1M_INDEX;
+
+       if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
+                               RATE_MCS_ANT_POS;
+       rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
+                                                  rate_flags);
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+               link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+       link_cmd->general_params.single_stream_ant_msk =
+                               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+       link_cmd->general_params.dual_stream_ant_msk =
+               priv->hw_params.valid_tx_ant &
+               ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+       if (!link_cmd->general_params.dual_stream_ant_msk) {
+               link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+       } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+               link_cmd->general_params.dual_stream_ant_msk =
+                       priv->hw_params.valid_tx_ant;
+       }
+
+       link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+       link_cmd->agg_params.agg_time_limit =
+               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+       link_cmd->sta_id = sta_id;
+
+       return link_cmd;
+}
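+
+/*
+ * The station-add paths below (IBSS BSSID, broadcast) use this helper
+ * to seed a newly added station's default rate scaling (link quality)
+ * table with a single safe rate before it is handed to the uCode.
+ */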
+
+/*
+ * iwl4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                            const u8 *addr, u8 *sta_id_r)
+{
+       int ret;
+       u8 sta_id;
+       struct iwl_link_quality_cmd *link_cmd;
+       unsigned long flags;
+
+       if (sta_id_r)
+               *sta_id_r = IWL_INVALID_STATION;
+
+       ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM\n", addr);
+               return ret;
+       }
+
+       if (sta_id_r)
+               *sta_id_r = sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].used |= IWL_STA_LOCAL;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       /* Set up default rate scaling table in device's station table */
+       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+       if (!link_cmd) {
+               IWL_ERR(priv,
+                       "Unable to initialize rate scaling for station %pM.\n",
+                       addr);
+               return -ENOMEM;
+       }
+
+       ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+       if (ret)
+               IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
+                                     struct iwl_rxon_context *ctx,
+                                     bool send_if_empty)
+{
+       int i, not_empty = 0;
+       u8 buff[sizeof(struct iwl_wep_cmd) +
+               sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
+       struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
+       size_t cmd_size  = sizeof(struct iwl_wep_cmd);
+       struct iwl_host_cmd cmd = {
+               .id = ctx->wep_key_cmd,
+               .data = wep_cmd,
+               .flags = CMD_SYNC,
+       };
+
+       might_sleep();
+
+       memset(wep_cmd, 0, cmd_size +
+                       (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
+
+       for (i = 0; i < WEP_KEYS_MAX ; i++) {
+               wep_cmd->key[i].key_index = i;
+               if (ctx->wep_keys[i].key_size) {
+                       wep_cmd->key[i].key_offset = i;
+                       not_empty = 1;
+               } else {
+                       wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+               }
+
+               wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+               memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+                               ctx->wep_keys[i].key_size);
+       }
+
+       wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+       wep_cmd->num_keys = WEP_KEYS_MAX;
+
+       cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
+
+       cmd.len = cmd_size;
+
+       if (not_empty || send_if_empty)
+               return iwl_legacy_send_cmd(priv, &cmd);
+       else
+               return 0;
+}
+
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+                                struct iwl_rxon_context *ctx)
+{
+       lockdep_assert_held(&priv->mutex);
+
+       return iwl4965_static_wepkey_cmd(priv, ctx, false);
+}
+
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx,
+                              struct ieee80211_key_conf *keyconf)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
+                     keyconf->keyidx);
+
+       memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_WEP(priv,
+               "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+               /* but keys in device are clear anyway so return success */
+               return 0;
+       }
+       ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
+       IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+                     keyconf->keyidx, ret);
+
+       return ret;
+}
+
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_key_conf *keyconf)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (keyconf->keylen != WEP_KEY_LEN_128 &&
+           keyconf->keylen != WEP_KEY_LEN_64) {
+               IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
+               return -EINVAL;
+       }
+
+       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->hw_key_idx = HW_KEY_DEFAULT;
+       priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+       ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+       memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+                                                       keyconf->keylen);
+
+       ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
+       IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
+               keyconf->keylen, keyconf->keyidx, ret);
+
+       return ret;
+}
+
+static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
+                                       struct iwl_rxon_context *ctx,
+                                       struct ieee80211_key_conf *keyconf,
+                                       u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+       key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (keyconf->keylen == WEP_KEY_LEN_128)
+               key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+       priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+       memcpy(priv->stations[sta_id].keyinfo.key,
+                               keyconf->key, keyconf->keylen);
+
+       memcpy(&priv->stations[sta_id].sta.key.key[3],
+                               keyconf->key, keyconf->keylen);
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
+                                        struct iwl_rxon_context *ctx,
+                                        struct ieee80211_key_conf *keyconf,
+                                        u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
+              keyconf->keylen);
+
+       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
+              keyconf->keylen);
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                        sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
+                                        struct iwl_rxon_context *ctx,
+                                        struct ieee80211_key_conf *keyconf,
+                                        u8 sta_id)
+{
+       unsigned long flags;
+       int ret = 0;
+       __le16 key_flags = 0;
+
+       key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = 16;
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+
+
+       /* This copy is actually not needed: we get the key with each TX */
+       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return ret;
+}
+
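+/*
+ * Refresh the phase-1 TKIP key for a station.  Unlike the CCMP and WEP
+ * paths above, no synchronous REPLY_ADD_STA is sent when a TKIP key is
+ * installed; the key is pushed to the uCode here (asynchronously) and
+ * the phase-2 key material travels with each Tx command.
+ */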
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+                        struct iwl_rxon_context *ctx,
+                        struct ieee80211_key_conf *keyconf,
+                        struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+       u8 sta_id;
+       unsigned long flags;
+       int i;
+
+       if (iwl_legacy_scan_cancel(priv)) {
+               /* cancel scan failed, just live w/ bad key and rely
+                  briefly on SW decryption */
+               return;
+       }
+
+       sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
+       if (sta_id == IWL_INVALID_STATION)
+               return;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+       for (i = 0; i < 5; i++)
+               priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+                       cpu_to_le16(phase1key[i]);
+
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
+
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+                          struct iwl_rxon_context *ctx,
+                          struct ieee80211_key_conf *keyconf,
+                          u8 sta_id)
+{
+       unsigned long flags;
+       u16 key_flags;
+       u8 keyidx;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       ctx->key_mapping_keys--;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
+       keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+       IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
+                     keyconf->keyidx, sta_id);
+
+       if (keyconf->keyidx != keyidx) {
+               /* We need to remove a key whose index differs from the one
+                * in the uCode. This means that the key we need to remove has
+                * been replaced by another one with a different index.
+                * Don't do anything and return success.
+                */
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+
+       if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+               IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+                           keyconf->keyidx, key_flags);
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+
+       if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
+               &priv->ucode_key_table))
+               IWL_ERR(priv, "index %d not used in uCode key table.\n",
+                       priv->stations[sta_id].sta.key.key_offset);
+       memset(&priv->stations[sta_id].keyinfo, 0,
+                                       sizeof(struct iwl_hw_key));
+       memset(&priv->stations[sta_id].sta.key, 0,
+                                       sizeof(struct iwl4965_keyinfo));
+       priv->stations[sta_id].sta.key.key_flags =
+                       STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+       priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_WEP(priv,
+                "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                       struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       ctx->key_mapping_keys++;
+       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
+                                                       keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
+                                                       keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
+                                                       keyconf, sta_id);
+               break;
+       default:
+               IWL_ERR(priv,
+                       "Unknown alg: %s cipher = %x\n", __func__,
+                       keyconf->cipher);
+               ret = -EINVAL;
+       }
+
+       IWL_DEBUG_WEP(priv,
+               "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+                     sta_id, ret);
+
+       return ret;
+}
+
+/**
+ * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx)
+{
+       struct iwl_link_quality_cmd *link_cmd;
+       unsigned long flags;
+       u8 sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
+                                                               false, NULL);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Unable to prepare broadcast station\n");
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+               return -EINVAL;
+       }
+
+       priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+       priv->stations[sta_id].used |= IWL_STA_BCAST;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+       if (!link_cmd) {
+               IWL_ERR(priv,
+                       "Unable to initialize rate scaling for bcast station.\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+/**
+ * iwl4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int iwl4965_update_bcast_station(struct iwl_priv *priv,
+                                   struct iwl_rxon_context *ctx)
+{
+       unsigned long flags;
+       struct iwl_link_quality_cmd *link_cmd;
+       u8 sta_id = ctx->bcast_sta_id;
+
+       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+       if (!link_cmd) {
+               IWL_ERR(priv,
+               "Unable to initialize rate scaling for bcast station.\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       if (priv->stations[sta_id].lq)
+               kfree(priv->stations[sta_id].lq);
+       else
+               IWL_DEBUG_INFO(priv,
+               "Bcast station rate scaling has not been initialized yet.\n");
+       priv->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+int iwl4965_update_bcast_stations(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+       int ret = 0;
+
+       for_each_context(priv, ctx) {
+               ret = iwl4965_update_bcast_station(priv, ctx);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/**
+ * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+{
+       unsigned long flags;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /* Remove "disable" flag, to enable Tx for this TID */
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+       priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                        int tid, u16 ssn)
+{
+       unsigned long flags;
+       int sta_id;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       sta_id = iwl_legacy_sta_id(sta);
+       if (sta_id == IWL_INVALID_STATION)
+               return -ENXIO;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags_msk = 0;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+       priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
+       priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                       int tid)
+{
+       unsigned long flags;
+       int sta_id;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       sta_id = iwl_legacy_sta_id(sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags_msk = 0;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+       priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                               sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+void
+iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+       priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+       priv->stations[sta_id].sta.sta.modify_mask =
+                                       STA_MODIFY_SLEEP_TX_COUNT_MSK;
+       priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       iwl_legacy_send_add_sta(priv,
+                               &priv->stations[sta_id].sta, CMD_ASYNC);
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644 (file)
index 0000000..5c40502
--- /dev/null
@@ -0,0 +1,1369 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ *     VO      0
+ *     VI      1
+ *     BE      2
+ *     BK      3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+       IEEE80211_AC_BE,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BE,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VO,
+       IEEE80211_AC_VO
+};
+
+static inline int iwl4965_get_ac_from_tid(u16 tid)
+{
+       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+               return tid_to_ac[tid];
+
+       /* no support for TIDs 8-15 yet */
+       return -EINVAL;
+}
+
+static inline int
+iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
+{
+       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+               return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+       /* no support for TIDs 8-15 yet */
+       return -EINVAL;
+}
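+
+/*
+ * For example, a frame with TID 6 maps to IEEE80211_AC_VO through
+ * tid_to_ac[] above, and iwl4965_get_fifo_from_tid() then returns the
+ * FIFO configured in ctx->ac_to_fifo[IEEE80211_AC_VO].  TIDs 8-15 are
+ * not supported yet, so both helpers return -EINVAL for them.
+ */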
+
+/*
+ * Handle building the REPLY_TX command.
+ */
+static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
+                                       struct sk_buff *skb,
+                                       struct iwl_tx_cmd *tx_cmd,
+                                       struct ieee80211_tx_info *info,
+                                       struct ieee80211_hdr *hdr,
+                                       u8 std_id)
+{
+       __le16 fc = hdr->frame_control;
+       __le32 tx_flags = tx_cmd->tx_flags;
+
+       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+               tx_flags |= TX_CMD_FLG_ACK_MSK;
+               if (ieee80211_is_mgmt(fc))
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (ieee80211_is_probe_resp(fc) &&
+                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+                       tx_flags |= TX_CMD_FLG_TSF_MSK;
+       } else {
+               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       if (ieee80211_is_back_req(fc))
+               tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+       tx_cmd->sta_id = std_id;
+       if (ieee80211_has_morefrags(fc))
+               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tx_cmd->tid_tspec = qc[0] & 0xf;
+               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+       } else {
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
+
+       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+               else
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+       } else {
+               tx_cmd->timeout.pm_frame_timeout = 0;
+       }
+
+       tx_cmd->driver_txop = 0;
+       tx_cmd->tx_flags = tx_flags;
+       tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT         60
+
+static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
+                             struct iwl_tx_cmd *tx_cmd,
+                             struct ieee80211_tx_info *info,
+                             __le16 fc)
+{
+       u32 rate_flags;
+       int rate_idx;
+       u8 rts_retry_limit;
+       u8 data_retry_limit;
+       u8 rate_plcp;
+
+       /* Set retry limit on DATA packets and Probe Responses */
+       if (ieee80211_is_probe_resp(fc))
+               data_retry_limit = 3;
+       else
+               data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
+       tx_cmd->data_retry_limit = data_retry_limit;
+
+       /* Set retry limit on RTS packets */
+       rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+       if (data_retry_limit < rts_retry_limit)
+               rts_retry_limit = data_retry_limit;
+       tx_cmd->rts_retry_limit = rts_retry_limit;
+
+       /* DATA packets will use the uCode station table for rate/antenna
+        * selection */
+       if (ieee80211_is_data(fc)) {
+               tx_cmd->initial_rate_index = 0;
+               tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+               return;
+       }
+
+       /*
+        * If the current TX rate stored in mac80211 has the MCS bit set, it's
+        * not really a TX rate.  Thus, we use the lowest supported rate for
+        * this band.  Also use the lowest supported rate if the stored rate
+        * index is invalid.
+        */
+       rate_idx = info->control.rates[0].idx;
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+                       (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+               rate_idx = rate_lowest_index(&priv->bands[info->band],
+                               info->control.sta);
+       /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rate_idx += IWL_FIRST_OFDM_RATE;
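+       /*
+        * e.g. mac80211's lowest 5 GHz rate (index 0, 6 Mb/s) becomes
+        * IWL_FIRST_OFDM_RATE in the driver's combined CCK+OFDM rate
+        * table before the PLCP value is looked up below.
+        */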
+       /* Get PLCP rate for tx_cmd->rate_n_flags */
+       rate_plcp = iwlegacy_rates[rate_idx].plcp;
+       /* Zero out flags for this packet */
+       rate_flags = 0;
+
+       /* Set CCK flag as needed */
+       if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       /* Set up antennas */
+       priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+                                     priv->hw_params.valid_tx_ant);
+
+       rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+       /* Set the rate in the TX cmd */
+       tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag,
+                                     int sta_id)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+               if (info->flags & IEEE80211_TX_CTL_AMPDU)
+                       tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+               IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_TKIP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+               ieee80211_get_tkip_key(keyconf, skb_frag,
+                       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+               IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_WEP104:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_WEP40:
+               tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+                       (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+               memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+               IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+                            "with key %d\n", keyconf->keyidx);
+               break;
+
+       default:
+               IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
+               break;
+       }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_sta *sta = info->control.sta;
+       struct iwl_station_priv *sta_priv = NULL;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       struct iwl_device_cmd *out_cmd;
+       struct iwl_cmd_meta *out_meta;
+       struct iwl_tx_cmd *tx_cmd;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       int txq_id;
+       dma_addr_t phys_addr;
+       dma_addr_t txcmd_phys;
+       dma_addr_t scratch_phys;
+       u16 len, firstlen, secondlen;
+       u16 seq_number = 0;
+       __le16 fc;
+       u8 hdr_len;
+       u8 sta_id;
+       u8 wait_write_ptr = 0;
+       u8 tid = 0;
+       u8 *qc = NULL;
+       unsigned long flags;
+       bool is_agg = false;
+
+       if (info->control.vif)
+               ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+               goto drop_unlock;
+       }
+
+       fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (ieee80211_is_auth(fc))
+               IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+       else if (ieee80211_is_assoc_req(fc))
+               IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+       else if (ieee80211_is_reassoc_req(fc))
+               IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+       hdr_len = ieee80211_hdrlen(fc);
+
+       /* Find index into station table for destination station */
+       sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+                              hdr->addr1);
+               goto drop_unlock;
+       }
+
+       IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+       if (sta)
+               sta_priv = (void *)sta->drv_priv;
+
+       if (sta_priv && sta_priv->asleep &&
+           (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+               /*
+                * This sends an asynchronous command to the device,
+                * but we can rely on it being processed before the
+                * next frame is processed -- and the next frame to
+                * this station is the one that will consume this
+                * counter.
+                * For now set the counter to just 1 since we do not
+                * support uAPSD yet.
+                */
+               iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
+       }
+
+       /*
+        * Send this frame after DTIM -- there's a special queue
+        * reserved for this for contexts that support AP mode.
+        */
+       if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+               txq_id = ctx->mcast_queue;
+               /*
+                * The microcode will clear the more data
+                * bit in the last frame it transmits.
+                */
+               hdr->frame_control |=
+                       cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+       } else
+               txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+       /* irqs already disabled/saved above when locking priv->lock */
+       spin_lock(&priv->sta_lock);
+
+       if (ieee80211_is_data_qos(fc)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+                       spin_unlock(&priv->sta_lock);
+                       goto drop_unlock;
+               }
+               seq_number = priv->stations[sta_id].tid[tid].seq_number;
+               seq_number &= IEEE80211_SCTL_SEQ;
+               hdr->seq_ctrl = hdr->seq_ctrl &
+                               cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(seq_number);
+               seq_number += 0x10;
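+               /*
+                * The 802.11 sequence number occupies bits 4-15 of
+                * seq_ctrl (bits 0-3 are the fragment number), which is
+                * why the counter advances in steps of 0x10.
+                */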
+               /* aggregation is on for this <sta,tid> */
+               if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+                   priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+                       txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+                       is_agg = true;
+               }
+       }
+
+       txq = &priv->txq[txq_id];
+       q = &txq->q;
+
+       if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
+               spin_unlock(&priv->sta_lock);
+               goto drop_unlock;
+       }
+
+       if (ieee80211_is_data_qos(fc)) {
+               priv->stations[sta_id].tid[tid].tfds_in_queue++;
+               if (!ieee80211_has_morefrags(fc))
+                       priv->stations[sta_id].tid[tid].seq_number = seq_number;
+       }
+
+       spin_unlock(&priv->sta_lock);
+
+       /* Set up driver data for this TFD */
+       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+       txq->txb[q->write_ptr].skb = skb;
+       txq->txb[q->write_ptr].ctx = ctx;
+
+       /* Set up first empty entry in queue's array of Tx/cmd buffers */
+       out_cmd = txq->cmd[q->write_ptr];
+       out_meta = &txq->meta[q->write_ptr];
+       tx_cmd = &out_cmd->cmd.tx;
+       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+       memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+       /*
+        * Set up the Tx-command (not MAC!) header.
+        * Store the chosen Tx queue and TFD index within the sequence field;
+        * after Tx, uCode's Tx response will return this value so driver can
+        * locate the frame within the tx queue and do post-tx processing.
+        */
+       out_cmd->hdr.cmd = REPLY_TX;
+       out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+                               INDEX_TO_SEQ(q->write_ptr)));
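+       /*
+        * (Assuming the usual layout -- queue id in the upper byte, TFD
+        * index in the lower byte -- queue 2 / write index 5 would give
+        * a sequence value of 0x0205.)
+        */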
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+       /* Total # bytes to be transmitted */
+       len = (u16)skb->len;
+       tx_cmd->len = cpu_to_le16(len);
+
+       if (info->control.hw_key)
+               iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+       /* TODO need this for burst mode later on */
+       iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
+       iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+
+       iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+       iwl_legacy_update_stats(priv, true, fc, len);
+       /*
+        * Use the first empty entry in this queue's command buffer array
+        * to contain the Tx command and MAC header concatenated together
+        * (payload data will be in another buffer).
+        * Size of this varies, due to varying MAC header length.
+        * If end is not dword aligned, we'll have 2 extra bytes at the end
+        * of the MAC header (device reads on dword boundaries).
+        * We'll tell device about this padding later.
+        */
+       len = sizeof(struct iwl_tx_cmd) +
+               sizeof(struct iwl_cmd_header) + hdr_len;
+       firstlen = (len + 3) & ~3;
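+       /*
+        * e.g. len == 62 rounds firstlen up to 64; the two pad bytes are
+        * then flagged to the device below via TX_CMD_FLG_MH_PAD_MSK.
+        */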
+
+       /* Tell NIC about any 2-byte padding after MAC header */
+       if (firstlen != len)
+               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+       /* Physical address of this Tx command's header (not MAC header!),
+        * within command buffer array. */
+       txcmd_phys = pci_map_single(priv->pci_dev,
+                                   &out_cmd->hdr, firstlen,
+                                   PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, firstlen);
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                  txcmd_phys, firstlen, 1, 0);
+
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
+
+       /* Set up TFD's 2nd entry to point directly to remainder of skb,
+        * if any (802.11 null frames have no payload). */
+       secondlen = skb->len - hdr_len;
+       if (secondlen > 0) {
+               phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+                                          secondlen, PCI_DMA_TODEVICE);
+               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                          phys_addr, secondlen,
+                                                          0, 0);
+       }
+
+       scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+                               offsetof(struct iwl_tx_cmd, scratch);
+
+       /* take back ownership of DMA buffer to enable update */
+       pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+                                   firstlen, PCI_DMA_BIDIRECTIONAL);
+       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+       tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
+
+       IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+                    le16_to_cpu(out_cmd->hdr.sequence));
+       IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+       /* Set up entry for this TFD in Tx byte-count array */
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+                                                    le16_to_cpu(tx_cmd->len));
+
+       pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+                                      firstlen, PCI_DMA_BIDIRECTIONAL);
+
+       trace_iwlwifi_legacy_dev_tx(priv,
+                            &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+                            sizeof(struct iwl_tfd),
+                            &out_cmd->hdr, firstlen,
+                            skb->data + hdr_len, secondlen);
+
+       /* Tell device the write index *just past* this latest filled TFD */
+       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+       iwl_legacy_txq_update_write_ptr(priv, txq);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /*
+        * At this point the frame is "transmitted" successfully
+        * and we will get a TX status notification eventually,
+        * regardless of the value of ret. "ret" only indicates
+        * whether or not we should update the write pointer.
+        */
+
+       /*
+        * Avoid atomic ops if it isn't an associated client.
+        * Also, if this is a packet for aggregation, don't
+        * increase the counter because the ucode will stop
+        * aggregation queues when their respective station
+        * goes to sleep.
+        */
+       if (sta_priv && sta_priv->client && !is_agg)
+               atomic_inc(&sta_priv->pending_frames);
+
+       if ((iwl_legacy_queue_space(q) < q->high_mark) &&
+                       priv->mac80211_registered) {
+               if (wait_write_ptr) {
+                       spin_lock_irqsave(&priv->lock, flags);
+                       txq->need_update = 1;
+                       iwl_legacy_txq_update_write_ptr(priv, txq);
+                       spin_unlock_irqrestore(&priv->lock, flags);
+               } else {
+                       iwl_legacy_stop_queue(priv, txq);
+               }
+       }
+
+       return 0;
+
+drop_unlock:
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return -1;
+}
+
+static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
+                                   struct iwl_dma_ptr *ptr, size_t size)
+{
+       ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+                                      GFP_KERNEL);
+       if (!ptr->addr)
+               return -ENOMEM;
+       ptr->size = size;
+       return 0;
+}
+
+static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
+                                   struct iwl_dma_ptr *ptr)
+{
+       if (unlikely(!ptr->addr))
+               return;
+
+       dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+       memset(ptr, 0, sizeof(*ptr));
+}
+
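+/*
+ * The two helpers above hand out coherent DMA buffers that the device
+ * addresses by their physical (DMA) address; in this file they back the
+ * scheduler byte-count tables (priv->scd_bc_tbls) and the "keep warm"
+ * buffer (priv->kw) allocated in iwl4965_txq_ctx_alloc() below.
+ */
+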
+/**
+ * iwl4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+       int txq_id;
+
+       /* Tx queues */
+       if (priv->txq) {
+               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+                       if (txq_id == priv->cmd_queue)
+                               iwl_legacy_cmd_queue_free(priv);
+                       else
+                               iwl_legacy_tx_queue_free(priv, txq_id);
+       }
+       iwl4965_free_dma_ptr(priv, &priv->kw);
+
+       iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+       /* free tx queue structure */
+       iwl_legacy_txq_mem(priv);
+}
+
+/**
+ * iwl4965_txq_ctx_alloc - allocate TX queue context
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
+{
+       int ret;
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       /* Free all tx/cmd queues and keep-warm buffer */
+       iwl4965_hw_txq_ctx_free(priv);
+
+       ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+                               priv->hw_params.scd_bc_tbls_size);
+       if (ret) {
+               IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+               goto error_bc_tbls;
+       }
+       /* Alloc keep-warm buffer */
+       ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+       if (ret) {
+               IWL_ERR(priv, "Keep Warm allocation failed\n");
+               goto error_kw;
+       }
+
+       /* allocate tx queue structure */
+       ret = iwl_legacy_alloc_txq_mem(priv);
+       if (ret)
+               goto error;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       iwl4965_txq_set_sched(priv, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               slots_num = (txq_id == priv->cmd_queue) ?
+                                       TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               ret = iwl_legacy_tx_queue_init(priv,
+                                       &priv->txq[txq_id], slots_num,
+                                      txq_id);
+               if (ret) {
+                       IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+                       goto error;
+               }
+       }
+
+       return ret;
+
+ error:
+       iwl4965_hw_txq_ctx_free(priv);
+       iwl4965_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+       iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+       return ret;
+}
+
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
+{
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       iwl4965_txq_set_sched(priv, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4) */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               slots_num = txq_id == priv->cmd_queue ?
+                           TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
+                                               slots_num, txq_id);
+       }
+}
+
+/**
+ * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
+{
+       int ch, txq_id;
+       unsigned long flags;
+
+       /* Turn off all Tx DMA fifos */
+       spin_lock_irqsave(&priv->lock, flags);
+
+       iwl4965_txq_set_sched(priv, 0);
+
+       /* Stop each Tx DMA channel, and wait for it to be idle */
+       for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+               iwl_legacy_write_direct32(priv,
+                               FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+               if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+                                   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+                                   1000))
+                       IWL_ERR(priv, "Failing on timeout while stopping"
+                           " DMA channel %d [0x%08x]", ch,
+                           iwl_legacy_read_direct32(priv,
+                                       FH_TSSR_TX_STATUS_REG));
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!priv->txq)
+               return;
+
+       /* Unmap DMA from host system and free skb's */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+               if (txq_id == priv->cmd_queue)
+                       iwl_legacy_cmd_queue_unmap(priv);
+               else
+                       iwl_legacy_tx_queue_unmap(priv, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
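+/*
+ * For example, with queues 0-3 (EDCA), 4 (command) and 5-6 (reserved) already
+ * marked in txq_ctx_active_msk, the first call typically hands out queue 7,
+ * the next one queue 8, and so on; -1 means every queue is already in use.
+ */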
+static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+       int txq_id;
+
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+               if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+                       return txq_id;
+       return -1;
+}
+
+/**
+ * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
+                                           u16 txq_id)
+{
+       /* Simply stop the queue, but don't change any configuration;
+        * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+       iwl_legacy_write_prph(priv,
+               IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+               (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+               (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+                                       u16 txq_id)
+{
+       u32 tbl_dw_addr;
+       u32 tbl_dw;
+       u16 scd_q2ratid;
+
+       scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+       tbl_dw_addr = priv->scd_base_addr +
+                       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+       tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
+
+       if (txq_id & 0x1)
+               tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+       else
+               tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
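+       /*
+        * Each table dword holds the RA/TID mapping for two queues: an odd
+        * txq_id occupies the upper 16 bits and an even txq_id the lower 16
+        * bits, which is why the existing dword is read back and merged above
+        * rather than overwritten.
+        */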
+
+       iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+       return 0;
+}
+
+/**
+ * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE:  txq_id must be at least IWL49_FIRST_AMPDU_QUEUE,
+ *        i.e. it must be one of the higher queues used for aggregation
+ */
+static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+                                 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+       unsigned long flags;
+       u16 ra_tid;
+       int ret;
+
+       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+           (IWL49_FIRST_AMPDU_QUEUE +
+               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+               IWL_WARN(priv,
+                       "queue number out of range: %d, must be %d to %d\n",
+                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
+                       IWL49_FIRST_AMPDU_QUEUE +
+                       priv->cfg->base_params->num_of_ampdu_queues - 1);
+               return -EINVAL;
+       }
+
+       ra_tid = BUILD_RAxTID(sta_id, tid);
+
+       /* Modify device's station table to Tx this TID */
+       ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Stop this Tx queue before configuring it */
+       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+       /* Map receiver-address / traffic-ID to this queue */
+       iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+       /* Set this queue as a chain-building queue */
+       iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+       /* Place first TFD at index corresponding to start sequence number.
+        * Assumes that ssn_idx is valid (!= 0xFFF) */
+       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
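+       /*
+        * Example: ssn_idx = 0x123 starts both pointers at index 0x23; the
+        * "& 0xff" wrap reflects the 256-entry TFD ring, so the first
+        * aggregated frame lands in the slot its sequence number maps to.
+        */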
+
+       /* Set up Tx window size and frame limit for this queue */
+       iwl_legacy_write_targ_mem(priv,
+               priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+               (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+       iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+               IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+               (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
+               & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+       iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+       /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return 0;
+}
+
+
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+       int sta_id;
+       int tx_fifo;
+       int txq_id;
+       int ret;
+       unsigned long flags;
+       struct iwl_tid_data *tid_data;
+
+       tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+       if (unlikely(tx_fifo < 0))
+               return tx_fifo;
+
+       IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+                       __func__, sta->addr, tid);
+
+       sta_id = iwl_legacy_sta_id(sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Start AGG on invalid station\n");
+               return -ENXIO;
+       }
+       if (unlikely(tid >= MAX_TID_COUNT))
+               return -EINVAL;
+
+       if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+               IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+               return -ENXIO;
+       }
+
+       txq_id = iwl4965_txq_ctx_activate_free(priv);
+       if (txq_id == -1) {
+               IWL_ERR(priv, "No free aggregation queue available\n");
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       tid_data = &priv->stations[sta_id].tid[tid];
+       *ssn = SEQ_TO_SN(tid_data->seq_number);
+       tid_data->agg.txq_id = txq_id;
+       iwl_legacy_set_swq_id(&priv->txq[txq_id],
+                               iwl4965_get_ac_from_tid(tid), txq_id);
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
+                                                 sta_id, tid, *ssn);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       tid_data = &priv->stations[sta_id].tid[tid];
+       if (tid_data->tfds_in_queue == 0) {
+               IWL_DEBUG_HT(priv, "HW queue is empty\n");
+               tid_data->agg.state = IWL_AGG_ON;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+       } else {
+               IWL_DEBUG_HT(priv,
+                       "HW queue is NOT empty: %d packets in HW queue\n",
+                            tid_data->tfds_in_queue);
+               tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       return ret;
+}
+
+/**
+ * iwl4965_txq_agg_disable - deactivate an aggregation queue
+ *
+ * txq_id must be at least IWL49_FIRST_AMPDU_QUEUE and
+ * priv->lock must be held by the caller
+ */
+static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+                                  u16 ssn_idx, u8 tx_fifo)
+{
+       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+           (IWL49_FIRST_AMPDU_QUEUE +
+               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+               IWL_WARN(priv,
+                       "queue number out of range: %d, must be %d to %d\n",
+                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
+                       IWL49_FIRST_AMPDU_QUEUE +
+                       priv->cfg->base_params->num_of_ampdu_queues - 1);
+               return -EINVAL;
+       }
+
+       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+       iwl_legacy_clear_bits_prph(priv,
+                       IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+       /* supposes that ssn_idx is valid (!= 0xFFF) */
+       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+       iwl_legacy_clear_bits_prph(priv,
+                        IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+       iwl_txq_ctx_deactivate(priv, txq_id);
+       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+       return 0;
+}
+
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta, u16 tid)
+{
+       int tx_fifo_id, txq_id, sta_id, ssn;
+       struct iwl_tid_data *tid_data;
+       int write_ptr, read_ptr;
+       unsigned long flags;
+
+       tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+       if (unlikely(tx_fifo_id < 0))
+               return tx_fifo_id;
+
+       sta_id = iwl_legacy_sta_id(sta);
+
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       tid_data = &priv->stations[sta_id].tid[tid];
+       ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+       txq_id = tid_data->agg.txq_id;
+
+       switch (priv->stations[sta_id].tid[tid].agg.state) {
+       case IWL_EMPTYING_HW_QUEUE_ADDBA:
+               /*
+                * This can happen if the peer stops aggregation
+                * again before we've had a chance to drain the
+                * queue we selected previously, i.e. before the
+                * session was really started completely.
+                */
+               IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+               goto turn_off;
+       case IWL_AGG_ON:
+               break;
+       default:
+               IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+       }
+
+       write_ptr = priv->txq[txq_id].q.write_ptr;
+       read_ptr = priv->txq[txq_id].q.read_ptr;
+
+       /* The queue is not empty */
+       if (write_ptr != read_ptr) {
+               IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+               priv->stations[sta_id].tid[tid].agg.state =
+                               IWL_EMPTYING_HW_QUEUE_DELBA;
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+
+       IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ turn_off:
+       priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+       /* do not restore/save irqs */
+       spin_unlock(&priv->sta_lock);
+       spin_lock(&priv->lock);
+
+       /*
+        * The only reason this call can fail is a queue number out of range,
+        * which can happen if the uCode is reloaded and all the station
+        * information is lost. If it is outside the range, there is no need
+        * to deactivate the uCode queue; just return "success" to allow
+        * mac80211 to clean up its own data.
+        */
+       iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+       return 0;
+}
+
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+                          int sta_id, u8 tid, int txq_id)
+{
+       struct iwl_queue *q = &priv->txq[txq_id].q;
+       u8 *addr = priv->stations[sta_id].sta.sta.addr;
+       struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+       struct iwl_rxon_context *ctx;
+
+       ctx = &priv->contexts[priv->stations[sta_id].ctxid];
+
+       lockdep_assert_held(&priv->sta_lock);
+
+       switch (priv->stations[sta_id].tid[tid].agg.state) {
+       case IWL_EMPTYING_HW_QUEUE_DELBA:
+               /* We are reclaiming the last packet of the
+                * aggregated HW queue */
+               if ((txq_id  == tid_data->agg.txq_id) &&
+                   (q->read_ptr == q->write_ptr)) {
+                       u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+                       int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
+                       IWL_DEBUG_HT(priv,
+                               "HW queue empty: continue DELBA flow\n");
+                       iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
+                       tid_data->agg.state = IWL_AGG_OFF;
+                       ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+               }
+               break;
+       case IWL_EMPTYING_HW_QUEUE_ADDBA:
+               /* We are reclaiming the last packet of the queue */
+               if (tid_data->tfds_in_queue == 0) {
+                       IWL_DEBUG_HT(priv,
+                               "HW queue empty: continue ADDBA flow\n");
+                       tid_data->agg.state = IWL_AGG_ON;
+                       ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
+                                    struct iwl_rxon_context *ctx,
+                                    const u8 *addr1)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_station_priv *sta_priv;
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(ctx->vif, addr1);
+       if (sta) {
+               sta_priv = (void *)sta->drv_priv;
+               /* avoid atomic ops if this isn't a client */
+               if (sta_priv->client &&
+                   atomic_dec_return(&sta_priv->pending_frames) == 0)
+                       ieee80211_sta_block_awake(priv->hw, sta, false);
+       }
+       rcu_read_unlock();
+}
+
+static void
+iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
+                            bool is_agg)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+
+       if (!is_agg)
+               iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
+
+       ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+}
+
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+       struct iwl_tx_info *tx_info;
+       int nfreed = 0;
+       struct ieee80211_hdr *hdr;
+
+       if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
+               IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+                         "is out of range [0-%d] %d %d.\n", txq_id,
+                         index, q->n_bd, q->write_ptr, q->read_ptr);
+               return 0;
+       }
+
+       for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+            q->read_ptr != index;
+            q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               tx_info = &txq->txb[txq->q.read_ptr];
+               iwl4965_tx_status(priv, tx_info,
+                                txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
+
+               hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+               if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+                       nfreed++;
+               tx_info->skb = NULL;
+
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+       }
+       return nfreed;
+}
+
+/**
+ * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+                                struct iwl_ht_agg *agg,
+                                struct iwl_compressed_ba_resp *ba_resp)
+
+{
+       int i, sh, ack;
+       u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+       int successes = 0;
+       struct ieee80211_tx_info *info;
+       u64 bitmap, sent_bitmap;
+
+       if (unlikely(!agg->wait_for_ba))  {
+               if (unlikely(ba_resp->bitmap))
+                       IWL_ERR(priv, "Received BA when not expected\n");
+               return -EINVAL;
+       }
+
+       /* Mark that the expected block-ack response arrived */
+       agg->wait_for_ba = 0;
+       IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
+                                                       ba_resp->seq_ctl);
+
+       /* Calculate shift to align block-ack bits with our Tx window bits */
+       sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+       if (sh < 0) /* something is wrong with the indices */
+               sh += 0x100;
+
+       if (agg->frame_count > (64 - sh)) {
+               IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+               return -1;
+       }
+
+       /* align the block-ack bitmap with the start of our Tx window */
+       bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
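+       /*
+        * Illustration: if agg->start_idx is 10 and the block-ack window
+        * starts at index 8, then sh = 2 and the shift above drops the bits
+        * for frames 8 and 9, so bit 0 of "bitmap" lines up with the first
+        * frame of our Tx window.
+        */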
+
+       /* check for success or failure according to the
+        * transmitted bitmap and block-ack bitmap */
+       sent_bitmap = bitmap & agg->bitmap;
+
+       /* For each frame attempted in aggregation,
+        * update driver's record of tx frame's status. */
+       i = 0;
+       while (sent_bitmap) {
+               ack = sent_bitmap & 1ULL;
+               successes += ack;
+               IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+                       ack ? "ACK" : "NACK", i,
+                       (agg->start_idx + i) & 0xff,
+                       agg->start_idx + i);
+               sent_bitmap >>= 1;
+               ++i;
+       }
+
+       IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
+                                  (unsigned long long)bitmap);
+
+       info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
+       memset(&info->status, 0, sizeof(info->status));
+       info->flags |= IEEE80211_TX_STAT_ACK;
+       info->flags |= IEEE80211_TX_STAT_AMPDU;
+       info->status.ampdu_ack_len = successes;
+       info->status.ampdu_len = agg->frame_count;
+       iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+       return 0;
+}
+
+/**
+ * iwl4965_hwrate_to_tx_control - translate ucode response to mac80211 tx status control values
+ */
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+                                 struct ieee80211_tx_info *info)
+{
+       struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+       info->antenna_sel_tx =
+               ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               r->flags |= IEEE80211_TX_RC_MCS;
+       if (rate_n_flags & RATE_MCS_GF_MSK)
+               r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+       if (rate_n_flags & RATE_MCS_DUP_MSK)
+               r->flags |= IEEE80211_TX_RC_DUP_DATA;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               r->flags |= IEEE80211_TX_RC_SHORT_GI;
+       r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                          struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+       struct iwl_tx_queue *txq = NULL;
+       struct iwl_ht_agg *agg;
+       int index;
+       int sta_id;
+       int tid;
+       unsigned long flags;
+
+       /* "flow" corresponds to Tx queue */
+       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+       /* "ssn" is start of block-ack Tx window, corresponds to index
+        * (in Tx queue's circular buffer) of first TFD/frame in window */
+       u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+       if (scd_flow >= priv->hw_params.max_txq_num) {
+               IWL_ERR(priv,
+                       "BUG_ON scd_flow is bigger than number of queues\n");
+               return;
+       }
+
+       txq = &priv->txq[scd_flow];
+       sta_id = ba_resp->sta_id;
+       tid = ba_resp->tid;
+       agg = &priv->stations[sta_id].tid[tid].agg;
+       if (unlikely(agg->txq_id != scd_flow)) {
+               /*
+                * FIXME: this is a uCode bug which needs to be addressed;
+                * log the information and return for now.
+                * Since it can happen very often, and in order not to fill
+                * the syslog, the logging is not enabled by default.
+                */
+               IWL_DEBUG_TX_REPLY(priv,
+                       "BA scd_flow %d does not match txq_id %d\n",
+                       scd_flow, agg->txq_id);
+               return;
+       }
+
+       /* Find index just before block-ack window */
+       index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
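+       /*
+        * e.g. scd_ssn = 0x45 gives index = 0x44, so the reclaim below frees
+        * every TFD up to and including 0x44 and leaves read_ptr at 0x45,
+        * the start of the still-outstanding block-ack window.
+        */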
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+                          "sta_id = %d\n",
+                          agg->wait_for_ba,
+                          (u8 *) &ba_resp->sta_addr_lo32,
+                          ba_resp->sta_id);
+       IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
+                       "scd_flow = "
+                          "%d, scd_ssn = %d\n",
+                          ba_resp->tid,
+                          ba_resp->seq_ctl,
+                          (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+                          ba_resp->scd_flow,
+                          ba_resp->scd_ssn);
+       IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+                          agg->start_idx,
+                          (unsigned long long)agg->bitmap);
+
+       /* Update driver's record of ACK vs. not for each frame in window */
+       iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+       /* Release all TFDs before the SSN, i.e. all TFDs in front of
+        * block-ack window (we assume that they've been successfully
+        * transmitted ... if not, it's too late anyway). */
+       if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+               /* calculate mac80211 ampdu sw queue to wake */
+               int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
+               iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+               if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
+                   priv->mac80211_registered &&
+                   (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+                       iwl_legacy_wake_queue(priv, txq);
+
+               iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
+       }
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
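+
+/*
+ * Each macro expands to a case label plus a string literal, e.g.
+ * TX_STATUS_FAIL(SHORT_LIMIT) becomes
+ *     case TX_STATUS_FAIL_SHORT_LIMIT: return "SHORT_LIMIT";
+ */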
+
+       switch (status & TX_STATUS_MSK) {
+       case TX_STATUS_SUCCESS:
+               return "SUCCESS";
+       TX_STATUS_POSTPONE(DELAY);
+       TX_STATUS_POSTPONE(FEW_BYTES);
+       TX_STATUS_POSTPONE(QUIET_PERIOD);
+       TX_STATUS_POSTPONE(CALC_TTAK);
+       TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+       TX_STATUS_FAIL(SHORT_LIMIT);
+       TX_STATUS_FAIL(LONG_LIMIT);
+       TX_STATUS_FAIL(FIFO_UNDERRUN);
+       TX_STATUS_FAIL(DRAIN_FLOW);
+       TX_STATUS_FAIL(RFKILL_FLUSH);
+       TX_STATUS_FAIL(LIFE_EXPIRE);
+       TX_STATUS_FAIL(DEST_PS);
+       TX_STATUS_FAIL(HOST_ABORTED);
+       TX_STATUS_FAIL(BT_RETRY);
+       TX_STATUS_FAIL(STA_INVALID);
+       TX_STATUS_FAIL(FRAG_DROPPED);
+       TX_STATUS_FAIL(TID_DISABLE);
+       TX_STATUS_FAIL(FIFO_FLUSHED);
+       TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+       TX_STATUS_FAIL(PASSIVE_NO_RX);
+       TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+       }
+
+       return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644 (file)
index 0000000..001d148
--- /dev/null
@@ -0,0 +1,166 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-4965-calib.h"
+
+#define IWL_AC_UNSET -1
+
+/**
+ * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+       u32 val;
+       int ret = 0;
+       u32 errcnt = 0;
+       u32 i;
+
+       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+                       i + IWL4965_RTC_INST_LOWER_BOUND);
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       ret = -EIO;
+                       errcnt++;
+                       if (errcnt >= 3)
+                               break;
+               }
+       }
+
+       return ret;
+}
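+
+/*
+ * Note that the sparse check above samples one dword out of every 100 bytes
+ * (about 4% of the image) and gives up after 3 mismatches; the full check
+ * below walks every dword and is only used for diagnostics once no image
+ * has passed the sparse check (see iwl4965_verify_ucode()).
+ */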
+
+/**
+ * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
+                                u32 len)
+{
+       u32 val;
+       u32 save_len = len;
+       int ret = 0;
+       u32 errcnt;
+
+       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+       iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+                          IWL4965_RTC_INST_LOWER_BOUND);
+
+       errcnt = 0;
+       for (; len > 0; len -= sizeof(u32), image++) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       IWL_ERR(priv, "uCode INST section is invalid at "
+                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                                 save_len - len, val, le32_to_cpu(*image));
+                       ret = -EIO;
+                       errcnt++;
+                       if (errcnt >= 20)
+                               break;
+               }
+       }
+
+       if (!errcnt)
+               IWL_DEBUG_INFO(priv,
+                   "ucode image in INSTRUCTION memory is good\n");
+
+       return ret;
+}
+
+/**
+ * iwl4965_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+int iwl4965_verify_ucode(struct iwl_priv *priv)
+{
+       __le32 *image;
+       u32 len;
+       int ret;
+
+       /* Try bootstrap */
+       image = (__le32 *)priv->ucode_boot.v_addr;
+       len = priv->ucode_boot.len;
+       ret = iwl4965_verify_inst_sparse(priv, image, len);
+       if (!ret) {
+               IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try initialize */
+       image = (__le32 *)priv->ucode_init.v_addr;
+       len = priv->ucode_init.len;
+       ret = iwl4965_verify_inst_sparse(priv, image, len);
+       if (!ret) {
+               IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try runtime/protocol */
+       image = (__le32 *)priv->ucode_code.v_addr;
+       len = priv->ucode_code.len;
+       ret = iwl4965_verify_inst_sparse(priv, image, len);
+       if (!ret) {
+               IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+       /* Since nothing seems to match, show first several data entries in
+        * instruction SRAM, so maybe visual inspection will give a clue.
+        * Selection of bootstrap image (vs. other images) is arbitrary. */
+       image = (__le32 *)priv->ucode_boot.v_addr;
+       len = priv->ucode_boot.len;
+       ret = iwl4965_verify_inst_full(priv, image, len);
+
+       return ret;
+}
similarity index 71%
rename from drivers/net/wireless/iwlwifi/iwl-4965.c
rename to drivers/net/wireless/iwlegacy/iwl-4965.c
index 3f1e5f1..f5433c7 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
 #include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-helpers.h"
-#include "iwl-agn-calib.h"
+#include "iwl-4965-calib.h"
 #include "iwl-sta.h"
-#include "iwl-agn-led.h"
-#include "iwl-agn.h"
-#include "iwl-agn-debugfs.h"
-#include "iwl-legacy.h"
+#include "iwl-4965-led.h"
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
 
 static int iwl4965_send_tx_power(struct iwl_priv *priv);
 static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -74,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
        IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
 
        /* verify BSM SRAM contents */
-       val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
+       val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
        for (reg = BSM_SRAM_LOWER_BOUND;
             reg < BSM_SRAM_LOWER_BOUND + len;
             reg += sizeof(u32), image++) {
-               val = iwl_read_prph(priv, reg);
+               val = iwl_legacy_read_prph(priv, reg);
                if (val != le32_to_cpu(*image)) {
                        IWL_ERR(priv, "BSM uCode verification failed at "
                                  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -158,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
        inst_len = priv->ucode_init.len;
        data_len = priv->ucode_init_data.len;
 
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
 
        /* Fill BSM memory with bootstrap instructions */
        for (reg_offset = BSM_SRAM_LOWER_BOUND;
             reg_offset < BSM_SRAM_LOWER_BOUND + len;
             reg_offset += sizeof(u32), image++)
-               _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
+               _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
 
        ret = iwl4965_verify_bsm(priv);
        if (ret)
                return ret;
 
        /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-       iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-       iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
-       iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+       iwl_legacy_write_prph(priv,
+                       BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
+       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
 
        /* Load bootstrap code into instruction SRAM now,
         *   to prepare to load "initialize" uCode */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
 
        /* Wait for load of bootstrap uCode to finish */
        for (i = 0; i < 100; i++) {
-               done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
+               done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
                if (!(done & BSM_WR_CTRL_REG_BIT_START))
                        break;
                udelay(10);
@@ -198,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
 
        /* Enable future boot loads whenever power management unit triggers it
         *   (e.g. when powering back up after power-save shutdown) */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+       iwl_legacy_write_prph(priv,
+                       BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
 
 
        return 0;
@@ -224,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
        pdata = priv->ucode_data_backup.p_addr >> 4;
 
        /* Tell bootstrap uCode where to find image to load */
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
                                 priv->ucode_data.len);
 
        /* Inst byte count must be last to set up, bit 31 signals uCode
         *   that all new ptr/size info is in place */
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
                                 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
        IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
 
@@ -251,18 +252,10 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
 */
 static void iwl4965_init_alive_start(struct iwl_priv *priv)
 {
-       /* Check alive response for "valid" sign from uCode */
-       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-               goto restart;
-       }
-
        /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "initialize" alive if code weren't properly loaded.  */
-       if (iwl_verify_ucode(priv)) {
+       if (iwl4965_verify_ucode(priv)) {
                /* Runtime instruction load was bad;
                 * take it all the way back down so we can try again */
                IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
@@ -288,7 +281,7 @@ restart:
        queue_work(priv->workqueue, &priv->restart);
 }
 
-static bool is_ht40_channel(__le32 rxon_flags)
+static bool iw4965_is_ht40_channel(__le32 rxon_flags)
 {
        int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
                                    >> RXON_FLG_CHANNEL_MODE_POS;
@@ -296,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags)
                  (chan_mod == CHANNEL_MODE_MIXED));
 }
 
-/*
- * EEPROM handlers
- */
-static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
-{
-       return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
-       iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
 static void iwl4965_nic_config(struct iwl_priv *priv)
 {
        unsigned long flags;
@@ -320,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+       radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
        /* write radio config values to register */
        if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                            EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
                            EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
                            EEPROM_RF_CFG_DASH_MSK(radio_cfg));
 
        /* set CSR_HW_CONFIG_REG for uCode use */
-       iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
                    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
 
        priv->calib_info = (struct iwl_eeprom_calib_info *)
-               iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+               iwl_legacy_eeprom_query_addr(priv,
+                               EEPROM_4965_CALIB_TXPOWER_OFFSET);
 
        spin_unlock_irqrestore(&priv->lock, flags);
 }
@@ -348,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
        struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
 
        if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
-           iwl_is_any_associated(priv)) {
+           iwl_legacy_is_any_associated(priv)) {
                struct iwl_calib_diff_gain_cmd cmd;
 
                /* clear data for chain noise calibration algorithm */
@@ -365,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
                cmd.diff_gain_a = 0;
                cmd.diff_gain_b = 0;
                cmd.diff_gain_c = 0;
-               if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+               if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
                                 sizeof(cmd), &cmd))
                        IWL_ERR(priv,
                                "Could not send REPLY_PHY_CALIBRATION_CMD\n");
@@ -374,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
        }
 }
 
-static void iwl4965_gain_computation(struct iwl_priv *priv,
-               u32 *average_noise,
-               u16 min_average_noise_antenna_i,
-               u32 min_average_noise,
-               u8 default_chain)
-{
-       int i, ret;
-       struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
-       data->delta_gain_code[min_average_noise_antenna_i] = 0;
-
-       for (i = default_chain; i < NUM_RX_CHAINS; i++) {
-               s32 delta_g = 0;
-
-               if (!(data->disconn_array[i]) &&
-                   (data->delta_gain_code[i] ==
-                            CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
-                       delta_g = average_noise[i] - min_average_noise;
-                       data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
-                       data->delta_gain_code[i] =
-                               min(data->delta_gain_code[i],
-                               (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
-                       data->delta_gain_code[i] =
-                               (data->delta_gain_code[i] | (1 << 2));
-               } else {
-                       data->delta_gain_code[i] = 0;
-               }
-       }
-       IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
-                    data->delta_gain_code[0],
-                    data->delta_gain_code[1],
-                    data->delta_gain_code[2]);
-
-       /* Differential gain gets sent to uCode only once */
-       if (!data->radio_write) {
-               struct iwl_calib_diff_gain_cmd cmd;
-               data->radio_write = 1;
-
-               memset(&cmd, 0, sizeof(cmd));
-               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
-               cmd.diff_gain_a = data->delta_gain_code[0];
-               cmd.diff_gain_b = data->delta_gain_code[1];
-               cmd.diff_gain_c = data->delta_gain_code[2];
-               ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-                                     sizeof(cmd), &cmd);
-               if (ret)
-                       IWL_DEBUG_CALIB(priv, "fail sending cmd "
-                                    "REPLY_PHY_CALIBRATION_CMD\n");
-
-               /* TODO we might want recalculate
-                * rx_chain in rxon cmd */
-
-               /* Mark so we run this algo only once! */
-               data->state = IWL_CHAIN_NOISE_CALIBRATED;
-       }
-}
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                       txpower_work);
-
-       /* If a scan happened to start before we got here
-        * then just return; the statistics notification will
-        * kick off another scheduled work to compensate for
-        * any temperature delta we missed here. */
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-
-       /* Regardless of if we are associated, we must reconfigure the
-        * TX power since frames can be sent on non-radar channels while
-        * not associated */
-       iwl4965_send_tx_power(priv);
-
-       /* Update last_temperature to keep is_calib_needed from running
-        * when it isn't needed... */
-       priv->last_temperature = priv->temperature;
-
-       mutex_unlock(&priv->mutex);
-}
-
-/*
- * Acquire priv->lock before calling this function !
- */
-static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
-       iwl_write_direct32(priv, HBUS_TARG_WRPTR,
-                            (index & 0xff) | (txq_id << 8));
-       iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE:  Acquire priv->lock before calling this function !
- */
-static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
-                                       struct iwl_tx_queue *txq,
-                                       int tx_fifo_id, int scd_retry)
-{
-       int txq_id = txq->q.id;
-
-       /* Find out whether to activate Tx queue */
-       int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
-       /* Set up and activate */
-       iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-                        (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
-                        (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
-                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
-                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
-                        IWL49_SCD_QUEUE_STTS_REG_MSK);
-
-       txq->sched_retry = scd_retry;
-
-       IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
-                      active ? "Activate" : "Deactivate",
-                      scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
-       IWL_TX_FIFO_VO,
-       IWL_TX_FIFO_VI,
-       IWL_TX_FIFO_BE,
-       IWL_TX_FIFO_BK,
-       IWL49_CMD_FIFO_NUM,
-       IWL_TX_FIFO_UNUSED,
-       IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
-       u32 a;
-       unsigned long flags;
-       int i, chan;
-       u32 reg_val;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Clear 4965's internal Tx Scheduler data base */
-       priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
-       a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
-       for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
-               iwl_write_targ_mem(priv, a, 0);
-       for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
-               iwl_write_targ_mem(priv, a, 0);
-       for (; a < priv->scd_base_addr +
-              IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
-               iwl_write_targ_mem(priv, a, 0);
-
-       /* Tel 4965 where to find Tx byte count tables */
-       iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
-                       priv->scd_bc_tbls.dma >> 10);
-
-       /* Enable DMA channel */
-       for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
-               iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
-                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
-       /* Update FH chicken bits */
-       reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
-       iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
-                          reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
-       /* Disable chain mode for all queues */
-       iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
-       /* Initialize each Tx queue (including the command queue) */
-       for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
-               /* TFD circular buffer read/write indexes */
-               iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
-               iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
-               /* Max Tx Window size for Scheduler-ACK mode */
-               iwl_write_targ_mem(priv, priv->scd_base_addr +
-                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
-                               (SCD_WIN_SIZE <<
-                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-               /* Frame limit */
-               iwl_write_targ_mem(priv, priv->scd_base_addr +
-                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
-                               sizeof(u32),
-                               (SCD_FRAME_LIMIT <<
-                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-       }
-       iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
-                                (1 << priv->hw_params.max_txq_num) - 1);
-
-       /* Activate all Tx DMA/FIFO channels */
-       priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
-
-       iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-       /* make sure all queue are not stopped */
-       memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
-       for (i = 0; i < 4; i++)
-               atomic_set(&priv->queue_stop_count[i], 0);
-
-       /* reset to 0 to enable all the queue first */
-       priv->txq_ctx_active_msk = 0;
-       /* Map each Tx/cmd queue to its corresponding fifo */
-       BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
-       for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
-               int ac = default_queue_to_tx_fifo[i];
-
-               iwl_txq_ctx_activate(priv, i);
-
-               if (ac == IWL_TX_FIFO_UNUSED)
-                       continue;
-
-               iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
-       }
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
 static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
        .min_nrg_cck = 97,
        .max_nrg_cck = 0, /* not used, set to 0 */
@@ -666,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
 
        priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
 
-       priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-       priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+       priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
+       priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
        priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
        priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
 
        iwl4965_set_ct_threshold(priv);
 
        priv->hw_params.sens = &iwl4965_sensitivity;
-       priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+       priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
 
        return 0;
 }
@@ -1158,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
        IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
                          is_ht40);
 
-       ch_info = iwl_get_channel_info(priv, priv->band, channel);
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
 
-       if (!is_channel_valid(ch_info))
+       if (!iwl_legacy_is_channel_valid(ch_info))
                return -EINVAL;
 
        /* get txatten group, used to select 1) thermal txpower adjustment
@@ -1384,7 +1130,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
 
        band = priv->band == IEEE80211_BAND_2GHZ;
 
-       is_ht40 = is_ht40_channel(ctx->active.flags);
+       is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
 
        if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
                ctrl_chan_high = 1;
@@ -1398,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
        if (ret)
                goto out;
 
-       ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
+       ret = iwl_legacy_send_cmd_pdu(priv,
+                        REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
 
 out:
        return ret;
@@ -1409,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
 {
        int ret = 0;
        struct iwl4965_rxon_assoc_cmd rxon_assoc;
-       const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
-       const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+       const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+       const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
 
        if ((rxon1->flags == rxon2->flags) &&
            (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1436,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
            ctx->staging.ofdm_ht_dual_stream_basic_rates;
        rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
 
-       ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+       ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
                                     sizeof(rxon_assoc), &rxon_assoc, NULL);
        if (ret)
                return ret;
@@ -1447,12 +1194,12 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
 static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
        /* cast away the const for active_rxon in this function */
-       struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
+       struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
        int ret;
        bool new_assoc =
                !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EBUSY;
 
        if (!ctx->is_active)
@@ -1461,7 +1208,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
        /* always get timestamp with Rx frame */
        ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
 
-       ret = iwl_check_rxon_cmd(priv, ctx);
+       ret = iwl_legacy_check_rxon_cmd(priv, ctx);
        if (ret) {
                IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
                return -EINVAL;
@@ -1475,21 +1222,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
            (priv->switch_rxon.channel != ctx->staging.channel)) {
                IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
                      le16_to_cpu(priv->switch_rxon.channel));
-               iwl_chswitch_done(priv, false);
+               iwl_legacy_chswitch_done(priv, false);
        }
 
        /* If we don't need to send a full RXON, we can use
         * iwl_rxon_assoc_cmd which is used to reconfigure filter
         * and other flags for the current radio configuration. */
-       if (!iwl_full_rxon_required(priv, ctx)) {
-               ret = iwl_send_rxon_assoc(priv, ctx);
+       if (!iwl_legacy_full_rxon_required(priv, ctx)) {
+               ret = iwl_legacy_send_rxon_assoc(priv, ctx);
                if (ret) {
                        IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
                        return ret;
                }
 
                memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_print_rx_config_cmd(priv, ctx);
+               iwl_legacy_print_rx_config_cmd(priv, ctx);
                return 0;
        }
 
@@ -1497,12 +1244,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
         * an RXON_ASSOC and the new config wants the associated mask enabled,
         * we must clear the associated from the active configuration
         * before we apply the new config */
-       if (iwl_is_associated_ctx(ctx) && new_assoc) {
+       if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
                IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
                active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                                      sizeof(struct iwl_rxon_cmd),
+               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+                                      sizeof(struct iwl_legacy_rxon_cmd),
                                       active_rxon);
 
                /* If the mask clearing failed then we set
@@ -1512,9 +1259,9 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
                        IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
                        return ret;
                }
-               iwl_clear_ucode_stations(priv, ctx);
-               iwl_restore_stations(priv, ctx);
-               ret = iwl_restore_default_wep_keys(priv, ctx);
+               iwl_legacy_clear_ucode_stations(priv, ctx);
+               iwl_legacy_restore_stations(priv, ctx);
+               ret = iwl4965_restore_default_wep_keys(priv, ctx);
                if (ret) {
                        IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
                        return ret;
@@ -1529,24 +1276,25 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
                       le16_to_cpu(ctx->staging.channel),
                       ctx->staging.bssid_addr);
 
-       iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
+       iwl_legacy_set_rxon_hwcrypto(priv, ctx,
+                               !priv->cfg->mod_params->sw_crypto);
 
        /* Apply the new configuration
         * RXON unassoc clears the station table in uCode so restoration of
         * stations is needed after it (the RXON command) completes
         */
        if (!new_assoc) {
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
+               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
                if (ret) {
                        IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
                        return ret;
                }
                IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
                memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_clear_ucode_stations(priv, ctx);
-               iwl_restore_stations(priv, ctx);
-               ret = iwl_restore_default_wep_keys(priv, ctx);
+               iwl_legacy_clear_ucode_stations(priv, ctx);
+               iwl_legacy_restore_stations(priv, ctx);
+               ret = iwl4965_restore_default_wep_keys(priv, ctx);
                if (ret) {
                        IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
                        return ret;
@@ -1557,21 +1305,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
                /* Apply the new configuration
                 * RXON assoc doesn't clear the station table in uCode,
                 */
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
+               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
                if (ret) {
                        IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
                        return ret;
                }
                memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
        }
-       iwl_print_rx_config_cmd(priv, ctx);
+       iwl_legacy_print_rx_config_cmd(priv, ctx);
 
-       iwl_init_sensitivity(priv);
+       iwl4965_init_sensitivity(priv);
 
        /* If we issue a new RXON command which required a tune then we must
         * send a new TXPOWER command or we won't be able to Tx any frames */
-       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+       ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
        if (ret) {
                IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
                return ret;
@@ -1598,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
        struct ieee80211_vif *vif = ctx->vif;
        band = priv->band == IEEE80211_BAND_2GHZ;
 
-       is_ht40 = is_ht40_channel(ctx->staging.flags);
+       is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
 
        if (is_ht40 &&
            (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
@@ -1629,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
        else {
                switch_time_in_usec =
                        vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-               ucode_switch_time = iwl_usecs_to_beacons(priv,
+               ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
                                                         switch_time_in_usec,
                                                         beacon_interval);
-               cmd.switch_time = iwl_add_beacon_time(priv,
+               cmd.switch_time = iwl_legacy_add_beacon_time(priv,
                                                      priv->ucode_beacon_time,
                                                      ucode_switch_time,
                                                      beacon_interval);
        }
        IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
                      cmd.switch_time);
-       ch_info = iwl_get_channel_info(priv, priv->band, ch);
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
        if (ch_info)
-               cmd.expect_beacon = is_channel_radar(ch_info);
+               cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
        else {
                IWL_ERR(priv, "invalid channel switch from %u to %u\n",
                        ctx->active.channel, ch);
@@ -1658,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
        priv->switch_rxon.channel = cmd.channel;
        priv->switch_rxon.switch_in_progress = true;
 
-       return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+       return iwl_legacy_send_cmd_pdu(priv,
+                        REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 }
 
 /**
@@ -1700,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
        u32 R4;
 
        if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
-           (priv->_agn.statistics.flag &
+           (priv->_4965.statistics.flag &
                        STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
                IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
                R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
@@ -1725,7 +1474,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
        if (!test_bit(STATUS_TEMPERATURE, &priv->status))
                vt = sign_extend32(R4, 23);
        else
-               vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
+               vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
                                 general.common.temperature), 23);
 
        IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -1810,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
        }
 
        priv->temperature = temp;
-       iwl_tt_handler(priv);
        set_bit(STATUS_TEMPERATURE, &priv->status);
 
        if (!priv->disable_tx_power_cal &&
@@ -1819,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
                queue_work(priv->workqueue, &priv->txpower_work);
 }
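
The temperature hunks above derive vt with sign_extend32(..., 23), i.e. bit 23 is treated as the sign bit of a 24-bit calibration reading and propagated into the upper bits. A minimal stand-alone C sketch of what that helper does, shown here outside the driver:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend 'value' from bit 'index' (0-based), as sign_extend32() does. */
static int32_t sign_extend32_sketch(uint32_t value, int index)
{
        uint8_t shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
}

int main(void)
{
        printf("%ld\n", (long)sign_extend32_sketch(0x00FFFFFFu, 23)); /* -1  */
        printf("%ld\n", (long)sign_extend32_sketch(0x00000123u, 23)); /* 291 */
        return 0;
}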
 
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
-                                           u16 txq_id)
-{
-       /* Simply stop the queue, but don't change any configuration;
-        * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-       iwl_write_prph(priv,
-               IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-               (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
-               (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-                                  u16 ssn_idx, u8 tx_fifo)
-{
-       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-           (IWL49_FIRST_AMPDU_QUEUE +
-               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-               IWL_WARN(priv,
-                       "queue number out of range: %d, must be %d to %d\n",
-                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
-                       IWL49_FIRST_AMPDU_QUEUE +
-                       priv->cfg->base_params->num_of_ampdu_queues - 1);
-               return -EINVAL;
-       }
-
-       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-       iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-       /* supposes that ssn_idx is valid (!= 0xFFF) */
-       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-       iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-       iwl_txq_ctx_deactivate(priv, txq_id);
-       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
-       return 0;
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
-                                       u16 txq_id)
-{
-       u32 tbl_dw_addr;
-       u32 tbl_dw;
-       u16 scd_q2ratid;
-
-       scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
-       tbl_dw_addr = priv->scd_base_addr +
-                       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
-       tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
-       if (txq_id & 0x1)
-               tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
-       else
-               tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
-       iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
-       return 0;
-}
-
-
-/**
- * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE:  txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
- *        i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-                                 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
-       unsigned long flags;
-       u16 ra_tid;
-       int ret;
-
-       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-           (IWL49_FIRST_AMPDU_QUEUE +
-               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-               IWL_WARN(priv,
-                       "queue number out of range: %d, must be %d to %d\n",
-                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
-                       IWL49_FIRST_AMPDU_QUEUE +
-                       priv->cfg->base_params->num_of_ampdu_queues - 1);
-               return -EINVAL;
-       }
-
-       ra_tid = BUILD_RAxTID(sta_id, tid);
-
-       /* Modify device's station table to Tx this TID */
-       ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-       if (ret)
-               return ret;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Stop this Tx queue before configuring it */
-       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-       /* Map receiver-address / traffic-ID to this queue */
-       iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
-       /* Set this queue as a chain-building queue */
-       iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-       /* Place first TFD at index corresponding to start sequence number.
-        * Assumes that ssn_idx is valid (!= 0xFFF) */
-       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-       /* Set up Tx window size and frame limit for this queue */
-       iwl_write_targ_mem(priv,
-               priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
-               (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-       iwl_write_targ_mem(priv, priv->scd_base_addr +
-               IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
-               (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
-               & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-       iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
-       /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-
 static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
 {
        switch (cmd_id) {
@@ -1975,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
        }
 }
 
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+                                                               u8 *data)
 {
        struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
        addsta->mode = cmd->mode;
@@ -2028,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
                status = le16_to_cpu(frame_status[0].status);
                idx = start_idx;
 
-               /* FIXME: code repetition */
                IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
                                   agg->frame_count, agg->start_idx, idx);
 
                info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-               info->flags |= iwl_tx_status_to_mac80211(status);
-               iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
-               /* FIXME: code repetition end */
+               info->flags |= iwl4965_tx_status_to_mac80211(status);
+               iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
 
                IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
                                    status & 0xff, tx_resp->failure_frame);
@@ -2064,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
                        IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
                                           agg->frame_count, txq_id, idx);
 
-                       hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+                       hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
                        if (!hdr) {
                                IWL_ERR(priv,
                                        "BUG_ON idx doesn't point to valid skb"
@@ -2115,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
        return 0;
 }
 
-static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
+static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
 {
        int i;
        int start = 0;
        int ret = IWL_INVALID_STATION;
        unsigned long flags;
 
-       if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
-           (priv->iw_mode == NL80211_IFTYPE_AP))
+       if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
                start = IWL_STA_ID;
 
        if (is_broadcast_ether_addr(addr))
@@ -2159,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
        return ret;
 }
 
-static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
 {
        if (priv->iw_mode == NL80211_IFTYPE_STATION) {
                return IWL_AP_ID;
        } else {
                u8 *da = ieee80211_get_DA(hdr);
-               return iwl_find_station(priv, da);
+               return iwl4965_find_station(priv, da);
        }
 }
 
@@ -2190,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
        u8 *qc = NULL;
        unsigned long flags;
 
-       if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+       if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
                          "is out of range [0-%d] %d %d\n", txq_id,
                          index, txq->q.n_bd, txq->q.write_ptr,
@@ -2202,13 +1802,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
        info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
        memset(&info->status, 0, sizeof(info->status));
 
-       hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
+       hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & 0xf;
        }
 
-       sta_id = iwl_get_ra_sta_id(priv, hdr);
+       sta_id = iwl4965_get_ra_sta_id(priv, hdr);
        if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
                IWL_ERR(priv, "Station not known\n");
                return;
@@ -2225,114 +1825,89 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
                iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
 
                /* check if BAR is needed */
-               if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
+               if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
                if (txq->q.read_ptr != (scd_ssn & 0xff)) {
-                       index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+                       index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
+                                                               txq->q.n_bd);
                        IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
                                           "%d index %d\n", scd_ssn , index);
-                       freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+                       freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
                        if (qc)
-                               iwl_free_tfds_in_queue(priv, sta_id,
+                               iwl4965_free_tfds_in_queue(priv, sta_id,
                                                       tid, freed);
 
                        if (priv->mac80211_registered &&
-                           (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
-                           (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-                               iwl_wake_queue(priv, txq);
+                           (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
+                                && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+                               iwl_legacy_wake_queue(priv, txq);
                }
        } else {
                info->status.rates[0].count = tx_resp->failure_frame + 1;
-               info->flags |= iwl_tx_status_to_mac80211(status);
-               iwlagn_hwrate_to_tx_control(priv,
+               info->flags |= iwl4965_tx_status_to_mac80211(status);
+               iwl4965_hwrate_to_tx_control(priv,
                                        le32_to_cpu(tx_resp->rate_n_flags),
                                        info);
 
                IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
                                   "rate_n_flags 0x%x retries %d\n",
                                   txq_id,
-                                  iwl_get_tx_fail_reason(status), status,
+                                  iwl4965_get_tx_fail_reason(status), status,
                                   le32_to_cpu(tx_resp->rate_n_flags),
                                   tx_resp->failure_frame);
 
-               freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+               freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
                if (qc && likely(sta_id != IWL_INVALID_STATION))
-                       iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+                       iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
                else if (sta_id == IWL_INVALID_STATION)
                        IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
 
                if (priv->mac80211_registered &&
-                   (iwl_queue_space(&txq->q) > txq->q.low_mark))
-                       iwl_wake_queue(priv, txq);
+                   (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
+                       iwl_legacy_wake_queue(priv, txq);
        }
        if (qc && likely(sta_id != IWL_INVALID_STATION))
-               iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
+               iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
 
-       iwl_check_abort_status(priv, tx_resp->frame_count, status);
+       iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
 
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
-                            struct iwl_rx_phy_res *rx_resp)
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
 {
-       /* data from PHY/DSP regarding signal strength, etc.,
-        *   contents are always there, not configurable by host.  */
-       struct iwl4965_rx_non_cfg_phy *ncphy =
-           (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
-       u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
-                       >> IWL49_AGC_DB_POS;
-
-       u32 valid_antennae =
-           (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
-                       >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
-       u8 max_rssi = 0;
-       u32 i;
-
-       /* Find max rssi among 3 possible receivers.
-        * These values are measured by the digital signal processor (DSP).
-        * They should stay fairly constant even as the signal strength varies,
-        *   if the radio's automatic gain control (AGC) is working right.
-        * AGC value (see below) will provide the "interesting" info. */
-       for (i = 0; i < 3; i++)
-               if (valid_antennae & (1 << i))
-                       max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
-       IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-               ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
-               max_rssi, agc);
-
-       /* dBm = max_rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal. */
-       return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
+       u8 rate __maybe_unused =
+               iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+               "tsf:0x%.8x%.8x rate:%d\n",
+               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+               beacon->beacon_notify_hdr.failure_frame,
+               le32_to_cpu(beacon->ibss_mgr_status),
+               le32_to_cpu(beacon->high_tsf),
+               le32_to_cpu(beacon->low_tsf), rate);
+
+       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
 }
 
-
 /* Set up 4965-specific Rx frame reply handlers */
 static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
 {
        /* Legacy Rx frames */
-       priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
+       priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
        /* Tx response */
        priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
-       INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
-       cancel_work_sync(&priv->txpower_work);
+       priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
 }
 
 static struct iwl_hcmd_ops iwl4965_hcmd = {
        .rxon_assoc = iwl4965_send_rxon_assoc,
        .commit_rxon = iwl4965_commit_rxon,
-       .set_rxon_chain = iwlagn_set_rxon_chain,
-       .send_bt_config = iwl_send_bt_config,
+       .set_rxon_chain = iwl4965_set_rxon_chain,
 };
 
 static void iwl4965_post_scan(struct iwl_priv *priv)
@@ -2344,7 +1919,7 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
         * performing the scan, fire one off if needed
         */
        if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-               iwlcore_commit_rxon(priv, ctx);
+               iwl_legacy_commit_rxon(priv, ctx);
 }
 
 static void iwl4965_post_associate(struct iwl_priv *priv)
@@ -2357,29 +1932,24 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
        if (!vif || !priv->is_open)
                return;
 
-       if (vif->type == NL80211_IFTYPE_AP) {
-               IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
-               return;
-       }
-
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
 
-       iwl_scan_cancel_timeout(priv, 200);
+       iwl_legacy_scan_cancel_timeout(priv, 200);
 
-       conf = ieee80211_get_hw_conf(priv->hw);
+       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
 
        ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwlcore_commit_rxon(priv, ctx);
+       iwl_legacy_commit_rxon(priv, ctx);
 
-       ret = iwl_send_rxon_timing(priv, ctx);
+       ret = iwl_legacy_send_rxon_timing(priv, ctx);
        if (ret)
                IWL_WARN(priv, "RXON timing - "
                            "Attempting to continue.\n");
 
        ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
 
-       iwl_set_rxon_ht(priv, &priv->current_ht_config);
+       iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
 
        if (priv->cfg->ops->hcmd->set_rxon_chain)
                priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
@@ -2401,7 +1971,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
                        ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
        }
 
-       iwlcore_commit_rxon(priv, ctx);
+       iwl_legacy_commit_rxon(priv, ctx);
 
        IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
                        vif->bss_conf.aid, ctx->active.bssid_addr);
@@ -2410,7 +1980,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
        case NL80211_IFTYPE_STATION:
                break;
        case NL80211_IFTYPE_ADHOC:
-               iwlagn_send_beacon_cmd(priv);
+               iwl4965_send_beacon_cmd(priv);
                break;
        default:
                IWL_ERR(priv, "%s Should not be called in %d mode\n",
@@ -2422,10 +1992,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
         * If chain noise has already been run, then we need to enable
         * power management here */
        if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
-               iwl_power_update_mode(priv, false);
+               iwl_legacy_power_update_mode(priv, false);
 
        /* Enable Rx differential gain and sensitivity calibrations */
-       iwl_chain_noise_reset(priv);
+       iwl4965_chain_noise_reset(priv);
        priv->start_calib = 1;
 }
 
@@ -2441,14 +2011,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
                return;
 
        /* The following should be done only at AP bring up */
-       if (!iwl_is_associated_ctx(ctx)) {
+       if (!iwl_legacy_is_associated_ctx(ctx)) {
 
                /* RXON - unassoc (to set timing command) */
                ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-               iwlcore_commit_rxon(priv, ctx);
+               iwl_legacy_commit_rxon(priv, ctx);
 
                /* RXON Timing */
-               ret = iwl_send_rxon_timing(priv, ctx);
+               ret = iwl_legacy_send_rxon_timing(priv, ctx);
                if (ret)
                        IWL_WARN(priv, "RXON timing failed - "
                                        "Attempting to continue.\n");
@@ -2456,7 +2026,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
                /* AP has all antennas */
                priv->chain_noise_data.active_chains =
                        priv->hw_params.valid_rx_ant;
-               iwl_set_rxon_ht(priv, &priv->current_ht_config);
+               iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
                if (priv->cfg->ops->hcmd->set_rxon_chain)
                        priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
@@ -2478,51 +2048,37 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
                                        ~RXON_FLG_SHORT_SLOT_MSK;
                }
                /* need to send beacon cmd before committing assoc RXON! */
-               iwlagn_send_beacon_cmd(priv);
+               iwl4965_send_beacon_cmd(priv);
                /* restore RXON assoc */
                ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               iwlcore_commit_rxon(priv, ctx);
+               iwl_legacy_commit_rxon(priv, ctx);
        }
-       iwlagn_send_beacon_cmd(priv);
-
-       /* FIXME - we need to add code here to detect a totally new
-        * configuration, reset the AP, unassoc, rxon timing, assoc,
-        * clear sta table, add BCAST sta... */
+       iwl4965_send_beacon_cmd(priv);
 }
 
 static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
        .get_hcmd_size = iwl4965_get_hcmd_size,
        .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
-       .chain_noise_reset = iwl4965_chain_noise_reset,
-       .gain_computation = iwl4965_gain_computation,
-       .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
-       .calc_rssi = iwl4965_calc_rssi,
-       .request_scan = iwlagn_request_scan,
+       .request_scan = iwl4965_request_scan,
        .post_scan = iwl4965_post_scan,
 };
 
 static struct iwl_lib_ops iwl4965_lib = {
        .set_hw_params = iwl4965_hw_set_hw_params,
        .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
-       .txq_set_sched = iwl4965_txq_set_sched,
-       .txq_agg_enable = iwl4965_txq_agg_enable,
-       .txq_agg_disable = iwl4965_txq_agg_disable,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
+       .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = iwl4965_hw_txq_free_tfd,
+       .txq_init = iwl4965_hw_tx_queue_init,
        .rx_handler_setup = iwl4965_rx_handler_setup,
-       .setup_deferred_work = iwl4965_setup_deferred_work,
-       .cancel_deferred_work = iwl4965_cancel_deferred_work,
        .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
-       .alive_notify = iwl4965_alive_notify,
        .init_alive_start = iwl4965_init_alive_start,
        .load_ucode = iwl4965_load_bsm,
-       .dump_nic_event_log = iwl_dump_nic_event_log,
-       .dump_nic_error_log = iwl_dump_nic_error_log,
-       .dump_fh = iwl_dump_fh,
+       .dump_nic_event_log = iwl4965_dump_nic_event_log,
+       .dump_nic_error_log = iwl4965_dump_nic_error_log,
+       .dump_fh = iwl4965_dump_fh,
        .set_channel_switch = iwl4965_hw_channel_switch,
        .apm_ops = {
-               .init = iwl_apm_init,
+               .init = iwl_legacy_apm_init,
                .config = iwl4965_nic_config,
        },
        .eeprom_ops = {
@@ -2535,64 +2091,56 @@ static struct iwl_lib_ops iwl4965_lib = {
                        EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
                        EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
                },
-               .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
-               .release_semaphore = iwlcore_eeprom_release_semaphore,
-               .calib_version = iwl4965_eeprom_calib_version,
-               .query_addr = iwlcore_eeprom_query_addr,
+               .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
+               .release_semaphore = iwl4965_eeprom_release_semaphore,
        },
        .send_tx_power  = iwl4965_send_tx_power,
-       .update_chain_flags = iwl_update_chain_flags,
-       .isr_ops = {
-               .isr = iwl_isr_legacy,
-       },
+       .update_chain_flags = iwl4965_update_chain_flags,
        .temp_ops = {
                .temperature = iwl4965_temperature_calib,
        },
        .debugfs_ops = {
-               .rx_stats_read = iwl_ucode_rx_stats_read,
-               .tx_stats_read = iwl_ucode_tx_stats_read,
-               .general_stats_read = iwl_ucode_general_stats_read,
-               .bt_stats_read = iwl_ucode_bt_stats_read,
-               .reply_tx_error = iwl_reply_tx_error_read,
+               .rx_stats_read = iwl4965_ucode_rx_stats_read,
+               .tx_stats_read = iwl4965_ucode_tx_stats_read,
+               .general_stats_read = iwl4965_ucode_general_stats_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
+       .check_plcp_health = iwl4965_good_plcp_health,
 };
 
 static const struct iwl_legacy_ops iwl4965_legacy_ops = {
        .post_associate = iwl4965_post_associate,
        .config_ap = iwl4965_config_ap,
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
+       .manage_ibss_station = iwl4965_manage_ibss_station,
+       .update_bcast_stations = iwl4965_update_bcast_stations,
 };
 
 struct ieee80211_ops iwl4965_hw_ops = {
-       .tx = iwlagn_mac_tx,
-       .start = iwlagn_mac_start,
-       .stop = iwlagn_mac_stop,
-       .add_interface = iwl_mac_add_interface,
-       .remove_interface = iwl_mac_remove_interface,
-       .change_interface = iwl_mac_change_interface,
+       .tx = iwl4965_mac_tx,
+       .start = iwl4965_mac_start,
+       .stop = iwl4965_mac_stop,
+       .add_interface = iwl_legacy_mac_add_interface,
+       .remove_interface = iwl_legacy_mac_remove_interface,
+       .change_interface = iwl_legacy_mac_change_interface,
        .config = iwl_legacy_mac_config,
-       .configure_filter = iwlagn_configure_filter,
-       .set_key = iwlagn_mac_set_key,
-       .update_tkip_key = iwlagn_mac_update_tkip_key,
-       .conf_tx = iwl_mac_conf_tx,
+       .configure_filter = iwl4965_configure_filter,
+       .set_key = iwl4965_mac_set_key,
+       .update_tkip_key = iwl4965_mac_update_tkip_key,
+       .conf_tx = iwl_legacy_mac_conf_tx,
        .reset_tsf = iwl_legacy_mac_reset_tsf,
        .bss_info_changed = iwl_legacy_mac_bss_info_changed,
-       .ampdu_action = iwlagn_mac_ampdu_action,
-       .hw_scan = iwl_mac_hw_scan,
-       .sta_add = iwlagn_mac_sta_add,
-       .sta_remove = iwl_mac_sta_remove,
-       .channel_switch = iwlagn_mac_channel_switch,
-       .flush = iwlagn_mac_flush,
-       .tx_last_beacon = iwl_mac_tx_last_beacon,
+       .ampdu_action = iwl4965_mac_ampdu_action,
+       .hw_scan = iwl_legacy_mac_hw_scan,
+       .sta_add = iwl4965_mac_sta_add,
+       .sta_remove = iwl_legacy_mac_sta_remove,
+       .channel_switch = iwl4965_mac_channel_switch,
+       .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
 };
 
 static const struct iwl_ops iwl4965_ops = {
        .lib = &iwl4965_lib,
        .hcmd = &iwl4965_hcmd,
        .utils = &iwl4965_hcmd_utils,
-       .led = &iwlagn_led_ops,
+       .led = &iwl4965_led_ops,
        .legacy = &iwl4965_legacy_ops,
        .ieee80211_ops = &iwl4965_hw_ops,
 };
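
The blocks above wire 4965-specific callbacks into the iwl_lib_ops, iwl_hcmd_ops, iwl_legacy_ops and ieee80211_ops tables so the shared iwlegacy core can dispatch through function pointers. A tiny self-contained sketch of that ops-table pattern (names are illustrative, not the real iwl_ops layout):

#include <stdio.h>

struct demo_priv { int chan; };

struct demo_hcmd_ops {
        int (*commit_rxon)(struct demo_priv *priv);
};

/* Device-specific implementation, analogous to iwl4965_commit_rxon(). */
static int demo4965_commit_rxon(struct demo_priv *priv)
{
        printf("commit rxon on channel %d\n", priv->chan);
        return 0;
}

static const struct demo_hcmd_ops demo4965_hcmd = {
        .commit_rxon = demo4965_commit_rxon,
};

int main(void)
{
        struct demo_priv priv = { .chan = 36 };

        /* Generic code only sees the ops table, as the iwlegacy core does. */
        return demo4965_hcmd.commit_rxon(&priv);
}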
@@ -2604,32 +2152,29 @@ static struct iwl_base_params iwl4965_base_params = {
        .pll_cfg_val = 0,
        .set_l0s = true,
        .use_bsm = true,
-       .use_isr_legacy = true,
-       .broken_powersave = true,
        .led_compensation = 61,
        .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .temperature_kelvin = true,
        .max_event_log_size = 512,
-       .tx_power_by_driver = true,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
-       .no_agg_framecnt_info = true,
 };
 
-struct iwl_cfg iwl4965_agn_cfg = {
+struct iwl_cfg iwl4965_cfg = {
        .name = "Intel(R) Wireless WiFi Link 4965AGN",
        .fw_name_pre = IWL4965_FW_PRE,
        .ucode_api_max = IWL4965_UCODE_API_MAX,
        .ucode_api_min = IWL4965_UCODE_API_MIN,
+       .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
        .valid_tx_ant = ANT_AB,
        .valid_rx_ant = ANT_ABC,
        .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
        .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
        .ops = &iwl4965_ops,
-       .mod_params = &iwlagn_mod_params,
+       .mod_params = &iwl4965_mod_params,
        .base_params = &iwl4965_base_params,
        .led_mode = IWL_LED_BLINK,
        /*
@@ -2641,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = {
 
 /* Module firmware */
 MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-
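
The scheduler code removed above programs each Tx queue's SRAM context by shift-and-mask packing a window size and a frame limit into two 32-bit words. A stand-alone sketch of that packing, using placeholder _POS/_MSK values rather than the real IWL49_SCD_QUEUE_CTX_* definitions:

/* Sketch only: the field positions, masks and sample values below are
 * placeholders for illustration, not the driver's definitions. */
#include <stdint.h>
#include <stdio.h>

#define WIN_SIZE_POS     0
#define WIN_SIZE_MSK     0x0000007Fu
#define FRAME_LIMIT_POS  16
#define FRAME_LIMIT_MSK  0x007F0000u

int main(void)
{
        uint32_t win_size = 64, frame_limit = 64;

        /* First context word: max Tx window size for scheduler-ACK mode. */
        uint32_t reg1 = (win_size << WIN_SIZE_POS) & WIN_SIZE_MSK;

        /* Second context word, written at offset + sizeof(u32): frame limit. */
        uint32_t reg2 = (frame_limit << FRAME_LIMIT_POS) & FRAME_LIMIT_MSK;

        printf("ctx word 1 = 0x%08x, ctx word 2 = 0x%08x\n",
               (unsigned)reg1, (unsigned)reg2);
        return 0;
}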
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644 (file)
index 0000000..01f8163
--- /dev/null
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_4965_h__
+#define __iwl_4965_h__
+
+#include "iwl-dev.h"
+
+/* configuration for the _4965 devices */
+extern struct iwl_cfg iwl4965_cfg;
+
+extern struct iwl_mod_params iwl4965_mod_params;
+
+extern struct ieee80211_ops iwl4965_hw_ops;
+
+/* tx queue */
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+                           int sta_id, int tid, int freed);
+
+/* RXON */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx);
+
+/* uCode */
+int iwl4965_verify_ucode(struct iwl_priv *priv);
+
+/* lib */
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+                           u8 frame_count, u32 status);
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_hw_nic_init(struct iwl_priv *priv);
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
+
+/* rx */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv);
+void iwl4965_rx_replenish(struct iwl_priv *priv);
+void iwl4965_rx_replenish_now(struct iwl_priv *priv);
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rxq_stop(struct iwl_priv *priv);
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+                    struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+                        struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_handle(struct iwl_priv *priv);
+
+/* tx */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                struct iwl_tx_queue *txq,
+                                dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+                        struct iwl_tx_queue *txq);
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+                             struct ieee80211_tx_info *info);
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta, u16 tid);
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+                          int sta_id, u8 tid, int txq_id);
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb);
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/*
+ * Acquire priv->lock before calling this function !
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
+/**
+ * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE:  Acquire priv->lock before calling this function !
+ */
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       int tx_fifo_id, int scd_retry);
+
+static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
+{
+       status &= TX_STATUS_MSK;
+
+       switch (status) {
+       case TX_STATUS_SUCCESS:
+       case TX_STATUS_DIRECT_DONE:
+               return IEEE80211_TX_STAT_ACK;
+       case TX_STATUS_FAIL_DEST_PS:
+               return IEEE80211_TX_STAT_TX_FILTERED;
+       default:
+               return 0;
+       }
+}
+
+static inline bool iwl4965_is_tx_success(u32 status)
+{
+       status &= TX_STATUS_MSK;
+       return (status == TX_STATUS_SUCCESS) ||
+              (status == TX_STATUS_DIRECT_DONE);
+}
+
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
+
+/* rx */
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb);
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
+                         struct iwl_rx_packet *pkt);
+void iwl4965_rx_statistics(struct iwl_priv *priv,
+                      struct iwl_rx_mem_buffer *rxb);
+void iwl4965_reply_statistics(struct iwl_priv *priv,
+                         struct iwl_rx_mem_buffer *rxb);
+
+/* scan */
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+                              struct ieee80211_vif *vif, bool add);
+
+/* hcmd */
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+iwl4965_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+
+/* station management */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx);
+int iwl4965_add_bssid_station(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx,
+                            const u8 *addr, u8 *sta_id_r);
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx,
+                              struct ieee80211_key_conf *key);
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_key_conf *key);
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+                                struct iwl_rxon_context *ctx);
+int iwl4965_set_dynamic_key(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       struct ieee80211_key_conf *key, u8 sta_id);
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       struct ieee80211_key_conf *key, u8 sta_id);
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+                        struct iwl_rxon_context *ctx,
+                        struct ieee80211_key_conf *keyconf,
+                        struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
+                       int sta_id, int tid);
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                        int tid, u16 ssn);
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                       int tid);
+void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
+                       int sta_id, int cnt);
+int iwl4965_update_bcast_stations(struct iwl_priv *priv);
+
+/* rate */
+static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
+{
+       return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
+{
+       return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+       return cpu_to_le32(flags|(u32)rate);
+}
+
+/* eeprom */
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
+int  iwl4965_eeprom_check_version(struct iwl_priv *priv);
+
+/* mac80211 handlers (for 4965) */
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwl4965_mac_start(struct ieee80211_hw *hw);
+void iwl4965_mac_stop(struct ieee80211_hw *hw);
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast);
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key);
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta,
+                               u32 iv32, u16 *phase1key);
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size);
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch);
+
+#endif /* __iwl_4965_h__ */
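
A short user-space sketch exercising the rate_n_flags inline helpers declared in the new header above. The RATE_MCS_ANT_POS value is assumed here for illustration (the authoritative definition lives in iwl-commands.h), and the cpu_to_le32()/le32_to_cpu() conversions performed by the kernel versions are omitted:

/* Sketch only: RATE_MCS_ANT_POS below is an assumed placeholder. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)            (1U << (n))
#define RATE_MCS_ANT_POS  14    /* assumed antenna-flag bit position */

static uint32_t ant_idx_to_flags(uint8_t ant_idx)
{
        return BIT(ant_idx) << RATE_MCS_ANT_POS;
}

static uint32_t set_rate_n_flags(uint8_t rate, uint32_t flags)
{
        return flags | (uint32_t)rate;  /* PLCP rate lives in the low byte */
}

static uint8_t get_rate(uint32_t rate_n_flags)
{
        return rate_n_flags & 0xFF;
}

int main(void)
{
        uint32_t rnf = set_rate_n_flags(0x0d, ant_idx_to_flags(1));

        printf("rate_n_flags=0x%08x rate=0x%02x\n",
               (unsigned)rnf, (unsigned)get_rate(rnf));
        return 0;
}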
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644 (file)
index 0000000..17a1d50
--- /dev/null
@@ -0,0 +1,3405 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-commands.h) only for uCode API definitions.
+ * Please use iwl-xxxx-hw.h for hardware-related definitions.
+ * Please use iwl-dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_legacy_commands_h__
+#define __iwl_legacy_commands_h__
+
+struct iwl_priv;
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver)   (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver)     (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver)  ((ver) & 0x000000FF)
+
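+/*
+ * Illustrative sketch only (not a driver API): unpacking a uCode version
+ * word with the macros above; the packed value is hypothetical.
+ */
+static inline u8 iwl_example_ucode_api(void)
+{
+	u32 ver = 0x02130801;		/* 2.19.8.1, hypothetical */
+
+	return IWL_UCODE_API(ver);	/* -> 8 */
+}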
+
+/* Tx rates */
+#define IWL_CCK_RATES  4
+#define IWL_OFDM_RATES 8
+#define IWL_MAX_RATES  (IWL_CCK_RATES + IWL_OFDM_RATES)
+
+enum {
+       REPLY_ALIVE = 0x1,
+       REPLY_ERROR = 0x2,
+
+       /* RXON and QOS commands */
+       REPLY_RXON = 0x10,
+       REPLY_RXON_ASSOC = 0x11,
+       REPLY_QOS_PARAM = 0x13,
+       REPLY_RXON_TIMING = 0x14,
+
+       /* Multi-Station support */
+       REPLY_ADD_STA = 0x18,
+       REPLY_REMOVE_STA = 0x19,
+
+       /* Security */
+       REPLY_WEPKEY = 0x20,
+
+       /* RX, TX, LEDs */
+       REPLY_3945_RX = 0x1b,           /* 3945 only */
+       REPLY_TX = 0x1c,
+       REPLY_RATE_SCALE = 0x47,        /* 3945 only */
+       REPLY_LEDS_CMD = 0x48,
+       REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+
+       /* 802.11h related */
+       REPLY_CHANNEL_SWITCH = 0x72,
+       CHANNEL_SWITCH_NOTIFICATION = 0x73,
+       REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
+       SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+
+       /* Power Management */
+       POWER_TABLE_CMD = 0x77,
+       PM_SLEEP_NOTIFICATION = 0x7A,
+       PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+
+       /* Scan commands and notifications */
+       REPLY_SCAN_CMD = 0x80,
+       REPLY_SCAN_ABORT_CMD = 0x81,
+       SCAN_START_NOTIFICATION = 0x82,
+       SCAN_RESULTS_NOTIFICATION = 0x83,
+       SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+       /* IBSS/AP commands */
+       BEACON_NOTIFICATION = 0x90,
+       REPLY_TX_BEACON = 0x91,
+
+       /* Miscellaneous commands */
+       REPLY_TX_PWR_TABLE_CMD = 0x97,
+
+       /* Bluetooth device coexistence config command */
+       REPLY_BT_CONFIG = 0x9b,
+
+       /* Statistics */
+       REPLY_STATISTICS_CMD = 0x9c,
+       STATISTICS_NOTIFICATION = 0x9d,
+
+       /* RF-KILL commands and notifications */
+       CARD_STATE_NOTIFICATION = 0xa1,
+
+       /* Missed beacons notification */
+       MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+       REPLY_CT_KILL_CONFIG_CMD = 0xa4,
+       SENSITIVITY_CMD = 0xa8,
+       REPLY_PHY_CALIBRATION_CMD = 0xb0,
+       REPLY_RX_PHY_CMD = 0xc0,
+       REPLY_RX_MPDU_CMD = 0xc1,
+       REPLY_RX = 0xc3,
+       REPLY_COMPRESSED_BA = 0xc5,
+
+       REPLY_MAX = 0xff
+};
+
+/******************************************************************************
+ * (0)
+ * Commonly used structures and definitions:
+ * Command header, rate_n_flags, txpower
+ *
+ *****************************************************************************/
+
+/* iwl_cmd_header flags value */
+#define IWL_CMD_FAILED_MSK 0x40
+
+#define SEQ_TO_QUEUE(s)        (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q)        (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s)        ((s) & 0xff)
+#define INDEX_TO_SEQ(i)        ((i) & 0xff)
+#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
+#define SEQ_RX_FRAME   cpu_to_le16(0x8000)
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+       u8 cmd;         /* Command ID:  REPLY_RXON, etc. */
+       u8 flags;       /* 0:5 reserved, 6 abort, 7 internal */
+       /*
+        * The driver sets up the sequence number to values of its choosing.
+        * uCode does not use this value, but passes it back to the driver
+        * when sending the response to each driver-originated command, so
+        * the driver can match the response to the command.  Since the values
+        * don't get used by uCode, the driver may set up an arbitrary format.
+        *
+        * There is one exception:  uCode sets bit 15 when it originates
+        * the response/notification, i.e. when the response/notification
+        * is not a direct response to a command sent by the driver.  For
+        * example, uCode issues REPLY_3945_RX when it sends a received frame
+        * to the driver; it is not a direct response to any driver command.
+        *
+        * The Linux driver uses the following format:
+        *
+        *  0:7         tfd index - position within TX queue
+        *  8:12        TX queue id
+        *  13          reserved
+        *  14          huge - driver sets this to indicate command is in the
+        *              'huge' storage at the end of the command buffers
+        *  15          unsolicited RX or uCode-originated notification
+       */
+       __le16 sequence;
+
+       /* command or response/notification data follows immediately */
+       u8 data[0];
+} __packed;
+
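+/*
+ * Illustrative sketch only (not a driver API): composing and decoding the
+ * driver-chosen "sequence" field with the SEQ_ and QUEUE_ helpers above;
+ * the queue id and TFD index are hypothetical.
+ */
+static inline void iwl_example_sequence(void)
+{
+	u16 seq = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(37);	/* queue 4, index 37 */
+	u8 txq_id = SEQ_TO_QUEUE(seq);			/* -> 4 */
+	u8 idx = SEQ_TO_INDEX(seq);			/* -> 37 */
+
+	(void)txq_id;
+	(void)idx;
+}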
+
+/**
+ * struct iwl3945_tx_power
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ */
+struct iwl3945_tx_power {
+       u8 tx_gain;             /* gain for analog radio */
+       u8 dsp_atten;           /* gain for DSP */
+} __packed;
+
+/**
+ * struct iwl3945_power_per_rate
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl3945_power_per_rate {
+       u8 rate;                /* plcp */
+       struct iwl3945_tx_power tpc;
+       u8 reserved;
+} __packed;
+
+/**
+ * iwl4965 rate_n_flags bit fields
+ *
+ * rate_n_flags format is used in following iwl4965 commands:
+ *  REPLY_RX (response only)
+ *  REPLY_RX_MPDU (response only)
+ *  REPLY_TX (both command and response)
+ *  REPLY_TX_LINK_QUALITY_CMD
+ *
+ * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
+ *  2-0:  0)   6 Mbps
+ *        1)  12 Mbps
+ *        2)  18 Mbps
+ *        3)  24 Mbps
+ *        4)  36 Mbps
+ *        5)  48 Mbps
+ *        6)  54 Mbps
+ *        7)  60 Mbps
+ *
+ *  4-3:  0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ *
+ *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *
+ * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
+ *  3-0:  0xD)   6 Mbps
+ *        0xF)   9 Mbps
+ *        0x5)  12 Mbps
+ *        0x7)  18 Mbps
+ *        0x9)  24 Mbps
+ *        0xB)  36 Mbps
+ *        0x1)  48 Mbps
+ *        0x3)  54 Mbps
+ *
+ * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
+ *  6-0:   10)  1 Mbps
+ *         20)  2 Mbps
+ *         55)  5.5 Mbps
+ *        110)  11 Mbps
+ */
+#define RATE_MCS_CODE_MSK 0x7
+#define RATE_MCS_SPATIAL_POS 3
+#define RATE_MCS_SPATIAL_MSK 0x18
+#define RATE_MCS_HT_DUP_POS 5
+#define RATE_MCS_HT_DUP_MSK 0x20
+
+/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
+#define RATE_MCS_FLAGS_POS 8
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK 0x100
+
+/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK 0x200
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_MCS_GF_POS 10
+#define RATE_MCS_GF_MSK 0x400
+
+/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
+#define RATE_MCS_HT40_POS 11
+#define RATE_MCS_HT40_MSK 0x800
+
+/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
+#define RATE_MCS_DUP_POS 12
+#define RATE_MCS_DUP_MSK 0x1000
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK 0x2000
+
+/**
+ * rate_n_flags Tx antenna masks
+ * 4965 has 2 transmitters
+ * bits 14:16
+ */
+#define RATE_MCS_ANT_POS       14
+#define RATE_MCS_ANT_A_MSK     0x04000
+#define RATE_MCS_ANT_B_MSK     0x08000
+#define RATE_MCS_ANT_C_MSK     0x10000
+#define RATE_MCS_ANT_AB_MSK    (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK   (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
+#define RATE_ANT_NUM 3
+
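+/*
+ * Illustrative sketch only (not a driver API): assembling a rate_n_flags
+ * word for a hypothetical HT transmission using the RATE_MCS_* masks above.
+ */
+static inline __le32 iwl_example_ht_rate_n_flags(void)
+{
+	u32 r = 2;			/* HT rate code 2: 18 Mbps, SISO */
+
+	r |= RATE_MCS_HT_MSK;		/* bit 8: HT format */
+	r |= RATE_MCS_SGI_MSK;		/* bit 13: short guard interval */
+	r |= RATE_MCS_ANT_A_MSK;	/* transmit on antenna A */
+	return cpu_to_le32(r);
+}
+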
+#define POWER_TABLE_NUM_ENTRIES                        33
+#define POWER_TABLE_NUM_HT_OFDM_ENTRIES                32
+#define POWER_TABLE_CCK_ENTRY                  32
+
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES            24
+#define IWL_PWR_CCK_ENTRIES                    2
+
+/**
+ * union iwl4965_tx_power_dual_stream
+ *
+ * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Use __le32 version (struct tx_power_dual_stream) when building command.
+ *
+ * Driver provides radio gain and DSP attenuation settings to device in pairs,
+ * one value for each transmitter chain.  The first value is for transmitter A,
+ * second for transmitter B.
+ *
+ * For SISO bit rates, both values in a pair should be identical.
+ * For MIMO rates, one value may be different from the other,
+ * in order to balance the Tx output between the two transmitters.
+ *
+ * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ */
+union iwl4965_tx_power_dual_stream {
+       struct {
+               u8 radio_tx_gain[2];
+               u8 dsp_predis_atten[2];
+       } s;
+       u32 dw;
+};
+
+/**
+ * struct tx_power_dual_stream
+ *
+ * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Same format as union iwl4965_tx_power_dual_stream, but packed as a single __le32
+ */
+struct tx_power_dual_stream {
+       __le32 dw;
+} __packed;
+
+/**
+ * struct iwl4965_tx_power_db
+ *
+ * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl4965_tx_power_db {
+       struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+} __packed;
+
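+/*
+ * Illustrative sketch only (not a driver API): filling one host-format gain
+ * pair and packing it into the little-endian table entry above; the gain
+ * values are caller-supplied and identical for both chains (SISO case).
+ */
+static inline struct tx_power_dual_stream
+iwl_example_pack_tx_power(u8 radio_gain, u8 dsp_atten)
+{
+	union iwl4965_tx_power_dual_stream host;
+	struct tx_power_dual_stream entry;
+
+	host.s.radio_tx_gain[0] = radio_gain;		/* transmitter A */
+	host.s.radio_tx_gain[1] = radio_gain;		/* transmitter B */
+	host.s.dsp_predis_atten[0] = dsp_atten;
+	host.s.dsp_predis_atten[1] = dsp_atten;
+	entry.dw = cpu_to_le32(host.dw);
+	return entry;
+}
+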
+/******************************************************************************
+ * (0a)
+ * Alive and Error Commands & Responses:
+ *
+ *****************************************************************************/
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+#define INITIALIZE_SUBTYPE    (9)
+
+/*
+ * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "initialize alive" notification once the initialization
+ * uCode image has completed its work, and is ready to load the runtime image.
+ * This is the *first* "alive" notification that the driver will receive after
+ * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * For 4965, this notification contains important calibration data for
+ * calculating txpower settings:
+ *
+ * 1)  Power supply voltage indication.  The voltage sensor outputs higher
+ *     values for lower voltage, and vice versa.
+ *
+ * 2)  Temperature measurement parameters, for each of two channel widths
+ *     (20 MHz and 40 MHz) supported by the radios.  Temperature sensing
+ *     is done via one of the receiver chains, and channel width influences
+ *     the results.
+ *
+ * 3)  Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
+ *     for each of 5 frequency ranges.
+ */
+struct iwl_init_alive_resp {
+       u8 ucode_minor;
+       u8 ucode_major;
+       __le16 reserved1;
+       u8 sw_rev[8];
+       u8 ver_type;
+       u8 ver_subtype;         /* "9" for initialize alive */
+       __le16 reserved2;
+       __le32 log_event_table_ptr;
+       __le32 error_event_table_ptr;
+       __le32 timestamp;
+       __le32 is_valid;
+
+       /* calibration values from "initialize" uCode */
+       __le32 voltage;         /* signed, higher value is lower voltage */
+       __le32 therm_r1[2];     /* signed, 1st for normal, 2nd for HT40 */
+       __le32 therm_r2[2];     /* signed */
+       __le32 therm_r3[2];     /* signed */
+       __le32 therm_r4[2];     /* signed */
+       __le32 tx_atten[5][2];  /* signed MIMO gain comp, 5 freq groups,
+                                * 2 Tx chains */
+} __packed;
+
+
+/**
+ * REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "alive" notification once the runtime image is ready
+ * to receive commands from the driver.  This is the *second* "alive"
+ * notification that the driver will receive after rebooting uCode;
+ * this "alive" is indicated by subtype field != 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * This response includes two pointers to structures within the device's
+ * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
+ *
+ * 1)  log_event_table_ptr indicates base of the event log.  This traces
+ *     a 256-entry history of uCode execution within a circular buffer.
+ *     Its header format is:
+ *
+ *     __le32 log_size;     log capacity (in number of entries)
+ *     __le32 type;         (1) timestamp with each entry, (0) no timestamp
+ *     __le32 wraps;        # times uCode has wrapped to top of circular buffer
+ *      __le32 write_index;  next circular buffer entry that uCode would fill
+ *
+ *     The header is followed by the circular buffer of log entries.  Entries
+ *     with timestamps have the following format:
+ *
+ *     __le32 event_id;     range 0 - 1500
+ *     __le32 timestamp;    low 32 bits of TSF (of network, if associated)
+ *     __le32 data;         event_id-specific data value
+ *
+ *     Entries without timestamps contain only event_id and data.
+ *
+ *
+ * 2)  error_event_table_ptr indicates base of the error log.  This contains
+ *     information about any uCode error that occurs.  For 4965, the format
+ *     of the error log is:
+ *
+ *     __le32 valid;        (nonzero) valid, (0) log is empty
+ *     __le32 error_id;     type of error
+ *     __le32 pc;           program counter
+ *     __le32 blink1;       branch link
+ *     __le32 blink2;       branch link
+ *     __le32 ilink1;       interrupt link
+ *     __le32 ilink2;       interrupt link
+ *     __le32 data1;        error-specific data
+ *     __le32 data2;        error-specific data
+ *     __le32 line;         source code line of error
+ *     __le32 bcon_time;    beacon timer
+ *     __le32 tsf_low;      network timestamp function timer
+ *     __le32 tsf_hi;       network timestamp function timer
+ *     __le32 gp1;          GP1 timer register
+ *     __le32 gp2;          GP2 timer register
+ *     __le32 gp3;          GP3 timer register
+ *     __le32 ucode_ver;    uCode version
+ *     __le32 hw_ver;       HW Silicon version
+ *     __le32 brd_ver;      HW board version
+ *     __le32 log_pc;       log program counter
+ *     __le32 frame_ptr;    frame pointer
+ *     __le32 stack_ptr;    stack pointer
+ *     __le32 hcmd;         last host command
+ *     __le32 isr0;         isr status register LMPM_NIC_ISR0: rxtx_flag
+ *     __le32 isr1;         isr status register LMPM_NIC_ISR1: host_flag
+ *     __le32 isr2;         isr status register LMPM_NIC_ISR2: enc_flag
+ *     __le32 isr3;         isr status register LMPM_NIC_ISR3: time_flag
+ *     __le32 isr4;         isr status register LMPM_NIC_ISR4: wico interrupt
+ *     __le32 isr_pref;     isr status register LMPM_NIC_PREF_STAT
+ *     __le32 wait_event;   wait event() caller address
+ *     __le32 l2p_control;  L2pControlField
+ *     __le32 l2p_duration; L2pDurationField
+ *     __le32 l2p_mhvalid;  L2pMhValidBits
+ *     __le32 l2p_addr_match; L2pAddrMatchStat
+ *     __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
+ *     __le32 u_timestamp;  date and time of the uCode compilation
+ *     __le32 reserved;
+ *
+ * The Linux driver can print both logs to the system log when a uCode error
+ * occurs.
+ */
+struct iwl_alive_resp {
+       u8 ucode_minor;
+       u8 ucode_major;
+       __le16 reserved1;
+       u8 sw_rev[8];
+       u8 ver_type;
+       u8 ver_subtype;                 /* not "9" for runtime alive */
+       __le16 reserved2;
+       __le32 log_event_table_ptr;     /* SRAM address for event log */
+       __le32 error_event_table_ptr;   /* SRAM address for error log */
+       __le32 timestamp;
+       __le32 is_valid;
+} __packed;
+
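+/*
+ * Illustrative sketch only (not a uCode API struct): the layout of one
+ * timestamped event-log entry, mirroring the fields documented in the
+ * REPLY_ALIVE comment above, for reading the log out of data SRAM.
+ */
+struct iwl_example_event_log_entry {
+	__le32 event_id;	/* range 0 - 1500 */
+	__le32 timestamp;	/* low 32 bits of TSF */
+	__le32 data;		/* event_id-specific data value */
+} __packed;
+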
+/*
+ * REPLY_ERROR = 0x2 (response only, not a command)
+ */
+struct iwl_error_resp {
+       __le32 error_type;
+       u8 cmd_id;
+       u8 reserved1;
+       __le16 bad_cmd_seq_num;
+       __le32 error_info;
+       __le64 timestamp;
+} __packed;
+
+/******************************************************************************
+ * (1)
+ * RXON Commands & Responses:
+ *
+ *****************************************************************************/
+
+/*
+ * Rx config defines & structure
+ */
+/* rx_config device types  */
+enum {
+       RXON_DEV_TYPE_AP = 1,
+       RXON_DEV_TYPE_ESS = 3,
+       RXON_DEV_TYPE_IBSS = 4,
+       RXON_DEV_TYPE_SNIFFER = 6,
+};
+
+
+#define RXON_RX_CHAIN_DRIVER_FORCE_MSK         cpu_to_le16(0x1 << 0)
+#define RXON_RX_CHAIN_DRIVER_FORCE_POS         (0)
+#define RXON_RX_CHAIN_VALID_MSK                        cpu_to_le16(0x7 << 1)
+#define RXON_RX_CHAIN_VALID_POS                        (1)
+#define RXON_RX_CHAIN_FORCE_SEL_MSK            cpu_to_le16(0x7 << 4)
+#define RXON_RX_CHAIN_FORCE_SEL_POS            (4)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK       cpu_to_le16(0x7 << 7)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS       (7)
+#define RXON_RX_CHAIN_CNT_MSK                  cpu_to_le16(0x3 << 10)
+#define RXON_RX_CHAIN_CNT_POS                  (10)
+#define RXON_RX_CHAIN_MIMO_CNT_MSK             cpu_to_le16(0x3 << 12)
+#define RXON_RX_CHAIN_MIMO_CNT_POS             (12)
+#define RXON_RX_CHAIN_MIMO_FORCE_MSK           cpu_to_le16(0x1 << 14)
+#define RXON_RX_CHAIN_MIMO_FORCE_POS           (14)
+
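+/*
+ * Illustrative sketch only (not a driver API): composing an rx_chain value
+ * with all three receive chains marked valid and hypothetical chain counts.
+ */
+static inline __le16 iwl_example_rx_chain(void)
+{
+	__le16 rx_chain = RXON_RX_CHAIN_VALID_MSK;	/* chains A/B/C usable */
+
+	rx_chain |= cpu_to_le16(2 << RXON_RX_CHAIN_CNT_POS);	   /* 2 active */
+	rx_chain |= cpu_to_le16(1 << RXON_RX_CHAIN_MIMO_CNT_POS); /* 1 for MIMO */
+	return rx_chain;
+}
+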
+/* rx_config flags */
+/* band & modulation selection */
+#define RXON_FLG_BAND_24G_MSK           cpu_to_le32(1 << 0)
+#define RXON_FLG_CCK_MSK                cpu_to_le32(1 << 1)
+/* auto detection enable */
+#define RXON_FLG_AUTO_DETECT_MSK        cpu_to_le32(1 << 2)
+/* TGg protection when tx */
+#define RXON_FLG_TGG_PROTECT_MSK        cpu_to_le32(1 << 3)
+/* cck short slot & preamble */
+#define RXON_FLG_SHORT_SLOT_MSK          cpu_to_le32(1 << 4)
+#define RXON_FLG_SHORT_PREAMBLE_MSK     cpu_to_le32(1 << 5)
+/* antenna selection */
+#define RXON_FLG_DIS_DIV_MSK            cpu_to_le32(1 << 7)
+#define RXON_FLG_ANT_SEL_MSK            cpu_to_le32(0x0f00)
+#define RXON_FLG_ANT_A_MSK              cpu_to_le32(1 << 8)
+#define RXON_FLG_ANT_B_MSK              cpu_to_le32(1 << 9)
+/* radar detection enable */
+#define RXON_FLG_RADAR_DETECT_MSK       cpu_to_le32(1 << 12)
+#define RXON_FLG_TGJ_NARROW_BAND_MSK    cpu_to_le32(1 << 13)
+/* rx response to host with 8-byte TSF
+ * (according to ON_AIR deassertion) */
+#define RXON_FLG_TSF2HOST_MSK           cpu_to_le32(1 << 15)
+
+
+/* HT flags */
+#define RXON_FLG_CTRL_CHANNEL_LOC_POS          (22)
+#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK       cpu_to_le32(0x1 << 22)
+
+#define RXON_FLG_HT_OPERATING_MODE_POS         (23)
+
+#define RXON_FLG_HT_PROT_MSK                   cpu_to_le32(0x1 << 23)
+#define RXON_FLG_HT40_PROT_MSK                 cpu_to_le32(0x2 << 23)
+
+#define RXON_FLG_CHANNEL_MODE_POS              (25)
+#define RXON_FLG_CHANNEL_MODE_MSK              cpu_to_le32(0x3 << 25)
+
+/* channel mode */
+enum {
+       CHANNEL_MODE_LEGACY = 0,
+       CHANNEL_MODE_PURE_40 = 1,
+       CHANNEL_MODE_MIXED = 2,
+       CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY                   \
+       cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40                  \
+       cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED                    \
+       cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
+/* CTS to self (if spec allows) flag */
+#define RXON_FLG_SELF_CTS_EN                   cpu_to_le32(0x1<<30)
+
+/* rx_config filter flags */
+/* accept all data frames */
+#define RXON_FILTER_PROMISC_MSK         cpu_to_le32(1 << 0)
+/* pass control & management to host */
+#define RXON_FILTER_CTL2HOST_MSK        cpu_to_le32(1 << 1)
+/* accept multi-cast */
+#define RXON_FILTER_ACCEPT_GRP_MSK      cpu_to_le32(1 << 2)
+/* don't decrypt uni-cast frames */
+#define RXON_FILTER_DIS_DECRYPT_MSK     cpu_to_le32(1 << 3)
+/* don't decrypt multi-cast frames */
+#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
+/* STA is associated */
+#define RXON_FILTER_ASSOC_MSK           cpu_to_le32(1 << 5)
+/* transfer to host non bssid beacons in associated state */
+#define RXON_FILTER_BCON_AWARE_MSK      cpu_to_le32(1 << 6)
+
+/**
+ * REPLY_RXON = 0x10 (command, has simple generic response)
+ *
+ * RXON tunes the radio tuner to a service channel, and sets up a number
+ * of parameters that are used primarily for Rx, but also for Tx operations.
+ *
+ * NOTE:  When tuning to a new channel, driver must set the
+ *        RXON_FILTER_ASSOC_MSK to 0.  This will clear station-dependent
+ *        info within the device, including the station tables, tx retry
+ *        rate tables, and txpower tables.  Driver must build a new station
+ *        table and txpower table before transmitting anything on the RXON
+ *        channel.
+ *
+ * NOTE:  All RXONs wipe clean the internal txpower table.  Driver must
+ *        issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ *        regardless of whether RXON_FILTER_ASSOC_MSK is set.
+ */
+
+struct iwl3945_rxon_cmd {
+       u8 node_addr[6];
+       __le16 reserved1;
+       u8 bssid_addr[6];
+       __le16 reserved2;
+       u8 wlap_bssid_addr[6];
+       __le16 reserved3;
+       u8 dev_type;
+       u8 air_propagation;
+       __le16 reserved4;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 assoc_id;
+       __le32 flags;
+       __le32 filter_flags;
+       __le16 channel;
+       __le16 reserved5;
+} __packed;
+
+struct iwl4965_rxon_cmd {
+       u8 node_addr[6];
+       __le16 reserved1;
+       u8 bssid_addr[6];
+       __le16 reserved2;
+       u8 wlap_bssid_addr[6];
+       __le16 reserved3;
+       u8 dev_type;
+       u8 air_propagation;
+       __le16 rx_chain;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 assoc_id;
+       __le32 flags;
+       __le32 filter_flags;
+       __le16 channel;
+       u8 ofdm_ht_single_stream_basic_rates;
+       u8 ofdm_ht_dual_stream_basic_rates;
+} __packed;
+
+/* Create a common rxon cmd which will be typecast into the 3945 or 4965
+ * specific rxon cmd, depending on where it is called from.
+ */
+struct iwl_legacy_rxon_cmd {
+       u8 node_addr[6];
+       __le16 reserved1;
+       u8 bssid_addr[6];
+       __le16 reserved2;
+       u8 wlap_bssid_addr[6];
+       __le16 reserved3;
+       u8 dev_type;
+       u8 air_propagation;
+       __le16 rx_chain;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 assoc_id;
+       __le32 flags;
+       __le32 filter_flags;
+       __le16 channel;
+       u8 ofdm_ht_single_stream_basic_rates;
+       u8 ofdm_ht_dual_stream_basic_rates;
+       u8 reserved4;
+       u8 reserved5;
+} __packed;
+
+
+/*
+ * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ */
+struct iwl3945_rxon_assoc_cmd {
+       __le32 flags;
+       __le32 filter_flags;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 reserved;
+} __packed;
+
+struct iwl4965_rxon_assoc_cmd {
+       __le32 flags;
+       __le32 filter_flags;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       u8 ofdm_ht_single_stream_basic_rates;
+       u8 ofdm_ht_dual_stream_basic_rates;
+       __le16 rx_chain_select_flags;
+       __le16 reserved;
+} __packed;
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL   10
+#define IWL_MAX_UCODE_BEACON_INTERVAL  4 /* 4096 */
+#define IWL39_MAX_UCODE_BEACON_INTERVAL        1 /* 1024 */
+
+/*
+ * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ */
+struct iwl_rxon_time_cmd {
+       __le64 timestamp;
+       __le16 beacon_interval;
+       __le16 atim_window;
+       __le32 beacon_init_val;
+       __le16 listen_interval;
+       u8 dtim_period;
+       u8 delta_cp_bss_tbtts;
+} __packed;
+
+/*
+ * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ */
+struct iwl3945_channel_switch_cmd {
+       u8 band;
+       u8 expect_beacon;
+       __le16 channel;
+       __le32 rxon_flags;
+       __le32 rxon_filter_flags;
+       __le32 switch_time;
+       struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_channel_switch_cmd {
+       u8 band;
+       u8 expect_beacon;
+       __le16 channel;
+       __le32 rxon_flags;
+       __le32 rxon_filter_flags;
+       __le32 switch_time;
+       struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+/*
+ * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ */
+struct iwl_csa_notification {
+       __le16 band;
+       __le16 channel;
+       __le32 status;          /* 0 - OK, 1 - fail */
+} __packed;
+
+/******************************************************************************
+ * (2)
+ * Quality-of-Service (QOS) Commands & Responses:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ *
+ * @cw_min: Contention window, start value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
+ * @aifsn:  Number of slots in Arbitration Interframe Space (before
+ *          performing random backoff timing prior to Tx).  Device default 1.
+ * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
+ *
+ * Device will automatically increase the contention window to (2*CW) + 1 for each
+ * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+       __le16 cw_min;
+       __le16 cw_max;
+       u8 aifsn;
+       u8 reserved1;
+       __le16 edca_txop;
+} __packed;
+
+/* QoS flags defines */
+#define QOS_PARAM_FLG_UPDATE_EDCA_MSK  cpu_to_le32(0x01)
+#define QOS_PARAM_FLG_TGN_MSK          cpu_to_le32(0x02)
+#define QOS_PARAM_FLG_TXOP_TYPE_MSK    cpu_to_le32(0x10)
+
+/* Number of Access Categories (AC) (EDCA), queues 0..3 */
+#define AC_NUM                4
+
+/*
+ * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ *
+ * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
+ * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
+ */
+struct iwl_qosparam_cmd {
+       __le32 qos_flags;
+       struct iwl_ac_qos ac[AC_NUM];
+} __packed;
+
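+/*
+ * Illustrative sketch only (not a driver API): filling one EDCA access
+ * category entry with the device-default values documented above.
+ */
+static inline void iwl_example_fill_ac(struct iwl_ac_qos *ac)
+{
+	ac->cw_min = cpu_to_le16(0x0f);		/* power-of-2 minus 1 */
+	ac->cw_max = cpu_to_le16(0x3f);		/* power-of-2 minus 1 */
+	ac->aifsn = 1;
+	ac->reserved1 = 0;
+	ac->edca_txop = cpu_to_le16(0);		/* no TXOP limit */
+}
+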
+/******************************************************************************
+ * (3)
+ * Add/Modify Stations Commands & Responses:
+ *
+ *****************************************************************************/
+/*
+ * Multi station support
+ */
+
+/* Special, dedicated locations within device's station table */
+#define        IWL_AP_ID               0
+#define        IWL_STA_ID              2
+#define        IWL3945_BROADCAST_ID    24
+#define IWL3945_STATION_COUNT  25
+#define IWL4965_BROADCAST_ID   31
+#define        IWL4965_STATION_COUNT   32
+
+#define        IWL_STATION_COUNT       32      /* MAX(3945,4965)*/
+#define        IWL_INVALID_STATION     255
+
+#define STA_FLG_TX_RATE_MSK            cpu_to_le32(1 << 2)
+#define STA_FLG_PWR_SAVE_MSK           cpu_to_le32(1 << 8)
+#define STA_FLG_RTS_MIMO_PROT_MSK      cpu_to_le32(1 << 17)
+#define STA_FLG_AGG_MPDU_8US_MSK       cpu_to_le32(1 << 18)
+#define STA_FLG_MAX_AGG_SIZE_POS       (19)
+#define STA_FLG_MAX_AGG_SIZE_MSK       cpu_to_le32(3 << 19)
+#define STA_FLG_HT40_EN_MSK            cpu_to_le32(1 << 21)
+#define STA_FLG_MIMO_DIS_MSK           cpu_to_le32(1 << 22)
+#define STA_FLG_AGG_MPDU_DENSITY_POS   (23)
+#define STA_FLG_AGG_MPDU_DENSITY_MSK   cpu_to_le32(7 << 23)
+
+/* Use in mode field.  1: modify existing entry, 0: add new station entry */
+#define STA_CONTROL_MODIFY_MSK         0x01
+
+/* key flags __le16*/
+#define STA_KEY_FLG_ENCRYPT_MSK        cpu_to_le16(0x0007)
+#define STA_KEY_FLG_NO_ENC     cpu_to_le16(0x0000)
+#define STA_KEY_FLG_WEP                cpu_to_le16(0x0001)
+#define STA_KEY_FLG_CCMP       cpu_to_le16(0x0002)
+#define STA_KEY_FLG_TKIP       cpu_to_le16(0x0003)
+
+#define STA_KEY_FLG_KEYID_POS  8
+#define STA_KEY_FLG_INVALID    cpu_to_le16(0x0800)
+/* wep key is either from global key (0) or from station info array (1) */
+#define STA_KEY_FLG_MAP_KEY_MSK        cpu_to_le16(0x0008)
+
+/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
+#define STA_KEY_FLG_KEY_SIZE_MSK       cpu_to_le16(0x1000)
+#define STA_KEY_MULTICAST_MSK          cpu_to_le16(0x4000)
+#define STA_KEY_MAX_NUM                8
+
+/* Flags indicate whether to modify vs. don't change various station params */
+#define        STA_MODIFY_KEY_MASK             0x01
+#define        STA_MODIFY_TID_DISABLE_TX       0x02
+#define        STA_MODIFY_TX_RATE_MSK          0x04
+#define STA_MODIFY_ADDBA_TID_MSK       0x08
+#define STA_MODIFY_DELBA_TID_MSK       0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK  0x20
+
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid)      (((sta_id) << 4) + (tid))
+
+struct iwl4965_keyinfo {
+       __le16 key_flags;
+       u8 tkip_rx_tsc_byte2;   /* TSC[2] for key mix ph1 detection */
+       u8 reserved1;
+       __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
+       u8 key_offset;
+       u8 reserved2;
+       u8 key[16];             /* 16-byte unicast decryption key */
+} __packed;
+
+/**
+ * struct sta_id_modify
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
+ *
+ * Driver selects unused table index when adding new station,
+ * or the index to a pre-existing station entry when modifying that station.
+ * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ *
+ * modify_mask flags select which parameters to modify vs. leave alone.
+ */
+struct sta_id_modify {
+       u8 addr[ETH_ALEN];
+       __le16 reserved1;
+       u8 sta_id;
+       u8 modify_mask;
+       __le16 reserved2;
+} __packed;
+
+/*
+ * REPLY_ADD_STA = 0x18 (command)
+ *
+ * The device contains an internal table of per-station information,
+ * with info on security keys, aggregation parameters, and Tx rates for
+ * initial Tx attempt and any retries (4965 devices use
+ * REPLY_TX_LINK_QUALITY_CMD,
+ * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ *
+ * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * a new entry, or modifying a pre-existing one.
+ *
+ * NOTE:  RXON command (without "associated" bit set) wipes the station table
+ *        clean.  Moving into RF_KILL state does this also.  Driver must set up
+ *        new station table before transmitting anything on the RXON channel
+ *        (except active scans or active measurements; those commands carry
+ *        their own txpower/rate setup data).
+ *
+ *        When getting started on a new channel, driver must set up the
+ *        IWL_BROADCAST_ID entry (last entry in the table).  For a client
+ *        station in a BSS, once an AP is selected, driver sets up the AP STA
+ *        in the IWL_AP_ID entry (1st entry in the table).  BROADCAST and AP
+ *        are all that are needed for a BSS client station.  If the device is
+ *        used as AP, or in an IBSS network, driver must set up station table
+ *        entries for all STAs in network, starting with index IWL_STA_ID.
+ */
+
+struct iwl3945_addsta_cmd {
+       u8 mode;                /* 1: modify existing, 0: add new station */
+       u8 reserved[3];
+       struct sta_id_modify sta;
+       struct iwl4965_keyinfo key;
+       __le32 station_flags;           /* STA_FLG_* */
+       __le32 station_flags_msk;       /* STA_FLG_* */
+
+       /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+        * corresponding to bit (e.g. bit 5 controls TID 5).
+        * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+       __le16 tid_disable_tx;
+
+       __le16 rate_n_flags;
+
+       /* TID for which to add block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       u8 add_immediate_ba_tid;
+
+       /* TID for which to remove block-ack support.
+        * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+       u8 remove_immediate_ba_tid;
+
+       /* Starting Sequence Number for added block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       __le16 add_immediate_ba_ssn;
+} __packed;
+
+struct iwl4965_addsta_cmd {
+       u8 mode;                /* 1: modify existing, 0: add new station */
+       u8 reserved[3];
+       struct sta_id_modify sta;
+       struct iwl4965_keyinfo key;
+       __le32 station_flags;           /* STA_FLG_* */
+       __le32 station_flags_msk;       /* STA_FLG_* */
+
+       /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+        * corresponding to bit (e.g. bit 5 controls TID 5).
+        * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+       __le16 tid_disable_tx;
+
+       __le16  reserved1;
+
+       /* TID for which to add block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       u8 add_immediate_ba_tid;
+
+       /* TID for which to remove block-ack support.
+        * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+       u8 remove_immediate_ba_tid;
+
+       /* Starting Sequence Number for added block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       __le16 add_immediate_ba_ssn;
+
+       /*
+        * Number of packets OK to transmit to station even though
+        * it is asleep -- used to synchronise PS-poll and u-APSD
+        * responses while ucode keeps track of STA sleep state.
+        */
+       __le16 sleep_tx_count;
+
+       __le16 reserved2;
+} __packed;
+
+/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
+struct iwl_legacy_addsta_cmd {
+       u8 mode;                /* 1: modify existing, 0: add new station */
+       u8 reserved[3];
+       struct sta_id_modify sta;
+       struct iwl4965_keyinfo key;
+       __le32 station_flags;           /* STA_FLG_* */
+       __le32 station_flags_msk;       /* STA_FLG_* */
+
+       /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+        * corresponding to bit (e.g. bit 5 controls TID 5).
+        * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+       __le16 tid_disable_tx;
+
+       __le16  rate_n_flags;           /* 3945 only */
+
+       /* TID for which to add block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       u8 add_immediate_ba_tid;
+
+       /* TID for which to remove block-ack support.
+        * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+       u8 remove_immediate_ba_tid;
+
+       /* Starting Sequence Number for added block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       __le16 add_immediate_ba_ssn;
+
+       /*
+        * Number of packets OK to transmit to station even though
+        * it is asleep -- used to synchronise PS-poll and u-APSD
+        * responses while ucode keeps track of STA sleep state.
+        */
+       __le16 sleep_tx_count;
+
+       __le16 reserved2;
+} __packed;
+
+
+#define ADD_STA_SUCCESS_MSK            0x1
+#define ADD_STA_NO_ROOM_IN_TABLE       0x2
+#define ADD_STA_NO_BLOCK_ACK_RESOURCE  0x4
+#define ADD_STA_MODIFY_NON_EXIST_STA   0x8
+/*
+ * REPLY_ADD_STA = 0x18 (response)
+ */
+struct iwl_add_sta_resp {
+       u8 status;      /* ADD_STA_* */
+} __packed;
+
+#define REM_STA_SUCCESS_MSK              0x1
+/*
+ *  REPLY_REM_STA = 0x19 (response)
+ */
+struct iwl_rem_sta_resp {
+       u8 status;
+} __packed;
+
+/*
+ *  REPLY_REM_STA = 0x19 (command)
+ */
+struct iwl_rem_sta_cmd {
+       u8 num_sta;     /* number of removed stations */
+       u8 reserved[3];
+       u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+       u8 reserved2[2];
+} __packed;
+
+#define IWL_TX_FIFO_BK_MSK             cpu_to_le32(BIT(0))
+#define IWL_TX_FIFO_BE_MSK             cpu_to_le32(BIT(1))
+#define IWL_TX_FIFO_VI_MSK             cpu_to_le32(BIT(2))
+#define IWL_TX_FIFO_VO_MSK             cpu_to_le32(BIT(3))
+#define IWL_AGG_TX_QUEUE_MSK           cpu_to_le32(0xffc00)
+
+#define IWL_DROP_SINGLE                0
+#define IWL_DROP_SELECTED      1
+#define IWL_DROP_ALL           2
+
+/*
+ * REPLY_WEP_KEY = 0x20
+ */
+struct iwl_wep_key {
+       u8 key_index;
+       u8 key_offset;
+       u8 reserved1[2];
+       u8 key_size;
+       u8 reserved2[3];
+       u8 key[16];
+} __packed;
+
+struct iwl_wep_cmd {
+       u8 num_keys;
+       u8 global_key_type;
+       u8 flags;
+       u8 reserved;
+       struct iwl_wep_key key[0];
+} __packed;
+
+#define WEP_KEY_WEP_TYPE 1
+#define WEP_KEYS_MAX 4
+#define WEP_INVALID_OFFSET 0xff
+#define WEP_KEY_LEN_64 5
+#define WEP_KEY_LEN_128 13
+
+/******************************************************************************
+ * (4)
+ * Rx Responses:
+ *
+ *****************************************************************************/
+
+#define RX_RES_STATUS_NO_CRC32_ERROR   cpu_to_le32(1 << 0)
+#define RX_RES_STATUS_NO_RXE_OVERFLOW  cpu_to_le32(1 << 1)
+
+#define RX_RES_PHY_FLAGS_BAND_24_MSK   cpu_to_le16(1 << 0)
+#define RX_RES_PHY_FLAGS_MOD_CCK_MSK           cpu_to_le16(1 << 1)
+#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK    cpu_to_le16(1 << 2)
+#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK       cpu_to_le16(1 << 3)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_POS           4
+
+#define RX_RES_STATUS_SEC_TYPE_MSK     (0x7 << 8)
+#define RX_RES_STATUS_SEC_TYPE_NONE    (0x0 << 8)
+#define RX_RES_STATUS_SEC_TYPE_WEP     (0x1 << 8)
+#define RX_RES_STATUS_SEC_TYPE_CCMP    (0x2 << 8)
+#define RX_RES_STATUS_SEC_TYPE_TKIP    (0x3 << 8)
+#define        RX_RES_STATUS_SEC_TYPE_ERR      (0x7 << 8)
+
+#define RX_RES_STATUS_STATION_FOUND    (1<<6)
+#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
+
+#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
+#define RX_RES_STATUS_NOT_DECRYPT      (0x0 << 11)
+#define RX_RES_STATUS_DECRYPT_OK       (0x3 << 11)
+#define RX_RES_STATUS_BAD_ICV_MIC      (0x1 << 11)
+#define RX_RES_STATUS_BAD_KEY_TTAK     (0x2 << 11)
+
+#define RX_MPDU_RES_STATUS_ICV_OK      (0x20)
+#define RX_MPDU_RES_STATUS_MIC_OK      (0x40)
+#define RX_MPDU_RES_STATUS_TTAK_OK     (1 << 7)
+#define RX_MPDU_RES_STATUS_DEC_DONE_MSK        (0x800)
+
+
+struct iwl3945_rx_frame_stats {
+       u8 phy_count;
+       u8 id;
+       u8 rssi;
+       u8 agc;
+       __le16 sig_avg;
+       __le16 noise_diff;
+       u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_hdr {
+       __le16 channel;
+       __le16 phy_flags;
+       u8 reserved1;
+       u8 rate;
+       __le16 len;
+       u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_end {
+       __le32 status;
+       __le64 timestamp;
+       __le32 beacon_timestamp;
+} __packed;
+
+/*
+ * REPLY_3945_RX = 0x1b (response only, not a command)
+ *
+ * NOTE:  DO NOT dereference casts to this structure.
+ * It is provided only for calculating minimum data set size.
+ * The actual offsets of the hdr and end are dynamic based on
+ * stats.phy_count
+ */
+struct iwl3945_rx_frame {
+       struct iwl3945_rx_frame_stats stats;
+       struct iwl3945_rx_frame_hdr hdr;
+       struct iwl3945_rx_frame_end end;
+} __packed;
+
+#define IWL39_RX_FRAME_SIZE    (4 + sizeof(struct iwl3945_rx_frame))
+
+/* Fixed (non-configurable) rx data from phy */
+
+#define IWL49_RX_RES_PHY_CNT 14
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET     (4)
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK       (0x70)
+#define IWL49_AGC_DB_MASK                      (0x3f80)        /* MASK(7,13) */
+#define IWL49_AGC_DB_POS                       (7)
+struct iwl4965_rx_non_cfg_phy {
+       __le16 ant_selection;   /* ant A bit 4, ant B bit 5, ant C bit 6 */
+       __le16 agc_info;        /* agc code 0:6, agc dB 7:13, reserved 14:15 */
+       u8 rssi_info[6];        /* we use even entries, 0/2/4 for A/B/C rssi */
+       u8 pad[0];
+} __packed;
+
+
+/*
+ * REPLY_RX = 0xc3 (response only, not a command)
+ * Used only for legacy (non 11n) frames.
+ */
+struct iwl_rx_phy_res {
+       u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
+       u8 cfg_phy_cnt;         /* configurable DSP phy data byte count */
+       u8 stat_id;             /* configurable DSP phy data set ID */
+       u8 reserved1;
+       __le64 timestamp;       /* TSF at on air rise */
+       __le32 beacon_time_stamp; /* beacon at on-air rise */
+       __le16 phy_flags;       /* general phy flags: band, modulation, ... */
+       __le16 channel;         /* channel number */
+       u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+       __le32 rate_n_flags;    /* RATE_MCS_* */
+       __le16 byte_count;      /* frame's byte-count */
+       __le16 frame_time;      /* frame's time on the air */
+} __packed;
+
+struct iwl_rx_mpdu_res_start {
+       __le16 byte_count;
+       __le16 reserved;
+} __packed;
+
+
+/******************************************************************************
+ * (5)
+ * Tx Commands & Responses:
+ *
+ * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * queues in host DRAM, shared between driver and device (see comments for
+ * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
+ * are preparing to transmit, the device pulls the Tx command over the PCI
+ * bus via one of the device's Tx DMA channels, to fill an internal FIFO
+ * from which data will be transmitted.
+ *
+ * uCode handles all timing and protocol related to control frames
+ * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
+ * handle reception of block-acks; uCode updates the host driver via
+ * REPLY_COMPRESSED_BA.
+ *
+ * uCode handles retrying Tx when an ACK is expected but not received.
+ * This includes trying lower data rates than the one requested in the Tx
+ * command, as set up by the REPLY_RATE_SCALE (for 3945) or
+ * REPLY_TX_LINK_QUALITY_CMD (4965).
+ *
+ * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * This command must be executed after every RXON command, before Tx can occur.
+ *****************************************************************************/
+
+/* REPLY_TX Tx flags field */
+
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ */
+#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
+
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
+ * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ */
+#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
+
+/* 1: Expect ACK from receiving station
+ * 0: Don't expect ACK (MAC header's duration field s/b 0)
+ * Set this for unicast frames, but not broadcast/multicast. */
+#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
+
+/* For 4965 devices:
+ * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
+ *    Tx command's initial_rate_index indicates first rate to try;
+ *    uCode walks through table for additional Tx attempts.
+ * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
+ *    This rate will be used for all Tx attempts; it will not be scaled. */
+#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
+
+/* 1: Expect immediate block-ack.
+ * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
+#define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
+
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ */
+#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
+
+/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
+ * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
+#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+
+/* 1: uCode overrides sequence control field in MAC header.
+ * 0: Driver provides sequence control field in MAC header.
+ * Set this for management frames, non-QOS data frames, non-unicast frames,
+ * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
+
+/* 1: This frame is non-last MPDU; more fragments are coming.
+ * 0: Last fragment, or not using fragmentation. */
+#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
+
+/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
+ * 0: No TSF required in outgoing frame.
+ * Set this for transmitting beacons and probe responses. */
+#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
+
+/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
+ *    alignment of frame's payload data field.
+ * 0: No pad
+ * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
+ * field (but not both).  Driver must align frame data (i.e. data following
+ * MAC header) to DWORD boundary. */
+#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
+
+/* accelerate aggregation support
+ * 0 - no CCMP encryption; 1 - CCMP encryption */
+#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
+
+/* HCCA-AP - disable duration overwriting. */
+#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
+
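+/*
+ * Illustrative sketch only (not a driver API): a plausible tx_flags value
+ * for an RTS-protected unicast data frame on 4965; which flags a real
+ * driver sets depends on frame type and association state.
+ */
+static inline __le32 iwl_example_tx_flags(void)
+{
+	__le32 flags = TX_CMD_FLG_ACK_MSK;	/* unicast: expect an ACK */
+
+	flags |= TX_CMD_FLG_RTS_MSK;		/* RTS before this frame */
+	flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;	/* required once RTS/CTS is set */
+	flags |= TX_CMD_FLG_STA_RATE_MSK;	/* use rate-scale table (4965) */
+	return flags;
+}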
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP         0x01
+#define TX_CMD_SEC_CCM         0x02
+#define TX_CMD_SEC_TKIP                0x03
+#define TX_CMD_SEC_MSK         0x03
+#define TX_CMD_SEC_SHIFT       6
+#define TX_CMD_SEC_KEY128      0x08
+
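+/*
+ * Illustrative sketch only (not a driver API): one plausible encoding of
+ * sec_ctl for a 104-bit WEP key, placing the (hypothetical) key index in
+ * the bits selected by TX_CMD_SEC_SHIFT.
+ */
+static inline u8 iwl_example_wep_sec_ctl(u8 keyidx)
+{
+	return TX_CMD_SEC_WEP | TX_CMD_SEC_KEY128 |
+	       ((keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+}
+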
+/*
+ * security overhead sizes
+ */
+#define WEP_IV_LEN 4
+#define WEP_ICV_LEN 4
+#define CCMP_MIC_LEN 8
+#define TKIP_ICV_LEN 4
+
+/*
+ * REPLY_TX = 0x1c (command)
+ */
+
+struct iwl3945_tx_cmd {
+       /*
+        * MPDU byte count:
+        * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+        * + 8 byte IV for CCM or TKIP (not used for WEP)
+        * + Data payload
+        * + 8-byte MIC (not used for CCM/WEP)
+        * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+        *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+        * Range: 14-2342 bytes.
+        */
+       __le16 len;
+
+       /*
+        * MPDU or MSDU byte count for next frame.
+        * Used for fragmentation and bursting, but not 11n aggregation.
+        * Same as "len", but for next frame.  Set to 0 if not applicable.
+        */
+       __le16 next_frame_len;
+
+       __le32 tx_flags;        /* TX_CMD_FLG_* */
+
+       u8 rate;
+
+       /* Index of recipient station in uCode's station table */
+       u8 sta_id;
+       u8 tid_tspec;
+       u8 sec_ctl;
+       u8 key[16];
+       union {
+               u8 byte[8];
+               __le16 word[4];
+               __le32 dw[2];
+       } tkip_mic;
+       __le32 next_frame_info;
+       union {
+               __le32 life_time;
+               __le32 attempt;
+       } stop_time;
+       u8 supp_rates[2];
+       u8 rts_retry_limit;     /*byte 50 */
+       u8 data_retry_limit;    /*byte 51 */
+       union {
+               __le16 pm_frame_timeout;
+               __le16 attempt_duration;
+       } timeout;
+
+       /*
+        * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+        * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+        */
+       __le16 driver_txop;
+
+       /*
+        * MAC header goes here, followed by 2 bytes padding if MAC header
+        * length is 26 or 30 bytes, followed by payload data
+        */
+       u8 payload[0];
+       struct ieee80211_hdr hdr[0];
+} __packed;
+
+/*
+ * REPLY_TX = 0x1c (response)
+ */
+struct iwl3945_tx_resp {
+       u8 failure_rts;
+       u8 failure_frame;
+       u8 bt_kill_count;
+       u8 rate;
+       __le32 wireless_media_time;
+       __le32 status;          /* TX status */
+} __packed;
+
+
+/*
+ * 4965 uCode updates these Tx attempt count values in host DRAM.
+ * Used for managing Tx retries when expecting block-acks.
+ * Driver should set these fields to 0.
+ */
+struct iwl_dram_scratch {
+       u8 try_cnt;             /* Tx attempts */
+       u8 bt_kill_cnt;         /* Tx attempts blocked by Bluetooth device */
+       __le16 reserved;
+} __packed;
+
+struct iwl_tx_cmd {
+       /*
+        * MPDU byte count:
+        * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+        * + 8 byte IV for CCM or TKIP (not used for WEP)
+        * + Data payload
+        * + 8-byte MIC (not used for CCM/WEP)
+        * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+        *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+        * Range: 14-2342 bytes.
+        */
+       __le16 len;
+
+       /*
+        * MPDU or MSDU byte count for next frame.
+        * Used for fragmentation and bursting, but not 11n aggregation.
+        * Same as "len", but for next frame.  Set to 0 if not applicable.
+        */
+       __le16 next_frame_len;
+
+       __le32 tx_flags;        /* TX_CMD_FLG_* */
+
+       /* uCode may modify this field of the Tx command (in host DRAM!).
+        * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+       struct iwl_dram_scratch scratch;
+
+       /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+       __le32 rate_n_flags;    /* RATE_MCS_* */
+
+       /* Index of destination station in uCode's station table */
+       u8 sta_id;
+
+       /* Type of security encryption:  CCM or TKIP */
+       u8 sec_ctl;             /* TX_CMD_SEC_* */
+
+       /*
+        * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+        * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
+        * data frames, this field may be used to selectively reduce initial
+        * rate (via non-0 value) for special frames (e.g. management), while
+        * still supporting rate scaling for all frames.
+        */
+       u8 initial_rate_index;
+       u8 reserved;
+       u8 key[16];
+       __le16 next_frame_flags;
+       __le16 reserved2;
+       union {
+               __le32 life_time;
+               __le32 attempt;
+       } stop_time;
+
+       /* Host DRAM physical address pointer to "scratch" in this command.
+        * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
+       __le32 dram_lsb_ptr;
+       u8 dram_msb_ptr;
+
+       u8 rts_retry_limit;     /*byte 50 */
+       u8 data_retry_limit;    /*byte 51 */
+       u8 tid_tspec;
+       union {
+               __le16 pm_frame_timeout;
+               __le16 attempt_duration;
+       } timeout;
+
+       /*
+        * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+        * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+        */
+       __le16 driver_txop;
+
+       /*
+        * MAC header goes here, followed by 2 bytes padding if MAC header
+        * length is 26 or 30 bytes, followed by payload data
+        */
+       u8 payload[0];
+       struct ieee80211_hdr hdr[0];
+} __packed;
+
+/* TX command response is sent after *3945* transmission attempts.
+ *
+ * NOTES:
+ *
+ * TX_STATUS_FAIL_NEXT_FRAG
+ *
+ * If the fragment flag in the MAC header for the frame being transmitted
+ * is set and there is insufficient time to transmit the next frame, the
+ * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
+ *
+ * TX_STATUS_FIFO_UNDERRUN
+ *
+ * Indicates the host did not provide bytes to the FIFO fast enough while
+ * a TX was in progress.
+ *
+ * TX_STATUS_FAIL_MGMNT_ABORT
+ *
+ * This status is only possible if the ABORT ON MGMT RX parameter was
+ * set to true with the TX command.
+ *
+ * If the MSB of the status parameter is set then an abort sequence is
+ * required.  This sequence consists of the host activating the TX Abort
+ * control line, and then waiting for the TX Abort command response.  This
+ * indicates that the device is no longer in a transmit state, and that the
+ * command FIFO has been cleared.  The host must then deactivate the TX Abort
+ * control line.  Receiving is still allowed in this case.
+ */
+enum {
+       TX_3945_STATUS_SUCCESS = 0x01,
+       TX_3945_STATUS_DIRECT_DONE = 0x02,
+       TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+       TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+       TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+       TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+       TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+       TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+       TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+       TX_3945_STATUS_FAIL_ABORTED = 0x89,
+       TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+       TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+       TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+       TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+       TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+       TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+       TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+       TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *4965* transmission attempts.
+ *
+ * Both postpone and abort statuses are expected behavior from uCode; no
+ * special operation is required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all tx frames in the
+ * queues.
+ */
+enum {
+       TX_STATUS_SUCCESS = 0x01,
+       TX_STATUS_DIRECT_DONE = 0x02,
+       /* postpone TX */
+       TX_STATUS_POSTPONE_DELAY = 0x40,
+       TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+       TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+       TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+       /* abort TX */
+       TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+       TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+       TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+       TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+       TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+       TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+       TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+       TX_STATUS_FAIL_DEST_PS = 0x88,
+       TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+       TX_STATUS_FAIL_BT_RETRY = 0x8a,
+       TX_STATUS_FAIL_STA_INVALID = 0x8b,
+       TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+       TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+       TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+       TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+       TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
+       TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+#define        TX_PACKET_MODE_REGULAR          0x0000
+#define        TX_PACKET_MODE_BURST_SEQ        0x0100
+#define        TX_PACKET_MODE_BURST_FIRST      0x0200
+
+enum {
+       TX_POWER_PA_NOT_ACTIVE = 0x0,
+};
+
+enum {
+       TX_STATUS_MSK = 0x000000ff,             /* bits 0:7 */
+       TX_STATUS_DELAY_MSK = 0x00000040,
+       TX_STATUS_ABORT_MSK = 0x00000080,
+       TX_PACKET_MODE_MSK = 0x0000ff00,        /* bits 8:15 */
+       TX_FIFO_NUMBER_MSK = 0x00070000,        /* bits 16:18 */
+       TX_RESERVED = 0x00780000,               /* bits 19:22 */
+       TX_POWER_PA_DETECT_MSK = 0x7f800000,    /* bits 23:30 */
+       TX_ABORT_REQUIRED_MSK = 0x80000000,     /* bits 31:31 */
+};
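+
+/*
+ * Illustrative sketch only, not from the driver: pulling the individual
+ * fields out of a (host byte order) Tx status word using the masks above.
+ * The helper names are hypothetical.
+ */
+static inline u8 iwl_tx_status_code(u32 status)
+{
+       return status & TX_STATUS_MSK;                  /* TX_STATUS_*, bits 0:7 */
+}
+
+static inline u8 iwl_tx_status_fifo(u32 status)
+{
+       return (status & TX_FIFO_NUMBER_MSK) >> 16;     /* bits 16:18 */
+}
+
+static inline bool iwl_tx_status_abort_required(u32 status)
+{
+       /* MSB set: host must run the TX Abort sequence described earlier */
+       return !!(status & TX_ABORT_REQUIRED_MSK);
+}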
+
+/* *******************************
+ * TX aggregation status
+ ******************************* */
+
+enum {
+       AGG_TX_STATE_TRANSMITTED = 0x00,
+       AGG_TX_STATE_UNDERRUN_MSK = 0x01,
+       AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
+       AGG_TX_STATE_ABORT_MSK = 0x08,
+       AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
+       AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
+       AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
+       AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
+       AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
+       AGG_TX_STATE_DUMP_TX_MSK = 0x200,
+       AGG_TX_STATE_DELAY_TX_MSK = 0x400
+};
+
+#define AGG_TX_STATUS_MSK      0x00000fff      /* bits 0:11 */
+#define AGG_TX_TRY_MSK         0x0000f000      /* bits 12:15 */
+
+#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
+                                    AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
+
+/* # tx attempts for first frame in aggregation */
+#define AGG_TX_STATE_TRY_CNT_POS 12
+#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
+
+/* Command ID and sequence number of Tx command for this frame */
+#define AGG_TX_STATE_SEQ_NUM_POS 16
+#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
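+
+/*
+ * Illustrative sketch only, not from the driver: decoding a per-frame
+ * 32-bit aggregation status word (status in the low 16 bits, Tx command
+ * sequence number in the high 16 bits) with the masks above.
+ */
+static inline u8 iwl_agg_status_try_cnt(u32 frame_status)
+{
+       /* # tx attempts for the first frame in the aggregation */
+       return (frame_status & AGG_TX_STATE_TRY_CNT_MSK)
+                               >> AGG_TX_STATE_TRY_CNT_POS;
+}
+
+static inline u16 iwl_agg_status_seq_num(u32 frame_status)
+{
+       /* sequence number of this frame's Tx command (not the SSN) */
+       return (frame_status & AGG_TX_STATE_SEQ_NUM_MSK)
+                               >> AGG_TX_STATE_SEQ_NUM_POS;
+}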
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1)  No aggregation (frame_count == 1).  This reports Tx results for
+ *     a single frame.  Multiple attempts, at various bit rates, may have
+ *     been made for this frame.
+ *
+ * 2)  Aggregation (frame_count > 1).  This reports Tx results for
+ *     2 or more frames that used block-acknowledge.  All frames were
+ *     transmitted at same rate.  Rate scaling may have been used if first
+ *     frame in this new agg block failed in previous agg block(s).
+ *
+ *     Note that, for aggregation, ACK (block-ack) status is not delivered here;
+ *     block-ack has not been received by the time the 4965 device records
+ *     this status.
+ *     This status relates to reasons the tx might have been blocked or aborted
+ *     within the sending station (this 4965 device), rather than whether it was
+ *     received successfully by the destination station.
+ */
+struct agg_tx_status {
+       __le16 status;
+       __le16 sequence;
+} __packed;
+
+struct iwl4965_tx_resp {
+       u8 frame_count;         /* 1 no aggregation, >1 aggregation */
+       u8 bt_kill_count;       /* # blocked by bluetooth (unused for agg) */
+       u8 failure_rts;         /* # failures due to unsuccessful RTS */
+       u8 failure_frame;       /* # failures due to no ACK (unused for agg) */
+
+       /* For non-agg:  Rate at which frame was successful.
+        * For agg:  Rate at which all frames were transmitted. */
+       __le32 rate_n_flags;    /* RATE_MCS_*  */
+
+       /* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
+        * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
+       __le16 wireless_media_time;     /* uSecs */
+
+       __le16 reserved;
+       __le32 pa_power1;       /* RF power amplifier measurement (not used) */
+       __le32 pa_power2;
+
+       /*
+        * For non-agg:  frame status TX_STATUS_*
+        * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
+        *           fields follow this one, up to frame_count.
+        *           Bit fields:
+        *           11- 0:  AGG_TX_STATE_* status code
+        *           15-12:  Retry count for 1st frame in aggregation (retries
+        *                   occur if tx failed for this frame when it was a
+        *                   member of a previous aggregation block).  If rate
+        *                   scaling is used, retry count indicates the rate
+        *                   table entry used for all frames in the new agg.
+        *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
+        */
+       union {
+               __le32 status;
+               struct agg_tx_status agg_status[0]; /* for each agg frame */
+       } u;
+} __packed;
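+
+/*
+ * Illustrative sketch only, not from the driver: reading the union above
+ * in both response formats.  The helper name and the notion of "done"
+ * (TRANSMITTED / SUCCESS code) are assumptions for this example; ACK
+ * status for aggregations arrives later in REPLY_COMPRESSED_BA.
+ */
+static inline int iwl4965_tx_resp_frames_done(struct iwl4965_tx_resp *resp)
+{
+       int i, done = 0;
+
+       if (resp->frame_count == 1)
+               /* non-agg: one TX_STATUS_* code in bits 0:7 */
+               return (le32_to_cpu(resp->u.status) & TX_STATUS_MSK) ==
+                       TX_STATUS_SUCCESS ? 1 : 0;
+
+       /* agg: one agg_tx_status entry per frame, frame_count entries */
+       for (i = 0; i < resp->frame_count; i++) {
+               u16 state = le16_to_cpu(resp->u.agg_status[i].status);
+
+               if ((state & AGG_TX_STATUS_MSK) == AGG_TX_STATE_TRANSMITTED)
+                       done++;
+       }
+       return done;
+}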
+
+/*
+ * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ *
+ * Reports Block-Acknowledge from recipient station
+ */
+struct iwl_compressed_ba_resp {
+       __le32 sta_addr_lo32;
+       __le16 sta_addr_hi16;
+       __le16 reserved;
+
+       /* Index of recipient (BA-sending) station in uCode's station table */
+       u8 sta_id;
+       u8 tid;
+       __le16 seq_ctl;
+       __le64 bitmap;
+       __le16 scd_flow;
+       __le16 scd_ssn;
+} __packed;
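+
+/*
+ * Illustrative sketch only, not from the driver: recombining
+ * sta_addr_lo32/sta_addr_hi16 into a 6-byte MAC address buffer.  That the
+ * address is stored in transmission (byte) order is an assumption here.
+ */
+static inline void iwl_ba_resp_sta_addr(const struct iwl_compressed_ba_resp *ba,
+                                        u8 *addr)
+{
+       memcpy(addr, &ba->sta_addr_lo32, sizeof(ba->sta_addr_lo32));
+       memcpy(addr + 4, &ba->sta_addr_hi16, sizeof(ba->sta_addr_hi16));
+}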
+
+/*
+ * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ *
+ * See details under "TXPOWER" in iwl-4965-hw.h.
+ */
+
+struct iwl3945_txpowertable_cmd {
+       u8 band;                /* 0: 5 GHz, 1: 2.4 GHz */
+       u8 reserved;
+       __le16 channel;
+       struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_txpowertable_cmd {
+       u8 band;                /* 0: 5 GHz, 1: 2.4 GHz */
+       u8 reserved;
+       __le16 channel;
+       struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+
+/**
+ * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ *
+ * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ *
+ * NOTE: The table of rates passed to the uCode via the
+ * RATE_SCALE command sets up the corresponding order of
+ * rates used for all related commands, including rate
+ * masks, etc.
+ *
+ * For example, if you set 9MB (PLCP 0x0f) as the first
+ * rate in the rate table, the bit mask for that rate
+ * when passed through ofdm_basic_rates on the REPLY_RXON
+ * command would be bit 0 (1 << 0)
+ */
+struct iwl3945_rate_scaling_info {
+       __le16 rate_n_flags;
+       u8 try_cnt;
+       u8 next_rate_index;
+} __packed;
+
+struct iwl3945_rate_scaling_cmd {
+       u8 table_id;
+       u8 reserved[3];
+       struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+} __packed;
+
+
+/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
+#define  LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK   (1 << 0)
+
+/* # of EDCA prioritized tx fifos */
+#define  LINK_QUAL_AC_NUM AC_NUM
+
+/* # entries in rate scale table to support Tx retries */
+#define  LINK_QUAL_MAX_RETRY_NUM 16
+
+/* Tx antenna selection values */
+#define  LINK_QUAL_ANT_A_MSK (1 << 0)
+#define  LINK_QUAL_ANT_B_MSK (1 << 1)
+#define  LINK_QUAL_ANT_MSK   (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
+
+
+/**
+ * struct iwl_link_qual_general_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_general_params {
+       u8 flags;
+
+       /* No entries at or above this (driver chosen) index contain MIMO */
+       u8 mimo_delimiter;
+
+       /* Best single antenna to use for single stream (legacy, SISO). */
+       u8 single_stream_ant_msk;       /* LINK_QUAL_ANT_* */
+
+       /* Best antennas to use for MIMO (unused for 4965, assumes both). */
+       u8 dual_stream_ant_msk;         /* LINK_QUAL_ANT_* */
+
+       /*
+        * If driver needs to use different initial rates for different
+        * EDCA QOS access categories (as implemented by tx fifos 0-3),
+        * this table will set that up, by indicating the indexes in the
+        * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
+        * Otherwise, driver should set all entries to 0.
+        *
+        * Entry usage:
+        * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
+        * TX FIFOs above 3 use the same value (typically 0) as TX FIFO 3.
+        */
+       u8 start_rate_index[LINK_QUAL_AC_NUM];
+} __packed;
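+
+/*
+ * Illustrative sketch only, not from the driver: the simple case described
+ * above where the driver does not differentiate initial rates per access
+ * category, so every entry starts at rs_table[0].  The helper name is
+ * hypothetical.
+ */
+static inline void
+iwl_link_qual_same_initial_rate(struct iwl_link_qual_general_params *params)
+{
+       int i;
+
+       for (i = 0; i < LINK_QUAL_AC_NUM; i++)
+               params->start_rate_index[i] = 0;
+}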
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX   (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN   (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF        (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX        (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN        (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF  (31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX  (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN  (0)
+
+/**
+ * struct iwl_link_qual_agg_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_agg_params {
+
+       /*
+        * Maximum duration of an aggregation, in uSec.  Default is 4000
+        * (4 milliseconds) if not configured in .cfg
+        */
+       __le16 agg_time_limit;
+
+       /*
+        * Number of Tx retries allowed for a frame, before that frame will
+        * no longer be considered for the start of an aggregation sequence
+        * (scheduler will then try to tx it as a single frame).
+        * Driver should set this to 3.
+        */
+       u8 agg_dis_start_th;
+
+       /*
+        * Maximum number of frames in aggregation.
+        * 0 = no limit (default).  1 = no aggregation.
+        * Other values = max # frames in aggregation.
+        */
+       u8 agg_frame_cnt_limit;
+
+       __le32 reserved;
+} __packed;
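+
+/*
+ * Illustrative sketch only, not from the driver: filling the aggregation
+ * parameters with the _DEF values defined above (4 ms time limit, start
+ * threshold of 3 retries, frame limit of 31).  The helper name is
+ * hypothetical.
+ */
+static inline void
+iwl_link_qual_agg_defaults(struct iwl_link_qual_agg_params *params)
+{
+       params->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+       params->agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+       params->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       params->reserved = cpu_to_le32(0);
+}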
+
+/*
+ * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ *
+ * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ *
+ * Each station in the 4965 device's internal station table has its own table
+ * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying
+ * Tx when an ACK is not received.  This command replaces the entire table
+ * for one station.
+ *
+ * NOTE:  Station must already be in 4965 device's station table.
+ *       Use REPLY_ADD_STA.
+ *
+ * The rate scaling procedures described below work well.  Of course, other
+ * procedures are possible, and may work better for particular environments.
+ *
+ *
+ * FILLING THE RATE TABLE
+ *
+ * Given a particular initial rate and mode, as determined by the rate
+ * scaling algorithm described below, the Linux driver uses the following
+ * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
+ * Link Quality command:
+ *
+ *
+ * 1)  If using High-throughput (HT) (SISO or MIMO) initial rate:
+ *     a) Use this same initial rate for first 3 entries.
+ *     b) Find next lower available rate using same mode (SISO or MIMO),
+ *        use for next 3 entries.  If no lower rate available, switch to
+ *        legacy mode (no HT40 channel, no MIMO, no short guard interval).
+ *     c) If using MIMO, set command's mimo_delimiter to number of entries
+ *        using MIMO (3 or 6).
+ *     d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
+ *        no MIMO, no short guard interval), at the next lower bit rate
+ *        (e.g. if second HT bit rate was 54, try 48 legacy), and follow
+ *        legacy procedure for remaining table entries.
+ *
+ * 2)  If using legacy initial rate:
+ *     a) Use the initial rate for only one entry.
+ *     b) For each following entry, reduce the rate to next lower available
+ *        rate, until reaching the lowest available rate.
+ *     c) When reducing rate, also switch antenna selection.
+ *     d) Once lowest available rate is reached, repeat this rate until
+ *        rate table is filled (16 entries), switching antenna each entry
+ *        (a sketch of this legacy fill follows struct iwl_link_quality_cmd
+ *        below).
+ *
+ *
+ * ACCUMULATING HISTORY
+ *
+ * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
+ * uses two sets of frame Tx success history:  One for the current/active
+ * modulation mode, and one for a speculative/search mode that is being
+ * attempted. If the speculative mode turns out to be more effective (i.e.
+ * actual transfer rate is better), then the driver continues to use the
+ * speculative mode as the new current active mode.
+ *
+ * Each history set contains, separately for each possible rate, data for a
+ * sliding window of the 62 most recent tx attempts at that rate.  The data
+ * includes a shifting bitmap of success(1)/failure(0), and sums of successful
+ * and attempted frames, from which the driver can additionally calculate a
+ * success ratio (success / attempted) and number of failures
+ * (attempted - success), and control the size of the window (attempted).
+ * The driver uses the bit map to remove successes from the success sum, as
+ * the oldest tx attempts fall out of the window.
+ *
+ * When the 4965 device makes multiple tx attempts for a given frame, each
+ * attempt might be at a different rate, and have different modulation
+ * characteristics (e.g. antenna, fat channel, short guard interval), as set
+ * up in the rate scaling table in the Link Quality command.  The driver must
+ * determine which rate table entry was used for each tx attempt, to determine
+ * which rate-specific history to update, and record only those attempts that
+ * match the modulation characteristics of the history set.
+ *
+ * When using block-ack (aggregation), all frames are transmitted at the same
+ * rate, since there is no per-attempt acknowledgment from the destination
+ * station.  The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * rate_n_flags field.  After receiving a block-ack, the driver can update
+ * history for the entire block all at once.
+ *
+ *
+ * FINDING BEST STARTING RATE:
+ *
+ * When working with a selected initial modulation mode (see below), the
+ * driver attempts to find a best initial rate.  The initial rate is the
+ * first entry in the Link Quality command's rate table.
+ *
+ * 1)  Calculate actual throughput (success ratio * expected throughput, see
+ *     table below) for current initial rate.  Do this only if enough frames
+ *     have been attempted to make the value meaningful:  at least 6 failed
+ *     tx attempts, or at least 8 successes.  If not enough, don't try rate
+ *     scaling yet.
+ *
+ * 2)  Find available rates adjacent to current initial rate.  Available means:
+ *     a)  supported by hardware &&
+ *     b)  supported by association &&
+ *     c)  within any constraints selected by user
+ *
+ * 3)  Gather measured throughputs for adjacent rates.  These might not have
+ *     enough history to calculate a throughput.  That's okay, we might try
+ *     using one of them anyway!
+ *
+ * 4)  Try decreasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  lower adjacent rate has better measured throughput ||
+ *     c)  higher adjacent rate has worse throughput, and lower is unmeasured
+ *
+ *     As a sanity check, if decrease was determined above, leave rate
+ *     unchanged if:
+ *     a)  lower rate unavailable
+ *     b)  success ratio at current rate > 85% (very good)
+ *     c)  current measured throughput is better than expected throughput
+ *         of lower rate (under perfect 100% tx conditions, see table below)
+ *
+ * 5)  Try increasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  both adjacent rates' throughputs are unmeasured (try it!) ||
+ *     c)  higher adjacent rate has better measured throughput ||
+ *     d)  lower adjacent rate has worse throughput, and higher is unmeasured
+ *
+ *     As a sanity check, if increase was determined above, leave rate
+ *     unchanged if:
+ *     a)  success ratio at current rate < 70%.  This is not particularly
+ *         good performance; higher rate is sure to have poorer success.
+ *
+ * 6)  Re-evaluate the rate after each tx frame.  If working with block-
+ *     acknowledge, history and statistics may be calculated for the entire
+ *     block (including prior history that fits within the history windows),
+ *     before re-evaluation.
+ *
+ * FINDING BEST STARTING MODULATION MODE:
+ *
+ * After working with a modulation mode for a "while" (and doing rate scaling),
+ * the driver searches for a new initial mode in an attempt to improve
+ * throughput.  The "while" is measured by numbers of attempted frames:
+ *
+ * For legacy mode, search for new mode after:
+ *   480 successful frames, or 160 failed frames
+ * For high-throughput modes (SISO or MIMO), search for new mode after:
+ *   4500 successful frames, or 400 failed frames
+ *
+ * Mode switch possibilities are (3 for each mode):
+ *
+ * For legacy:
+ *   Change antenna, try SISO (if HT association), try MIMO (if HT association)
+ * For SISO:
+ *   Change antenna, try MIMO, try shortened guard interval (SGI)
+ * For MIMO:
+ *   Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
+ *
+ * When trying a new mode, use the same bit rate as the old/current mode when
+ * trying antenna switches and shortened guard interval.  When switching to
+ * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
+ * for which the expected throughput (under perfect conditions) is about the
+ * same or slightly better than the actual measured throughput delivered by
+ * the old/current mode.
+ *
+ * Actual throughput can be estimated by multiplying the expected throughput
+ * by the success ratio (successful / attempted tx frames).  Frame size is
+ * not considered in this calculation; it assumes that frame size will average
+ * out to be fairly consistent over several samples.  The following are
+ * metric values for expected throughput assuming 100% success ratio.
+ * Only G band has support for CCK rates:
+ *
+ *           RATE:  1    2    5   11    6   9   12   18   24   36   48   54   60
+ *
+ *              G:  7   13   35   58   40  57   72   98  121  154  177  186  186
+ *              A:  0    0    0    0   40  57   72   98  121  154  177  186  186
+ *     SISO 20MHz:  0    0    0    0   42  42   76  102  124  159  183  193  202
+ * SGI SISO 20MHz:  0    0    0    0   46  46   82  110  132  168  192  202  211
+ *     MIMO 20MHz:  0    0    0    0   74  74  123  155  179  214  236  244  251
+ * SGI MIMO 20MHz:  0    0    0    0   81  81  131  164  188  222  243  251  257
+ *     SISO 40MHz:  0    0    0    0   77  77  127  160  184  220  242  250  257
+ * SGI SISO 40MHz:  0    0    0    0   83  83  135  169  193  229  250  257  264
+ *     MIMO 40MHz:  0    0    0    0  123 123  182  214  235  264  279  285  289
+ * SGI MIMO 40MHz:  0    0    0    0  131 131  191  222  242  270  284  289  293
+ *
+ * After the new mode has been tried for a short while (minimum of 6 failed
+ * frames or 8 successful frames), compare success ratio and actual throughput
+ * estimate of the new mode with the old.  If either is better with the new
+ * mode, continue to use the new mode.
+ *
+ * Continue comparing modes until all 3 possibilities have been tried.
+ * If moving from legacy to HT, try all 3 possibilities from the new HT
+ * mode.  After trying all 3, a best mode is found.  Continue to use this mode
+ * for the longer "while" described above (e.g. 480 successful frames for
+ * legacy), and then repeat the search process.
+ *
+ */
+struct iwl_link_quality_cmd {
+
+       /* Index of destination/recipient station in uCode's station table */
+       u8 sta_id;
+       u8 reserved1;
+       __le16 control;         /* not used */
+       struct iwl_link_qual_general_params general_params;
+       struct iwl_link_qual_agg_params agg_params;
+
+       /*
+        * Rate info; when using rate-scaling, Tx command's initial_rate_index
+        * specifies 1st Tx rate attempted, via index into this table.
+        * The 4965 device works its way through the table when retrying Tx.
+        */
+       struct {
+               __le32 rate_n_flags;    /* RATE_MCS_*, IWL_RATE_* */
+       } rs_table[LINK_QUAL_MAX_RETRY_NUM];
+       __le32 reserved2;
+} __packed;
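+
+/*
+ * Illustrative sketch only, not from the driver: filling rs_table[] for a
+ * legacy (non-HT) initial rate per steps 2a-2d of "FILLING THE RATE TABLE"
+ * above.  The caller passes the available legacy rates as rate_n_flags
+ * values in descending order (antenna bits already chosen per entry); the
+ * helper name and parameters are hypothetical.
+ */
+static inline void
+iwl_fill_legacy_rs_table(struct iwl_link_quality_cmd *lq,
+                         const u32 *rates_desc, int num_rates)
+{
+       int i, r = 0;
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               /* 2a/2b: one entry per rate, stepping down the list;
+                * 2d: repeat the lowest rate once it is reached. */
+               lq->rs_table[i].rate_n_flags = cpu_to_le32(rates_desc[r]);
+               if (r < num_rates - 1)
+                       r++;
+       }
+}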
+
+/*
+ * BT configuration enable flags:
+ *   bit 0 - 1: BT channel announcement enabled
+ *           0: disable
+ *   bit 1 - 1: priority of BT device enabled
+ *           0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY        BIT(1)
+
+#define BT_COEX_ENABLE  (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
+
+#define BT_LEAD_TIME_DEF (0x1E)
+
+#define BT_MAX_KILL_DEF (0x5)
+
+/*
+ * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ *
+ * 3945 and 4965 devices support hardware handshake with Bluetooth device on
+ * same platform.  Bluetooth device alerts wireless device when it will Tx;
+ * wireless device can delay or kill its own Tx to accommodate.
+ */
+struct iwl_bt_cmd {
+       u8 flags;
+       u8 lead_time;
+       u8 max_kill;
+       u8 reserved;
+       __le32 kill_ack_mask;
+       __le32 kill_cts_mask;
+} __packed;
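+
+/*
+ * Illustrative sketch only, not from the driver: a REPLY_BT_CONFIG payload
+ * with coexistence enabled and the default lead time / max kill values
+ * defined above; the kill masks are simply left at zero here.
+ */
+static inline void iwl_bt_cmd_init_defaults(struct iwl_bt_cmd *bt)
+{
+       memset(bt, 0, sizeof(*bt));
+       bt->flags = BT_COEX_ENABLE;
+       bt->lead_time = BT_LEAD_TIME_DEF;
+       bt->max_kill = BT_MAX_KILL_DEF;
+}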
+
+
+/******************************************************************************
+ * (6)
+ * Spectrum Management (802.11h) Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/*
+ * Spectrum Management
+ */
+#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
+                                RXON_FILTER_CTL2HOST_MSK        | \
+                                RXON_FILTER_ACCEPT_GRP_MSK      | \
+                                RXON_FILTER_DIS_DECRYPT_MSK     | \
+                                RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
+                                RXON_FILTER_ASSOC_MSK           | \
+                                RXON_FILTER_BCON_AWARE_MSK)
+
+struct iwl_measure_channel {
+       __le32 duration;        /* measurement duration in extended beacon
+                                * format */
+       u8 channel;             /* channel to measure */
+       u8 type;                /* see enum iwl_measure_type */
+       __le16 reserved;
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ */
+struct iwl_spectrum_cmd {
+       __le16 len;             /* number of bytes starting from token */
+       u8 token;               /* token id */
+       u8 id;                  /* measurement id -- 0 or 1 */
+       u8 origin;              /* 0 = TGh, 1 = other, 2 = TGk */
+       u8 periodic;            /* 1 = periodic */
+       __le16 path_loss_timeout;
+       __le32 start_time;      /* start time in extended beacon format */
+       __le32 reserved2;
+       __le32 flags;           /* rxon flags */
+       __le32 filter_flags;    /* rxon filter flags */
+       __le16 channel_count;   /* minimum 1, maximum 10 */
+       __le16 reserved3;
+       struct iwl_measure_channel channels[10];
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ */
+struct iwl_spectrum_resp {
+       u8 token;
+       u8 id;                  /* id of the prior command replaced, or 0xff */
+       __le16 status;          /* 0 - command will be handled
+                                * 1 - cannot handle (conflicts with another
+                                *     measurement) */
+} __packed;
+
+enum iwl_measurement_state {
+       IWL_MEASUREMENT_START = 0,
+       IWL_MEASUREMENT_STOP = 1,
+};
+
+enum iwl_measurement_status {
+       IWL_MEASUREMENT_OK = 0,
+       IWL_MEASUREMENT_CONCURRENT = 1,
+       IWL_MEASUREMENT_CSA_CONFLICT = 2,
+       IWL_MEASUREMENT_TGH_CONFLICT = 3,
+       /* 4-5 reserved */
+       IWL_MEASUREMENT_STOPPED = 6,
+       IWL_MEASUREMENT_TIMEOUT = 7,
+       IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+};
+
+#define NUM_ELEMENTS_IN_HISTOGRAM 8
+
+struct iwl_measurement_histogram {
+       __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
+       __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];  /* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct iwl_measurement_cca_counters {
+       __le32 ofdm;
+       __le32 cck;
+} __packed;
+
+enum iwl_measure_type {
+       IWL_MEASURE_BASIC = (1 << 0),
+       IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
+       IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+       IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+       IWL_MEASURE_FRAME = (1 << 4),
+       /* bits 5:6 are reserved */
+       IWL_MEASURE_IDLE = (1 << 7),
+};
+
+/*
+ * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ */
+struct iwl_spectrum_notification {
+       u8 id;                  /* measurement id -- 0 or 1 */
+       u8 token;
+       u8 channel_index;       /* index in measurement channel list */
+       u8 state;               /* 0 - start, 1 - stop */
+       __le32 start_time;      /* lower 32-bits of TSF */
+       u8 band;                /* 0 - 5.2GHz, 1 - 2.4GHz */
+       u8 channel;
+       u8 type;                /* see enum iwl_measurement_type */
+       u8 reserved1;
+       /* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
+        * valid if applicable for the measurement type requested. */
+       __le32 cca_ofdm;        /* cca fraction time in 40MHz clock periods */
+       __le32 cca_cck;         /* cca fraction time in 44MHz clock periods */
+       __le32 cca_time;        /* channel load time in usecs */
+       u8 basic_type;          /* 0 - bss, 1 - ofdm preamble, 2 -
+                                * unidentified */
+       u8 reserved2[3];
+       struct iwl_measurement_histogram histogram;
+       __le32 stop_time;       /* lower 32-bits of TSF */
+       __le32 status;          /* see iwl_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ *   bit 0 - '0' Driver does not allow power management
+ *           '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ *   bit 1 - '0' Don't send sleep notification
+ *           '1' Send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM
+ *   bit 2 - '0' PM has to wake up every DTIM
+ *           '1' PM may sleep over DTIM till the listen interval.
+ *
+ * PCI power managed
+ *   bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ *           '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ *   bit 4 - '1' Put radio to sleep when receiving frames for others
+ *
+ * Force sleep Modes
+ *   bit 31/30- '00' use both mac/xtal sleeps
+ *              '01' force Mac sleep
+ *              '10' force xtal sleep
+ *              '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * uCode assumes sleep over DTIM is allowed and we don't need to wake up
+ * for every DTIM.
+ */
+#define IWL_POWER_VEC_SIZE 5
+
+#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK       cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_SAVE_ENA_MSK           cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK     cpu_to_le16(BIT(1))
+#define IWL_POWER_SLEEP_OVER_DTIM_MSK          cpu_to_le16(BIT(2))
+#define IWL_POWER_PCI_PM_MSK                   cpu_to_le16(BIT(3))
+#define IWL_POWER_FAST_PD                      cpu_to_le16(BIT(4))
+#define IWL_POWER_BEACON_FILTERING             cpu_to_le16(BIT(5))
+#define IWL_POWER_SHADOW_REG_ENA               cpu_to_le16(BIT(6))
+#define IWL_POWER_CT_KILL_SET                  cpu_to_le16(BIT(7))
+
+struct iwl3945_powertable_cmd {
+       __le16 flags;
+       u8 reserved[2];
+       __le32 rx_data_timeout;
+       __le32 tx_data_timeout;
+       __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+} __packed;
+
+struct iwl_powertable_cmd {
+       __le16 flags;
+       u8 keep_alive_seconds;          /* 3945 reserved */
+       u8 debug_flags;                 /* 3945 reserved */
+       __le32 rx_data_timeout;
+       __le32 tx_data_timeout;
+       __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+       __le32 keep_alive_beacons;
+} __packed;
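+
+/*
+ * Illustrative sketch only, not from the driver: enabling driver-managed
+ * power saving with sleep over DTIM, per the flag descriptions above.
+ * Timeouts and the sleep_interval vector are left to the caller.
+ */
+static inline void iwl_powertable_allow_sleep(struct iwl_powertable_cmd *cmd)
+{
+       cmd->flags |= IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
+                     IWL_POWER_SLEEP_OVER_DTIM_MSK;
+}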
+
+/*
+ * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * all devices identical.
+ */
+struct iwl_sleep_notification {
+       u8 pm_sleep_mode;
+       u8 pm_wakeup_src;
+       __le16 reserved;
+       __le32 sleep_time;
+       __le32 tsf_low;
+       __le32 bcon_timer;
+} __packed;
+
+/* Sleep states.  all devices identical. */
+enum {
+       IWL_PM_NO_SLEEP = 0,
+       IWL_PM_SLP_MAC = 1,
+       IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+       IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+       IWL_PM_SLP_PHY = 4,
+       IWL_PM_SLP_REPENT = 5,
+       IWL_PM_WAKEUP_BY_TIMER = 6,
+       IWL_PM_WAKEUP_BY_DRIVER = 7,
+       IWL_PM_WAKEUP_BY_RFKILL = 8,
+       /* 3 reserved */
+       IWL_PM_NUM_OF_MODES = 12,
+};
+
+/*
+ * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ */
+struct iwl_card_state_notif {
+       __le32 flags;
+} __packed;
+
+#define HW_CARD_DISABLED   0x01
+#define SW_CARD_DISABLED   0x02
+#define CT_CARD_DISABLED   0x04
+#define RXON_CARD_DISABLED 0x10
+
+struct iwl_ct_kill_config {
+       __le32   reserved;
+       __le32   critical_temperature_M;
+       __le32   critical_temperature_R;
+}  __packed;
+
+/******************************************************************************
+ * (8)
+ * Scan Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
+#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ *
+ * One for each channel in the scan list.
+ * Each channel can independently select:
+ * 1)  SSID for directed active scans
+ * 2)  Txpower setting (for rate specified within Tx command)
+ * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
+ *     quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1)  If using passive_dwell (i.e. passive_dwell != 0):
+ *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2)  quiet_time <= active_dwell
+ * 3)  If restricting off-channel time (i.e. max_out_time !=0):
+ *     passive_dwell < max_out_time
+ *     active_dwell < max_out_time
+ */
+struct iwl3945_scan_channel {
+       /*
+        * type is defined as:
+        * 0:0 1 = active, 0 = passive
+        * 1:4 SSID direct bit map; if a bit is set, then corresponding
+        *     SSID IE is transmitted in probe request.
+        * 5:7 reserved
+        */
+       u8 type;
+       u8 channel;     /* band is selected by iwl3945_scan_cmd "flags" field */
+       struct iwl3945_tx_power tpc;
+       __le16 active_dwell;    /* in 1024-uSec TU (time units), typ 5-50 */
+       __le16 passive_dwell;   /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* Sets SSID direct-probe bits 1..n in the u8 "type" field */
+#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+
+struct iwl_scan_channel {
+       /*
+        * type is defined as:
+        * 0:0 1 = active, 0 = passive
+        * 1:20 SSID direct bit map; if a bit is set, then corresponding
+        *     SSID IE is transmitted in probe request.
+        * 21:31 reserved
+        */
+       __le32 type;
+       __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+       u8 tx_gain;             /* gain for analog radio */
+       u8 dsp_atten;           /* gain for DSP */
+       __le16 active_dwell;    /* in 1024-uSec TU (time units), typ 5-50 */
+       __le16 passive_dwell;   /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* Sets SSID direct-probe bits 1..n in the __le32 "type" field */
+#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
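+
+/*
+ * Worked example: with three direct-probe SSIDs, IWL_SCAN_PROBE_MASK(3)
+ * expands to BIT(3) | (BIT(3) - BIT(1)) = 0x8 | 0x6 = 0xe, i.e. SSID
+ * direct bits 1..3 of the scan channel "type" word, leaving bit 0 as the
+ * active/passive flag.
+ */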
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 (4) entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+       u8 id;
+       u8 len;
+       u8 ssid[32];
+} __packed;
+
+#define PROBE_OPTION_MAX_3945          4
+#define PROBE_OPTION_MAX               20
+#define TX_CMD_LIFE_TIME_INFINITE      cpu_to_le32(0xFFFFFFFF)
+#define IWL_GOOD_CRC_TH_DISABLED       0
+#define IWL_GOOD_CRC_TH_DEFAULT                cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_NEVER          cpu_to_le16(0xffff)
+#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_CMD_SIZE 4096
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (command)
+ *
+ * The hardware scan command is very powerful; the driver can set it up to
+ * maintain (relatively) normal network traffic while doing a scan in the
+ * background.  The max_out_time and suspend_time control the ratio of how
+ * long the device stays on an associated network channel ("service channel")
+ * vs. how long it's away from the service channel, i.e. tuned to other channels
+ * for scanning.
+ *
+ * max_out_time is the max time off-channel (in usec), and suspend_time
+ * is how long (in "extended beacon" format) that the scan is "suspended"
+ * after returning to the service channel.  That is, suspend_time is the
+ * time that we stay on the service channel, doing normal work, between
+ * scan segments.  The driver may set these parameters differently to support
+ * scanning when associated vs. not associated, and light vs. heavy traffic
+ * loads when associated.
+ *
+ * After receiving this command, the device's scan engine does the following:
+ *
+ * 1)  Sends SCAN_START notification to driver
+ * 2)  Checks to see if it has time to do scan for one channel
+ * 3)  Sends NULL packet, with power-save (PS) bit set to 1,
+ *     to tell AP that we're going off-channel
+ * 4)  Tunes to first channel in scan list, does active or passive scan
+ * 5)  Sends SCAN_RESULT notification to driver
+ * 6)  Checks to see if it has time to do scan on *next* channel in list
+ * 7)  Repeats 4-6 until it no longer has time to scan the next channel
+ *     before max_out_time expires
+ * 8)  Returns to service channel
+ * 9)  Sends NULL packet with PS=0 to tell AP that we're back
+ * 10) Stays on service channel until suspend_time expires
+ * 11) Repeats entire process 2-10 until list is complete
+ * 12) Sends SCAN_COMPLETE notification
+ *
+ * For fast, efficient scans, the scan command also has support for staying on
+ * a channel for just a short time, if doing active scanning and getting no
+ * responses to the transmitted probe request.  This time is controlled by
+ * quiet_time, and the number of received packets below which a channel is
+ * considered "quiet" is controlled by quiet_plcp_threshold.
+ *
+ * For active scanning on channels that have regulatory restrictions against
+ * blindly transmitting, the scan can listen before transmitting, to make sure
+ * that there is already legitimate activity on the channel.  If enough
+ * packets are cleanly received on the channel (controlled by good_CRC_th,
+ * typical value 1), the scan engine starts transmitting probe requests.
+ *
+ * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
+ *
+ * To avoid uCode errors, see timing restrictions described under
+ * struct iwl_scan_channel.
+ */
+
+struct iwl3945_scan_cmd {
+       __le16 len;
+       u8 reserved0;
+       u8 channel_count;       /* # channels in channel list */
+       __le16 quiet_time;      /* dwell only this # millisecs on quiet channel
+                                * (only for active scan) */
+       __le16 quiet_plcp_th;   /* quiet chnl is < this # pkts (typ. 1) */
+       __le16 good_CRC_th;     /* passive -> active promotion threshold */
+       __le16 reserved1;
+       __le32 max_out_time;    /* max usec to be away from associated (service)
+                                * channel */
+       __le32 suspend_time;    /* pause scan this long (in "extended beacon
+                                * format") when returning to service channel:
+                                * 3945; 31:24 # beacons, 19:0 additional usec,
+                                * 4965; 31:22 # beacons, 21:0 additional usec.
+                                */
+       __le32 flags;           /* RXON_FLG_* */
+       __le32 filter_flags;    /* RXON_FILTER_* */
+
+       /* For active scans (set to all-0s for passive scans).
+        * Does not include payload.  Must specify Tx rate; no rate scaling. */
+       struct iwl3945_tx_cmd tx_cmd;
+
+       /* For directed active scans (set to all-0s otherwise) */
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+
+       /*
+        * Probe request frame, followed by channel list.
+        *
+        * Size of probe request frame is specified by byte count in tx_cmd.
+        * Channel list follows immediately after probe request frame.
+        * Number of channels in list is specified by channel_count.
+        * Each channel in list is of type:
+        *
+        * struct iwl3945_scan_channel channels[0];
+        *
+        * NOTE:  Only one band of channels can be scanned per pass.  You
+        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * before requesting another scan.
+        */
+       u8 data[0];
+} __packed;
+
+struct iwl_scan_cmd {
+       __le16 len;
+       u8 reserved0;
+       u8 channel_count;       /* # channels in channel list */
+       __le16 quiet_time;      /* dwell only this # millisecs on quiet channel
+                                * (only for active scan) */
+       __le16 quiet_plcp_th;   /* quiet chnl is < this # pkts (typ. 1) */
+       __le16 good_CRC_th;     /* passive -> active promotion threshold */
+       __le16 rx_chain;        /* RXON_RX_CHAIN_* */
+       __le32 max_out_time;    /* max usec to be away from associated (service)
+                                * channel */
+       __le32 suspend_time;    /* pause scan this long (in "extended beacon
+                                * format") when returning to service chnl:
+                                * 3945; 31:24 # beacons, 19:0 additional usec,
+                                * 4965; 31:22 # beacons, 21:0 additional usec.
+                                */
+       __le32 flags;           /* RXON_FLG_* */
+       __le32 filter_flags;    /* RXON_FILTER_* */
+
+       /* For active scans (set to all-0s for passive scans).
+        * Does not include payload.  Must specify Tx rate; no rate scaling. */
+       struct iwl_tx_cmd tx_cmd;
+
+       /* For directed active scans (set to all-0s otherwise) */
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+
+       /*
+        * Probe request frame, followed by channel list.
+        *
+        * Size of probe request frame is specified by byte count in tx_cmd.
+        * Channel list follows immediately after probe request frame.
+        * Number of channels in list is specified by channel_count.
+        * Each channel in list is of type:
+        *
+        * struct iwl_scan_channel channels[0];
+        *
+        * NOTE:  Only one band of channels can be scanned per pass.  You
+        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * before requesting another scan.
+        */
+       u8 data[0];
+} __packed;
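+
+/*
+ * Illustrative sketch only, not from the driver: total REPLY_SCAN_CMD
+ * length when a probe request of probe_len bytes and n_channels channel
+ * entries follow the fixed header in data[], as laid out above.  Checking
+ * the result against IWL_MAX_SCAN_SIZE is left to the caller.
+ */
+static inline u16 iwl_scan_cmd_size(u16 probe_len, u8 n_channels)
+{
+       return sizeof(struct iwl_scan_cmd) + probe_len +
+              n_channels * sizeof(struct iwl_scan_channel);
+}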
+
+/* Scan abort is reported via the complete notification, with abort status. */
+#define CAN_ABORT_STATUS       cpu_to_le32(0x1)
+/* complete notification statuses */
+#define ABORT_STATUS            0x2
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (response)
+ */
+struct iwl_scanreq_notification {
+       __le32 status;          /* 1: okay, 2: cannot fulfill request */
+} __packed;
+
+/*
+ * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ */
+struct iwl_scanstart_notification {
+       __le32 tsf_low;
+       __le32 tsf_high;
+       __le32 beacon_timer;
+       u8 channel;
+       u8 band;
+       u8 reserved[2];
+       __le32 status;
+} __packed;
+
+#define  SCAN_OWNER_STATUS 0x1
+#define  MEASURE_OWNER_STATUS 0x2
+
+#define IWL_PROBE_STATUS_OK            0
+#define IWL_PROBE_STATUS_TX_FAILED     BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IWL_PROBE_STATUS_FAIL_TTL      BIT(1)
+#define IWL_PROBE_STATUS_FAIL_BT       BIT(2)
+
+#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+/*
+ * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ */
+struct iwl_scanresults_notification {
+       u8 channel;
+       u8 band;
+       u8 probe_status;
+       u8 num_probe_not_sent; /* not enough time to send */
+       __le32 tsf_low;
+       __le32 tsf_high;
+       __le32 statistics[NUMBER_OF_STATISTICS];
+} __packed;
+
+/*
+ * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ */
+struct iwl_scancomplete_notification {
+       u8 scanned_channels;
+       u8 status;
+       u8 last_channel;
+       __le32 tsf_low;
+       __le32 tsf_high;
+} __packed;
+
+
+/******************************************************************************
+ * (9)
+ * IBSS/AP Commands and Notifications:
+ *
+ *****************************************************************************/
+
+enum iwl_ibss_manager {
+       IWL_NOT_IBSS_MANAGER = 0,
+       IWL_IBSS_MANAGER = 1,
+};
+
+/*
+ * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ */
+
+struct iwl3945_beacon_notif {
+       struct iwl3945_tx_resp beacon_notify_hdr;
+       __le32 low_tsf;
+       __le32 high_tsf;
+       __le32 ibss_mgr_status;
+} __packed;
+
+struct iwl4965_beacon_notif {
+       struct iwl4965_tx_resp beacon_notify_hdr;
+       __le32 low_tsf;
+       __le32 high_tsf;
+       __le32 ibss_mgr_status;
+} __packed;
+
+/*
+ * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ */
+
+struct iwl3945_tx_beacon_cmd {
+       struct iwl3945_tx_cmd tx;
+       __le16 tim_idx;
+       u8 tim_size;
+       u8 reserved1;
+       struct ieee80211_hdr frame[0];  /* beacon frame */
+} __packed;
+
+struct iwl_tx_beacon_cmd {
+       struct iwl_tx_cmd tx;
+       __le16 tim_idx;
+       u8 tim_size;
+       u8 reserved1;
+       struct ieee80211_hdr frame[0];  /* beacon frame */
+} __packed;
+
+/******************************************************************************
+ * (10)
+ * Statistics Commands and Notifications:
+ *
+ *****************************************************************************/
+
+#define IWL_TEMP_CONVERT 260
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/* Used for passing to driver number of successes and failures per rate */
+struct rate_histogram {
+       union {
+               __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+               __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+               __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+       } success;
+       union {
+               __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+               __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+               __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+       } failed;
+} __packed;
+
+/* statistics command response */
+
+struct iwl39_statistics_rx_phy {
+       __le32 ina_cnt;
+       __le32 fina_cnt;
+       __le32 plcp_err;
+       __le32 crc32_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 false_alarm_cnt;
+       __le32 fina_sync_err_cnt;
+       __le32 sfd_timeout;
+       __le32 fina_timeout;
+       __le32 unresponded_rts;
+       __le32 rxe_frame_limit_overrun;
+       __le32 sent_ack_cnt;
+       __le32 sent_cts_cnt;
+} __packed;
+
+struct iwl39_statistics_rx_non_phy {
+       __le32 bogus_cts;       /* CTS received when not expecting CTS */
+       __le32 bogus_ack;       /* ACK received when not expecting ACK */
+       __le32 non_bssid_frames;        /* number of frames with BSSID that
+                                        * doesn't belong to the STA BSSID */
+       __le32 filtered_frames; /* count frames that were dumped in the
+                                * filtering process */
+       __le32 non_channel_beacons;     /* beacons with our bss id but not on
+                                        * our serving channel */
+} __packed;
+
+struct iwl39_statistics_rx {
+       struct iwl39_statistics_rx_phy ofdm;
+       struct iwl39_statistics_rx_phy cck;
+       struct iwl39_statistics_rx_non_phy general;
+} __packed;
+
+struct iwl39_statistics_tx {
+       __le32 preamble_cnt;
+       __le32 rx_detected_cnt;
+       __le32 bt_prio_defer_cnt;
+       __le32 bt_prio_kill_cnt;
+       __le32 few_bytes_cnt;
+       __le32 cts_timeout;
+       __le32 ack_timeout;
+       __le32 expected_ack_cnt;
+       __le32 actual_ack_cnt;
+} __packed;
+
+struct statistics_dbg {
+       __le32 burst_check;
+       __le32 burst_count;
+       __le32 wait_for_silence_timeout_cnt;
+       __le32 reserved[3];
+} __packed;
+
+struct iwl39_statistics_div {
+       __le32 tx_on_a;
+       __le32 tx_on_b;
+       __le32 exec_time;
+       __le32 probe_time;
+} __packed;
+
+struct iwl39_statistics_general {
+       __le32 temperature;
+       struct statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct iwl39_statistics_div div;
+} __packed;
+
+struct statistics_rx_phy {
+       __le32 ina_cnt;
+       __le32 fina_cnt;
+       __le32 plcp_err;
+       __le32 crc32_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 false_alarm_cnt;
+       __le32 fina_sync_err_cnt;
+       __le32 sfd_timeout;
+       __le32 fina_timeout;
+       __le32 unresponded_rts;
+       __le32 rxe_frame_limit_overrun;
+       __le32 sent_ack_cnt;
+       __le32 sent_cts_cnt;
+       __le32 sent_ba_rsp_cnt;
+       __le32 dsp_self_kill;
+       __le32 mh_format_err;
+       __le32 re_acq_main_rssi_sum;
+       __le32 reserved3;
+} __packed;
+
+struct statistics_rx_ht_phy {
+       __le32 plcp_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 crc32_err;
+       __le32 mh_format_err;
+       __le32 agg_crc32_good;
+       __le32 agg_mpdu_cnt;
+       __le32 agg_cnt;
+       __le32 unsupport_mcs;
+} __packed;
+
+#define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
+
+struct statistics_rx_non_phy {
+       __le32 bogus_cts;       /* CTS received when not expecting CTS */
+       __le32 bogus_ack;       /* ACK received when not expecting ACK */
+       __le32 non_bssid_frames;        /* number of frames with BSSID that
+                                        * doesn't belong to the STA BSSID */
+       __le32 filtered_frames; /* count frames that were dumped in the
+                                * filtering process */
+       __le32 non_channel_beacons;     /* beacons with our bss id but not on
+                                        * our serving channel */
+       __le32 channel_beacons; /* beacons with our bss id and in our
+                                * serving channel */
+       __le32 num_missed_bcon; /* number of missed beacons */
+       __le32 adc_rx_saturation_time;  /* count in 0.8us units the time the
+                                        * ADC was in saturation */
+       __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+                                         * for INA */
+       __le32 beacon_silence_rssi_a;   /* RSSI silence after beacon frame */
+       __le32 beacon_silence_rssi_b;   /* RSSI silence after beacon frame */
+       __le32 beacon_silence_rssi_c;   /* RSSI silence after beacon frame */
+       __le32 interference_data_flag;  /* flag for interference data
+                                        * availability. 1 when data is
+                                        * available. */
+       __le32 channel_load;            /* counts RX Enable time in uSec */
+       __le32 dsp_false_alarms;        /* DSP false alarm (both OFDM
+                                        * and CCK) counter */
+       __le32 beacon_rssi_a;
+       __le32 beacon_rssi_b;
+       __le32 beacon_rssi_c;
+       __le32 beacon_energy_a;
+       __le32 beacon_energy_b;
+       __le32 beacon_energy_c;
+} __packed;
+
+struct statistics_rx {
+       struct statistics_rx_phy ofdm;
+       struct statistics_rx_phy cck;
+       struct statistics_rx_non_phy general;
+       struct statistics_rx_ht_phy ofdm_ht;
+} __packed;
+
+/**
+ * struct statistics_tx_power - current tx power
+ *
+ * @ant_a: current tx power on chain a in 1/2 dB step
+ * @ant_b: current tx power on chain b in 1/2 dB step
+ * @ant_c: current tx power on chain c in 1/2 dB step
+ */
+struct statistics_tx_power {
+       u8 ant_a;
+       u8 ant_b;
+       u8 ant_c;
+       u8 reserved;
+} __packed;
+
+struct statistics_tx_non_phy_agg {
+       __le32 ba_timeout;
+       __le32 ba_reschedule_frames;
+       __le32 scd_query_agg_frame_cnt;
+       __le32 scd_query_no_agg;
+       __le32 scd_query_agg;
+       __le32 scd_query_mismatch;
+       __le32 frame_not_ready;
+       __le32 underrun;
+       __le32 bt_prio_kill;
+       __le32 rx_ba_rsp_cnt;
+} __packed;
+
+struct statistics_tx {
+       __le32 preamble_cnt;
+       __le32 rx_detected_cnt;
+       __le32 bt_prio_defer_cnt;
+       __le32 bt_prio_kill_cnt;
+       __le32 few_bytes_cnt;
+       __le32 cts_timeout;
+       __le32 ack_timeout;
+       __le32 expected_ack_cnt;
+       __le32 actual_ack_cnt;
+       __le32 dump_msdu_cnt;
+       __le32 burst_abort_next_frame_mismatch_cnt;
+       __le32 burst_abort_missing_next_frame_cnt;
+       __le32 cts_timeout_collision;
+       __le32 ack_or_ba_timeout_collision;
+       struct statistics_tx_non_phy_agg agg;
+
+       __le32 reserved1;
+} __packed;
+
+
+struct statistics_div {
+       __le32 tx_on_a;
+       __le32 tx_on_b;
+       __le32 exec_time;
+       __le32 probe_time;
+       __le32 reserved1;
+       __le32 reserved2;
+} __packed;
+
+struct statistics_general_common {
+       __le32 temperature;   /* radio temperature */
+       struct statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct statistics_div div;
+       __le32 rx_enable_counter;
+       /*
+        * num_of_sos_states:
+        *  count the number of times we have to re-tune
+        *  in order to get out of bad PHY status
+        */
+       __le32 num_of_sos_states;
+} __packed;
+
+struct statistics_general {
+       struct statistics_general_common common;
+       __le32 reserved2;
+       __le32 reserved3;
+} __packed;
+
+#define UCODE_STATISTICS_CLEAR_MSK             (0x1 << 0)
+#define UCODE_STATISTICS_FREQUENCY_MSK         (0x1 << 1)
+#define UCODE_STATISTICS_NARROW_BAND_MSK       (0x1 << 2)
+
+/*
+ * REPLY_STATISTICS_CMD = 0x9c,
+ * all devices identical.
+ *
+ * This command triggers an immediate response containing uCode statistics.
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ *
+ * If the CLEAR_STATS configuration flag is set, uCode will clear its
+ * internal copy of the statistics (counters) after issuing the response.
+ * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ *
+ * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
+ * STATISTICS_NOTIFICATIONs after received beacons (see below).  This flag
+ * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ */
+#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)    /* see above */
+#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct iwl_statistics_cmd {
+       __le32 configuration_flags;     /* IWL_STATS_CONF_* */
+} __packed;
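+
+/*
+ * Illustrative sketch only, not from the driver: a statistics request that
+ * also asks uCode to clear its internal counters after replying, using the
+ * flag defined above.
+ */
+static inline void iwl_statistics_request_clear(struct iwl_statistics_cmd *cmd)
+{
+       cmd->configuration_flags = IWL_STATS_CONF_CLEAR_STATS;
+}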
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans.  uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
+
+struct iwl3945_notif_statistics {
+       __le32 flag;
+       struct iwl39_statistics_rx rx;
+       struct iwl39_statistics_tx tx;
+       struct iwl39_statistics_general general;
+} __packed;
+
+struct iwl_notif_statistics {
+       __le32 flag;
+       struct statistics_rx rx;
+       struct statistics_tx tx;
+       struct statistics_general general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many were missed.  The notification
+ * carries the full beacon accounting: the total number of missed beacons,
+ * the number of consecutive missed beacons, the number of beacons received
+ * and the number of beacons expected.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio to
+ * bring the radio/PHY back to a working state; this is unrelated to when
+ * the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN        (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF        (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX        IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+       __le32 consecutive_missed_beacons;
+       __le32 total_missed_becons;
+       __le32 num_expected_beacons;
+       __le32 num_recvd_beacons;
+} __packed;
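+
+/*
+ * Illustrative sketch only, not from the driver: the driver-side threshold
+ * check described above, using the default threshold.  What to do once the
+ * threshold is exceeded (run sensitivity calibration) is left to the caller.
+ */
+static inline bool
+iwl_missed_beacons_exceed_threshold(const struct iwl_missed_beacon_notif *nb)
+{
+       return le32_to_cpu(nb->consecutive_missed_beacons) >
+              IWL_MISSED_BEACON_THRESHOLD_DEF;
+}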
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot.  Driver must calibrate only:
+ *
+ * 1)  Tx power (depends on temperature), described elsewhere
+ * 2)  Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3)  Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms".  False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting).  Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * received beacon.  These provide information to the driver to analyze the
+ * sensitivity.  Don't analyze statistics that come in from scanning, or any
+ * other non-associated-network source.  Pertinent statistics include:
+ *
+ * From "general" statistics (struct statistics_rx_non_phy):
+ *
+ * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
+ *   Measure of energy of desired signal.  Used for establishing a level
+ *   below which the device does not detect signals.
+ *
+ * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
+ *   Measure of background noise in silent period after beacon.
+ *
+ * channel_load
+ *   uSecs of actual Rx time during beacon period (varies according to
+ *   how much time was spent transmitting).
+ *
+ * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ *
+ * false_alarm_cnt
+ *   Signal locks abandoned early (before phy-level header).
+ *
+ * plcp_err
+ *   Signal locks abandoned late (during phy-level header).
+ *
+ * NOTE:  Both false_alarm_cnt and plcp_err increment monotonically from
+ *        beacon to beacon, i.e. each value is an accumulation of all errors
+ *        before and including the latest beacon.  Values will wrap around to 0
+ *        after counting up to 2^32 - 1.  Driver must differentiate vs.
+ *        previous beacon's values to determine # false alarms in the current
+ *        beacon period.
+ *
+ * Total number of false alarms = false_alarms + plcp_errs
+ *
+ * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for OFDM are at or close to settings for
+ * maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          90   /   85  /  120
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX     170   /  170  /  210
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX         105   /  105  /  140
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX     220   /  220  /  270
+ *
+ *   If actual rate of OFDM false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), reduce sensitivity
+ *   by *adding* 1 to all 4 of the table entries above, up to the max for
+ *   each entry.  Conversely, if false alarm rate is too low (less than 5
+ *   for each 204.8 msecs listening), *subtract* 1 from each entry to
+ *   increase sensitivity.
+ *
+ * For CCK sensitivity, keep track of the following:
+ *
+ *   1).  20-beacon history of maximum background noise, indicated by
+ *        (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
+ *        3 receivers.  For any given beacon, the "silence reference" is
+ *        the maximum of last 60 samples (20 beacons * 3 receivers).
+ *
+ *   2).  10-beacon history of strongest signal level, as indicated
+ *        by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
+ *        i.e. the strength of the signal through the best receiver at the
+ *        moment.  These measurements are "upside down", with lower values
+ *        for stronger signals, so max energy will be *minimum* value.
+ *
+ *        Then for any given beacon, the driver must determine the *weakest*
+ *        of the strongest signals; this is the minimum level that needs to be
+ *        successfully detected, when using the best receiver at the moment.
+ *        "Max cck energy" is the maximum (higher value means lower energy!)
+ *        of the last 10 minima.  Once this is determined, driver must add
+ *        a little margin by adding "6" to it.
+ *
+ *   3).  Number of consecutive beacon periods with too few false alarms.
+ *        Reset this to 0 at the first beacon period that falls within the
+ *        "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
+ *
+ * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for CCK are at maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX         125   /  125  /  200
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX     200   /  200  /  400
+ *   HD_MIN_ENERGY_CCK_DET_INDEX                100   /    0  /  100
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), method for reducing
+ *   sensitivity is:
+ *
+ *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *       up to max 400.
+ *
+ *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ *       sensitivity has been reduced a significant amount; bring it up to
+ *       a moderate 161.  Otherwise, *add* 3, up to max 200.
+ *
+ *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ *       sensitivity has been reduced only a moderate or small amount;
+ *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ *       down to min 0.  Otherwise (if gain has been significantly reduced),
+ *       don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *
+ *       b)  Save a snapshot of the "silence reference".
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too low
+ *   (less than 5 for each 204.8 msecs listening), method for increasing
+ *   sensitivity is used only if:
+ *
+ *   1a)  Previous beacon did not have too many false alarms
+ *   1b)  AND difference between previous "silence reference" and current
+ *        "silence reference" (prev - current) is 2 or more,
+ *   OR 2)  100 or more consecutive beacon periods have had rate of
+ *          less than 5 false alarms per 204.8 milliseconds rx time.
+ *
+ *   Method for increasing sensitivity:
+ *
+ *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ *       down to min 125.
+ *
+ *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *       down to min 200.
+ *
+ *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is within good range
+ *   (between 5 and 50 for each 204.8 msecs listening):
+ *
+ *   1)  Save a snapshot of the silence reference.
+ *
+ *   2)  If previous beacon had too many CCK false alarms (+ plcp_errors),
+ *       give some extra margin to energy threshold by *subtracting* 8
+ *       from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *
+ *   For all cases (too few, too many, good range), make sure that the CCK
+ *   detection threshold (energy) is below the energy level for robust
+ *   detection over the past 10 beacon periods, the "Max cck energy".
+ *   Lower values mean higher energy; this means making sure that the value
+ *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *
+ */
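
One detail of the accounting above that is easy to get wrong is the wrap-around handling of the monotonically increasing counters.  A minimal sketch, assuming the driver keeps a snapshot of the previous beacon's counters; the function name is hypothetical.

/* Hypothetical sketch, not part of this patch.  Because the counters are
 * 32-bit and wrap after 2^32 - 1, plain unsigned subtraction of the previous
 * snapshot from the current one yields the right per-period delta even
 * across a wrap. */
static u32 iwl_legacy_false_alarm_delta(u32 cur_false_alarm, u32 cur_plcp_err,
					u32 prev_false_alarm, u32 prev_plcp_err)
{
	/* total false alarms = false_alarm_cnt + plcp_err (see above) */
	return (cur_false_alarm - prev_false_alarm) +
	       (cur_plcp_err - prev_plcp_err);
}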
+
+/*
+ * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
+ */
+#define HD_TABLE_SIZE  (11)    /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_INDEX                 (0)        /* table indexes */
+#define HD_MIN_ENERGY_OFDM_DET_INDEX                (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX      (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX      (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX          (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX      (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_INDEX             (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX         (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX          (9)
+#define HD_OFDM_ENERGY_TH_IN_INDEX                  (10)
+
+/* Control field in struct iwl_sensitivity_cmd */
+#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE  cpu_to_le16(0)
+#define SENSITIVITY_CMD_CONTROL_WORK_TABLE     cpu_to_le16(1)
+
+/**
+ * struct iwl_sensitivity_cmd
+ * @control:  (1) updates working table, (0) updates default table
+ * @table:  energy threshold values, use HD_* as index into table
+ *
+ * Always use "1" in "control" to update uCode's working table and DSP.
+ */
+struct iwl_sensitivity_cmd {
+       __le16 control;                 /* always use "1" */
+       __le16 table[HD_TABLE_SIZE];    /* use HD_* as index */
+} __packed;
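
To tie the OFDM half of the algorithm above to this structure, here is a minimal sketch that nudges the four auto-correlation entries one step toward lower sensitivity when false alarms are too frequent, one step toward higher sensitivity when they are too rare, and then pushes the working table to uCode.  The CCK half follows the same pattern with its own bounds plus the energy/silence bookkeeping described above.  The function name is hypothetical; SENSITIVITY_CMD is assumed to be declared in the command enum earlier in this header, the table is assumed to be kept in host byte order by the driver, and min_t()/max_t() come from <linux/kernel.h>.

/* Hypothetical sketch, not part of this patch. */
static int iwl_legacy_adjust_ofdm_sensitivity(struct iwl_priv *priv,
					      u16 table[HD_TABLE_SIZE],
					      u32 false_alarms_per_period)
{
	struct iwl_sensitivity_cmd cmd;
	int i;

	if (false_alarms_per_period > 50) {
		/* too many false alarms: reduce sensitivity, capped at MAX */
		table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
		    min_t(u16, table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] + 1, 120);
		table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
		    min_t(u16, table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] + 1, 210);
		table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
		    min_t(u16, table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] + 1, 140);
		table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
		    min_t(u16, table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] + 1, 270);
	} else if (false_alarms_per_period < 5) {
		/* too few false alarms: increase sensitivity, floored at MIN */
		table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
		    max_t(u16, table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] - 1, 85);
		table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
		    max_t(u16, table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] - 1, 170);
		table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
		    max_t(u16, table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] - 1, 105);
		table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
		    max_t(u16, table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] - 1, 220);
	}

	/* always update uCode's working table and DSP (control = 1, see above) */
	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
	for (i = 0; i < HD_TABLE_SIZE; i++)
		cmd.table[i] = cpu_to_le16(table[i]);

	return iwl_legacy_send_cmd_pdu(priv, SENSITIVITY_CMD, sizeof(cmd), &cmd);
}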
+
+
+/**
+ * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ *
+ * This command sets the relative gains of the 4965 device's 3 radio
+ * receiver chains.
+ *
+ * After the first association, driver should accumulate signal and noise
+ * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
+ * beacons from the associated network (don't collect statistics that come
+ * in from scanning, or any other non-network source).
+ *
+ * DISCONNECTED ANTENNA:
+ *
+ * Driver should determine which antennas are actually connected, by comparing
+ * average beacon signal levels for the 3 Rx chains.  Accumulate (add) the
+ * following values over 20 beacons, one accumulator for each of the chains
+ * a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the strongest signal from among a/b/c.  Compare the other two to the
+ * strongest.  If any signal is more than 15 dB (times 20, unless you
+ * divide the accumulated values by 20) below the strongest, the driver
+ * considers that antenna to be disconnected, and should not try to use that
+ * antenna/chain for Rx or Tx.  If both A and B seem to be disconnected,
+ * driver should declare the stronger one as connected, and attempt to use it
+ * (A and B are the only 2 Tx chains!).
+ *
+ *
+ * RX BALANCE:
+ *
+ * Driver should balance the 3 receivers (but just the ones that are connected
+ * to antennas, see above) for gain, by comparing the average signal levels
+ * detected during the silence after each beacon (background noise).
+ * Accumulate (add) the following values over 20 beacons, one accumulator for
+ * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the weakest background noise level from among a/b/c.  This Rx chain
+ * will be the reference, with 0 gain adjustment.  Attenuate the other
+ * chains by finding the noise difference:
+ *
+ * (accum_noise[i] - accum_noise[reference]) / 30
+ *
+ * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
+ * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
+ * and set bit 2 to indicate "reduce gain".  The value for the reference
+ * (weakest) chain should be "0".
+ *
+ * diff_gain_[abc] bit fields:
+ *   2: (1) reduce gain, (0) increase gain
+ * 1-0: amount of gain, units of 1.5 dB
+ */
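
A minimal sketch of the disconnected-antenna test described above, assuming the driver has already summed beacon_rssi_[abc] & 0x0FF over 20 beacons into accum_rssi[0..2]; the threshold macro and function name are local to this sketch.  The special case in which both A and B appear disconnected (keep the stronger one, since A and B are the only Tx chains) is omitted for brevity.

/* Hypothetical sketch, not part of this patch. */
#define IWL_DISCONN_THRESHOLD	(15 * 20)	/* 15 dB, summed over 20 beacons */

static void iwl_legacy_find_disconn_antennas(const u32 accum_rssi[3],
					     bool connected[3])
{
	u32 strongest = accum_rssi[0];
	int i;

	for (i = 1; i < 3; i++)
		if (accum_rssi[i] > strongest)
			strongest = accum_rssi[i];

	/* a chain more than 15 dB (x20) below the strongest is disconnected */
	for (i = 0; i < 3; i++)
		connected[i] = (strongest - accum_rssi[i]) <= IWL_DISCONN_THRESHOLD;
}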
+
+/* Phy calibration command for series */
+/* The default calibration table size if not specified by firmware */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE    18
+enum {
+       IWL_PHY_CALIBRATE_DIFF_GAIN_CMD         = 7,
+       IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+};
+
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE         (253)
+
+struct iwl_calib_hdr {
+       u8 op_code;
+       u8 first_group;
+       u8 groups_num;
+       u8 data_valid;
+} __packed;
+
+/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct iwl_calib_diff_gain_cmd {
+       struct iwl_calib_hdr hdr;
+       s8 diff_gain_a;         /* see above */
+       s8 diff_gain_b;
+       s8 diff_gain_c;
+       u8 reserved1;
+} __packed;
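
To connect the RX BALANCE description above to this structure, here is a minimal sketch that turns the 20-beacon accumulated silence-RSSI sums into the diff_gain_[abc] encoding and sends the calibration command.  The helper name is hypothetical; REPLY_PHY_CALIBRATION_CMD is assumed to be declared in the command enum earlier in this header, and skipping of disconnected chains is omitted.

/* Hypothetical sketch, not part of this patch. */
static int iwl_legacy_send_rx_balance(struct iwl_priv *priv,
				      const u32 accum_noise[3])
{
	struct iwl_calib_diff_gain_cmd cmd;
	u32 weakest = min(accum_noise[0],
			  min(accum_noise[1], accum_noise[2]));
	s8 gain[3];
	int i;

	for (i = 0; i < 3; i++) {
		/* (accum_noise[i] - accum_noise[reference]) / 30, capped at
		 * 3 (4.5 dB); bit 2 means "reduce gain"; reference stays 0 */
		u32 delta = (accum_noise[i] - weakest) / 30;

		if (delta > 3)
			delta = 3;
		gain[i] = delta ? (delta | BIT(2)) : 0;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
	cmd.diff_gain_a = gain[0];
	cmd.diff_gain_b = gain[1];
	cmd.diff_gain_c = gain[2];

	return iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
}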
+
+/******************************************************************************
+ * (12)
+ * Miscellaneous Commands:
+ *
+ *****************************************************************************/
+
+/*
+ * LEDs Command & Response
+ * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ *
+ * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
+ * this command turns it on or off, or sets up a periodic blinking cycle.
+ */
+struct iwl_led_cmd {
+       __le32 interval;        /* "interval" in uSec */
+       u8 id;                  /* 1: Activity, 2: Link, 3: Tech */
+       u8 off;                 /* # intervals off while blinking;
+                                * "0", with >0 "on" value, turns LED on */
+       u8 on;                  /* # intervals on while blinking;
+                                * "0", regardless of "off", turns LED off */
+       u8 reserved;
+} __packed;
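
For illustration, a minimal sketch of filling this structure for a low-duty-cycle blink on the Activity LED; the helper name and the chosen interval/duty numbers are just examples, and REPLY_LEDS_CMD is assumed to be declared in the command enum earlier in this header.

/* Hypothetical sketch, not part of this patch. */
static int iwl_legacy_led_blink_example(struct iwl_priv *priv)
{
	struct iwl_led_cmd led_cmd = {
		.interval = cpu_to_le32(10000),	/* 10000 uSec per interval */
		.id = 1,			/* 1: Activity LED */
		.on = 1,			/* one interval on ...      */
		.off = 9,			/* ... then nine intervals off */
		.reserved = 0,
	};

	return iwl_legacy_send_cmd_pdu(priv, REPLY_LEDS_CMD,
				       sizeof(led_cmd), &led_cmd);
}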
+
+
+/******************************************************************************
+ * (13)
+ * Union of all expected notifications/responses:
+ *
+ *****************************************************************************/
+
+struct iwl_rx_packet {
+       /*
+        * The first 4 bytes of the RX frame header contain both the RX frame
+        * size and some flags.
+        * Bit fields:
+        * 31:    flag flush RB request
+        * 30:    flag ignore TC (terminal counter) request
+        * 29:    flag fast IRQ request
+        * 28-14: Reserved
+        * 13-00: RX frame size
+        */
+       __le32 len_n_flags;
+       struct iwl_cmd_header hdr;
+       union {
+               struct iwl3945_rx_frame rx_frame;
+               struct iwl3945_tx_resp tx_resp;
+               struct iwl3945_beacon_notif beacon_status;
+
+               struct iwl_alive_resp alive_frame;
+               struct iwl_spectrum_notification spectrum_notif;
+               struct iwl_csa_notification csa_notif;
+               struct iwl_error_resp err_resp;
+               struct iwl_card_state_notif card_state_notif;
+               struct iwl_add_sta_resp add_sta;
+               struct iwl_rem_sta_resp rem_sta;
+               struct iwl_sleep_notification sleep_notif;
+               struct iwl_spectrum_resp spectrum;
+               struct iwl_notif_statistics stats;
+               struct iwl_compressed_ba_resp compressed_ba;
+               struct iwl_missed_beacon_notif missed_beacon;
+               __le32 status;
+               u8 raw[0];
+       } u;
+} __packed;
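
Since only the low 14 bits of len_n_flags carry the frame size, a small sketch of unpacking it; the mask and flag names below are local to this sketch rather than taken from the driver.

/* Hypothetical sketch, not part of this patch. */
#define IWL_RX_FRAME_SIZE_MSK	0x00003fff	/* bits 13:0 */
#define IWL_RX_FLAG_FLUSH_RB	(1U << 31)
#define IWL_RX_FLAG_IGNORE_TC	(1U << 30)
#define IWL_RX_FLAG_FAST_IRQ	(1U << 29)

static u32 iwl_legacy_rx_frame_size(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & IWL_RX_FRAME_SIZE_MSK;
}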
+
+#endif                         /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644 (file)
index 0000000..d418b64
--- /dev/null
@@ -0,0 +1,2674 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-power.h"
+#include "iwl-sta.h"
+#include "iwl-helpers.h"
+
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * If bt_coex_active is false, uCode will ignore the BT activity and
+ * perform the normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to WiFi/BT
+ * co-existence problems.  The possible symptoms are:
+ *   able to scan and find all the available APs, but
+ *   not able to associate with any AP.
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
+
+u32 iwlegacy_debug_level;
+EXPORT_SYMBOL(iwlegacy_debug_level);
+
+const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(iwlegacy_bcast_addr);
+
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
+{
+       struct iwl_priv *priv;
+       /* mac80211 allocates memory for this device instance, including
+        *   space for this driver's private structure */
+       struct ieee80211_hw *hw;
+
+       hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
+                               cfg->ops->ieee80211_ops);
+       if (hw == NULL) {
+               pr_err("%s: Can not allocate network device\n",
+                      cfg->name);
+               goto out;
+       }
+
+       priv = hw->priv;
+       priv->hw = hw;
+
+out:
+       return hw;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
+                             struct ieee80211_sta_ht_cap *ht_info,
+                             enum ieee80211_band band)
+{
+       u16 max_bit_rate = 0;
+       u8 rx_chains_num = priv->hw_params.rx_chains_num;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+
+       ht_info->cap = 0;
+       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+       ht_info->ht_supported = true;
+
+       ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+       max_bit_rate = MAX_BIT_RATE_20_MHZ;
+       if (priv->hw_params.ht40_channel & BIT(band)) {
+               ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+               ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+               ht_info->mcs.rx_mask[4] = 0x01;
+               max_bit_rate = MAX_BIT_RATE_40_MHZ;
+       }
+
+       if (priv->cfg->mod_params->amsdu_size_8K)
+               ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+       ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+       ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+       ht_info->mcs.rx_mask[0] = 0xFF;
+       if (rx_chains_num >= 2)
+               ht_info->mcs.rx_mask[1] = 0xFF;
+       if (rx_chains_num >= 3)
+               ht_info->mcs.rx_mask[2] = 0xFF;
+
+       /* Highest supported Rx data rate */
+       max_bit_rate *= rx_chains_num;
+       WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+       ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+       /* Tx MCS capabilities */
+       ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       if (tx_chains_num != rx_chains_num) {
+               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+               ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
+                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+       }
+}
+
+/**
+ * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
+ */
+int iwl_legacy_init_geos(struct iwl_priv *priv)
+{
+       struct iwl_channel_info *ch;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *channels;
+       struct ieee80211_channel *geo_ch;
+       struct ieee80211_rate *rates;
+       int i = 0;
+
+       if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+           priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+               IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
+               set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+               return 0;
+       }
+
+       channels = kzalloc(sizeof(struct ieee80211_channel) *
+                          priv->channel_count, GFP_KERNEL);
+       if (!channels)
+               return -ENOMEM;
+
+       rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
+                       GFP_KERNEL);
+       if (!rates) {
+               kfree(channels);
+               return -ENOMEM;
+       }
+
+       /* 5.2GHz channels start after the 2.4GHz channels */
+       sband = &priv->bands[IEEE80211_BAND_5GHZ];
+       sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
+       /* just OFDM */
+       sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
+       sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
+
+       if (priv->cfg->sku & IWL_SKU_N)
+               iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+                                        IEEE80211_BAND_5GHZ);
+
+       sband = &priv->bands[IEEE80211_BAND_2GHZ];
+       sband->channels = channels;
+       /* OFDM & CCK */
+       sband->bitrates = rates;
+       sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
+
+       if (priv->cfg->sku & IWL_SKU_N)
+               iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+                                        IEEE80211_BAND_2GHZ);
+
+       priv->ieee_channels = channels;
+       priv->ieee_rates = rates;
+
+       for (i = 0;  i < priv->channel_count; i++) {
+               ch = &priv->channel_info[i];
+
+               if (!iwl_legacy_is_channel_valid(ch))
+                       continue;
+
+               if (iwl_legacy_is_channel_a_band(ch))
+                       sband =  &priv->bands[IEEE80211_BAND_5GHZ];
+               else
+                       sband =  &priv->bands[IEEE80211_BAND_2GHZ];
+
+               geo_ch = &sband->channels[sband->n_channels++];
+
+               geo_ch->center_freq =
+                       ieee80211_channel_to_frequency(ch->channel, ch->band);
+               geo_ch->max_power = ch->max_power_avg;
+               geo_ch->max_antenna_gain = 0xff;
+               geo_ch->hw_value = ch->channel;
+
+               if (iwl_legacy_is_channel_valid(ch)) {
+                       if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+                               geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+                       if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+                               geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+                       if (ch->flags & EEPROM_CHANNEL_RADAR)
+                               geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+                       geo_ch->flags |= ch->ht40_extension_channel;
+
+                       if (ch->max_power_avg > priv->tx_power_device_lmt)
+                               priv->tx_power_device_lmt = ch->max_power_avg;
+               } else {
+                       geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+               }
+
+               IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
+                               ch->channel, geo_ch->center_freq,
+                               iwl_legacy_is_channel_a_band(ch) ?  "5.2" : "2.4",
+                               geo_ch->flags & IEEE80211_CHAN_DISABLED ?
+                               "restricted" : "valid",
+                                geo_ch->flags);
+       }
+
+       if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
+            priv->cfg->sku & IWL_SKU_A) {
+               IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
+                       "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+                          priv->pci_dev->device,
+                          priv->pci_dev->subsystem_device);
+               priv->cfg->sku &= ~IWL_SKU_A;
+       }
+
+       IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+                  priv->bands[IEEE80211_BAND_2GHZ].n_channels,
+                  priv->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+       set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_geos);
+
+/*
+ * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
+ */
+void iwl_legacy_free_geos(struct iwl_priv *priv)
+{
+       kfree(priv->ieee_channels);
+       kfree(priv->ieee_rates);
+       clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_free_geos);
+
+static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
+                                    enum ieee80211_band band,
+                                    u16 channel, u8 extension_chan_offset)
+{
+       const struct iwl_channel_info *ch_info;
+
+       ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+       if (!iwl_legacy_is_channel_valid(ch_info))
+               return false;
+
+       if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+               return !(ch_info->ht40_extension_channel &
+                                       IEEE80211_CHAN_NO_HT40PLUS);
+       else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+               return !(ch_info->ht40_extension_channel &
+                                       IEEE80211_CHAN_NO_HT40MINUS);
+
+       return false;
+}
+
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_sta_ht_cap *ht_cap)
+{
+       if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+               return false;
+
+       /*
+        * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+        * the bit will not be set in the pure 40 MHz case
+        */
+       if (ht_cap && !ht_cap->ht_supported)
+               return false;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       if (priv->disable_ht40)
+               return false;
+#endif
+
+       return iwl_legacy_is_channel_extension(priv, priv->band,
+                       le16_to_cpu(ctx->staging.channel),
+                       ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
+
+static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+       u16 new_val;
+       u16 beacon_factor;
+
+       /*
+        * If mac80211 hasn't given us a beacon interval, program
+        * the default into the device.
+        */
+       if (!beacon_val)
+               return DEFAULT_BEACON_INTERVAL;
+
+       /*
+        * If the beacon interval we obtained from the peer
+        * is too large, we'll have to wake up more often
+        * (and in IBSS case, we'll beacon too much)
+        *
+        * For example, if max_beacon_val is 4096, and the
+        * requested beacon interval is 7000, we'll have to
+        * use 3500 to be able to wake up on the beacons.
+        *
+        * This could badly influence beacon detection stats.
+        */
+
+       beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+       new_val = beacon_val / beacon_factor;
+
+       if (!new_val)
+               new_val = max_beacon_val;
+
+       return new_val;
+}
+
+int
+iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       u64 tsf;
+       s32 interval_tm, rem;
+       struct ieee80211_conf *conf = NULL;
+       u16 beacon_int;
+       struct ieee80211_vif *vif = ctx->vif;
+
+       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+       lockdep_assert_held(&priv->mutex);
+
+       memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+       ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+       ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+       beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+       /*
+        * TODO: For IBSS we need to get atim_window from mac80211,
+        *       for now just always use 0
+        */
+       ctx->timing.atim_window = 0;
+
+       beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
+                       priv->hw_params.max_beacon_itrvl * TIME_UNIT);
+       ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+       tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
+       interval_tm = beacon_int * TIME_UNIT;
+       rem = do_div(tsf, interval_tm);
+       ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+       ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+       IWL_DEBUG_ASSOC(priv,
+                       "beacon interval %d beacon timer %d beacon tim %d\n",
+                       le16_to_cpu(ctx->timing.beacon_interval),
+                       le32_to_cpu(ctx->timing.beacon_init_val),
+                       le16_to_cpu(ctx->timing.atim_window));
+
+       return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+                               sizeof(ctx->timing), &ctx->timing);
+}
+EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
+
+void
+iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx,
+                               int hw_decrypt)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+       if (hw_decrypt)
+               rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+       else
+               rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
+
+/* validate RXON structure is valid */
+int
+iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+       bool error = false;
+
+       if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+               if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+                       IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+                       error = true;
+               }
+               if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+                       IWL_WARN(priv, "check 2.4G: wrong radar\n");
+                       error = true;
+               }
+       } else {
+               if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+                       IWL_WARN(priv, "check 5.2G: not short slot!\n");
+                       error = true;
+               }
+               if (rxon->flags & RXON_FLG_CCK_MSK) {
+                       IWL_WARN(priv, "check 5.2G: CCK!\n");
+                       error = true;
+               }
+       }
+       if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+               IWL_WARN(priv, "mac/bssid mcast!\n");
+               error = true;
+       }
+
+       /* make sure basic rates 6Mbps and 1Mbps are supported */
+       if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+           (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+               IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+               error = true;
+       }
+
+       if (le16_to_cpu(rxon->assoc_id) > 2007) {
+               IWL_WARN(priv, "aid > 2007\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+                       == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+               IWL_WARN(priv, "CCK and short slot\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+                       == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+               IWL_WARN(priv, "CCK and auto detect\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+                           RXON_FLG_TGG_PROTECT_MSK)) ==
+                           RXON_FLG_TGG_PROTECT_MSK) {
+               IWL_WARN(priv, "TGg but no auto-detect\n");
+               error = true;
+       }
+
+       if (error) {
+               IWL_WARN(priv, "Tuning to channel %d\n",
+                           le16_to_cpu(rxon->channel));
+               IWL_ERR(priv, "Invalid RXON\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
+
+/**
+ * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @priv: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+                          struct iwl_rxon_context *ctx)
+{
+       const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
+       const struct iwl_legacy_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)                                                      \
+       if ((cond)) {                                                   \
+               IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");   \
+               return 1;                                               \
+       }
+
+#define CHK_NEQ(c1, c2)                                                \
+       if ((c1) != (c2)) {                                     \
+               IWL_DEBUG_INFO(priv, "need full RXON - "        \
+                              #c1 " != " #c2 " - %d != %d\n",  \
+                              (c1), (c2));                     \
+               return 1;                                       \
+       }
+
+       /* These items are only settable from the full RXON command */
+       CHK(!iwl_legacy_is_associated_ctx(ctx));
+       CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+       CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+       CHK(compare_ether_addr(staging->wlap_bssid_addr,
+                               active->wlap_bssid_addr));
+       CHK_NEQ(staging->dev_type, active->dev_type);
+       CHK_NEQ(staging->channel, active->channel);
+       CHK_NEQ(staging->air_propagation, active->air_propagation);
+       CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+               active->ofdm_ht_single_stream_basic_rates);
+       CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+               active->ofdm_ht_dual_stream_basic_rates);
+       CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+       /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+        * be updated with the RXON_ASSOC command -- however only some
+        * flag transitions are allowed using RXON_ASSOC */
+
+       /* Check if we are not switching bands */
+       CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+               active->flags & RXON_FLG_BAND_24G_MSK);
+
+       /* Check if we are switching association toggle */
+       CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+               active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx)
+{
+       /*
+        * Assign the lowest rate -- should really get this from
+        * the beacon skb from mac80211.
+        */
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+               return IWL_RATE_1M_PLCP;
+       else
+               return IWL_RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
+
+static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+                            struct iwl_ht_config *ht_conf,
+                            struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+       if (!ctx->ht.enabled) {
+               rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+                       RXON_FLG_HT40_PROT_MSK |
+                       RXON_FLG_HT_PROT_MSK);
+               return;
+       }
+
+       rxon->flags |= cpu_to_le32(ctx->ht.protection <<
+                                       RXON_FLG_HT_OPERATING_MODE_POS);
+
+       /* Set up channel bandwidth:
+        * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+       /* clear the HT channel mode before set the mode */
+       rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
+               /* pure ht40 */
+               if (ctx->ht.protection ==
+                               IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+                       rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+                       /* Note: control channel is opposite of extension channel */
+                       switch (ctx->ht.extension_chan_offset) {
+                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                               rxon->flags &=
+                                       ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                               rxon->flags |=
+                                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               break;
+                       }
+               } else {
+                       /* Note: control channel is opposite of extension channel */
+                       switch (ctx->ht.extension_chan_offset) {
+                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                               rxon->flags &=
+                                       ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                               rxon->flags |=
+                                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+                       default:
+                               /* channel location only valid if in Mixed mode */
+                               IWL_ERR(priv,
+                                       "invalid extension channel offset\n");
+                               break;
+                       }
+               }
+       } else {
+               rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+       }
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
+                       "extension channel offset 0x%x\n",
+                       le32_to_cpu(rxon->flags), ctx->ht.protection,
+                       ctx->ht.extension_chan_offset);
+}
+
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+       struct iwl_rxon_context *ctx;
+
+       for_each_context(priv, ctx)
+               _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+                                enum ieee80211_band band)
+{
+       const struct iwl_channel_info *ch_info;
+       int i;
+       u8 channel = 0;
+       u8 min, max;
+       struct iwl_rxon_context *ctx;
+
+       if (band == IEEE80211_BAND_5GHZ) {
+               min = 14;
+               max = priv->channel_count;
+       } else {
+               min = 0;
+               max = 14;
+       }
+
+       for (i = min; i < max; i++) {
+               bool busy = false;
+
+               for_each_context(priv, ctx) {
+                       busy = priv->channel_info[i].channel ==
+                               le16_to_cpu(ctx->staging.channel);
+                       if (busy)
+                               break;
+               }
+
+               if (busy)
+                       continue;
+
+               channel = priv->channel_info[i].channel;
+               ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+               if (iwl_legacy_is_channel_valid(ch_info))
+                       break;
+       }
+
+       return channel;
+}
+EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
+
+/**
+ * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+                        struct iwl_rxon_context *ctx)
+{
+       enum ieee80211_band band = ch->band;
+       u16 channel = ch->hw_value;
+
+       if ((le16_to_cpu(ctx->staging.channel) == channel) &&
+           (priv->band == band))
+               return 0;
+
+       ctx->staging.channel = cpu_to_le16(channel);
+       if (band == IEEE80211_BAND_5GHZ)
+               ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+       else
+               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+       priv->band = band;
+
+       IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
+
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           enum ieee80211_band band,
+                           struct ieee80211_vif *vif)
+{
+       if (band == IEEE80211_BAND_5GHZ) {
+               ctx->staging.flags &=
+                   ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
+                     | RXON_FLG_CCK_MSK);
+               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+       } else {
+               /* Copied from iwl_post_associate() */
+               if (vif && vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+               ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+               ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+                                  struct iwl_rxon_context *ctx)
+{
+       const struct iwl_channel_info *ch_info;
+
+       memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+       if (!ctx->vif) {
+               ctx->staging.dev_type = ctx->unused_devtype;
+       } else
+       switch (ctx->vif->type) {
+
+       case NL80211_IFTYPE_STATION:
+               ctx->staging.dev_type = ctx->station_devtype;
+               ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+               break;
+
+       case NL80211_IFTYPE_ADHOC:
+               ctx->staging.dev_type = ctx->ibss_devtype;
+               ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+               ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+                                                 RXON_FILTER_ACCEPT_GRP_MSK;
+               break;
+
+       default:
+               IWL_ERR(priv, "Unsupported interface type %d\n",
+                       ctx->vif->type);
+               break;
+       }
+
+#if 0
+       /* TODO:  Figure out when short_preamble would be set and cache from
+        * that */
+       if (!hw_to_local(priv->hw)->short_preamble)
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band,
+                                      le16_to_cpu(ctx->active.channel));
+
+       if (!ch_info)
+               ch_info = &priv->channel_info[0];
+
+       ctx->staging.channel = cpu_to_le16(ch_info->channel);
+       priv->band = ch_info->band;
+
+       iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
+
+       ctx->staging.ofdm_basic_rates =
+           (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+       ctx->staging.cck_basic_rates =
+           (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+       /* clear both MIX and PURE40 mode flag */
+       ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+                                       RXON_FLG_CHANNEL_MODE_PURE_40);
+       if (ctx->vif)
+               memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+       ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+       ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
+
+void iwl_legacy_set_rate(struct iwl_priv *priv)
+{
+       const struct ieee80211_supported_band *hw = NULL;
+       struct ieee80211_rate *rate;
+       struct iwl_rxon_context *ctx;
+       int i;
+
+       hw = iwl_get_hw_mode(priv, priv->band);
+       if (!hw) {
+               IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
+               return;
+       }
+
+       priv->active_rate = 0;
+
+       for (i = 0; i < hw->n_bitrates; i++) {
+               rate = &(hw->bitrates[i]);
+               if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
+                       priv->active_rate |= (1 << rate->hw_value);
+       }
+
+       IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
+
+       for_each_context(priv, ctx) {
+               ctx->staging.cck_basic_rates =
+                   (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+               ctx->staging.ofdm_basic_rates =
+                  (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_set_rate);
+
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (priv->switch_rxon.switch_in_progress) {
+               ieee80211_chswitch_done(ctx->vif, is_success);
+               mutex_lock(&priv->mutex);
+               priv->switch_rxon.switch_in_progress = false;
+               mutex_unlock(&priv->mutex);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_chswitch_done);
+
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
+
+       if (priv->switch_rxon.switch_in_progress) {
+               if (!le32_to_cpu(csa->status) &&
+                   (csa->channel == priv->switch_rxon.channel)) {
+                       rxon->channel = csa->channel;
+                       ctx->staging.channel = csa->channel;
+                       IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+                             le16_to_cpu(csa->channel));
+                       iwl_legacy_chswitch_done(priv, true);
+               } else {
+                       IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+                             le16_to_cpu(csa->channel));
+                       iwl_legacy_chswitch_done(priv, false);
+               }
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_rx_csa);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+                            struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+       IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
+       iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+       IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
+                               le16_to_cpu(rxon->channel));
+       IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+       IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
+                               le32_to_cpu(rxon->filter_flags));
+       IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
+       IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
+                       rxon->ofdm_basic_rates);
+       IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
+                               rxon->cck_basic_rates);
+       IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
+       IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+       IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
+                               le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
+#endif
+/**
+ * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
+{
+       /* Set the FW error flag -- cleared on iwl_down */
+       set_bit(STATUS_FW_ERROR, &priv->status);
+
+       /* Cancel currently queued command. */
+       clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+
+       IWL_ERR(priv, "Loaded firmware version: %s\n",
+               priv->hw->wiphy->fw_version);
+
+       priv->cfg->ops->lib->dump_nic_error_log(priv);
+       if (priv->cfg->ops->lib->dump_fh)
+               priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+       priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
+               iwl_legacy_print_rx_config_cmd(priv,
+                                       &priv->contexts[IWL_RXON_CTX_BSS]);
+#endif
+
+       wake_up_interruptible(&priv->wait_command_queue);
+
+       /* Keep the restart process from trying to send host
+        * commands by clearing the INIT status bit */
+       clear_bit(STATUS_READY, &priv->status);
+
+       if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+               IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+                         "Restarting adapter due to uCode error.\n");
+
+               if (priv->cfg->mod_params->restart_fw)
+                       queue_work(priv->workqueue, &priv->restart);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
+
+static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
+{
+       int ret = 0;
+
+       /* stop device's busmaster DMA activity */
+       iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+       ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+                       CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+       if (ret)
+               IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
+
+       IWL_DEBUG_INFO(priv, "stop master\n");
+
+       return ret;
+}
+
+void iwl_legacy_apm_stop(struct iwl_priv *priv)
+{
+       IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
+
+       /* Stop device's DMA activity */
+       iwl_legacy_apm_stop_master(priv);
+
+       /* Reset the entire device */
+       iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+       udelay(10);
+
+       /*
+        * Clear "initialization complete" bit to move adapter from
+        * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+        */
+       iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(iwl_legacy_apm_stop);
+
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int iwl_legacy_apm_init(struct iwl_priv *priv)
+{
+       int ret = 0;
+       u16 lctl;
+
+       IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+       /*
+        * Use "set_bit" below rather than "write", to preserve any hardware
+        * bits already set by default after reset.
+        */
+
+       /* Disable L0S exit timer (platform NMI Work/Around) */
+       iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+                         CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+       /*
+        * Disable L0s without affecting L1;
+        *  don't wait for ICH L0s (ICH bug W/A)
+        */
+       iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+                         CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+       /* Set FH wait threshold to maximum (HW error during stress W/A) */
+       iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
+                                       CSR_DBG_HPET_MEM_REG_VAL);
+
+       /*
+        * Enable HAP INTA (interrupt from management bus) to
+        * wake device's PCI Express link L1a -> L0s
+        * NOTE:  This is a no-op for 3945 (non-existent bit)
+        */
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                                   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+       /*
+        * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+        * Check if BIOS (or OS) enabled L1-ASPM on this device.
+        * If so (likely), disable L0S, so device moves directly L0->L1;
+        *    costs negligible amount of power savings.
+        * If not (unlikely), enable L0S, so there is at least some
+        *    power savings, even without L1.
+        */
+       if (priv->cfg->base_params->set_l0s) {
+               lctl = iwl_legacy_pcie_link_ctl(priv);
+               if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+                                       PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+                       /* L1-ASPM enabled; disable(!) L0S  */
+                       iwl_legacy_set_bit(priv, CSR_GIO_REG,
+                                       CSR_GIO_REG_VAL_L0S_ENABLED);
+                       IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+               } else {
+                       /* L1-ASPM disabled; enable(!) L0S */
+                       iwl_legacy_clear_bit(priv, CSR_GIO_REG,
+                                       CSR_GIO_REG_VAL_L0S_ENABLED);
+                       IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+               }
+       }
+
+       /* Configure analog phase-lock-loop before activating to D0A */
+       if (priv->cfg->base_params->pll_cfg_val)
+               iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
+                           priv->cfg->base_params->pll_cfg_val);
+
+       /*
+        * Set "initialization complete" bit to move adapter from
+        * D0U* --> D0A* (powered-up active) state.
+        */
+       iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       /*
+        * Wait for clock stabilization; once stabilized, access to
+        * device-internal resources is supported, e.g. iwl_legacy_write_prph()
+        * and accesses to uCode SRAM.
+        */
+       ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+                       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+       if (ret < 0) {
+               IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+               goto out;
+       }
+
+       /*
+        * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+        * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+        *
+        * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+        * do not disable clocks.  This preserves any hardware bits already
+        * set by default in "CLK_CTRL_REG" after reset.
+        */
+       if (priv->cfg->base_params->use_bsm)
+               iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+                       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+       else
+               iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+                       APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(20);
+
+       /* Disable L1-Active */
+       iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+                         APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_apm_init);
+
+
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
+{
+       int ret;
+       s8 prev_tx_power;
+       bool defer;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (priv->tx_power_user_lmt == tx_power && !force)
+               return 0;
+
+       if (!priv->cfg->ops->lib->send_tx_power)
+               return -EOPNOTSUPP;
+
+       if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+               IWL_WARN(priv,
+                        "Requested user TXPOWER %d below lower limit %d.\n",
+                        tx_power,
+                        IWL4965_TX_POWER_TARGET_POWER_MIN);
+               return -EINVAL;
+       }
+
+       if (tx_power > priv->tx_power_device_lmt) {
+               IWL_WARN(priv,
+                       "Requested user TXPOWER %d above upper limit %d.\n",
+                        tx_power, priv->tx_power_device_lmt);
+               return -EINVAL;
+       }
+
+       if (!iwl_legacy_is_ready_rf(priv))
+               return -EIO;
+
+       /* scan complete and commit_rxon use the tx_power_next value,
+        * it always needs to be updated with the newest request */
+       priv->tx_power_next = tx_power;
+
+       /* do not set tx power when scanning or channel changing */
+       defer = test_bit(STATUS_SCANNING, &priv->status) ||
+               memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+       if (defer && !force) {
+               IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+               return 0;
+       }
+
+       prev_tx_power = priv->tx_power_user_lmt;
+       priv->tx_power_user_lmt = tx_power;
+
+       ret = priv->cfg->ops->lib->send_tx_power(priv);
+
+       /* if setting tx_power fails, restore the original tx power */
+       if (ret) {
+               priv->tx_power_user_lmt = prev_tx_power;
+               priv->tx_power_next = prev_tx_power;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_set_tx_power);
+
+void iwl_legacy_send_bt_config(struct iwl_priv *priv)
+{
+       struct iwl_bt_cmd bt_cmd = {
+               .lead_time = BT_LEAD_TIME_DEF,
+               .max_kill = BT_MAX_KILL_DEF,
+               .kill_ack_mask = 0,
+               .kill_cts_mask = 0,
+       };
+
+       if (!bt_coex_active)
+               bt_cmd.flags = BT_COEX_DISABLE;
+       else
+               bt_cmd.flags = BT_COEX_ENABLE;
+
+       IWL_DEBUG_INFO(priv, "BT coex %s\n",
+               (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+       if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+                            sizeof(struct iwl_bt_cmd), &bt_cmd))
+               IWL_ERR(priv, "failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(iwl_legacy_send_bt_config);
+
+int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+       struct iwl_statistics_cmd statistics_cmd = {
+               .configuration_flags =
+                       clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+       };
+
+       if (flags & CMD_ASYNC)
+               return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+                                       sizeof(struct iwl_statistics_cmd),
+                                       &statistics_cmd, NULL);
+       else
+               return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+                                       sizeof(struct iwl_statistics_cmd),
+                                       &statistics_cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
+
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+                          struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+       IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+                    sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
+
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+                       "notification for %s:\n", len,
+                       iwl_legacy_get_cmd_string(pkt->hdr.cmd));
+       iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
+
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+                       struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+               "seq 0x%04X ser 0x%08X\n",
+               le32_to_cpu(pkt->u.err_resp.error_type),
+               iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
+               pkt->u.err_resp.cmd_id,
+               le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+               le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
+{
+       memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
+}
+
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+                          const struct ieee80211_tx_queue_params *params)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx;
+       unsigned long flags;
+       int q;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+               return -EIO;
+       }
+
+       if (queue >= AC_NUM) {
+               IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+               return 0;
+       }
+
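+       /*
+        * mac80211 numbers queues from highest priority down (0 = VO);
+        * the index is reversed here, presumably to match the ordering
+        * of the uCode QoS parameter table.
+        */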
+       q = AC_NUM - 1 - queue;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       for_each_context(priv, ctx) {
+               ctx->qos_data.def_qos_parm.ac[q].cw_min =
+                       cpu_to_le16(params->cw_min);
+               ctx->qos_data.def_qos_parm.ac[q].cw_max =
+                       cpu_to_le16(params->cw_max);
+               ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+               ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+                               cpu_to_le16((params->txop * 32));
+
+               ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
+
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
+
+static int
+iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       iwl_legacy_connection_init_rx_config(priv, ctx);
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       return iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static int iwl_legacy_setup_interface(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx)
+{
+       struct ieee80211_vif *vif = ctx->vif;
+       int err;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /*
+        * This variable will be correct only when there's just
+        * a single context, but all code using it is for hardware
+        * that supports only one context.
+        */
+       priv->iw_mode = vif->type;
+
+       ctx->is_active = true;
+
+       err = iwl_legacy_set_mode(priv, ctx);
+       if (err) {
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+               return err;
+       }
+
+       return 0;
+}
+
+int
+iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *tmp, *ctx = NULL;
+       int err;
+
+       IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+                          vif->type, vif->addr);
+
+       mutex_lock(&priv->mutex);
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_WARN(priv, "Try to add interface when device not ready\n");
+               err = -EINVAL;
+               goto out;
+       }
+
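+       /*
+        * Pick the first idle context whose supported modes include the
+        * requested interface type; a busy context in an exclusive mode
+        * blocks the add entirely.
+        */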
+       for_each_context(priv, tmp) {
+               u32 possible_modes =
+                       tmp->interface_modes | tmp->exclusive_interface_modes;
+
+               if (tmp->vif) {
+                       /* check if this busy context is exclusive */
+                       if (tmp->exclusive_interface_modes &
+                                               BIT(tmp->vif->type)) {
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       continue;
+               }
+
+               if (!(possible_modes & BIT(vif->type)))
+                       continue;
+
+               /* found a possibly usable context without an interface */
+               ctx = tmp;
+               break;
+       }
+
+       if (!ctx) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       vif_priv->ctx = ctx;
+       ctx->vif = vif;
+
+       err = iwl_legacy_setup_interface(priv, ctx);
+       if (!err)
+               goto out;
+
+       ctx->vif = NULL;
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
+
+static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif,
+                                  bool mode_change)
+{
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (priv->scan_vif == vif) {
+               iwl_legacy_scan_cancel_timeout(priv, 200);
+               iwl_legacy_force_scan_end(priv);
+       }
+
+       if (!mode_change) {
+               iwl_legacy_set_mode(priv, ctx);
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+       }
+}
+
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       mutex_lock(&priv->mutex);
+
+       WARN_ON(ctx->vif != vif);
+       ctx->vif = NULL;
+
+       iwl_legacy_teardown_interface(priv, vif, false);
+
+       memset(priv->bssid, 0, ETH_ALEN);
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
+
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
+{
+       if (!priv->txq)
+               priv->txq = kzalloc(
+                       sizeof(struct iwl_tx_queue) *
+                               priv->cfg->base_params->num_of_queues,
+                       GFP_KERNEL);
+       if (!priv->txq) {
+               IWL_ERR(priv, "Not enough memory for txq\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
+
+void iwl_legacy_txq_mem(struct iwl_priv *priv)
+{
+       kfree(priv->txq);
+       priv->txq = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_mem);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+
+#define IWL_TRAFFIC_DUMP_SIZE  (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
+
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+       priv->tx_traffic_idx = 0;
+       priv->rx_traffic_idx = 0;
+       if (priv->tx_traffic)
+               memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+       if (priv->rx_traffic)
+               memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+}
+
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+       u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
+
+       if (iwlegacy_debug_level & IWL_DL_TX) {
+               if (!priv->tx_traffic) {
+                       priv->tx_traffic =
+                               kzalloc(traffic_size, GFP_KERNEL);
+                       if (!priv->tx_traffic)
+                               return -ENOMEM;
+               }
+       }
+       if (iwlegacy_debug_level & IWL_DL_RX) {
+               if (!priv->rx_traffic) {
+                       priv->rx_traffic =
+                               kzalloc(traffic_size, GFP_KERNEL);
+                       if (!priv->rx_traffic)
+                               return -ENOMEM;
+               }
+       }
+       iwl_legacy_reset_traffic_log(priv);
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
+
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+       kfree(priv->tx_traffic);
+       priv->tx_traffic = NULL;
+
+       kfree(priv->rx_traffic);
+       priv->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
+
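+/*
+ * Copy up to IWL_TRAFFIC_ENTRY_SIZE bytes of each data frame (starting at
+ * its 802.11 header) into a circular debug buffer of IWL_TRAFFIC_ENTRIES
+ * entries.
+ */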
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+       __le16 fc;
+       u16 len;
+
+       if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
+               return;
+
+       if (!priv->tx_traffic)
+               return;
+
+       fc = header->frame_control;
+       if (ieee80211_is_data(fc)) {
+               len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+                      ? IWL_TRAFFIC_ENTRY_SIZE : length;
+               memcpy((priv->tx_traffic +
+                      (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+                      header, len);
+               priv->tx_traffic_idx =
+                       (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
+
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+       __le16 fc;
+       u16 len;
+
+       if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
+               return;
+
+       if (!priv->rx_traffic)
+               return;
+
+       fc = header->frame_control;
+       if (ieee80211_is_data(fc)) {
+               len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+                      ? IWL_TRAFFIC_ENTRY_SIZE : length;
+               memcpy((priv->rx_traffic +
+                      (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+                      header, len);
+               priv->rx_traffic_idx =
+                       (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
+
+const char *iwl_legacy_get_mgmt_string(int cmd)
+{
+       switch (cmd) {
+               IWL_CMD(MANAGEMENT_ASSOC_REQ);
+               IWL_CMD(MANAGEMENT_ASSOC_RESP);
+               IWL_CMD(MANAGEMENT_REASSOC_REQ);
+               IWL_CMD(MANAGEMENT_REASSOC_RESP);
+               IWL_CMD(MANAGEMENT_PROBE_REQ);
+               IWL_CMD(MANAGEMENT_PROBE_RESP);
+               IWL_CMD(MANAGEMENT_BEACON);
+               IWL_CMD(MANAGEMENT_ATIM);
+               IWL_CMD(MANAGEMENT_DISASSOC);
+               IWL_CMD(MANAGEMENT_AUTH);
+               IWL_CMD(MANAGEMENT_DEAUTH);
+               IWL_CMD(MANAGEMENT_ACTION);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+
+const char *iwl_legacy_get_ctrl_string(int cmd)
+{
+       switch (cmd) {
+               IWL_CMD(CONTROL_BACK_REQ);
+               IWL_CMD(CONTROL_BACK);
+               IWL_CMD(CONTROL_PSPOLL);
+               IWL_CMD(CONTROL_RTS);
+               IWL_CMD(CONTROL_CTS);
+               IWL_CMD(CONTROL_ACK);
+               IWL_CMD(CONTROL_CFEND);
+               IWL_CMD(CONTROL_CFENDACK);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
+{
+       memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
+       memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
+ * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
+ * use debugfs to display the tx/rx statistics.
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
+ * information is recorded, but DATA packets are still counted because
+ * iwl_led.c needs to control the LED blinking based on the number of
+ * tx and rx data frames.
+ */
+void
+iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
+{
+       struct traffic_stats    *stats;
+
+       if (is_tx)
+               stats = &priv->tx_stats;
+       else
+               stats = &priv->rx_stats;
+
+       if (ieee80211_is_mgmt(fc)) {
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+                       stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+                       stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+                       stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+                       stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+                       stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_BEACON):
+                       stats->mgmt[MANAGEMENT_BEACON]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ATIM):
+                       stats->mgmt[MANAGEMENT_ATIM]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+                       stats->mgmt[MANAGEMENT_DISASSOC]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+                       stats->mgmt[MANAGEMENT_AUTH]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+                       stats->mgmt[MANAGEMENT_DEAUTH]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ACTION):
+                       stats->mgmt[MANAGEMENT_ACTION]++;
+                       break;
+               }
+       } else if (ieee80211_is_ctl(fc)) {
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+                       stats->ctrl[CONTROL_BACK_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_BACK):
+                       stats->ctrl[CONTROL_BACK]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+                       stats->ctrl[CONTROL_PSPOLL]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_RTS):
+                       stats->ctrl[CONTROL_RTS]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CTS):
+                       stats->ctrl[CONTROL_CTS]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ACK):
+                       stats->ctrl[CONTROL_ACK]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CFEND):
+                       stats->ctrl[CONTROL_CFEND]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+                       stats->ctrl[CONTROL_CFENDACK]++;
+                       break;
+               }
+       } else {
+               /* data */
+               stats->data_cnt++;
+               stats->data_bytes += len;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_update_stats);
+#endif
+
+static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (!iwl_legacy_is_any_associated(priv)) {
+               IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+               return;
+       }
+       /*
+        * There is no easy and better way to force reset the radio;
+        * the only known method is switching the channel, which forces
+        * the radio to be reset and retuned.  Use an internal short scan
+        * (single channel) operation to achieve this.
+        * The driver should reset the radio when too many consecutive
+        * beacons are missed, or any other uCode error condition is
+        * detected.
+        */
+       IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+       iwl_legacy_internal_short_hw_scan(priv);
+}
+
+
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
+{
+       struct iwl_force_reset *force_reset;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EINVAL;
+
+       if (mode >= IWL_MAX_FORCE_RESET) {
+               IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+               return -EINVAL;
+       }
+       force_reset = &priv->force_reset[mode];
+       force_reset->reset_request_count++;
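+       /*
+        * Rate-limit internal requests: reject the reset if the previous
+        * forced reset happened less than reset_duration ago.  External
+        * requests (e.g. from debugfs) bypass this check.
+        */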
+       if (!external) {
+               if (force_reset->last_force_reset_jiffies &&
+                   time_after(force_reset->last_force_reset_jiffies +
+                   force_reset->reset_duration, jiffies)) {
+                       IWL_DEBUG_INFO(priv, "force reset rejected\n");
+                       force_reset->reset_reject_count++;
+                       return -EAGAIN;
+               }
+       }
+       force_reset->reset_success_count++;
+       force_reset->last_force_reset_jiffies = jiffies;
+       IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+       switch (mode) {
+       case IWL_RF_RESET:
+               _iwl_legacy_force_rf_reset(priv);
+               break;
+       case IWL_FW_RESET:
+               /*
+                * If the request comes from an external source (e.g. debugfs),
+                * always perform it, regardless of the module parameter
+                * setting.
+                * If the request is internal (uCode error or driver-detected
+                * failure), the fw_restart module parameter must be checked
+                * before reloading the firmware.
+                */
+               if (!external && !priv->cfg->mod_params->restart_fw) {
+                       IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
+                                      "module parameter setting\n");
+                       break;
+               }
+               IWL_ERR(priv, "On demand firmware reload\n");
+               /* Set the FW error flag -- cleared on iwl_down */
+               set_bit(STATUS_FW_ERROR, &priv->status);
+               wake_up_interruptible(&priv->wait_command_queue);
+               /*
+                * Keep the restart process from trying to send host
+                * commands by clearing the READY status bit
+                */
+               clear_bit(STATUS_READY, &priv->status);
+               queue_work(priv->workqueue, &priv->restart);
+               break;
+       }
+       return 0;
+}
+
+int
+iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+                       struct ieee80211_vif *vif,
+                       enum nl80211_iftype newtype, bool newp2p)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+       struct iwl_rxon_context *tmp;
+       u32 interface_modes;
+       int err;
+
+       newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+       mutex_lock(&priv->mutex);
+
+       interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+       if (!(interface_modes & BIT(newtype))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (ctx->exclusive_interface_modes & BIT(newtype)) {
+               for_each_context(priv, tmp) {
+                       if (ctx == tmp)
+                               continue;
+
+                       if (!tmp->vif)
+                               continue;
+
+                       /*
+                        * The current mode switch would be exclusive, but
+                        * another context is active ... refuse the switch.
+                        */
+                       err = -EBUSY;
+                       goto out;
+               }
+       }
+
+       /* success */
+       iwl_legacy_teardown_interface(priv, vif, true);
+       vif->type = newtype;
+       err = iwl_legacy_setup_interface(priv, ctx);
+       WARN_ON(err);
+       /*
+        * We've switched internally, but submitting to the
+        * device may have failed for some reason. Mask this
+        * error, because otherwise mac80211 will not switch
+        * (and set the interface type back) and we'll be
+        * out of sync with it.
+        */
+       err = 0;
+
+ out:
+       mutex_unlock(&priv->mutex);
+       return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it has not
+ * changed during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+       struct iwl_tx_queue *txq = &priv->txq[cnt];
+       struct iwl_queue *q = &txq->q;
+       unsigned long timeout;
+       int ret;
+
+       if (q->read_ptr == q->write_ptr) {
+               txq->time_stamp = jiffies;
+               return 0;
+       }
+
+       timeout = txq->time_stamp +
+                 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+
+       if (time_after(jiffies, timeout)) {
+               IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
+                               q->id, priv->cfg->base_params->wd_timeout);
+               ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
+               return (ret == -EAGAIN) ? 0 : 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we discover
+ * a hung queue between timeout and 1.25*timeout
+ */
+#define IWL_WD_TICK(timeout) ((timeout) / 4)
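+
+/*
+ * Illustrative example (numbers are not from any particular config): with a
+ * wd_timeout of 2000 ms the watchdog fires every 500 ms, so a queue that has
+ * stopped making progress is detected between 2000 ms and 2500 ms after its
+ * last time stamp update, i.e. between timeout and 1.25*timeout.
+ */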
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stall; if one is
+ * hung we reset the firmware. If everything is fine, just rearm the timer.
+ */
+void iwl_legacy_bg_watchdog(unsigned long data)
+{
+       struct iwl_priv *priv = (struct iwl_priv *)data;
+       int cnt;
+       unsigned long timeout;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       timeout = priv->cfg->base_params->wd_timeout;
+       if (timeout == 0)
+               return;
+
+       /* monitor and check for stuck cmd queue */
+       if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
+               return;
+
+       /* monitor and check for other stuck queues */
+       if (iwl_legacy_is_any_associated(priv)) {
+               for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+                       /* skip as we already checked the command queue */
+                       if (cnt == priv->cmd_queue)
+                               continue;
+                       if (iwl_legacy_check_stuck_queue(priv, cnt))
+                               return;
+               }
+       }
+
+       mod_timer(&priv->watchdog, jiffies +
+                 msecs_to_jiffies(IWL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
+
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
+{
+       unsigned int timeout = priv->cfg->base_params->wd_timeout;
+
+       if (timeout)
+               mod_timer(&priv->watchdog,
+                         jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
+       else
+               del_timer(&priv->watchdog);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
+
+/*
+ * extended beacon time format:
+ * the time in usec is converted into a 32-bit value in extended:internal
+ * format; the extended part is the beacon count and the internal part is
+ * the time in usec within one beacon interval.
+ */
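+/*
+ * Worked example (illustrative; assumes TIME_UNIT is 1024 usec, i.e. one TU):
+ * with beacon_interval = 100 TU the interval is 102400 usec, so usec = 250000
+ * packs as quot = 2 beacons plus rem = 45200 usec, giving
+ * (2 << beacon_time_tsf_bits) + 45200.
+ */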
+u32
+iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+                                       u32 usec, u32 beacon_interval)
+{
+       u32 quot;
+       u32 rem;
+       u32 interval = beacon_interval * TIME_UNIT;
+
+       if (!interval || !usec)
+               return 0;
+
+       quot = (usec / interval) &
+               (iwl_legacy_beacon_time_mask_high(priv,
+               priv->hw_params.beacon_time_tsf_bits) >>
+               priv->hw_params.beacon_time_tsf_bits);
+       rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
+                                  priv->hw_params.beacon_time_tsf_bits);
+
+       return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
+
+/* base is usually what we get from the uCode with each received frame;
+ * it is the same as the HW timer counter counting down
+ */
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+                          u32 addon, u32 beacon_interval)
+{
+       u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
+                                       priv->hw_params.beacon_time_tsf_bits);
+       u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
+                                       priv->hw_params.beacon_time_tsf_bits);
+       u32 interval = beacon_interval * TIME_UNIT;
+       u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
+                               priv->hw_params.beacon_time_tsf_bits)) +
+                               (addon & iwl_legacy_beacon_time_mask_high(priv,
+                               priv->hw_params.beacon_time_tsf_bits));
+
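+       /*
+        * Combine the beacon-count (high) parts first, then fix up the
+        * in-interval remainder: if the low part wrapped past the beacon
+        * boundary, carry one beacon into the extended part.
+        */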
+       if (base_low > addon_low)
+               res += base_low - addon_low;
+       else if (base_low < addon_low) {
+               res += interval + base_low - addon_low;
+               res += (1 << priv->hw_params.beacon_time_tsf_bits);
+       } else
+               res += (1 << priv->hw_params.beacon_time_tsf_bits);
+
+       return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int iwl_legacy_pci_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+       /*
+        * This function is called when the system goes into suspend state.
+        * mac80211 will call iwl_mac_stop() from its suspend function first,
+        * but since iwl_mac_stop() has no knowledge of who the caller is,
+        * it will not call apm_ops.stop() to stop the DMA operation.
+        * Call apm_ops.stop() here to make sure the DMA is stopped.
+        */
+       iwl_legacy_apm_stop(priv);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_suspend);
+
+int iwl_legacy_pci_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+       bool hw_rfkill = false;
+
+       /*
+        * We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state.
+        */
+       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+       iwl_legacy_enable_interrupts(priv);
+
+       if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+               hw_rfkill = true;
+
+       if (hw_rfkill)
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_resume);
+
+const struct dev_pm_ops iwl_legacy_pm_ops = {
+       .suspend = iwl_legacy_pci_suspend,
+       .resume = iwl_legacy_pci_resume,
+       .freeze = iwl_legacy_pci_suspend,
+       .thaw = iwl_legacy_pci_resume,
+       .poweroff = iwl_legacy_pci_suspend,
+       .restore = iwl_legacy_pci_resume,
+};
+EXPORT_SYMBOL(iwl_legacy_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+       if (ctx->qos_data.qos_active)
+               ctx->qos_data.def_qos_parm.qos_flags |=
+                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+       if (ctx->ht.enabled)
+               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+                     ctx->qos_data.qos_active,
+                     ctx->qos_data.def_qos_parm.qos_flags);
+
+       iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
+                              sizeof(struct iwl_qosparam_cmd),
+                              &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * iwl_legacy_mac_config - mac80211 config callback
+ */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct iwl_priv *priv = hw->priv;
+       const struct iwl_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = conf->channel;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct iwl_rxon_context *ctx;
+       unsigned long flags = 0;
+       int ret = 0;
+       u16 ch;
+       int scan_active = 0;
+       bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+                                       channel->hw_value, changed);
+
+       if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+                       test_bit(STATUS_SCANNING, &priv->status))) {
+               scan_active = 1;
+               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+                      IEEE80211_CONF_CHANGE_CHANNEL)) {
+               /* mac80211 uses static SMPS for non-HT, which is what we want */
+               priv->current_ht_config.smps = conf->smps_mode;
+
+               /*
+                * Recalculate chain counts.
+                *
+                * If monitor mode is enabled then mac80211 will
+                * set up the SM PS mode to OFF if an HT channel is
+                * configured.
+                */
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       for_each_context(priv, ctx)
+                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       /* during scanning, mac80211 will delay channel setting until
+        * the scan finishes, with changed = 0
+        */
+       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+               if (scan_active)
+                       goto set_ch_out;
+
+               ch = channel->hw_value;
+               ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
+               if (!iwl_legacy_is_channel_valid(ch_info)) {
+                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
+               spin_lock_irqsave(&priv->lock, flags);
+
+               for_each_context(priv, ctx) {
+                       /* Configure HT40 channels */
+                       if (ctx->ht.enabled != conf_is_ht(conf)) {
+                               ctx->ht.enabled = conf_is_ht(conf);
+                               ht_changed[ctx->ctxid] = true;
+                       }
+                       if (ctx->ht.enabled) {
+                               if (conf_is_ht40_minus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                                       ctx->ht.is_40mhz = true;
+                               } else if (conf_is_ht40_plus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                                       ctx->ht.is_40mhz = true;
+                               } else {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                                       ctx->ht.is_40mhz = false;
+                               }
+                       } else
+                               ctx->ht.is_40mhz = false;
+
+                       /*
+                        * Default to no protection. Protection mode will
+                        * later be set from BSS config in iwl_ht_conf
+                        */
+                       ctx->ht.protection =
+                                       IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+                       /* if we are switching from HT to 2.4 GHz, clear
+                        * any HT-related flags since 2.4 GHz does not
+                        * support HT */
+                       if ((le16_to_cpu(ctx->staging.channel) != ch))
+                               ctx->staging.flags = 0;
+
+                       iwl_legacy_set_rxon_channel(priv, channel, ctx);
+                       iwl_legacy_set_rxon_ht(priv, ht_conf);
+
+                       iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+                                              ctx->vif);
+               }
+
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               if (priv->cfg->ops->legacy->update_bcast_stations)
+                       ret =
+                       priv->cfg->ops->legacy->update_bcast_stations(priv);
+
+ set_ch_out:
+               /* The list of supported rates and rate mask can be different
+                * for each band; since the band may have changed, reset
+                * the rate mask to what mac80211 lists */
+               iwl_legacy_set_rate(priv);
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_PS |
+                       IEEE80211_CONF_CHANGE_IDLE)) {
+               ret = iwl_legacy_power_update_mode(priv, false);
+               if (ret)
+                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+                       priv->tx_power_user_lmt, conf->power_level);
+
+               iwl_legacy_set_tx_power(priv, conf->power_level, false);
+       }
+
+       if (!iwl_legacy_is_ready(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               goto out;
+       }
+
+       if (scan_active)
+               goto out;
+
+       for_each_context(priv, ctx) {
+               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+                       iwl_legacy_commit_rxon(priv, ctx);
+               else
+                       IWL_DEBUG_INFO(priv,
+                               "Not re-sending same RXON configuration.\n");
+               if (ht_changed[ctx->ctxid])
+                       iwl_legacy_update_qos(priv, ctx);
+       }
+
+out:
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       mutex_unlock(&priv->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_config);
+
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       unsigned long flags;
+       /* IBSS can only be the IWL_RXON_CTX_BSS context */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return;
+
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       spin_lock_irqsave(&priv->lock, flags);
+       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* new association - get rid of the ibss beacon skb */
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       priv->beacon_skb = NULL;
+
+       priv->timestamp = 0;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl_legacy_scan_cancel_timeout(priv, 100);
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               mutex_unlock(&priv->mutex);
+               return;
+       }
+
+       /* we are restarting the association process;
+        * clear the RXON_FILTER_ASSOC_MSK bit
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwl_legacy_commit_rxon(priv, ctx);
+
+       iwl_legacy_set_rate(priv);
+
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
+
+static void iwl_legacy_ht_conf(struct iwl_priv *priv,
+                       struct ieee80211_vif *vif)
+{
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct ieee80211_sta *sta;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_ASSOC(priv, "enter:\n");
+
+       if (!ctx->ht.enabled)
+               return;
+
+       ctx->ht.protection =
+               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+       ctx->ht.non_gf_sta_present =
+               !!(bss_conf->ht_operation_mode &
+                               IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+       ht_conf->single_chain_sufficient = false;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               rcu_read_lock();
+               sta = ieee80211_find_sta(vif, bss_conf->bssid);
+               if (sta) {
+                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+                       int maxstreams;
+
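+                       /*
+                        * Peer's maximum number of TX spatial streams; the
+                        * HT MCS TX params field encodes (streams - 1),
+                        * hence the + 1 below.
+                        */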
+                       maxstreams = (ht_cap->mcs.tx_params &
+                             IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+                               >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
+                           (ht_cap->mcs.rx_mask[2] == 0))
+                               ht_conf->single_chain_sufficient = true;
+                       if (maxstreams <= 1)
+                               ht_conf->single_chain_sufficient = true;
+               } else {
+                       /*
+                        * If this happens at all, it can only be through a
+                        * race when the AP disconnects us while we're still
+                        * setting up the connection; in that case mac80211
+                        * will soon tell us about it.
+                        */
+                       ht_conf->single_chain_sufficient = true;
+               }
+               rcu_read_unlock();
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ht_conf->single_chain_sufficient = true;
+               break;
+       default:
+               break;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "leave\n");
+}
+
+static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
+                                   struct ieee80211_vif *vif)
+{
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       /*
+        * inform the ucode that there is no longer an
+        * association and that no more packets should be
+        * sent
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       ctx->staging.assoc_id = 0;
+       iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       unsigned long flags;
+       __le64 timestamp;
+       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+       if (!skb)
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_ctx) {
+               IWL_ERR(priv, "update beacon but no beacon context!\n");
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       priv->beacon_skb = skb;
+
+       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+       priv->timestamp = le64_to_cpu(timestamp);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+               return;
+       }
+
+       priv->cfg->ops->legacy->post_associate(priv);
+}
+
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+       int ret;
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
+
+       if (!iwl_legacy_is_alive(priv))
+               return;
+
+       mutex_lock(&priv->mutex);
+
+       if (changes & BSS_CHANGED_QOS) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&priv->lock, flags);
+               ctx->qos_data.qos_active = bss_conf->qos;
+               iwl_legacy_update_qos(priv, ctx);
+               spin_unlock_irqrestore(&priv->lock, flags);
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               /*
+                * the add_interface code must make sure we only ever
+                * have a single interface that could be beaconing at
+                * any time.
+                */
+               if (vif->bss_conf.enable_beacon)
+                       priv->beacon_ctx = ctx;
+               else
+                       priv->beacon_ctx = NULL;
+       }
+
+       if (changes & BSS_CHANGED_BSSID) {
+               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
+
+               /*
+                * If there is currently a HW scan going on in the
+                * background then we need to cancel it; otherwise the RXON
+                * below/in post_associate will fail.
+                */
+               if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
+                       IWL_WARN(priv,
+                               "Aborted scan still in progress after 100ms\n");
+                       IWL_DEBUG_MAC80211(priv,
+                               "leaving - scan abort failed.\n");
+                       mutex_unlock(&priv->mutex);
+                       return;
+               }
+
+               /* mac80211 only sets assoc when in STATION mode */
+               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+                       memcpy(ctx->staging.bssid_addr,
+                              bss_conf->bssid, ETH_ALEN);
+
+                       /* currently needed in a few places */
+                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+               } else {
+                       ctx->staging.filter_flags &=
+                               ~RXON_FILTER_ASSOC_MSK;
+               }
+
+       }
+
+       /*
+        * This needs to be after setting the BSSID in case
+        * mac80211 decides to do both changes at once because
+        * it will invoke post_associate.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
+               iwl_legacy_beacon_update(hw, vif);
+
+       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
+                                  bss_conf->use_short_preamble);
+               if (bss_conf->use_short_preamble)
+                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       }
+
+       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+               IWL_DEBUG_MAC80211(priv,
+                       "ERP_CTS %d\n", bss_conf->use_cts_prot);
+               if (bss_conf->use_cts_prot &&
+                       (priv->band != IEEE80211_BAND_5GHZ))
+                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+               if (bss_conf->use_cts_prot)
+                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+       }
+
+       if (changes & BSS_CHANGED_BASIC_RATES) {
+               /* XXX use this information
+                *
+                * To do that, remove code from iwl_legacy_set_rate() and put something
+                * like this here:
+                *
+               if (A-band)
+                       ctx->staging.ofdm_basic_rates =
+                               bss_conf->basic_rates;
+               else
+                       ctx->staging.ofdm_basic_rates =
+                               bss_conf->basic_rates >> 4;
+                       ctx->staging.cck_basic_rates =
+                               bss_conf->basic_rates & 0xF;
+                */
+       }
+
+       if (changes & BSS_CHANGED_HT) {
+               iwl_legacy_ht_conf(priv, vif);
+
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
+               if (bss_conf->assoc) {
+                       priv->timestamp = bss_conf->timestamp;
+
+                       if (!iwl_legacy_is_rfkill(priv))
+                               priv->cfg->ops->legacy->post_associate(priv);
+               } else
+                       iwl_legacy_set_no_assoc(priv, vif);
+       }
+
+       if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
+               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
+                                  changes);
+               ret = iwl_legacy_send_rxon_assoc(priv, ctx);
+               if (!ret) {
+                       /* Sync active_rxon with latest change. */
+                       memcpy((void *)&ctx->active,
+                               &ctx->staging,
+                               sizeof(struct iwl_legacy_rxon_cmd));
+               }
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               if (vif->bss_conf.enable_beacon) {
+                       memcpy(ctx->staging.bssid_addr,
+                              bss_conf->bssid, ETH_ALEN);
+                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+                       priv->cfg->ops->legacy->config_ap(priv);
+               } else
+                       iwl_legacy_set_no_assoc(priv, vif);
+       }
+
+       if (changes & BSS_CHANGED_IBSS) {
+               ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
+                                                       bss_conf->ibss_joined);
+               if (ret)
+                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+                               bss_conf->ibss_joined ? "add" : "remove",
+                               bss_conf->bssid);
+       }
+
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data)
+{
+       struct iwl_priv *priv = data;
+       u32 inta, inta_mask;
+       u32 inta_fh;
+       unsigned long flags;
+       if (!priv)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Disable (but don't clear!) interrupts here to avoid
+        *    back-to-back ISRs and sporadic interrupts from our NIC.
+        * If we have something to service, the tasklet will re-enable ints.
+        * If we *don't* have something, we'll re-enable before leaving here. */
+       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
+       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+       /* Discover which interrupts are active/pending */
+       inta = iwl_read32(priv, CSR_INT);
+       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+       /* Ignore interrupt if there's nothing in NIC to service.
+        * This may be due to IRQ shared with another device,
+        * or due to sporadic interrupts thrown from our NIC. */
+       if (!inta && !inta_fh) {
+               IWL_DEBUG_ISR(priv,
+                       "Ignore interrupt, inta == 0, inta_fh == 0\n");
+               goto none;
+       }
+
+       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+               /* Hardware disappeared. It might have already raised
+                * an interrupt */
+               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+               goto unplugged;
+       }
+
+       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                     inta, inta_mask, inta_fh);
+
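+       /* mask out the scheduler (SCD) bit so it alone does not schedule
+        * the tasklet */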
+       inta &= ~CSR_INT_BIT_SCD;
+
+       /* iwl_irq_tasklet() will service interrupts and re-enable them */
+       if (likely(inta || inta_fh))
+               tasklet_schedule(&priv->irq_tasklet);
+
+unplugged:
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_HANDLED;
+
+none:
+       /* re-enable interrupts here since we don't have anything to service. */
+       /* only re-enable if disabled by irq */
+       if (test_bit(STATUS_INT_ENABLED, &priv->status))
+               iwl_legacy_enable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_legacy_isr);
+
+/*
+ *  iwl_legacy_tx_cmd_protection: Set rts/cts.  3945 and 4965 share only
+ *  this function.
+ */
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+                              struct ieee80211_tx_info *info,
+                              __le16 fc, __le32 *tx_flags)
+{
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+               *tx_flags |= TX_CMD_FLG_RTS_MSK;
+               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+               if (!ieee80211_is_mgmt(fc))
+                       return;
+
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
+                       break;
+               }
+       } else if (info->control.rates[0].flags &
+                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
+               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+               *tx_flags |= TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
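A usage sketch for the helper above; the caller shown here is hypothetical, and in the real driver the resulting flags end up in the device TX command:

/* Hypothetical caller -- the real TX path lives in the 3945/4965 code. */
static void example_build_protection(struct iwl_priv *priv,
				     struct sk_buff *skb,
				     struct ieee80211_tx_info *info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le32 tx_flags = 0;

	/* Picks RTS or CTS-to-self protection based on mac80211's rate
	 * control flags and the management frame subtype. */
	iwl_legacy_tx_cmd_protection(priv, info, hdr->frame_control, &tx_flags);
}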
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644 (file)
index 0000000..f03b463
--- /dev/null
@@ -0,0 +1,646 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_core_h__
+#define __iwl_legacy_core_h__
+
+/************************
+ * forward declarations *
+ ************************/
+struct iwl_host_cmd;
+struct iwl_cmd;
+
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+       .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+       .driver_data = (kernel_ulong_t)&(cfg)
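The IWL_PCI_DEVICE() macro is meant for the driver's PCI ID table. A sketch of how such an entry could look, using placeholder device IDs and a hypothetical iwl_example_cfg configuration:

/* Hypothetical PCI ID table -- the IDs and cfg name are placeholders. */
static DEFINE_PCI_DEVICE_TABLE(iwl_example_hw_card_ids) = {
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl_example_cfg)},
	{0}
};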
+
+#define TIME_UNIT              1024
+
+#define IWL_SKU_G       0x1
+#define IWL_SKU_A       0x2
+#define IWL_SKU_N       0x8
+
+#define IWL_CMD(x) case x: return #x
+
+struct iwl_hcmd_ops {
+       int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+       int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+       void (*set_rxon_chain)(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx);
+};
+
+struct iwl_hcmd_utils_ops {
+       u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
+       u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
+                                                               u8 *data);
+       int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
+       void (*post_scan)(struct iwl_priv *priv);
+};
+
+struct iwl_apm_ops {
+       int (*init)(struct iwl_priv *priv);
+       void (*config)(struct iwl_priv *priv);
+};
+
+struct iwl_debugfs_ops {
+       ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos);
+       ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos);
+       ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos);
+};
+
+struct iwl_temp_ops {
+       void (*temperature)(struct iwl_priv *priv);
+};
+
+struct iwl_lib_ops {
+       /* set hw dependent parameters */
+       int (*set_hw_params)(struct iwl_priv *priv);
+       /* Handling TX */
+       void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       u16 byte_cnt);
+       int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
+                                    struct iwl_tx_queue *txq,
+                                    dma_addr_t addr,
+                                    u16 len, u8 reset, u8 pad);
+       void (*txq_free_tfd)(struct iwl_priv *priv,
+                            struct iwl_tx_queue *txq);
+       int (*txq_init)(struct iwl_priv *priv,
+                       struct iwl_tx_queue *txq);
+       /* setup Rx handler */
+       void (*rx_handler_setup)(struct iwl_priv *priv);
+       /* alive notification after init uCode load */
+       void (*init_alive_start)(struct iwl_priv *priv);
+       /* check validity of rtc data address */
+       int (*is_valid_rtc_data_addr)(u32 addr);
+       /* 1st ucode load */
+       int (*load_ucode)(struct iwl_priv *priv);
+       int (*dump_nic_event_log)(struct iwl_priv *priv,
+                                 bool full_log, char **buf, bool display);
+       void (*dump_nic_error_log)(struct iwl_priv *priv);
+       int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
+       int (*set_channel_switch)(struct iwl_priv *priv,
+                                 struct ieee80211_channel_switch *ch_switch);
+       /* power management */
+       struct iwl_apm_ops apm_ops;
+
+       /* power */
+       int (*send_tx_power) (struct iwl_priv *priv);
+       void (*update_chain_flags)(struct iwl_priv *priv);
+
+       /* eeprom operations (as defined in iwl-eeprom.h) */
+       struct iwl_eeprom_ops eeprom_ops;
+
+       /* temperature */
+       struct iwl_temp_ops temp_ops;
+       /* check for plcp health */
+       bool (*check_plcp_health)(struct iwl_priv *priv,
+                                       struct iwl_rx_packet *pkt);
+
+       struct iwl_debugfs_ops debugfs_ops;
+
+};
+
+struct iwl_led_ops {
+       int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
+};
+
+struct iwl_legacy_ops {
+       void (*post_associate)(struct iwl_priv *priv);
+       void (*config_ap)(struct iwl_priv *priv);
+       /* station management */
+       int (*update_bcast_stations)(struct iwl_priv *priv);
+       int (*manage_ibss_station)(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif, bool add);
+};
+
+struct iwl_ops {
+       const struct iwl_lib_ops *lib;
+       const struct iwl_hcmd_ops *hcmd;
+       const struct iwl_hcmd_utils_ops *utils;
+       const struct iwl_led_ops *led;
+       const struct iwl_nic_ops *nic;
+       const struct iwl_legacy_ops *legacy;
+       const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct iwl_mod_params {
+       int sw_crypto;          /* def: 0 = using hardware encryption */
+       int disable_hw_scan;    /* def: 0 = use h/w scan */
+       int num_of_queues;      /* def: HW dependent */
+       int disable_11n;        /* def: 0 = 11n capabilities enabled */
+       int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
+       int antenna;            /* def: 0 = both antennas (use diversity) */
+       int restart_fw;         /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: scale the LED on/off time per HW according to the
+ *     deviation from the desired LED blink frequency.
+ *     The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @plcp_delta_threshold: PLCP error rate threshold used to trigger
+ *     radio retuning when the receive PLCP error rate is high
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature is reported by uCode in Kelvin
+ * @max_event_log_size: size of the buffer used for uCode event logging
+ * @ucode_tracing: support uCode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ *     sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ *     chain noise calibration operation
+ */
+struct iwl_base_params {
+       int eeprom_size;
+       int num_of_queues;      /* def: HW dependent */
+       int num_of_ampdu_queues;/* def: HW dependent */
+       /* for iwl_legacy_apm_init() */
+       u32 pll_cfg_val;
+       bool set_l0s;
+       bool use_bsm;
+
+       u16 led_compensation;
+       int chain_noise_num_beacons;
+       u8 plcp_delta_threshold;
+       unsigned int wd_timeout;
+       bool temperature_kelvin;
+       u32 max_event_log_size;
+       const bool ucode_tracing;
+       const bool sensitivity_calib_by_driver;
+       const bool chain_noise_calib_by_driver;
+};
+
+/**
+ * struct iwl_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *     (.ucode) will be added to filename before loading from disk. The
+ *     filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_rx_antennas: RX antennas available for scan operation, per band
+ * @scan_tx_antennas: TX antennas available for scan operation, per band
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible with respect to the API
+ * version. The driver specifies which APIs it supports (with @ucode_api_max
+ * being the highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @iwl_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
+ *     Driver interacts with Firmware API version >= 2.
+ * } else {
+ *     Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * iwl_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct iwl_cfg {
+       /* params specific to an individual device within a device family */
+       const char *name;
+       const char *fw_name_pre;
+       const unsigned int ucode_api_max;
+       const unsigned int ucode_api_min;
+       u8   valid_tx_ant;
+       u8   valid_rx_ant;
+       unsigned int sku;
+       u16  eeprom_ver;
+       u16  eeprom_calib_ver;
+       const struct iwl_ops *ops;
+       /* module based parameters which can be set from modprobe cmd */
+       const struct iwl_mod_params *mod_params;
+       /* params not likely to change within a device family */
+       struct iwl_base_params *base_params;
+       /* params likely to change within a device family */
+       u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+       u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+       enum iwl_led_mode led_mode;
+};
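To illustrate how these fields fit together, a made-up configuration entry might look like the sketch below; every value, the firmware prefix, and the referenced ops/params tables are placeholders, not a real supported device:

/* Purely illustrative -- not an actual device entry. */
static struct iwl_cfg iwl_example_cfg = {
	.name = "Example Wireless ABG",
	.fw_name_pre = "iwlwifi-XXXX-",	/* loaded as iwlwifi-XXXX-<api>.ucode */
	.ucode_api_max = 2,
	.ucode_api_min = 1,
	.sku = IWL_SKU_A | IWL_SKU_G,
	.valid_tx_ant = 0x1,
	.valid_rx_ant = 0x3,
	.ops = &iwl_example_ops,			/* hypothetical ops table */
	.mod_params = &iwl_example_mod_params,		/* hypothetical module params */
	.base_params = &iwl_example_base_params,	/* hypothetical base params */
};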
+
+/***************************
+ *   L i b                 *
+ ***************************/
+
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+                   const struct ieee80211_tx_queue_params *params);
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       int hw_decrypt);
+int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx);
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx);
+int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
+                       struct ieee80211_channel *ch,
+                       struct iwl_rxon_context *ctx);
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           enum ieee80211_band band,
+                           struct ieee80211_vif *vif);
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+                                 enum ieee80211_band band);
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+                       struct iwl_ht_config *ht_conf);
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_sta_ht_cap *ht_cap);
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+                                  struct iwl_rxon_context *ctx);
+void iwl_legacy_set_rate(struct iwl_priv *priv);
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+                          struct ieee80211_hdr *hdr,
+                          u32 decrypt_res,
+                          struct ieee80211_rx_status *stats);
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
+int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif);
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif);
+int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            enum nl80211_iftype newtype, bool newp2p);
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
+void iwl_legacy_txq_mem(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+                               u16 length, struct ieee80211_hdr *header);
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+                               u16 length, struct ieee80211_hdr *header);
+const char *iwl_legacy_get_mgmt_string(int cmd);
+const char *iwl_legacy_get_ctrl_string(int cmd);
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
+void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
+                     u16 len);
+#else
+static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+       return 0;
+}
+static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
+                                   __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ *****************************************************/
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+                          struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+                       struct iwl_rx_mem_buffer *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
+void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+                                 struct iwl_rx_queue *q);
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
+void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb);
+/* Handlers */
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+                               struct iwl_rx_packet *pkt);
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq);
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                     int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
+                       struct iwl_tx_queue *txq,
+                       int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void iwl_legacy_init_scan_params(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_legacy_force_scan_end(struct iwl_priv *priv);
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif,
+                   struct cfg80211_scan_request *req);
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
+u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
+                       struct ieee80211_mgmt *frame,
+                      const u8 *ta, const u8 *ie, int ie_len, int left);
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
+u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+                             enum ieee80211_band band,
+                             u8 n_probes);
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+                              enum ieee80211_band band,
+                              struct ieee80211_vif *vif);
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
+#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
+
+#define IWL_SCAN_CHECK_WATCHDOG                (HZ * 7)
+
+/*****************************************************
+ *   S e n d i n g     H o s t     C o m m a n d s   *
+ *****************************************************/
+
+const char *iwl_legacy_get_cmd_string(u8 cmd);
+int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
+                                  struct iwl_host_cmd *cmd);
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+                                 u16 len, const void *data);
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
+                          const void *data,
+                          void (*callback)(struct iwl_priv *priv,
+                                           struct iwl_device_cmd *cmd,
+                                           struct iwl_rx_packet *pkt));
+
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+
+
+/*****************************************************
+ * PCI                                              *
+ *****************************************************/
+
+static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
+{
+       int pos;
+       u16 pci_lnk_ctl;
+       pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
+       pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+       return pci_lnk_ctl;
+}
+
+void iwl_legacy_bg_watchdog(unsigned long data);
+u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+                                       u32 usec, u32 beacon_interval);
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+                          u32 addon, u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int iwl_legacy_pci_suspend(struct device *device);
+int iwl_legacy_pci_resume(struct device *device);
+extern const struct dev_pm_ops iwl_legacy_pm_ops;
+
+#define IWL_LEGACY_PM_OPS      (&iwl_legacy_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IWL_LEGACY_PM_OPS      NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+*  Error Handling Debugging
+******************************************************/
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
+                          bool full_log, char **buf, bool display);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+                            struct iwl_rxon_context *ctx);
+#else
+static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+                                          struct iwl_rxon_context *ctx)
+{
+}
+#endif
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
+
+/*****************************************************
+*  GEOS
+******************************************************/
+int iwl_legacy_init_geos(struct iwl_priv *priv);
+void iwl_legacy_free_geos(struct iwl_priv *priv);
+
+/*************** DRIVER STATUS FUNCTIONS   *****/
+
+#define STATUS_HCMD_ACTIVE     0       /* host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
+#define STATUS_INT_ENABLED     2
+#define STATUS_RF_KILL_HW      3
+#define STATUS_CT_KILL         4
+#define STATUS_INIT            5
+#define STATUS_ALIVE           6
+#define STATUS_READY           7
+#define STATUS_TEMPERATURE     8
+#define STATUS_GEO_CONFIGURED  9
+#define STATUS_EXIT_PENDING    10
+#define STATUS_STATISTICS      12
+#define STATUS_SCANNING                13
+#define STATUS_SCAN_ABORTING   14
+#define STATUS_SCAN_HW         15
+#define STATUS_POWER_PMI       16
+#define STATUS_FW_ERROR                17
+
+
+static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
+{
+       /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+        * set but EXIT_PENDING is not */
+       return test_bit(STATUS_READY, &priv->status) &&
+              test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
+              !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_legacy_is_init(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_INIT, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
+{
+       return iwl_legacy_is_rfkill_hw(priv);
+}
+
+static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
+{
+
+       if (iwl_legacy_is_rfkill(priv))
+               return 0;
+
+       return iwl_legacy_is_ready(priv);
+}
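These helpers are typically used as early-exit guards in the mac80211 callbacks; a minimal sketch of that pattern (hypothetical function, error code chosen for illustration):

/* Illustrative guard pattern for a mac80211 callback. */
static int example_guarded_op(struct iwl_priv *priv)
{
	/* Refuse to touch the hardware while rfkill is asserted or the
	 * device is not fully initialized. */
	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	return 0;
}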
+
+extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
+extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
+                                      u8 flags, bool clear);
+void iwl_legacy_apm_stop(struct iwl_priv *priv);
+int iwl_legacy_apm_init(struct iwl_priv *priv);
+
+int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx);
+static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
+                                     struct iwl_rxon_context *ctx)
+{
+       return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
+}
+static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
+                                     struct iwl_rxon_context *ctx)
+{
+       return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
+}
+static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
+                       struct iwl_priv *priv, enum ieee80211_band band)
+{
+       return priv->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes);
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+                               struct ieee80211_tx_info *info,
+                               __le16 fc, __le32 *tx_flags);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data);
+
+#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644 (file)
index 0000000..668a961
--- /dev/null
@@ -0,0 +1,422 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_csr_h__
+#define __iwl_legacy_csr_h__
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_legacy_write_direct32() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE:  Device does need to be awake in order to read this memory
+ *        via CSR_EEPROM register
+ */
+#define CSR_BASE    (0x000)
+
+#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack*/
+#define CSR_GPIO_IN             (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET               (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_GP_CNTRL            (CSR_BASE+0x024)
+
+/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define CSR_INT_PERIODIC_REG   (CSR_BASE+0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-8:  Reserved
+ *  7-4:  Type of device:  see CSR_HW_REV_TYPE_xxx definitions
+ *  3-2:  Revision step:  0 = A, 1 = B, 2 = C, 3 = D
+ *  1-0:  "Dash" (-) value, as in A-1, etc.
+ *
+ * NOTE:  Revision step affects calculation of CCK txpower for 4965.
+ * NOTE:  See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
+ */
+#define CSR_HW_REV              (CSR_BASE+0x028)
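A small decoding sketch for this register, using the iwl_read32() accessor described above; the shifts and masks simply spell out the bit layout in the preceding comment, and the helper name is made up:

/* Illustrative only: decode CSR_HW_REV per the bit layout above. */
static void example_print_hw_rev(struct iwl_priv *priv)
{
	u32 hw_rev = iwl_read32(priv, CSR_HW_REV);
	u8 type = (hw_rev >> 4) & 0xF;	/* bits 7-4: device type */
	u8 step = (hw_rev >> 2) & 0x3;	/* bits 3-2: revision step (A..D) */
	u8 dash = hw_rev & 0x3;		/* bits 1-0: "dash" value */

	IWL_INFO(priv, "HW rev: type 0x%x, step %c, dash %u\n",
		 type, 'A' + step, dash);
}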
+
+/*
+ * EEPROM memory reads
+ *
+ * NOTE:  Device must be awake, initialized via apm_ops.init(),
+ *        in order to read.
+ */
+#define CSR_EEPROM_REG          (CSR_BASE+0x02c)
+#define CSR_EEPROM_GP           (CSR_BASE+0x030)
+
+#define CSR_GIO_REG            (CSR_BASE+0x03C)
+#define CSR_GP_UCODE_REG       (CSR_BASE+0x048)
+#define CSR_GP_DRIVER_REG      (CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define CSR_UCODE_DRV_GP1       (CSR_BASE+0x054)
+#define CSR_UCODE_DRV_GP1_SET   (CSR_BASE+0x058)
+#define CSR_UCODE_DRV_GP1_CLR   (CSR_BASE+0x05c)
+#define CSR_UCODE_DRV_GP2       (CSR_BASE+0x060)
+
+#define CSR_LED_REG             (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG   (CSR_BASE+0x0A0)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
+
+/* Analog phase-lock-loop configuration  */
+#define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)
+
+/*
+ * CSR Hardware Revision Workaround Register.  Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation.  Used for 4965 only.
+ * See also CSR_HW_REV register.
+ * Bit fields:
+ *  3-2:  0 = A, 1 = B, 2 = C, 3 = D step
+ *  1-0:  "Dash" (-) value, as in C-1, etc.
+ */
+#define CSR_HW_REV_WA_REG              (CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG           (CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG      (CSR_BASE+0x250)
+
+/* Bits for CSR_HW_IF_CONFIG_REG */
+#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R      (0x00000010)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER     (0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI        (0x00000100)
+#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI      (0x00000200)
+
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB         (0x00000100)
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM         (0x00000200)
+#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC            (0x00000400)
+#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE         (0x00000800)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A    (0x00000000)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B    (0x00001000)
+
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A  (0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM        (0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY     (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE             (0x08000000) /* WAKE_ME */
+
+#define CSR_INT_PERIODIC_DIS                   (0x00) /* disable periodic int*/
+#define CSR_INT_PERIODIC_ENA                   (0xFF) /* 255*32 usec ~ 8 msec*/
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define CSR_INT_BIT_FH_RX        (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR       (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC         (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP       (1 << 1)  /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE        (1 << 0)  /* uCode interrupts once it initializes */
+
+#define CSR_INI_SET_MASK       (CSR_INT_BIT_FH_RX   | \
+                                CSR_INT_BIT_HW_ERR  | \
+                                CSR_INT_BIT_FH_TX   | \
+                                CSR_INT_BIT_SW_ERR  | \
+                                CSR_INT_BIT_RF_KILL | \
+                                CSR_INT_BIT_SW_RX   | \
+                                CSR_INT_BIT_WAKEUP  | \
+                                CSR_INT_BIT_ALIVE)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define CSR_FH_INT_BIT_ERR       (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)  /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)  /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)  /* Tx channel 0 */
+
+#define CSR39_FH_INT_RX_MASK   (CSR_FH_INT_BIT_HI_PRIOR | \
+                                CSR39_FH_INT_BIT_RX_CHNL2 | \
+                                CSR_FH_INT_BIT_RX_CHNL1 | \
+                                CSR_FH_INT_BIT_RX_CHNL0)
+
+
+#define CSR39_FH_INT_TX_MASK   (CSR39_FH_INT_BIT_TX_CHNL6 | \
+                                CSR_FH_INT_BIT_TX_CHNL1 | \
+                                CSR_FH_INT_BIT_TX_CHNL0)
+
+#define CSR49_FH_INT_RX_MASK   (CSR_FH_INT_BIT_HI_PRIOR | \
+                                CSR_FH_INT_BIT_RX_CHNL1 | \
+                                CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR49_FH_INT_TX_MASK   (CSR_FH_INT_BIT_TX_CHNL1 | \
+                                CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define CSR_GPIO_IN_BIT_AUX_POWER                   (0x00000200)
+#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC                (0x00000000)
+#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC               (0x00000200)
+
+/* RESET */
+#define CSR_RESET_REG_FLAG_NEVO_RESET                (0x00000001)
+#define CSR_RESET_REG_FLAG_FORCE_NMI                 (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET                  (0x00000080)
+#define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
+#define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ *    27:  HW_RF_KILL_SW
+ *         Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24:  POWER_SAVE_TYPE
+ *         Indicates current power-saving mode:
+ *         000 -- No power saving
+ *         001 -- MAC power-down
+ *         010 -- PHY (radio) power-down
+ *         011 -- Error
+ *   9-6:  SYS_CONFIG
+ *         Indicates current system configuration, reflecting pins on chip
+ *         as forced high/low by device circuit board.
+ *     4:  GOING_TO_SLEEP
+ *         Indicates MAC is entering a power-saving sleep power-down.
+ *         Not a good time to access device-internal resources.
+ *     3:  MAC_ACCESS_REQ
+ *         Host sets this to request and maintain MAC wakeup, to allow host
+ *         access to device-internal resources.  Host must wait for
+ *         MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ *         device registers.
+ *     2:  INIT_DONE
+ *         Host sets this to put device into fully operational D0 power mode.
+ *         Host resets this after SW_RESET to put device into low power mode.
+ *     0:  MAC_CLOCK_READY
+ *         Indicates MAC (ucode processor, etc.) is powered up and can run.
+ *         Internal resources are accessible.
+ *         NOTE:  This does not indicate that the processor is actually running.
+ *         NOTE:  This does not indicate that 4965 or 3945 has completed
+ *                init or post-power-down restore of internal SRAM memory.
+ *                Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ *                SRAM is restored and uCode is in normal operation mode.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ *         NOTE:  After device reset, this bit remains "0" until host sets
+ *                INIT_DONE
+ */
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY        (0x00000001)
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE              (0x00000004)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ         (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
+
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN           (0x00000001)
+
+#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE         (0x07000000)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE         (0x04000000)
+#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
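A simplified sketch of the wake handshake described above (request MAC access, then wait for MAC_CLOCK_READY); real driver code also checks GOING_TO_SLEEP and uses the driver's poll helpers, locking, and proper timeouts:

/* Illustrative MAC wake sequence -- simplified for clarity. */
static int example_wake_mac(struct iwl_priv *priv)
{
	u32 gp;
	int t;

	/* Request MAC wakeup... */
	gp = iwl_read32(priv, CSR_GP_CNTRL);
	iwl_write32(priv, CSR_GP_CNTRL,
		    gp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* ...then wait for MAC_CLOCK_READY before touching non-CSR registers. */
	for (t = 0; t < 50; t++) {
		if (iwl_read32(priv, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}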
+
+
+/* EEPROM REG */
+#define CSR_EEPROM_REG_READ_VALID_MSK  (0x00000001)
+#define CSR_EEPROM_REG_BIT_CMD         (0x00000002)
+#define CSR_EEPROM_REG_MSK_ADDR                (0x0000FFFC)
+#define CSR_EEPROM_REG_MSK_DATA                (0xFFFF0000)
+
+/* EEPROM GP */
+#define CSR_EEPROM_GP_VALID_MSK                (0x00000007) /* signature */
+#define CSR_EEPROM_GP_IF_OWNER_MSK     (0x00000180)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K                (0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K                (0x00000004)
+
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
+
+
+/* CSR GIO */
+#define CSR_GIO_REG_VAL_L0S_ENABLED    (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ *     4:  UCODE_DISABLE
+ *         Host sets this to request permanent halt of uCode, same as
+ *         sending CARD_STATE command with "halt" bit set.
+ *     3:  CT_KILL_EXIT
+ *         Host sets this to request exit from CT_KILL state, i.e. host thinks
+ *         device temperature is low enough to continue normal operation.
+ *     2:  CMD_BLOCKED
+ *         Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ *         to release uCode to clear all Tx and command queues, enter
+ *         unassociated mode, and power down.
+ *         NOTE:  Some devices also use HBUS_TARG_MBX_C register for this bit.
+ *     1:  SW_BIT_RFKILL
+ *         Host sets this when issuing CARD_STATE command to request
+ *         device sleep.
+ *     0:  MAC_SLEEP
+ *         uCode sets this when preparing a power-saving power-down.
+ *         uCode resets this when power-up is complete and SRAM is sane.
+ *         NOTE:  3945/4965 saves internal SRAM data to host when powering down,
+ *                and must restore this data after powering back up.
+ *                MAC_SLEEP is the best indication that restore is complete.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ */
+#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP             (0x00000001)
+#define CSR_UCODE_SW_BIT_RFKILL                     (0x00000002)
+#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED           (0x00000004)
+#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT      (0x00000008)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
+
+/* LED */
+#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define CSR_LED_REG_TRUN_ON (0x78)
+#define CSR_LED_REG_TRUN_OFF (0x38)
+
+/* ANA_PLL */
+#define CSR39_ANA_PLL_CFG_VAL        (0x01000000)
+
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL       (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define HBUS_BASE      (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job.  Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ *  0-31:  memory address within device
+ */
+#define HBUS_TARG_MEM_RADDR     (HBUS_BASE+0x00c)
+#define HBUS_TARG_MEM_WADDR     (HBUS_BASE+0x010)
+#define HBUS_TARG_MEM_WDAT      (HBUS_BASE+0x018)
+#define HBUS_TARG_MEM_RDAT      (HBUS_BASE+0x01c)
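As a sketch of the access pattern described above (write the address register, then read the data register), assuming the iwl_legacy_read/write_direct32() accessors take (priv, reg[, value]) like the iwl_read32()/iwl_write32() family, and that the caller already holds MAC access and the required lock:

/* Illustrative indirect SRAM read; caller must already have requested
 * MAC access via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. */
static u32 example_read_sram_word(struct iwl_priv *priv, u32 addr)
{
	/* Latch the target address, then one read of the auto-incrementing
	 * data register returns a single dword. */
	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
	return iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
}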
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C         (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED         (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.).  First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ *  0-15:  register address (offset) within device
+ * 24-25:  (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define HBUS_TARG_PRPH_WADDR    (HBUS_BASE+0x044)
+#define HBUS_TARG_PRPH_RADDR    (HBUS_BASE+0x048)
+#define HBUS_TARG_PRPH_WDAT     (HBUS_BASE+0x04c)
+#define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ *  0-7:  queue write index
+ * 11-8:  queue selector
+ */
+#define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
+
+#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644 (file)
index 0000000..ae13112
--- /dev/null
@@ -0,0 +1,198 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_debug_h__
+#define __iwl_legacy_debug_h__
+
+struct iwl_priv;
+extern u32 iwlegacy_debug_level;
+
+#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
+#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
+#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
+#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
+
+#define iwl_print_hex_error(priv, p, len)                               \
+do {                                                                   \
+       print_hex_dump(KERN_ERR, "iwl data: ",                          \
+                      DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);           \
+} while (0)
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define IWL_DEBUG(__priv, level, fmt, args...)                         \
+do {                                                                   \
+       if (iwl_legacy_get_debug_level(__priv) & (level))                       \
+               dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),         \
+                        "%c %s " fmt, in_interrupt() ? 'I' : 'U',      \
+                       __func__ , ## args);                            \
+} while (0)
+
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)                   \
+do {                                                                   \
+       if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit())  \
+               dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),         \
+                       "%c %s " fmt, in_interrupt() ? 'I' : 'U',       \
+                        __func__ , ## args);                           \
+} while (0)
+
+#define iwl_print_hex_dump(priv, level, p, len)                        \
+do {                                                                   \
+       if (iwl_legacy_get_debug_level(priv) & level)                           \
+               print_hex_dump(KERN_DEBUG, "iwl data: ",                \
+                              DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);   \
+} while (0)
+
+#else
+#define IWL_DEBUG(__priv, level, fmt, args...)
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
+static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
+                                     const void *p, u32 len)
+{}
+#endif                         /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
+#else
+static inline int
+iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+       return 0;
+}
+static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+}
+#endif                         /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IWL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
+ * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ *     /sys/module/iwl4965/parameters/debug{50}
+ *     /sys/module/iwl3945/parameters/debug
+ *     /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
+ */
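A usage sketch following the instructions above; the helper name and message are made up, and output appears only when CONFIG_IWLWIFI_LEGACY_DEBUG=y and IWL_DL_SCAN is set in the active debug level:

/* Illustrative use of an existing classification (defined below). */
static inline void example_debug_scan(struct iwl_priv *priv, int n_channels)
{
	IWL_DEBUG(priv, IWL_DL_SCAN, "Starting scan on %d channel(s)\n",
		  n_channels);
}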
+
+/* 0x0000000F - 0x00000001 */
+#define IWL_DL_INFO            (1 << 0)
+#define IWL_DL_MAC80211                (1 << 1)
+#define IWL_DL_HCMD            (1 << 2)
+#define IWL_DL_STATE           (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_MACDUMP         (1 << 4)
+#define IWL_DL_HCMD_DUMP       (1 << 5)
+#define IWL_DL_EEPROM          (1 << 6)
+#define IWL_DL_RADIO           (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IWL_DL_POWER           (1 << 8)
+#define IWL_DL_TEMP            (1 << 9)
+#define IWL_DL_NOTIF           (1 << 10)
+#define IWL_DL_SCAN            (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IWL_DL_ASSOC           (1 << 12)
+#define IWL_DL_DROP            (1 << 13)
+#define IWL_DL_TXPOWER         (1 << 14)
+#define IWL_DL_AP              (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IWL_DL_FW              (1 << 16)
+#define IWL_DL_RF_KILL         (1 << 17)
+#define IWL_DL_FW_ERRORS       (1 << 18)
+#define IWL_DL_LED             (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IWL_DL_RATE            (1 << 20)
+#define IWL_DL_CALIB           (1 << 21)
+#define IWL_DL_WEP             (1 << 22)
+#define IWL_DL_TX              (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IWL_DL_RX              (1 << 24)
+#define IWL_DL_ISR             (1 << 25)
+#define IWL_DL_HT              (1 << 26)
+#define IWL_DL_IO              (1 << 27)
+/* 0xF0000000 - 0x10000000 */
+#define IWL_DL_11H             (1 << 28)
+#define IWL_DL_STATS           (1 << 29)
+#define IWL_DL_TX_REPLY                (1 << 30)
+#define IWL_DL_QOS             (1 << 31)
+
+#define IWL_DEBUG_INFO(p, f, a...)     IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_MACDUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
+#define IWL_DEBUG_TEMP(p, f, a...)     IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
+#define IWL_DEBUG_SCAN(p, f, a...)     IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
+#define IWL_DEBUG_RX(p, f, a...)       IWL_DEBUG(p, IWL_DL_RX, f, ## a)
+#define IWL_DEBUG_TX(p, f, a...)       IWL_DEBUG(p, IWL_DL_TX, f, ## a)
+#define IWL_DEBUG_ISR(p, f, a...)      IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
+#define IWL_DEBUG_LED(p, f, a...)      IWL_DEBUG(p, IWL_DL_LED, f, ## a)
+#define IWL_DEBUG_WEP(p, f, a...)      IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
+#define IWL_DEBUG_HC(p, f, a...)       IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_HC_DUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
+#define IWL_DEBUG_EEPROM(p, f, a...)   IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_CALIB(p, f, a...)    IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
+#define IWL_DEBUG_FW(p, f, a...)       IWL_DEBUG(p, IWL_DL_FW, f, ## a)
+#define IWL_DEBUG_RF_KILL(p, f, a...)  IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_DROP(p, f, a...)     IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_DROP_LIMIT(p, f, a...)       \
+               IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_AP(p, f, a...)       IWL_DEBUG(p, IWL_DL_AP, f, ## a)
+#define IWL_DEBUG_TXPOWER(p, f, a...)  IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
+#define IWL_DEBUG_IO(p, f, a...)       IWL_DEBUG(p, IWL_DL_IO, f, ## a)
+#define IWL_DEBUG_RATE(p, f, a...)     IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_RATE_LIMIT(p, f, a...)       \
+               IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_NOTIF(p, f, a...)    IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
+#define IWL_DEBUG_ASSOC(p, f, a...)    \
+               IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...)      \
+               IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_HT(p, f, a...)       IWL_DEBUG(p, IWL_DL_HT, f, ## a)
+#define IWL_DEBUG_STATS(p, f, a...)    IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_STATS_LIMIT(p, f, a...)      \
+               IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
+               IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_QOS(p, f, a...)      IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_RADIO(p, f, a...)    IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644 (file)
index 0000000..2d32438
--- /dev/null
@@ -0,0 +1,1467 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* helpers used to create the debugfs files; on failure they jump to the
+ * caller's "err" label */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
+       if (!debugfs_create_file(#name, mode, parent, priv,             \
+                        &iwl_legacy_dbgfs_##name##_ops))               \
+               goto err;                                               \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {                       \
+       struct dentry *__tmp;                                           \
+       __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,           \
+                                   parent, ptr);                       \
+       if (IS_ERR(__tmp) || !__tmp)                                    \
+               goto err;                                               \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do {                                \
+       struct dentry *__tmp;                                           \
+       __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,            \
+                                  parent, ptr);                        \
+       if (IS_ERR(__tmp) || !__tmp)                                    \
+               goto err;                                               \
+} while (0)
+
+/* forward declarations for the per-file read/write handlers used by the
+ * file_operations macros below */
+#define DEBUGFS_READ_FUNC(name)                                         \
+static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file,               \
+                                       char __user *user_buf,          \
+                                       size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)                                        \
+static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file,              \
+                                       const char __user *user_buf,    \
+                                       size_t count, loff_t *ppos);
+
+
+static int
+iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name)                            \
+       DEBUGFS_READ_FUNC(name);                                        \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
+       .read = iwl_legacy_dbgfs_##name##_read,                         \
+       .open = iwl_legacy_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                                  \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)                           \
+       DEBUGFS_WRITE_FUNC(name);                                       \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
+       .write = iwl_legacy_dbgfs_##name##_write,                       \
+       .open = iwl_legacy_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                                  \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)                           \
+       DEBUGFS_READ_FUNC(name);                                        \
+       DEBUGFS_WRITE_FUNC(name);                                       \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
+       .write = iwl_legacy_dbgfs_##name##_write,                       \
+       .read = iwl_legacy_dbgfs_##name##_read,                         \
+       .open = iwl_legacy_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                                  \
+};
+
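+/*
+ * The read handlers below follow one pattern: format driver state into a
+ * kernel buffer with scnprintf(), copy it out with
+ * simple_read_from_buffer() and free the buffer.  The write handlers copy
+ * a short string from userspace, validate it with sscanf() and update or
+ * reset the corresponding driver state.
+ */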
+static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+
+       int cnt;
+       ssize_t ret;
+       const size_t bufsz = 100 +
+               sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_mgmt_string(cnt),
+                                priv->tx_stats.mgmt[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_ctrl_string(cnt),
+                                priv->tx_stats.ctrl[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+                        priv->tx_stats.data_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+                        priv->tx_stats.data_bytes);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       u32 clear_flag;
+       char buf[8];
+       int buf_size;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%x", &clear_flag) != 1)
+               return -EFAULT;
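+       /* the parsed value is only used to validate the input; any valid
+        * hex write clears the traffic statistics */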
+       iwl_legacy_clear_traffic_stats(priv);
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+       int cnt;
+       ssize_t ret;
+       const size_t bufsz = 100 +
+               sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_mgmt_string(cnt),
+                                priv->rx_stats.mgmt[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_ctrl_string(cnt),
+                                priv->rx_stats.ctrl[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+                        priv->rx_stats.data_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+                        priv->rx_stats.data_bytes);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
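+/* used by iwl_legacy_dbgfs_sram_read() below to mask off the bytes of the
+ * last word that fall outside the requested SRAM length */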
+static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       u32 val;
+       char *buf;
+       ssize_t ret;
+       int i;
+       int pos = 0;
+       struct iwl_priv *priv = file->private_data;
+       size_t bufsz;
+
+       /* default is to dump the entire data segment */
+       if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+               priv->dbgfs_sram_offset = 0x800000;
+               if (priv->ucode_type == UCODE_INIT)
+                       priv->dbgfs_sram_len = priv->ucode_init_data.len;
+               else
+                       priv->dbgfs_sram_len = priv->ucode_data.len;
+       }
+       bufsz =  30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+                       priv->dbgfs_sram_len);
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+                       priv->dbgfs_sram_offset);
+       for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+               val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset +
+                                       priv->dbgfs_sram_len - i);
+               if (i < 4) {
+                       switch (i) {
+                       case 1:
+                               val &= BYTE1_MASK;
+                               break;
+                       case 2:
+                               val &= BYTE2_MASK;
+                               break;
+                       case 3:
+                               val &= BYTE3_MASK;
+                               break;
+                       }
+               }
+               if (!(i % 16))
+                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+               pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
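+/* writing "offset,len" (both hex) selects the SRAM window dumped by the
+ * read handler above; any other input clears both values so the next read
+ * falls back to the full data segment */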
+static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[64];
+       int buf_size;
+       u32 offset, len;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+               priv->dbgfs_sram_offset = offset;
+               priv->dbgfs_sram_len = len;
+       } else {
+               priv->dbgfs_sram_offset = 0;
+               priv->dbgfs_sram_len = 0;
+       }
+
+       return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_station_entry *station;
+       int max_sta = priv->hw_params.max_stations;
+       char *buf;
+       int i, j, pos = 0;
+       ssize_t ret;
+       /* Add 30 for initial string */
+       const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
+
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+                       priv->num_stations);
+
+       for (i = 0; i < max_sta; i++) {
+               station = &priv->stations[i];
+               if (!station->used)
+                       continue;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "station %d - addr: %pM, flags: %#x\n",
+                                i, station->sta.sta.addr,
+                                station->sta.station_flags_msk);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+               for (j = 0; j < MAX_TID_COUNT; j++) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+                               j, station->tid[j].seq_number,
+                               station->tid[j].agg.txq_id,
+                               station->tid[j].agg.frame_count,
+                               station->tid[j].tfds_in_queue,
+                               station->tid[j].agg.start_idx,
+                               station->tid[j].agg.bitmap,
+                               station->tid[j].agg.rate_n_flags);
+
+                       if (station->tid[j].agg.wait_for_ba)
+                               pos += scnprintf(buf + pos, bufsz - pos,
+                                                " - waitforba");
+                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+               }
+
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
+                                      char __user *user_buf,
+                                      size_t count,
+                                      loff_t *ppos)
+{
+       ssize_t ret;
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0, ofs = 0, buf_size = 0;
+       const u8 *ptr;
+       char *buf;
+       u16 eeprom_ver;
+       size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+       buf_size = 4 * eeprom_len + 256;
+
+       if (eeprom_len % 16) {
+               IWL_ERR(priv, "NVM size is not multiple of 16.\n");
+               return -ENODATA;
+       }
+
+       ptr = priv->eeprom;
+       if (!ptr) {
+               IWL_ERR(priv, "Invalid EEPROM memory\n");
+               return -ENOMEM;
+       }
+
+       /* 4 characters for byte 0xYY */
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+       eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+       pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
+                       "version: 0x%x\n", eeprom_ver);
+       for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+               hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+                                  buf_size - pos, 0);
+               pos += strlen(buf + pos);
+               if (buf_size - pos > 0)
+                       buf[pos++] = '\n';
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+       ssize_t ret = -ENOMEM;
+
+       ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+                                       priv, true, &buf, true);
+       if (buf) {
+               ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+               kfree(buf);
+       }
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       u32 event_log_flag;
+       char buf[8];
+       int buf_size;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &event_log_flag) != 1)
+               return -EFAULT;
+       if (event_log_flag == 1)
+               priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+                                                       NULL, false);
+
+       return count;
+}
+
+
+
+static ssize_t
+iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct ieee80211_channel *channels = NULL;
+       const struct ieee80211_supported_band *supp_band = NULL;
+       int pos = 0, i, bufsz = PAGE_SIZE;
+       char *buf;
+       ssize_t ret;
+
+       if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+       if (supp_band) {
+               channels = supp_band->channels;
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+                               supp_band->n_channels);
+
+               for (i = 0; i < supp_band->n_channels; i++)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%d: %ddBm: BSS%s%s, %s.\n",
+                               channels[i].hw_value,
+                               channels[i].max_power,
+                               channels[i].flags & IEEE80211_CHAN_RADAR ?
+                               " (IEEE 802.11h required)" : "",
+                               ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+                               || (channels[i].flags &
+                               IEEE80211_CHAN_RADAR)) ? "" :
+                               ", IBSS",
+                               channels[i].flags &
+                               IEEE80211_CHAN_PASSIVE_SCAN ?
+                               "passive only" : "active/passive");
+       }
+       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+       if (supp_band) {
+               channels = supp_band->channels;
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Displaying %d channels in 5.2GHz band (802.11a):\n",
+                               supp_band->n_channels);
+
+               for (i = 0; i < supp_band->n_channels; i++)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%d: %ddBm: BSS%s%s, %s.\n",
+                               channels[i].hw_value,
+                               channels[i].max_power,
+                               channels[i].flags & IEEE80211_CHAN_RADAR ?
+                               " (IEEE 802.11h required)" : "",
+                               ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+                               || (channels[i].flags &
+                               IEEE80211_CHAN_RADAR)) ? "" :
+                               ", IBSS",
+                               channels[i].flags &
+                               IEEE80211_CHAN_PASSIVE_SCAN ?
+                               "passive only" : "active/passive");
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char buf[512];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
+               test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
+               test_bit(STATUS_INT_ENABLED, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
+               test_bit(STATUS_RF_KILL_HW, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
+               test_bit(STATUS_CT_KILL, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
+               test_bit(STATUS_INIT, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
+               test_bit(STATUS_ALIVE, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
+               test_bit(STATUS_READY, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
+               test_bit(STATUS_TEMPERATURE, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
+               test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
+               test_bit(STATUS_EXIT_PENDING, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
+               test_bit(STATUS_STATISTICS, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
+               test_bit(STATUS_SCANNING, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
+               test_bit(STATUS_SCAN_ABORTING, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
+               test_bit(STATUS_SCAN_HW, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
+               test_bit(STATUS_POWER_PMI, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
+               test_bit(STATUS_FW_ERROR, &priv->status));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = 24 * 64; /* 24 items * 64 char per item */
+       ssize_t ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "Interrupt Statistics Report:\n");
+
+       pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+               priv->isr_stats.hw);
+       pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+               priv->isr_stats.sw);
+       if (priv->isr_stats.sw || priv->isr_stats.hw) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                       "\tLast Restarting Code:  0x%X\n",
+                       priv->isr_stats.err_code);
+       }
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+               priv->isr_stats.sch);
+       pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+               priv->isr_stats.alive);
+#endif
+       pos += scnprintf(buf + pos, bufsz - pos,
+               "HW RF KILL switch toggled:\t %u\n",
+               priv->isr_stats.rfkill);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+               priv->isr_stats.ctkill);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+               priv->isr_stats.wakeup);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+               "Rx command responses:\t\t %u\n",
+               priv->isr_stats.rx);
+       for (cnt = 0; cnt < REPLY_MAX; cnt++) {
+               if (priv->isr_stats.rx_handlers[cnt] > 0)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tRx handler[%36s]:\t\t %u\n",
+                               iwl_legacy_get_cmd_string(cnt),
+                               priv->isr_stats.rx_handlers[cnt]);
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+               priv->isr_stats.tx);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+               priv->isr_stats.unhandled);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       u32 reset_flag;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%x", &reset_flag) != 1)
+               return -EFAULT;
+       if (reset_flag == 0)
+               iwl_legacy_clear_isr_stats(priv);
+
+       return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_rxon_context *ctx;
+       int pos = 0, i;
+       char buf[256 * NUM_IWL_RXON_CTX];
+       const size_t bufsz = sizeof(buf);
+
+       for_each_context(priv, ctx) {
+               pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+                                ctx->ctxid);
+               for (i = 0; i < AC_NUM; i++) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tcw_min\tcw_max\taifsn\ttxop\n");
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+                               ctx->qos_data.def_qos_parm.ac[i].cw_min,
+                               ctx->qos_data.def_qos_parm.ac[i].cw_max,
+                               ctx->qos_data.def_qos_parm.ac[i].aifsn,
+                               ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+               }
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int ht40;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &ht40) != 1)
+               return -EFAULT;
+       if (!iwl_legacy_is_any_associated(priv))
+               priv->disable_ht40 = ht40 ? true : false;
+       else {
+               IWL_ERR(priv, "STA associated with AP - "
+                       "changing HT40 channel support is not allowed\n");
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[100];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "11n 40MHz Mode: %s\n",
+                       priv->disable_ht40 ? "Disabled" : "Enabled");
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
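+/* file_operations for the entries that iwl_legacy_dbgfs_register() below
+ * creates under the per-device "data" debugfs directory */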
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0, ofs = 0;
+       int cnt = 0, entry;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       char *buf;
+       int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+               (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+       const u8 *ptr;
+       ssize_t ret;
+
+       if (!priv->txq) {
+               IWL_ERR(priv, "txq not ready\n");
+               return -EAGAIN;
+       }
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate buffer\n");
+               return -ENOMEM;
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+               txq = &priv->txq[cnt];
+               q = &txq->q;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "q[%d]: read_ptr: %u, write_ptr: %u\n",
+                               cnt, q->read_ptr, q->write_ptr);
+       }
+       if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
+               ptr = priv->tx_traffic;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+               for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+                       for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+                            entry++,  ofs += 16) {
+                               pos += scnprintf(buf + pos, bufsz - pos,
+                                               "0x%.4x ", ofs);
+                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+                                                  buf + pos, bufsz - pos, 0);
+                               pos += strlen(buf + pos);
+                               if (bufsz - pos > 0)
+                                       buf[pos++] = '\n';
+                       }
+               }
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "read: %u, write: %u\n",
+                        rxq->read, rxq->write);
+
+       if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
+               ptr = priv->rx_traffic;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+               for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+                       for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+                            entry++,  ofs += 16) {
+                               pos += scnprintf(buf + pos, bufsz - pos,
+                                               "0x%.4x ", ofs);
+                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+                                                  buf + pos, bufsz - pos, 0);
+                               pos += strlen(buf + pos);
+                               if (bufsz - pos > 0)
+                                       buf[pos++] = '\n';
+                       }
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int traffic_log;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &traffic_log) != 1)
+               return -EFAULT;
+       if (traffic_log == 0)
+               iwl_legacy_reset_traffic_log(priv);
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       char *buf;
+       int pos = 0;
+       int cnt;
+       int ret;
+       const size_t bufsz = sizeof(char) * 64 *
+                               priv->cfg->base_params->num_of_queues;
+
+       if (!priv->txq) {
+               IWL_ERR(priv, "txq not ready\n");
+               return -EAGAIN;
+       }
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
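+       /* swq_id packs the AC into its two low bits and the HW queue number
+        * into the next five bits, as decoded in the print below */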
+       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+               txq = &priv->txq[cnt];
+               q = &txq->q;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "hwq %.2d: read=%u write=%u stop=%d"
+                               " swq_id=%#.2x (ac %d/hwq %d)\n",
+                               cnt, q->read_ptr, q->write_ptr,
+                               !!test_bit(cnt, priv->queue_stopped),
+                               txq->swq_id, txq->swq_id & 3,
+                               (txq->swq_id >> 2) & 0x1f);
+               if (cnt >= 4)
+                       continue;
+               /* for the ACs, display the stop count too */
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "        stop-count: %d\n",
+                               atomic_read(&priv->queue_stop_count[cnt]));
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+                                               rxq->read);
+       pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+                                               rxq->write);
+       pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+                                               rxq->free_count);
+       if (rxq->rb_stts) {
+               pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
+       } else {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                       "closed_rb_num: Not Allocated\n");
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+                       user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+                       user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+                       user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
+       ssize_t ret;
+       struct iwl_sensitivity_data *data;
+
+       data = &priv->sensitivity_data;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+                       data->auto_corr_ofdm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "auto_corr_ofdm_mrc:\t\t %u\n",
+                       data->auto_corr_ofdm_mrc);
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+                       data->auto_corr_ofdm_x1);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+                       data->auto_corr_ofdm_mrc_x1);
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+                       data->auto_corr_cck);
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+                       data->auto_corr_cck_mrc);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+                       data->last_bad_plcp_cnt_ofdm);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+                       data->last_fa_cnt_ofdm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "last_bad_plcp_cnt_cck:\t\t %u\n",
+                       data->last_bad_plcp_cnt_cck);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+                       data->last_fa_cnt_cck);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+                       data->nrg_curr_state);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+                       data->nrg_prev_state);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+       for (cnt = 0; cnt < 10; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->nrg_value[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+       for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->nrg_silence_rssi[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+                       data->nrg_silence_ref);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+                       data->nrg_energy_idx);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+                       data->nrg_silence_idx);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+                       data->nrg_th_cck);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "nrg_auto_corr_silence_diff:\t %u\n",
+                       data->nrg_auto_corr_silence_diff);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+                       data->num_in_cck_no_fa);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+                       data->nrg_th_ofdm);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+
+static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
+       ssize_t ret;
+       struct iwl_chain_noise_data *data;
+
+       data = &priv->chain_noise_data;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+                       data->active_chains);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+                       data->chain_noise_a);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+                       data->chain_noise_b);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+                       data->chain_noise_c);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+                       data->chain_signal_a);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+                       data->chain_signal_b);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+                       data->chain_signal_c);
+       pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+                       data->beacon_count);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->disconn_array[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->delta_gain_code[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+                       data->radio_write);
+       pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+                       data->state);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
+                                                   char __user *user_buf,
+                                                   size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[60];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       u32 pwrsave_status;
+
+       pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+                       CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+       pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+               (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+               (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+               (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+               "error");
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int clear;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &clear) != 1)
+               return -EFAULT;
+
+       /* make request to uCode to retrieve statistics information */
+       mutex_lock(&priv->mutex);
+       iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
+       mutex_unlock(&priv->mutex);
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char buf[128];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+                       priv->event_log.ucode_trace ? "On" : "Off");
+       pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+                       priv->event_log.non_wraps_count);
+       pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+                       priv->event_log.wraps_once_count);
+       pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+                       priv->event_log.wraps_more_count);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int trace;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &trace) != 1)
+               return -EFAULT;
+
+       if (trace) {
+               priv->event_log.ucode_trace = true;
+               /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+               mod_timer(&priv->ucode_trace,
+                       jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+       } else {
+               priv->event_log.ucode_trace = false;
+               del_timer_sync(&priv->ucode_trace);
+       }
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int len = 0;
+       char buf[20];
+
+       len = sprintf(buf, "0x%04X\n",
+               le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int len = 0;
+       char buf[20];
+
+       len = sprintf(buf, "0x%04X\n",
+       le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+       ssize_t ret = -EFAULT;
+
+       if (priv->cfg->ops->lib->dump_fh) {
+               ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+               if (buf) {
+                       ret = simple_read_from_buffer(user_buf,
+                                                     count, ppos, buf, pos);
+                       kfree(buf);
+               }
+       }
+
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char buf[12];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+                       priv->missed_beacon_threshold);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int missed;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &missed) != 1)
+               return -EINVAL;
+
+       if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+           missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+               priv->missed_beacon_threshold =
+                       IWL_MISSED_BEACON_THRESHOLD_DEF;
+       else
+               priv->missed_beacon_threshold = missed;
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char buf[12];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+                       priv->cfg->base_params->plcp_delta_threshold);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int plcp;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &plcp) != 1)
+               return -EINVAL;
+       if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+               (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+               priv->cfg->base_params->plcp_delta_threshold =
+                       IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
+       else
+               priv->cfg->base_params->plcp_delta_threshold = plcp;
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       int i, pos = 0;
+       char buf[300];
+       const size_t bufsz = sizeof(buf);
+       struct iwl_force_reset *force_reset;
+
+       for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
+               force_reset = &priv->force_reset[i];
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Force reset method %d\n", i);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tnumber of reset request: %d\n",
+                               force_reset->reset_request_count);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tnumber of reset request success: %d\n",
+                               force_reset->reset_success_count);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tnumber of reset request reject: %d\n",
+                               force_reset->reset_reject_count);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\treset duration: %lu\n",
+                               force_reset->reset_duration);
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int reset, ret;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &reset) != 1)
+               return -EINVAL;
+       switch (reset) {
+       case IWL_RF_RESET:
+       case IWL_FW_RESET:
+               ret = iwl_legacy_force_reset(priv, reset, true);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return ret ? ret : count;
+}
+
+static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int timeout;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &timeout) != 1)
+               return -EINVAL;
+       if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
+               timeout = IWL_DEF_WD_TIMEOUT;
+
+       priv->cfg->base_params->wd_timeout = timeout;
+       iwl_legacy_setup_watchdog(priv);
+       return count;
+}
+
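+/* file_operations for the entries created under the "debug" debugfs
+ * directory in iwl_legacy_dbgfs_register() below */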
+DEBUGFS_READ_FILE_OPS(rx_statistics);
+DEBUGFS_READ_FILE_OPS(tx_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+       struct dentry *phyd = priv->hw->wiphy->debugfsdir;
+       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+       dir_drv = debugfs_create_dir(name, phyd);
+       if (!dir_drv)
+               return -ENOMEM;
+
+       priv->debugfs_dir = dir_drv;
+
+       dir_data = debugfs_create_dir("data", dir_drv);
+       if (!dir_data)
+               goto err;
+       dir_rf = debugfs_create_dir("rf", dir_drv);
+       if (!dir_rf)
+               goto err;
+       dir_debug = debugfs_create_dir("debug", dir_drv);
+       if (!dir_debug)
+               goto err;
+
+       DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+       if (priv->cfg->base_params->sensitivity_calib_by_driver)
+               DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+       if (priv->cfg->base_params->chain_noise_calib_by_driver)
+               DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+       if (priv->cfg->base_params->ucode_tracing)
+               DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+       if (priv->cfg->base_params->sensitivity_calib_by_driver)
+               DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+                                &priv->disable_sens_cal);
+       if (priv->cfg->base_params->chain_noise_calib_by_driver)
+               DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+                                &priv->disable_chain_noise_cal);
+       DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
+                               &priv->disable_tx_power_cal);
+       return 0;
+
+err:
+       IWL_ERR(priv, "Can't create the debugfs directory\n");
+       iwl_legacy_dbgfs_unregister(priv);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+       if (!priv->debugfs_dir)
+               return;
+
+       debugfs_remove_recursive(priv->debugfs_dir);
+       priv->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644 (file)
index 0000000..9ee849d
--- /dev/null
@@ -0,0 +1,1426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-dev.h) for driver implementation definitions.
+ * Please use iwl-commands.h for uCode API definitions.
+ * Please use iwl-4965-hw.h for hardware-related definitions.
+ */
+
+#ifndef __iwl_legacy_dev_h__
+#define __iwl_legacy_dev_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+#include "iwl-debug.h"
+#include "iwl-4965-hw.h"
+#include "iwl-3945-hw.h"
+#include "iwl-led.h"
+#include "iwl-power.h"
+#include "iwl-legacy-rs.h"
+
+struct iwl_tx_queue;
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon statistics being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE            2304U
+#define MAX_MPDU_SIZE            2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define        DEFAULT_SHORT_RETRY_LIMIT 7U
+#define        DEFAULT_LONG_RETRY_LIMIT  4U
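As a quick illustration of the RTS rule spelled out above, here is a minimal sketch (editorial aside, not part of the patch; needs_rts is a hypothetical helper) of how the threshold is applied to a frame:

static bool needs_rts(unsigned int mpdu_len, unsigned int rts_threshold)
{
	if (rts_threshold == 0)
		return true;                 /* 0: RTS for all data/management frames */
	if (rts_threshold > MAX_MSDU_SIZE)
		return false;                /* above max MSDU size: never use RTS */
	return mpdu_len > rts_threshold;     /* otherwise RTS only for larger MPDUs */
}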
+
+struct iwl_rx_mem_buffer {
+       dma_addr_t page_dma;
+       struct page *page;
+       struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct iwl_device_cmd;
+
+struct iwl_cmd_meta {
+       /* only for SYNC commands, iff the reply skb is wanted */
+       struct iwl_host_cmd *source;
+       /*
+        * only for ASYNC commands
+        * (which is somewhat stupid -- look at iwl-sta.c for instance
+        * which duplicates a bunch of code because the callback isn't
+        * invoked for SYNC commands; if it were, and its result passed
+        * through, it would be simpler...)
+        */
+       void (*callback)(struct iwl_priv *priv,
+                        struct iwl_device_cmd *cmd,
+                        struct iwl_rx_packet *pkt);
+
+       /* The CMD_SIZE_HUGE flag bit indicates that the command
+        * structure is stored at the end of the shared queue memory. */
+       u32 flags;
+
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+       DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct iwl_queue {
+       int n_bd;              /* number of BDs in this queue */
+       int write_ptr;       /* 1st empty entry (index) host_w */
+       int read_ptr;         /* last used entry (index) host_r */
+       /* use for monitoring and recovering the stuck queue */
+       dma_addr_t dma_addr;   /* physical addr for BD's */
+       int n_window;          /* safe queue window */
+       u32 id;
+       int low_mark;          /* low watermark, resume queue if free
+                               * space more than this */
+       int high_mark;         /* high watermark, stop queue if free
+                               * space less than this */
+} __packed;
+
+/* One for each TFD */
+struct iwl_tx_info {
+       struct sk_buff *skb;
+       struct iwl_rxon_context *ctx;
+};
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write index
+ * @sched_retry: indicates the queue is used for HT aggregation (HT AGG)
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_tx_queue {
+       struct iwl_queue q;
+       void *tfds;
+       struct iwl_device_cmd **cmd;
+       struct iwl_cmd_meta *meta;
+       struct iwl_tx_info *txb;
+       unsigned long time_stamp;
+       u8 need_update;
+       u8 sched_retry;
+       u8 active;
+       u8 swq_id;
+};
+
+#define IWL_NUM_SCAN_RATES         (2)
+
+struct iwl4965_channel_tgd_info {
+       u8 type;
+       s8 max_power;
+};
+
+struct iwl4965_channel_tgh_info {
+       s64 last_radar_time;
+};
+
+#define IWL4965_MAX_RATE (33)
+
+struct iwl3945_clip_group {
+       /* maximum power level to prevent clipping for each rate, derived by
+        *   us from this band's saturation power in EEPROM */
+       const s8 clip_powers[IWL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power index must also be set. */
+struct iwl3945_channel_power_info {
+       struct iwl3945_tx_power tpc;    /* actual radio and DSP gain settings */
+       s8 power_table_index;   /* actual (compensated) index into gain table */
+       s8 base_power_index;    /* gain index for power at factory temp. */
+       s8 requested_power;     /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct iwl3945_scan_power_info {
+       struct iwl3945_tx_power tpc;    /* actual radio and DSP gain settings */
+       s8 power_table_index;   /* actual (compensated) index into gain table */
+       s8 requested_power;     /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ *     with one another!
+ */
+struct iwl_channel_info {
+       struct iwl4965_channel_tgd_info tgd;
+       struct iwl4965_channel_tgh_info tgh;
+       struct iwl_eeprom_channel eeprom;       /* EEPROM regulatory limit */
+       struct iwl_eeprom_channel ht40_eeprom;  /* EEPROM regulatory limit for
+                                                * HT40 channel */
+
+       u8 channel;       /* channel number */
+       u8 flags;         /* flags copied from EEPROM */
+       s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+       s8 curr_txpow;    /* (dBm) regulatory/spectrum/user (not h/w) limit */
+       s8 min_power;     /* always 0 */
+       s8 scan_power;    /* (dBm) regul. eeprom, direct scans, any rate */
+
+       u8 group_index;   /* 0-4, maps channel to group1/2/3/4/5 */
+       u8 band_index;    /* 0-4, maps channel to band1/2/3/4/5 */
+       enum ieee80211_band band;
+
+       /* HT40 channel info */
+       s8 ht40_max_power_avg;  /* (dBm) regul. eeprom, normal Tx, any rate */
+       u8 ht40_flags;          /* flags copied from EEPROM */
+       u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+       /* Radio/DSP gain settings for each "normal" data Tx rate.
+        * These include, in addition to RF and DSP gain, a few fields for
+        *   remembering/modifying gain settings (indexes). */
+       struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
+
+       /* Radio/DSP gain settings for each scan rate, for directed scans. */
+       struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
+};
+
+#define IWL_TX_FIFO_BK         0       /* shared */
+#define IWL_TX_FIFO_BE         1
+#define IWL_TX_FIFO_VI         2       /* shared */
+#define IWL_TX_FIFO_VO         3
+#define IWL_TX_FIFO_UNUSED     -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IWL_MIN_NUM_QUEUES     10
+
+#define IWL_DEFAULT_CMD_QUEUE_NUM      4
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct iwl_frame {
+       union {
+               struct ieee80211_hdr frame;
+               struct iwl_tx_beacon_cmd beacon;
+               u8 raw[IEEE80211_FRAME_LEN];
+               u8 cmd[360];
+       } u;
+       struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
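For reference, assuming mac80211's IEEE80211_SCTL_SEQ mask of 0xFFF0, the macros above work out as in this short worked example (editorial aside):

/*
 *   SEQ_TO_SN(0x1234) == 0x123   (sequence-control field -> 12-bit sequence number)
 *   SN_TO_SEQ(0x123)  == 0x1230  (12-bit sequence number -> sequence-control field)
 *   MAX_SN            == 0xFFF   (largest representable sequence number)
 */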
+
+enum {
+       CMD_SYNC = 0,
+       CMD_SIZE_NORMAL = 0,
+       CMD_NO_SKB = 0,
+       CMD_SIZE_HUGE = (1 << 0),
+       CMD_ASYNC = (1 << 1),
+       CMD_WANT_SKB = (1 << 2),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct iwl_device_cmd {
+       struct iwl_cmd_header hdr;      /* uCode API */
+       union {
+               u32 flags;
+               u8 val8;
+               u16 val16;
+               u32 val32;
+               struct iwl_tx_cmd tx;
+               u8 payload[DEF_CMD_PAYLOAD_SIZE];
+       } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
+
+
+struct iwl_host_cmd {
+       const void *data;
+       unsigned long reply_page;
+       void (*callback)(struct iwl_priv *priv,
+                        struct iwl_device_cmd *cmd,
+                        struct iwl_rx_packet *pkt);
+       u32 flags;
+       u16 len;
+       u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+       __le32 *bd;
+       dma_addr_t bd_dma;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+       u32 read;
+       u32 write;
+       u32 free_count;
+       u32 write_actual;
+       struct list_head rx_free;
+       struct list_head rx_used;
+       int need_update;
+       struct iwl_rb_status *rb_stts;
+       dma_addr_t rb_stts_dma;
+       spinlock_t lock;
+};
+
+#define IWL_SUPPORTED_RATES_IE_LEN         8
+
+#define MAX_TID_COUNT        9
+
+#define IWL_INVALID_RATE     0xFF
+#define IWL_INVALID_VALUE    -1
+
+/**
+ * struct iwl_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
+ * @bitmap: bitmap of frames pending ACK in Tx window, one bit per frame
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If REPLY_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (REPLY_COMPRESSED_BA).  This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct iwl_ht_agg {
+       u16 txq_id;
+       u16 frame_count;
+       u16 wait_for_ba;
+       u16 start_idx;
+       u64 bitmap;
+       u32 rate_n_flags;
+#define IWL_AGG_OFF 0
+#define IWL_AGG_ON 1
+#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IWL_EMPTYING_HW_QUEUE_DELBA 3
+       u8 state;
+};
+
+
+struct iwl_tid_data {
+       u16 seq_number; /* 4965 only */
+       u16 tfds_in_queue;
+       struct iwl_ht_agg agg;
+};
+
+struct iwl_hw_key {
+       u32 cipher;
+       int keylen;
+       u8 keyidx;
+       u8 key[32];
+};
+
+union iwl_ht_rate_supp {
+       u16 rates;
+       struct {
+               u8 siso_rate;
+               u8 mimo_rate;
+       };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
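For the density codes listed above (4 through 7), the spacing in microseconds is simply 1 << (code - 3); a tiny illustrative helper (hypothetical, not in the patch):

static inline u32 cfg_ht_mpdu_density_usec(u8 density_code)
{
	/* 4 -> 2us, 5 -> 4us, 6 -> 8us, 7 -> 16us */
	return 1U << (density_code - 3);
}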
+
+struct iwl_ht_config {
+       bool single_chain_sufficient;
+       enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct iwl_qos_info {
+       int qos_active;
+       struct iwl_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct iwl_station_entry {
+       struct iwl_legacy_addsta_cmd sta;
+       struct iwl_tid_data tid[MAX_TID_COUNT];
+       u8 used, ctxid;
+       struct iwl_hw_key keyinfo;
+       struct iwl_link_quality_cmd *lq;
+};
+
+struct iwl_station_priv_common {
+       struct iwl_rxon_context *ctx;
+       u8 sta_id;
+};
+
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct iwl_station_priv {
+       struct iwl_station_priv_common common;
+       struct iwl_lq_sta lq_sta;
+       atomic_t pending_frames;
+       bool client;
+       bool asleep;
+};
+
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+       struct iwl_rxon_context *ctx;
+       u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+       void *v_addr;           /* access by driver */
+       dma_addr_t p_addr;      /* access by card's busmaster DMA */
+       u32 len;                /* bytes */
+};
+
+/* uCode file layout */
+struct iwl_ucode_header {
+       __le32 ver;     /* major/minor/API/serial */
+       struct {
+               __le32 inst_size;       /* bytes of runtime code */
+               __le32 data_size;       /* bytes of runtime data */
+               __le32 init_size;       /* bytes of init code */
+               __le32 init_data_size;  /* bytes of init data */
+               __le32 boot_size;       /* bytes of bootstrap code */
+               u8 data[0];             /* in same order as sizes */
+       } v1;
+};
+
+struct iwl4965_ibss_seq {
+       u8 mac[ETH_ALEN];
+       u16 seq_num;
+       u16 frag_num;
+       unsigned long packet_time;
+       struct list_head list;
+};
+
+struct iwl_sensitivity_ranges {
+       u16 min_nrg_cck;
+       u16 max_nrg_cck;
+
+       u16 nrg_th_cck;
+       u16 nrg_th_ofdm;
+
+       u16 auto_corr_min_ofdm;
+       u16 auto_corr_min_ofdm_mrc;
+       u16 auto_corr_min_ofdm_x1;
+       u16 auto_corr_min_ofdm_mrc_x1;
+
+       u16 auto_corr_max_ofdm;
+       u16 auto_corr_max_ofdm_mrc;
+       u16 auto_corr_max_ofdm_x1;
+       u16 auto_corr_max_ofdm_mrc_x1;
+
+       u16 auto_corr_max_cck;
+       u16 auto_corr_max_cck_mrc;
+       u16 auto_corr_min_cck;
+       u16 auto_corr_min_cck_mrc;
+
+       u16 barker_corr_th_min;
+       u16 barker_corr_th_min_mrc;
+       u16 nrg_th_cca;
+};
+
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+
+/**
+ * struct iwl_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations: maximum number of stations supported
+ * @ht40_channel: bands in which 40MHz width is possible:
+ * BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
+ */
+struct iwl_hw_params {
+       u8 max_txq_num;
+       u8 dma_chnl_num;
+       u16 scd_bc_tbls_size;
+       u32 tfd_size;
+       u8  tx_chains_num;
+       u8  rx_chains_num;
+       u8  valid_tx_ant;
+       u8  valid_rx_ant;
+       u16 max_rxq_size;
+       u16 max_rxq_log;
+       u32 rx_page_order;
+       u32 rx_wrt_ptr_reg;
+       u8  max_stations;
+       u8  ht40_channel;
+       u8  max_beacon_itrvl;   /* in 1024 ms */
+       u32 max_inst_size;
+       u32 max_data_size;
+       u32 max_bsm_size;
+       u32 ct_kill_threshold; /* value in hw-dependent units */
+       u16 beacon_time_tsf_bits;
+       const struct iwl_sensitivity_ranges *sens;
+};
+
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE:  The implementation of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * iwl_         <-- Is part of iwlwifi
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * iwl4965_bg_      <-- Called from work queue context
+ * iwl4965_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
+extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
+extern int iwl_legacy_queue_space(const struct iwl_queue *q);
+static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
+{
+       return q->write_ptr >= q->read_ptr ?
+               (i >= q->read_ptr && i < q->write_ptr) :
+               !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+
+static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
+                                                               int is_huge)
+{
+       /*
+        * This is for init calibration result and scan command which
+        * required buffer > TFD_MAX_PAYLOAD_SIZE,
+        * the big buffer at end of command array
+        */
+       if (is_huge)
+               return q->n_window;     /* must be power of 2 */
+
+       /* Otherwise, use normal size buffers */
+       return index & (q->n_window - 1);
+}
+
+
+struct iwl_dma_ptr {
+       dma_addr_t dma;
+       void *addr;
+       size_t size;
+};
+
+#define IWL_OPERATION_MODE_AUTO     0
+#define IWL_OPERATION_MODE_HT_ONLY  1
+#define IWL_OPERATION_MODE_MIXED    2
+#define IWL_OPERATION_MODE_20MHZ    3
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE           0xFFFF
+#define IWL4965_CAL_NUM_BEACONS                20
+#define IWL_CAL_NUM_BEACONS            16
+#define MAXIMUM_ALLOWED_PATHLOSS       15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM  50
+#define MIN_FA_OFDM  5
+#define MAX_FA_CCK   50
+#define MIN_FA_CCK   5
+
+#define AUTO_CORR_STEP_OFDM       1
+
+#define AUTO_CORR_STEP_CCK     3
+#define AUTO_CORR_MAX_TH_CCK   160
+
+#define NRG_DIFF               2
+#define NRG_STEP_CCK           2
+#define NRG_MARGIN             8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
+
+#define CHAIN_A             0
+#define CHAIN_B             1
+#define CHAIN_C             2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER                        0xFF00
+#define IN_BAND_FILTER                 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE    0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L     20
+#define NUM_RX_CHAINS           3
+
+enum iwl4965_false_alarm_state {
+       IWL_FA_TOO_MANY = 0,
+       IWL_FA_TOO_FEW = 1,
+       IWL_FA_GOOD_RANGE = 2,
+};
+
+enum iwl4965_chain_noise_state {
+       IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
+       IWL_CHAIN_NOISE_ACCUMULATE,
+       IWL_CHAIN_NOISE_CALIBRATED,
+       IWL_CHAIN_NOISE_DONE,
+};
+
+enum iwl4965_calib_enabled_state {
+       IWL_CALIB_DISABLED = 0,  /* must be 0 */
+       IWL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum iwl_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum iwl_calib {
+       IWL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+       void *buf;
+       size_t buf_len;
+};
+
+enum ucode_type {
+       UCODE_NONE = 0,
+       UCODE_INIT,
+       UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct iwl_sensitivity_data {
+       u32 auto_corr_ofdm;
+       u32 auto_corr_ofdm_mrc;
+       u32 auto_corr_ofdm_x1;
+       u32 auto_corr_ofdm_mrc_x1;
+       u32 auto_corr_cck;
+       u32 auto_corr_cck_mrc;
+
+       u32 last_bad_plcp_cnt_ofdm;
+       u32 last_fa_cnt_ofdm;
+       u32 last_bad_plcp_cnt_cck;
+       u32 last_fa_cnt_cck;
+
+       u32 nrg_curr_state;
+       u32 nrg_prev_state;
+       u32 nrg_value[10];
+       u8  nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+       u32 nrg_silence_ref;
+       u32 nrg_energy_idx;
+       u32 nrg_silence_idx;
+       u32 nrg_th_cck;
+       s32 nrg_auto_corr_silence_diff;
+       u32 num_in_cck_no_fa;
+       u32 nrg_th_ofdm;
+
+       u16 barker_corr_th_min;
+       u16 barker_corr_th_min_mrc;
+       u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct iwl_chain_noise_data {
+       u32 active_chains;
+       u32 chain_noise_a;
+       u32 chain_noise_b;
+       u32 chain_noise_c;
+       u32 chain_signal_a;
+       u32 chain_signal_b;
+       u32 chain_signal_c;
+       u16 beacon_count;
+       u8 disconn_array[NUM_RX_CHAINS];
+       u8 delta_gain_code[NUM_RX_CHAINS];
+       u8 radio_write;
+       u8 state;
+};
+
+#define        EEPROM_SEM_TIMEOUT 10           /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000    /* number of attempts (not time) */
+
+#define IWL_TRAFFIC_ENTRIES    (256)
+#define IWL_TRAFFIC_ENTRY_SIZE  (64)
+
+enum {
+       MEASUREMENT_READY = (1 << 0),
+       MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt statistics */
+struct isr_statistics {
+       u32 hw;
+       u32 sw;
+       u32 err_code;
+       u32 sch;
+       u32 alive;
+       u32 rfkill;
+       u32 ctkill;
+       u32 wakeup;
+       u32 rx;
+       u32 rx_handlers[REPLY_MAX];
+       u32 tx;
+       u32 unhandled;
+};
+
+/* management statistics */
+enum iwl_mgmt_stats {
+       MANAGEMENT_ASSOC_REQ = 0,
+       MANAGEMENT_ASSOC_RESP,
+       MANAGEMENT_REASSOC_REQ,
+       MANAGEMENT_REASSOC_RESP,
+       MANAGEMENT_PROBE_REQ,
+       MANAGEMENT_PROBE_RESP,
+       MANAGEMENT_BEACON,
+       MANAGEMENT_ATIM,
+       MANAGEMENT_DISASSOC,
+       MANAGEMENT_AUTH,
+       MANAGEMENT_DEAUTH,
+       MANAGEMENT_ACTION,
+       MANAGEMENT_MAX,
+};
+/* control statistics */
+enum iwl_ctrl_stats {
+       CONTROL_BACK_REQ =  0,
+       CONTROL_BACK,
+       CONTROL_PSPOLL,
+       CONTROL_RTS,
+       CONTROL_CTS,
+       CONTROL_ACK,
+       CONTROL_CFEND,
+       CONTROL_CFENDACK,
+       CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       u32 mgmt[MANAGEMENT_MAX];
+       u32 ctrl[CONTROL_MAX];
+       u32 data_cnt;
+       u64 data_bytes;
+#endif
+};
+
+/*
+ * iwl_switch_rxon: "channel switch" structure
+ *
+ * @switch_in_progress: channel switch in progress
+ * @channel: new channel
+ */
+struct iwl_switch_rxon {
+       bool switch_in_progress;
+       __le16 channel;
+};
+
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry:  the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dumping ucode events
+ * @wraps_once_count: counter for wrap once detected when dumping ucode events
+ * @wraps_more_count: counter for wrap more than once detected
+ *                   when dumping ucode events
+ */
+struct iwl_event_log {
+       bool ucode_trace;
+       u32 num_wraps;
+       u32 next_entry;
+       int non_wraps_count;
+       int wraps_once_count;
+       int wraps_more_count;
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX       (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF       (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN       (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
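Since CSR_INT_COALESCING counts in 32-usec units, the defaults above correspond to 0x40 * 32 = 2048 usecs and 0x10 * 32 = 512 usecs; a small sketch of the conversion (int_coalescing_to_usec is a hypothetical helper, shown only to make the arithmetic explicit):

static inline u32 int_coalescing_to_usec(u32 reg_units)
{
	return reg_units * 32;	/* e.g. IWL_HOST_INT_TIMEOUT_DEF (0x40) -> 2048 usecs */
}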
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs.  It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF    (100)
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF        (200)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE     (0)
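A minimal sketch of the validity check these bounds imply (one plausible reading only; plcp_delta_valid is a hypothetical helper and the driver's actual handling of out-of-range input may differ):

static bool plcp_delta_valid(int plcp)
{
	return plcp == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE ||
	       (plcp >= IWL_MAX_PLCP_ERR_THRESHOLD_MIN &&
		plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MAX);
}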
+
+#define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)
+#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_DEF_WD_TIMEOUT     (2000)
+#define IWL_LONG_WD_TIMEOUT    (10000)
+#define IWL_MAX_WD_TIMEOUT     (120000)
+
+enum iwl_reset {
+       IWL_RF_RESET = 0,
+       IWL_FW_RESET,
+       IWL_MAX_FORCE_RESET,
+};
+
+struct iwl_force_reset {
+       int reset_request_count;
+       int reset_success_count;
+       int reset_reject_count;
+       unsigned long reset_duration;
+       unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting  */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0  - interval
+ */
+#define IWL3945_EXT_BEACON_TIME_POS    24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IWL4965_EXT_BEACON_TIME_POS    22
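To make the bit layout concrete, a sketch of packing an extended beacon time for the _4965 layout described above (bits 31:22 extended, bits 21:0 interval); iwl4965_pack_beacon_time is a hypothetical helper, not part of the patch:

static inline u32 iwl4965_pack_beacon_time(u32 extended, u32 interval)
{
	return (extended << IWL4965_EXT_BEACON_TIME_POS) |
	       (interval & ((1U << IWL4965_EXT_BEACON_TIME_POS) - 1));
}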
+
+enum iwl_rxon_context_id {
+       IWL_RXON_CTX_BSS,
+
+       NUM_IWL_RXON_CTX
+};
+
+struct iwl_rxon_context {
+       struct ieee80211_vif *vif;
+
+       const u8 *ac_to_fifo;
+       const u8 *ac_to_queue;
+       u8 mcast_queue;
+
+       /*
+        * We could use the vif to indicate active, but we
+        * also need it to stay active during disabling, after
+        * the vif has already been removed for type setting.
+        */
+       bool always_active, is_active;
+
+       bool ht_need_multiple_chains;
+
+       enum iwl_rxon_context_id ctxid;
+
+       u32 interface_modes, exclusive_interface_modes;
+       u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+       /*
+        * We declare this const so it can only be
+        * changed via explicit cast within the
+        * routines that actually update the physical
+        * hardware.
+        */
+       const struct iwl_legacy_rxon_cmd active;
+       struct iwl_legacy_rxon_cmd staging;
+
+       struct iwl_rxon_time_cmd timing;
+
+       struct iwl_qos_info qos_data;
+
+       u8 bcast_sta_id, ap_sta_id;
+
+       u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+       u8 qos_cmd;
+       u8 wep_key_cmd;
+
+       struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
+       u8 key_mapping_keys;
+
+       __le32 station_flags;
+
+       struct {
+               bool non_gf_sta_present;
+               u8 protection;
+               bool enabled, is_40mhz;
+               u8 extension_chan_offset;
+       } ht;
+};
+
+struct iwl_priv {
+
+       /* ieee device used by generic ieee processing code */
+       struct ieee80211_hw *hw;
+       struct ieee80211_channel *ieee_channels;
+       struct ieee80211_rate *ieee_rates;
+       struct iwl_cfg *cfg;
+
+       /* temporary frame storage list */
+       struct list_head free_frames;
+       int frames_count;
+
+       enum ieee80211_band band;
+       int alloc_rxb_page;
+
+       void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+                                      struct iwl_rx_mem_buffer *rxb);
+
+       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+       /* spectrum measurement report caching */
+       struct iwl_spectrum_notification measure_report;
+       u8 measurement_status;
+
+       /* ucode beacon time */
+       u32 ucode_beacon_time;
+       int missed_beacon_threshold;
+
+       /* track IBSS manager (last beacon) status */
+       u32 ibss_manager;
+
+       /* storing the jiffies when the plcp error rate is received */
+       unsigned long plcp_jiffies;
+
+       /* force reset */
+       struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+
+       /* we allocate an array of iwl_channel_info for the NIC's valid channels.
+        *    Access via channel # using an indirect index array */
+       struct iwl_channel_info *channel_info;  /* channel info array */
+       u8 channel_count;       /* # of channels */
+
+       /* thermal calibration */
+       s32 temperature;        /* degrees Kelvin */
+       s32 last_temperature;
+
+       /* init calibration results */
+       struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+
+       /* Scan related variables */
+       unsigned long scan_start;
+       unsigned long scan_start_tsf;
+       void *scan_cmd;
+       enum ieee80211_band scan_band;
+       struct cfg80211_scan_request *scan_request;
+       struct ieee80211_vif *scan_vif;
+       bool is_internal_short_scan;
+       u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+       u8 mgmt_tx_ant;
+
+       /* spinlock */
+       spinlock_t lock;        /* protect general shared data */
+       spinlock_t hcmd_lock;   /* protect hcmd */
+       spinlock_t reg_lock;    /* protect hw register access */
+       struct mutex mutex;
+       struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
+
+       /* basic pci-network driver stuff */
+       struct pci_dev *pci_dev;
+
+       /* pci hardware address support */
+       void __iomem *hw_base;
+       u32  hw_rev;
+       u32  hw_wa_rev;
+       u8   rev_id;
+
+       /* microcode/device supports multiple contexts */
+       u8 valid_contexts;
+
+       /* command queue number */
+       u8 cmd_queue;
+
+       /* max number of station keys */
+       u8 sta_key_max_num;
+
+       /* EEPROM MAC addresses */
+       struct mac_address addresses[1];
+
+       /* uCode images, save to reload in case of failure */
+       int fw_index;                   /* firmware we're trying to load */
+       u32 ucode_ver;                  /* version of ucode, copy of
+                                          iwl_ucode.ver */
+       struct fw_desc ucode_code;      /* runtime inst */
+       struct fw_desc ucode_data;      /* runtime data original */
+       struct fw_desc ucode_data_backup;       /* runtime data save/restore */
+       struct fw_desc ucode_init;      /* initialization inst */
+       struct fw_desc ucode_init_data; /* initialization data */
+       struct fw_desc ucode_boot;      /* bootstrap inst */
+       enum ucode_type ucode_type;
+       u8 ucode_write_complete;        /* the image write is complete */
+       char firmware_name[25];
+
+       struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
+
+       struct iwl_switch_rxon switch_rxon;
+
+       /* 1st responses from initialize and runtime uCode images.
+        * _4965's initialize alive response contains some calibration data. */
+       struct iwl_init_alive_resp card_alive_init;
+       struct iwl_alive_resp card_alive;
+
+       u16 active_rate;
+
+       u8 start_calib;
+       struct iwl_sensitivity_data sensitivity_data;
+       struct iwl_chain_noise_data chain_noise_data;
+       __le16 sensitivity_tbl[HD_TABLE_SIZE];
+
+       struct iwl_ht_config current_ht_config;
+
+       /* Rate scaling data */
+       u8 retry_rate;
+
+       wait_queue_head_t wait_command_queue;
+
+       int activity_timer_active;
+
+       /* Rx and Tx DMA processing queues */
+       struct iwl_rx_queue rxq;
+       struct iwl_tx_queue *txq;
+       unsigned long txq_ctx_active_msk;
+       struct iwl_dma_ptr  kw; /* keep warm address */
+       struct iwl_dma_ptr  scd_bc_tbls;
+
+       u32 scd_base_addr;      /* scheduler sram base address */
+
+       unsigned long status;
+
+       /* counts mgmt, ctl, and data packets */
+       struct traffic_stats tx_stats;
+       struct traffic_stats rx_stats;
+
+       /* counts interrupts */
+       struct isr_statistics isr_stats;
+
+       struct iwl_power_mgr power_data;
+
+       /* context information */
+       u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+       /* station table variables */
+
+       /* Note: if lock and sta_lock are needed, lock must be acquired first */
+       spinlock_t sta_lock;
+       int num_stations;
+       struct iwl_station_entry stations[IWL_STATION_COUNT];
+       unsigned long ucode_key_table;
+
+       /* queue refcounts */
+#define IWL_MAX_HW_QUEUES      32
+       unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+       /* for each AC */
+       atomic_t queue_stop_count[4];
+
+       /* Indication if ieee80211_ops->open has been called */
+       u8 is_open;
+
+       u8 mac80211_registered;
+
+       /* eeprom -- this is in the card's little endian byte order */
+       u8 *eeprom;
+       struct iwl_eeprom_calib_info *calib_info;
+
+       enum nl80211_iftype iw_mode;
+
+       /* Last Rx'd beacon timestamp */
+       u64 timestamp;
+
+       union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+               struct {
+                       void *shared_virt;
+                       dma_addr_t shared_phys;
+
+                       struct delayed_work thermal_periodic;
+                       struct delayed_work rfkill_poll;
+
+                       struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+                       struct iwl3945_notif_statistics accum_statistics;
+                       struct iwl3945_notif_statistics delta_statistics;
+                       struct iwl3945_notif_statistics max_delta;
+#endif
+
+                       u32 sta_supp_rates;
+                       int last_rx_rssi;       /* From Rx packet statistics */
+
+                       /* Rx'd packet timing information */
+                       u32 last_beacon_time;
+                       u64 last_tsf;
+
+                       /*
+                        * each calibration channel group in the
+                        * EEPROM has a derived clip setting for
+                        * each rate.
+                        */
+                       const struct iwl3945_clip_group clip_groups[5];
+
+               } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+               struct {
+                       /*
+                        * reports the number of TIDs that have AGG on;
+                        * 0 means no aggregation
+                        */
+                       u8 agg_tids_count;
+
+                       struct iwl_rx_phy_res last_phy_res;
+                       bool last_phy_res_valid;
+
+                       struct completion firmware_loading_complete;
+
+                       /*
+                        * chain noise reset and gain commands are the
+                        * two extra calibration commands that follow the
+                        * standard phy calibration commands
+                        */
+                       u8 phy_calib_chain_noise_reset_cmd;
+                       u8 phy_calib_chain_noise_gain_cmd;
+
+                       struct iwl_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+                       struct iwl_notif_statistics accum_statistics;
+                       struct iwl_notif_statistics delta_statistics;
+                       struct iwl_notif_statistics max_delta;
+#endif
+
+               } _4965;
+#endif
+       };
+
+       struct iwl_hw_params hw_params;
+
+       u32 inta_mask;
+
+       struct workqueue_struct *workqueue;
+
+       struct work_struct restart;
+       struct work_struct scan_completed;
+       struct work_struct rx_replenish;
+       struct work_struct abort_scan;
+
+       struct iwl_rxon_context *beacon_ctx;
+       struct sk_buff *beacon_skb;
+
+       struct work_struct start_internal_scan;
+       struct work_struct tx_flush;
+
+       struct tasklet_struct irq_tasklet;
+
+       struct delayed_work init_alive_start;
+       struct delayed_work alive_start;
+       struct delayed_work scan_check;
+
+       /* TX Power */
+       s8 tx_power_user_lmt;
+       s8 tx_power_device_lmt;
+       s8 tx_power_next;
+
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       /* debugging info */
+       u32 debug_level; /* per device debugging will override global
+                           iwlegacy_debug_level if set */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       /* debugfs */
+       u16 tx_traffic_idx;
+       u16 rx_traffic_idx;
+       u8 *tx_traffic;
+       u8 *rx_traffic;
+       struct dentry *debugfs_dir;
+       u32 dbgfs_sram_offset, dbgfs_sram_len;
+       bool disable_ht40;
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+       struct work_struct txpower_work;
+       u32 disable_sens_cal;
+       u32 disable_chain_noise_cal;
+       u32 disable_tx_power_cal;
+       struct work_struct run_time_calib_work;
+       struct timer_list statistics_periodic;
+       struct timer_list ucode_trace;
+       struct timer_list watchdog;
+       bool hw_ready;
+
+       struct iwl_event_log event_log;
+
+       struct led_classdev led;
+       unsigned long blink_on, blink_off;
+       bool led_registered;
+}; /*iwl_priv */
+
+static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
+{
+       set_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
+{
+       clear_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+/*
+ * iwl_legacy_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set per device debug level. This debug
+ * level will be used if set, otherwise the global debug level which can be
+ * set via module parameter is used.
+ */
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+       if (priv->debug_level)
+               return priv->debug_level;
+       else
+               return iwlegacy_debug_level;
+}
+#else
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+       return iwlegacy_debug_level;
+}
+#endif
+
+
+static inline struct ieee80211_hdr *
+iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
+                                                int txq_id, int idx)
+{
+       if (priv->txq[txq_id].txb[idx].skb)
+               return (struct ieee80211_hdr *)priv->txq[txq_id].
+                               txb[idx].skb->data;
+       return NULL;
+}
+
+static inline struct iwl_rxon_context *
+iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx)                            \
+       for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];           \
+            ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)    \
+               if (priv->valid_contexts & BIT(ctx->ctxid))
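Typical use of the iterator above would look like the following (illustrative only; handle_ctx is a placeholder):

	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		handle_ctx(priv, ctx);	/* body runs only for contexts marked valid */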
+
+static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
+                                   enum iwl_rxon_context_id ctxid)
+{
+       return (priv->contexts[ctxid].active.filter_flags &
+                       RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
+{
+       return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
+}
+
+static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
+{
+       return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
+{
+       if (ch_info == NULL)
+               return 0;
+       return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
+{
+       return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
+{
+       return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
+{
+       return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline void
+__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
+{
+       __free_pages(page, priv->hw_params.rx_page_order);
+       priv->alloc_rxb_page--;
+}
+
+static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+       free_pages(page, priv->hw_params.rx_page_order);
+       priv->alloc_rxb_page--;
+}
+#endif                         /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644 (file)
index 0000000..080b852
--- /dev/null
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#include "iwl-dev.h"
+
+#define CREATE_TRACE_POINTS
+#include "iwl-devtrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644 (file)
index 0000000..9612aa0
--- /dev/null
@@ -0,0 +1,270 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_LEGACY_DEVICE_TRACE
+
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+
+#define PRIV_ENTRY     __field(struct iwl_priv *, priv)
+#define PRIV_ASSIGN    (__entry->priv = priv)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_io
+
+TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
+       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+       TP_ARGS(priv, offs, val),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, offs)
+               __field(u32, val)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
+                                       __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
+       TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+       TP_ARGS(priv, offs, val),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, offs)
+               __field(u8, val)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
+                                       __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
+       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+       TP_ARGS(priv, offs, val),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, offs)
+               __field(u32, val)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
+                                       __entry->offs, __entry->val)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_ucode
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
+       TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+       TP_ARGS(priv, time, data, ev),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(u32, time)
+               __field(u32, data)
+               __field(u32, ev)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->time = time;
+               __entry->data = data;
+               __entry->ev = ev;
+       ),
+       TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+                 __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
+       TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+       TP_ARGS(priv, wraps, n_entry, p_entry),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(u32, wraps)
+               __field(u32, n_entry)
+               __field(u32, p_entry)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->wraps = wraps;
+               __entry->n_entry = n_entry;
+               __entry->p_entry = p_entry;
+       ),
+       TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+                 __entry->priv, __entry->wraps, __entry->n_entry,
+                 __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
+       TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
+       TP_ARGS(priv, hcmd, len, flags),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __dynamic_array(u8, hcmd, len)
+               __field(u32, flags)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               memcpy(__get_dynamic_array(hcmd), hcmd, len);
+               __entry->flags = flags;
+       ),
+       TP_printk("[%p] hcmd %#.2x (%ssync)",
+                 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
+                 __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_rx,
+       TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+       TP_ARGS(priv, rxbuf, len),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __dynamic_array(u8, rxbuf, len)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+       ),
+       TP_printk("[%p] RX cmd %#.2x",
+                 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_tx,
+       TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+                void *buf0, size_t buf0_len,
+                void *buf1, size_t buf1_len),
+       TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(size_t, framelen)
+               __dynamic_array(u8, tfd, tfdlen)
+
+               /*
+                * Do not insert between or below these items,
+                * we want to keep the frame together (except
+                * for the possible padding).
+                */
+               __dynamic_array(u8, buf0, buf0_len)
+               __dynamic_array(u8, buf1, buf1_len)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->framelen = buf0_len + buf1_len;
+               memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+               memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+               memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+       ),
+       TP_printk("[%p] TX %.2x (%zu bytes)",
+                 __entry->priv,
+                 ((u8 *)__get_dynamic_array(buf0))[0],
+                 __entry->framelen)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
+       TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
+                u32 data1, u32 data2, u32 line, u32 blink1,
+                u32 blink2, u32 ilink1, u32 ilink2),
+       TP_ARGS(priv, desc, time, data1, data2, line,
+               blink1, blink2, ilink1, ilink2),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, desc)
+               __field(u32, time)
+               __field(u32, data1)
+               __field(u32, data2)
+               __field(u32, line)
+               __field(u32, blink1)
+               __field(u32, blink2)
+               __field(u32, ilink1)
+               __field(u32, ilink2)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->desc = desc;
+               __entry->time = time;
+               __entry->data1 = data1;
+               __entry->data2 = data2;
+               __entry->line = line;
+               __entry->blink1 = blink1;
+               __entry->blink2 = blink2;
+               __entry->ilink1 = ilink1;
+               __entry->ilink2 = ilink2;
+       ),
+       TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+                 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
+                 __entry->priv, __entry->desc, __entry->time, __entry->data1,
+                 __entry->data2, __entry->line, __entry->blink1,
+                 __entry->blink2, __entry->ilink1, __entry->ilink2)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
+       TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+       TP_ARGS(priv, time, data, ev),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(u32, time)
+               __field(u32, data)
+               __field(u32, ev)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->time = time;
+               __entry->data = data;
+               __entry->ev = ev;
+       ),
+       TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+                 __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644 (file)
index 0000000..04c5648
--- /dev/null
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-io.h"
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The iwlegacy_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into priv->channel_info_24/52 and priv->channel_map_24/52
+ *
+ * channel_map_24/52 provides the index in the channel_info array for a
+ * given channel.  We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware.  This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel.  There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 iwlegacy_eeprom_band_1[14] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 iwlegacy_eeprom_band_2[] = {   /* 4915-5080MHz */
+       183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwlegacy_eeprom_band_3[] = {   /* 5170-5320MHz */
+       34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwlegacy_eeprom_band_4[] = {   /* 5500-5700MHz */
+       100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwlegacy_eeprom_band_5[] = {   /* 5725-5825MHz */
+       145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwlegacy_eeprom_band_6[] = {       /* 2.4 ht40 channel */
+       1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwlegacy_eeprom_band_7[] = {       /* 5.2 ht40 channel */
+       36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
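As a concrete instance of the mapping described in the comment block above, index 4 of the band-3 table resolves to channel 42 in the 5.2 GHz spectrum; a throwaway helper (illustrative only, not driver code) makes the lookup explicit:

static u8 example_band3_channel(unsigned int idx)
{
        return iwlegacy_eeprom_band_3[idx];   /* example_band3_channel(4) == 42 */
}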
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
+{
+       u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+       int ret = 0;
+
+       IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+       switch (gp) {
+       case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+       case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+               break;
+       default:
+               IWL_ERR(priv, "bad EEPROM signature, "
+                       "EEPROM_GP=0x%08x\n", gp);
+               ret = -ENOENT;
+               break;
+       }
+       return ret;
+}
+
+const u8
+*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+{
+       BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
+       return &priv->eeprom[offset];
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
+
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+{
+       if (!priv->eeprom)
+               return 0;
+       return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
+
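The accessor assembles the two bytes little-endian. As a sketch (mirroring the call made later in iwl_legacy_eeprom_init()), reading the NVM version word at byte offset EEPROM_VERSION looks like:

static u16 example_read_nvm_version(const struct iwl_priv *priv)
{
        return iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
}
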
+/**
+ * iwl_legacy_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into priv->eeprom
+ *
+ * NOTE:  This routine uses the non-debug IO access functions.
+ */
+int iwl_legacy_eeprom_init(struct iwl_priv *priv)
+{
+       __le16 *e;
+       u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+       int sz;
+       int ret;
+       u16 addr;
+
+       /* allocate eeprom */
+       sz = priv->cfg->base_params->eeprom_size;
+       IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
+       priv->eeprom = kzalloc(sz, GFP_KERNEL);
+       if (!priv->eeprom) {
+               ret = -ENOMEM;
+               goto alloc_err;
+       }
+       e = (__le16 *)priv->eeprom;
+
+       priv->cfg->ops->lib->apm_ops.init(priv);
+
+       ret = iwl_legacy_eeprom_verify_signature(priv);
+       if (ret < 0) {
+               IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+               ret = -ENOENT;
+               goto err;
+       }
+
+       /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+       ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
+       if (ret < 0) {
+               IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
+               ret = -ENOENT;
+               goto err;
+       }
+
+       /* EEPROM is an array of 16-bit values */
+       for (addr = 0; addr < sz; addr += sizeof(u16)) {
+               u32 r;
+
+               _iwl_legacy_write32(priv, CSR_EEPROM_REG,
+                            CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+               ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+                                         CSR_EEPROM_REG_READ_VALID_MSK,
+                                         CSR_EEPROM_REG_READ_VALID_MSK,
+                                         IWL_EEPROM_ACCESS_TIMEOUT);
+               if (ret < 0) {
+                       IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
+                                                       addr);
+                       goto done;
+               }
+               r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
+               e[addr / 2] = cpu_to_le16(r >> 16);
+       }
+
+       IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
+                      "EEPROM",
+                      iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
+
+       ret = 0;
+done:
+       priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
+err:
+       if (ret)
+               iwl_legacy_eeprom_free(priv);
+       /* Reset chip to save power until we load uCode during "up". */
+       iwl_legacy_apm_stop(priv);
+alloc_err:
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_init);
+
+void iwl_legacy_eeprom_free(struct iwl_priv *priv)
+{
+       kfree(priv->eeprom);
+       priv->eeprom = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_free);
+
+static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
+                       int eep_band, int *eeprom_ch_count,
+                       const struct iwl_eeprom_channel **eeprom_ch_info,
+                       const u8 **eeprom_ch_index)
+{
+       u32 offset = priv->cfg->ops->lib->
+                       eeprom_ops.regulatory_bands[eep_band - 1];
+       switch (eep_band) {
+       case 1:         /* 2.4GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_1;
+               break;
+       case 2:         /* 4.9GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_2;
+               break;
+       case 3:         /* 5.2GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_3;
+               break;
+       case 4:         /* 5.5GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_4;
+               break;
+       case 5:         /* 5.7GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_5;
+               break;
+       case 6:         /* 2.4GHz ht40 channels */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_6;
+               break;
+       case 7:         /* 5 GHz ht40 channels */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_7;
+               break;
+       default:
+               BUG();
+               return;
+       }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+                           ? # x " " : "")
+/**
+ * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
+                             enum ieee80211_band band, u16 channel,
+                             const struct iwl_eeprom_channel *eeprom_ch,
+                             u8 clear_ht40_extension_channel)
+{
+       struct iwl_channel_info *ch_info;
+
+       ch_info = (struct iwl_channel_info *)
+                       iwl_legacy_get_channel_info(priv, band, channel);
+
+       if (!iwl_legacy_is_channel_valid(ch_info))
+               return -1;
+
+       IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+                       " Ad-Hoc %ssupported\n",
+                       ch_info->channel,
+                       iwl_legacy_is_channel_a_band(ch_info) ?
+                       "5.2" : "2.4",
+                       CHECK_AND_PRINT(IBSS),
+                       CHECK_AND_PRINT(ACTIVE),
+                       CHECK_AND_PRINT(RADAR),
+                       CHECK_AND_PRINT(WIDE),
+                       CHECK_AND_PRINT(DFS),
+                       eeprom_ch->flags,
+                       eeprom_ch->max_power_avg,
+                       ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
+                        && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
+                       "" : "not ");
+
+       ch_info->ht40_eeprom = *eeprom_ch;
+       ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+       ch_info->ht40_flags = eeprom_ch->flags;
+       if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+               ch_info->ht40_extension_channel &=
+                                       ~clear_ht40_extension_channel;
+
+       return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+                           ? # x " " : "")
+
+/**
+ * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
+ */
+int iwl_legacy_init_channel_map(struct iwl_priv *priv)
+{
+       int eeprom_ch_count = 0;
+       const u8 *eeprom_ch_index = NULL;
+       const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
+       int band, ch;
+       struct iwl_channel_info *ch_info;
+
+       if (priv->channel_count) {
+               IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
+               return 0;
+       }
+
+       IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
+
+       priv->channel_count =
+           ARRAY_SIZE(iwlegacy_eeprom_band_1) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_2) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_3) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_4) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_5);
+
+       IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
+                       priv->channel_count);
+
+       priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
+                                    priv->channel_count, GFP_KERNEL);
+       if (!priv->channel_info) {
+               IWL_ERR(priv, "Could not allocate channel_info\n");
+               priv->channel_count = 0;
+               return -ENOMEM;
+       }
+
+       ch_info = priv->channel_info;
+
+       /* Loop through the 5 EEPROM bands, adding them in order to the
+        * channel map we maintain (which contains additional information
+        * beyond what is stored in the EEPROM) */
+       for (band = 1; band <= 5; band++) {
+
+               iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+                                       &eeprom_ch_info, &eeprom_ch_index);
+
+               /* Loop through each band adding each of the channels */
+               for (ch = 0; ch < eeprom_ch_count; ch++) {
+                       ch_info->channel = eeprom_ch_index[ch];
+                       ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
+                           IEEE80211_BAND_5GHZ;
+
+                       /* permanently store EEPROM's channel regulatory flags
+                        *   and max power in channel info database. */
+                       ch_info->eeprom = eeprom_ch_info[ch];
+
+                       /* Copy the run-time flags so they are there even on
+                        * invalid channels */
+                       ch_info->flags = eeprom_ch_info[ch].flags;
+                       /* Mark HT40 as not enabled first; it is enabled
+                        * per channel pair further below */
+                       ch_info->ht40_extension_channel =
+                                       IEEE80211_CHAN_NO_HT40;
+
+                       if (!(iwl_legacy_is_channel_valid(ch_info))) {
+                               IWL_DEBUG_EEPROM(priv,
+                                              "Ch. %d Flags %x [%sGHz] - "
+                                              "No traffic\n",
+                                              ch_info->channel,
+                                              ch_info->flags,
+                                              iwl_legacy_is_channel_a_band(ch_info) ?
+                                              "5.2" : "2.4");
+                               ch_info++;
+                               continue;
+                       }
+
+                       /* Initialize regulatory-based run-time data */
+                       ch_info->max_power_avg = ch_info->curr_txpow =
+                           eeprom_ch_info[ch].max_power_avg;
+                       ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+                       ch_info->min_power = 0;
+
+                       IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
+                                      "%s%s%s%s%s%s(0x%02x %ddBm):"
+                                      " Ad-Hoc %ssupported\n",
+                                      ch_info->channel,
+                                      iwl_legacy_is_channel_a_band(ch_info) ?
+                                      "5.2" : "2.4",
+                                      CHECK_AND_PRINT_I(VALID),
+                                      CHECK_AND_PRINT_I(IBSS),
+                                      CHECK_AND_PRINT_I(ACTIVE),
+                                      CHECK_AND_PRINT_I(RADAR),
+                                      CHECK_AND_PRINT_I(WIDE),
+                                      CHECK_AND_PRINT_I(DFS),
+                                      eeprom_ch_info[ch].flags,
+                                      eeprom_ch_info[ch].max_power_avg,
+                                      ((eeprom_ch_info[ch].
+                                        flags & EEPROM_CHANNEL_IBSS)
+                                       && !(eeprom_ch_info[ch].
+                                            flags & EEPROM_CHANNEL_RADAR))
+                                      ? "" : "not ");
+
+                       /* Set the tx_power_user_lmt to the highest power
+                        * supported by any channel */
+                       if (eeprom_ch_info[ch].max_power_avg >
+                                               priv->tx_power_user_lmt)
+                               priv->tx_power_user_lmt =
+                                   eeprom_ch_info[ch].max_power_avg;
+
+                       ch_info++;
+               }
+       }
+
+       /* Check if we do have HT40 channels */
+       if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+           EEPROM_REGULATORY_BAND_NO_HT40 &&
+           priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+           EEPROM_REGULATORY_BAND_NO_HT40)
+               return 0;
+
+       /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+       for (band = 6; band <= 7; band++) {
+               enum ieee80211_band ieeeband;
+
+               iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+                                       &eeprom_ch_info, &eeprom_ch_index);
+
+               /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+               ieeeband =
+                       (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+               /* Loop through each band adding each of the channels */
+               for (ch = 0; ch < eeprom_ch_count; ch++) {
+                       /* Set up driver's info for lower half */
+                       iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+                                               eeprom_ch_index[ch],
+                                               &eeprom_ch_info[ch],
+                                               IEEE80211_CHAN_NO_HT40PLUS);
+
+                       /* Set up driver's info for upper half */
+                       iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+                                               eeprom_ch_index[ch] + 4,
+                                               &eeprom_ch_info[ch],
+                                               IEEE80211_CHAN_NO_HT40MINUS);
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_channel_map);
+
+/*
+ * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
+ */
+void iwl_legacy_free_channel_map(struct iwl_priv *priv)
+{
+       kfree(priv->channel_info);
+       priv->channel_count = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_free_channel_map);
+
+/**
+ * iwl_legacy_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct
+iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
+                                       enum ieee80211_band band, u16 channel)
+{
+       int i;
+
+       switch (band) {
+       case IEEE80211_BAND_5GHZ:
+               for (i = 14; i < priv->channel_count; i++) {
+                       if (priv->channel_info[i].channel == channel)
+                               return &priv->channel_info[i];
+               }
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (channel >= 1 && channel <= 14)
+                       return &priv->channel_info[channel - 1];
+               break;
+       default:
+               BUG();
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644 (file)
index 0000000..c59c810
--- /dev/null
@@ -0,0 +1,344 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_eeprom_h__
+#define __iwl_legacy_eeprom_h__
+
+#include <net/mac80211.h>
+
+struct iwl_priv;
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT      5000 /* uSec */
+
+#define IWL_EEPROM_SEM_TIMEOUT         10   /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT     1000 /* number of attempts (not time) */
+
+
+/*
+ * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ *        It only indicates that 20 MHz channel use is supported; HT40 channel
+ *        usage is indicated by a separate set of regulatory flags for each
+ *        HT40 channel pair.
+ *
+ * NOTE:  Using a channel inappropriately will result in a uCode error!
+ */
+#define IWL_NUM_TX_CALIB_GROUPS 5
+enum {
+       EEPROM_CHANNEL_VALID = (1 << 0),        /* usable for this SKU/geo */
+       EEPROM_CHANNEL_IBSS = (1 << 1),         /* usable as an IBSS channel */
+       /* Bit 2 Reserved */
+       EEPROM_CHANNEL_ACTIVE = (1 << 3),       /* active scanning allowed */
+       EEPROM_CHANNEL_RADAR = (1 << 4),        /* radar detection required */
+       EEPROM_CHANNEL_WIDE = (1 << 5),         /* 20 MHz channel okay */
+       /* Bit 6 Reserved (was Narrow Channel) */
+       EEPROM_CHANNEL_DFS = (1 << 7),  /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct iwl_eeprom_channel {
+       u8 flags;               /* EEPROM_CHANNEL_* flags copied from EEPROM */
+       s8 max_power_avg;       /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION     (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS      (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS          (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS   (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION    (5)
+#define EEPROM_4965_EEPROM_VERSION     (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8) /* 48  bytes */
+#define EEPROM_4965_BOARD_REVISION             (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA                  (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 iwlegacy_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna).  EEPROM contains:
+ *
+ * 1)  Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2)  Gain table index used to achieve the target measurement power.
+ *     This refers to the "well-known" gain tables (see iwl-4965-hw.h).
+ *
+ * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4)  RF power amplifier detector level measurement (not used).
+ */
+struct iwl_eeprom_calib_measure {
+       u8 temperature;         /* Device temperature (Celsius) */
+       u8 gain_idx;            /* Index into gain table */
+       u8 actual_pow;          /* Measured RF output power, half-dBm */
+       s8 pa_det;              /* Power amp detector level (not used) */
+} __packed;
+
+
+/*
+ * measurement set for one channel.  EEPROM contains:
+ *
+ * 1)  Channel number measured
+ *
+ * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
+ *     (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct iwl_eeprom_calib_ch_info {
+       u8 ch_num;
+       struct iwl_eeprom_calib_measure
+               measurements[EEPROM_TX_POWER_TX_CHAINS]
+                       [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1)  First and last channels within range of the subband.  "0" values
+ *     indicate that this sample set is not being used.
+ *
+ * 2)  Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct iwl_eeprom_calib_subband_info {
+       u8 ch_from;     /* channel number of lowest channel in subband */
+       u8 ch_to;       /* channel number of highest channel in subband */
+       struct iwl_eeprom_calib_ch_info ch1;
+       struct iwl_eeprom_calib_ch_info ch2;
+} __packed;
+
+
+/*
+ * txpower calibration info.  EEPROM contains:
+ *
+ * 1)  Factory-measured saturation power levels (maximum levels at which
+ *     tx power amplifier can output a signal without too much distortion).
+ *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
+ *     values apply to all channels within each of the bands.
+ *
+ * 2)  Factory-measured power supply voltage level.  This is assumed to be
+ *     constant (i.e. same value applies to all channels/bands) while the
+ *     factory measurements are being made.
+ *
+ * 3)  Up to 8 sets of factory-measured txpower calibration values.
+ *     These are for different frequency ranges, since txpower gain
+ *     characteristics of the analog radio circuitry vary with frequency.
+ *
+ *     Not all sets need to be filled with data;
+ *     struct iwl_eeprom_calib_subband_info contains range of channels
+ *     (0 if unused) for each set of data.
+ */
+struct iwl_eeprom_calib_info {
+       u8 saturation_power24;  /* half-dBm (e.g. "34" = 17 dBm) */
+       u8 saturation_power52;  /* half-dBm */
+       __le16 voltage;         /* signed */
+       struct iwl_eeprom_calib_subband_info
+               band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+
+/* General */
+#define EEPROM_DEVICE_ID                    (2*0x08)   /* 2 bytes */
+#define EEPROM_MAC_ADDRESS                  (2*0x15)   /* 6  bytes */
+#define EEPROM_BOARD_REVISION               (2*0x35)   /* 2  bytes */
+#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1) /* 9  bytes */
+#define EEPROM_VERSION                      (2*0x44)   /* 2  bytes */
+#define EEPROM_SKU_CAP                      (2*0x45)   /* 2  bytes */
+#define EEPROM_OEM_MODE                     (2*0x46)   /* 2  bytes */
+#define EEPROM_WOWLAN_MODE                  (2*0x47)   /* 2  bytes */
+#define EEPROM_RADIO_CONFIG                 (2*0x48)   /* 2  bytes */
+#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)   /* 2  bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
+#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
+#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
+#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
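For illustration, the masks above decode the 16-bit EEPROM_RADIO_CONFIG word like so (a sketch, not the driver's parsing path):

static void example_decode_radio_cfg(const struct iwl_priv *priv)
{
        u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

        pr_debug("rf type %d, step %d, dash %d, tx ant 0x%x, rx ant 0x%x\n",
                 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
                 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
                 EEPROM_RF_CFG_DASH_MSK(radio_cfg),
                 EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg),
                 EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg));
}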
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID            (2*0x60)    /* 4  bytes */
+#define EEPROM_REGULATORY_BAND_1            (2*0x62)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)   /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2            (2*0x71)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)   /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3            (2*0x7F)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)   /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4            (2*0x8C)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)   /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5            (2*0x98)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)   /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
+ *        control channel to which to tune.  RXON also specifies whether the
+ *        control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)  /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)  /* 22 bytes */
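The pairing rule spelled out above boils down to two fixed offsets, shown here as throwaway helpers (the driver applies the same "+ 4" step in iwl_legacy_init_channel_map()):

static inline u8 example_ht40_upper_half(u8 lower_half)
{
        return lower_half + 4;          /* upper 20 MHz half, 20 MHz above */
}

static inline u8 example_ht40_center(u8 lower_half)
{
        return lower_half + 2;          /* 40 MHz center, 10 MHz above */
}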
+
+#define EEPROM_REGULATORY_BAND_NO_HT40                 (0)
+
+struct iwl_eeprom_ops {
+       const u32 regulatory_bands[7];
+       int (*acquire_semaphore) (struct iwl_priv *priv);
+       void (*release_semaphore) (struct iwl_priv *priv);
+};
+
+
+int iwl_legacy_eeprom_init(struct iwl_priv *priv);
+void iwl_legacy_eeprom_free(struct iwl_priv *priv);
+const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
+                                       size_t offset);
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+int iwl_legacy_init_channel_map(struct iwl_priv *priv);
+void iwl_legacy_free_channel_map(struct iwl_priv *priv);
+const struct iwl_channel_info *iwl_legacy_get_channel_info(
+               const struct iwl_priv *priv,
+               enum ieee80211_band band, u16 channel);
+
+#endif  /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644 (file)
index 0000000..4e20c7e
--- /dev/null
@@ -0,0 +1,513 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_fh_h__
+#define __iwl_legacy_fh_h__
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH_MEM_LOWER_BOUND                   (0x1000)
+#define FH_MEM_UPPER_BOUND                   (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH_KW_MEM_ADDR_REG                  (FH_MEM_LOWER_BOUND + 0x97C)
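A sketch of the setup described above (not the driver's init path, and assuming <linux/dma-mapping.h>; iwl_legacy_write_direct32() is assumed from iwl-io.h): allocate the 4 KB DMA-coherent buffer and hand bits [35:4] of its bus address to the device. A real implementation keeps the pointer so the buffer can be freed later.

static int example_setup_keep_warm(struct iwl_priv *priv, struct device *dev)
{
        dma_addr_t kw_dma;
        void *kw = dma_alloc_coherent(dev, 4096, &kw_dma, GFP_KERNEL);

        if (!kw)
                return -ENOMEM;
        /* dma_alloc_coherent() returns at least page (4K) aligned memory */
        iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, kw_dma >> 4);
        return 0;
}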
+
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
+ * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH_MEM_CBBC_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH_MEM_CBBC_QUEUE(x)  (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
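For example (a sketch; the real call lives in the Tx queue init code), programming queue txq_id's TFD circular-buffer base writes address bits [35:8]; iwl_legacy_write_direct32() is assumed from iwl-io.h:

static void example_set_tfd_cb_base(struct iwl_priv *priv, int txq_id,
                                    dma_addr_t tfd_cb_dma)
{
        /* tfd_cb_dma must be 256-byte aligned, per the comment above */
        iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                                  tfd_cb_dma >> 8);
}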
+
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ *     entries (although any power of 2, up to 4096, is selectable by driver).
+ *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ *     (typically 4K, although 8K or 16K are also selectable by driver).
+ *     Driver sets up RB size and number of RBDs in the CB via Rx config
+ *     register FH_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ *     Bit fields within one RBD:
+ *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ *     Driver sets physical address [35:8] of base of RBD circular buffer
+ *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ *     (RBs) have been filled, via a "write pointer", actually the index of
+ *     the RB's corresponding RBD within the circular buffer.  Driver sets
+ *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ *     Bit fields in lower dword of Rx status buffer (upper dword not used
+ *     by driver; see struct iwl4965_shared, val0):
+ *     31-12:  Not used by driver
+ *     11- 0:  Index of last filled Rx buffer descriptor
+ *             (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" index register,
+ * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" index corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer.  This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" index has advanced past 1!  See below).
+ * NOTE:  4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the index of the latest filled RBD.  The driver must
+ * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third index, which is the
+ * next RBD to process.  When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index.  For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0.  Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH_MEM_RSCSR_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0xBC0)
+#define FH_MEM_RSCSR_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RSCSR_CHNL0             (FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_STTS_WPTR_REG   (FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ *  27-0:  RBD CD physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_RBDCB_BASE_REG  (FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG  (FH_MEM_RSCSR_CHNL0 + 0x008)
+#define FH_RSCSR_CHNL0_WPTR        (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
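A sketch of the restocking rule in the NOTE above: the index handed to the device is rounded down to a multiple of 8 RBDs (the write helper is assumed from iwl-io.h):

static void example_update_rx_write_ptr(struct iwl_priv *priv, u32 next_free)
{
        u32 wptr = next_free & ~7;      /* advance in multiples of 8 */

        iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, wptr);
}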
+
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG        for
+ * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ *        '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ *        typical value 0x10 (about 1/2 msec)
+ *  3- 0: reserved
+ */
+#define FH_MEM_RCSR_LOWER_BOUND      (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RCSR_UPPER_BOUND      (FH_MEM_LOWER_BOUND + 0xCC0)
+#define FH_MEM_RCSR_CHNL0            (FH_MEM_RCSR_LOWER_BOUND)
+
+#define FH_MEM_RCSR_CHNL0_CONFIG_REG   (FH_MEM_RCSR_CHNL0)
+
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bits 12 */
+#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000) /* bits 16-17 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
+
+#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS       (20)
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS     (4)
+#define RX_RB_TIMEOUT  (0x10)
+
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
+
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
+
+#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
+
+#define FH_RSCSR_FRAME_SIZE_MSK        (0x00003FFF)    /* bits 0-13 */
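Pulling the fields together, a plausible channel-0 Rx configuration value (DMA enabled, 4 KB buffers, 256 RBDs, interrupt the host, RB timeout 0x10) can be composed from the constants above; this is a sketch, the authoritative value is built in the hardware-specific Rx init code:

static u32 example_chnl0_rx_config(void)
{
        return FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
               FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
               FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
               (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |       /* 2^8 = 256 RBDs */
               (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
}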
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ *  24:  1 = Channel 0 is idle
+ *
+ * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH_MEM_RSSR_LOWER_BOUND           (FH_MEM_LOWER_BOUND + 0xC40)
+#define FH_MEM_RSSR_UPPER_BOUND           (FH_MEM_LOWER_BOUND + 0xD00)
+
+#define FH_MEM_RSSR_SHARED_CTRL_REG       (FH_MEM_RSSR_LOWER_BOUND)
+#define FH_MEM_RSSR_RX_STATUS_REG      (FH_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+                                       (FH_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE      (0x01000000)
+
+#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT        28
+
+/* TFDB  Area - TFDs buffer table */
+#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
+#define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
+#define FH_TFDIB_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0x958)
+#define FH_TFDIB_CTRL0_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH_TFDIB_CTRL1_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ *     3: Enable internal DMA requests (1, normal operation), disable (0)
+ *  2- 0: Reserved, set to "0"
+ */
+#define FH_TCSR_LOWER_BOUND  (FH_MEM_LOWER_BOUND + 0xD00)
+#define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)
+
+/* Maximum number of Tx DMA/FIFO channels */
+#define FH49_TCSR_CHNL_NUM                            (7)
+#define FH50_TCSR_CHNL_NUM                            (8)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)      \
+               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl)      \
+               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)     \
+               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+/* TCSR: tx_config register values */
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF         (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV         (0x00000001)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE   (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE    (0x00000008)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT      (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD     (0x00100000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD      (0x00200000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT       (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD      (0x00400000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD       (0x00800000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE       (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF   (0x40000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE      (0x80000000)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY     (0x00000000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT      (0x00002000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID     (0x00000003)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM         (20)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX         (12)
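+
+/*
+ * Example (illustrative sketch of the initialization described in the
+ * TCSR comment above): enabling Tx DMA channel 'chnl' would look like
+ *
+ *   iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *             FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *             FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ *
+ * and writing FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0) to the same
+ * register pauses the channel again.
+ */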
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24:  1 = Channel buffers empty (channel 7:0)
+ * 23-16:  1 = No pending requests (channel 7:0)
+ */
+#define FH_TSSR_LOWER_BOUND            (FH_MEM_LOWER_BOUND + 0xEA0)
+#define FH_TSSR_UPPER_BOUND            (FH_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH_TSSR_TX_STATUS_REG          (FH_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR (Tx Shared Status & Control) error status register:
+ * 31:  Indicates an address error when accessing internal memory
+ *     uCode/driver must write "1" in order to clear this flag
+ * 30:  Indicates that Host did not send the expected number of dwords to FH
+ *     uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ *     command was received from the scheduler while the TRB was already full
+ *     with the previous command
+ *     uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ *     bit is set, it indicates that the FH has received a full indication
+ *     from the RTC TxFIFO and the current value of the TxCredit counter was
+ *     not equal to zero. This means that the credit mechanism was not
+ *     synchronized to the TxFIFO status
+ *     uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG           (FH_TSSR_LOWER_BOUND + 0x018)
+
+#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
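+
+/*
+ * Example (illustrative sketch): after writing 0 to
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl), a driver would typically wait with
+ *
+ *   iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ *             FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
+ */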
+
+/* Tx service channels */
+#define FH_SRVC_CHNL           (9)
+#define FH_SRVC_LOWER_BOUND    (FH_MEM_LOWER_BOUND + 0x9C8)
+#define FH_SRVC_UPPER_BOUND    (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+               (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from memory to the Tx FIFO
+ */
+#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN   (0x00000002)
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
+/**
+ * struct iwl_rb_status - receive buffer status
+ *     host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ *     in which the last frame was written
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ *     which was transferred
+ */
+struct iwl_rb_status {
+       __le16 closed_rb_num;
+       __le16 closed_fr_num;
+       __le16 finished_rb_num;
+       __le16 finished_fr_nam;
+       __le32 __unused; /* 3945 only */
+} __packed;
+
+
+#define TFD_QUEUE_SIZE_MAX      (256)
+#define TFD_QUEUE_SIZE_BC_DUP  (64)
+#define TFD_QUEUE_BC_SIZE      (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
+#define IWL_NUM_OF_TBS         20
+
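+/* Returns bits [35:32] of a (36-bit) DMA address, as packed into the low
+ * nibble of struct iwl_tfd_tb's hi_n_len field (see below). */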
+static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
+{
+       return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+/**
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of a Tx buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer;
+ *     every second TB is aligned only on a 16-bit boundary
+ * @hi_n_len: bits 0-3 hold the [35:32] portion of the dma address,
+ *          bits 4-15 the length of the tx buffer
+ */
+struct iwl_tfd_tb {
+       __le32 lo;
+       __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ *
+ * @__reserved1: reserved
+ * @num_tbs: bits 0-4 number of active tbs
+ *          bit  5   reserved
+ *          bits 6-7 padding (not used)
+ * @tbs: transmit frame buffer descriptors (up to IWL_NUM_OF_TBS)
+ * @__pad: padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM.  These buffers collectively contain the (one) frame described
+ * by the TFD.  Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
+ * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct iwl_tfd {
+       u8 __reserved1[3];
+       u8 num_tbs;
+       struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+       __le32 __pad;
+} __packed;
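+
+/*
+ * Example (illustrative sketch, with hypothetical locals 'tfd', 'addr',
+ * 'len' and 'idx'): attaching a DMA-mapped buffer as TB number 'idx'
+ * packs the 36-bit address and 12-bit length like this:
+ *
+ *   struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ *   u16 hi_n_len = len << 4;
+ *
+ *   tb->lo = cpu_to_le32(addr);
+ *   hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);
+ *   tb->hi_n_len = cpu_to_le16(hi_n_len);
+ *   tfd->num_tbs = idx + 1;
+ */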
+
+/* Keep Warm Size */
+#define IWL_KW_SIZE 0x1000     /* 4k */
+
+#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644 (file)
index 0000000..9d721cb
--- /dev/null
@@ -0,0 +1,271 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-core.h"
+
+
+const char *iwl_legacy_get_cmd_string(u8 cmd)
+{
+       switch (cmd) {
+               IWL_CMD(REPLY_ALIVE);
+               IWL_CMD(REPLY_ERROR);
+               IWL_CMD(REPLY_RXON);
+               IWL_CMD(REPLY_RXON_ASSOC);
+               IWL_CMD(REPLY_QOS_PARAM);
+               IWL_CMD(REPLY_RXON_TIMING);
+               IWL_CMD(REPLY_ADD_STA);
+               IWL_CMD(REPLY_REMOVE_STA);
+               IWL_CMD(REPLY_WEPKEY);
+               IWL_CMD(REPLY_3945_RX);
+               IWL_CMD(REPLY_TX);
+               IWL_CMD(REPLY_RATE_SCALE);
+               IWL_CMD(REPLY_LEDS_CMD);
+               IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+               IWL_CMD(REPLY_CHANNEL_SWITCH);
+               IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+               IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+               IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+               IWL_CMD(POWER_TABLE_CMD);
+               IWL_CMD(PM_SLEEP_NOTIFICATION);
+               IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+               IWL_CMD(REPLY_SCAN_CMD);
+               IWL_CMD(REPLY_SCAN_ABORT_CMD);
+               IWL_CMD(SCAN_START_NOTIFICATION);
+               IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+               IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+               IWL_CMD(BEACON_NOTIFICATION);
+               IWL_CMD(REPLY_TX_BEACON);
+               IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+               IWL_CMD(REPLY_BT_CONFIG);
+               IWL_CMD(REPLY_STATISTICS_CMD);
+               IWL_CMD(STATISTICS_NOTIFICATION);
+               IWL_CMD(CARD_STATE_NOTIFICATION);
+               IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+               IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+               IWL_CMD(SENSITIVITY_CMD);
+               IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+               IWL_CMD(REPLY_RX_PHY_CMD);
+               IWL_CMD(REPLY_RX_MPDU_CMD);
+               IWL_CMD(REPLY_RX);
+               IWL_CMD(REPLY_COMPRESSED_BA);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
+                                    struct iwl_device_cmd *cmd,
+                                    struct iwl_rx_packet *pkt)
+{
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
+               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+               return;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       switch (cmd->hdr.cmd) {
+       case REPLY_TX_LINK_QUALITY_CMD:
+       case SENSITIVITY_CMD:
+               IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
+               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+               break;
+       default:
+               IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
+               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+       }
+#endif
+}
+
+static int
+iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       int ret;
+
+       BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+       /* An asynchronous command cannot expect an SKB to be set. */
+       BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+       /* Assign a generic callback if one is not provided */
+       if (!cmd->callback)
+               cmd->callback = iwl_legacy_generic_cmd_callback;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EBUSY;
+
+       ret = iwl_legacy_enqueue_hcmd(priv, cmd);
+       if (ret < 0) {
+               IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+                         iwl_legacy_get_cmd_string(cmd->id), ret);
+               return ret;
+       }
+       return 0;
+}
+
+int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       int cmd_idx;
+       int ret;
+
+       BUG_ON(cmd->flags & CMD_ASYNC);
+
+       /* A synchronous command cannot have a callback set. */
+       BUG_ON(cmd->callback);
+
+       IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+                       iwl_legacy_get_cmd_string(cmd->id));
+       mutex_lock(&priv->sync_cmd_mutex);
+
+       set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+       IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+                       iwl_legacy_get_cmd_string(cmd->id));
+
+       cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
+       if (cmd_idx < 0) {
+               ret = cmd_idx;
+               IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+                         iwl_legacy_get_cmd_string(cmd->id), ret);
+               goto out;
+       }
+
+       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+                       !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+                       HOST_COMPLETE_TIMEOUT);
+       if (!ret) {
+               if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
+                       IWL_ERR(priv,
+                               "Error sending %s: time out after %dms.\n",
+                               iwl_legacy_get_cmd_string(cmd->id),
+                               jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+                       clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+                       IWL_DEBUG_INFO(priv,
+                               "Clearing HCMD_ACTIVE for command %s\n",
+                                      iwl_legacy_get_cmd_string(cmd->id));
+                       ret = -ETIMEDOUT;
+                       goto cancel;
+               }
+       }
+
+       if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
+               IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+                              iwl_legacy_get_cmd_string(cmd->id));
+               ret = -ECANCELED;
+               goto fail;
+       }
+       if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+               IWL_ERR(priv, "Command %s failed: FW Error\n",
+                              iwl_legacy_get_cmd_string(cmd->id));
+               ret = -EIO;
+               goto fail;
+       }
+       if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+               IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+                         iwl_legacy_get_cmd_string(cmd->id));
+               ret = -EIO;
+               goto cancel;
+       }
+
+       ret = 0;
+       goto out;
+
+cancel:
+       if (cmd->flags & CMD_WANT_SKB) {
+               /*
+                * Cancel the CMD_WANT_SKB flag for the cmd in the
+                * TX cmd queue. Otherwise in case the cmd comes
+                * in later, it will possibly set an invalid
+                * address (cmd->meta.source).
+                */
+               priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+                                                       ~CMD_WANT_SKB;
+       }
+fail:
+       if (cmd->reply_page) {
+               iwl_legacy_free_pages(priv, cmd->reply_page);
+               cmd->reply_page = 0;
+       }
+out:
+       mutex_unlock(&priv->sync_cmd_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
+
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       if (cmd->flags & CMD_ASYNC)
+               return iwl_legacy_send_cmd_async(priv, cmd);
+
+       return iwl_legacy_send_cmd_sync(priv, cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd);
+
+int
+iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = len,
+               .data = data,
+       };
+
+       return iwl_legacy_send_cmd_sync(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
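+
+/*
+ * Example (illustrative sketch): for fixed-format commands the synchronous
+ * helper above reduces sending to a one-liner, e.g. with a hypothetical
+ * 'bt_cmd' structure already filled in:
+ *
+ *   ret = iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ *                                 sizeof(bt_cmd), &bt_cmd);
+ */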
+
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
+                          u8 id, u16 len, const void *data,
+                          void (*callback)(struct iwl_priv *priv,
+                                           struct iwl_device_cmd *cmd,
+                                           struct iwl_rx_packet *pkt))
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = len,
+               .data = data,
+       };
+
+       cmd.flags |= CMD_ASYNC;
+       cmd.callback = callback;
+
+       return iwl_legacy_send_cmd_async(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644 (file)
index 0000000..02132e7
--- /dev/null
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_helpers_h__
+#define __iwl_legacy_helpers_h__
+
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
+
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+
+static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
+       struct ieee80211_hw *hw)
+{
+       return &hw->conf;
+}
+
+/**
+ * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
+{
+       return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
+{
+       return --index & (n_bd - 1);
+}
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
+                                   struct fw_desc *desc)
+{
+       if (desc->v_addr)
+               dma_free_coherent(&pci_dev->dev, desc->len,
+                                 desc->v_addr, desc->p_addr);
+       desc->v_addr = NULL;
+       desc->len = 0;
+}
+
+static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
+                                   struct fw_desc *desc)
+{
+       if (!desc->len) {
+               desc->v_addr = NULL;
+               return -EINVAL;
+       }
+
+       desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+                                         &desc->p_addr, GFP_KERNEL);
+       return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
+{
+       BUG_ON(ac > 3);   /* only have 2 bits */
+       BUG_ON(hwq > 31); /* only use 5 bits */
+
+       txq->swq_id = (hwq << 2) | ac;
+}
+
+static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
+                                 struct iwl_tx_queue *txq)
+{
+       u8 queue = txq->swq_id;
+       u8 ac = queue & 3;
+       u8 hwq = (queue >> 2) & 0x1f;
+
+       if (test_and_clear_bit(hwq, priv->queue_stopped))
+               if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
+                       ieee80211_wake_queue(priv->hw, ac);
+}
+
+static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
+                                 struct iwl_tx_queue *txq)
+{
+       u8 queue = txq->swq_id;
+       u8 ac = queue & 3;
+       u8 hwq = (queue >> 2) & 0x1f;
+
+       if (!test_and_set_bit(hwq, priv->queue_stopped))
+               if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
+                       ieee80211_stop_queue(priv->hw, ac);
+}
+
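+/*
+ * Shadow the mac80211 queue helpers so that callers within this driver are
+ * forced to use the wrappers above, which keep the per-HW-queue stop bits
+ * and per-AC reference counts consistent.
+ */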
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
+{
+       clear_bit(STATUS_INT_ENABLED, &priv->status);
+
+       /* disable interrupts from uCode/NIC to host */
+       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+       /* acknowledge/clear/reset any interrupts still pending
+        * from uCode or flow handler (Rx/Tx DMA) */
+       iwl_write32(priv, CSR_INT, 0xffffffff);
+       iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
+       IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
+}
+
+static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
+{
+       IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
+       set_bit(STATUS_INT_ENABLED, &priv->status);
+       iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_low - mask of lower 32 bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
+                                          u16 tsf_bits)
+{
+       return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_high - mask of higher 32 bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
+                                           u16 tsf_bits)
+{
+       return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
+#endif                         /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644 (file)
index 0000000..5cc5d34
--- /dev/null
@@ -0,0 +1,545 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_io_h__
+#define __iwl_legacy_io_h__
+
+#include <linux/io.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-devtrace.h"
+
+/*
+ * IO, register, and NIC memory access functions
+ *
+ * NOTE on naming convention and macro usage for these
+ *
+ * A single _ prefix before an access function means that no state
+ * check or debug information is printed when that function is called.
+ *
+ * A double __ prefix before an access function means that state is checked
+ * and the current line number and caller function name are printed in addition
+ * to any other debug output.
+ *
+ * The non-prefixed name is the #define that maps the caller into a
+ * #define that provides the caller's name and __LINE__ to the double
+ * prefix version.
+ *
+ * If you wish to call the function without any debug or state checking,
+ * you should use the single _ prefix version (as is used by dependent IO
+ * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
+ * _iwl_legacy_read32.)
+ *
+ * These declarations are *extremely* useful in quickly isolating code deltas
+ * which result in misconfiguration of the hardware I/O.  In combination with
+ * git-bisect and the IO debug level you can quickly determine the specific
+ * commit which breaks the IO sequence to the hardware.
+ *
+ */
+
+static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+{
+       trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
+       iowrite8(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
+                                u32 ofs, u8 val)
+{
+       IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
+       _iwl_legacy_write8(priv, ofs, val);
+}
+#define iwl_write8(priv, ofs, val) \
+       __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
+#endif
+
+
+static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+{
+       trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
+       iowrite32(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
+                                u32 ofs, u32 val)
+{
+       IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
+       _iwl_legacy_write32(priv, ofs, val);
+}
+#define iwl_write32(priv, ofs, val) \
+       __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
+#endif
+
+static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
+{
+       u32 val = ioread32(priv->hw_base + ofs);
+       trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
+       return val;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32
+__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
+{
+       IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
+       return _iwl_legacy_read32(priv, ofs);
+}
+#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
+#else
+#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
+#endif
+
+#define IWL_POLL_INTERVAL 10   /* microseconds */
+static inline int
+_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
+                               u32 bits, u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
+                       return t;
+               udelay(IWL_POLL_INTERVAL);
+               t += IWL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
+                                struct iwl_priv *priv, u32 addr,
+                                u32 bits, u32 mask, int timeout)
+{
+       int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
+       IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
+                    addr, bits, mask,
+                    unlikely(ret  == -ETIMEDOUT) ? "timeout" : "", f, l);
+       return ret;
+}
+#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
+       __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
+       bits, mask, timeout)
+#else
+#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
+#endif
+
+static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_set_bit(const char *f, u32 l,
+                                struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       u32 val = _iwl_legacy_read32(priv, reg) | mask;
+       IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
+                                                       mask, val);
+       _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       _iwl_legacy_set_bit(p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline void
+_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_clear_bit(const char *f, u32 l,
+                                  struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
+       IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
+       _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       _iwl_legacy_clear_bit(p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
+{
+       int ret;
+       u32 val;
+
+       /* this bit wakes up the NIC */
+       _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /*
+        * These bits say the device is running, and should keep running for
+        * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+        * but they do not indicate that embedded SRAM is restored yet;
+        * 3945 and 4965 have volatile SRAM, and must save/restore contents
+        * to/from host DRAM when sleeping/waking for power-saving.
+        * Each direction takes approximately 1/4 millisecond; with this
+        * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+        * series of register accesses are expected (e.g. reading Event Log),
+        * to keep device from sleeping.
+        *
+        * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+        * SRAM is okay/restored.  We don't check that here because this call
+        * is just for hardware register access; but GP1 MAC_SLEEP check is a
+        * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+        *
+        */
+       ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
+                          CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+                          (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+                           CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+       if (ret < 0) {
+               val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
+               IWL_ERR(priv,
+                       "MAC is in deep sleep!  CSR_GP_CNTRL = 0x%08X\n", val);
+               _iwl_legacy_write32(priv, CSR_RESET,
+                               CSR_RESET_REG_FLAG_FORCE_NMI);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
+                                              struct iwl_priv *priv)
+{
+       IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
+       return _iwl_legacy_grab_nic_access(priv);
+}
+#define iwl_grab_nic_access(priv) \
+       __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_grab_nic_access(priv) \
+       _iwl_legacy_grab_nic_access(priv)
+#endif
+
+static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
+{
+       _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
+                                           struct iwl_priv *priv)
+{
+
+       IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
+       _iwl_legacy_release_nic_access(priv);
+}
+#define iwl_release_nic_access(priv) \
+       __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_release_nic_access(priv) \
+       _iwl_legacy_release_nic_access(priv)
+#endif
+
+static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+       return _iwl_legacy_read32(priv, reg);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
+                                       struct iwl_priv *priv, u32 reg)
+{
+       u32 value = _iwl_legacy_read_direct32(priv, reg);
+       IWL_DEBUG_IO(priv,
+                       "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
+                    f, l);
+       return value;
+}
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+       u32 value;
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return value;
+}
+
+#else
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+       u32 value;
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       value = _iwl_legacy_read_direct32(priv, reg);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return value;
+
+}
+#endif
+
+static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
+                                        u32 reg, u32 value)
+{
+       _iwl_legacy_write32(priv, reg, value);
+}
+static inline void
+iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_direct32(priv, reg, value);
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
+                                              u32 reg, u32 len, u32 *values)
+{
+       u32 count = sizeof(u32);
+
+       if ((priv != NULL) && (values != NULL)) {
+               for (; 0 < len; len -= count, reg += count, values++)
+                       iwl_legacy_write_direct32(priv, reg, *values);
+       }
+}
+
+static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
+                                      u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
+                       return t;
+               udelay(IWL_POLL_INTERVAL);
+               t += IWL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
+                                           struct iwl_priv *priv,
+                                           u32 addr, u32 mask, int timeout)
+{
+       int ret  = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
+
+       if (unlikely(ret == -ETIMEDOUT))
+               IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
+                            "timedout - %s %d\n", addr, mask, f, l);
+       else
+               IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
+                            "- %s %d\n", addr, mask, ret, f, l);
+       return ret;
+}
+#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
+__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
+#else
+#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
+#endif
+
+static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+       rmb();
+       return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
+}
+static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+       unsigned long reg_flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       val = _iwl_legacy_read_prph(priv, reg);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return val;
+}
+
+static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
+                                            u32 addr, u32 val)
+{
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
+                             ((addr & 0x0000FFFF) | (3 << 24)));
+       wmb();
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_prph(priv, addr, val);
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
+_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
+
+static inline void
+iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       _iwl_legacy_set_bits_prph(priv, reg, mask);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
+_iwl_legacy_write_prph(priv, reg,                              \
+                ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
+
+static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+                               u32 bits, u32 mask)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
+                                                *priv, u32 reg, u32 mask)
+{
+       unsigned long reg_flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       val = _iwl_legacy_read_prph(priv, reg);
+       _iwl_legacy_write_prph(priv, reg, (val & ~mask));
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
+{
+       unsigned long reg_flags;
+       u32 value;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+       rmb();
+       value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return value;
+}
+
+static inline void
+iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+               wmb();
+               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void
+iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
+                                         u32 len, u32 *values)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+               wmb();
+               for (; 0 < len; len -= sizeof(u32), values++)
+                       _iwl_legacy_write_direct32(priv,
+                                       HBUS_TARG_MEM_WDAT, *values);
+
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644 (file)
index 0000000..15eb8b7
--- /dev/null
@@ -0,0 +1,188 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* default: led_mode = 0 (device default, typically blinking per the index table) */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+               "1=On(RF On)/Off(RF Off), 2=blinking");
+
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+       { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+       { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+       { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+       { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+       { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+       { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+       { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+       { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+       { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+       { .throughput = 300 * 1024 - 1, .blink_time = 50 },
+};
+
+/*
+ * Adjust led blink rate to compensate for a MAC Clock difference on each HW.
+ * Led blink rate analysis showed an average deviation of 0% on 3945,
+ * 5% on 4965 HW.
+ * The led on/off time per HW must be compensated according to the deviation
+ * to achieve the desired led frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ *     compensation = (100 - averageDeviation) * 64 / 100
+ *     NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
+                                   u8 time, u16 compensation)
+{
+       if (!compensation) {
+               IWL_ERR(priv, "undefined blink compensation: "
+                       "use pre-defined blinking time\n");
+               return time;
+       }
+
+       return (u8)((time * compensation) >> 6);
+}
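+
+/*
+ * Worked example (for illustration): with the 4965's ~5% deviation,
+ * compensation = (100 - 5) * 64 / 100 = 60, so a nominal on/off time of
+ * 220 becomes (220 * 60) >> 6 = 206.
+ */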
+
+/* Set led pattern command */
+static int iwl_legacy_led_cmd(struct iwl_priv *priv,
+                      unsigned long on,
+                      unsigned long off)
+{
+       struct iwl_led_cmd led_cmd = {
+               .id = IWL_LED_LINK,
+               .interval = IWL_DEF_LED_INTRVL
+       };
+       int ret;
+
+       if (!test_bit(STATUS_READY, &priv->status))
+               return -EBUSY;
+
+       if (priv->blink_on == on && priv->blink_off == off)
+               return 0;
+
+       IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
+                       priv->cfg->base_params->led_compensation);
+       led_cmd.on = iwl_legacy_blink_compensation(priv, on,
+                               priv->cfg->base_params->led_compensation);
+       led_cmd.off = iwl_legacy_blink_compensation(priv, off,
+                               priv->cfg->base_params->led_compensation);
+
+       ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+       if (!ret) {
+               priv->blink_on = on;
+               priv->blink_off = off;
+       }
+       return ret;
+}
+
+static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
+{
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+       unsigned long on = 0;
+
+       if (brightness > 0)
+               on = IWL_LED_SOLID;
+
+       iwl_legacy_led_cmd(priv, on, 0);
+}
+
+static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
+                            unsigned long *delay_on,
+                            unsigned long *delay_off)
+{
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+
+       return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
+}
+
+void iwl_legacy_leds_init(struct iwl_priv *priv)
+{
+       int mode = led_mode;
+       int ret;
+
+       if (mode == IWL_LED_DEFAULT)
+               mode = priv->cfg->led_mode;
+
+       priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+                                  wiphy_name(priv->hw->wiphy));
+       priv->led.brightness_set = iwl_legacy_led_brightness_set;
+       priv->led.blink_set = iwl_legacy_led_blink_set;
+       priv->led.max_brightness = 1;
+
+       switch (mode) {
+       case IWL_LED_DEFAULT:
+               WARN_ON(1);
+               break;
+       case IWL_LED_BLINK:
+               priv->led.default_trigger =
+                       ieee80211_create_tpt_led_trigger(priv->hw,
+                                       IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+                                       iwl_blink, ARRAY_SIZE(iwl_blink));
+               break;
+       case IWL_LED_RF_STATE:
+               priv->led.default_trigger =
+                       ieee80211_get_radio_led_name(priv->hw);
+               break;
+       }
+
+       ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+       if (ret) {
+               kfree(priv->led.name);
+               return;
+       }
+
+       priv->led_registered = true;
+}
+EXPORT_SYMBOL(iwl_legacy_leds_init);
+
+void iwl_legacy_leds_exit(struct iwl_priv *priv)
+{
+       if (!priv->led_registered)
+               return;
+
+       led_classdev_unregister(&priv->led);
+       kfree(priv->led.name);
+}
+EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644 (file)
index 0000000..f0791f7
--- /dev/null
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_leds_h__
+#define __iwl_legacy_leds_h__
+
+
+struct iwl_priv;
+
+#define IWL_LED_SOLID 11
+#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IWL_LED_ACTIVITY       (0<<1)
+#define IWL_LED_LINK           (1<<1)
+
+/*
+ * LED mode
+ *    IWL_LED_DEFAULT:  use device default
+ *    IWL_LED_RF_STATE: turn LED on/off based on RF state
+ *                     LED ON  = RF ON
+ *                     LED OFF = RF OFF
+ *    IWL_LED_BLINK:    adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+       IWL_LED_DEFAULT,
+       IWL_LED_RF_STATE,
+       IWL_LED_BLINK,
+};
+
+void iwl_legacy_leds_init(struct iwl_priv *priv);
+void iwl_legacy_leds_exit(struct iwl_priv *priv);
+
+#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644 (file)
index 0000000..38647e4
--- /dev/null
@@ -0,0 +1,456 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_rs_h__
+#define __iwl_legacy_rs_h__
+
+struct iwl_rate_info {
+       u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+       u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+       u8 ieee;        /* MAC header:  IWL_RATE_6M_IEEE, etc. */
+       u8 prev_ieee;    /* previous rate in IEEE speeds */
+       u8 next_ieee;    /* next rate in IEEE speeds */
+       u8 prev_rs;      /* previous rate used in rs algo */
+       u8 next_rs;      /* next rate used in rs algo */
+       u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;  /* next rate used in TGG rs algo */
+};
+
+struct iwl3945_rate_info {
+       u8 plcp;                /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 ieee;                /* MAC header:  IWL_RATE_6M_IEEE, etc. */
+       u8 prev_ieee;           /* previous rate in IEEE speeds */
+       u8 next_ieee;           /* next rate in IEEE speeds */
+       u8 prev_rs;             /* previous rate used in rs algo */
+       u8 next_rs;             /* next rate used in rs algo */
+       u8 prev_rs_tgg;         /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;         /* next rate used in TGG rs algo */
+       u8 table_rs_index;      /* index in rate scale table cmd */
+       u8 prev_table_rs;       /* prev in rate table cmd */
+};
+
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+ */
+enum {
+       IWL_RATE_1M_INDEX = 0,
+       IWL_RATE_2M_INDEX,
+       IWL_RATE_5M_INDEX,
+       IWL_RATE_11M_INDEX,
+       IWL_RATE_6M_INDEX,
+       IWL_RATE_9M_INDEX,
+       IWL_RATE_12M_INDEX,
+       IWL_RATE_18M_INDEX,
+       IWL_RATE_24M_INDEX,
+       IWL_RATE_36M_INDEX,
+       IWL_RATE_48M_INDEX,
+       IWL_RATE_54M_INDEX,
+       IWL_RATE_60M_INDEX,
+       IWL_RATE_COUNT,
+       IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1,     /* Excluding 60M */
+       IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
+       IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+       IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+enum {
+       IWL_RATE_6M_INDEX_TABLE = 0,
+       IWL_RATE_9M_INDEX_TABLE,
+       IWL_RATE_12M_INDEX_TABLE,
+       IWL_RATE_18M_INDEX_TABLE,
+       IWL_RATE_24M_INDEX_TABLE,
+       IWL_RATE_36M_INDEX_TABLE,
+       IWL_RATE_48M_INDEX_TABLE,
+       IWL_RATE_54M_INDEX_TABLE,
+       IWL_RATE_1M_INDEX_TABLE,
+       IWL_RATE_2M_INDEX_TABLE,
+       IWL_RATE_5M_INDEX_TABLE,
+       IWL_RATE_11M_INDEX_TABLE,
+       IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+enum {
+       IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+       IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
+       IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+       IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+       IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define        IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
+#define        IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
+#define        IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
+#define        IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
+#define        IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
+#define        IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
+#define        IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
+#define        IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
+#define        IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
+#define        IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
+#define        IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
+#define        IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+       IWL_RATE_6M_PLCP  = 13,
+       IWL_RATE_9M_PLCP  = 15,
+       IWL_RATE_12M_PLCP = 5,
+       IWL_RATE_18M_PLCP = 7,
+       IWL_RATE_24M_PLCP = 9,
+       IWL_RATE_36M_PLCP = 11,
+       IWL_RATE_48M_PLCP = 1,
+       IWL_RATE_54M_PLCP = 3,
+       IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
+       IWL_RATE_1M_PLCP  = 10,
+       IWL_RATE_2M_PLCP  = 20,
+       IWL_RATE_5M_PLCP  = 55,
+       IWL_RATE_11M_PLCP = 110,
+       /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+       IWL_RATE_SISO_6M_PLCP = 0,
+       IWL_RATE_SISO_12M_PLCP = 1,
+       IWL_RATE_SISO_18M_PLCP = 2,
+       IWL_RATE_SISO_24M_PLCP = 3,
+       IWL_RATE_SISO_36M_PLCP = 4,
+       IWL_RATE_SISO_48M_PLCP = 5,
+       IWL_RATE_SISO_54M_PLCP = 6,
+       IWL_RATE_SISO_60M_PLCP = 7,
+       IWL_RATE_MIMO2_6M_PLCP  = 0x8,
+       IWL_RATE_MIMO2_12M_PLCP = 0x9,
+       IWL_RATE_MIMO2_18M_PLCP = 0xa,
+       IWL_RATE_MIMO2_24M_PLCP = 0xb,
+       IWL_RATE_MIMO2_36M_PLCP = 0xc,
+       IWL_RATE_MIMO2_48M_PLCP = 0xd,
+       IWL_RATE_MIMO2_54M_PLCP = 0xe,
+       IWL_RATE_MIMO2_60M_PLCP = 0xf,
+       IWL_RATE_SISO_INVM_PLCP,
+       IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+       IWL_RATE_6M_IEEE  = 12,
+       IWL_RATE_9M_IEEE  = 18,
+       IWL_RATE_12M_IEEE = 24,
+       IWL_RATE_18M_IEEE = 36,
+       IWL_RATE_24M_IEEE = 48,
+       IWL_RATE_36M_IEEE = 72,
+       IWL_RATE_48M_IEEE = 96,
+       IWL_RATE_54M_IEEE = 108,
+       IWL_RATE_60M_IEEE = 120,
+       IWL_RATE_1M_IEEE  = 2,
+       IWL_RATE_2M_IEEE  = 4,
+       IWL_RATE_5M_IEEE  = 11,
+       IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_CCK_BASIC_RATES_MASK    \
+       (IWL_RATE_1M_MASK          | \
+       IWL_RATE_2M_MASK)
+
+#define IWL_CCK_RATES_MASK          \
+       (IWL_CCK_BASIC_RATES_MASK  | \
+       IWL_RATE_5M_MASK          | \
+       IWL_RATE_11M_MASK)
+
+#define IWL_OFDM_BASIC_RATES_MASK   \
+       (IWL_RATE_6M_MASK         | \
+       IWL_RATE_12M_MASK         | \
+       IWL_RATE_24M_MASK)
+
+#define IWL_OFDM_RATES_MASK         \
+       (IWL_OFDM_BASIC_RATES_MASK | \
+       IWL_RATE_9M_MASK          | \
+       IWL_RATE_18M_MASK         | \
+       IWL_RATE_36M_MASK         | \
+       IWL_RATE_48M_MASK         | \
+       IWL_RATE_54M_MASK)
+
+#define IWL_BASIC_RATES_MASK         \
+       (IWL_OFDM_BASIC_RATES_MASK | \
+        IWL_CCK_BASIC_RATES_MASK)
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
+
+#define IWL_INVALID_VALUE    -1
+
+#define IWL_MIN_RSSI_VAL                 -100
+#define IWL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts are allowed before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT       160
+#define IWL_LEGACY_SUCCESS_LIMIT       480
+#define IWL_LEGACY_TABLE_COUNT         160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT  400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT  4500
+#define IWL_NONE_LEGACY_TABLE_COUNT    1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO              12800   /* 100% */
+#define IWL_RATE_SCALE_SWITCH          10880   /*  85% */
+#define IWL_RATE_HIGH_TH               10880   /*  85% */
+#define IWL_RATE_INCREASE_TH           6400    /*  50% */
+#define IWL_RATE_DECREASE_TH           1920    /*  15% */
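+
+/*
+ * Minimal sketch (not used by the driver) of how a success ratio in the
+ * scaled units above can be derived from raw counters; the helper name and
+ * its callers are hypothetical.  For example, 110 ACKed frames out of 128
+ * attempts give (128 * 100 * 110) / 128 = 11000, above IWL_RATE_HIGH_TH.
+ */
+#if 0  /* illustrative only */
+static inline s32 iwl_example_success_ratio(s32 acked, s32 attempted)
+{
+       /* 100% maps to 128 * 100 == IWL_RS_GOOD_RATIO */
+       if (attempted <= 0)
+               return IWL_INVALID_VALUE;
+       return (128 * 100 * acked) / attempted;
+}
+#endif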
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1      0
+#define IWL_LEGACY_SWITCH_ANTENNA2      1
+#define IWL_LEGACY_SWITCH_SISO          2
+#define IWL_LEGACY_SWITCH_MIMO2_AB      3
+#define IWL_LEGACY_SWITCH_MIMO2_AC      4
+#define IWL_LEGACY_SWITCH_MIMO2_BC      5
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1        0
+#define IWL_SISO_SWITCH_ANTENNA2        1
+#define IWL_SISO_SWITCH_MIMO2_AB        2
+#define IWL_SISO_SWITCH_MIMO2_AC        3
+#define IWL_SISO_SWITCH_MIMO2_BC        4
+#define IWL_SISO_SWITCH_GI              5
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1       0
+#define IWL_MIMO2_SWITCH_ANTENNA2       1
+#define IWL_MIMO2_SWITCH_SISO_A         2
+#define IWL_MIMO2_SWITCH_SISO_B         3
+#define IWL_MIMO2_SWITCH_SISO_C         4
+#define IWL_MIMO2_SWITCH_GI             5
+
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+
+#define IWL_ACTION_LIMIT               3       /* # possible actions */
+
+#define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD   0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID                0xff
+#define TID_QUEUE_CELL_SPACING 50      /* ms */
+#define TID_QUEUE_MAX_SIZE     20
+#define TID_ROUND_VALUE                5       /* ms */
+#define TID_MAX_LOAD_COUNT     8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
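+
+/*
+ * Worked example for TIME_WRAP_AROUND() (hypothetical values): with 32-bit
+ * timestamps, an old stamp x = 0xfffffff0 and a newer stamp y = 0x10 that
+ * has wrapped past zero yield (0 - x) + y = 0x20 elapsed ticks.
+ */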
+
+extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+
+enum iwl_table_type {
+       LQ_NONE,
+       LQ_G,           /* legacy types */
+       LQ_A,
+       LQ_SISO,        /* high-throughput types */
+       LQ_MIMO2,
+       LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define        ANT_NONE        0x0
+#define        ANT_A           BIT(0)
+#define        ANT_B           BIT(1)
+#define        ANT_AB          (ANT_A | ANT_B)
+#define ANT_C          BIT(2)
+#define        ANT_AC          (ANT_A | ANT_C)
+#define ANT_BC         (ANT_B | ANT_C)
+#define ANT_ABC                (ANT_AB | ANT_C)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE       12
+
+struct iwl_rate_mcs_info {
+       char    mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+       char    mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+       u64 data;               /* bitmap of successful frames */
+       s32 success_counter;    /* number of frames successful */
+       s32 success_ratio;      /* per-cent * 128  */
+       s32 counter;            /* number of frames attempted */
+       s32 average_tpt;        /* success ratio * expected throughput */
+       unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+       enum iwl_table_type lq_type;
+       u8 ant_type;
+       u8 is_SGI;      /* 1 = short guard interval */
+       u8 is_ht40;     /* 1 = 40 MHz channel width */
+       u8 is_dup;      /* 1 = duplicated data streams */
+       u8 action;      /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+       u8 max_search;  /* maximum number of tables we can search */
+       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
+       u32 current_rate;  /* rate_n_flags, uCode API format */
+       struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+       unsigned long time_stamp;       /* age of the oldest statistics */
+       u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+                                                * slice */
+       u32 total;                      /* total num of packets during the
+                                        * last TID_MAX_TIME_DIFF */
+       u8 queue_count;                 /* number of queues that have
+                                        * been used since the last cleanup */
+       u8 head;                        /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+       u8 active_tbl;          /* index of active table, range 0-1 */
+       u8 enable_counter;      /* indicates HT mode */
+       u8 stay_in_tbl;         /* 1: disallow, 0: allow search for new mode */
+       u8 search_better_tbl;   /* 1: currently trying alternate mode */
+       s32 last_tpt;
+
+       /* The following determine when to search for a new mode */
+       u32 table_count_limit;
+       u32 max_failure_limit;  /* # failed frames before new search */
+       u32 max_success_limit;  /* # successful frames before new search */
+       u32 table_count;
+       u32 total_failed;       /* total failed frames, any/all rates */
+       u32 total_success;      /* total successful frames, any/all rates */
+       u64 flush_timer;        /* time staying in mode before new search */
+
+       u8 action_counter;      /* # mode-switch actions tried */
+       u8 is_green;
+       u8 is_dup;
+       enum ieee80211_band band;
+
+       /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+       u32 supp_rates;
+       u16 active_legacy_rate;
+       u16 active_siso_rate;
+       u16 active_mimo2_rate;
+       s8 max_rate_idx;     /* Max rate set by user */
+       u8 missed_rate_counter;
+
+       struct iwl_link_quality_cmd lq;
+       struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+       struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+       u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+       struct dentry *rs_sta_dbgfs_scale_table_file;
+       struct dentry *rs_sta_dbgfs_stats_table_file;
+       struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+       struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+       u32 dbg_fixed_rate;
+#endif
+       struct iwl_priv *drv;
+
+       /* used to be in sta_info */
+       int last_txrate_idx;
+       /* last tx rate_n_flags */
+       u32 last_rate_n_flags;
+       /* packets destined for this STA are aggregated */
+       u8 is_agg;
+};
+
+static inline u8 iwl4965_num_of_ant(u8 mask)
+{
+       return  !!((mask) & ANT_A) +
+               !!((mask) & ANT_B) +
+               !!((mask) & ANT_C);
+}
+
+static inline u8 iwl4965_first_antenna(u8 mask)
+{
+       if (mask & ANT_A)
+               return ANT_A;
+       if (mask & ANT_B)
+               return ANT_B;
+       return ANT_C;
+}
+
+
+/**
+ * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
+                            struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+                                struct ieee80211_sta *sta, u8 sta_id);
+
+/**
+ * iwl_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module.  The driver can call
+ * iwl_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw.
+ *
+ */
+extern int iwl4965_rate_control_register(void);
+extern int iwl3945_rate_control_register(void);
+
+/**
+ * iwl_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void iwl4965_rate_control_unregister(void);
+extern void iwl3945_rate_control_unregister(void);
+
+#endif /* __iwl_legacy_rs_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644 (file)
index 0000000..903ef0d
--- /dev/null
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-commands.h"
+#include "iwl-debug.h"
+#include "iwl-power.h"
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct iwl_power_vec_entry {
+       struct iwl_powertable_cmd cmd;
+       u8 no_dtim;     /* number of skip dtim */
+};
+
+static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
+                                   struct iwl_powertable_cmd *cmd)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       if (priv->power_data.pci_pm)
+               cmd->flags |= IWL_POWER_PCI_PM_MSK;
+
+       IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
+}
+
+static int
+iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
+{
+       IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
+       IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
+       IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
+                                       le32_to_cpu(cmd->tx_data_timeout));
+       IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
+                                       le32_to_cpu(cmd->rx_data_timeout));
+       IWL_DEBUG_POWER(priv,
+                       "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+                       le32_to_cpu(cmd->sleep_interval[0]),
+                       le32_to_cpu(cmd->sleep_interval[1]),
+                       le32_to_cpu(cmd->sleep_interval[2]),
+                       le32_to_cpu(cmd->sleep_interval[3]),
+                       le32_to_cpu(cmd->sleep_interval[4]));
+
+       return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
+                               sizeof(struct iwl_powertable_cmd), cmd);
+}
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+                      bool force)
+{
+       int ret;
+       bool update_chains;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /* Don't update the RX chain when chain noise calibration is running */
+       update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+                       priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+       if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+               return 0;
+
+       if (!iwl_legacy_is_ready_rf(priv))
+               return -EIO;
+
+       /* scan completion uses sleep_cmd_next, so it must be kept up to date */
+       memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+               IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+               return 0;
+       }
+
+       if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+               set_bit(STATUS_POWER_PMI, &priv->status);
+
+       ret = iwl_legacy_set_power(priv, cmd);
+       if (!ret) {
+               if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+                       clear_bit(STATUS_POWER_PMI, &priv->status);
+
+               if (priv->cfg->ops->lib->update_chain_flags && update_chains)
+                       priv->cfg->ops->lib->update_chain_flags(priv);
+               else if (priv->cfg->ops->lib->update_chain_flags)
+                       IWL_DEBUG_POWER(priv,
+                                       "Cannot update the power, chain noise "
+                                       "calibration running: %d\n",
+                                       priv->chain_noise_data.state);
+
+               memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+       } else
+               IWL_ERR(priv, "set power fail, ret = %d", ret);
+
+       return ret;
+}
+
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
+{
+       struct iwl_powertable_cmd cmd;
+
+       iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
+       return iwl_legacy_power_set_mode(priv, &cmd, force);
+}
+EXPORT_SYMBOL(iwl_legacy_power_update_mode);
+
+/* initialize to default */
+void iwl_legacy_power_initialize(struct iwl_priv *priv)
+{
+       u16 lctl = iwl_legacy_pcie_link_ctl(priv);
+
+       priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+       priv->power_data.debug_sleep_level_override = -1;
+
+       memset(&priv->power_data.sleep_cmd, 0,
+               sizeof(priv->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644 (file)
index 0000000..d30b36a
--- /dev/null
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#ifndef __iwl_legacy_power_setting_h__
+#define __iwl_legacy_power_setting_h__
+
+#include "iwl-commands.h"
+
+enum iwl_power_level {
+       IWL_POWER_INDEX_1,
+       IWL_POWER_INDEX_2,
+       IWL_POWER_INDEX_3,
+       IWL_POWER_INDEX_4,
+       IWL_POWER_INDEX_5,
+       IWL_POWER_NUM
+};
+
+struct iwl_power_mgr {
+       struct iwl_powertable_cmd sleep_cmd;
+       struct iwl_powertable_cmd sleep_cmd_next;
+       int debug_sleep_level_override;
+       bool pci_pm;
+};
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+                      bool force);
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
+void iwl_legacy_power_initialize(struct iwl_priv *priv);
+
+#endif  /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644 (file)
index 0000000..30a4930
--- /dev/null
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef        __iwl_legacy_prph_h__
+#define __iwl_legacy_prph_h__
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via HBUS_TARG_PRPH_* registers.
+ */
+#define PRPH_BASE      (0x00000)
+#define PRPH_END       (0xFFFFF)
+
+/* APMG (power management) constants */
+#define APMG_BASE                      (PRPH_BASE + 0x3000)
+#define APMG_CLK_CTRL_REG              (APMG_BASE + 0x0000)
+#define APMG_CLK_EN_REG                        (APMG_BASE + 0x0004)
+#define APMG_CLK_DIS_REG               (APMG_BASE + 0x0008)
+#define APMG_PS_CTRL_REG               (APMG_BASE + 0x000c)
+#define APMG_PCIDEV_STT_REG            (APMG_BASE + 0x0010)
+#define APMG_RFKILL_REG                        (APMG_BASE + 0x0014)
+#define APMG_RTC_INT_STT_REG           (APMG_BASE + 0x001c)
+#define APMG_RTC_INT_MSK_REG           (APMG_BASE + 0x0020)
+#define APMG_DIGITAL_SVR_REG           (APMG_BASE + 0x0058)
+#define APMG_ANALOG_SVR_REG            (APMG_BASE + 0x006C)
+
+#define APMS_CLK_VAL_MRB_FUNC_MODE     (0x00000001)
+#define APMG_CLK_VAL_DMA_CLK_RQT       (0x00000200)
+#define APMG_CLK_VAL_BSM_CLK_RQT       (0x00000800)
+
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS   (0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ             (0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC               (0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN         (0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX           (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX          (0x02000000)
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK        (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_DIGITAL_VOLTAGE_1_32          (0x00000060)
+
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
+
+/**
+ * BSM (Bootstrap State Machine)
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down when the embedded control
+ * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
+ *
+ * When powering back up after sleeps (or during initial uCode load), the BSM
+ * internally loads the short bootstrap program from the special SRAM into the
+ * embedded processor's instruction SRAM, and starts the processor so it runs
+ * the bootstrap program.
+ *
+ * This bootstrap program loads (via PCI busmaster DMA) instructions and data
+ * images for a uCode program from host DRAM locations.  The host driver
+ * indicates DRAM locations and sizes for instruction and data images via the
+ * four BSM_DRAM_* registers.  Once the bootstrap program loads the new program,
+ * the new program starts automatically.
+ *
+ * The uCode used for open-source drivers includes two programs:
+ *
+ * 1)  Initialization -- performs hardware calibration and sets up some
+ *     internal data, then notifies host via "initialize alive" notification
+ *     (struct iwl_init_alive_resp) that it has completed all of its work.
+ *     After signal from host, it then loads and starts the runtime program.
+ *     The initialization program must be used when initially setting up the
+ *     NIC after loading the driver.
+ *
+ * 2)  Runtime/Protocol -- performs all normal runtime operations.  This
+ *     notifies host via "alive" notification (struct iwl_alive_resp) that it
+ *     is ready to be used.
+ *
+ * When initializing the NIC, the host driver does the following procedure:
+ *
+ * 1)  Load bootstrap program (instructions only, no data image for bootstrap)
+ *     into bootstrap memory.  Use dword writes starting at BSM_SRAM_LOWER_BOUND
+ *
+ * 2)  Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
+ *     images in host DRAM.
+ *
+ * 3)  Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
+ *     BSM_WR_MEM_SRC_REG = 0
+ *     BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
+ *     BSM_WR_DWCOUNT_REG = # dwords in bootstrap instruction image
+ *
+ * 4)  Load bootstrap into instruction SRAM:
+ *     BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
+ *
+ * 5)  Wait for load completion:
+ *     Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
+ *
+ * 6)  Enable future boot loads whenever NIC's power management triggers it:
+ *     BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
+ *
+ * 7)  Start the NIC by removing all reset bits:
+ *     CSR_RESET = 0
+ *
+ *     The bootstrap uCode (already in instruction SRAM) loads initialization
+ *     uCode.  Initialization uCode performs data initialization, sends
+ *     "initialize alive" notification to host, and waits for a signal from
+ *     host to load runtime code.
+ *
+ * 8)  Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
+ *     images in host DRAM.  The last register loaded must be the instruction
+ *     byte count register ("1" in MSbit tells initialization uCode to load
+ *     the runtime uCode):
+ *     BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
+ *
+ * 9)  Wait for "alive" notification, then issue normal runtime commands.
+ *
+ * Data caching during power-downs:
+ *
+ * Just before the embedded controller powers down (e.g for automatic
+ * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
+ * a current snapshot of the embedded processor's data SRAM into host DRAM.
+ * This caches the data while the embedded processor's memory is powered down.
+ * Location and size are controlled by BSM_DRAM_DATA_* registers.
+ *
+ * NOTE:  Instruction SRAM does not need to be saved, since that doesn't
+ *        change during operation; the original image (from uCode distribution
+ *        file) can be used for reload.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  Bootstrap looks
+ * at the BSM_DRAM_* registers, which now point to the runtime instruction
+ * image and the cached (modified) runtime data (*not* the initialization
+ * uCode).  Bootstrap reloads these runtime images into SRAM, and restarts the
+ * uCode from where it left off before the power-down.
+ *
+ * NOTE:  Initialization uCode does *not* run as part of the save/restore
+ *        procedure.
+ *
+ * This save/restore method is mostly for autonomous power management during
+ * normal operation (result of POWER_TABLE_CMD).  Platform suspend/resume and
+ * RFKILL should use complete restarts (with total re-initialization) of uCode,
+ * allowing total shutdown (including BSM memory).
+ *
+ * Note that, during normal operation, the host DRAM that held the initial
+ * startup data for the runtime code is now being used as a backup data cache
+ * for modified data!  If you need to completely re-initialize the NIC, make
+ * sure that you use the runtime data image from the uCode distribution file,
+ * not the modified/saved runtime data.  You may want to store a separate
+ * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
+ */
+
+/* BSM bit fields */
+#define BSM_WR_CTRL_REG_BIT_START     (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000) /* enable boot after pwrup*/
+#define BSM_DRAM_INST_LOAD            (0x80000000) /* start program load now */
+
+/* BSM addresses */
+#define BSM_BASE                     (PRPH_BASE + 0x3400)
+#define BSM_END                      (PRPH_BASE + 0x3800)
+
+#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010) /* bit 0:  1 == done */
+
+/*
+ * Pointers and size regs for bootstrap load and data SRAM save/restore.
+ * NOTE:  3945 pointers use bits 31:0 of DRAM address.
+ *        4965 pointers use bits 35:4 of DRAM address.
+ */
+#define BSM_DRAM_INST_PTR_REG        (BSM_BASE + 0x090)
+#define BSM_DRAM_INST_BYTECOUNT_REG  (BSM_BASE + 0x094)
+#define BSM_DRAM_DATA_PTR_REG        (BSM_BASE + 0x098)
+#define BSM_DRAM_DATA_BYTECOUNT_REG  (BSM_BASE + 0x09C)
+
+/*
+ * BSM special memory, stays powered on during power-save sleeps.
+ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
+ */
+#define BSM_SRAM_LOWER_BOUND         (PRPH_BASE + 0x3800)
+#define BSM_SRAM_SIZE                  (1024) /* bytes */
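+
+/*
+ * Minimal sketch of the bootstrap load sequence described above (steps 3-6).
+ * It is not part of this driver; it assumes the iwl_legacy_write_prph() and
+ * iwl_legacy_read_prph() accessors from iwl-io.h, an instruction image
+ * already copied into BSM SRAM, and RTC_INST_LOWER_BOUND from the hardware
+ * header.  NIC-access bracketing and error handling are omitted, and the
+ * function name is illustrative.
+ */
+#if 0  /* illustrative only */
+static int example_bsm_load_bootstrap(struct iwl_priv *priv, u32 inst_dwords)
+{
+       int i;
+
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
+       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_dwords);
+
+       /* start the copy into instruction SRAM, then poll for completion */
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+       for (i = 0; i < 100; i++) {
+               if (!(iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
+                     BSM_WR_CTRL_REG_BIT_START))
+                       break;
+               udelay(10);
+       }
+       if (i == 100)
+               return -ETIMEDOUT;
+
+       /* re-load bootstrap automatically on future power-ups */
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
+                             BSM_WR_CTRL_REG_BIT_START_EN);
+       return 0;
+}
+#endif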
+
+
+/* 3945 Tx scheduler registers */
+#define ALM_SCD_BASE                        (PRPH_BASE + 0x2E00)
+#define ALM_SCD_MODE_REG                    (ALM_SCD_BASE + 0x000)
+#define ALM_SCD_ARASTAT_REG                 (ALM_SCD_BASE + 0x004)
+#define ALM_SCD_TXFACT_REG                  (ALM_SCD_BASE + 0x010)
+#define ALM_SCD_TXF4MF_REG                  (ALM_SCD_BASE + 0x014)
+#define ALM_SCD_TXF5MF_REG                  (ALM_SCD_BASE + 0x020)
+#define ALM_SCD_SBYP_MODE_1_REG             (ALM_SCD_BASE + 0x02C)
+#define ALM_SCD_SBYP_MODE_2_REG             (ALM_SCD_BASE + 0x030)
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM.  It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device.  A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes.  For 4965, they are used as follows
+ * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- Commands (e.g. RXON, etc.)
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
+ * 7 -- not used by driver (device-internal only)
+ *
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1)  Scheduler-Ack, in which the scheduler automatically supports a
+ *     block-ack (BA) window of up to 64 TFDs.  In this mode, each queue
+ *     contains TFDs for a unique combination of Recipient Address (RA)
+ *     and Traffic Identifier (TID), that is, traffic of a given
+ *     Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ *     In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ *     each frame within the BA window, including whether it's been transmitted,
+ *     and whether it's been acknowledged by the receiving station.  The device
+ *     automatically processes block-acks received from the receiving STA,
+ *     and reschedules un-acked frames to be retransmitted (successful
+ *     Tx completion may end up being out-of-order).
+ *
+ *     The driver must maintain the queue's Byte Count table in host DRAM
+ *     (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ *     This mode does not support fragmentation.
+ *
+ * 2)  FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ *     The device may automatically retry Tx, but will retry only one frame
+ *     at a time, until receiving ACK from receiving station, or reaching
+ *     retry limit and giving up.
+ *
+ *     The command queue (#4/#9) must use this mode!
+ *     This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1)  Scheduler registers
+ * 2)  Shared scheduler database in internal 4965 SRAM
+ * 3)  Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1)  16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2)  16 Byte Count circular buffers in 16 KBytes contiguous memory
+ *     (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!):
+ */
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
+ * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ */
+#define SCD_WIN_SIZE                           64
+#define SCD_FRAME_LIMIT                                64
+
+/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
+#define IWL49_SCD_START_OFFSET         0xa02c00
+
+/*
+ * 4965 tells driver SRAM address for internal scheduler structs via this reg.
+ * Value is valid only after "Alive" response from uCode.
+ */
+#define IWL49_SCD_SRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x0)
+
+/*
+ * Driver may need to update queue-empty bits after changing queue's
+ * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * scheduler is not tracking what's happening).
+ * Bit fields:
+ * 31-16:  Write mask -- 1: update empty bit, 0: don't change empty bit
+ * 15-00:  Empty state, one for each queue -- 1: empty, 0: non-empty
+ * NOTE:  This register is not used by Linux driver.
+ */
+#define IWL49_SCD_EMPTY_BITS               (IWL49_SCD_START_OFFSET + 0x4)
+
+/*
+ * Physical base address of array of byte count (BC) circular buffers (CBs).
+ * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
+ * This register points to BC CB for queue 0, must be on 1024-byte boundary.
+ * Others are spaced by 1024 bytes.
+ * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
+ * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * Bit fields:
+ * 25-00:  Byte Count CB physical address [35:10], must be 1024-byte aligned.
+ */
+#define IWL49_SCD_DRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x10)
+
+/*
+ * Enables any/all Tx DMA/FIFO channels.
+ * Scheduler generates requests for only the active channels.
+ * Set this to 0xff to enable all 8 channels (normal usage).
+ * Bit fields:
+ *  7- 0:  Enable (1), disable (0), one bit for each channel 0-7
+ */
+#define IWL49_SCD_TXFACT                   (IWL49_SCD_START_OFFSET + 0x1c)
+/*
+ * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Initialized and updated by driver as new TFDs are added to queue.
+ * NOTE:  If using Block Ack, index must correspond to frame's
+ *        Start Sequence Number; index = (SSN & 0xff)
+ * NOTE:  Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
+ */
+#define IWL49_SCD_QUEUE_WRPTR(x)  (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+
+/*
+ * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
+ * For FIFO mode, index indicates next frame to transmit.
+ * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Initialized by driver, updated by scheduler.
+ */
+#define IWL49_SCD_QUEUE_RDPTR(x)  (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+
+/*
+ * Select which queues work in chain mode (1) vs. not (0).
+ * Use chain mode to build chains of aggregated frames.
+ * Bit fields:
+ * 31-16:  Reserved
+ * 15-00:  Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
+ * NOTE:  If driver sets up a queue for chain mode, it should also set up
+ *        Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
+ */
+#define IWL49_SCD_QUEUECHAIN_SEL  (IWL49_SCD_START_OFFSET + 0xd0)
+
+/*
+ * Select which queues interrupt driver when scheduler increments
+ * a queue's read pointer (index).
+ * Bit fields:
+ * 31-16:  Reserved
+ * 15-00:  Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
+ * NOTE:  This functionality is apparently a no-op; driver relies on interrupts
+ *        from Rx queue to read Tx command responses and update Tx queues.
+ */
+#define IWL49_SCD_INTERRUPT_MASK  (IWL49_SCD_START_OFFSET + 0xe4)
+
+/*
+ * Queue search status registers.  One for each queue.
+ * Sets up queue mode and assigns queue to Tx DMA channel.
+ * Bit fields:
+ * 19-10: Write mask/enable bits for bits 0-9
+ *     9: Driver should init to "0"
+ *     8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
+ *        Driver should init to "1" for aggregation mode, or "0" otherwise.
+ *   7-6: Driver should init to "0"
+ *     5: Window Size Left; indicates whether scheduler can request
+ *        another TFD, based on window size, etc.  Driver should init
+ *        this bit to "1" for aggregation mode, or "0" for non-agg.
+ *   4-1: Tx FIFO to use (range 0-7).
+ *     0: Queue is active (1), not active (0).
+ * Other bits should be written as "0"
+ *
+ * NOTE:  If enabling Scheduler-ACK mode, chain mode should also be enabled
+ *        via SCD_QUEUECHAIN_SEL.
+ */
+#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
+       (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+
+/* Bit field positions */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE    (0)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF       (1)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL       (5)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK   (8)
+
+/* Write masks */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN        (10)
+#define IWL49_SCD_QUEUE_STTS_REG_MSK           (0x0007FC00)
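+
+/*
+ * Sketch (not part of this driver) of activating queue 'txq_id' on Tx FIFO
+ * 'fifo' using the fields above, with aggregation disabled so the WSL and
+ * SCD_ACK fields stay zero.  ORing in the write mask makes the write take
+ * effect for the low bits.  Assumes the iwl_legacy_write_prph() accessor
+ * from iwl-io.h; the function name is illustrative.
+ */
+#if 0  /* illustrative only */
+static void example_activate_txq(struct iwl_priv *priv, int txq_id, int fifo)
+{
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+                       (1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                       (fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+                       IWL49_SCD_QUEUE_STTS_REG_MSK);
+}
+#endif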
+
+/**
+ * 4965 internal SRAM structures for scheduler, shared with driver ...
+ *
+ * Driver should clear and initialize the following areas after receiving
+ * "Alive" response from 4965 uCode, i.e. after initial
+ * uCode load, or after a uCode load done for error recovery:
+ *
+ * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
+ * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
+ * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
+ *
+ * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
+ * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
+ * All OFFSET values must be added to this base address.
+ */
+
+/*
+ * Queue context.  One 8-byte entry for each of 16 queues.
+ *
+ * Driver should clear this entire area (size 0x80) to 0 after receiving
+ * "Alive" notification from uCode.  Additionally, driver should init
+ * each queue's entry as follows:
+ *
+ * LS Dword bit fields:
+ *  0-06:  Max Tx window size for Scheduler-ACK.  Driver should init to 64.
+ *
+ * MS Dword bit fields:
+ * 16-22:  Frame limit.  Driver should init to 10 (0xa).
+ *
+ * Driver should init all other bits to 0.
+ *
+ * Init must be done after driver receives "Alive" response from 4965 uCode,
+ * and when setting up queue for aggregation.
+ */
+#define IWL49_SCD_CONTEXT_DATA_OFFSET                  0x380
+#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+                       (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS          (0)
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK          (0x0000007F)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS       (16)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK       (0x007F0000)
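+
+/*
+ * Sketch (not part of this driver) of initializing one queue's context entry
+ * as described above, using SCD_WIN_SIZE and SCD_FRAME_LIMIT; the actual
+ * values are a driver policy decision.  It assumes iwl_legacy_write_targ_mem()
+ * from iwl-io.h and 'scd_base' previously read from IWL49_SCD_SRAM_BASE_ADDR;
+ * the function name is illustrative.
+ */
+#if 0  /* illustrative only */
+static void example_init_queue_context(struct iwl_priv *priv, u32 scd_base,
+                                      int txq_id)
+{
+       u32 addr = scd_base + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);
+
+       /* LS dword: max Tx window size for Scheduler-ACK */
+       iwl_legacy_write_targ_mem(priv, addr,
+               (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+       /* MS dword: frame limit */
+       iwl_legacy_write_targ_mem(priv, addr + sizeof(u32),
+               (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+}
+#endif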
+
+/*
+ * Tx Status Bitmap
+ *
+ * Driver should clear this entire area (size 0x100) to 0 after receiving
+ * "Alive" notification from uCode.  Area is used only by device itself;
+ * no other support (besides clearing) is required from driver.
+ */
+#define IWL49_SCD_TX_STTS_BITMAP_OFFSET                0x400
+
+/*
+ * RAxTID to queue translation mapping.
+ *
+ * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
+ * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
+ * one QOS priority level destined for one station (for this wireless link,
+ * not final destination).  The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * mappings, one for each of the 16 queues.  If queue is not in Scheduler-ACK
+ * mode, the device ignores the mapping value.
+ *
+ * Bit fields, for each 16-bit map:
+ * 15-9:  Reserved, set to 0
+ *  8-4:  Index into device's station table for recipient station
+ *  3-0:  Traffic ID (tid), range 0-15
+ *
+ * Driver should clear this entire area (size 32 bytes) to 0 after receiving
+ * "Alive" notification from uCode.  To update a 16-bit map value, driver
+ * must read a dword-aligned value from device SRAM, replace the 16-bit map
+ * value of interest, and write the dword value back into device SRAM.
+ */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET         0x500
+
+/* Find translation table dword to read/write for given queue */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+       ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+
+#define IWL_SCD_TXFIFO_POS_TID                 (0)
+#define IWL_SCD_TXFIFO_POS_RA                  (4)
+#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK     (0x01FF)
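+
+/*
+ * Sketch (not part of this driver) of the read-modify-write described above
+ * for one RAxTID map entry: queue 'txq_id', station table index 'sta_id',
+ * traffic id 'tid'.  Assumes iwl_legacy_read_targ_mem() and
+ * iwl_legacy_write_targ_mem() from iwl-io.h and 'scd_base' read from
+ * IWL49_SCD_SRAM_BASE_ADDR; the function name is illustrative.
+ */
+#if 0  /* illustrative only */
+static void example_set_ra_tid_map(struct iwl_priv *priv, u32 scd_base,
+                                  int txq_id, int sta_id, int tid)
+{
+       u32 addr = scd_base + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+       u16 map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
+                  (tid << IWL_SCD_TXFIFO_POS_TID)) &
+                 IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+       u32 val = iwl_legacy_read_targ_mem(priv, addr);
+
+       /* each dword holds two 16-bit maps; odd queues use the upper half */
+       if (txq_id & 0x1)
+               val = (val & 0x0000FFFF) | ((u32)map << 16);
+       else
+               val = (val & 0xFFFF0000) | map;
+
+       iwl_legacy_write_targ_mem(priv, addr, val);
+}
+#endif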
+
+/*********************** END TX SCHEDULER *************************************/
+
+#endif                         /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644 (file)
index 0000000..654cf23
--- /dev/null
@@ -0,0 +1,302 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot in which the firmware can place
+ * a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_legacy_rx_queue_alloc()   Allocates rx_free
+ * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx()         Detaches iwl_rx_mem_buffers from the pool up to the
+ *                            READ INDEX, moving each packet buffer from the
+ *                            queue to rx_used.
+ *                            Calls iwl_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
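+
+/*
+ * Worked example of the indexes above (hypothetical values): with a queue
+ * size of 256, read = 10 and write = 12, iwl_legacy_rx_queue_space() below
+ * returns (10 - 12) + 256 - 2 = 252 free slots; the "- 2" guard keeps a full
+ * queue distinguishable from an empty one.
+ */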
+
+/**
+ * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
+ */
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
+{
+       int s = q->read - q->write;
+       if (s <= 0)
+               s += RX_QUEUE_SIZE;
+       /* keep some buffer so we don't confuse a full queue with an empty one */
+       s -= 2;
+       if (s < 0)
+               s = 0;
+       return s;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
+
+/**
+ * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+                                       struct iwl_rx_queue *q)
+{
+       unsigned long flags;
+       u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
+       u32 reg;
+
+       spin_lock_irqsave(&q->lock, flags);
+
+       if (q->need_update == 0)
+               goto exit_unlock;
+
+       /* If power-saving is in use, make sure device is awake */
+       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(priv,
+                               "Rx queue requesting wakeup,"
+                               " GP1 = 0x%x\n", reg);
+                       iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       goto exit_unlock;
+               }
+
+               q->write_actual = (q->write & ~0x7);
+               iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+                               q->write_actual);
+
+       /* Else device is assumed to be awake */
+       } else {
+               /* Device expects a multiple of 8 */
+               q->write_actual = (q->write & ~0x7);
+               iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+                       q->write_actual);
+       }
+
+       q->need_update = 0;
+
+ exit_unlock:
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
+
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       spin_lock_init(&rxq->lock);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+
+       /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+       rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+                                    GFP_KERNEL);
+       if (!rxq->bd)
+               goto err_bd;
+
+       rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+                                         &rxq->rb_stts_dma, GFP_KERNEL);
+       if (!rxq->rb_stts)
+               goto err_rb;
+
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+       /* Set us up so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       rxq->need_update = 0;
+       return 0;
+
+err_rb:
+       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+err_bd:
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
+
+
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+       if (!report->state) {
+               IWL_DEBUG_11H(priv,
+                       "Spectrum Measure Notification: Start\n");
+               return;
+       }
+
+       memcpy(&priv->measure_report, report, sizeof(*report));
+       priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
+
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+                               struct iwl_rx_packet *pkt)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+       if (iwl_legacy_is_any_associated(priv)) {
+               if (priv->cfg->ops->lib->check_plcp_health) {
+                       if (!priv->cfg->ops->lib->check_plcp_health(
+                           priv, pkt)) {
+                               /*
+                                * high PLCP error rate detected;
+                                * reset the radio
+                                */
+                               iwl_legacy_force_reset(priv,
+                                                       IWL_RF_RESET, false);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+                          struct ieee80211_hdr *hdr,
+                          u32 decrypt_res,
+                          struct ieee80211_rx_status *stats)
+{
+       u16 fc = le16_to_cpu(hdr->frame_control);
+
+       /*
+        * All contexts have the same setting here due to it being
+        * a module parameter, so OK to check any context.
+        */
+       if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
+                                               RXON_FILTER_DIS_DECRYPT_MSK)
+               return 0;
+
+       if (!(fc & IEEE80211_FCTL_PROTECTED))
+               return 0;
+
+       IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
+       switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               /* The uCode has got a bad phase 1 key, so push the packet up;
+                * decryption will be done in SW. */
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_BAD_KEY_TTAK)
+                       break;
+
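+               /* fall through - TTAK OK, continue with the WEP ICV/MIC check */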
+       case RX_RES_STATUS_SEC_TYPE_WEP:
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_BAD_ICV_MIC) {
+                       /* Bad ICV: the packet is destroyed since the
+                        * decryption is done in place, so drop it */
+                       IWL_DEBUG_RX(priv, "Packet destroyed\n");
+                       return -1;
+               }
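+               /* fall through - check whether HW decryption succeeded */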
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_DECRYPT_OK) {
+                       IWL_DEBUG_RX(priv, "hw decrypted successfully\n");
+                       stats->flag |= RX_FLAG_DECRYPTED;
+               }
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644 (file)
index 0000000..60f597f
--- /dev/null
@@ -0,0 +1,625 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req.  This should be set long enough to hear probe responses
+ * from more than one AP.  */
+#define IWL_ACTIVE_DWELL_TIME_24    (30)       /* all times in msec */
+#define IWL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IWL_PASSIVE_DWELL_TIME_24   (20)       /* all times in msec */
+#define IWL_PASSIVE_DWELL_TIME_52   (10)
+#define IWL_PASSIVE_DWELL_BASE      (100)
+#define IWL_CHANNEL_TUNE_TIME       5
+
+static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
+{
+       int ret;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_SCAN_ABORT_CMD,
+               .flags = CMD_WANT_SKB,
+       };
+
+       /* Exit instantly with error when device is not ready
+        * to receive scan abort command or it does not perform
+        * hardware scan currently */
+       if (!test_bit(STATUS_READY, &priv->status) ||
+           !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
+           !test_bit(STATUS_SCAN_HW, &priv->status) ||
+           test_bit(STATUS_FW_ERROR, &priv->status) ||
+           test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EIO;
+
+       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (ret)
+               return ret;
+
+       pkt = (struct iwl_rx_packet *)cmd.reply_page;
+       if (pkt->u.status != CAN_ABORT_STATUS) {
+               /* The scan abort will return 1 for success or
+                * 2 for "failure".  A failure condition can be
+                * due to simply not being in an active scan which
+                * can occur if we send the scan abort before the
+                * microcode has notified us that a scan is
+                * completed. */
+               IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
+               ret = -EIO;
+       }
+
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+       return ret;
+}
+
+static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
+{
+       /* check if scan was requested from mac80211 */
+       if (priv->scan_request) {
+               IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
+               ieee80211_scan_completed(priv->hw, aborted);
+       }
+
+       priv->is_internal_short_scan = false;
+       priv->scan_vif = NULL;
+       priv->scan_request = NULL;
+}
+
+void iwl_legacy_force_scan_end(struct iwl_priv *priv)
+{
+       lockdep_assert_held(&priv->mutex);
+
+       if (!test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
+               return;
+       }
+
+       IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
+       clear_bit(STATUS_SCANNING, &priv->status);
+       clear_bit(STATUS_SCAN_HW, &priv->status);
+       clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+       iwl_legacy_complete_scan(priv, true);
+}
+
+static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
+               return;
+       }
+
+       if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
+               return;
+       }
+
+       ret = iwl_legacy_send_scan_abort(priv);
+       if (ret) {
+               IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
+               iwl_legacy_force_scan_end(priv);
+       } else
+               IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
+}
+
+/**
+ * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
+ */
+int iwl_legacy_scan_cancel(struct iwl_priv *priv)
+{
+       IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
+       queue_work(priv->workqueue, &priv->abort_scan);
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel);
+
+/**
+ * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+       lockdep_assert_held(&priv->mutex);
+
+       IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
+
+       iwl_legacy_do_scan_abort(priv);
+
+       while (time_before_eq(jiffies, timeout)) {
+               if (!test_bit(STATUS_SCAN_HW, &priv->status))
+                       break;
+               msleep(20);
+       }
+
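+       /* non-zero means the HW scan did not stop within the timeout */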
+       return test_bit(STATUS_SCAN_HW, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
+
+/* Service response to REPLY_SCAN_CMD (0x80) */
+static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scanreq_notification *notif =
+           (struct iwl_scanreq_notification *)pkt->u.raw;
+
+       IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service SCAN_START_NOTIFICATION (0x82) */
+static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scanstart_notification *notif =
+           (struct iwl_scanstart_notification *)pkt->u.raw;
+       priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+       IWL_DEBUG_SCAN(priv, "Scan start: "
+                      "%d [802.11%s] "
+                      "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
+                      notif->channel,
+                      notif->band ? "bg" : "a",
+                      le32_to_cpu(notif->tsf_high),
+                      le32_to_cpu(notif->tsf_low),
+                      notif->status, notif->beacon_timer);
+}
+
+/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
+static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scanresults_notification *notif =
+           (struct iwl_scanresults_notification *)pkt->u.raw;
+
+       IWL_DEBUG_SCAN(priv, "Scan ch.res: "
+                      "%d [802.11%s] "
+                      "(TSF: 0x%08X:%08X) - %d "
+                      "elapsed=%lu usec\n",
+                      notif->channel,
+                      notif->band ? "bg" : "a",
+                      le32_to_cpu(notif->tsf_high),
+                      le32_to_cpu(notif->tsf_low),
+                      le32_to_cpu(notif->statistics[0]),
+                      le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
+#endif
+}
+
+/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
+static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_mem_buffer *rxb)
+{
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+       IWL_DEBUG_SCAN(priv,
+                       "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+                      scan_notif->scanned_channels,
+                      scan_notif->tsf_low,
+                      scan_notif->tsf_high, scan_notif->status);
+
+       /* The HW is no longer scanning */
+       clear_bit(STATUS_SCAN_HW, &priv->status);
+
+       IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
+                      (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+                      jiffies_to_msecs(jiffies - priv->scan_start));
+
+       queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
+{
+       /* scan handlers */
+       priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
+       priv->rx_handlers[SCAN_START_NOTIFICATION] =
+                                       iwl_legacy_rx_scan_start_notif;
+       priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
+                                       iwl_legacy_rx_scan_results_notif;
+       priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
+                                       iwl_legacy_rx_scan_complete_notif;
+}
+EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
+
+inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+                                    enum ieee80211_band band,
+                                    u8 n_probes)
+{
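+       /* dwell longer on a channel when more probe requests will be sent */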
+       if (band == IEEE80211_BAND_5GHZ)
+               return IWL_ACTIVE_DWELL_TIME_52 +
+                       IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+       else
+               return IWL_ACTIVE_DWELL_TIME_24 +
+                       IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
+
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+                              enum ieee80211_band band,
+                              struct ieee80211_vif *vif)
+{
+       struct iwl_rxon_context *ctx;
+       u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+           IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
+           IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
+
+       if (iwl_legacy_is_any_associated(priv)) {
+               /*
+                * If we're associated, we clamp the maximum passive
+                * dwell time to be 98% of the smallest beacon interval
+                * (minus 2 * channel tune time)
+                */
+               for_each_context(priv, ctx) {
+                       u16 value;
+
+                       if (!iwl_legacy_is_associated_ctx(ctx))
+                               continue;
+                       value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+                       if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
+                               value = IWL_PASSIVE_DWELL_BASE;
+                       value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+                       passive = min(value, passive);
+               }
+       }
+
+       return passive;
+}
+EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
+
+void iwl_legacy_init_scan_params(struct iwl_priv *priv)
+{
+       u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+       if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
+               priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+       if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
+               priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(iwl_legacy_init_scan_params);
+
+static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
+                                         struct ieee80211_vif *vif,
+                                         bool internal,
+                                         enum ieee80211_band band)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+               return -EOPNOTSUPP;
+
+       cancel_delayed_work(&priv->scan_check);
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_WARN(priv, "Request scan called when driver not ready.\n");
+               return -EIO;
+       }
+
+       if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+               IWL_DEBUG_SCAN(priv,
+                       "Multiple concurrent scan requests.\n");
+               return -EBUSY;
+       }
+
+       if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
+               return -EBUSY;
+       }
+
+       IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
+                       internal ? "internal short " : "");
+
+       set_bit(STATUS_SCANNING, &priv->status);
+       priv->is_internal_short_scan = internal;
+       priv->scan_start = jiffies;
+       priv->scan_band = band;
+
+       ret = priv->cfg->ops->utils->request_scan(priv, vif);
+       if (ret) {
+               clear_bit(STATUS_SCANNING, &priv->status);
+               priv->is_internal_short_scan = false;
+               return ret;
+       }
+
+       queue_delayed_work(priv->workqueue, &priv->scan_check,
+                          IWL_SCAN_CHECK_WATCHDOG);
+
+       return 0;
+}
+
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif,
+                   struct cfg80211_scan_request *req)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (req->n_channels == 0)
+               return -EINVAL;
+
+       mutex_lock(&priv->mutex);
+
+       if (test_bit(STATUS_SCANNING, &priv->status) &&
+           !priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+
+       /* mac80211 will only ask for one band at a time */
+       priv->scan_request = req;
+       priv->scan_vif = vif;
+
+       /*
+        * If an internal scan is in progress, just set
+        * up the scan_request as per above.
+        */
+       if (priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+               ret = 0;
+       } else
+               ret = iwl_legacy_scan_initiate(priv, vif, false,
+                                       req->channels[0]->band);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+out_unlock:
+       mutex_unlock(&priv->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
+
+/*
+ * Internal short scan: this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF-related problems.
+ */
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
+{
+       queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
+{
+       struct iwl_priv *priv =
+               container_of(work, struct iwl_priv, start_internal_scan);
+
+       IWL_DEBUG_SCAN(priv, "Start internal scan\n");
+
+       mutex_lock(&priv->mutex);
+
+       if (priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+               goto unlock;
+       }
+
+       if (test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+               goto unlock;
+       }
+
+       if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
+               IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
+ unlock:
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_check(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, scan_check.work);
+
+       IWL_DEBUG_SCAN(priv, "Scan check work\n");
+
+       /* Since we got here, the firmware has not finished the scan and is
+        * most likely in bad shape, so don't bother sending an abort command;
+        * just force scan complete to mac80211 */
+       mutex_lock(&priv->mutex);
+       iwl_legacy_force_scan_end(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+/**
+ * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
+ */
+u16
+iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+                      const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+       int len = 0;
+       u8 *pos = NULL;
+
+       /* Make sure there is enough space for the probe request,
+        * two mandatory IEs and the data */
+       left -= 24;
+       if (left < 0)
+               return 0;
+
+       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+       memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
+       memcpy(frame->sa, ta, ETH_ALEN);
+       memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
+       frame->seq_ctrl = 0;
+
+       len += 24;
+
+       /* ...next IE... */
+       pos = &frame->u.probe_req.variable[0];
+
+       /* fill in our SSID IE (zero length = wildcard SSID) */
+       left -= 2;
+       if (left < 0)
+               return 0;
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = 0;
+
+       len += 2;
+
+       if (WARN_ON(left < ie_len))
+               return len;
+
+       if (ies && ie_len) {
+               memcpy(pos, ies, ie_len);
+               len += ie_len;
+       }
+
+       return (u16)len;
+}
+EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
+
+static void iwl_legacy_bg_abort_scan(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
+
+       IWL_DEBUG_SCAN(priv, "Abort scan work\n");
+
+       /* Keep the scan_check work queued in case the firmware does not
+        * report back a scan completed notification */
+       mutex_lock(&priv->mutex);
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_completed(struct work_struct *work)
+{
+       struct iwl_priv *priv =
+           container_of(work, struct iwl_priv, scan_completed);
+       bool aborted;
+
+       IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
+                      priv->is_internal_short_scan ? "internal short " : "");
+
+       cancel_delayed_work(&priv->scan_check);
+
+       mutex_lock(&priv->mutex);
+
+       aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+       if (aborted)
+               IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+       if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+               goto out_settings;
+       }
+
+       if (priv->is_internal_short_scan && !aborted) {
+               int err;
+
+               /* Check if mac80211 requested scan during our internal scan */
+               if (priv->scan_request == NULL)
+                       goto out_complete;
+
+               /* If so request a new scan */
+               err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
+                                       priv->scan_request->channels[0]->band);
+               if (err) {
+                       IWL_DEBUG_SCAN(priv,
+                               "failed to initiate pending scan: %d\n", err);
+                       aborted = true;
+                       goto out_complete;
+               }
+
+               goto out;
+       }
+
+out_complete:
+       iwl_legacy_complete_scan(priv, aborted);
+
+out_settings:
+       /* Can we still talk to firmware ? */
+       if (!iwl_legacy_is_ready_rf(priv))
+               goto out;
+
+       /*
+        * We do not commit power settings while scan is pending,
+        * do it now if the settings changed.
+        */
+       iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
+                                                               false);
+       iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+
+       priv->cfg->ops->utils->post_scan(priv);
+
+out:
+       mutex_unlock(&priv->mutex);
+}
+
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
+{
+       INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
+       INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
+       INIT_WORK(&priv->start_internal_scan,
+                               iwl_legacy_bg_start_internal_scan);
+       INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
+
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
+{
+       cancel_work_sync(&priv->start_internal_scan);
+       cancel_work_sync(&priv->abort_scan);
+       cancel_work_sync(&priv->scan_completed);
+
+       if (cancel_delayed_work_sync(&priv->scan_check)) {
+               mutex_lock(&priv->mutex);
+               iwl_legacy_force_scan_end(priv);
+               mutex_unlock(&priv->mutex);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644 (file)
index 0000000..9f70a47
--- /dev/null
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_spectrum_h__
+#define __iwl_legacy_spectrum_h__
+enum {                         /* ieee80211_basic_report.map */
+       IEEE80211_BASIC_MAP_BSS = (1 << 0),
+       IEEE80211_BASIC_MAP_OFDM = (1 << 1),
+       IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
+       IEEE80211_BASIC_MAP_RADAR = (1 << 3),
+       IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
+       /* Bits 5-7 are reserved */
+
+};
+struct ieee80211_basic_report {
+       u8 channel;
+       __le64 start_time;
+       __le16 duration;
+       u8 map;
+} __packed;
+
+enum {                         /* ieee80211_measurement_request.mode */
+       /* Bit 0 is reserved */
+       IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
+       IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
+       IEEE80211_MEASUREMENT_REPORT = (1 << 3),
+       /* Bits 4-7 are reserved */
+};
+
+enum {
+       IEEE80211_REPORT_BASIC = 0,     /* required */
+       IEEE80211_REPORT_CCA = 1,       /* optional */
+       IEEE80211_REPORT_RPI = 2,       /* optional */
+       /* 3-255 reserved */
+};
+
+struct ieee80211_measurement_params {
+       u8 channel;
+       __le64 start_time;
+       __le16 duration;
+} __packed;
+
+struct ieee80211_info_element {
+       u8 id;
+       u8 len;
+       u8 data[0];
+} __packed;
+
+struct ieee80211_measurement_request {
+       struct ieee80211_info_element ie;
+       u8 token;
+       u8 mode;
+       u8 type;
+       struct ieee80211_measurement_params params[0];
+} __packed;
+
+struct ieee80211_measurement_report {
+       struct ieee80211_info_element ie;
+       u8 token;
+       u8 mode;
+       u8 type;
+       union {
+               struct ieee80211_basic_report basic[0];
+       } u;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644 (file)
index 0000000..47c9da3
--- /dev/null
@@ -0,0 +1,816 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/lockdep.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+
+/* priv->sta_lock must be held */
+static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+
+       if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+               IWL_ERR(priv,
+                       "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+                       sta_id, priv->stations[sta_id].sta.sta.addr);
+
+       if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+               IWL_DEBUG_ASSOC(priv,
+                       "STA id %u addr %pM already present"
+                       " in uCode (according to driver)\n",
+                       sta_id, priv->stations[sta_id].sta.sta.addr);
+       } else {
+               priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+               IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+                               sta_id, priv->stations[sta_id].sta.sta.addr);
+       }
+}
+
+static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
+                                   struct iwl_legacy_addsta_cmd *addsta,
+                                   struct iwl_rx_packet *pkt,
+                                   bool sync)
+{
+       u8 sta_id = addsta->sta.sta_id;
+       unsigned long flags;
+       int ret = -EIO;
+
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+                       pkt->hdr.flags);
+               return ret;
+       }
+
+       IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+                      sta_id);
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       switch (pkt->u.add_sta.status) {
+       case ADD_STA_SUCCESS_MSK:
+               IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+               iwl_legacy_sta_ucode_activate(priv, sta_id);
+               ret = 0;
+               break;
+       case ADD_STA_NO_ROOM_IN_TABLE:
+               IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+                       sta_id);
+               break;
+       case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+               IWL_ERR(priv,
+                       "Adding station %d failed, no block ack resource.\n",
+                       sta_id);
+               break;
+       case ADD_STA_MODIFY_NON_EXIST_STA:
+               IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+                       sta_id);
+               break;
+       default:
+               IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+                               pkt->u.add_sta.status);
+               break;
+       }
+
+       IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+                      priv->stations[sta_id].sta.mode ==
+                      STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
+                      sta_id, priv->stations[sta_id].sta.sta.addr);
+
+       /*
+        * XXX: The MAC address in the command buffer is often changed from
+        * the original sent to the device. That is, the MAC address
+        * written to the command buffer often is not the same MAC address
+        * read from the command buffer when the command returns. This
+        * issue has not yet been resolved and this debugging is left to
+        * observe the problem.
+        */
+       IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+                      priv->stations[sta_id].sta.mode ==
+                      STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+                      addsta->sta.addr);
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return ret;
+}
+
+static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
+                                struct iwl_device_cmd *cmd,
+                                struct iwl_rx_packet *pkt)
+{
+       struct iwl_legacy_addsta_cmd *addsta =
+               (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
+
+       iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
+
+}
+
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+                    struct iwl_legacy_addsta_cmd *sta, u8 flags)
+{
+       struct iwl_rx_packet *pkt = NULL;
+       int ret = 0;
+       u8 data[sizeof(*sta)];
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_ADD_STA,
+               .flags = flags,
+               .data = data,
+       };
+       u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+       IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+                      sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
+
+       if (flags & CMD_ASYNC)
+               cmd.callback = iwl_legacy_add_sta_callback;
+       else {
+               cmd.flags |= CMD_WANT_SKB;
+               might_sleep();
+       }
+
+       cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
+       ret = iwl_legacy_send_cmd(priv, &cmd);
+
+       if (ret || (flags & CMD_ASYNC))
+               return ret;
+
+       if (ret == 0) {
+               pkt = (struct iwl_rx_packet *)cmd.reply_page;
+               ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
+       }
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_add_sta);
+
+static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
+                                  struct ieee80211_sta *sta,
+                                  struct iwl_rxon_context *ctx)
+{
+       struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+       __le32 sta_flags;
+       u8 mimo_ps_mode;
+
+       if (!sta || !sta_ht_inf->ht_supported)
+               goto done;
+
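+       /* SM PS mode is carried in bits 2-3 of the HT capability info field */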
+       mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+       IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+                       (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+                       "static" :
+                       (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+                       "dynamic" : "disabled");
+
+       sta_flags = priv->stations[index].sta.station_flags;
+
+       sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+       switch (mimo_ps_mode) {
+       case WLAN_HT_CAP_SM_PS_STATIC:
+               sta_flags |= STA_FLG_MIMO_DIS_MSK;
+               break;
+       case WLAN_HT_CAP_SM_PS_DYNAMIC:
+               sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+               break;
+       case WLAN_HT_CAP_SM_PS_DISABLED:
+               break;
+       default:
+               IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+               break;
+       }
+
+       sta_flags |= cpu_to_le32(
+             (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+       sta_flags |= cpu_to_le32(
+             (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+               sta_flags |= STA_FLG_HT40_EN_MSK;
+       else
+               sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+       priv->stations[index].sta.station_flags = sta_flags;
+ done:
+       return;
+}
+
+/**
+ * iwl_legacy_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                   const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+       struct iwl_station_entry *station;
+       int i;
+       u8 sta_id = IWL_INVALID_STATION;
+       u16 rate;
+
+       if (is_ap)
+               sta_id = ctx->ap_sta_id;
+       else if (is_broadcast_ether_addr(addr))
+               sta_id = ctx->bcast_sta_id;
+       else
+               for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
+                       if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+                                               addr)) {
+                               sta_id = i;
+                               break;
+                       }
+
+                       if (!priv->stations[i].used &&
+                           sta_id == IWL_INVALID_STATION)
+                               sta_id = i;
+               }
+
+       /*
+        * These two conditions have the same outcome, but keep them
+        * separate
+        */
+       if (unlikely(sta_id == IWL_INVALID_STATION))
+               return sta_id;
+
+       /*
+        * uCode is not able to deal with multiple requests to add a
+        * station. Keep track if one is in progress so that we do not send
+        * another.
+        */
+       if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+               IWL_DEBUG_INFO(priv,
+                               "STA %d already in process of being added.\n",
+                               sta_id);
+               return sta_id;
+       }
+
+       if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+           (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+           !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+               IWL_DEBUG_ASSOC(priv,
+                               "STA %d (%pM) already added, not adding again.\n",
+                               sta_id, addr);
+               return sta_id;
+       }
+
+       station = &priv->stations[sta_id];
+       station->used = IWL_STA_DRIVER_ACTIVE;
+       IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+                       sta_id, addr);
+       priv->num_stations++;
+
+       /* Set up the REPLY_ADD_STA command to send to device */
+       memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
+       memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+       station->sta.mode = 0;
+       station->sta.sta.sta_id = sta_id;
+       station->sta.station_flags = ctx->station_flags;
+       station->ctxid = ctx->ctxid;
+
+       if (sta) {
+               struct iwl_station_priv_common *sta_priv;
+
+               sta_priv = (void *)sta->drv_priv;
+               sta_priv->ctx = ctx;
+       }
+
+       /*
+        * OK to call unconditionally, since local stations (IBSS BSSID
+        * STA and broadcast STA) pass in a NULL sta, and mac80211
+        * doesn't allow HT IBSS.
+        */
+       iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
+
+       /* 3945 only */
+       rate = (priv->band == IEEE80211_BAND_5GHZ) ?
+               IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+       /* Turn on both antennas for the station... */
+       station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+       return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_legacy_add_station_common - prepare a station entry and add it to the uCode
+ */
+int
+iwl_legacy_add_station_common(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                          const u8 *addr, bool is_ap,
+                          struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+       unsigned long flags_spin;
+       int ret = 0;
+       u8 sta_id;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       *sta_id_r = 0;
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+                       addr);
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EINVAL;
+       }
+
+       /*
+        * uCode is not able to deal with multiple requests to add a
+        * station. Keep track if one is in progress so that we do not send
+        * another.
+        */
+       if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+               IWL_DEBUG_INFO(priv,
+                       "STA %d already in process of being added.\n",
+                      sta_id);
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EEXIST;
+       }
+
+       if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+           (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+               IWL_DEBUG_ASSOC(priv,
+                       "STA %d (%pM) already added, not adding again.\n",
+                       sta_id, addr);
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EEXIST;
+       }
+
+       priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                               sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       /* Add station to device's station table */
+       ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       if (ret) {
+               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+               IWL_ERR(priv, "Adding station %pM failed.\n",
+                       priv->stations[sta_id].sta.sta.addr);
+               priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+               priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       }
+       *sta_id_r = sta_id;
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_add_station_common);
+
+/**
+ * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+       /* Ucode must be active and driver must be non active */
+       if ((priv->stations[sta_id].used &
+            (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+                                               IWL_STA_UCODE_ACTIVE)
+               IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+       priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+       memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+       IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
+                                  const u8 *addr, int sta_id,
+                                  bool temporary)
+{
+       struct iwl_rx_packet *pkt;
+       int ret;
+
+       unsigned long flags_spin;
+       struct iwl_rem_sta_cmd rm_sta_cmd;
+
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_REMOVE_STA,
+               .len = sizeof(struct iwl_rem_sta_cmd),
+               .flags = CMD_SYNC,
+               .data = &rm_sta_cmd,
+       };
+
+       memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+       rm_sta_cmd.num_sta = 1;
+       memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+       cmd.flags |= CMD_WANT_SKB;
+
+       ret = iwl_legacy_send_cmd(priv, &cmd);
+
+       if (ret)
+               return ret;
+
+       pkt = (struct iwl_rx_packet *)cmd.reply_page;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+                         pkt->hdr.flags);
+               ret = -EIO;
+       }
+
+       if (!ret) {
+               switch (pkt->u.rem_sta.status) {
+               case REM_STA_SUCCESS_MSK:
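+                       /* only clear the ucode-active state on a permanent removal */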
+                       if (!temporary) {
+                               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+                               iwl_legacy_sta_ucode_deactivate(priv, sta_id);
+                               spin_unlock_irqrestore(&priv->sta_lock,
+                                                               flags_spin);
+                       }
+                       IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+                       break;
+               default:
+                       ret = -EIO;
+                       IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+                       break;
+               }
+       }
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+
+       return ret;
+}
+
+/**
+ * iwl_legacy_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
+                      const u8 *addr)
+{
+       unsigned long flags;
+
+       if (!iwl_legacy_is_ready(priv)) {
+               IWL_DEBUG_INFO(priv,
+                       "Unable to remove station %pM, device not ready.\n",
+                       addr);
+               /*
+                * It is typical for stations to be removed when we are
+                * going down. Return success since device will be down
+                * soon anyway
+                */
+               return 0;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
+                       sta_id, addr);
+
+       if (WARN_ON(sta_id == IWL_INVALID_STATION))
+               return -EINVAL;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+               IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+                               addr);
+               goto out_err;
+       }
+
+       if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+               IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+                               addr);
+               goto out_err;
+       }
+
+       if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+               kfree(priv->stations[sta_id].lq);
+               priv->stations[sta_id].lq = NULL;
+       }
+
+       priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+       priv->num_stations--;
+
+       BUG_ON(priv->num_stations < 0);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
+out_err:
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
+
+/**
+ * iwl_legacy_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx)
+{
+       int i;
+       unsigned long flags_spin;
+       bool cleared = false;
+
+       IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+                       continue;
+
+               if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+                       IWL_DEBUG_INFO(priv,
+                               "Clearing ucode active for station %d\n", i);
+                       priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+                       cleared = true;
+               }
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       if (!cleared)
+               IWL_DEBUG_INFO(priv,
+                       "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
+
+/**
+ * iwl_legacy_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void
+iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_addsta_cmd sta_cmd;
+       struct iwl_link_quality_cmd lq;
+       unsigned long flags_spin;
+       int i;
+       bool found = false;
+       int ret;
+       bool send_lq;
+
+       if (!iwl_legacy_is_ready(priv)) {
+               IWL_DEBUG_INFO(priv,
+                       "Not ready yet, not restoring any stations.\n");
+               return;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if (ctx->ctxid != priv->stations[i].ctxid)
+                       continue;
+               if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+                           !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+                       IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+                                       priv->stations[i].sta.sta.addr);
+                       priv->stations[i].sta.mode = 0;
+                       priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+                       found = true;
+               }
+       }
+
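+       /* Second pass: send ADD_STA (and any saved LQ command) for each marked
+        * station, dropping sta_lock around the synchronous commands */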
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+                       memcpy(&sta_cmd, &priv->stations[i].sta,
+                              sizeof(struct iwl_legacy_addsta_cmd));
+                       send_lq = false;
+                       if (priv->stations[i].lq) {
+                               memcpy(&lq, priv->stations[i].lq,
+                                      sizeof(struct iwl_link_quality_cmd));
+                               send_lq = true;
+                       }
+                       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+                       ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+                       if (ret) {
+                               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+                               IWL_ERR(priv, "Adding station %pM failed.\n",
+                                       priv->stations[i].sta.sta.addr);
+                               priv->stations[i].used &=
+                                               ~IWL_STA_DRIVER_ACTIVE;
+                               priv->stations[i].used &=
+                                               ~IWL_STA_UCODE_INPROGRESS;
+                               spin_unlock_irqrestore(&priv->sta_lock,
+                                                               flags_spin);
+                       }
+                       /*
+                        * Rate scaling has already been initialized, send
+                        * current LQ command
+                        */
+                       if (send_lq)
+                               iwl_legacy_send_lq_cmd(priv, ctx, &lq,
+                                                               CMD_SYNC, true);
+                       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+                       priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+               }
+       }
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       if (!found)
+               IWL_DEBUG_INFO(priv, "Restoring all known stations"
+                               " .... no stations to be restored.\n");
+       else
+               IWL_DEBUG_INFO(priv, "Restoring all known stations"
+                               " .... complete.\n");
+}
+EXPORT_SYMBOL(iwl_legacy_restore_stations);
+
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->sta_key_max_num; i++)
+               if (!test_and_set_bit(i, &priv->ucode_key_table))
+                       return i;
+
+       return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
+
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if (!(priv->stations[i].used & IWL_STA_BCAST))
+                       continue;
+
+               priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+               priv->num_stations--;
+               BUG_ON(priv->num_stations < 0);
+               kfree(priv->stations[i].lq);
+               priv->stations[i].lq = NULL;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+                          struct iwl_link_quality_cmd *lq)
+{
+       int i;
+       IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+       IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+                      lq->general_params.single_stream_ant_msk,
+                      lq->general_params.dual_stream_ant_msk);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+               IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+                              i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+                                  struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that an HT rate has been in use and we lose
+ * connectivity with the AP; mac80211 will then first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx,
+                             struct iwl_link_quality_cmd *lq)
+{
+       int i;
+
+       if (ctx->ht.enabled)
+               return true;
+
+       IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+                      ctx->active.channel);
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+                                               RATE_MCS_HT_MSK) {
+                       IWL_DEBUG_INFO(priv,
+                                      "index %d of LQ expects HT channel\n",
+                                      i);
+                       return false;
+               }
+       }
+       return true;
+}
+
+/**
+ * iwl_legacy_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we call a callback in
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                   struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+       int ret = 0;
+       unsigned long flags_spin;
+
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_TX_LINK_QUALITY_CMD,
+               .len = sizeof(struct iwl_link_quality_cmd),
+               .flags = flags,
+               .data = lq,
+       };
+
+       if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+               return -EINVAL;
+
+
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EINVAL;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       iwl_legacy_dump_lq_cmd(priv, lq);
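+       /* an "init" LQ command must be synchronous so that the
+        * IWL_STA_UCODE_INPROGRESS bit can be cleared once it completes */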
+       BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+       if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
+               ret = iwl_legacy_send_cmd(priv, &cmd);
+       else
+               ret = -EINVAL;
+
+       if (cmd.flags & CMD_ASYNC)
+               return ret;
+
+       if (init) {
+               IWL_DEBUG_INFO(priv, "init LQ command complete,"
+                               " clearing sta addition status for sta %d\n",
+                              lq->sta_id);
+               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+               priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
+
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+       int ret;
+
+       IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+                       sta->addr);
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+                       sta->addr);
+       ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
+       if (ret)
+               IWL_ERR(priv, "Error removing station %pM\n",
+                       sta->addr);
+       mutex_unlock(&priv->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644 (file)
index 0000000..67bd75f
--- /dev/null
@@ -0,0 +1,148 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_sta_h__
+#define __iwl_legacy_sta_h__
+
+#include "iwl-dev.h"
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE  BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS  BIT(2) /* ucode entry is in process of
+                                           being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+                               (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+
+void iwl_legacy_restore_stations(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx);
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx);
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+                       struct iwl_legacy_addsta_cmd *sta, u8 flags);
+int iwl_legacy_add_station_common(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       const u8 *addr, bool is_ap,
+                       struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_legacy_remove_station(struct iwl_priv *priv,
+                       const u8 sta_id,
+                       const u8 *addr);
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+                       struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta);
+
+u8 iwl_legacy_prep_station(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       const u8 *addr, bool is_ap,
+                       struct ieee80211_sta *sta);
+
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       struct iwl_link_quality_cmd *lq,
+                       u8 flags, bool init);
+
+/**
+ * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that, in case we are
+ * coming here from a hardware restart, mac80211 will be able to
+ * reconfigure stations -- if we are getting here through the normal
+ * down flow, the stations will already have been cleared.
+ */
+static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       struct iwl_rxon_context *ctx;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       memset(priv->stations, 0, sizeof(priv->stations));
+       priv->num_stations = 0;
+
+       priv->ucode_key_table = 0;
+
+       for_each_context(priv, ctx) {
+               /*
+                * Remove all key information that is not stored as part
+                * of station information since mac80211 may not have had
+                * a chance to remove all the keys. When device is
+                * reconfigured by mac80211 after an error all keys will
+                * be reconfigured.
+                */
+               memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+               ctx->key_mapping_keys = 0;
+       }
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
+{
+       if (WARN_ON(!sta))
+               return IWL_INVALID_STATION;
+
+       return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
+                                         struct iwl_rxon_context *context,
+                                         struct ieee80211_sta *sta)
+{
+       int sta_id;
+
+       if (!sta)
+               return context->bcast_sta_id;
+
+       sta_id = iwl_legacy_sta_id(sta);
+
+       /*
+        * mac80211 should not be passing a partially
+        * initialised station!
+        */
+       WARN_ON(sta_id == IWL_INVALID_STATION);
+
+       return sta_id;
+}
+#endif /* __iwl_legacy_sta_h__ */
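As a usage note for iwl_legacy_sta_id_or_broadcast() declared above: the iwl3945 TX path later in this patch resolves the destination station this way. A condensed, illustrative sketch (the BSS context and the drop handling are assumptions for the example):

static int example_resolve_tx_sta(struct iwl_priv *priv,
				  struct ieee80211_sta *sta)
{
	/* Fall back to the broadcast station when mac80211 passes no station. */
	int sta_id = iwl_legacy_sta_id_or_broadcast(priv,
				&priv->contexts[IWL_RXON_CTX_BSS], sta);

	if (sta_id == IWL_INVALID_STATION)
		IWL_DEBUG_DROP(priv, "no usable station entry, dropping\n");

	return sta_id;
}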
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644 (file)
index 0000000..a227773
--- /dev/null
@@ -0,0 +1,660 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/**
+ * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
+ */
+void
+iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       u32 reg = 0;
+       int txq_id = txq->q.id;
+
+       if (txq->need_update == 0)
+               return;
+
+       /* if we're trying to save power */
+       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+               /* wake up nic if it's powered down ...
+                * uCode will wake up, and interrupt us again, so next
+                * time we'll skip this part. */
+               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(priv,
+                                       "Tx queue %d requesting wakeup,"
+                                       " GP1 = 0x%x\n", txq_id, reg);
+                       iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       return;
+               }
+
+               iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+                               txq->q.write_ptr | (txq_id << 8));
+
+               /*
+                * else not in power-save mode,
+                * uCode will never sleep when we're
+                * trying to tx (during RFKILL, we're not trying to tx).
+                */
+       } else
+               iwl_write32(priv, HBUS_TARG_WRPTR,
+                           txq->q.write_ptr | (txq_id << 8));
+       txq->need_update = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
+
+/**
+ * iwl_legacy_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->write_ptr != q->read_ptr) {
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
+
+/**
+ * iwl_legacy_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       iwl_legacy_tx_queue_unmap(priv, txq_id);
+
+       /* De-alloc array of command/tx buffers */
+       for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+               kfree(txq->cmd[i]);
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->q.n_bd)
+               dma_free_coherent(dev, priv->hw_params.tfd_size *
+                                 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+
+       /* De-alloc array of per-TFD driver data */
+       kfree(txq->txb);
+       txq->txb = NULL;
+
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
+       /* 0-fill queue descriptor structure */
+       memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
+
+/**
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct iwl_queue *q = &txq->q;
+       bool huge = false;
+       int i;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->read_ptr != q->write_ptr) {
+               /* we have no way to tell if it is a huge cmd ATM */
+               i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
+
+               if (txq->meta[i].flags & CMD_SIZE_HUGE)
+                       huge = true;
+               else
+                       pci_unmap_single(priv->pci_dev,
+                                        dma_unmap_addr(&txq->meta[i], mapping),
+                                        dma_unmap_len(&txq->meta[i], len),
+                                        PCI_DMA_BIDIRECTIONAL);
+
+               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+
+       if (huge) {
+               i = q->n_window;
+               pci_unmap_single(priv->pci_dev,
+                                dma_unmap_addr(&txq->meta[i], mapping),
+                                dma_unmap_len(&txq->meta[i], len),
+                                PCI_DMA_BIDIRECTIONAL);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
+
+/**
+ * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       iwl_legacy_cmd_queue_unmap(priv);
+
+       /* De-alloc array of command/tx buffers */
+       for (i = 0; i <= TFD_CMD_SLOTS; i++)
+               kfree(txq->cmd[i]);
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->q.n_bd)
+               dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+                                 txq->tfds, txq->q.dma_addr);
+
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
+       /* 0-fill queue descriptor structure */
+       memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
+ * a packet for Tx, the free space drops below the low mark, the Tx queue is
+ * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
+ * space rises above the high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in iwl-4965-hw.h.
+ ***************************************************/
+
+int iwl_legacy_queue_space(const struct iwl_queue *q)
+{
+       int s = q->read_ptr - q->write_ptr;
+
+       if (q->read_ptr > q->write_ptr)
+               s -= q->n_bd;
+
+       if (s <= 0)
+               s += q->n_window;
+       /* keep some reserve to not confuse empty and full situations */
+       s -= 2;
+       if (s < 0)
+               s = 0;
+       return s;
+}
+EXPORT_SYMBOL(iwl_legacy_queue_space);
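A worked example of the arithmetic above may help: with 4 descriptors in flight out of a 256-entry ring and the 2-entry empty/full guard, 250 entries remain usable. A stand-alone sketch of the same computation, assuming n_window == n_bd as for the data queues set up later in this file:

#include <stdio.h>

/* Stand-alone copy of the free-space arithmetic, for illustration only. */
static int demo_queue_space(int read_ptr, int write_ptr, int n_bd, int n_window)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= n_bd;
	if (s <= 0)
		s += n_window;
	s -= 2;			/* reserve so empty is never confused with full */
	return s < 0 ? 0 : s;
}

int main(void)
{
	/* 4 TFDs in flight out of 256 -> 256 - 4 - 2 = 250 free entries */
	printf("%d\n", demo_queue_space(10, 14, 256, 256));
	return 0;
}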
+
+
+/**
+ * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
+                         int count, int slots_num, u32 id)
+{
+       q->n_bd = count;
+       q->n_window = slots_num;
+       q->id = id;
+
+       /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
+        * and iwl_legacy_queue_dec_wrap are broken. */
+       BUG_ON(!is_power_of_2(count));
+
+       /* slots_num must be power-of-two size, otherwise
+        * iwl_legacy_get_cmd_index is broken. */
+       BUG_ON(!is_power_of_2(slots_num));
+
+       q->low_mark = q->n_window / 4;
+       if (q->low_mark < 4)
+               q->low_mark = 4;
+
+       q->high_mark = q->n_window / 8;
+       if (q->high_mark < 2)
+               q->high_mark = 2;
+
+       q->write_ptr = q->read_ptr = 0;
+
+       return 0;
+}
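The power-of-two checks above matter because the index helpers in iwl-helpers.h (outside this hunk) reduce a modulo to a bit mask of the form index & (size - 1), which only equals index % size for power-of-two sizes. A minimal illustration of that idiom, not the driver's own helpers:

#define DEMO_RING_SIZE 256	/* a power of two, like TFD_QUEUE_SIZE_MAX */

/* Illustration only: wrapping an index around a power-of-two ring. */
static inline unsigned int demo_next_index(unsigned int index)
{
	return (index + 1) & (DEMO_RING_SIZE - 1);	/* == (index + 1) % DEMO_RING_SIZE */
}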
+
+/**
+ * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
+                             struct iwl_tx_queue *txq, u32 id)
+{
+       struct device *dev = &priv->pci_dev->dev;
+       size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+       /* Driver private data, only for Tx (not command) queues,
+        * not shared with device. */
+       if (id != priv->cmd_queue) {
+               txq->txb = kzalloc(sizeof(txq->txb[0]) *
+                                  TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
+               if (!txq->txb) {
+                       IWL_ERR(priv, "kmalloc for auxiliary BD "
+                                 "structures failed\n");
+                       goto error;
+               }
+       } else {
+               txq->txb = NULL;
+       }
+
+       /* Circular buffer of transmit frame descriptors (TFDs),
+        * shared with device */
+       txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+                                      GFP_KERNEL);
+       if (!txq->tfds) {
+               IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
+               goto error;
+       }
+       txq->q.id = id;
+
+       return 0;
+
+ error:
+       kfree(txq->txb);
+       txq->txb = NULL;
+
+       return -ENOMEM;
+}
+
+/**
+ * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                     int slots_num, u32 txq_id)
+{
+       int i, len;
+       int ret;
+       int actual_slots = slots_num;
+
+       /*
+        * Alloc buffer array for commands (Tx or other types of commands).
+        * For the command queue (#4/#9), allocate command space + one big
+        * command for scan, since the scan command is very large; the system
+        * will not run two scans at the same time, so only one is needed.
+        * For normal Tx queues (all other queues), no super-size command
+        * space is needed.
+        */
+       if (txq_id == priv->cmd_queue)
+               actual_slots++;
+
+       txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+                           GFP_KERNEL);
+       txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+                          GFP_KERNEL);
+
+       if (!txq->meta || !txq->cmd)
+               goto out_free_arrays;
+
+       len = sizeof(struct iwl_device_cmd);
+       for (i = 0; i < actual_slots; i++) {
+               /* only happens for cmd queue */
+               if (i == slots_num)
+                       len = IWL_MAX_CMD_SIZE;
+
+               txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+               if (!txq->cmd[i])
+                       goto err;
+       }
+
+       /* Alloc driver data array and TFD circular buffer */
+       ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
+       if (ret)
+               goto err;
+
+       txq->need_update = 0;
+
+       /*
+        * For the default queues 0-3, set up the swq_id
+        * already -- all others need to get one later
+        * (if they need one at all).
+        */
+       if (txq_id < 4)
+               iwl_legacy_set_swq_id(txq, txq_id, txq_id);
+
+       /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+        * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
+       BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+       /* Initialize queue's high/low-water marks, and head/tail indexes */
+       iwl_legacy_queue_init(priv, &txq->q,
+                               TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       priv->cfg->ops->lib->txq_init(priv, txq);
+
+       return 0;
+err:
+       for (i = 0; i < actual_slots; i++)
+               kfree(txq->cmd[i]);
+out_free_arrays:
+       kfree(txq->meta);
+       kfree(txq->cmd);
+
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
+
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                       int slots_num, u32 txq_id)
+{
+       int actual_slots = slots_num;
+
+       if (txq_id == priv->cmd_queue)
+               actual_slots++;
+
+       memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+       txq->need_update = 0;
+
+       /* Initialize queue's high/low-water marks, and head/tail indexes */
+       iwl_legacy_queue_init(priv, &txq->q,
+                               TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/**
+ * iwl_legacy_enqueue_hcmd - enqueue a uCode command
+ * @priv: device private data pointer
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the index (>= 0) of the command in
+ * the command queue.
+ */
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct iwl_queue *q = &txq->q;
+       struct iwl_device_cmd *out_cmd;
+       struct iwl_cmd_meta *out_meta;
+       dma_addr_t phys_addr;
+       unsigned long flags;
+       int len;
+       u32 idx;
+       u16 fix_size;
+
+       cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+       fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+
+       /* If any of the command structures ends up being larger than
+        * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
+        * we will need to increase the size of the TFD entries.
+        * Also check that the command buffer does not exceed the size
+        * of device_cmd and IWL_MAX_CMD_SIZE. */
+       BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+              !(cmd->flags & CMD_SIZE_HUGE));
+       BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
+
+       if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
+               IWL_WARN(priv, "Not sending command - %s KILL\n",
+                        iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
+               return -EIO;
+       }
+
+       if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+               IWL_ERR(priv, "No space in command queue\n");
+               IWL_ERR(priv, "Restarting adapter due to queue full\n");
+               queue_work(priv->workqueue, &priv->restart);
+               return -ENOSPC;
+       }
+
+       spin_lock_irqsave(&priv->hcmd_lock, flags);
+
+       /* If this is a huge cmd, mark the huge flag also on the meta.flags
+        * of the _original_ cmd. This is used for DMA mapping clean up.
+        */
+       if (cmd->flags & CMD_SIZE_HUGE) {
+               idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
+               txq->meta[idx].flags = CMD_SIZE_HUGE;
+       }
+
+       idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+       out_cmd = txq->cmd[idx];
+       out_meta = &txq->meta[idx];
+
+       memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+       out_meta->flags = cmd->flags;
+       if (cmd->flags & CMD_WANT_SKB)
+               out_meta->source = cmd;
+       if (cmd->flags & CMD_ASYNC)
+               out_meta->callback = cmd->callback;
+
+       out_cmd->hdr.cmd = cmd->id;
+       memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+       /* At this point, the out_cmd now has all of the incoming cmd
+        * information */
+
+       out_cmd->hdr.flags = 0;
+       out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
+                       INDEX_TO_SEQ(q->write_ptr));
+       if (cmd->flags & CMD_SIZE_HUGE)
+               out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+       len = sizeof(struct iwl_device_cmd);
+       if (idx == TFD_CMD_SLOTS)
+               len = IWL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       switch (out_cmd->hdr.cmd) {
+       case REPLY_TX_LINK_QUALITY_CMD:
+       case SENSITIVITY_CMD:
+               IWL_DEBUG_HC_DUMP(priv,
+                               "Sending command %s (#%x), seq: 0x%04X, "
+                               "%d bytes at %d[%d]:%d\n",
+                               iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+                               out_cmd->hdr.cmd,
+                               le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+                               q->write_ptr, idx, priv->cmd_queue);
+               break;
+       default:
+               IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+                               "%d bytes at %d[%d]:%d\n",
+                               iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+                               out_cmd->hdr.cmd,
+                               le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+                               q->write_ptr, idx, priv->cmd_queue);
+       }
+#endif
+       txq->need_update = 1;
+
+       if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
+               /* Set up entry in queue's byte count circular buffer */
+               priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+
+       phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+                                  fix_size, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_addr_set(out_meta, mapping, phys_addr);
+       dma_unmap_len_set(out_meta, len, fix_size);
+
+       trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
+                                               fix_size, cmd->flags);
+
+       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                  phys_addr, fix_size, 1,
+                                                  U32_PAD(cmd->len));
+
+       /* Increment and update queue's write index */
+       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+       iwl_legacy_txq_update_write_ptr(priv, txq);
+
+       spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+       return idx;
+}
+
+/**
+ * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When the FW advances the 'R' index, all entries between the old and new
+ * 'R' index need to be reclaimed. As a result, some free space forms. If
+ * there is enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
+                                  int idx, int cmd_idx)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+       int nfreed = 0;
+
+       if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
+               IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+                         "is out of range [0-%d] %d %d.\n", txq_id,
+                         idx, q->n_bd, q->write_ptr, q->read_ptr);
+               return;
+       }
+
+       for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+            q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               if (nfreed++ > 0) {
+                       IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+                                       q->write_ptr, q->read_ptr);
+                       queue_work(priv->workqueue, &priv->restart);
+               }
+
+       }
+}
+
+/**
+ * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int index = SEQ_TO_INDEX(sequence);
+       int cmd_index;
+       bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+       struct iwl_device_cmd *cmd;
+       struct iwl_cmd_meta *meta;
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+
+       /* If a Tx command is being handled and it isn't in the actual
+        * command queue, then a command routing bug has been introduced
+        * in the queue management code. */
+       if (WARN(txq_id != priv->cmd_queue,
+                "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+                 txq_id, priv->cmd_queue, sequence,
+                 priv->txq[priv->cmd_queue].q.read_ptr,
+                 priv->txq[priv->cmd_queue].q.write_ptr)) {
+               iwl_print_hex_error(priv, pkt, 32);
+               return;
+       }
+
+       /* If this is a huge cmd, clear the huge flag on the meta.flags
+        * of the _original_ cmd, so that iwl_legacy_cmd_queue_free() will
+        * not unmap the DMA buffer for the scan (huge) command.
+        */
+       if (huge) {
+               cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
+               txq->meta[cmd_index].flags = 0;
+       }
+       cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
+       cmd = txq->cmd[cmd_index];
+       meta = &txq->meta[cmd_index];
+
+       pci_unmap_single(priv->pci_dev,
+                        dma_unmap_addr(meta, mapping),
+                        dma_unmap_len(meta, len),
+                        PCI_DMA_BIDIRECTIONAL);
+
+       /* Input error checking is done when commands are added to the queue. */
+       if (meta->flags & CMD_WANT_SKB) {
+               meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+               rxb->page = NULL;
+       } else if (meta->callback)
+               meta->callback(priv, cmd, pkt);
+
+       iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+
+       if (!(meta->flags & CMD_ASYNC)) {
+               clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+               IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+                              iwl_legacy_get_cmd_string(cmd->hdr.cmd));
+               wake_up_interruptible(&priv->wait_command_queue);
+       }
+       meta->flags = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
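For CMD_ASYNC commands, the completion path above invokes meta->callback(priv, cmd, pkt). A sketch of what such a hook might look like; the callback member itself is declared in iwl-dev.h, outside this hunk, so the prototype here is an assumption for the example:

/* Illustrative asynchronous completion hook, not part of this patch. */
static void example_hcmd_callback(struct iwl_priv *priv,
				  struct iwl_device_cmd *cmd,
				  struct iwl_rx_packet *pkt)
{
	/* Runs from iwl_legacy_tx_cmd_complete() for CMD_ASYNC commands. */
	IWL_DEBUG_INFO(priv, "command %s (#%x) completed\n",
		       iwl_legacy_get_cmd_string(cmd->hdr.cmd), cmd->hdr.cmd);
}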
similarity index 89%
rename from drivers/net/wireless/iwlwifi/iwl3945-base.c
rename to drivers/net/wireless/iwlegacy/iwl3945-base.c
index 371abbf..ab87e1b 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -61,7 +61,6 @@
 #include "iwl-helpers.h"
 #include "iwl-dev.h"
 #include "iwl-spectrum.h"
-#include "iwl-legacy.h"
 
 /*
  * module name, copyright, version, etc.
@@ -70,7 +69,7 @@
 #define DRV_DESCRIPTION        \
 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
 
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
 #define VD "d"
 #else
 #define VD
@@ -82,7 +81,7 @@
  * this was configurable.
  */
 #define DRV_VERSION  IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT  "Copyright(c) 2003-2010 Intel Corporation"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -164,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
        if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
                        == STA_KEY_FLG_NO_ENC)
                priv->stations[sta_id].sta.key.key_offset =
-                                iwl_get_free_ucode_key_index(priv);
+                                iwl_legacy_get_free_ucode_key_index(priv);
        /* else, we are overriding an existing key => no need to allocate room
        * in uCode. */
 
@@ -177,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 
        IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
 
-       ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+       ret = iwl_legacy_send_add_sta(priv,
+                               &priv->stations[sta_id].sta, CMD_ASYNC);
 
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 
@@ -201,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
 static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
 {
        unsigned long flags;
-       struct iwl_addsta_cmd sta_cmd;
+       struct iwl_legacy_addsta_cmd sta_cmd;
 
        spin_lock_irqsave(&priv->sta_lock, flags);
        memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -210,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
        priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
        priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 
        IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
-       return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
 static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -318,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
                                int left)
 {
 
-       if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
+       if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
                return 0;
 
        if (priv->beacon_skb->len > left)
@@ -344,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
                return -ENOMEM;
        }
 
-       rate = iwl_rate_get_lowest_plcp(priv,
+       rate = iwl_legacy_get_lowest_plcp(priv,
                                &priv->contexts[IWL_RXON_CTX_BSS]);
 
        frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
 
-       rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
                              &frame->u.cmd[0]);
 
        iwl3945_free_frame(priv, frame);
@@ -443,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
-       priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
+       iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
 
        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
@@ -485,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        unsigned long flags;
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (iwl_is_rfkill(priv)) {
+       if (iwl_legacy_is_rfkill(priv)) {
                IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
                goto drop_unlock;
        }
@@ -500,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        fc = hdr->frame_control;
 
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
        else if (ieee80211_is_assoc_req(fc))
@@ -514,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        hdr_len = ieee80211_hdrlen(fc);
 
        /* Find index into station table for destination station */
-       sta_id = iwl_sta_id_or_broadcast(
+       sta_id = iwl_legacy_sta_id_or_broadcast(
                        priv, &priv->contexts[IWL_RXON_CTX_BSS],
                        info->control.sta);
        if (sta_id == IWL_INVALID_STATION) {
@@ -536,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        txq = &priv->txq[txq_id];
        q = &txq->q;
 
-       if ((iwl_queue_space(q) < q->high_mark))
+       if ((iwl_legacy_queue_space(q) < q->high_mark))
                goto drop;
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       idx = get_cmd_index(q, q->write_ptr, 0);
+       idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
 
        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -582,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);
 
-       iwl_dbg_log_tx_data_frame(priv, len, hdr);
-       iwl_update_stats(priv, true, fc, len);
+       iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+       iwl_legacy_update_stats(priv, true, fc, len);
        tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
        tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
 
@@ -642,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 
        /* Tell device the write index *just past* this latest filled TFD */
-       q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_txq_update_write_ptr(priv, txq);
+       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+       iwl_legacy_txq_update_write_ptr(priv, txq);
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       if ((iwl_queue_space(q) < q->high_mark)
+       if ((iwl_legacy_queue_space(q) < q->high_mark)
            && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
-                       iwl_txq_update_write_ptr(priv, txq);
+                       iwl_legacy_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
 
-               iwl_stop_queue(priv, txq);
+               iwl_legacy_stop_queue(priv, txq);
        }
 
        return 0;
@@ -683,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
        int duration = le16_to_cpu(params->duration);
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
-               add_time = iwl_usecs_to_beacons(priv,
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
+               add_time = iwl_legacy_usecs_to_beacons(priv,
                        le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
                        le16_to_cpu(ctx->timing.beacon_interval));
 
@@ -697,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
        cmd.len = sizeof(spectrum);
        spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
 
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
                spectrum.start_time =
-                       iwl_add_beacon_time(priv,
+                       iwl_legacy_add_beacon_time(priv,
                                priv->_3945.last_beacon_time, add_time,
                                le16_to_cpu(ctx->timing.beacon_interval));
        else
@@ -712,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
                spectrum.flags |= RXON_FLG_BAND_24G_MSK |
                    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
 
-       rc = iwl_send_cmd_sync(priv, &cmd);
+       rc = iwl_legacy_send_cmd_sync(priv, &cmd);
        if (rc)
                return rc;
 
@@ -739,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
                break;
        }
 
-       iwl_free_pages(priv, cmd.reply_page);
+       iwl_legacy_free_pages(priv, cmd.reply_page);
 
        return rc;
 }
@@ -783,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
 static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
                                 struct iwl_rx_mem_buffer *rxb)
 {
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 #endif
 
        IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
 }
 
-static void iwl3945_bg_beacon_update(struct work_struct *work)
-{
-       struct iwl_priv *priv =
-               container_of(work, struct iwl_priv, beacon_update);
-       struct sk_buff *beacon;
-
-       /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
-       beacon = ieee80211_beacon_get(priv->hw,
-                       priv->contexts[IWL_RXON_CTX_BSS].vif);
-
-       if (!beacon) {
-               IWL_ERR(priv, "update beacon failed\n");
-               return;
-       }
-
-       mutex_lock(&priv->mutex);
-       /* new beacon skb is allocated every time; dispose previous.*/
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = beacon;
-       mutex_unlock(&priv->mutex);
-
-       iwl3945_send_beacon_cmd(priv);
-}
-
 static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        u8 rate = beacon->beacon_notify_hdr.rate;
 
        IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -835,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 
        priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
 
-       if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
-           (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
-               queue_work(priv->workqueue, &priv->beacon_update);
 }
 
 /* Handle notification from uCode that card's power state is changing
@@ -862,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
                clear_bit(STATUS_RF_KILL_HW, &priv->status);
 
 
-       iwl_scan_cancel(priv);
+       iwl_legacy_scan_cancel(priv);
 
        if ((test_bit(STATUS_RF_KILL_HW, &status) !=
             test_bit(STATUS_RF_KILL_HW, &priv->status)))
@@ -885,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
 {
        priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
        priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
-       priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
-       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+       priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
        priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-                       iwl_rx_spectrum_measure_notif;
-       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
+                       iwl_legacy_rx_spectrum_measure_notif;
+       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
        priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-           iwl_rx_pm_debug_statistics_notif;
+           iwl_legacy_rx_pm_debug_statistics_notif;
        priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
 
        /*
@@ -902,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
        priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
        priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
 
-       iwl_setup_rx_scan_handlers(priv);
+       iwl_legacy_setup_rx_scan_handlers(priv);
        priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
 
        /* Set up hardware specific Rx handlers */
@@ -1003,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
 
        spin_lock_irqsave(&rxq->lock, flags);
        write = rxq->write & ~0x7;
-       while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+       while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
                /* Get next free Rx buffer, remove from free list */
                element = rxq->rx_free.next;
                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
@@ -1029,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
-               iwl_rx_queue_update_write_ptr(priv, rxq);
+               iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
        }
 }
 
@@ -1123,7 +1094,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                PCI_DMA_FROMDEVICE);
-                       __iwl_free_pages(priv, rxq->pool[i].page);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1170,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                PCI_DMA_FROMDEVICE);
-                       __iwl_free_pages(priv, rxq->pool[i].page);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
        }
@@ -1275,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
-               trace_iwlwifi_dev_rx(priv, pkt, len);
+               trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
 
                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
@@ -1292,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
                 *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
                if (priv->rx_handlers[pkt->hdr.cmd]) {
                        IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
-                               get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+                       iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
                        priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
                        priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
                } else {
                        /* No handling needed */
                        IWL_DEBUG_RX(priv,
                                "r %d i %d No handler needed for %s, 0x%02x\n",
-                               r, i, get_cmd_string(pkt->hdr.cmd),
+                               r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
                                pkt->hdr.cmd);
                }
 
@@ -1312,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
-                        * and fire off the (possibly) blocking iwl_send_cmd()
+                        * and fire off the (possibly) blocking iwl_legacy_send_cmd()
                         * as we reclaim the driver command queue */
                        if (rxb->page)
-                               iwl_tx_cmd_complete(priv, rxb);
+                               iwl_legacy_tx_cmd_complete(priv, rxb);
                        else
                                IWL_WARN(priv, "Claim null rxb?\n");
                }
@@ -1357,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 }
 
 /* call this function to flush any scheduled tasklet */
-static inline void iwl_synchronize_irq(struct iwl_priv *priv)
+static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
 {
        /* wait to make sure we flush pending tasklet*/
        synchronize_irq(priv->pci_dev->irq);
        tasklet_kill(&priv->irq_tasklet);
 }
 
-static const char *desc_lookup(int i)
+static const char *iwl3945_desc_lookup(int i)
 {
        switch (i) {
        case 1:
@@ -1401,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
        }
 
 
-       count = iwl_read_targ_mem(priv, base);
+       count = iwl_legacy_read_targ_mem(priv, base);
 
        if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
                IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -1414,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
        for (i = ERROR_START_OFFSET;
             i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
             i += ERROR_ELEM_SIZE) {
-               desc = iwl_read_targ_mem(priv, base + i);
+               desc = iwl_legacy_read_targ_mem(priv, base + i);
                time =
-                   iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
+                   iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
                blink1 =
-                   iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
+                   iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
                blink2 =
-                   iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
+                   iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
                ilink1 =
-                   iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
+                   iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
                ilink2 =
-                   iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
+                   iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
                data1 =
-                   iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
+                   iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
 
                IWL_ERR(priv,
                        "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
-                       desc_lookup(desc), desc, time, blink1, blink2,
+                       iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
                        ilink1, ilink2, data1);
-               trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
+               trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
                                        0, blink1, blink2, ilink1, ilink2);
        }
 }
@@ -1471,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
        iwl_grab_nic_access(priv);
 
        /* Set starting address; reads will auto-increment */
-       _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
        rmb();
 
        /* "time" is actually "data" for mode 0 (no timestamp).
         * place event id # at far right for easier visual parsing. */
        for (i = 0; i < num_events; i++) {
-               ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (mode == 0) {
                        /* data, ev */
                        if (bufsz) {
@@ -1487,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
                                                time, ev);
                        } else {
                                IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
-                               trace_iwlwifi_dev_ucode_event(priv, 0,
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
                                                              time, ev);
                        }
                } else {
-                       data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+                       data = _iwl_legacy_read_direct32(priv,
+                                                       HBUS_TARG_MEM_RDAT);
                        if (bufsz) {
                                pos += scnprintf(*buf + pos, bufsz - pos,
                                                "%010u:0x%08x:%04u\n",
@@ -1499,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
                        } else {
                                IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
                                        time, data, ev);
-                               trace_iwlwifi_dev_ucode_event(priv, time,
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, time,
                                                              data, ev);
                        }
                }
@@ -1570,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
        }
 
        /* event log header */
-       capacity = iwl_read_targ_mem(priv, base);
-       mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
-       num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
-       next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+       capacity = iwl_legacy_read_targ_mem(priv, base);
+       mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+       num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+       next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
 
        if (capacity > priv->cfg->base_params->max_event_log_size) {
                IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
@@ -1595,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
                return pos;
        }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
                size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
                        ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
 #else
@@ -1607,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
        IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
                  size);
 
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        if (display) {
                if (full_log)
                        bufsz = capacity * 48;
@@ -1617,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
                if (!*buf)
                        return -ENOMEM;
        }
-       if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+       if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
                /* if uCode has wrapped back to top of log,
                 * start at the oldest entry,
                 * i.e the next one that uCode would fill.
@@ -1647,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
        u32 inta, handled = 0;
        u32 inta_fh;
        unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        u32 inta_mask;
 #endif
 
@@ -1665,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
        inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
        iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
                /* just for debug */
                inta_mask = iwl_read32(priv, CSR_INT_MASK);
                IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -1690,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
                IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
 
                /* Tell the device to stop sending interrupts */
-               iwl_disable_interrupts(priv);
+               iwl_legacy_disable_interrupts(priv);
 
                priv->isr_stats.hw++;
-               iwl_irq_handle_error(priv);
+               iwl_legacy_irq_handle_error(priv);
 
                handled |= CSR_INT_BIT_HW_ERR;
 
                return;
        }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
@@ -1724,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
                IWL_ERR(priv, "Microcode SW error detected. "
                        "Restarting 0x%X.\n", inta);
                priv->isr_stats.sw++;
-               iwl_irq_handle_error(priv);
+               iwl_legacy_irq_handle_error(priv);
                handled |= CSR_INT_BIT_SW_ERR;
        }
 
        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-               iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
-               iwl_txq_update_write_ptr(priv, &priv->txq[0]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[1]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[2]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[3]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[4]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+               iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
 
                priv->isr_stats.wakeup++;
                handled |= CSR_INT_BIT_WAKEUP;
@@ -1757,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
                priv->isr_stats.tx++;
 
                iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
-               iwl_write_direct32(priv, FH39_TCSR_CREDIT
+               iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
                                        (FH39_SRVC_CHNL), 0x0);
                handled |= CSR_INT_BIT_FH_TX;
        }
@@ -1776,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
        /* Re-enable all interrupts */
        /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_enable_interrupts(priv);
+               iwl_legacy_enable_interrupts(priv);
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
                inta = iwl_read32(priv, CSR_INT);
                inta_mask = iwl_read32(priv, CSR_INT_MASK);
                inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -1806,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
                return added;
        }
 
-       active_dwell = iwl_get_active_dwell_time(priv, band, 0);
-       passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
 
        if (passive_dwell <= active_dwell)
                passive_dwell = active_dwell + 1;
 
 
-       channel = iwl_get_single_channel_number(priv, band);
+       channel = iwl_legacy_get_single_channel_number(priv, band);
 
        if (channel) {
                scan_ch->channel = channel;
@@ -1849,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
        if (!sband)
                return 0;
 
-       active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
-       passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
 
        if (passive_dwell <= active_dwell)
                passive_dwell = active_dwell + 1;
@@ -1863,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
 
                scan_ch->channel = chan->hw_value;
 
-               ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
-               if (!is_channel_valid(ch_info)) {
-                       IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
-                                      scan_ch->channel);
+               ch_info = iwl_legacy_get_channel_info(priv, band,
+                                                       scan_ch->channel);
+               if (!iwl_legacy_is_channel_valid(ch_info)) {
+                       IWL_DEBUG_SCAN(priv,
+                               "Channel %d is INVALID for this band.\n",
+                              scan_ch->channel);
                        continue;
                }
 
@@ -1875,7 +1849,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
                /* If passive, set up for auto-switch
                 *  and use long active_dwell time.
                 */
-               if (!is_active || is_channel_passive(ch_info) ||
+               if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
                    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
                        scan_ch->type = 0;      /* passive */
                        if (IWL_UCODE_API(priv->ucode_ver) == 1)
@@ -1955,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
 
 static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
 {
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
 }
 
 /**
@@ -1976,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
 
        IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
 
-       iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+       iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
                               IWL39_RTC_INST_LOWER_BOUND);
 
        errcnt = 0;
@@ -1984,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
-               val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
                        IWL_ERR(priv, "uCode INST section is invalid at "
                                  "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -2023,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
-               iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+               iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
                        i + IWL39_RTC_INST_LOWER_BOUND);
-               val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
 #if 0 /* Enable this if you want to see details */
                        IWL_ERR(priv, "uCode INST section is invalid at "
@@ -2101,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
 #define IWL3945_UCODE_GET(item)                                                \
 static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
 {                                                                      \
-       return le32_to_cpu(ucode->u.v1.item);                           \
+       return le32_to_cpu(ucode->v1.item);                             \
 }
 
 static u32 iwl3945_ucode_get_header_size(u32 api_ver)
@@ -2111,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver)
 
 static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
 {
-       return (u8 *) ucode->u.v1.data;
+       return (u8 *) ucode->v1.data;
 }
 
 IWL3945_UCODE_GET(inst_size);
@@ -2286,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
         * 1) unmodified from disk
         * 2) backup cache for save/restore during power-downs */
        priv->ucode_code.len = inst_size;
-       iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
 
        priv->ucode_data.len = data_size;
-       iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
 
        priv->ucode_data_backup.len = data_size;
-       iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
 
        if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
            !priv->ucode_data_backup.v_addr)
@@ -2301,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
        /* Initialization instructions and data */
        if (init_size && init_data_size) {
                priv->ucode_init.len = init_size;
-               iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
 
                priv->ucode_init_data.len = init_data_size;
-               iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
 
                if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
                        goto err_pci_alloc;
@@ -2313,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
        /* Bootstrap (instructions only, no data) */
        if (boot_size) {
                priv->ucode_boot.len = boot_size;
-               iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
 
                if (!priv->ucode_boot.v_addr)
                        goto err_pci_alloc;
@@ -2400,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
        pdata = priv->ucode_data_backup.p_addr;
 
        /* Tell bootstrap uCode where to find image to load */
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
                                 priv->ucode_data.len);
 
        /* Inst byte count must be last to set up, bit 31 signals uCode
         *   that all new ptr/size info is in place */
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
                                 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
 
        IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -2488,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
                goto restart;
        }
 
-       rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
+       rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
        IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
 
        if (rfkill & 0x1) {
@@ -2510,18 +2484,18 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
        set_bit(STATUS_ALIVE, &priv->status);
 
        /* Enable watchdog to monitor the driver tx queues */
-       iwl_setup_watchdog(priv);
+       iwl_legacy_setup_watchdog(priv);
 
-       if (iwl_is_rfkill(priv))
+       if (iwl_legacy_is_rfkill(priv))
                return;
 
        ieee80211_wake_queues(priv->hw);
 
-       priv->active_rate = IWL_RATES_MASK;
+       priv->active_rate = IWL_RATES_MASK_3945;
 
-       iwl_power_update_mode(priv, true);
+       iwl_legacy_power_update_mode(priv, true);
 
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
                struct iwl3945_rxon_cmd *active_rxon =
                                (struct iwl3945_rxon_cmd *)(&ctx->active);
 
@@ -2529,21 +2503,20 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
                active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        } else {
                /* Initialize our rx_config data */
-               iwl_connection_init_rx_config(priv, ctx);
+               iwl_legacy_connection_init_rx_config(priv, ctx);
        }
 
        /* Configure Bluetooth device coexistence support */
-       priv->cfg->ops->hcmd->send_bt_config(priv);
+       iwl_legacy_send_bt_config(priv);
+
+       set_bit(STATUS_READY, &priv->status);
 
        /* Configure the adapter for unassociated operation */
        iwl3945_commit_rxon(priv, ctx);
 
        iwl3945_reg_txpower_periodic(priv);
 
-       iwl_leds_init(priv);
-
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-       set_bit(STATUS_READY, &priv->status);
        wake_up_interruptible(&priv->wait_command_queue);
 
        return;
@@ -2561,7 +2534,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
 
        IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
 
-       iwl_scan_cancel_timeout(priv, 200);
+       iwl_legacy_scan_cancel_timeout(priv, 200);
 
        exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
 
@@ -2570,9 +2543,9 @@ static void __iwl3945_down(struct iwl_priv *priv)
        del_timer_sync(&priv->watchdog);
 
        /* Station information will now be cleared in device */
-       iwl_clear_ucode_stations(priv, NULL);
-       iwl_dealloc_bcast_stations(priv);
-       iwl_clear_driver_stations(priv);
+       iwl_legacy_clear_ucode_stations(priv, NULL);
+       iwl_legacy_dealloc_bcast_stations(priv);
+       iwl_legacy_clear_driver_stations(priv);
 
        /* Unblock any waiting calls */
        wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2587,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv)
 
        /* tell the device to stop sending interrupts */
        spin_lock_irqsave(&priv->lock, flags);
-       iwl_disable_interrupts(priv);
+       iwl_legacy_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
-       iwl_synchronize_irq(priv);
+       iwl3945_synchronize_irq(priv);
 
        if (priv->mac80211_registered)
                ieee80211_stop_queues(priv->hw);
 
        /* If we have not previously called iwl3945_init() then
         * clear all bits but the RF Kill bits and return */
-       if (!iwl_is_init(priv)) {
+       if (!iwl_legacy_is_init(priv)) {
                priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
                                        STATUS_RF_KILL_HW |
                               test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2621,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv)
        iwl3945_hw_rxq_stop(priv);
 
        /* Power-down device's busmaster DMA clocks */
-       iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
        udelay(5);
 
        /* Stop the device, and put it in low power state */
-       iwl_apm_stop(priv);
+       iwl_legacy_apm_stop(priv);
 
  exit:
        memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2656,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
        u8 sta_id;
 
        spin_lock_irqsave(&priv->sta_lock, flags);
-       sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
+       sta_id = iwl_legacy_prep_station(priv, ctx,
+                                       iwlegacy_bcast_addr, false, NULL);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Unable to prepare broadcast station\n");
                spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -2714,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
 
        /* clear (again), then enable host interrupts */
        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-       iwl_enable_interrupts(priv);
+       iwl_legacy_enable_interrupts(priv);
 
        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2856,21 +2830,18 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
        scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
 
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
                u16 interval = 0;
                u32 extra;
                u32 suspend_time = 100;
                u32 scan_suspend_time = 100;
-               unsigned long flags;
 
                IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
 
-               spin_lock_irqsave(&priv->lock, flags);
                if (priv->is_internal_short_scan)
                        interval = 0;
                else
                        interval = vif->bss_conf.beacon_int;
-               spin_unlock_irqrestore(&priv->lock, flags);
 
                scan->suspend_time = 0;
                scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -2947,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
        if (!priv->is_internal_short_scan) {
                scan->tx_cmd.len = cpu_to_le16(
-                       iwl_fill_probe_req(priv,
+                       iwl_legacy_fill_probe_req(priv,
                                (struct ieee80211_mgmt *)scan->data,
                                vif->addr,
                                priv->scan_request->ie,
@@ -2956,9 +2927,9 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        } else {
                /* use bcast addr, will not be transmitted but must be valid */
                scan->tx_cmd.len = cpu_to_le16(
-                       iwl_fill_probe_req(priv,
+                       iwl_legacy_fill_probe_req(priv,
                                (struct ieee80211_mgmt *)scan->data,
-                               iwl_bcast_addr, NULL, 0,
+                               iwlegacy_bcast_addr, NULL, 0,
                                IWL_MAX_SCAN_SIZE - sizeof(*scan)));
        }
        /* select Rx antennas */
@@ -2986,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        scan->len = cpu_to_le16(cmd.len);
 
        set_bit(STATUS_SCAN_HW, &priv->status);
-       ret = iwl_send_cmd_sync(priv, &cmd);
+       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
        if (ret)
                clear_bit(STATUS_SCAN_HW, &priv->status);
        return ret;
@@ -3054,25 +3025,20 @@ void iwl3945_post_associate(struct iwl_priv *priv)
        if (!ctx->vif || !priv->is_open)
                return;
 
-       if (ctx->vif->type == NL80211_IFTYPE_AP) {
-               IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
-               return;
-       }
-
        IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
                        ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
 
-       iwl_scan_cancel_timeout(priv, 200);
+       iwl_legacy_scan_cancel_timeout(priv, 200);
 
-       conf = ieee80211_get_hw_conf(priv->hw);
+       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
 
        ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        iwl3945_commit_rxon(priv, ctx);
 
-       rc = iwl_send_rxon_timing(priv, ctx);
+       rc = iwl_legacy_send_rxon_timing(priv, ctx);
        if (rc)
                IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
                            "Attempting to continue.\n");
@@ -3170,8 +3136,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
         * no need to poll the killswitch state anymore */
        cancel_delayed_work(&priv->_3945.rfkill_poll);
 
-       iwl_led_start(priv);
-
        priv->is_open = 1;
        IWL_DEBUG_MAC80211(priv, "leave\n");
        return 0;
@@ -3206,7 +3170,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct iwl_priv *priv = hw->priv;
 
@@ -3219,7 +3183,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                dev_kfree_skb_any(skb);
 
        IWL_DEBUG_MAC80211(priv, "leave\n");
-       return NETDEV_TX_OK;
 }
 
 void iwl3945_config_ap(struct iwl_priv *priv)
@@ -3232,14 +3195,14 @@ void iwl3945_config_ap(struct iwl_priv *priv)
                return;
 
        /* The following should be done only at AP bring up */
-       if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
+       if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
 
                /* RXON - unassoc (to set timing command) */
                ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
                iwl3945_commit_rxon(priv, ctx);
 
                /* RXON Timing */
-               rc = iwl_send_rxon_timing(priv, ctx);
+               rc = iwl_legacy_send_rxon_timing(priv, ctx);
                if (rc)
                        IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
                                        "Attempting to continue.\n");
@@ -3266,10 +3229,6 @@ void iwl3945_config_ap(struct iwl_priv *priv)
                iwl3945_commit_rxon(priv, ctx);
        }
        iwl3945_send_beacon_cmd(priv);
-
-       /* FIXME - we need to add code here to detect a totally new
-        * configuration, reset the AP, unassoc, rxon timing, assoc,
-        * clear sta table, add BCAST sta... */
 }
 
 static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3289,17 +3248,25 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
-       static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
+       /*
+        * To support IBSS RSN, don't program group keys in IBSS, the
+        * hardware will then not attempt to decrypt the frames.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
+       static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
 
        if (!static_key) {
-               sta_id = iwl_sta_id_or_broadcast(
+               sta_id = iwl_legacy_sta_id_or_broadcast(
                                priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
                if (sta_id == IWL_INVALID_STATION)
                        return -EINVAL;
        }
 
        mutex_lock(&priv->mutex);
-       iwl_scan_cancel_timeout(priv, 100);
+       iwl_legacy_scan_cancel_timeout(priv, 100);
 
        switch (cmd) {
        case SET_KEY:
@@ -3344,7 +3311,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
        sta_priv->common.sta_id = IWL_INVALID_STATION;
 
 
-       ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
+       ret = iwl_legacy_add_station_common(priv,
+                               &priv->contexts[IWL_RXON_CTX_BSS],
                                     sta->addr, is_ap, sta, &sta_id);
        if (ret) {
                IWL_ERR(priv, "Unable to add station %pM (%d)\n",
@@ -3405,7 +3373,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
 
        /*
         * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_connection_init_rx_config()
+        * default flags setup in iwl_legacy_connection_init_rx_config()
         * since we currently do not support programming multicast
         * filters into the device.
         */
@@ -3420,7 +3388,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
  *
  *****************************************************************************/
 
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
 
 /*
  * The following adds a new attribute to the sysfs representation
@@ -3433,13 +3401,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
  * level that is used instead of the global debug level if it (the per
  * device debug level) is set.
  */
-static ssize_t show_debug_level(struct device *d,
+static ssize_t iwl3945_show_debug_level(struct device *d,
                                struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
+       return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
 }
-static ssize_t store_debug_level(struct device *d,
+static ssize_t iwl3945_store_debug_level(struct device *d,
                                struct device_attribute *attr,
                                 const char *buf, size_t count)
 {
@@ -3452,7 +3420,7 @@ static ssize_t store_debug_level(struct device *d,
                IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
        else {
                priv->debug_level = val;
-               if (iwl_alloc_traffic_mem(priv))
+               if (iwl_legacy_alloc_traffic_mem(priv))
                        IWL_ERR(priv,
                                "Not enough memory to generate traffic log\n");
        }
@@ -3460,31 +3428,31 @@ static ssize_t store_debug_level(struct device *d,
 }
 
 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-                       show_debug_level, store_debug_level);
+                       iwl3945_show_debug_level, iwl3945_store_debug_level);
 
-#endif /* CONFIG_IWLWIFI_DEBUG */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
 
-static ssize_t show_temperature(struct device *d,
+static ssize_t iwl3945_show_temperature(struct device *d,
                                struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EAGAIN;
 
        return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
 }
 
-static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
+static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
 
-static ssize_t show_tx_power(struct device *d,
+static ssize_t iwl3945_show_tx_power(struct device *d,
                             struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
        return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
 }
 
-static ssize_t store_tx_power(struct device *d,
+static ssize_t iwl3945_store_tx_power(struct device *d,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
 {
@@ -3501,9 +3469,9 @@ static ssize_t store_tx_power(struct device *d,
        return count;
 }
 
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power,
+                  iwl3945_store_tx_power);
 
-static ssize_t show_flags(struct device *d,
+static ssize_t iwl3945_show_flags(struct device *d,
                          struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3512,7 +3480,7 @@ static ssize_t show_flags(struct device *d,
        return sprintf(buf, "0x%04X\n", ctx->active.flags);
 }
 
-static ssize_t store_flags(struct device *d,
+static ssize_t iwl3945_store_flags(struct device *d,
                           struct device_attribute *attr,
                           const char *buf, size_t count)
 {
@@ -3523,7 +3491,7 @@ static ssize_t store_flags(struct device *d,
        mutex_lock(&priv->mutex);
        if (le32_to_cpu(ctx->staging.flags) != flags) {
                /* Cancel any currently running scans... */
-               if (iwl_scan_cancel_timeout(priv, 100))
+               if (iwl_legacy_scan_cancel_timeout(priv, 100))
                        IWL_WARN(priv, "Could not cancel scan.\n");
                else {
                        IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
@@ -3537,9 +3505,9 @@ static ssize_t store_flags(struct device *d,
        return count;
 }
 
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
 
-static ssize_t show_filter_flags(struct device *d,
+static ssize_t iwl3945_show_filter_flags(struct device *d,
                                 struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3549,7 +3517,7 @@ static ssize_t show_filter_flags(struct device *d,
                le32_to_cpu(ctx->active.filter_flags));
 }
 
-static ssize_t store_filter_flags(struct device *d,
+static ssize_t iwl3945_store_filter_flags(struct device *d,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
@@ -3560,7 +3528,7 @@ static ssize_t store_filter_flags(struct device *d,
        mutex_lock(&priv->mutex);
        if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
                /* Cancel any currently running scans... */
-               if (iwl_scan_cancel_timeout(priv, 100))
+               if (iwl_legacy_scan_cancel_timeout(priv, 100))
                        IWL_WARN(priv, "Could not cancel scan.\n");
                else {
                        IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
@@ -3575,10 +3543,10 @@ static ssize_t store_filter_flags(struct device *d,
        return count;
 }
 
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
-                  store_filter_flags);
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
+                  iwl3945_store_filter_flags);
 
-static ssize_t show_measurement(struct device *d,
+static ssize_t iwl3945_show_measurement(struct device *d,
                                struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3610,7 +3578,7 @@ static ssize_t show_measurement(struct device *d,
        return len;
 }
 
-static ssize_t store_measurement(struct device *d,
+static ssize_t iwl3945_store_measurement(struct device *d,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
 {
@@ -3647,9 +3615,9 @@ static ssize_t store_measurement(struct device *d,
 }
 
 static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
-                  show_measurement, store_measurement);
+                  iwl3945_show_measurement, iwl3945_store_measurement);
 
-static ssize_t store_retry_rate(struct device *d,
+static ssize_t iwl3945_store_retry_rate(struct device *d,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
 {
@@ -3662,38 +3630,38 @@ static ssize_t store_retry_rate(struct device *d,
        return count;
 }
 
-static ssize_t show_retry_rate(struct device *d,
+static ssize_t iwl3945_show_retry_rate(struct device *d,
                               struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
        return sprintf(buf, "%d", priv->retry_rate);
 }
 
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
-                  store_retry_rate);
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
+                  iwl3945_store_retry_rate);
 
 
-static ssize_t show_channels(struct device *d,
+static ssize_t iwl3945_show_channels(struct device *d,
                             struct device_attribute *attr, char *buf)
 {
        /* all this shit doesn't belong in sysfs anyway */
        return 0;
 }
 
-static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
+static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
 
-static ssize_t show_antenna(struct device *d,
+static ssize_t iwl3945_show_antenna(struct device *d,
                            struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
 
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EAGAIN;
 
        return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
 }
 
-static ssize_t store_antenna(struct device *d,
+static ssize_t iwl3945_store_antenna(struct device *d,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
 {
@@ -3718,20 +3686,20 @@ static ssize_t store_antenna(struct device *d,
        return count;
 }
 
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna,
+                  iwl3945_store_antenna);
 
-static ssize_t show_status(struct device *d,
+static ssize_t iwl3945_show_status(struct device *d,
                           struct device_attribute *attr, char *buf)
 {
        struct iwl_priv *priv = dev_get_drvdata(d);
-       if (!iwl_is_alive(priv))
+       if (!iwl_legacy_is_alive(priv))
                return -EAGAIN;
        return sprintf(buf, "0x%08x\n", (int)priv->status);
 }
 
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
 
-static ssize_t dump_error_log(struct device *d,
+static ssize_t iwl3945_dump_error_log(struct device *d,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
 {
@@ -3744,7 +3712,7 @@ static ssize_t dump_error_log(struct device *d,
        return strnlen(buf, count);
 }
 
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
 
 /*****************************************************************************
  *
@@ -3760,18 +3728,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
 
        INIT_WORK(&priv->restart, iwl3945_bg_restart);
        INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
-       INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
        INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
        INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
        INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
 
-       iwl_setup_scan_deferred_work(priv);
+       iwl_legacy_setup_scan_deferred_work(priv);
 
        iwl3945_hw_setup_deferred_work(priv);
 
        init_timer(&priv->watchdog);
        priv->watchdog.data = (unsigned long)priv;
-       priv->watchdog.function = iwl_bg_watchdog;
+       priv->watchdog.function = iwl_legacy_bg_watchdog;
 
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     iwl3945_irq_tasklet, (unsigned long)priv);
@@ -3783,9 +3750,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
 
        cancel_delayed_work_sync(&priv->init_alive_start);
        cancel_delayed_work(&priv->alive_start);
-       cancel_work_sync(&priv->beacon_update);
 
-       iwl_cancel_scan_deferred_work(priv);
+       iwl_legacy_cancel_scan_deferred_work(priv);
 }
 
 static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3799,7 +3765,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
        &dev_attr_status.attr,
        &dev_attr_temperature.attr,
        &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        &dev_attr_debug_level.attr,
 #endif
        NULL
@@ -3814,19 +3780,19 @@ struct ieee80211_ops iwl3945_hw_ops = {
        .tx = iwl3945_mac_tx,
        .start = iwl3945_mac_start,
        .stop = iwl3945_mac_stop,
-       .add_interface = iwl_mac_add_interface,
-       .remove_interface = iwl_mac_remove_interface,
-       .change_interface = iwl_mac_change_interface,
+       .add_interface = iwl_legacy_mac_add_interface,
+       .remove_interface = iwl_legacy_mac_remove_interface,
+       .change_interface = iwl_legacy_mac_change_interface,
        .config = iwl_legacy_mac_config,
        .configure_filter = iwl3945_configure_filter,
        .set_key = iwl3945_mac_set_key,
-       .conf_tx = iwl_mac_conf_tx,
+       .conf_tx = iwl_legacy_mac_conf_tx,
        .reset_tsf = iwl_legacy_mac_reset_tsf,
        .bss_info_changed = iwl_legacy_mac_bss_info_changed,
-       .hw_scan = iwl_mac_hw_scan,
+       .hw_scan = iwl_legacy_mac_hw_scan,
        .sta_add = iwl3945_mac_sta_add,
-       .sta_remove = iwl_mac_sta_remove,
-       .tx_last_beacon = iwl_mac_tx_last_beacon,
+       .sta_remove = iwl_legacy_mac_sta_remove,
+       .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
 };
 
 static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3868,7 +3834,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
                ret = -EINVAL;
                goto err;
        }
-       ret = iwl_init_channel_map(priv);
+       ret = iwl_legacy_init_channel_map(priv);
        if (ret) {
                IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
                goto err;
@@ -3880,7 +3846,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
                goto err_free_channel_map;
        }
 
-       ret = iwlcore_init_geos(priv);
+       ret = iwl_legacy_init_geos(priv);
        if (ret) {
                IWL_ERR(priv, "initializing geos failed: %d\n", ret);
                goto err_free_channel_map;
@@ -3890,7 +3856,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
        return 0;
 
 err_free_channel_map:
-       iwl_free_channel_map(priv);
+       iwl_legacy_free_channel_map(priv);
 err:
        return ret;
 }
@@ -3910,15 +3876,12 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
                    IEEE80211_HW_SPECTRUM_MGMT;
 
-       if (!priv->cfg->base_params->broken_powersave)
-               hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-                            IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
        hw->wiphy->interface_modes =
                priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
 
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
+                           WIPHY_FLAG_IBSS_RSN;
 
        hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
        /* we create the 802.11 header and a zero-length SSID element */
@@ -3935,6 +3898,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
                priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &priv->bands[IEEE80211_BAND_5GHZ];
 
+       iwl_legacy_leds_init(priv);
+
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3960,7 +3925,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 
        /* mac80211 allocates memory for this device instance, including
         *   space for this driver's private structure */
-       hw = iwl_alloc_all(cfg);
+       hw = iwl_legacy_alloc_all(cfg);
        if (hw == NULL) {
                pr_err("Can not allocate network device\n");
                err = -ENOMEM;
@@ -4000,13 +3965,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
                iwl3945_hw_ops.hw_scan = NULL;
        }
 
-
        IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
        priv->cfg = cfg;
        priv->pci_dev = pdev;
        priv->inta_mask = CSR_INI_SET_MASK;
 
-       if (iwl_alloc_traffic_mem(priv))
+       if (iwl_legacy_alloc_traffic_mem(priv))
                IWL_ERR(priv, "Not enough memory to generate traffic log\n");
 
        /***************************
@@ -4070,7 +4034,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
         * ********************/
 
        /* Read the EEPROM */
-       err = iwl_eeprom_init(priv);
+       err = iwl_legacy_eeprom_init(priv);
        if (err) {
                IWL_ERR(priv, "Unable to init EEPROM\n");
                goto out_iounmap;
@@ -4107,12 +4071,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
         * ********************/
 
        spin_lock_irqsave(&priv->lock, flags);
-       iwl_disable_interrupts(priv);
+       iwl_legacy_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
 
        pci_enable_msi(priv->pci_dev);
 
-       err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
+       err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
                          IRQF_SHARED, DRV_NAME, priv);
        if (err) {
                IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4125,24 +4089,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
                goto out_release_irq;
        }
 
-       iwl_set_rxon_channel(priv,
+       iwl_legacy_set_rxon_channel(priv,
                             &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
                             &priv->contexts[IWL_RXON_CTX_BSS]);
        iwl3945_setup_deferred_work(priv);
        iwl3945_setup_rx_handlers(priv);
-       iwl_power_initialize(priv);
+       iwl_legacy_power_initialize(priv);
 
        /*********************************
         * 8. Setup and Register mac80211
         * *******************************/
 
-       iwl_enable_interrupts(priv);
+       iwl_legacy_enable_interrupts(priv);
 
        err = iwl3945_setup_mac(priv);
        if (err)
                goto  out_remove_sysfs;
 
-       err = iwl_dbgfs_register(priv, DRV_NAME);
+       err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
        if (err)
                IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
 
@@ -4160,12 +4124,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
        free_irq(priv->pci_dev->irq, priv);
  out_disable_msi:
        pci_disable_msi(priv->pci_dev);
-       iwlcore_free_geos(priv);
-       iwl_free_channel_map(priv);
+       iwl_legacy_free_geos(priv);
+       iwl_legacy_free_channel_map(priv);
  out_unset_hw_params:
        iwl3945_unset_hw_params(priv);
  out_eeprom_free:
-       iwl_eeprom_free(priv);
+       iwl_legacy_eeprom_free(priv);
  out_iounmap:
        pci_iounmap(pdev, priv->hw_base);
  out_pci_release_regions:
@@ -4174,7 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
  out_ieee80211_free_hw:
-       iwl_free_traffic_mem(priv);
+       iwl_legacy_free_traffic_mem(priv);
        ieee80211_free_hw(priv->hw);
  out:
        return err;
@@ -4190,10 +4154,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
 
        IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
 
-       iwl_dbgfs_unregister(priv);
+       iwl_legacy_dbgfs_unregister(priv);
 
        set_bit(STATUS_EXIT_PENDING, &priv->status);
 
+       iwl_legacy_leds_exit(priv);
+
        if (priv->mac80211_registered) {
                ieee80211_unregister_hw(priv->hw);
                priv->mac80211_registered = 0;
@@ -4208,16 +4174,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
         * paths to avoid running iwl_down() at all before leaving driver.
         * This (inexpensive) call *makes sure* device is reset.
         */
-       iwl_apm_stop(priv);
+       iwl_legacy_apm_stop(priv);
 
        /* make sure we flush any pending irq or
         * tasklet for the driver
         */
        spin_lock_irqsave(&priv->lock, flags);
-       iwl_disable_interrupts(priv);
+       iwl_legacy_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       iwl_synchronize_irq(priv);
+       iwl3945_synchronize_irq(priv);
 
        sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
 
@@ -4239,7 +4205,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
         * until now... */
        destroy_workqueue(priv->workqueue);
        priv->workqueue = NULL;
-       iwl_free_traffic_mem(priv);
+       iwl_legacy_free_traffic_mem(priv);
 
        free_irq(pdev->irq, priv);
        pci_disable_msi(pdev);
@@ -4249,8 +4215,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
 
-       iwl_free_channel_map(priv);
-       iwlcore_free_geos(priv);
+       iwl_legacy_free_channel_map(priv);
+       iwl_legacy_free_geos(priv);
        kfree(priv->scan_cmd);
        if (priv->beacon_skb)
                dev_kfree_skb(priv->beacon_skb);
@@ -4270,7 +4236,7 @@ static struct pci_driver iwl3945_driver = {
        .id_table = iwl3945_hw_card_ids,
        .probe = iwl3945_pci_probe,
        .remove = __devexit_p(iwl3945_pci_remove),
-       .driver.pm = IWL_PM_OPS,
+       .driver.pm = IWL_LEGACY_PM_OPS,
 };
 
 static int __init iwl3945_init(void)
@@ -4311,17 +4277,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
 module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
 MODULE_PARM_DESC(swcrypto,
-                "using software crypto (default 1 [software])\n");
-#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
+               "using software crypto (default 1 [software])");
 module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
-                  int, S_IRUGO);
+               int, S_IRUGO);
 MODULE_PARM_DESC(disable_hw_scan,
-                "disable hardware scanning (default 0) (deprecated)");
-module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
+               "disable hardware scanning (default 0) (deprecated)");
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
 
 module_exit(iwl3945_exit);
 module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644 (file)
index 0000000..91b3d8b
--- /dev/null
@@ -0,0 +1,3632 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME        "iwl4965"
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-sta.h"
+#include "iwl-4965-calib.h"
+#include "iwl-4965.h"
+#include "iwl-4965-led.h"
+
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION        "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION     IWLWIFI_VERSION VD
+
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void iwl4965_update_chain_flags(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain) {
+               for_each_context(priv, ctx) {
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+                       if (ctx->active.rx_chain != ctx->staging.rx_chain)
+                               iwl_legacy_commit_rxon(priv, ctx);
+               }
+       }
+}
+
+static void iwl4965_clear_free_frames(struct iwl_priv *priv)
+{
+       struct list_head *element;
+
+       IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
+                      priv->frames_count);
+
+       while (!list_empty(&priv->free_frames)) {
+               element = priv->free_frames.next;
+               list_del(element);
+               kfree(list_entry(element, struct iwl_frame, list));
+               priv->frames_count--;
+       }
+
+       if (priv->frames_count) {
+               IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
+                           priv->frames_count);
+               priv->frames_count = 0;
+       }
+}
+
+static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
+{
+       struct iwl_frame *frame;
+       struct list_head *element;
+       if (list_empty(&priv->free_frames)) {
+               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+               if (!frame) {
+                       IWL_ERR(priv, "Could not allocate frame!\n");
+                       return NULL;
+               }
+
+               priv->frames_count++;
+               return frame;
+       }
+
+       element = priv->free_frames.next;
+       list_del(element);
+       return list_entry(element, struct iwl_frame, list);
+}
+
+static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
+{
+       memset(frame, 0, sizeof(*frame));
+       list_add(&frame->list, &priv->free_frames);
+}
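iwl4965_get_free_frame() and iwl4965_free_frame() above implement a small recycling pool: freed frames go back on a list instead of to the allocator, and frames_count records how many were ever handed out. A self-contained sketch of the same pattern with generic names (none of these identifiers come from the driver):

#include <stdlib.h>
#include <string.h>

/* Generic free-list sketch of the get/free pattern above. */
struct frame {
	struct frame *next;
	unsigned char payload[256];
};

static struct frame *free_list;
static int frames_count;

static struct frame *frame_get(void)
{
	struct frame *f = free_list;

	if (!f) {                        /* pool empty: allocate a fresh frame */
		f = calloc(1, sizeof(*f));
		if (f)
			frames_count++;
		return f;
	}
	free_list = f->next;             /* otherwise recycle the head of the list */
	return f;
}

static void frame_put(struct frame *f)
{
	memset(f, 0, sizeof(*f));        /* scrub before returning to the pool */
	f->next = free_list;
	free_list = f;
}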
+
+static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
+                                struct ieee80211_hdr *hdr,
+                                int left)
+{
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_skb)
+               return 0;
+
+       if (priv->beacon_skb->len > left)
+               return 0;
+
+       memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
+
+       return priv->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
+                              struct iwl_tx_beacon_cmd *tx_beacon_cmd,
+                              u8 *beacon, u32 frame_size)
+{
+       u16 tim_idx;
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+       /*
+        * The index is relative to frame start but we start looking at the
+        * variable-length part of the beacon.
+        */
+       tim_idx = mgmt->u.beacon.variable - beacon;
+
+       /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+       while ((tim_idx < (frame_size - 2)) &&
+                       (beacon[tim_idx] != WLAN_EID_TIM))
+               tim_idx += beacon[tim_idx+1] + 2;
+
+       /* If TIM field was found, set variables */
+       if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+               tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+               tx_beacon_cmd->tim_size = beacon[tim_idx+1];
+       } else
+               IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
+}
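iwl4965_set_beacon_tim() above scans the beacon's information elements, each encoded as a one-byte ID, a one-byte length and a body, until it reaches the TIM element (ID 5). A standalone version of that scan over a raw buffer, assumed to start at the first information element:

#include <stddef.h>
#include <stdint.h>

#define EID_TIM 5   /* WLAN_EID_TIM: element ID 5 in 802.11 */

/* Return the offset of the TIM element within ies[], or -1 if absent.
 * Each element is encoded as: 1-byte ID, 1-byte length, then 'length' bytes. */
static int find_tim(const uint8_t *ies, size_t len)
{
	size_t i = 0;

	while (i + 2 <= len && ies[i] != EID_TIM)
		i += 2 + ies[i + 1];             /* skip ID byte, length byte and body */

	return (i + 2 <= len && ies[i] == EID_TIM) ? (int)i : -1;
}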
+
+static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
+                                      struct iwl_frame *frame)
+{
+       struct iwl_tx_beacon_cmd *tx_beacon_cmd;
+       u32 frame_size;
+       u32 rate_flags;
+       u32 rate;
+       /*
+        * We have to set up the TX command, the TX Beacon command, and the
+        * beacon contents.
+        */
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_ctx) {
+               IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
+               return 0;
+       }
+
+       /* Initialize memory */
+       tx_beacon_cmd = &frame->u.beacon;
+       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+       /* Set up TX beacon contents */
+       frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
+                               sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+       if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+               return 0;
+       if (!frame_size)
+               return 0;
+
+       /* Set up TX command fields */
+       tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+       tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
+       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+               TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
+
+       /* Set up TX beacon command fields */
+       iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
+                          frame_size);
+
+       /* Set up packet rate and flags */
+       rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
+       priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+                                             priv->hw_params.valid_tx_ant);
+       rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+       if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+       tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
+                       rate_flags);
+
+       return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
+{
+       struct iwl_frame *frame;
+       unsigned int frame_size;
+       int rc;
+
+       frame = iwl4965_get_free_frame(priv);
+       if (!frame) {
+               IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
+                         "command.\n");
+               return -ENOMEM;
+       }
+
+       frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
+       if (!frame_size) {
+               IWL_ERR(priv, "Error configuring the beacon command\n");
+               iwl4965_free_frame(priv, frame);
+               return -EINVAL;
+       }
+
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+                             &frame->u.cmd[0]);
+
+       iwl4965_free_frame(priv, frame);
+
+       return rc;
+}
+
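+/*
+ * Each TFD transfer buffer (TB) entry encodes a 36-bit DMA address and a
+ * 12-bit length:  the low 32 address bits live in tb->lo, the top 4 address
+ * bits sit in the low nibble of tb->hi_n_len, and the length occupies its
+ * upper 12 bits.
+ */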
+static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       dma_addr_t addr = get_unaligned_le32(&tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               addr |=
+               ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+       return addr;
+}
+
+static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+                                 dma_addr_t addr, u16 len)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+       u16 hi_n_len = len << 4;
+
+       put_unaligned_le32(addr, &tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+       tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+       return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv: driver private data
+ * @txq: tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
+       struct iwl_tfd *tfd;
+       struct pci_dev *dev = priv->pci_dev;
+       int index = txq->q.read_ptr;
+       int i;
+       int num_tbs;
+
+       tfd = &tfd_tmp[index];
+
+       /* Sanity check on number of chunks */
+       num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+       if (num_tbs >= IWL_NUM_OF_TBS) {
+               IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+               /* @todo issue fatal error, this is quite a serious situation */
+               return;
+       }
+
+       /* Unmap tx_cmd */
+       if (num_tbs)
+               pci_unmap_single(dev,
+                               dma_unmap_addr(&txq->meta[index], mapping),
+                               dma_unmap_len(&txq->meta[index], len),
+                               PCI_DMA_BIDIRECTIONAL);
+
+       /* Unmap chunks, if any. */
+       for (i = 1; i < num_tbs; i++)
+               pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
+                               iwl4965_tfd_tb_get_len(tfd, i),
+                               PCI_DMA_TODEVICE);
+
+       /* free SKB */
+       if (txq->txb) {
+               struct sk_buff *skb;
+
+               skb = txq->txb[txq->q.read_ptr].skb;
+
+               /* can be called from irqs-disabled context */
+               if (skb) {
+                       dev_kfree_skb_any(skb);
+                       txq->txb[txq->q.read_ptr].skb = NULL;
+               }
+       }
+}
+
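+/*
+ * Attach one DMA buffer (addr/len) to the TFD at the queue's write pointer.
+ * When 'reset' is set, the TFD is cleared first, i.e. this is the first
+ * buffer of a new frame.
+ */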
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                struct iwl_tx_queue *txq,
+                                dma_addr_t addr, u16 len,
+                                u8 reset, u8 pad)
+{
+       struct iwl_queue *q;
+       struct iwl_tfd *tfd, *tfd_tmp;
+       u32 num_tbs;
+
+       q = &txq->q;
+       tfd_tmp = (struct iwl_tfd *)txq->tfds;
+       tfd = &tfd_tmp[q->write_ptr];
+
+       if (reset)
+               memset(tfd, 0, sizeof(*tfd));
+
+       num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+       /* Each TFD can point to a maximum of 20 Tx buffers */
+       if (num_tbs >= IWL_NUM_OF_TBS) {
+               IWL_ERR(priv, "Error can not send more than %d chunks\n",
+                         IWL_NUM_OF_TBS);
+               return -EINVAL;
+       }
+
+       BUG_ON(addr & ~DMA_BIT_MASK(36));
+       if (unlikely(addr & ~IWL_TX_DMA_MASK))
+               IWL_ERR(priv, "Unaligned address = %llx\n",
+                         (unsigned long long)addr);
+
+       iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+       return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+                        struct iwl_tx_queue *txq)
+{
+       int txq_id = txq->q.id;
+
+       /* Circular buffer (TFD queue in DRAM) physical base address */
+       iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+                            txq->q.dma_addr >> 8);
+
+       return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_alive_resp *palive;
+       struct delayed_work *pwork;
+
+       palive = &pkt->u.alive_frame;
+
+       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+                      "0x%01X 0x%01X\n",
+                      palive->is_valid, palive->ver_type,
+                      palive->ver_subtype);
+
+       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+               memcpy(&priv->card_alive_init,
+                      &pkt->u.alive_frame,
+                      sizeof(struct iwl_init_alive_resp));
+               pwork = &priv->init_alive_start;
+       } else {
+               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+               memcpy(&priv->card_alive, &pkt->u.alive_frame,
+                      sizeof(struct iwl_alive_resp));
+               pwork = &priv->alive_start;
+       }
+
+       /* We delay the ALIVE response by 5ms to
+        * give the HW RF Kill time to activate... */
+       if (palive->is_valid == UCODE_VALID_OK)
+               queue_delayed_work(priv->workqueue, pwork,
+                                  msecs_to_jiffies(5));
+       else
+               IWL_WARN(priv, "uCode did not respond OK.\n");
+}
+
+/**
+ * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
+ *
+ * This callback is provided in order to send a statistics request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
+ * was received.  We need to ensure we receive the statistics in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void iwl4965_bg_statistics_periodic(unsigned long data)
+{
+       struct iwl_priv *priv = (struct iwl_priv *)data;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       /* don't send host command if rf-kill is on */
+       if (!iwl_legacy_is_ready_rf(priv))
+               return;
+
+       iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
+}
+
+
+static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+                                       u32 start_idx, u32 num_events,
+                                       u32 mode)
+{
+       u32 i;
+       u32 ptr;        /* SRAM byte address of log data */
+       u32 ev, time, data; /* event log data */
+       unsigned long reg_flags;
+
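+       /*
+        * Log data starts 4 words past the header; each entry is 2 words
+        * (event, data) in mode 0, or 3 words (event, time, data) otherwise.
+        */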
+       if (mode == 0)
+               ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+       else
+               ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+       /* Make sure device is powered up for SRAM reads */
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (iwl_grab_nic_access(priv)) {
+               spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+               return;
+       }
+
+       /* Set starting address; reads will auto-increment */
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+       rmb();
+
+       /*
+        * "time" is actually "data" for mode 0 (no timestamp).
+        * Place the event id # at far right for easier visual parsing.
+        */
+       for (i = 0; i < num_events; i++) {
+               ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (mode == 0) {
+                       trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+                                                       0, time, ev);
+               } else {
+                       data = _iwl_legacy_read_direct32(priv,
+                                               HBUS_TARG_MEM_RDAT);
+                       trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+                                               time, data, ev);
+               }
+       }
+       /* Allow device to power down */
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
+{
+       u32 capacity;   /* event log capacity in # entries */
+       u32 base;       /* SRAM byte address of event log header */
+       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+       u32 num_wraps;  /* # times uCode wrapped to top of log */
+       u32 next_entry; /* index of next entry to be written by uCode */
+
+       if (priv->ucode_type == UCODE_INIT)
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       else
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
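+       /* Event log header: word 0 = capacity, word 1 = mode,
+        * word 2 = wrap count, word 3 = index of next entry to be written */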
+       if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               capacity = iwl_legacy_read_targ_mem(priv, base);
+               num_wraps = iwl_legacy_read_targ_mem(priv,
+                                               base + (2 * sizeof(u32)));
+               mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+               next_entry = iwl_legacy_read_targ_mem(priv,
+                                               base + (3 * sizeof(u32)));
+       } else
+               return;
+
+       if (num_wraps == priv->event_log.num_wraps) {
+               iwl4965_print_cont_event_trace(priv,
+                                      base, priv->event_log.next_entry,
+                                      next_entry - priv->event_log.next_entry,
+                                      mode);
+               priv->event_log.non_wraps_count++;
+       } else {
+               if ((num_wraps - priv->event_log.num_wraps) > 1)
+                       priv->event_log.wraps_more_count++;
+               else
+                       priv->event_log.wraps_once_count++;
+               trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
+                               num_wraps - priv->event_log.num_wraps,
+                               next_entry, priv->event_log.next_entry);
+               if (next_entry < priv->event_log.next_entry) {
+                       iwl4965_print_cont_event_trace(priv, base,
+                              priv->event_log.next_entry,
+                              capacity - priv->event_log.next_entry,
+                              mode);
+
+                       iwl4965_print_cont_event_trace(priv, base, 0,
+                               next_entry, mode);
+               } else {
+                       iwl4965_print_cont_event_trace(priv, base,
+                              next_entry, capacity - next_entry,
+                              mode);
+
+                       iwl4965_print_cont_event_trace(priv, base, 0,
+                               next_entry, mode);
+               }
+       }
+       priv->event_log.num_wraps = num_wraps;
+       priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl4965_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually set to execute every
+ * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
+ * This function performs continuous uCode event logging, if enabled.
+ */
+static void iwl4965_bg_ucode_trace(unsigned long data)
+{
+       struct iwl_priv *priv = (struct iwl_priv *)data;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (priv->event_log.ucode_trace) {
+               iwl4965_continuous_event_trace(priv);
+               /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+               mod_timer(&priv->ucode_trace,
+                        jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+       }
+}
+
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl4965_beacon_notif *beacon =
+               (struct iwl4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
+               "tsf %d %d rate %d\n",
+               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+               beacon->beacon_notify_hdr.failure_frame,
+               le32_to_cpu(beacon->ibss_mgr_status),
+               le32_to_cpu(beacon->high_tsf),
+               le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
+{
+       unsigned long flags;
+
+       IWL_DEBUG_POWER(priv, "Stop all queues\n");
+
+       if (priv->mac80211_registered)
+               ieee80211_stop_queues(priv->hw);
+
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+                       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+       iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       if (!iwl_grab_nic_access(priv))
+               iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+       unsigned long status = priv->status;
+
+       IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & CT_CARD_DISABLED) ?
+                         "Reached" : "Not reached");
+
+       if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+                    CT_CARD_DISABLED)) {
+
+               iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+                           CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+               iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+               if (!(flags & RXON_CARD_DISABLED)) {
+                       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+                       iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+               }
+       }
+
+       if (flags & CT_CARD_DISABLED)
+               iwl4965_perform_ct_kill_task(priv);
+
+       if (flags & HW_CARD_DISABLED)
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       if (!(flags & RXON_CARD_DISABLED))
+               iwl_legacy_scan_cancel(priv);
+
+       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+            test_bit(STATUS_RF_KILL_HW, &priv->status)))
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+                       test_bit(STATUS_RF_KILL_HW, &priv->status));
+       else
+               wake_up_interruptible(&priv->wait_command_queue);
+}
+
+/**
+ * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Set up the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware-specific files for them to set up
+ * any hardware-specific handlers as well.
+ */
+static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
+{
+       priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
+       priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
+       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+                       iwl_legacy_rx_spectrum_measure_notif;
+       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
+       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+           iwl_legacy_rx_pm_debug_statistics_notif;
+       priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+
+       /*
+        * The same handler is used for both the REPLY to a discrete
+        * statistics request from the host as well as for the periodic
+        * statistics notifications (after received beacons) from the uCode.
+        */
+       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
+       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
+
+       iwl_legacy_setup_rx_scan_handlers(priv);
+
+       /* status change handler */
+       priv->rx_handlers[CARD_STATE_NOTIFICATION] =
+                                       iwl4965_rx_card_state_notif;
+
+       priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
+           iwl4965_rx_missed_beacon_notif;
+       /* Rx handlers */
+       priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
+       priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
+       /* block ack */
+       priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
+       /* Set up hardware specific Rx handlers */
+       priv->cfg->ops->lib->rx_handler_setup(priv);
+}
+
+/**
+ * iwl4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void iwl4965_rx_handle(struct iwl_priv *priv)
+{
+       struct iwl_rx_mem_buffer *rxb;
+       struct iwl_rx_packet *pkt;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       u32 r, i;
+       int reclaim;
+       unsigned long flags;
+       u8 fill_rx = 0;
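+       /* count starts at 8 so that, once fill_rx is set, the queue is
+        * restocked on the first handled frame and then every 8 frames */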
+       u32 count = 8;
+       int total_empty;
+
+       /* uCode's read index (stored in shared DRAM) indicates the last Rx
+        * buffer that the driver may process (last buffer filled by ucode). */
+       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
+       i = rxq->read;
+
+       /* Rx interrupt, but nothing sent from uCode */
+       if (i == r)
+               IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+
+       /* calculate total frames that need to be restocked after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
+
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+
+       while (i != r) {
+               int len;
+
+               rxb = rxq->queue[i];
+
+               /* If an RXB doesn't have an Rx queue slot associated with it,
+                * then a bug has been introduced in the queue refilling
+                * routines -- catch it here */
+               BUG_ON(rxb == NULL);
+
+               rxq->queue[i] = NULL;
+
+               pci_unmap_page(priv->pci_dev, rxb->page_dma,
+                              PAGE_SIZE << priv->hw_params.rx_page_order,
+                              PCI_DMA_FROMDEVICE);
+               pkt = rxb_addr(rxb);
+
+               len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+               len += sizeof(u32); /* account for status word */
+               trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
+
+               /* Reclaim a command buffer only if this packet is a response
+                *   to a (driver-originated) command.
+                * If the packet (e.g. Rx frame) originated from uCode,
+                *   there is no command buffer to reclaim.
+                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+                *   but apparently a few don't get set; catch them here. */
+               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+                       (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+                       (pkt->hdr.cmd != REPLY_RX) &&
+                       (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+                       (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+                       (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+                       (pkt->hdr.cmd != REPLY_TX);
+
+               /* Based on type of command response or notification,
+                *   handle those that need handling via function in
+                *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
+               if (priv->rx_handlers[pkt->hdr.cmd]) {
+                       IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+                               i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+                               pkt->hdr.cmd);
+                       priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+                       priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+               } else {
+                       /* No handling needed */
+                       IWL_DEBUG_RX(priv,
+                               "r %d i %d No handler needed for %s, 0x%02x\n",
+                               r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+                               pkt->hdr.cmd);
+               }
+
+               /*
+                * XXX: After here, we should always check rxb->page
+                * against NULL before touching it or its virtual
+                * memory (pkt). Because some rx_handler might have
+                * already taken or freed the pages.
+                */
+
+               if (reclaim) {
+                       /* Invoke any callbacks, transfer the buffer to caller,
+                        * and fire off the (possibly) blocking iwl_legacy_send_cmd()
+                        * as we reclaim the driver command queue */
+                       if (rxb->page)
+                               iwl_legacy_tx_cmd_complete(priv, rxb);
+                       else
+                               IWL_WARN(priv, "Claim null rxb?\n");
+               }
+
+               /* Reuse the page if possible. For notification packets and
+                * SKBs that fail to Rx correctly, add them back into the
+                * rx_free list for reuse later. */
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (rxb->page != NULL) {
+                       rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+                               0, PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       list_add_tail(&rxb->list, &rxq->rx_free);
+                       rxq->free_count++;
+               } else
+                       list_add_tail(&rxb->list, &rxq->rx_used);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               i = (i + 1) & RX_QUEUE_MASK;
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so the ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               iwl4965_rx_replenish_now(priv);
+                               count = 0;
+                       }
+               }
+       }
+
+       /* Backtrack one entry */
+       rxq->read = i;
+       if (fill_rx)
+               iwl4965_rx_replenish_now(priv);
+       else
+               iwl4965_rx_queue_restock(priv);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
+{
+       /* wait to make sure we flush the pending tasklet */
+       synchronize_irq(priv->pci_dev->irq);
+       tasklet_kill(&priv->irq_tasklet);
+}
+
+static void iwl4965_irq_tasklet(struct iwl_priv *priv)
+{
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+       u32 i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       u32 inta_mask;
+#endif
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Ack/clear/reset pending uCode interrupts.
+        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+       inta = iwl_read32(priv, CSR_INT);
+       iwl_write32(priv, CSR_INT, inta);
+
+       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+        * Any new interrupts that happen after this, either while we're
+        * in this tasklet, or later, will show up in next ISR/tasklet. */
+       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+       iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
+               /* just for debug */
+               inta_mask = iwl_read32(priv, CSR_INT_MASK);
+               IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                             inta, inta_mask, inta_fh);
+       }
+#endif
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+        * atomic, make sure that inta covers all the interrupts that
+        * we've discovered, even if FH interrupt came in just after
+        * reading CSR_INT. */
+       if (inta_fh & CSR49_FH_INT_RX_MASK)
+               inta |= CSR_INT_BIT_FH_RX;
+       if (inta_fh & CSR49_FH_INT_TX_MASK)
+               inta |= CSR_INT_BIT_FH_TX;
+
+       /* Now service all interrupt bits discovered above. */
+       if (inta & CSR_INT_BIT_HW_ERR) {
+               IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
+
+               /* Tell the device to stop sending interrupts */
+               iwl_legacy_disable_interrupts(priv);
+
+               priv->isr_stats.hw++;
+               iwl_legacy_irq_handle_error(priv);
+
+               handled |= CSR_INT_BIT_HW_ERR;
+
+               return;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+               /* NIC fires this, but we don't use it, redundant with WAKEUP */
+               if (inta & CSR_INT_BIT_SCD) {
+                       IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
+                                     "the frame(s).\n");
+                       priv->isr_stats.sch++;
+               }
+
+               /* Alive notification via Rx interrupt will do the real work */
+               if (inta & CSR_INT_BIT_ALIVE) {
+                       IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+                       priv->isr_stats.alive++;
+               }
+       }
+#endif
+       /* Safely ignore these bits for debug checks below */
+       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+       /* HW RF KILL switch toggled */
+       if (inta & CSR_INT_BIT_RF_KILL) {
+               int hw_rf_kill = 0;
+               if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+                       hw_rf_kill = 1;
+
+               IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
+                               hw_rf_kill ? "disable radio" : "enable radio");
+
+               priv->isr_stats.rfkill++;
+
+               /* The driver only loads the ucode when setting the interface
+                * up, and it allows loading the ucode even if the radio is
+                * killed. Hence update the killswitch state here; the rfkill
+                * handler will take care of restarting if needed.
+                */
+               if (!test_bit(STATUS_ALIVE, &priv->status)) {
+                       if (hw_rf_kill)
+                               set_bit(STATUS_RF_KILL_HW, &priv->status);
+                       else
+                               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+                       wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+               }
+
+               handled |= CSR_INT_BIT_RF_KILL;
+       }
+
+       /* Chip got too hot and stopped itself */
+       if (inta & CSR_INT_BIT_CT_KILL) {
+               IWL_ERR(priv, "Microcode CT kill error detected.\n");
+               priv->isr_stats.ctkill++;
+               handled |= CSR_INT_BIT_CT_KILL;
+       }
+
+       /* Error detected by uCode */
+       if (inta & CSR_INT_BIT_SW_ERR) {
+               IWL_ERR(priv, "Microcode SW error detected. "
+                       "Restarting 0x%X.\n", inta);
+               priv->isr_stats.sw++;
+               iwl_legacy_irq_handle_error(priv);
+               handled |= CSR_INT_BIT_SW_ERR;
+       }
+
+       /*
+        * uCode wakes up after power-down sleep.
+        * Tell device about any new tx or host commands enqueued,
+        * and about any Rx buffers made available while asleep.
+        */
+       if (inta & CSR_INT_BIT_WAKEUP) {
+               IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+               iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+               for (i = 0; i < priv->hw_params.max_txq_num; i++)
+                       iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
+               priv->isr_stats.wakeup++;
+               handled |= CSR_INT_BIT_WAKEUP;
+       }
+
+       /* All uCode command responses, including Tx command responses,
+        * Rx "responses" (frame-received notification), and other
+        * notifications from uCode come through here */
+       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+               iwl4965_rx_handle(priv);
+               priv->isr_stats.rx++;
+               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+       }
+
+       /* This "Tx" DMA channel is used only for loading uCode */
+       if (inta & CSR_INT_BIT_FH_TX) {
+               IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
+               priv->isr_stats.tx++;
+               handled |= CSR_INT_BIT_FH_TX;
+               /* Wake up uCode load routine, now that load is complete */
+               priv->ucode_write_complete = 1;
+               wake_up_interruptible(&priv->wait_command_queue);
+       }
+
+       if (inta & ~handled) {
+               IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+               priv->isr_stats.unhandled++;
+       }
+
+       if (inta & ~(priv->inta_mask)) {
+               IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+                        inta & ~priv->inta_mask);
+               IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
+       }
+
+       /* Re-enable all interrupts */
+       /* only re-enable if disabled by irq */
+       if (test_bit(STATUS_INT_ENABLED, &priv->status))
+               iwl_legacy_enable_interrupts(priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+               inta = iwl_read32(priv, CSR_INT);
+               inta_mask = iwl_read32(priv, CSR_INT_MASK);
+               inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+               IWL_DEBUG_ISR(priv,
+                       "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+                       "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+       }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed through sysfs below is a per-device debug
+ * level that is used instead of the global debug level when it is set.
+ */
+static ssize_t iwl4965_show_debug_level(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
+}
+static ssize_t iwl4965_store_debug_level(struct device *d,
+                               struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret)
+               IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
+       else {
+               priv->debug_level = val;
+               if (iwl_legacy_alloc_traffic_mem(priv))
+                       IWL_ERR(priv,
+                               "Not enough memory to generate traffic log\n");
+       }
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+                       iwl4965_show_debug_level, iwl4965_store_debug_level);
+
+
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+
+static ssize_t iwl4965_show_temperature(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", priv->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
+
+static ssize_t iwl4965_show_tx_power(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       if (!iwl_legacy_is_ready_rf(priv))
+               return sprintf(buf, "off\n");
+       else
+               return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
+}
+
+static ssize_t iwl4965_store_tx_power(struct device *d,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret)
+               IWL_INFO(priv, "%s is not in decimal form.\n", buf);
+       else {
+               ret = iwl_legacy_set_tx_power(priv, val, false);
+               if (ret)
+                       IWL_ERR(priv, "failed setting tx power (%d).\n",
+                               ret);
+               else
+                       ret = count;
+       }
+       return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
+                       iwl4965_show_tx_power, iwl4965_store_tx_power);
+
+static struct attribute *iwl_sysfs_entries[] = {
+       &dev_attr_temperature.attr,
+       &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       &dev_attr_debug_level.attr,
+#endif
+       NULL
+};
+
+static struct attribute_group iwl_attribute_group = {
+       .name = NULL,           /* put in device directory */
+       .attrs = iwl_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
+{
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+}
+
+static void iwl4965_nic_start(struct iwl_priv *priv)
+{
+       /* Remove all resets to allow NIC to operate */
+       iwl_write32(priv, CSR_RESET, 0);
+}
+
+static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
+                                       void *context);
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+                                               u32 max_probe_length);
+
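+/*
+ * Request the next firmware image to try:  start with the highest uCode API
+ * version the driver supports; on failure the completion callback retries
+ * with the next lower version until ucode_api_min is reached.
+ */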
+static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
+{
+       const char *name_pre = priv->cfg->fw_name_pre;
+       char tag[8];
+
+       if (first) {
+               priv->fw_index = priv->cfg->ucode_api_max;
+               sprintf(tag, "%d", priv->fw_index);
+       } else {
+               priv->fw_index--;
+               sprintf(tag, "%d", priv->fw_index);
+       }
+
+       if (priv->fw_index < priv->cfg->ucode_api_min) {
+               IWL_ERR(priv, "no suitable firmware found!\n");
+               return -ENOENT;
+       }
+
+       sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+       IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
+                      priv->firmware_name);
+
+       return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
+                                      &priv->pci_dev->dev, GFP_KERNEL, priv,
+                                      iwl4965_ucode_callback);
+}
+
+struct iwl4965_firmware_pieces {
+       const void *inst, *data, *init, *init_data, *boot;
+       size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int iwl4965_load_firmware(struct iwl_priv *priv,
+                                      const struct firmware *ucode_raw,
+                                      struct iwl4965_firmware_pieces *pieces)
+{
+       struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+       u32 api_ver, hdr_size;
+       const u8 *src;
+
+       priv->ucode_ver = le32_to_cpu(ucode->ver);
+       api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+       switch (api_ver) {
+       default:
+       case 0:
+       case 1:
+       case 2:
+               hdr_size = 24;
+               if (ucode_raw->size < hdr_size) {
+                       IWL_ERR(priv, "File size too small!\n");
+                       return -EINVAL;
+               }
+               pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+               pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+               pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+               pieces->init_data_size =
+                               le32_to_cpu(ucode->v1.init_data_size);
+               pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+               src = ucode->v1.data;
+               break;
+       }
+
+       /* Verify size of file vs. image size info in file's header */
+       if (ucode_raw->size != hdr_size + pieces->inst_size +
+                               pieces->data_size + pieces->init_size +
+                               pieces->init_data_size + pieces->boot_size) {
+
+               IWL_ERR(priv,
+                       "uCode file size %d does not match expected size\n",
+                       (int)ucode_raw->size);
+               return -EINVAL;
+       }
+
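+       /* Image payloads follow the header in a fixed order:  runtime inst,
+        * runtime data, init inst, init data, bootstrap */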
+       pieces->inst = src;
+       src += pieces->inst_size;
+       pieces->data = src;
+       src += pieces->data_size;
+       pieces->init = src;
+       src += pieces->init_size;
+       pieces->init_data = src;
+       src += pieces->init_data_size;
+       pieces->boot = src;
+       src += pieces->boot_size;
+
+       return 0;
+}
+
+/**
+ * iwl4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+       struct iwl_priv *priv = context;
+       struct iwl_ucode_header *ucode;
+       int err;
+       struct iwl4965_firmware_pieces pieces;
+       const unsigned int api_max = priv->cfg->ucode_api_max;
+       const unsigned int api_min = priv->cfg->ucode_api_min;
+       u32 api_ver;
+
+       u32 max_probe_length = 200;
+       u32 standard_phy_calibration_size =
+                       IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+       memset(&pieces, 0, sizeof(pieces));
+
+       if (!ucode_raw) {
+               if (priv->fw_index <= priv->cfg->ucode_api_max)
+                       IWL_ERR(priv,
+                               "request for firmware file '%s' failed.\n",
+                               priv->firmware_name);
+               goto try_again;
+       }
+
+       IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
+                      priv->firmware_name, ucode_raw->size);
+
+       /* Make sure that we got at least the API version number */
+       if (ucode_raw->size < 4) {
+               IWL_ERR(priv, "File size way too small!\n");
+               goto try_again;
+       }
+
+       /* Data from ucode file:  header followed by uCode images */
+       ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+       err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
+
+       if (err)
+               goto try_again;
+
+       api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+       /*
+        * api_ver should match the api version forming part of the
+        * firmware filename ... but we don't check for that and only rely
+        * on the API version read from firmware header from here on forward
+        */
+       if (api_ver < api_min || api_ver > api_max) {
+               IWL_ERR(priv,
+                       "Driver unable to support your firmware API. "
+                       "Driver supports v%u, firmware is v%u.\n",
+                       api_max, api_ver);
+               goto try_again;
+       }
+
+       if (api_ver != api_max)
+               IWL_ERR(priv,
+                       "Firmware has old API version. Expected v%u, "
+                       "got v%u. New firmware can be obtained "
+                       "from http://www.intellinuxwireless.org.\n",
+                       api_max, api_ver);
+
+       IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
+                IWL_UCODE_MAJOR(priv->ucode_ver),
+                IWL_UCODE_MINOR(priv->ucode_ver),
+                IWL_UCODE_API(priv->ucode_ver),
+                IWL_UCODE_SERIAL(priv->ucode_ver));
+
+       snprintf(priv->hw->wiphy->fw_version,
+                sizeof(priv->hw->wiphy->fw_version),
+                "%u.%u.%u.%u",
+                IWL_UCODE_MAJOR(priv->ucode_ver),
+                IWL_UCODE_MINOR(priv->ucode_ver),
+                IWL_UCODE_API(priv->ucode_ver),
+                IWL_UCODE_SERIAL(priv->ucode_ver));
+
+       /*
+        * For any of the failures below (before allocating pci memory)
+        * we will try to load a version with a smaller API -- maybe the
+        * user just got a corrupted version of the latest API.
+        */
+
+       IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+                      priv->ucode_ver);
+       IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+                      pieces.inst_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+                      pieces.data_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+                      pieces.init_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+                      pieces.init_data_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+                      pieces.boot_size);
+
+       /* Verify that uCode images will fit in card's SRAM */
+       if (pieces.inst_size > priv->hw_params.max_inst_size) {
+               IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+                       pieces.inst_size);
+               goto try_again;
+       }
+
+       if (pieces.data_size > priv->hw_params.max_data_size) {
+               IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+                       pieces.data_size);
+               goto try_again;
+       }
+
+       if (pieces.init_size > priv->hw_params.max_inst_size) {
+               IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+                       pieces.init_size);
+               goto try_again;
+       }
+
+       if (pieces.init_data_size > priv->hw_params.max_data_size) {
+               IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+                       pieces.init_data_size);
+               goto try_again;
+       }
+
+       if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+               IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+                       pieces.boot_size);
+               goto try_again;
+       }
+
+       /* Allocate ucode buffers for card's bus-master loading ... */
+
+       /* Runtime instructions and 2 copies of data:
+        * 1) unmodified from disk
+        * 2) backup cache for save/restore during power-downs */
+       priv->ucode_code.len = pieces.inst_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+
+       priv->ucode_data.len = pieces.data_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+
+       priv->ucode_data_backup.len = pieces.data_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+
+       if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+           !priv->ucode_data_backup.v_addr)
+               goto err_pci_alloc;
+
+       /* Initialization instructions and data */
+       if (pieces.init_size && pieces.init_data_size) {
+               priv->ucode_init.len = pieces.init_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+
+               priv->ucode_init_data.len = pieces.init_data_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+
+               if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Bootstrap (instructions only, no data) */
+       if (pieces.boot_size) {
+               priv->ucode_boot.len = pieces.boot_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+
+               if (!priv->ucode_boot.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Now that we can no longer fail, copy information */
+
+       priv->sta_key_max_num = STA_KEY_MAX_NUM;
+
+       /* Copy images into buffers for card's bus-master reads ... */
+
+       /* Runtime instructions (first block of data in file) */
+       IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+                       pieces.inst_size);
+       memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+       IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+               priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
+
+       /*
+        * Runtime data
+        * NOTE:  Copy into backup buffer will be done in iwl_up()
+        */
+       IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+                       pieces.data_size);
+       memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+       memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+       /* Initialization instructions */
+       if (pieces.init_size) {
+               IWL_DEBUG_INFO(priv,
+                               "Copying (but not loading) init instr len %Zd\n",
+                               pieces.init_size);
+               memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
+       }
+
+       /* Initialization data */
+       if (pieces.init_data_size) {
+               IWL_DEBUG_INFO(priv,
+                               "Copying (but not loading) init data len %Zd\n",
+                              pieces.init_data_size);
+               memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+                      pieces.init_data_size);
+       }
+
+       /* Bootstrap instructions */
+       IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+                       pieces.boot_size);
+       memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+       /*
+        * figure out the offset of chain noise reset and gain commands
+        * base on the size of standard phy calibration commands table size
+        * based on the size of the standard phy calibration commands table
+       priv->_4965.phy_calib_chain_noise_reset_cmd =
+               standard_phy_calibration_size;
+       priv->_4965.phy_calib_chain_noise_gain_cmd =
+               standard_phy_calibration_size + 1;
+
+       /**************************************************
+        * This is still part of probe() in a sense...
+        *
+        * 9. Setup and register with mac80211 and debugfs
+        **************************************************/
+       err = iwl4965_mac_setup_register(priv, max_probe_length);
+       if (err)
+               goto out_unbind;
+
+       err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
+       if (err)
+               IWL_ERR(priv,
+               "failed to create debugfs files. Ignoring error: %d\n", err);
+
+       err = sysfs_create_group(&priv->pci_dev->dev.kobj,
+                                       &iwl_attribute_group);
+       if (err) {
+               IWL_ERR(priv, "failed to create sysfs device attributes\n");
+               goto out_unbind;
+       }
+
+       /* We have our copies now, allow the OS to release its copy */
+       release_firmware(ucode_raw);
+       complete(&priv->_4965.firmware_loading_complete);
+       return;
+
+ try_again:
+       /* try next, if any */
+       if (iwl4965_request_firmware(priv, false))
+               goto out_unbind;
+       release_firmware(ucode_raw);
+       return;
+
+ err_pci_alloc:
+       IWL_ERR(priv, "failed to allocate pci memory\n");
+       iwl4965_dealloc_ucode_pci(priv);
+ out_unbind:
+       complete(&priv->_4965.firmware_loading_complete);
+       device_release_driver(&priv->pci_dev->dev);
+       release_firmware(ucode_raw);
+}
+
+static const char * const desc_lookup_text[] = {
+       "OK",
+       "FAIL",
+       "BAD_PARAM",
+       "BAD_CHECKSUM",
+       "NMI_INTERRUPT_WDG",
+       "SYSASSERT",
+       "FATAL_ERROR",
+       "BAD_COMMAND",
+       "HW_ERROR_TUNE_LOCK",
+       "HW_ERROR_TEMPERATURE",
+       "ILLEGAL_CHAN_FREQ",
+       "VCC_NOT_STABLE",
+       "FH_ERROR",
+       "NMI_INTERRUPT_HOST",
+       "NMI_INTERRUPT_ACTION_PT",
+       "NMI_INTERRUPT_UNKNOWN",
+       "UCODE_VERSION_MISMATCH",
+       "HW_ERROR_ABS_LOCK",
+       "HW_ERROR_CAL_LOCK_FAIL",
+       "NMI_INTERRUPT_INST_ACTION_PT",
+       "NMI_INTERRUPT_DATA_ACTION_PT",
+       "NMI_TRM_HW_ER",
+       "NMI_INTERRUPT_TRM",
+       "NMI_INTERRUPT_BREAK_POINT",
+       "DEBUG_0",
+       "DEBUG_1",
+       "DEBUG_2",
+       "DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+       { "NMI_INTERRUPT_WDG", 0x34 },
+       { "SYSASSERT", 0x35 },
+       { "UCODE_VERSION_MISMATCH", 0x37 },
+       { "BAD_COMMAND", 0x38 },
+       { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+       { "FATAL_ERROR", 0x3D },
+       { "NMI_TRM_HW_ERR", 0x46 },
+       { "NMI_INTERRUPT_TRM", 0x4C },
+       { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+       { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+       { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+       { "NMI_INTERRUPT_HOST", 0x66 },
+       { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+       { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+       { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+       { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *iwl4965_desc_lookup(u32 num)
+{
+       int i;
+       int max = ARRAY_SIZE(desc_lookup_text);
+
+       if (num < max)
+               return desc_lookup_text[num];
+
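+       /* Search the advanced table; if nothing matches, the loop ends with
+        * i == max and the final "ADVANCED_SYSASSERT" entry is returned */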
+       max = ARRAY_SIZE(advanced_lookup) - 1;
+       for (i = 0; i < max; i++) {
+               if (advanced_lookup[i].num == num)
+                       break;
+       }
+       return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
+{
+       u32 data2, line;
+       u32 desc, time, count, base, data1;
+       u32 blink1, blink2, ilink1, ilink2;
+       u32 pc, hcmd;
+
+       if (priv->ucode_type == UCODE_INIT) {
+               base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+       } else {
+               base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+       }
+
+       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+               return;
+       }
+
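+       /* Word 0 of the error event table is read as the entry count; the
+        * individual fields below sit at fixed word offsets within the table */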
+       count = iwl_legacy_read_targ_mem(priv, base);
+
+       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+                       priv->status, count);
+       }
+
+       desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
+       priv->isr_stats.err_code = desc;
+       pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
+       blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
+       blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
+       ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
+       ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
+       data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
+       data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
+       line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
+       time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
+       hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
+
+       trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
+                                       time, data1, data2, line,
+                                     blink1, blink2, ilink1, ilink2);
+
+       IWL_ERR(priv, "Desc                                  Time       "
+               "data1      data2      line\n");
+       IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+               iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
+       IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
+       IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+               pc, blink1, blink2, ilink1, ilink2, hcmd);
+}
+
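+/* Event log header is 4 u32s: capacity, mode, num_wraps, next_entry */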
+#define EVENT_START_OFFSET  (4 * sizeof(u32))
+
+/**
+ * iwl4965_print_event_log - Dump event log entries to syslog or a buffer
+ */
+static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
+                              u32 num_events, u32 mode,
+                              int pos, char **buf, size_t bufsz)
+{
+       u32 i;
+       u32 base;       /* SRAM byte address of event log header */
+       u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+       u32 ptr;        /* SRAM byte address of log data */
+       u32 ev, time, data; /* event log data */
+       unsigned long reg_flags;
+
+       if (num_events == 0)
+               return pos;
+
+       if (priv->ucode_type == UCODE_INIT) {
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       } else {
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       }
+
+       if (mode == 0)
+               event_size = 2 * sizeof(u32);
+       else
+               event_size = 3 * sizeof(u32);
+
+       ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+       /* Make sure device is powered up for SRAM reads */
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+
+       /* Set starting address; reads will auto-increment */
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+       rmb();
+
+       /* "time" is actually "data" for mode 0 (no timestamp).
+       * place event id # at far right for easier visual parsing. */
+       for (i = 0; i < num_events; i++) {
+               ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (mode == 0) {
+                       /* data, ev */
+                       if (bufsz) {
+                               pos += scnprintf(*buf + pos, bufsz - pos,
+                                               "EVT_LOG:0x%08x:%04u\n",
+                                               time, ev);
+                       } else {
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
+                                       time, ev);
+                               IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+                                       time, ev);
+                       }
+               } else {
+                       data = _iwl_legacy_read_direct32(priv,
+                                               HBUS_TARG_MEM_RDAT);
+                       if (bufsz) {
+                               pos += scnprintf(*buf + pos, bufsz - pos,
+                                               "EVT_LOGT:%010u:0x%08x:%04u\n",
+                                                time, data, ev);
+                       } else {
+                               IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+                                       time, data, ev);
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, time,
+                                       data, ev);
+                       }
+               }
+       }
+
+       /* Allow device to power down */
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return pos;
+}
+
+/**
+ * iwl4965_print_last_event_logs - Dump the newest event log entries to syslog
+ */
+static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+                                   u32 num_wraps, u32 next_entry,
+                                   u32 size, u32 mode,
+                                   int pos, char **buf, size_t bufsz)
+{
+       /*
+        * Display the newest 'size' entries, i.e. the entries written
+        * just before the next one that uCode would fill.
+        */
+       if (num_wraps) {
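+               /* log has wrapped: the newest entries may be split across
+                * the end and the start of the circular buffer */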
+               if (next_entry < size) {
+                       pos = iwl4965_print_event_log(priv,
+                                               capacity - (size - next_entry),
+                                               size - next_entry, mode,
+                                               pos, buf, bufsz);
+                       pos = iwl4965_print_event_log(priv, 0,
+                                                 next_entry, mode,
+                                                 pos, buf, bufsz);
+               } else
+                       pos = iwl4965_print_event_log(priv, next_entry - size,
+                                                 size, mode, pos, buf, bufsz);
+       } else {
+               if (next_entry < size) {
+                       pos = iwl4965_print_event_log(priv, 0, next_entry,
+                                                 mode, pos, buf, bufsz);
+               } else {
+                       pos = iwl4965_print_event_log(priv, next_entry - size,
+                                                 size, mode, pos, buf, bufsz);
+               }
+       }
+       return pos;
+}
+
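+/* number of newest event log entries to dump when a full dump is not requested */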
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+                           char **buf, bool display)
+{
+       u32 base;       /* SRAM byte address of event log header */
+       u32 capacity;   /* event log capacity in # entries */
+       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+       u32 num_wraps;  /* # times uCode wrapped to top of log */
+       u32 next_entry; /* index of next entry to be written by uCode */
+       u32 size;       /* # entries that we'll print */
+       int pos = 0;
+       size_t bufsz = 0;
+
+       if (priv->ucode_type == UCODE_INIT) {
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       } else {
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       }
+
+       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv,
+                       "Invalid event log pointer 0x%08X for %s uCode\n",
+                       base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+               return -EINVAL;
+       }
+
+       /* event log header */
+       capacity = iwl_legacy_read_targ_mem(priv, base);
+       mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+       num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+       next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
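+       /* once the log has wrapped, every one of the 'capacity' slots is valid */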
+       size = num_wraps ? capacity : next_entry;
+
+       /* bail out if nothing in log */
+       if (size == 0) {
+               IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+               return pos;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+               size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+                       ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+       size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+               ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+       IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+               size);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (display) {
+               if (full_log)
+                       bufsz = capacity * 48;
+               else
+                       bufsz = size * 48;
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+       }
+       if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+               /*
+                * If uCode has wrapped back to the top of the log,
+                * start at the oldest entry, i.e. the next one that
+                * uCode would fill.
+                */
+               if (num_wraps)
+                       pos = iwl4965_print_event_log(priv, next_entry,
+                                               capacity - next_entry, mode,
+                                               pos, buf, bufsz);
+               /* (then/else) start at top of log */
+               pos = iwl4965_print_event_log(priv, 0,
+                                         next_entry, mode, pos, buf, bufsz);
+       } else
+               pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+                                               next_entry, size, mode,
+                                               pos, buf, bufsz);
+#else
+       pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+                                       next_entry, size, mode,
+                                       pos, buf, bufsz);
+#endif
+       return pos;
+}
+
+static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
+{
+       struct iwl_ct_kill_config cmd;
+       unsigned long flags;
+       int ret = 0;
+
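+       /* clear the CT_KILL_EXIT handshake bit (via the GP1 clear register)
+        * before sending the new critical-temperature threshold */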
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       cmd.critical_temperature_R =
+               cpu_to_le32(priv->hw_params.ct_kill_threshold);
+
+       ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
+                              sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
+       else
+               IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
+                               "succeeded, "
+                               "critical temperature is %d\n",
+                               priv->hw_params.ct_kill_threshold);
+}
+
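+/* Default Tx queue -> FIFO mapping: queues 0-3 feed the VO/VI/BE/BK
+ * EDCA FIFOs, queue 4 is the command FIFO, queues 5-6 are unused */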
+static const s8 default_queue_to_tx_fifo[] = {
+       IWL_TX_FIFO_VO,
+       IWL_TX_FIFO_VI,
+       IWL_TX_FIFO_BE,
+       IWL_TX_FIFO_BK,
+       IWL49_CMD_FIFO_NUM,
+       IWL_TX_FIFO_UNUSED,
+       IWL_TX_FIFO_UNUSED,
+};
+
+static int iwl4965_alive_notify(struct iwl_priv *priv)
+{
+       u32 a;
+       unsigned long flags;
+       int i, chan;
+       u32 reg_val;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Clear 4965's internal Tx Scheduler data base */
+       priv->scd_base_addr = iwl_legacy_read_prph(priv,
+                                       IWL49_SCD_SRAM_BASE_ADDR);
+       a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
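+       /* zero the context data, Tx status bitmap, and translation table
+        * regions of scheduler SRAM */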
+       for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+               iwl_legacy_write_targ_mem(priv, a, 0);
+       for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+               iwl_legacy_write_targ_mem(priv, a, 0);
+       for (; a < priv->scd_base_addr +
+              IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+               iwl_legacy_write_targ_mem(priv, a, 0);
+
+       /* Tell 4965 where to find Tx byte count tables */
+       iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
+                       priv->scd_bc_tbls.dma >> 10);
+
+       /* Enable DMA channel */
+       for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
+               iwl_legacy_write_direct32(priv,
+                               FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+       /* Update FH chicken bits */
+       reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+       iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+                          reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+       /* Disable chain mode for all queues */
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
+
+       /* Initialize each Tx queue (including the command queue) */
+       for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+
+               /* TFD circular buffer read/write indexes */
+               iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
+               iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+               /* Max Tx Window size for Scheduler-ACK mode */
+               iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+                               (SCD_WIN_SIZE <<
+                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+               /* Frame limit */
+               iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+                               sizeof(u32),
+                               (SCD_FRAME_LIMIT <<
+                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+       }
+       iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
+                                (1 << priv->hw_params.max_txq_num) - 1);
+
+       /* Activate all Tx DMA/FIFO channels */
+       iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
+
+       iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+       /* make sure all queues are not stopped */
+       memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+       for (i = 0; i < 4; i++)
+               atomic_set(&priv->queue_stop_count[i], 0);
+
+       /* reset to 0 to enable all queues first */
+       priv->txq_ctx_active_msk = 0;
+       /* Map each Tx/cmd queue to its corresponding fifo */
+       BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+       for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+               int ac = default_queue_to_tx_fifo[i];
+
+               iwl_txq_ctx_activate(priv, i);
+
+               if (ac == IWL_TX_FIFO_UNUSED)
+                       continue;
+
+               iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return 0;
+}
+
+/**
+ * iwl4965_alive_start - called after REPLY_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by iwl_init_alive_start()).
+ */
+static void iwl4965_alive_start(struct iwl_priv *priv)
+{
+       int ret = 0;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Alive failed.\n");
+               goto restart;
+       }
+
+       /* The initialize uCode has loaded the runtime uCode; verify the
+        * instruction image. This is a paranoid check, because we would
+        * not have gotten the "runtime" alive if the code weren't
+        * properly loaded. */
+       if (iwl4965_verify_ucode(priv)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
+               goto restart;
+       }
+
+       ret = iwl4965_alive_notify(priv);
+       if (ret) {
+               IWL_WARN(priv,
+                       "Could not complete ALIVE transition [ntf]: %d\n", ret);
+               goto restart;
+       }
+
+       /* After the ALIVE response, we can send host commands to the uCode */
+       set_bit(STATUS_ALIVE, &priv->status);
+
+       /* Enable watchdog to monitor the driver tx queues */
+       iwl_legacy_setup_watchdog(priv);
+
+       if (iwl_legacy_is_rfkill(priv))
+               return;
+
+       ieee80211_wake_queues(priv->hw);
+
+       priv->active_rate = IWL_RATES_MASK;
+
+       if (iwl_legacy_is_associated_ctx(ctx)) {
+               struct iwl_legacy_rxon_cmd *active_rxon =
+                               (struct iwl_legacy_rxon_cmd *)&ctx->active;
+               /* apply any changes in staging */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       } else {
+               struct iwl_rxon_context *tmp;
+               /* Initialize our rx_config data */
+               for_each_context(priv, tmp)
+                       iwl_legacy_connection_init_rx_config(priv, tmp);
+
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       /* Configure bluetooth coexistence if enabled */
+       iwl_legacy_send_bt_config(priv);
+
+       iwl4965_reset_run_time_calib(priv);
+
+       set_bit(STATUS_READY, &priv->status);
+
+       /* Configure the adapter for unassociated operation */
+       iwl_legacy_commit_rxon(priv, ctx);
+
+       /* At this point, the NIC is initialized and operational */
+       iwl4965_rf_kill_ct_config(priv);
+
+       IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+       wake_up_interruptible(&priv->wait_command_queue);
+
+       iwl_legacy_power_update_mode(priv, true);
+       IWL_DEBUG_INFO(priv, "Updated power mode\n");
+
+       return;
+
+ restart:
+       queue_work(priv->workqueue, &priv->restart);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
+
+static void __iwl4965_down(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+
+       exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must already be
+        * set to prevent the timer from being re-armed */
+       del_timer_sync(&priv->watchdog);
+
+       iwl_legacy_clear_ucode_stations(priv, NULL);
+       iwl_legacy_dealloc_bcast_stations(priv);
+       iwl_legacy_clear_driver_stations(priv);
+
+       /* Unblock any waiting calls */
+       wake_up_interruptible_all(&priv->wait_command_queue);
+
+       /* Wipe out the EXIT_PENDING status bit if we are not actually
+        * exiting the module */
+       if (!exit_pending)
+               clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* stop and reset the on-board processor */
+       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /* tell the device to stop sending interrupts */
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       iwl4965_synchronize_irq(priv);
+
+       if (priv->mac80211_registered)
+               ieee80211_stop_queues(priv->hw);
+
+       /* If we have not previously called iwl_init() then
+        * clear all bits but the RF Kill bit and return */
+       if (!iwl_legacy_is_init(priv)) {
+               priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+                                       STATUS_RF_KILL_HW |
+                              test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+                                       STATUS_GEO_CONFIGURED |
+                              test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+                                       STATUS_EXIT_PENDING;
+               goto exit;
+       }
+
+       /* ...otherwise clear out all the status bits but the RF Kill
+        * bit and continue taking the NIC down. */
+       priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+                               STATUS_RF_KILL_HW |
+                       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+                               STATUS_GEO_CONFIGURED |
+                       test_bit(STATUS_FW_ERROR, &priv->status) <<
+                               STATUS_FW_ERROR |
+                      test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+                               STATUS_EXIT_PENDING;
+
+       iwl4965_txq_ctx_stop(priv);
+       iwl4965_rxq_stop(priv);
+
+       /* Power-down device's busmaster DMA clocks */
+       iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(5);
+
+       /* Make sure (redundant) we've released our request to stay awake */
+       iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /* Stop the device, and put it in low power state */
+       iwl_legacy_apm_stop(priv);
+
+ exit:
+       memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
+
+       dev_kfree_skb(priv->beacon_skb);
+       priv->beacon_skb = NULL;
+
+       /* clear out any free frames */
+       iwl4965_clear_free_frames(priv);
+}
+
+static void iwl4965_down(struct iwl_priv *priv)
+{
+       mutex_lock(&priv->mutex);
+       __iwl4965_down(priv);
+       mutex_unlock(&priv->mutex);
+
+       iwl4965_cancel_deferred_work(priv);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int iwl4965_set_hw_ready(struct iwl_priv *priv)
+{
+       int ret = 0;
+
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+       /* See if we got it */
+       ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+                               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                               HW_READY_TIMEOUT);
+       priv->hw_ready = (ret != -ETIMEDOUT);
+
+       IWL_DEBUG_INFO(priv, "hardware %s\n",
+                      priv->hw_ready ? "ready" : "not ready");
+       return ret;
+}
+
+static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
+{
+       int ret = 0;
+
+       IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
+
+       ret = iwl4965_set_hw_ready(priv);
+       if (priv->hw_ready)
+               return ret;
+
+       /* If HW is not ready, prepare the conditions to check again */
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                       CSR_HW_IF_CONFIG_REG_PREPARE);
+
+       ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+                       ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+                       CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+       /* HW should be ready by now, check again. */
+       if (ret != -ETIMEDOUT)
+               iwl4965_set_hw_ready(priv);
+
+       return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int __iwl4965_up(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+       int i;
+       int ret;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+               return -EIO;
+       }
+
+       if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+               IWL_ERR(priv, "ucode not available for device bringup\n");
+               return -EIO;
+       }
+
+       for_each_context(priv, ctx) {
+               ret = iwl4965_alloc_bcast_station(priv, ctx);
+               if (ret) {
+                       iwl_legacy_dealloc_bcast_stations(priv);
+                       return ret;
+               }
+       }
+
+       iwl4965_prepare_card_hw(priv);
+
+       if (!priv->hw_ready) {
+               IWL_WARN(priv, "Exit HW not ready\n");
+               return -EIO;
+       }
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (iwl_read32(priv,
+               CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       if (iwl_legacy_is_rfkill(priv)) {
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+
+               iwl_legacy_enable_interrupts(priv);
+               IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
+               return 0;
+       }
+
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+
+       /* must be initialised before iwl_hw_nic_init */
+       priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+
+       ret = iwl4965_hw_nic_init(priv);
+       if (ret) {
+               IWL_ERR(priv, "Unable to init nic\n");
+               return ret;
+       }
+
+       /* make sure rfkill handshake bits are cleared */
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       /* clear (again), then enable host interrupts */
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+       iwl_legacy_enable_interrupts(priv);
+
+       /* really make sure rfkill handshake bits are cleared */
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+       /* Copy original ucode data image from disk into backup cache.
+        * This will be used to initialize the on-board processor's
+        * data SRAM for a clean start when the runtime program first loads. */
+       memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
+              priv->ucode_data.len);
+
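+       /* try to load the bootstrap uCode and start the card; retry up to
+        * MAX_HW_RESTARTS times before giving up */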
+       for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+               /* load bootstrap state machine,
+                * load bootstrap program into processor's memory,
+                * prepare to load the "initialize" uCode */
+               ret = priv->cfg->ops->lib->load_ucode(priv);
+
+               if (ret) {
+                       IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
+                               ret);
+                       continue;
+               }
+
+               /* start card; "initialize" will load runtime ucode */
+               iwl4965_nic_start(priv);
+
+               IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
+
+               return 0;
+       }
+
+       set_bit(STATUS_EXIT_PENDING, &priv->status);
+       __iwl4965_down(priv);
+       clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* tried to restart and configure the device for as long as our
+        * patience could withstand */
+       IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
+       return -EIO;
+}
+
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_init_alive_start(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, init_alive_start.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       priv->cfg->ops->lib->init_alive_start(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_alive_start(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, alive_start.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl4965_alive_start(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                       run_time_calib_work);
+
+       mutex_lock(&priv->mutex);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           test_bit(STATUS_SCANNING, &priv->status)) {
+               mutex_unlock(&priv->mutex);
+               return;
+       }
+
+       if (priv->start_calib) {
+               iwl4965_chain_noise_calibration(priv,
+                               (void *)&priv->_4965.statistics);
+               iwl4965_sensitivity_calibration(priv,
+                               (void *)&priv->_4965.statistics);
+       }
+
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_restart(struct work_struct *data)
+{
+       struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+               struct iwl_rxon_context *ctx;
+
+               mutex_lock(&priv->mutex);
+               for_each_context(priv, ctx)
+                       ctx->vif = NULL;
+               priv->is_open = 0;
+
+               __iwl4965_down(priv);
+
+               mutex_unlock(&priv->mutex);
+               iwl4965_cancel_deferred_work(priv);
+               ieee80211_restart_hw(priv->hw);
+       } else {
+               iwl4965_down(priv);
+
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       return;
+
+               mutex_lock(&priv->mutex);
+               __iwl4965_up(priv);
+               mutex_unlock(&priv->mutex);
+       }
+}
+
+static void iwl4965_bg_rx_replenish(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, rx_replenish);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl4965_rx_replenish(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
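+/* max time (in jiffies) to wait for the runtime uCode to report READY */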
+#define UCODE_READY_TIMEOUT    (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+                                 u32 max_probe_length)
+{
+       int ret;
+       struct ieee80211_hw *hw = priv->hw;
+       struct iwl_rxon_context *ctx;
+
+       hw->rate_control_algorithm = "iwl-4965-rs";
+
+       /* Tell mac80211 our characteristics */
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_AMPDU_AGGREGATION |
+                   IEEE80211_HW_NEED_DTIM_PERIOD |
+                   IEEE80211_HW_SPECTRUM_MGMT |
+                   IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+       if (priv->cfg->sku & IWL_SKU_N)
+               hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+                            IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+       hw->sta_data_size = sizeof(struct iwl_station_priv);
+       hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+       for_each_context(priv, ctx) {
+               hw->wiphy->interface_modes |= ctx->interface_modes;
+               hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+       }
+
+       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+       /*
+        * For now, disable PS by default because it affects
+        * RX performance significantly.
+        */
+       hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+       /* we create the 802.11 header and a zero-length SSID element */
+       hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+       /* Default value; 4 EDCA QOS priorities */
+       hw->queues = 4;
+
+       hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+                       &priv->bands[IEEE80211_BAND_2GHZ];
+       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+                       &priv->bands[IEEE80211_BAND_5GHZ];
+
+       iwl_legacy_leds_init(priv);
+
+       ret = ieee80211_register_hw(priv->hw);
+       if (ret) {
+               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               return ret;
+       }
+       priv->mac80211_registered = 1;
+
+       return 0;
+}
+
+
+int iwl4965_mac_start(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       /* we should be verifying the device is ready to be opened */
+       mutex_lock(&priv->mutex);
+       ret = __iwl4965_up(priv);
+       mutex_unlock(&priv->mutex);
+
+       if (ret)
+               return ret;
+
+       if (iwl_legacy_is_rfkill(priv))
+               goto out;
+
+       IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+       /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+        * mac80211 will not be run successfully. */
+       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+                       test_bit(STATUS_READY, &priv->status),
+                       UCODE_READY_TIMEOUT);
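+       /* a return of 0 means the wait timed out without READY being set;
+        * double-check the bit before declaring failure */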
+       if (!ret) {
+               if (!test_bit(STATUS_READY, &priv->status)) {
+                       IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
+                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
+                       return -ETIMEDOUT;
+               }
+       }
+
+       iwl4965_led_enable(priv);
+
+out:
+       priv->is_open = 1;
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+}
+
+void iwl4965_mac_stop(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!priv->is_open)
+               return;
+
+       priv->is_open = 0;
+
+       iwl4965_down(priv);
+
+       flush_workqueue(priv->workqueue);
+
+       /* enable interrupts again in order to receive rfkill changes */
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+       iwl_legacy_enable_interrupts(priv);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MACDUMP(priv, "enter\n");
+
+       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+       if (iwl4965_tx_skb(priv, skb))
+               dev_kfree_skb_any(skb);
+
+       IWL_DEBUG_MACDUMP(priv, "leave\n");
+}
+
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta,
+                               u32 iv32, u16 *phase1key)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
+                           iv32, phase1key);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *ctx = vif_priv->ctx;
+       int ret;
+       u8 sta_id;
+       bool is_default_wep_key = false;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (priv->cfg->mod_params->sw_crypto) {
+               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
+       if (sta_id == IWL_INVALID_STATION)
+               return -EINVAL;
+
+       mutex_lock(&priv->mutex);
+       iwl_legacy_scan_cancel_timeout(priv, 100);
+
+       /*
+        * If we are getting a WEP group key and we did not receive any
+        * key mapping so far, we are in legacy WEP mode (group key only);
+        * otherwise we are in 1X mode.
+        * In legacy WEP mode we use a different host command to the uCode.
+        */
+       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+            key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+           !sta) {
+               if (cmd == SET_KEY)
+                       is_default_wep_key = !ctx->key_mapping_keys;
+               else
+                       is_default_wep_key =
+                                       (key->hw_key_idx == HW_KEY_DEFAULT);
+       }
+
+       switch (cmd) {
+       case SET_KEY:
+               if (is_default_wep_key)
+                       ret = iwl4965_set_default_wep_key(priv,
+                                                       vif_priv->ctx, key);
+               else
+                       ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
+                                                 key, sta_id);
+
+               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+               break;
+       case DISABLE_KEY:
+               if (is_default_wep_key)
+                       ret = iwl4965_remove_default_wep_key(priv, ctx, key);
+               else
+                       ret = iwl4965_remove_dynamic_key(priv, ctx,
+                                                       key, sta_id);
+
+               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&priv->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret = -EINVAL;
+
+       IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+                    sta->addr, tid);
+
+       if (!(priv->cfg->sku & IWL_SKU_N))
+               return -EACCES;
+
+       mutex_lock(&priv->mutex);
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               IWL_DEBUG_HT(priv, "start Rx\n");
+               ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               IWL_DEBUG_HT(priv, "stop Rx\n");
+               ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               IWL_DEBUG_HT(priv, "start Tx\n");
+               ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
+               if (ret == 0) {
+                       priv->_4965.agg_tids_count++;
+                       IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+                                    priv->_4965.agg_tids_count);
+               }
+               break;
+       case IEEE80211_AMPDU_TX_STOP:
+               IWL_DEBUG_HT(priv, "stop Tx\n");
+               ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
+               if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
+                       priv->_4965.agg_tids_count--;
+                       IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+                                    priv->_4965.agg_tids_count);
+               }
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ret = 0;
+               break;
+       }
+       mutex_unlock(&priv->mutex);
+
+       return ret;
+}
+
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+       int ret;
+       u8 sta_id;
+
+       IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+                       sta->addr);
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+                       sta->addr);
+       sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+       atomic_set(&sta_priv->pending_frames, 0);
+
+       ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
+                                    is_ap, sta, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+                       sta->addr, ret);
+               /* Should we return success if return code is EEXIST? */
+               mutex_unlock(&priv->mutex);
+               return ret;
+       }
+
+       sta_priv->common.sta_id = sta_id;
+
+       /* Initialize rate scaling */
+       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+                      sta->addr);
+       iwl4965_rs_rate_init(priv, sta, sta_id);
+       mutex_unlock(&priv->mutex);
+
+       return 0;
+}
+
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch)
+{
+       struct iwl_priv *priv = hw->priv;
+       const struct iwl_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = ch_switch->channel;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       u16 ch;
+       unsigned long flags = 0;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (iwl_legacy_is_rfkill(priv))
+               goto out_exit;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           test_bit(STATUS_SCANNING, &priv->status))
+               goto out_exit;
+
+       if (!iwl_legacy_is_associated_ctx(ctx))
+               goto out_exit;
+
+       /* channel switch in progress */
+       if (priv->switch_rxon.switch_in_progress)
+               goto out_exit;
+
+       mutex_lock(&priv->mutex);
+       if (priv->cfg->ops->lib->set_channel_switch) {
+
+               ch = channel->hw_value;
+               if (le16_to_cpu(ctx->active.channel) != ch) {
+                       ch_info = iwl_legacy_get_channel_info(priv,
+                                                      channel->band,
+                                                      ch);
+                       if (!iwl_legacy_is_channel_valid(ch_info)) {
+                               IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+                               goto out;
+                       }
+                       spin_lock_irqsave(&priv->lock, flags);
+
+                       priv->current_ht_config.smps = conf->smps_mode;
+
+                       /* Configure HT40 channels */
+                       ctx->ht.enabled = conf_is_ht(conf);
+                       if (ctx->ht.enabled) {
+                               if (conf_is_ht40_minus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                                       ctx->ht.is_40mhz = true;
+                               } else if (conf_is_ht40_plus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                                       ctx->ht.is_40mhz = true;
+                               } else {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                                       ctx->ht.is_40mhz = false;
+                               }
+                       } else
+                               ctx->ht.is_40mhz = false;
+
+                       if (le16_to_cpu(ctx->staging.channel) != ch)
+                               ctx->staging.flags = 0;
+
+                       iwl_legacy_set_rxon_channel(priv, channel, ctx);
+                       iwl_legacy_set_rxon_ht(priv, ht_conf);
+                       iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+                                              ctx->vif);
+                       spin_unlock_irqrestore(&priv->lock, flags);
+
+                       iwl_legacy_set_rate(priv);
+                       /*
+                        * at this point, staging_rxon has the
+                        * configuration for channel switch
+                        */
+                       if (priv->cfg->ops->lib->set_channel_switch(priv,
+                                                                   ch_switch))
+                               priv->switch_rxon.switch_in_progress = false;
+               }
+       }
+out:
+       mutex_unlock(&priv->mutex);
+out_exit:
+       if (!priv->switch_rxon.switch_in_progress)
+               ieee80211_chswitch_done(ctx->vif, false);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+       struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       for_each_context(priv, ctx) {
+               ctx->staging.filter_flags &= ~filter_nand;
+               ctx->staging.filter_flags |= filter_or;
+
+               /*
+                * Not committing directly because hardware can perform a scan,
+                * but we'll eventually commit the filter flags change anyway.
+                */
+       }
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_legacy_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_txpower_work(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                       txpower_work);
+
+       /* If a scan happened to start before we got here
+        * then just return; the statistics notification will
+        * kick off another scheduled work to compensate for
+        * any temperature delta we missed here. */
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           test_bit(STATUS_SCANNING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+
+       /* Regardless of whether we are associated, we must reconfigure the
+        * TX power since frames can be sent on non-radar channels while
+        * not associated */
+       priv->cfg->ops->lib->send_tx_power(priv);
+
+       /* Update last_temperature to keep is_calib_needed from running
+        * when it isn't needed... */
+       priv->last_temperature = priv->temperature;
+
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
+{
+       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+       init_waitqueue_head(&priv->wait_command_queue);
+
+       INIT_WORK(&priv->restart, iwl4965_bg_restart);
+       INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
+       INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
+       INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
+       INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
+
+       iwl_legacy_setup_scan_deferred_work(priv);
+
+       INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
+
+       init_timer(&priv->statistics_periodic);
+       priv->statistics_periodic.data = (unsigned long)priv;
+       priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
+
+       init_timer(&priv->ucode_trace);
+       priv->ucode_trace.data = (unsigned long)priv;
+       priv->ucode_trace.function = iwl4965_bg_ucode_trace;
+
+       init_timer(&priv->watchdog);
+       priv->watchdog.data = (unsigned long)priv;
+       priv->watchdog.function = iwl_legacy_bg_watchdog;
+
+       tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+               iwl4965_irq_tasklet, (unsigned long)priv);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
+{
+       cancel_work_sync(&priv->txpower_work);
+       cancel_delayed_work_sync(&priv->init_alive_start);
+       cancel_delayed_work(&priv->alive_start);
+       cancel_work_sync(&priv->run_time_calib_work);
+
+       iwl_legacy_cancel_scan_deferred_work(priv);
+
+       del_timer_sync(&priv->statistics_periodic);
+       del_timer_sync(&priv->ucode_trace);
+}
+
+static void iwl4965_init_hw_rates(struct iwl_priv *priv,
+                             struct ieee80211_rate *rates)
+{
+       int i;
+
+       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
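+               /* .ieee holds the rate in 500 kbps units; mac80211 bitrates
+                * are in 100 kbps units, hence the factor of 5 */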
+               rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
+               rates[i].hw_value = i; /* Rate scaling will work on indexes */
+               rates[i].hw_value_short = i;
+               rates[i].flags = 0;
+               if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
+                       /*
+                        * If CCK != 1M then set short preamble rate flag.
+                        */
+                       rates[i].flags |=
+                               (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
+                                       0 : IEEE80211_RATE_SHORT_PREAMBLE;
+               }
+       }
+}
+
+/*
+ * Acquire priv->lock before calling this function!
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
+{
+       iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+                            (index & 0xff) | (txq_id << 8));
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       int tx_fifo_id, int scd_retry)
+{
+       int txq_id = txq->q.id;
+
+       /* Find out whether to activate Tx queue */
+       int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+       /* Set up and activate */
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+                        (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                        (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+                        IWL49_SCD_QUEUE_STTS_REG_MSK);
+
+       txq->sched_retry = scd_retry;
+
+       IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
+                      active ? "Activate" : "Deactivate",
+                      scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+
+static int iwl4965_init_drv(struct iwl_priv *priv)
+{
+       int ret;
+
+       spin_lock_init(&priv->sta_lock);
+       spin_lock_init(&priv->hcmd_lock);
+
+       INIT_LIST_HEAD(&priv->free_frames);
+
+       mutex_init(&priv->mutex);
+       mutex_init(&priv->sync_cmd_mutex);
+
+       priv->ieee_channels = NULL;
+       priv->ieee_rates = NULL;
+       priv->band = IEEE80211_BAND_2GHZ;
+
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+       priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+       priv->_4965.agg_tids_count = 0;
+
+       /* initialize force reset */
+       priv->force_reset[IWL_RF_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_RF_RESET;
+       priv->force_reset[IWL_FW_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+       /* Choose which receivers/antennas to use */
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv,
+                                       &priv->contexts[IWL_RXON_CTX_BSS]);
+
+       iwl_legacy_init_scan_params(priv);
+
+       /* Set tx_power_user_lmt to the lowest power level; this value
+        * will be overwritten by the channel max power average
+        * from EEPROM */
+       priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
+       priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
+
+       ret = iwl_legacy_init_channel_map(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+               goto err;
+       }
+
+       ret = iwl_legacy_init_geos(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+               goto err_free_channel_map;
+       }
+       iwl4965_init_hw_rates(priv, priv->ieee_rates);
+
+       return 0;
+
+err_free_channel_map:
+       iwl_legacy_free_channel_map(priv);
+err:
+       return ret;
+}
+
+static void iwl4965_uninit_drv(struct iwl_priv *priv)
+{
+       iwl4965_calib_free_results(priv);
+       iwl_legacy_free_geos(priv);
+       iwl_legacy_free_channel_map(priv);
+       kfree(priv->scan_cmd);
+}
+
+static void iwl4965_hw_detect(struct iwl_priv *priv)
+{
+       priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
+       priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
+       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+       IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
+}
+
+static int iwl4965_set_hw_params(struct iwl_priv *priv)
+{
+       priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+       priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+       if (priv->cfg->mod_params->amsdu_size_8K)
+               priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
+       else
+               priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
+
+       priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
+
+       if (priv->cfg->mod_params->disable_11n)
+               priv->cfg->sku &= ~IWL_SKU_N;
+
+       /* Device-specific setup */
+       return priv->cfg->ops->lib->set_hw_params(priv);
+}
+
+static const u8 iwl4965_bss_ac_to_fifo[] = {
+       IWL_TX_FIFO_VO,
+       IWL_TX_FIFO_VI,
+       IWL_TX_FIFO_BE,
+       IWL_TX_FIFO_BK,
+};
+
+static const u8 iwl4965_bss_ac_to_queue[] = {
+       0, 1, 2, 3,
+};
+
+static int
+iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       int err = 0, i;
+       struct iwl_priv *priv;
+       struct ieee80211_hw *hw;
+       struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+       unsigned long flags;
+       u16 pci_cmd;
+
+       /************************
+        * 1. Allocating HW data
+        ************************/
+
+       hw = iwl_legacy_alloc_all(cfg);
+       if (!hw) {
+               err = -ENOMEM;
+               goto out;
+       }
+       priv = hw->priv;
+       /* At this point both hw and priv are allocated. */
+
+       /*
+        * The default context is always valid,
+        * more may be discovered when firmware
+        * is loaded.
+        */
+       priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+
+       for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+               priv->contexts[i].ctxid = i;
+
+       priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+       priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+       priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+       priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+       priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+       priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
+       priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
+       priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+               BIT(NL80211_IFTYPE_ADHOC);
+       priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+               BIT(NL80211_IFTYPE_STATION);
+       priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+       priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+       priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+       priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+       BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
+
+       SET_IEEE80211_DEV(hw, &pdev->dev);
+
+       IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+       priv->cfg = cfg;
+       priv->pci_dev = pdev;
+       priv->inta_mask = CSR_INI_SET_MASK;
+
+       if (iwl_legacy_alloc_traffic_mem(priv))
+               IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+       /**************************
+        * 2. Initializing PCI bus
+        **************************/
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                               PCIE_LINK_STATE_CLKPM);
+
+       if (pci_enable_device(pdev)) {
+               err = -ENODEV;
+               goto out_ieee80211_free_hw;
+       }
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (err) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!err)
+                       err = pci_set_consistent_dma_mask(pdev,
+                                                       DMA_BIT_MASK(32));
+               /* both attempts failed: */
+               if (err) {
+                       IWL_WARN(priv, "No suitable DMA available.\n");
+                       goto out_pci_disable_device;
+               }
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err)
+               goto out_pci_disable_device;
+
+       pci_set_drvdata(pdev, priv);
+
+       /***********************
+        * 3. Read REV register
+        ***********************/
+       priv->hw_base = pci_iomap(pdev, 0, 0);
+       if (!priv->hw_base) {
+               err = -ENODEV;
+               goto out_pci_release_regions;
+       }
+
+       IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
+               (unsigned long long) pci_resource_len(pdev, 0));
+       IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
+
+       /* These spin locks are used in apm_ops.init and during
+        * EEPROM access, so initialize them now.
+        */
+       spin_lock_init(&priv->reg_lock);
+       spin_lock_init(&priv->lock);
+
+       /*
+        * stop and reset the on-board processor just in case it is in a
+        * strange state ... like being left stranded by a primary kernel
+        * and this is now the kdump kernel trying to start up
+        */
+       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       iwl4965_hw_detect(priv);
+       IWL_INFO(priv, "Detected %s, REV=0x%X\n",
+               priv->cfg->name, priv->hw_rev);
+
+       /* We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state */
+       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+       iwl4965_prepare_card_hw(priv);
+       if (!priv->hw_ready) {
+               IWL_WARN(priv, "Failed, HW not ready\n");
+               goto out_iounmap;
+       }
+
+       /*****************
+        * 4. Read EEPROM
+        *****************/
+       /* Read the EEPROM */
+       err = iwl_legacy_eeprom_init(priv);
+       if (err) {
+               IWL_ERR(priv, "Unable to init EEPROM\n");
+               goto out_iounmap;
+       }
+       err = iwl4965_eeprom_check_version(priv);
+       if (err)
+               goto out_free_eeprom;
+
+       /* extract MAC Address */
+       iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
+       IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
+       priv->hw->wiphy->addresses = priv->addresses;
+       priv->hw->wiphy->n_addresses = 1;
+
+       /************************
+        * 5. Setup HW constants
+        ************************/
+       if (iwl4965_set_hw_params(priv)) {
+               IWL_ERR(priv, "failed to set hw parameters\n");
+               goto out_free_eeprom;
+       }
+
+       /*******************
+        * 6. Setup priv
+        *******************/
+
+       err = iwl4965_init_drv(priv);
+       if (err)
+               goto out_free_eeprom;
+       /* At this point both hw and priv are initialized. */
+
+       /********************
+        * 7. Setup services
+        ********************/
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       pci_enable_msi(priv->pci_dev);
+
+       err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
+                         IRQF_SHARED, DRV_NAME, priv);
+       if (err) {
+               IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
+               goto out_disable_msi;
+       }
+
+       iwl4965_setup_deferred_work(priv);
+       iwl4965_setup_rx_handlers(priv);
+
+       /*********************************************
+        * 8. Enable interrupts and read RFKILL state
+        *********************************************/
+
+       /* enable interrupts if needed: workaround for a hw bug */
+       pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
+       if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+               pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+               pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
+       }
+
+       iwl_legacy_enable_interrupts(priv);
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (iwl_read32(priv, CSR_GP_CNTRL) &
+               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+               test_bit(STATUS_RF_KILL_HW, &priv->status));
+
+       iwl_legacy_power_initialize(priv);
+
+       init_completion(&priv->_4965.firmware_loading_complete);
+
+       err = iwl4965_request_firmware(priv, true);
+       if (err)
+               goto out_destroy_workqueue;
+
+       return 0;
+
+ out_destroy_workqueue:
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+       free_irq(priv->pci_dev->irq, priv);
+ out_disable_msi:
+       pci_disable_msi(priv->pci_dev);
+       iwl4965_uninit_drv(priv);
+ out_free_eeprom:
+       iwl_legacy_eeprom_free(priv);
+ out_iounmap:
+       pci_iounmap(pdev, priv->hw_base);
+ out_pci_release_regions:
+       pci_set_drvdata(pdev, NULL);
+       pci_release_regions(pdev);
+ out_pci_disable_device:
+       pci_disable_device(pdev);
+ out_ieee80211_free_hw:
+       iwl_legacy_free_traffic_mem(priv);
+       ieee80211_free_hw(priv->hw);
+ out:
+       return err;
+}
+
+static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
+{
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+       unsigned long flags;
+
+       if (!priv)
+               return;
+
+       wait_for_completion(&priv->_4965.firmware_loading_complete);
+
+       IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+
+       iwl_legacy_dbgfs_unregister(priv);
+       sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
+
+       /* The ieee80211_unregister_hw call will cause iwl_mac_stop and
+        * iwl4965_down to be called; since we are removing the device
+        * we need to set the STATUS_EXIT_PENDING bit first.
+        */
+       set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       iwl_legacy_leds_exit(priv);
+
+       if (priv->mac80211_registered) {
+               ieee80211_unregister_hw(priv->hw);
+               priv->mac80211_registered = 0;
+       } else {
+               iwl4965_down(priv);
+       }
+
+       /*
+        * Make sure the device is reset to low power before unloading the
+        * driver. This may be redundant with iwl4965_down(), but there are
+        * paths that run iwl4965_down() without calling apm_ops.stop(), and
+        * paths that avoid running iwl4965_down() at all before leaving the
+        * driver. This (inexpensive) call *makes sure* the device is reset.
+        */
+       iwl_legacy_apm_stop(priv);
+
+       /* make sure we flush any pending irq or
+        * tasklet for the driver
+        */
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl4965_synchronize_irq(priv);
+
+       iwl4965_dealloc_ucode_pci(priv);
+
+       if (priv->rxq.bd)
+               iwl4965_rx_queue_free(priv, &priv->rxq);
+       iwl4965_hw_txq_ctx_free(priv);
+
+       iwl_legacy_eeprom_free(priv);
+
+       /*netif_stop_queue(dev); */
+       flush_workqueue(priv->workqueue);
+
+       /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
+        * priv->workqueue... so we can't take down the workqueue
+        * until now... */
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+       iwl_legacy_free_traffic_mem(priv);
+
+       free_irq(priv->pci_dev->irq, priv);
+       pci_disable_msi(priv->pci_dev);
+       pci_iounmap(pdev, priv->hw_base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       iwl4965_uninit_drv(priv);
+
+       dev_kfree_skb(priv->beacon_skb);
+
+       ieee80211_free_hw(priv->hw);
+}
+
+/*
+ * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
+ * Must be called under priv->lock and with MAC access granted.
+ */
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+       iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
+}
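+
iwl4965_txq_set_sched() is a thin wrapper around a scheduler register write, and the comment above requires priv->lock to be held and MAC access to be granted (the latter is omitted here). A minimal caller sketch — illustrative only, not part of this patch — could look like:

        /* Hypothetical caller: enable scheduling for the Tx FIFO channels
         * selected by "mask" while holding priv->lock. */
        static void example_txq_enable_sched(struct iwl_priv *priv, u32 mask)
        {
                unsigned long flags;

                spin_lock_irqsave(&priv->lock, flags);
                iwl4965_txq_set_sched(priv, mask);
                spin_unlock_irqrestore(&priv->lock, flags);
        }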
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* The hardware-specific file defines the PCI ID table for its hardware module */
+static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
+#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
+       {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
+       {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
+#endif /* CONFIG_IWL4965 */
+
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
+
+static struct pci_driver iwl4965_driver = {
+       .name = DRV_NAME,
+       .id_table = iwl4965_hw_card_ids,
+       .probe = iwl4965_pci_probe,
+       .remove = __devexit_p(iwl4965_pci_remove),
+       .driver.pm = IWL_LEGACY_PM_OPS,
+};
+
+static int __init iwl4965_init(void)
+{
+       int ret;
+
+       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+       pr_info(DRV_COPYRIGHT "\n");
+
+       ret = iwl4965_rate_control_register();
+       if (ret) {
+               pr_err("Unable to register rate control algorithm: %d\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&iwl4965_driver);
+       if (ret) {
+               pr_err("Unable to initialize PCI module\n");
+               goto error_register;
+       }
+
+       return ret;
+
+error_register:
+       iwl4965_rate_control_unregister();
+       return ret;
+}
+
+static void __exit iwl4965_exit(void)
+{
+       pci_unregister_driver(&iwl4965_driver);
+       iwl4965_rate_control_unregister();
+}
+
+module_exit(iwl4965_exit);
+module_init(iwl4965_init);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
+module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
+                  int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
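+
Most of the parameters above bind fields of iwl4965_mod_params, which the per-device configuration exposes as cfg->mod_params; iwl4965_set_hw_params() earlier in this file shows amsdu_size_8K and disable_11n being consumed through that pointer. For reference, a self-contained toy example of the module_param_named() pattern used here — not part of this driver — is:

        #include <linux/module.h>
        #include <linux/moduleparam.h>

        /* Expose an internal variable under a different parameter name,
         * readable via sysfs (S_IRUGO) but not writable at runtime. */
        static int example_fw_restart = 1;
        module_param_named(fw_restart, example_fw_restart, int, S_IRUGO);
        MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");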
index ed42457..17d555f 100644 (file)
@@ -1,14 +1,52 @@
-config IWLWIFI
-       tristate "Intel Wireless Wifi"
+config IWLAGN
+       tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn)"
        depends on PCI && MAC80211
        select FW_LOADER
+       select NEW_LEDS
+       select LEDS_CLASS
+       select LEDS_TRIGGERS
+       select MAC80211_LEDS
+       ---help---
+         Select to build the driver supporting the:
+
+         Intel Wireless WiFi Link Next-Gen AGN
+
+         This option enables support for use with the following hardware:
+               Intel Wireless WiFi Link 6250AGN Adapter
+               Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
+               Intel WiFi Link 1000BGN
+               Intel Wireless WiFi 5150AGN
+               Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
+               Intel 6005 Series Wi-Fi Adapters
+               Intel 6030 Series Wi-Fi Adapters
+               Intel Wireless WiFi Link 6150BGN 2 Adapter
+               Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
+               Intel 2000 Series Wi-Fi Adapters
+
+
+         This driver uses the kernel's mac80211 subsystem.
+
+         In order to use this driver, you will need a microcode (uCode)
+         image for it. You can obtain the microcode from:
+
+                 <http://intellinuxwireless.org/>.
+
+         The microcode is typically installed in /lib/firmware. You can
+         look in the hotplug script /etc/hotplug/firmware.agent to
+         determine which directory FIRMWARE_DIR is set to when the script
+         runs.
+
+         If you want to compile the driver as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/kbuild/modules.txt>.  The
+         module will be called iwlagn.
 
 menu "Debugging Options"
-       depends on IWLWIFI
+       depends on IWLAGN
 
 config IWLWIFI_DEBUG
-       bool "Enable full debugging output in iwlagn and iwl3945 drivers"
-       depends on IWLWIFI
+       bool "Enable full debugging output in the iwlagn driver"
+       depends on IWLAGN
        ---help---
          This option will enable debug tracing output for the iwlwifi drivers
 
@@ -33,7 +71,7 @@ config IWLWIFI_DEBUG
 
 config IWLWIFI_DEBUGFS
         bool "iwlagn debugfs support"
-        depends on IWLWIFI && MAC80211_DEBUGFS
+        depends on IWLAGN && MAC80211_DEBUGFS
         ---help---
          Enable creation of debugfs files for the iwlwifi drivers. This
          is a low-impact option that allows getting insight into the
@@ -41,13 +79,13 @@ config IWLWIFI_DEBUGFS
 
 config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
         bool "Experimental uCode support"
-        depends on IWLWIFI && IWLWIFI_DEBUG
+        depends on IWLAGN && IWLWIFI_DEBUG
         ---help---
          Enable use of experimental ucode for testing and debugging.
 
 config IWLWIFI_DEVICE_TRACING
        bool "iwlwifi device access tracing"
-       depends on IWLWIFI
+       depends on IWLAGN
        depends on EVENT_TRACING
        help
          Say Y here to trace all commands, including TX frames and IO
@@ -64,73 +102,19 @@ config IWLWIFI_DEVICE_TRACING
          occur.
 endmenu
 
-config IWLAGN
-       tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
-       depends on IWLWIFI
-       ---help---
-         Select to build the driver supporting the:
-
-         Intel Wireless WiFi Link Next-Gen AGN
-
-         This driver uses the kernel's mac80211 subsystem.
-
-         In order to use this driver, you will need a microcode (uCode)
-         image for it. You can obtain the microcode from:
-
-                 <http://intellinuxwireless.org/>.
-
-         The microcode is typically installed in /lib/firmware. You can
-         look in the hotplug script /etc/hotplug/firmware.agent to
-         determine which directory FIRMWARE_DIR is set to when the script
-         runs.
-
-         If you want to compile the driver as a module ( = code which can be
-         inserted in and removed from the running kernel whenever you want),
-         say M here and read <file:Documentation/kbuild/modules.txt>.  The
-         module will be called iwlagn.
-
-
-config IWL4965
-       bool "Intel Wireless WiFi 4965AGN"
-       depends on IWLAGN
-       ---help---
-         This option enables support for Intel Wireless WiFi Link 4965AGN
-
-config IWL5000
-       bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
+config IWL_P2P
+       bool "iwlwifi experimental P2P support"
        depends on IWLAGN
-       ---help---
-         This option enables support for use with the following hardware:
-               Intel Wireless WiFi Link 6250AGN Adapter
-               Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
-               Intel WiFi Link 1000BGN
-               Intel Wireless WiFi 5150AGN
-               Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
-               Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
-               Intel WIreless WiFi Link 6050BGN Gen 2 Adapter
-               Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
-
-config IWL3945
-       tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
-       depends on IWLWIFI
-       ---help---
-         Select to build the driver supporting the:
-
-         Intel PRO/Wireless 3945ABG/BG Network Connection
-
-         This driver uses the kernel's mac80211 subsystem.
-
-         In order to use this driver, you will need a microcode (uCode)
-         image for it. You can obtain the microcode from:
+       help
+         This option enables experimental P2P support for some devices
+         based on microcode support. Since P2P support is still under
+         development, this option may enable it now for devices that
+         later turn out not to support it because of microcode
+         restrictions.
 
-                 <http://intellinuxwireless.org/>.
+         To determine if your microcode supports the experimental P2P
+         offered by this option, check if the driver advertises AP
+         support when it is loaded.
 
-         The microcode is typically installed in /lib/firmware. You can
-         look in the hotplug script /etc/hotplug/firmware.agent to
-         determine which directory FIRMWARE_DIR is set to when the script
-         runs.
+         Say Y only if you want to experiment with P2P.
 
-         If you want to compile the driver as a module ( = code which can be
-         inserted in and removed from the running kernel whenever you want),
-         say M here and read <file:Documentation/kbuild/modules.txt>.  The
-         module will be called iwl3945.
index 93380f9..9d6ee83 100644 (file)
@@ -1,35 +1,23 @@
-obj-$(CONFIG_IWLWIFI)  += iwlcore.o
-iwlcore-objs           := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlcore-objs           += iwl-rx.o iwl-tx.o iwl-sta.o
-iwlcore-objs           += iwl-scan.o iwl-led.o
-iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
-iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
-iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-
-# If 3945 is selected only, iwl-legacy.o will be added
-# to iwlcore-m above, but it needs to be built in.
-iwlcore-objs += $(iwlcore-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
-
 # AGN
 obj-$(CONFIG_IWLAGN)   += iwlagn.o
 iwlagn-objs            := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
 iwlagn-objs            += iwl-agn-ucode.o iwl-agn-tx.o
-iwlagn-objs            += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
+iwlagn-objs            += iwl-agn-lib.o iwl-agn-calib.o
 iwlagn-objs            += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
-iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
 
-iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
-iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
-iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
+iwlagn-objs            += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwlagn-objs            += iwl-rx.o iwl-tx.o iwl-sta.o
+iwlagn-objs            += iwl-scan.o iwl-led.o
+iwlagn-objs             += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
+iwlagn-objs             += iwl-5000.o
+iwlagn-objs             += iwl-6000.o
+iwlagn-objs             += iwl-1000.o
+iwlagn-objs             += iwl-2000.o
+
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 
-# 3945
-obj-$(CONFIG_IWL3945)  += iwl3945.o
-iwl3945-objs           := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
+CFLAGS_iwl-devtrace.o := -I$(src)
 
 ccflags-y += -D__CHECK_ENDIAN__
index ba78bc8..e8e1c2d 100644 (file)
@@ -232,8 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644 (file)
index 0000000..d7b6126
--- /dev/null
@@ -0,0 +1,560 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-sta.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-6000-hw.h"
+#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 5
+#define IWL2000_UCODE_API_MAX 5
+#define IWL200_UCODE_API_MAX 5
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL200_UCODE_API_MIN 5
+
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
+
+#define IWL200_FW_PRE "iwlwifi-200-"
+#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
+#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
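+
The *_FW_PRE / *_MODULE_FIRMWARE pairs use a two-level macro so that the API number is expanded before '#' stringification; the result is then glued to the prefix and ".ucode" by string-literal concatenation. A standalone sketch of the same idiom, with illustrative names but the same values as above:

        #define EXAMPLE_API_MAX 5
        #define EXAMPLE_FW_PRE "iwlwifi-2030-"
        #define _EXAMPLE_FW(api) EXAMPLE_FW_PRE #api ".ucode"
        #define EXAMPLE_FW(api) _EXAMPLE_FW(api)

        /* EXAMPLE_FW(EXAMPLE_API_MAX) expands to "iwlwifi-2030-" "5" ".ucode",
         * i.e. "iwlwifi-2030-5.ucode"; calling _EXAMPLE_FW() directly would
         * stringify the macro name instead of its value. */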
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+       /* want Celsius */
+       priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+       priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+       u16 radio_cfg;
+
+       radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+       /* write radio config values to register */
+       if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
+               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+                           EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+                           EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+       /* set CSR_HW_CONFIG_REG for uCode use */
+       iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+                   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+       if (priv->cfg->iq_invert)
+               iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+                           CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+}
+
+static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+       .min_nrg_cck = 97,
+       .max_nrg_cck = 0, /* not used, set to 0 */
+       .auto_corr_min_ofdm = 80,
+       .auto_corr_min_ofdm_mrc = 128,
+       .auto_corr_min_ofdm_x1 = 105,
+       .auto_corr_min_ofdm_mrc_x1 = 192,
+
+       .auto_corr_max_ofdm = 145,
+       .auto_corr_max_ofdm_mrc = 232,
+       .auto_corr_max_ofdm_x1 = 110,
+       .auto_corr_max_ofdm_mrc_x1 = 232,
+
+       .auto_corr_min_cck = 125,
+       .auto_corr_max_cck = 175,
+       .auto_corr_min_cck_mrc = 160,
+       .auto_corr_max_cck_mrc = 310,
+       .nrg_th_cck = 97,
+       .nrg_th_ofdm = 100,
+
+       .barker_corr_th_min = 190,
+       .barker_corr_th_min_mrc = 390,
+       .nrg_th_cca = 62,
+};
+
+static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
+       if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+           priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+               priv->cfg->base_params->num_of_queues =
+                       priv->cfg->mod_params->num_of_queues;
+
+       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+       priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+       priv->hw_params.scd_bc_tbls_size =
+               priv->cfg->base_params->num_of_queues *
+               sizeof(struct iwlagn_scd_bc_tbl);
+       priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+       priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
+
+       priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+       priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+       priv->hw_params.max_bsm_size = 0;
+       priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
+                                       BIT(IEEE80211_BAND_5GHZ);
+       priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+       priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+       if (priv->cfg->rx_with_siso_diversity)
+               priv->hw_params.rx_chains_num = 1;
+       else
+               priv->hw_params.rx_chains_num =
+                       num_of_ant(priv->cfg->valid_rx_ant);
+       priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+       priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+       iwl2000_set_ct_threshold(priv);
+
+       /* Set initial sensitivity parameters */
+       /* Set initial calibration set */
+       priv->hw_params.sens = &iwl2000_sensitivity;
+       priv->hw_params.calib_init_cfg =
+               BIT(IWL_CALIB_XTAL)             |
+               BIT(IWL_CALIB_LO)               |
+               BIT(IWL_CALIB_TX_IQ)            |
+               BIT(IWL_CALIB_BASE_BAND);
+       if (priv->cfg->need_dc_calib)
+               priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+       if (priv->cfg->need_temp_offset_calib)
+               priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+
+       priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
+       return 0;
+}
+
+static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
+                                    struct ieee80211_channel_switch *ch_switch)
+{
+       /*
+        * MULTI-FIXME
+        * See iwl_mac_channel_switch.
+        */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl6000_channel_switch_cmd cmd;
+       const struct iwl_channel_info *ch_info;
+       u32 switch_time_in_usec, ucode_switch_time;
+       u16 ch;
+       u32 tsf_low;
+       u8 switch_count;
+       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+       struct ieee80211_vif *vif = ctx->vif;
+       struct iwl_host_cmd hcmd = {
+               .id = REPLY_CHANNEL_SWITCH,
+               .len = sizeof(cmd),
+               .flags = CMD_SYNC,
+               .data = &cmd,
+       };
+
+       cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+       ch = ch_switch->channel->hw_value;
+       IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+               ctx->active.channel, ch);
+       cmd.channel = cpu_to_le16(ch);
+       cmd.rxon_flags = ctx->staging.flags;
+       cmd.rxon_filter_flags = ctx->staging.filter_flags;
+       switch_count = ch_switch->count;
+       tsf_low = ch_switch->timestamp & 0x0ffffffff;
+       /*
+        * Calculate the uCode channel switch time, taking the TSF
+        * into account as one of the factors for when to switch.
+        */
+       if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+               if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+                   beacon_interval)) {
+                       switch_count -= (priv->ucode_beacon_time -
+                               tsf_low) / beacon_interval;
+               } else
+                       switch_count = 0;
+       }
+       if (switch_count <= 1)
+               cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+       else {
+               switch_time_in_usec =
+                       vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+               ucode_switch_time = iwl_usecs_to_beacons(priv,
+                                               switch_time_in_usec,
+                                               beacon_interval);
+               cmd.switch_time = iwl_add_beacon_time(priv,
+                                               priv->ucode_beacon_time,
+                                               ucode_switch_time,
+                                               beacon_interval);
+       }
+       IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+                     cmd.switch_time);
+       ch_info = iwl_get_channel_info(priv, priv->band, ch);
+       if (ch_info)
+               cmd.expect_beacon = is_channel_radar(ch_info);
+       else {
+               IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+                       ctx->active.channel, ch);
+               return -EFAULT;
+       }
+       priv->switch_rxon.channel = cmd.channel;
+       priv->switch_rxon.switch_in_progress = true;
+
+       return iwl_send_cmd_sync(priv, &hcmd);
+}
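+
For a rough sense of the arithmetic above: assuming TIME_UNIT is the 1024 µs TU used elsewhere in this driver family, a beacon interval of 100 TU and an effective switch_count of 5 give switch_time_in_usec = 100 * 5 * 1024 = 512000 µs, which iwl_usecs_to_beacons() converts back to beacon units before it is added to the last uCode beacon time to pick the switch point.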
+
+static struct iwl_lib_ops iwl2000_lib = {
+       .set_hw_params = iwl2000_hw_set_hw_params,
+       .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+       .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+       .txq_set_sched = iwlagn_txq_set_sched,
+       .txq_agg_enable = iwlagn_txq_agg_enable,
+       .txq_agg_disable = iwlagn_txq_agg_disable,
+       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = iwl_hw_txq_free_tfd,
+       .txq_init = iwl_hw_tx_queue_init,
+       .rx_handler_setup = iwlagn_rx_handler_setup,
+       .setup_deferred_work = iwlagn_bt_setup_deferred_work,
+       .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
+       .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+       .load_ucode = iwlagn_load_ucode,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
+       .dump_csr = iwl_dump_csr,
+       .dump_fh = iwl_dump_fh,
+       .init_alive_start = iwlagn_init_alive_start,
+       .alive_notify = iwlagn_alive_notify,
+       .send_tx_power = iwlagn_send_tx_power,
+       .update_chain_flags = iwl_update_chain_flags,
+       .set_channel_switch = iwl2030_hw_channel_switch,
+       .apm_ops = {
+               .init = iwl_apm_init,
+               .config = iwl2000_nic_config,
+       },
+       .eeprom_ops = {
+               .regulatory_bands = {
+                       EEPROM_REG_BAND_1_CHANNELS,
+                       EEPROM_REG_BAND_2_CHANNELS,
+                       EEPROM_REG_BAND_3_CHANNELS,
+                       EEPROM_REG_BAND_4_CHANNELS,
+                       EEPROM_REG_BAND_5_CHANNELS,
+                       EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+                       EEPROM_REG_BAND_52_HT40_CHANNELS
+               },
+               .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+               .release_semaphore = iwlcore_eeprom_release_semaphore,
+               .calib_version  = iwlagn_eeprom_calib_version,
+               .query_addr = iwlagn_eeprom_query_addr,
+               .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+       },
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
+       .temp_ops = {
+               .temperature = iwlagn_temperature,
+       },
+       .debugfs_ops = {
+               .rx_stats_read = iwl_ucode_rx_stats_read,
+               .tx_stats_read = iwl_ucode_tx_stats_read,
+               .general_stats_read = iwl_ucode_general_stats_read,
+               .bt_stats_read = iwl_ucode_bt_stats_read,
+               .reply_tx_error = iwl_reply_tx_error_read,
+       },
+       .txfifo_flush = iwlagn_txfifo_flush,
+       .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+       .tt_ops = {
+               .lower_power_detection = iwl_tt_is_low_power_state,
+               .tt_power_mode = iwl_tt_current_power_mode,
+               .ct_kill_check = iwl_check_for_ct_kill,
+       }
+};
+
+static const struct iwl_ops iwl2000_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl2030_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_bt_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl200_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl230_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_bt_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static struct iwl_base_params iwl2000_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .num_of_queues = IWLAGN_NUM_QUEUES,
+       .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+       .pll_cfg_val = 0,
+       .set_l0s = true,
+       .use_bsm = false,
+       .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+       .shadow_ram_support = true,
+       .led_compensation = 51,
+       .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .wd_timeout = IWL_DEF_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .ucode_tracing = true,
+       .sensitivity_calib_by_driver = true,
+       .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
+};
+
+static struct iwl_base_params iwl2030_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .num_of_queues = IWLAGN_NUM_QUEUES,
+       .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+       .pll_cfg_val = 0,
+       .set_l0s = true,
+       .use_bsm = false,
+       .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+       .shadow_ram_support = true,
+       .led_compensation = 57,
+       .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .wd_timeout = IWL_LONG_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .ucode_tracing = true,
+       .sensitivity_calib_by_driver = true,
+       .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
+};
+
+static struct iwl_ht_params iwl2000_ht_params = {
+       .ht_greenfield_support = true,
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static struct iwl_bt_params iwl2030_bt_params = {
+       .bt_statistics = true,
+       /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+       .advanced_bt_coexist = true,
+       .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+       .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+       .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+       .bt_sco_disable = true,
+       .bt_session_2 = true,
+};
+
+#define IWL_DEVICE_2000                                                \
+       .fw_name_pre = IWL2000_FW_PRE,                          \
+       .ucode_api_max = IWL2000_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL2000_UCODE_API_MIN,                 \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl2000_ops,                                    \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2000_base_params,                    \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .iq_invert = true                                       \
+
+struct iwl_cfg iwl2000_2bgn_cfg = {
+       .name = "2000 Series 2x2 BGN",
+       IWL_DEVICE_2000,
+       .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2000_2bg_cfg = {
+       .name = "2000 Series 2x2 BG",
+       IWL_DEVICE_2000,
+};
+
+#define IWL_DEVICE_2030                                                \
+       .fw_name_pre = IWL2030_FW_PRE,                          \
+       .ucode_api_max = IWL2030_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL2030_UCODE_API_MIN,                 \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl2030_ops,                                    \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2030_base_params,                    \
+       .bt_params = &iwl2030_bt_params,                        \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true,                                         \
+       .iq_invert = true                                       \
+
+struct iwl_cfg iwl2030_2bgn_cfg = {
+       .name = "2000 Series 2x2 BGN/BT",
+       IWL_DEVICE_2030,
+       .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2030_2bg_cfg = {
+       .name = "2000 Series 2x2 BG/BT",
+       IWL_DEVICE_2030,
+};
+
+#define IWL_DEVICE_6035                                                \
+       .fw_name_pre = IWL2030_FW_PRE,                          \
+       .ucode_api_max = IWL2030_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL2030_UCODE_API_MIN,                 \
+       .eeprom_ver = EEPROM_6035_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION,       \
+       .ops = &iwl2030_ops,                                    \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2030_base_params,                    \
+       .bt_params = &iwl2030_bt_params,                        \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true                                          \
+
+struct iwl_cfg iwl6035_2agn_cfg = {
+       .name = "2000 Series 2x2 AGN/BT",
+       IWL_DEVICE_6035,
+       .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl6035_2abg_cfg = {
+       .name = "2000 Series 2x2 ABG/BT",
+       IWL_DEVICE_6035,
+};
+
+struct iwl_cfg iwl6035_2bg_cfg = {
+       .name = "2000 Series 2x2 BG/BT",
+       IWL_DEVICE_6035,
+};
+
+#define IWL_DEVICE_200                                         \
+       .fw_name_pre = IWL200_FW_PRE,                           \
+       .ucode_api_max = IWL200_UCODE_API_MAX,                  \
+       .ucode_api_min = IWL200_UCODE_API_MIN,                  \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl200_ops,                                     \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2000_base_params,                    \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true,                                         \
+       .rx_with_siso_diversity = true                          \
+
+struct iwl_cfg iwl200_bg_cfg = {
+       .name = "200 Series 1x1 BG",
+       IWL_DEVICE_200,
+};
+
+struct iwl_cfg iwl200_bgn_cfg = {
+       .name = "200 Series 1x1 BGN",
+       IWL_DEVICE_200,
+       .ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_230                                         \
+       .fw_name_pre = IWL200_FW_PRE,                           \
+       .ucode_api_max = IWL200_UCODE_API_MAX,                  \
+       .ucode_api_min = IWL200_UCODE_API_MIN,                  \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl230_ops,                                     \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2030_base_params,                    \
+       .bt_params = &iwl2030_bt_params,                        \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true,                                         \
+       .rx_with_siso_diversity = true                          \
+
+struct iwl_cfg iwl230_bg_cfg = {
+       .name = "200 Series 1x1 BG/BT",
+       IWL_DEVICE_230,
+};
+
+struct iwl_cfg iwl230_bgn_cfg = {
+       .name = "200 Series 1x1 BGN/BT",
+       IWL_DEVICE_230,
+       .ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
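+
The exported iwl_cfg structs above are not referenced in this file; they are wired into the AGN PCI ID table elsewhere, following the same pattern as the 4965 table earlier in this patch. A minimal sketch — illustrative only, with a placeholder device ID rather than a real 2000-series PCI ID — would look like:

        static DEFINE_PCI_DEVICE_TABLE(example_2000_ids) = {
                {IWL_PCI_DEVICE(0x0000 /* placeholder ID */, PCI_ANY_ID,
                                iwl2000_2bgn_cfg)},
                {0}
        };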
index 79ab0a6..3ea31b6 100644 (file)
@@ -51,7 +51,7 @@
 #include "iwl-agn-debugfs.h"
 
 /* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
 /* Lowest firmware API version supported */
@@ -402,8 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
@@ -471,8 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
index af505bc..a745b01 100644 (file)
 #define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
 #define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
 
-#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
-#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
-#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
 
-#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
-#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
-#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
 
 static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 {
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
                                CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 }
 
-static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
+static void iwl6150_additional_nic_config(struct iwl_priv *priv)
 {
        /* Indicate calibration version to uCode. */
        if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -343,8 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
@@ -354,7 +352,7 @@ static struct iwl_lib_ops iwl6000_lib = {
        }
 };
 
-static struct iwl_lib_ops iwl6000g2b_lib = {
+static struct iwl_lib_ops iwl6030_lib = {
        .set_hw_params = iwl6000_hw_set_hw_params,
        .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
        .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -415,8 +413,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
@@ -430,8 +426,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
        .additional_nic_config = &iwl6050_additional_nic_config,
 };
 
-static struct iwl_nic_ops iwl6050g2_nic_ops = {
-       .additional_nic_config = &iwl6050g2_additional_nic_config,
+static struct iwl_nic_ops iwl6150_nic_ops = {
+       .additional_nic_config = &iwl6150_additional_nic_config,
 };
 
 static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +447,17 @@ static const struct iwl_ops iwl6050_ops = {
        .ieee80211_ops = &iwlagn_hw_ops,
 };
 
-static const struct iwl_ops iwl6050g2_ops = {
+static const struct iwl_ops iwl6150_ops = {
        .lib = &iwl6000_lib,
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
-       .nic = &iwl6050g2_nic_ops,
+       .nic = &iwl6150_nic_ops,
        .ieee80211_ops = &iwlagn_hw_ops,
 };
 
-static const struct iwl_ops iwl6000g2b_ops = {
-       .lib = &iwl6000g2b_lib,
+static const struct iwl_ops iwl6030_ops = {
+       .lib = &iwl6030_lib,
        .hcmd = &iwlagn_bt_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
@@ -479,7 +475,6 @@ static struct iwl_base_params iwl6000_base_params = {
        .shadow_ram_support = true,
        .led_compensation = 51,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
-       .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -503,7 +498,6 @@ static struct iwl_base_params iwl6050_base_params = {
        .shadow_ram_support = true,
        .led_compensation = 51,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
-       .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -526,7 +520,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
        .shadow_ram_support = true,
        .led_compensation = 57,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
-       .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -555,11 +548,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
 };
 
 #define IWL_DEVICE_6005                                                \
-       .fw_name_pre = IWL6000G2A_FW_PRE,                       \
+       .fw_name_pre = IWL6005_FW_PRE,                  \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
-       .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,             \
-       .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,     \
+       .eeprom_ver = EEPROM_6005_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,       \
        .ops = &iwl6000_ops,                                    \
        .mod_params = &iwlagn_mod_params,                       \
        .base_params = &iwl6000_g2_base_params,                 \
@@ -584,12 +577,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
 };
 
 #define IWL_DEVICE_6030                                                \
-       .fw_name_pre = IWL6000G2B_FW_PRE,                       \
+       .fw_name_pre = IWL6030_FW_PRE,                  \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
-       .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,             \
-       .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,     \
-       .ops = &iwl6000g2b_ops,                                 \
+       .eeprom_ver = EEPROM_6030_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,       \
+       .ops = &iwl6030_ops,                                    \
        .mod_params = &iwlagn_mod_params,                       \
        .base_params = &iwl6000_g2_base_params,                 \
        .bt_params = &iwl6000_bt_params,                        \
@@ -681,6 +674,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
        .fw_name_pre = IWL6050_FW_PRE,                          \
        .ucode_api_max = IWL6050_UCODE_API_MAX,                 \
        .ucode_api_min = IWL6050_UCODE_API_MIN,                 \
+       .valid_tx_ant = ANT_AB,         /* .cfg overwrite */    \
+       .valid_rx_ant = ANT_AB,         /* .cfg overwrite */    \
        .ops = &iwl6050_ops,                                    \
        .eeprom_ver = EEPROM_6050_EEPROM_VERSION,               \
        .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,       \
@@ -706,9 +701,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
        .fw_name_pre = IWL6050_FW_PRE,
        .ucode_api_max = IWL6050_UCODE_API_MAX,
        .ucode_api_min = IWL6050_UCODE_API_MIN,
-       .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
-       .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
-       .ops = &iwl6050g2_ops,
+       .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
+       .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
+       .ops = &iwl6150_ops,
        .mod_params = &iwlagn_mod_params,
        .base_params = &iwl6050_base_params,
        .ht_params = &iwl6000_ht_params,
@@ -734,5 +729,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
index d16bb5e..9006293 100644 (file)
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
        }
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
                              rx.general.common);
                ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        }
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
                              rx.general.common);
        } else {
@@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
 
        rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
        rxon_chnum = le16_to_cpu(ctx->staging.channel);
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                stat_band24 = !!(((struct iwl_bt_notif_statistics *)
                                 stat_resp)->flag &
                                 STATISTICS_REPLY_FLG_BAND_24G_MSK);
index a6dbd89..b500aaa 100644 (file)
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
        int p = 0;
        u32 flag;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics)
+       if (iwl_bt_statistics(priv))
                flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
        else
                flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
         * the last statistics notification from uCode
         * might not reflect the current uCode activity
         */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                ofdm = &priv->_agn.statistics_bt.rx.ofdm;
                cck = &priv->_agn.statistics_bt.rx.cck;
                general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
          * the last statistics notification from uCode
          * might not reflect the current uCode activity
          */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                tx = &priv->_agn.statistics_bt.tx;
                accum_tx = &priv->_agn.accum_statistics_bt.tx;
                delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
          * the last statistics notification from uCode
          * might not reflect the current uCode activity
          */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                general = &priv->_agn.statistics_bt.general.common;
                dbg = &priv->_agn.statistics_bt.general.common.dbg;
                div = &priv->_agn.statistics_bt.general.common.div;
index 14ceb4d..27b5a3e 100644
@@ -152,11 +152,14 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
 
        eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
 
-       priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
+       if (!priv->cfg->sku) {
+               /* not using sku overwrite */
+               priv->cfg->sku =
+                       ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
                        EEPROM_SKU_CAP_BAND_POS);
-       if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
-               priv->cfg->sku |= IWL_SKU_N;
-
+               if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
+                       priv->cfg->sku |= IWL_SKU_N;
+       }
        if (!priv->cfg->sku) {
                IWL_ERR(priv, "Invalid device sku\n");
                return -EINVAL;
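This lets a device config pin the SKU up front instead of deriving it from EEPROM, the same ".cfg overwrite" idea used for .valid_tx_ant/.valid_rx_ant at the top of this series. A hypothetical config fragment for illustration (the flag combination is made up, not taken from a real device entry):

        /* illustrative only: pre-set SKU so the EEPROM-derived value is skipped */
        .sku = IWL_SKU_A | IWL_SKU_G | IWL_SKU_N,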
index 366340f..41543ad 100644
@@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
        cmd.slots[0].type = 0; /* BSS */
        cmd.slots[1].type = 1; /* PAN */
 
-       if (ctx_bss->vif && ctx_pan->vif) {
+       if (priv->_agn.hw_roc_channel) {
+               /* both contexts must be used for this to happen */
+               slot1 = priv->_agn.hw_roc_duration;
+               slot0 = IWL_MIN_SLOT_TIME;
+       } else if (ctx_bss->vif && ctx_pan->vif) {
                int bcnint = ctx_pan->vif->bss_conf.beacon_int;
                int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
 
@@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
                if (test_bit(STATUS_SCAN_HW, &priv->status) ||
                    (!ctx_bss->vif->bss_conf.idle &&
                     !ctx_bss->vif->bss_conf.assoc)) {
-                       slot0 = dtim * bcnint * 3 - 20;
-                       slot1 = 20;
+                       slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+                       slot1 = IWL_MIN_SLOT_TIME;
                } else if (!ctx_pan->vif->bss_conf.idle &&
                           !ctx_pan->vif->bss_conf.assoc) {
-                       slot1 = bcnint * 3 - 20;
-                       slot0 = 20;
+                       slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+                       slot0 = IWL_MIN_SLOT_TIME;
                }
        } else if (ctx_pan->vif) {
                slot0 = 0;
@@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
                slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
 
                if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-                       slot0 = slot1 * 3 - 20;
-                       slot1 = 20;
+                       slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+                       slot1 = IWL_MIN_SLOT_TIME;
                }
        }
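Switching the literal 20 to IWL_MIN_SLOT_TIME leaves the arithmetic untouched; assuming the macro keeps the value 20 it replaces, a typical dtim of 1 and a beacon interval of 100 TU in the "scanning or BSS not associated" branch gives slot0 = 1 * 100 * 3 - 20 = 280 and slot1 = 20, i.e. the PAN context is squeezed to the minimum slot while the BSS side keeps the rest of the three-beacon window.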
 
index 1a24946..c1190d9 100644
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
 }
 
 /* Set led register on */
-static int iwl_led_on_reg(struct iwl_priv *priv)
+void iwlagn_led_enable(struct iwl_priv *priv)
 {
-       IWL_DEBUG_LED(priv, "led on\n");
        iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-       return 0;
-}
-
-/* Set led register off */
-static int iwl_led_off_reg(struct iwl_priv *priv)
-{
-       IWL_DEBUG_LED(priv, "LED Reg off\n");
-       iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
-       return 0;
 }
 
 const struct iwl_led_ops iwlagn_led_ops = {
        .cmd = iwl_send_led_cmd,
-       .on = iwl_led_on_reg,
-       .off = iwl_led_off_reg,
 };
index a594e4f..96f323d 100644
@@ -28,5 +28,6 @@
 #define __iwl_agn_led_h__
 
 extern const struct iwl_led_ops iwlagn_led_ops;
+void iwlagn_led_enable(struct iwl_priv *priv);
 
 #endif /* __iwl_agn_led_h__ */
index 3dee87e..2003c1d 100644
@@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
        priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
                                        iwlagn_rx_calib_complete;
        priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+       /* set up notification wait support */
+       spin_lock_init(&priv->_agn.notif_wait_lock);
+       INIT_LIST_HEAD(&priv->_agn.notif_waits);
+       init_waitqueue_head(&priv->_agn.notif_waitq);
 }
 
 void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -528,9 +533,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
 
 void iwlagn_temperature(struct iwl_priv *priv)
 {
-       /* store temperature from statistics (in Celsius) */
-       priv->temperature =
-               le32_to_cpu(priv->_agn.statistics.general.common.temperature);
+       /* store temperature from correct statistics (in Celsius) */
+       priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ?
+               priv->_agn.statistics_bt.general.common.temperature :
+               priv->_agn.statistics.general.common.temperature);
        iwl_tt_handler(priv);
 }
 
@@ -604,6 +610,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
 struct iwl_mod_params iwlagn_mod_params = {
        .amsdu_size_8K = 1,
        .restart_fw = 1,
+       .plcp_check = true,
        /* the rest are 0 by default */
 };
 
@@ -988,240 +995,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
        return -1;
 }
 
-/* Calc max signal level (dBm) among 3 possible receivers */
-static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
-                               struct iwl_rx_phy_res *rx_resp)
-{
-       return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
-}
-
-static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
-       u32 decrypt_out = 0;
-
-       if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
-                                       RX_RES_STATUS_STATION_FOUND)
-               decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
-                               RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
-       decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
-       /* packet was not encrypted */
-       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
-                                       RX_RES_STATUS_SEC_TYPE_NONE)
-               return decrypt_out;
-
-       /* packet was encrypted with unknown alg */
-       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
-                                       RX_RES_STATUS_SEC_TYPE_ERR)
-               return decrypt_out;
-
-       /* decryption was not done in HW */
-       if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
-                                       RX_MPDU_RES_STATUS_DEC_DONE_MSK)
-               return decrypt_out;
-
-       switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
-       case RX_RES_STATUS_SEC_TYPE_CCMP:
-               /* alg is CCM: check MIC only */
-               if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
-                       /* Bad MIC */
-                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
-               else
-                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
-               break;
-
-       case RX_RES_STATUS_SEC_TYPE_TKIP:
-               if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
-                       /* Bad TTAK */
-                       decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
-                       break;
-               }
-               /* fall through if TTAK OK */
-       default:
-               if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
-                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
-               else
-                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-               break;
-       }
-
-       IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
-                                       decrypt_in, decrypt_out);
-
-       return decrypt_out;
-}
-
-static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
-                                       struct ieee80211_hdr *hdr,
-                                       u16 len,
-                                       u32 ampdu_status,
-                                       struct iwl_rx_mem_buffer *rxb,
-                                       struct ieee80211_rx_status *stats)
-{
-       struct sk_buff *skb;
-       __le16 fc = hdr->frame_control;
-
-       /* We only process data packets if the interface is open */
-       if (unlikely(!priv->is_open)) {
-               IWL_DEBUG_DROP_LIMIT(priv,
-                   "Dropping packet while interface is not open.\n");
-               return;
-       }
-
-       /* In case of HW accelerated crypto and bad decryption, drop */
-       if (!priv->cfg->mod_params->sw_crypto &&
-           iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
-               return;
-
-       skb = dev_alloc_skb(128);
-       if (!skb) {
-               IWL_ERR(priv, "dev_alloc_skb failed\n");
-               return;
-       }
-
-       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
-       iwl_update_stats(priv, false, fc, len);
-       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
-       ieee80211_rx(priv->hw, skb);
-       priv->alloc_rxb_page--;
-       rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwlagn_rx_reply_rx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct ieee80211_hdr *header;
-       struct ieee80211_rx_status rx_status;
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_rx_phy_res *phy_res;
-       __le32 rx_pkt_status;
-       struct iwl_rx_mpdu_res_start *amsdu;
-       u32 len;
-       u32 ampdu_status;
-       u32 rate_n_flags;
-
-       /**
-        * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
-        *      REPLY_RX: physical layer info is in this buffer
-        *      REPLY_RX_MPDU_CMD: physical layer info was sent in separate
-        *              command and cached in priv->last_phy_res
-        *
-        * Here we set up local variables depending on which command is
-        * received.
-        */
-       if (pkt->hdr.cmd == REPLY_RX) {
-               phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
-               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
-                               + phy_res->cfg_phy_cnt);
-
-               len = le16_to_cpu(phy_res->byte_count);
-               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
-                               phy_res->cfg_phy_cnt + len);
-               ampdu_status = le32_to_cpu(rx_pkt_status);
-       } else {
-               if (!priv->_agn.last_phy_res_valid) {
-                       IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-                       return;
-               }
-               phy_res = &priv->_agn.last_phy_res;
-               amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
-               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
-               len = le16_to_cpu(amsdu->byte_count);
-               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
-               ampdu_status = iwlagn_translate_rx_status(priv,
-                               le32_to_cpu(rx_pkt_status));
-       }
-
-       if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
-                               phy_res->cfg_phy_cnt);
-               return;
-       }
-
-       if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
-           !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
-               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
-                               le32_to_cpu(rx_pkt_status));
-               return;
-       }
-
-       /* This will be used in several places later */
-       rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
-       /* rx_status carries information about the packet to mac80211 */
-       rx_status.mactime = le64_to_cpu(phy_res->timestamp);
-       rx_status.freq =
-               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
-       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-       rx_status.rate_idx =
-               iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
-       rx_status.flag = 0;
-
-       /* TSF isn't reliable. In order to allow smooth user experience,
-        * this W/A doesn't propagate it to the mac80211 */
-       /*rx_status.flag |= RX_FLAG_TSFT;*/
-
-       priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
-       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
-       rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
-
-       iwl_dbg_log_rx_data_frame(priv, len, header);
-       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
-               rx_status.signal, (unsigned long long)rx_status.mactime);
-
-       /*
-        * "antenna number"
-        *
-        * It seems that the antenna field in the phy flags value
-        * is actually a bit field. This is undefined by radiotap,
-        * it wants an actual antenna number but I always get "7"
-        * for most legacy frames I receive indicating that the
-        * same frame was received on all three RX chains.
-        *
-        * I think this field should be removed in favor of a
-        * new 802.11n radiotap field "RX chains" that is defined
-        * as a bitmask.
-        */
-       rx_status.antenna =
-               (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
-               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
-       /* set the preamble flag if appropriate */
-       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-               rx_status.flag |= RX_FLAG_SHORTPRE;
-
-       /* Set up the HT phy flags */
-       if (rate_n_flags & RATE_MCS_HT_MSK)
-               rx_status.flag |= RX_FLAG_HT;
-       if (rate_n_flags & RATE_MCS_HT40_MSK)
-               rx_status.flag |= RX_FLAG_40MHZ;
-       if (rate_n_flags & RATE_MCS_SGI_MSK)
-               rx_status.flag |= RX_FLAG_SHORT_GI;
-
-       iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
-                                   rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-                           struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       priv->_agn.last_phy_res_valid = true;
-       memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
-              sizeof(struct iwl_rx_phy_res));
-}
-
 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
                                           struct ieee80211_vif *vif,
                                           enum ieee80211_band band,
@@ -1342,6 +1115,18 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
        return added;
 }
 
+static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
+{
+       struct sk_buff *skb = priv->_agn.offchan_tx_skb;
+
+       if (skb->len < maxlen)
+               maxlen = skb->len;
+
+       memcpy(data, skb->data, maxlen);
+
+       return maxlen;
+}
+
 int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 {
        struct iwl_host_cmd cmd = {
@@ -1384,20 +1169,25 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
        scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
 
-       if (iwl_is_any_associated(priv)) {
+       if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
+           iwl_is_any_associated(priv)) {
                u16 interval = 0;
                u32 extra;
                u32 suspend_time = 100;
                u32 scan_suspend_time = 100;
-               unsigned long flags;
 
                IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-               spin_lock_irqsave(&priv->lock, flags);
-               if (priv->is_internal_short_scan)
+               switch (priv->scan_type) {
+               case IWL_SCAN_OFFCH_TX:
+                       WARN_ON(1);
+                       break;
+               case IWL_SCAN_RADIO_RESET:
                        interval = 0;
-               else
+                       break;
+               case IWL_SCAN_NORMAL:
                        interval = vif->bss_conf.beacon_int;
-               spin_unlock_irqrestore(&priv->lock, flags);
+                       break;
+               }
 
                scan->suspend_time = 0;
                scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1410,29 +1200,41 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                scan->suspend_time = cpu_to_le32(scan_suspend_time);
                IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
                               scan_suspend_time, interval);
+       } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
+               scan->suspend_time = 0;
+               scan->max_out_time =
+                       cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
        }
 
-       if (priv->is_internal_short_scan) {
+       switch (priv->scan_type) {
+       case IWL_SCAN_RADIO_RESET:
                IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
-       } else if (priv->scan_request->n_ssids) {
-               int i, p = 0;
-               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
-               for (i = 0; i < priv->scan_request->n_ssids; i++) {
-                       /* always does wildcard anyway */
-                       if (!priv->scan_request->ssids[i].ssid_len)
-                               continue;
-                       scan->direct_scan[p].id = WLAN_EID_SSID;
-                       scan->direct_scan[p].len =
-                               priv->scan_request->ssids[i].ssid_len;
-                       memcpy(scan->direct_scan[p].ssid,
-                              priv->scan_request->ssids[i].ssid,
-                              priv->scan_request->ssids[i].ssid_len);
-                       n_probes++;
-                       p++;
-               }
-               is_active = true;
-       } else
-               IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+               break;
+       case IWL_SCAN_NORMAL:
+               if (priv->scan_request->n_ssids) {
+                       int i, p = 0;
+                       IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+                       for (i = 0; i < priv->scan_request->n_ssids; i++) {
+                               /* always does wildcard anyway */
+                               if (!priv->scan_request->ssids[i].ssid_len)
+                                       continue;
+                               scan->direct_scan[p].id = WLAN_EID_SSID;
+                               scan->direct_scan[p].len =
+                                       priv->scan_request->ssids[i].ssid_len;
+                               memcpy(scan->direct_scan[p].ssid,
+                                      priv->scan_request->ssids[i].ssid,
+                                      priv->scan_request->ssids[i].ssid_len);
+                               n_probes++;
+                               p++;
+                       }
+                       is_active = true;
+               } else
+                       IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+               break;
+       case IWL_SCAN_OFFCH_TX:
+               IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
+               break;
+       }
 
        scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
        scan->tx_cmd.sta_id = ctx->bcast_sta_id;
@@ -1530,38 +1332,77 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
        rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
        scan->rx_chain = cpu_to_le16(rx_chain);
-       if (!priv->is_internal_short_scan) {
+       switch (priv->scan_type) {
+       case IWL_SCAN_NORMAL:
                cmd_len = iwl_fill_probe_req(priv,
                                        (struct ieee80211_mgmt *)scan->data,
                                        vif->addr,
                                        priv->scan_request->ie,
                                        priv->scan_request->ie_len,
                                        IWL_MAX_SCAN_SIZE - sizeof(*scan));
-       } else {
+               break;
+       case IWL_SCAN_RADIO_RESET:
                /* use bcast addr, will not be transmitted but must be valid */
                cmd_len = iwl_fill_probe_req(priv,
                                        (struct ieee80211_mgmt *)scan->data,
                                        iwl_bcast_addr, NULL, 0,
                                        IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
+               break;
+       case IWL_SCAN_OFFCH_TX:
+               cmd_len = iwl_fill_offch_tx(priv, scan->data,
+                                           IWL_MAX_SCAN_SIZE
+                                            - sizeof(*scan)
+                                            - sizeof(struct iwl_scan_channel));
+               scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
+               break;
+       default:
+               BUG();
        }
        scan->tx_cmd.len = cpu_to_le16(cmd_len);
 
        scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
                               RXON_FILTER_BCON_AWARE_MSK);
 
-       if (priv->is_internal_short_scan) {
+       switch (priv->scan_type) {
+       case IWL_SCAN_RADIO_RESET:
                scan->channel_count =
                        iwl_get_single_channel_for_scan(priv, vif, band,
-                               (void *)&scan->data[le16_to_cpu(
-                               scan->tx_cmd.len)]);
-       } else {
+                               (void *)&scan->data[cmd_len]);
+               break;
+       case IWL_SCAN_NORMAL:
                scan->channel_count =
                        iwl_get_channels_for_scan(priv, vif, band,
                                is_active, n_probes,
-                               (void *)&scan->data[le16_to_cpu(
-                               scan->tx_cmd.len)]);
+                               (void *)&scan->data[cmd_len]);
+               break;
+       case IWL_SCAN_OFFCH_TX: {
+               struct iwl_scan_channel *scan_ch;
+
+               scan->channel_count = 1;
+
+               scan_ch = (void *)&scan->data[cmd_len];
+               scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+               scan_ch->channel =
+                       cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
+               scan_ch->active_dwell =
+                       cpu_to_le16(priv->_agn.offchan_tx_timeout);
+               scan_ch->passive_dwell = 0;
+
+               /* Set txpower levels to defaults */
+               scan_ch->dsp_atten = 110;
+
+               /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+                * power level:
+                * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+                */
+               if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+               }
+               break;
        }
+
        if (scan->channel_count == 0) {
                IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
                return -EIO;
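For reference, the gain values written in the off-channel case work out to ((1 << 5) | (3 << 3)) | 3 = 0x3b on 5 GHz and (1 << 5) | (5 << 3) = 0x48 on 2.4 GHz, while the commented-out 6 Mb OFDM value ((1 << 5) | (2 << 3)) | 3 would be 0x33.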
@@ -1801,26 +1642,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = {
 
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
 {
-       struct iwlagn_bt_cmd bt_cmd = {
+       struct iwl_basic_bt_cmd basic = {
                .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
                .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
                .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
                .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
        };
+       struct iwl6000_bt_cmd bt_cmd_6000;
+       struct iwl2000_bt_cmd bt_cmd_2000;
+       int ret;
 
        BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
-                       sizeof(bt_cmd.bt3_lookup_table));
-
-       if (priv->cfg->bt_params)
-               bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost;
-       else
-               bt_cmd.prio_boost = 0;
-       bt_cmd.kill_ack_mask = priv->kill_ack_mask;
-       bt_cmd.kill_cts_mask = priv->kill_cts_mask;
+                       sizeof(basic.bt3_lookup_table));
+
+       if (priv->cfg->bt_params) {
+               if (priv->cfg->bt_params->bt_session_2) {
+                       bt_cmd_2000.prio_boost = cpu_to_le32(
+                               priv->cfg->bt_params->bt_prio_boost);
+                       bt_cmd_2000.tx_prio_boost = 0;
+                       bt_cmd_2000.rx_prio_boost = 0;
+               } else {
+                       bt_cmd_6000.prio_boost =
+                               priv->cfg->bt_params->bt_prio_boost;
+                       bt_cmd_6000.tx_prio_boost = 0;
+                       bt_cmd_6000.rx_prio_boost = 0;
+               }
+       } else {
+               IWL_ERR(priv, "failed to construct BT Coex Config\n");
+               return;
+       }
 
-       bt_cmd.valid = priv->bt_valid;
-       bt_cmd.tx_prio_boost = 0;
-       bt_cmd.rx_prio_boost = 0;
+       basic.kill_ack_mask = priv->kill_ack_mask;
+       basic.kill_cts_mask = priv->kill_cts_mask;
+       basic.valid = priv->bt_valid;
 
        /*
         * Configure BT coex mode to "no coexistence" when the
@@ -1829,49 +1683,45 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
         * IBSS mode (no proper uCode support for coex then).
         */
        if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
-               bt_cmd.flags = 0;
+               basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
        } else {
-               bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
+               basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
                                        IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
                if (priv->cfg->bt_params &&
                    priv->cfg->bt_params->bt_sco_disable)
-                       bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+                       basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
 
                if (priv->bt_ch_announce)
-                       bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
-               IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
+                       basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
+               IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
        }
-       priv->bt_enable_flag = bt_cmd.flags;
+       priv->bt_enable_flag = basic.flags;
        if (priv->bt_full_concurrent)
-               memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
+               memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
                        sizeof(iwlagn_concurrent_lookup));
        else
-               memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
+               memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
                        sizeof(iwlagn_def_3w_lookup));
 
        IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
-                      bt_cmd.flags ? "active" : "disabled",
+                      basic.flags ? "active" : "disabled",
                       priv->bt_full_concurrent ?
                       "full concurrency" : "3-wire");
 
-       if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
+       if (priv->cfg->bt_params->bt_session_2) {
+               memcpy(&bt_cmd_2000.basic, &basic,
+                       sizeof(basic));
+               ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+                       sizeof(bt_cmd_2000), &bt_cmd_2000);
+       } else {
+               memcpy(&bt_cmd_6000.basic, &basic,
+                       sizeof(basic));
+               ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+                       sizeof(bt_cmd_6000), &bt_cmd_6000);
+       }
+       if (ret)
                IWL_ERR(priv, "failed to send BT Coex Config\n");
 
-       /*
-        * When we are doing a restart, need to also reconfigure BT
-        * SCO to the device. If not doing a restart, bt_sco_active
-        * will always be false, so there's no need to have an extra
-        * variable to check for it.
-        */
-       if (priv->bt_sco_active) {
-               struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
-
-               if (priv->bt_sco_active)
-                       sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
-               if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
-                                    sizeof(sco_cmd), &sco_cmd))
-                       IWL_ERR(priv, "failed to send BT SCO command\n");
-       }
 }
 
 static void iwlagn_bt_traffic_change_work(struct work_struct *work)
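The rewrite splits the old monolithic iwlagn_bt_cmd into a shared iwl_basic_bt_cmd that is memcpy()'d into a device-family wrapper before sending, so the 2000- and 6000-series commands can diverge in their prio_boost encoding. The wrapper definitions are not part of this hunk; a rough sketch of the shape the assignments above imply (field widths other than the __le32 prio_boost are assumptions):

/* sketch inferred from the code above, not the authoritative layout */
struct iwl6000_bt_cmd {
        struct iwl_basic_bt_cmd basic;
        u8 prio_boost;          /* assigned without endian conversion */
        u8 tx_prio_boost;
        u8 rx_prio_boost;
};

struct iwl2000_bt_cmd {
        struct iwl_basic_bt_cmd basic;
        __le32 prio_boost;      /* written with cpu_to_le32() */
        u8 tx_prio_boost;
        u8 rx_prio_boost;
};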
@@ -1881,6 +1731,11 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
        struct iwl_rxon_context *ctx;
        int smps_request = -1;
 
+       if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+               /* bt coex disabled */
+               return;
+       }
+
        /*
         * Note: bt_traffic_load can be overridden by scan complete and
         * coex profile notifications. Ignore that since only bad consequence
@@ -1991,12 +1846,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
                (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
                        BT_UART_MSG_FRAME6DISCOVERABLE_POS);
 
-       IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
-                       "0x%X, Connectable = 0x%X",
+       IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
+                       "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
                (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
                        BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
-               (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
-                       BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
+               (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
+                       BT_UART_MSG_FRAME7PAGE_POS,
+               (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
+                       BT_UART_MSG_FRAME7INQUIRY_POS,
                (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
                        BT_UART_MSG_FRAME7CONNECTABLE_POS);
 }
@@ -2032,9 +1889,13 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
        unsigned long flags;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
-       struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
        struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
 
+       if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+               /* bt coex disabled */
+               return;
+       }
+
        IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
        IWL_DEBUG_NOTIF(priv, "    status: %d\n", coex->bt_status);
        IWL_DEBUG_NOTIF(priv, "    traffic load: %d\n", coex->bt_traffic_load);
@@ -2063,15 +1924,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
                        queue_work(priv->workqueue,
                                   &priv->bt_traffic_change_work);
                }
-               if (priv->bt_sco_active !=
-                   (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
-                       priv->bt_sco_active = uart_msg->frame3 &
-                               BT_UART_MSG_FRAME3SCOESCO_MSK;
-                       if (priv->bt_sco_active)
-                               sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
-                       iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
-                                      sizeof(sco_cmd), &sco_cmd, NULL);
-               }
        }
 
        iwlagn_set_kill_msk(priv, uart_msg);
@@ -2389,3 +2241,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
        }
        return 0;
 }
+
+/* notification wait support */
+void iwlagn_init_notification_wait(struct iwl_priv *priv,
+                                  struct iwl_notification_wait *wait_entry,
+                                  void (*fn)(struct iwl_priv *priv,
+                                             struct iwl_rx_packet *pkt),
+                                  u8 cmd)
+{
+       wait_entry->fn = fn;
+       wait_entry->cmd = cmd;
+       wait_entry->triggered = false;
+
+       spin_lock_bh(&priv->_agn.notif_wait_lock);
+       list_add(&wait_entry->list, &priv->_agn.notif_waits);
+       spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
+
+signed long iwlagn_wait_notification(struct iwl_priv *priv,
+                                    struct iwl_notification_wait *wait_entry,
+                                    unsigned long timeout)
+{
+       int ret;
+
+       ret = wait_event_timeout(priv->_agn.notif_waitq,
+                                &wait_entry->triggered,
+                                timeout);
+
+       spin_lock_bh(&priv->_agn.notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&priv->_agn.notif_wait_lock);
+
+       return ret;
+}
+
+void iwlagn_remove_notification(struct iwl_priv *priv,
+                               struct iwl_notification_wait *wait_entry)
+{
+       spin_lock_bh(&priv->_agn.notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
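These three helpers form a small wait-for-notification API: register a waiter, send the command that should trigger the notification, then either wait with a timeout or unhook the waiter if the send failed (a successful or timed-out wait removes the entry itself). A condensed sketch of the call pattern, modelled on the iwlagn_disable_pan() change later in this series:

        struct iwl_notification_wait wait;
        int ret;

        /* 1) register interest before sending, to avoid a race */
        iwlagn_init_notification_wait(priv, &wait, NULL,
                                      REPLY_WIPAN_DEACTIVATION_COMPLETE);

        /* 2) send the command that eventually triggers the notification */
        ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
        if (ret)
                /* never sent: drop the waiter ourselves */
                iwlagn_remove_notification(priv, &wait);
        else if (iwlagn_wait_notification(priv, &wait, HZ) == 0)
                /* sent, but no notification arrived within one second */
                ret = -EIO;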
index 75fcd30..d03b473 100644
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
 };
 
 static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
-       {0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
-       {0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
-       {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+       {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
+       {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
+       {0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
+       {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
 };
 
 static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
        {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
-       {0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
-       {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+       {0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+       {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
 };
 
 static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
-       {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
-       {0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
-       {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+       {0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+       {0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+       {0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+       {0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
 };
 
 static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
        {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
-       {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
-       {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+       {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+       {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
 };
 
 static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
        u8 ant_toggle_cnt = 0;
        u8 use_ht_possible = 1;
        u8 valid_tx_ant = 0;
+       struct iwl_station_priv *sta_priv =
+               container_of(lq_sta, struct iwl_station_priv, lq_sta);
        struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
 
        /* Override starting rate (index 0) if needed for debug purposes */
@@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
                repeat_rate--;
        }
 
-       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_params.agg_frame_cnt_limit =
+               sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
        lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
 
        lq_cmd->agg_params.agg_time_limit =
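The ?: here is the GNU C conditional with omitted middle operand that the kernel uses throughout: a ?: b yields a when a is non-zero and b otherwise, without evaluating a twice. The assignment above is therefore equivalent to:

        lq_cmd->agg_params.agg_frame_cnt_limit =
                sta_priv->max_agg_bufsize ?
                        sta_priv->max_agg_bufsize :
                        LINK_QUAL_AGG_FRAME_LIMIT_DEF;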
index 75e50d3..184828c 100644
@@ -213,6 +213,7 @@ enum {
         IWL_CCK_BASIC_RATES_MASK)
 
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
 
 #define IWL_INVALID_VALUE    -1
 
index 6d140bd..dfdbea6 100644
@@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
                              struct iwl_rxon_context *ctx,
                              struct iwl_rxon_cmd *send)
 {
+       struct iwl_notification_wait disable_wait;
        __le32 old_filter = send->filter_flags;
        u8 old_dev_type = send->dev_type;
        int ret;
 
+       iwlagn_init_notification_wait(priv, &disable_wait, NULL,
+                                     REPLY_WIPAN_DEACTIVATION_COMPLETE);
+
        send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        send->dev_type = RXON_DEV_TYPE_P2P;
        ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
@@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        send->filter_flags = old_filter;
        send->dev_type = old_dev_type;
 
-       if (ret)
+       if (ret) {
                IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-
-       /* FIXME: WAIT FOR PAN DISABLE */
-       msleep(300);
+               iwlagn_remove_notification(priv, &disable_wait);
+       } else {
+               signed long wait_res;
+
+               wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
+               if (wait_res == 0) {
+                       IWL_ERR(priv, "Timed out waiting for PAN disable\n");
+                       ret = -EIO;
+               }
+       }
 
        return ret;
 }
@@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* always get timestamp with Rx frame */
        ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
 
+       if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
+               struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
+
+               iwl_set_rxon_channel(priv, chan, ctx);
+               iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+               ctx->staging.filter_flags |=
+                       RXON_FILTER_ASSOC_MSK |
+                       RXON_FILTER_PROMISC_MSK |
+                       RXON_FILTER_CTL2HOST_MSK;
+               ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+               new_assoc = true;
+
+               if (memcmp(&ctx->staging, &ctx->active,
+                          sizeof(ctx->staging)) == 0)
+                       return 0;
+       }
+
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
         * If we issue a new RXON command which required a tune then we must
         * send a new TXPOWER command or we won't be able to Tx any frames.
         *
-        * FIXME: which RXON requires a tune? Can we optimise this out in
-        *        some cases?
+        * It's expected we set power here if channel is changing.
         */
-       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+       ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
        if (ret) {
                IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
                return ret;
@@ -444,6 +471,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
        struct iwl_rxon_context *tmp;
        struct ieee80211_sta *sta;
        struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct ieee80211_sta_ht_cap *ht_cap;
        bool need_multiple;
 
        lockdep_assert_held(&priv->mutex);
@@ -452,23 +480,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
        case NL80211_IFTYPE_STATION:
                rcu_read_lock();
                sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (sta) {
-                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-                       int maxstreams;
-
-                       maxstreams = (ht_cap->mcs.tx_params &
-                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-                       maxstreams += 1;
-
-                       need_multiple = true;
-
-                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
-                           (ht_cap->mcs.rx_mask[2] == 0))
-                               need_multiple = false;
-                       if (maxstreams <= 1)
-                               need_multiple = false;
-               } else {
+               if (!sta) {
                        /*
                         * If at all, this can only happen through a race
                         * when the AP disconnects us while we're still
@@ -476,7 +488,46 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
                         * will soon tell us about that.
                         */
                        need_multiple = false;
+                       rcu_read_unlock();
+                       break;
+               }
+
+               ht_cap = &sta->ht_cap;
+
+               need_multiple = true;
+
+               /*
+                * If the peer advertises no support for receiving 2 and 3
+                * stream MCS rates, it can't be transmitting them either.
+                */
+               if (ht_cap->mcs.rx_mask[1] == 0 &&
+                   ht_cap->mcs.rx_mask[2] == 0) {
+                       need_multiple = false;
+               } else if (!(ht_cap->mcs.tx_params &
+                                               IEEE80211_HT_MCS_TX_DEFINED)) {
+                       /* If it can't TX MCS at all ... */
+                       need_multiple = false;
+               } else if (ht_cap->mcs.tx_params &
+                                               IEEE80211_HT_MCS_TX_RX_DIFF) {
+                       int maxstreams;
+
+                       /*
+                        * But if it can receive them, it might still not
+                        * be able to transmit them, which is what we need
+                        * to check here -- so check the number of streams
+                        * it advertises for TX (if different from RX).
+                        */
+
+                       maxstreams = (ht_cap->mcs.tx_params &
+                                IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
+                       maxstreams >>=
+                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       if (maxstreams <= 1)
+                               need_multiple = false;
                }
+
                rcu_read_unlock();
                break;
        case NL80211_IFTYPE_ADHOC:
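The reworked station branch above reduces to reading the peer's HT MCS TX parameters. As a worked example, assuming the usual mac80211 bit layout (TX-defined in bit 0, TX/RX-difference in bit 1, max-TX-streams-minus-one in bits 2-3): a peer advertising rx_mask[1] != 0 with tx_params = 0x07 has TX defined, a differing TX capability and ((0x07 & 0x0c) >> 2) + 1 = 2 TX streams, so need_multiple stays true; the same peer with tx_params = 0x03 works out to a single TX stream and need_multiple is cleared, and tx_params = 0x00 (TX rates not defined at all) clears it as well.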
@@ -546,12 +597,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changes & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
-                       iwl_led_associate(priv);
                        priv->timestamp = bss_conf->timestamp;
                        ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
                } else {
                        ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-                       iwl_led_disassociate(priv);
                }
        }
 
index 24a11b8..a709d05 100644
@@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        unsigned long flags;
        bool is_agg = false;
 
-       if (info->control.vif)
+       /*
+        * If the frame needs to go out off-channel, then
+        * we'll have put the PAN context to that channel,
+        * so make the frame go out there.
+        */
+       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+               ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+       else if (info->control.vif)
                ctx = iwl_rxon_ctx_from_vif(info->control.vif);
 
        spin_lock_irqsave(&priv->lock, flags);
@@ -940,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
  */
 void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
 {
-       int ch;
+       int ch, txq_id;
        unsigned long flags;
 
        /* Turn off all Tx DMA fifos */
@@ -959,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
                            iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!priv->txq)
+               return;
+
+       /* Unmap DMA from host system and free skb's */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+               if (txq_id == priv->cmd_queue)
+                       iwl_cmd_queue_unmap(priv);
+               else
+                       iwl_tx_queue_unmap(priv, txq_id);
 }
 
 /*
index 24dabcd..d807e5e 100644
@@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
 {
        int ret = 0;
 
-       /* Check alive response for "valid" sign from uCode */
-       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-               goto restart;
-       }
-
        /* initialize uCode was loaded... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "initialize" alive if code weren't properly loaded.  */
index 36335b1..581dc9f 100644
@@ -59,6 +59,7 @@
 #include "iwl-sta.h"
 #include "iwl-agn-calib.h"
 #include "iwl-agn.h"
+#include "iwl-agn-led.h"
 
 
 /******************************************************************************
@@ -85,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
 
 static int iwlagn_ant_coupling;
 static bool iwlagn_bt_ch_announce = 1;
@@ -424,47 +424,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
        return 0;
 }
 
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl_rx_reply_alive(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_alive_resp *palive;
-       struct delayed_work *pwork;
-
-       palive = &pkt->u.alive_frame;
-
-       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
-                      "0x%01X 0x%01X\n",
-                      palive->is_valid, palive->ver_type,
-                      palive->ver_subtype);
-
-       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
-               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-               memcpy(&priv->card_alive_init,
-                      &pkt->u.alive_frame,
-                      sizeof(struct iwl_init_alive_resp));
-               pwork = &priv->init_alive_start;
-       } else {
-               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-               memcpy(&priv->card_alive, &pkt->u.alive_frame,
-                      sizeof(struct iwl_alive_resp));
-               pwork = &priv->alive_start;
-       }
-
-       /* We delay the ALIVE response by 5ms to
-        * give the HW RF Kill time to activate... */
-       if (palive->is_valid == UCODE_VALID_OK)
-               queue_delayed_work(priv->workqueue, pwork,
-                                  msecs_to_jiffies(5));
-       else
-               IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
 static void iwl_bg_beacon_update(struct work_struct *work)
 {
        struct iwl_priv *priv =
@@ -699,83 +658,6 @@ static void iwl_bg_ucode_trace(unsigned long data)
        }
 }
 
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl4965_beacon_notif *beacon =
-               (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
-       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-               "tsf %d %d rate %d\n",
-               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
-               beacon->beacon_notify_hdr.failure_frame,
-               le32_to_cpu(beacon->ibss_mgr_status),
-               le32_to_cpu(beacon->high_tsf),
-               le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
-       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-       if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
-               queue_work(priv->workqueue, &priv->beacon_update);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
-       unsigned long status = priv->status;
-
-       IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
-                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
-                         (flags & SW_CARD_DISABLED) ? "Kill" : "On",
-                         (flags & CT_CARD_DISABLED) ?
-                         "Reached" : "Not reached");
-
-       if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
-                    CT_CARD_DISABLED)) {
-
-               iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-                           CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-               iwl_write_direct32(priv, HBUS_TARG_MBX_C,
-                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
-               if (!(flags & RXON_CARD_DISABLED)) {
-                       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-                                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-                       iwl_write_direct32(priv, HBUS_TARG_MBX_C,
-                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-               }
-               if (flags & CT_CARD_DISABLED)
-                       iwl_tt_enter_ct_kill(priv);
-       }
-       if (!(flags & CT_CARD_DISABLED))
-               iwl_tt_exit_ct_kill(priv);
-
-       if (flags & HW_CARD_DISABLED)
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
-       if (!(flags & RXON_CARD_DISABLED))
-               iwl_scan_cancel(priv);
-
-       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-            test_bit(STATUS_RF_KILL_HW, &priv->status)))
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-                       test_bit(STATUS_RF_KILL_HW, &priv->status));
-       else
-               wake_up_interruptible(&priv->wait_command_queue);
-}
-
 static void iwl_bg_tx_flush(struct work_struct *work)
 {
        struct iwl_priv *priv =
@@ -794,51 +676,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
        }
 }
 
-/**
- * iwl_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl_setup_rx_handlers(struct iwl_priv *priv)
-{
-       priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
-       priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
-       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
-       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-                       iwl_rx_spectrum_measure_notif;
-       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
-       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-           iwl_rx_pm_debug_statistics_notif;
-       priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
-
-       /*
-        * The same handler is used for both the REPLY to a discrete
-        * statistics request from the host as well as for the periodic
-        * statistics notifications (after received beacons) from the uCode.
-        */
-       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
-       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
-
-       iwl_setup_rx_scan_handlers(priv);
-
-       /* status change handler */
-       priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
-
-       priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
-           iwl_rx_missed_beacon_notif;
-       /* Rx handlers */
-       priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
-       priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
-       /* block ack */
-       priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
-       /* Set up hardware specific Rx handlers */
-       priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
 /**
  * iwl_rx_handle - Main entry function for receiving responses from uCode
  *
@@ -846,7 +683,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
  * the appropriate handlers, including command responses,
  * frame-received notifications, and other notifications.
  */
-void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_priv *priv)
 {
        struct iwl_rx_mem_buffer *rxb;
        struct iwl_rx_packet *pkt;
@@ -910,6 +747,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
                        (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
                        (pkt->hdr.cmd != REPLY_TX);
 
+               /*
+                * Do the notification wait before RX handlers so
+                * even if the RX handler consumes the RXB we have
+                * access to it in the notification wait entry.
+                */
+               if (!list_empty(&priv->_agn.notif_waits)) {
+                       struct iwl_notification_wait *w;
+
+                       spin_lock(&priv->_agn.notif_wait_lock);
+                       list_for_each_entry(w, &priv->_agn.notif_waits, list) {
+                               if (w->cmd == pkt->hdr.cmd) {
+                                       w->triggered = true;
+                                       if (w->fn)
+                                               w->fn(priv, pkt);
+                               }
+                       }
+                       spin_unlock(&priv->_agn.notif_wait_lock);
+
+                       wake_up_all(&priv->_agn.notif_waitq);
+               }
+
                /* Based on type of command response or notification,
                 *   handle those that need handling via function in
                 *   rx_handlers table.  See iwl_setup_rx_handlers() */
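
/*
 * Illustrative sketch only, not part of this change: a caller typically pairs
 * the notification wait handled above with a host command, using the
 * iwlagn_*_notification helpers whose prototypes are added to iwl-agn.h
 * further below.  The command ID and timeout are arbitrary examples, and the
 * names outside this diff (iwl_send_cmd, struct iwl_host_cmd) are assumed
 * from the driver's existing command API.
 */
static int example_send_cmd_and_wait(struct iwl_priv *priv,
                                     struct iwl_host_cmd *cmd)
{
        struct iwl_notification_wait wait;
        int ret;

        iwlagn_init_notification_wait(priv, &wait, NULL,
                                      REPLY_WIPAN_DEACTIVATION_COMPLETE);

        ret = iwl_send_cmd(priv, cmd);
        if (ret) {
                /* the notification will never come; drop the wait entry */
                iwlagn_remove_notification(priv, &wait);
                return ret;
        }

        /* assumption: a non-positive return means the wait was not triggered */
        if (iwlagn_wait_notification(priv, &wait, HZ / 2) <= 0)
                return -ETIMEDOUT;

        return 0;
}
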
@@ -1157,6 +1015,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
        /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
+       /* Re-enable RF_KILL if it occurred */
+       else if (handled & CSR_INT_BIT_RF_KILL)
+               iwl_enable_rfkill_int(priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1371,68 +1232,11 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
        /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
+       /* Re-enable RF_KILL if it occurred */
+       else if (handled & CSR_INT_BIT_RF_KILL)
+               iwl_enable_rfkill_int(priv);
 }
 
-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
-#define ACK_CNT_RATIO (50)
-#define BA_TIMEOUT_CNT (5)
-#define BA_TIMEOUT_MAX (16)
-
-/**
- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
- *
- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
- * operation state.
- */
-bool iwl_good_ack_health(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
-{
-       bool rc = true;
-       int actual_ack_cnt_delta, expected_ack_cnt_delta;
-       int ba_timeout_delta;
-
-       actual_ack_cnt_delta =
-               le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
-               le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
-       expected_ack_cnt_delta =
-               le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
-               le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
-       ba_timeout_delta =
-               le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
-               le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
-       if ((priv->_agn.agg_tids_count > 0) &&
-           (expected_ack_cnt_delta > 0) &&
-           (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
-               < ACK_CNT_RATIO) &&
-           (ba_timeout_delta > BA_TIMEOUT_CNT)) {
-               IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
-                               " expected_ack_cnt = %d\n",
-                               actual_ack_cnt_delta, expected_ack_cnt_delta);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               /*
-                * This is ifdef'ed on DEBUGFS because otherwise the
-                * statistics aren't available. If DEBUGFS is set but
-                * DEBUG is not, these will just compile out.
-                */
-               IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
-                               priv->_agn.delta_statistics.tx.rx_detected_cnt);
-               IWL_DEBUG_RADIO(priv,
-                               "ack_or_ba_timeout_collision delta = %d\n",
-                               priv->_agn.delta_statistics.tx.
-                               ack_or_ba_timeout_collision);
-#endif
-               IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
-                               ba_timeout_delta);
-               if (!actual_ack_cnt_delta &&
-                   (ba_timeout_delta >= BA_TIMEOUT_MAX))
-                       rc = false;
-       }
-       return rc;
-}
-
-
 /*****************************************************************************
  *
  * sysfs attributes
@@ -2626,13 +2430,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
 
        IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
 
-       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Alive failed.\n");
-               goto restart;
-       }
-
        /* Initialize uCode has loaded Runtime uCode ... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "runtime" alive if code weren't properly loaded.  */
@@ -2704,9 +2501,11 @@ static void iwl_alive_start(struct iwl_priv *priv)
                        priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
        }
 
-       if (priv->cfg->bt_params &&
-           !priv->cfg->bt_params->advanced_bt_coexist) {
-               /* Configure Bluetooth device coexistence support */
+       if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
+           !priv->cfg->bt_params->advanced_bt_coexist)) {
+               /*
+                * default is 2-wire BT coexistence support
+                */
                priv->cfg->ops->hcmd->send_bt_config(priv);
        }
 
@@ -2720,8 +2519,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
        /* At this point, the NIC is initialized and operational */
        iwl_rf_kill_ct_config(priv);
 
-       iwl_leds_init(priv);
-
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
        wake_up_interruptible(&priv->wait_command_queue);
 
@@ -2763,7 +2560,6 @@ static void __iwl_down(struct iwl_priv *priv)
                         priv->cfg->bt_params->bt_init_traffic_load;
        else
                priv->bt_traffic_load = 0;
-       priv->bt_sco_active = false;
        priv->bt_full_concurrent = false;
        priv->bt_ci_compliance = 0;
 
@@ -3057,8 +2853,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
        }
 
        if (priv->start_calib) {
-               if (priv->cfg->bt_params &&
-                   priv->cfg->bt_params->bt_statistics) {
+               if (iwl_bt_statistics(priv)) {
                        iwl_chain_noise_calibration(priv,
                                        (void *)&priv->_agn.statistics_bt);
                        iwl_sensitivity_calibration(priv,
@@ -3083,7 +2878,7 @@ static void iwl_bg_restart(struct work_struct *data)
 
        if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
                struct iwl_rxon_context *ctx;
-               bool bt_sco, bt_full_concurrent;
+               bool bt_full_concurrent;
                u8 bt_ci_compliance;
                u8 bt_load;
                u8 bt_status;
@@ -3102,7 +2897,6 @@ static void iwl_bg_restart(struct work_struct *data)
                 * re-configure the hw when we reconfigure the BT
                 * command.
                 */
-               bt_sco = priv->bt_sco_active;
                bt_full_concurrent = priv->bt_full_concurrent;
                bt_ci_compliance = priv->bt_ci_compliance;
                bt_load = priv->bt_traffic_load;
@@ -3110,7 +2904,6 @@ static void iwl_bg_restart(struct work_struct *data)
 
                __iwl_down(priv);
 
-               priv->bt_sco_active = bt_sco;
                priv->bt_full_concurrent = bt_full_concurrent;
                priv->bt_ci_compliance = bt_ci_compliance;
                priv->bt_traffic_load = bt_load;
@@ -3144,6 +2937,91 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
        mutex_unlock(&priv->mutex);
 }
 
+static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+                                struct ieee80211_channel *chan,
+                                enum nl80211_channel_type channel_type,
+                                unsigned int wait)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       /* Not supported if we don't have PAN */
+       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
+               ret = -EOPNOTSUPP;
+               goto free;
+       }
+
+       /* Not supported on pre-P2P firmware */
+       if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+                                       BIT(NL80211_IFTYPE_P2P_CLIENT))) {
+               ret = -EOPNOTSUPP;
+               goto free;
+       }
+
+       mutex_lock(&priv->mutex);
+
+       if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
+               /*
+                * If the PAN context is free, use the normal
+                * way of doing remain-on-channel offload + TX.
+                */
+               ret = 1;
+               goto out;
+       }
+
+       /* TODO: queue up if scanning? */
+       if (test_bit(STATUS_SCANNING, &priv->status) ||
+           priv->_agn.offchan_tx_skb) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /*
+        * max_scan_ie_len doesn't include the blank SSID or the header,
+        * so we need to add those back here.
+        */
+       if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
+               ret = -ENOBUFS;
+               goto out;
+       }
+
+       priv->_agn.offchan_tx_skb = skb;
+       priv->_agn.offchan_tx_timeout = wait;
+       priv->_agn.offchan_tx_chan = chan;
+
+       ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
+                               IWL_SCAN_OFFCH_TX, chan->band);
+       if (ret)
+               priv->_agn.offchan_tx_skb = NULL;
+ out:
+       mutex_unlock(&priv->mutex);
+ free:
+       if (ret < 0)
+               kfree_skb(skb);
+
+       return ret;
+}
+
+static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       mutex_lock(&priv->mutex);
+
+       if (!priv->_agn.offchan_tx_skb) {
+               /* nothing pending; must drop the mutex before returning */
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       priv->_agn.offchan_tx_skb = NULL;
+
+       ret = iwl_scan_cancel_timeout(priv, 200);
+       if (ret)
+               ret = -EIO;
+ unlock:
+       mutex_unlock(&priv->mutex);
+
+       return ret;
+}
+
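/*
 * Illustrative sketch only, not part of this change: when a frame is queued
 * here, the scan code is expected to mark the IWL_SCAN_OFFCH_TX scan roughly
 * as follows, using the scan_flags field and IWL_SCAN_FLAGS_ACTION_FRAME_TX
 * bit added to struct iwl_scan_cmd later in this diff (treat this as an
 * assumption about the scan path, not a quote from it):
 *
 *        if (priv->_agn.offchan_tx_skb)
 *                scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
 */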
 /*****************************************************************************
  *
  * mac80211 entry point functions
@@ -3172,6 +3050,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
+       hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
        if (!priv->cfg->base_params->broken_powersave)
                hw->flags |= IEEE80211_HW_SUPPORTS_PS |
                             IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
@@ -3188,8 +3068,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
        }
 
+       hw->wiphy->max_remain_on_channel_duration = 1000;
+
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
+                           WIPHY_FLAG_IBSS_RSN;
 
        /*
         * For now, disable PS by default because it affects
@@ -3213,6 +3096,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &priv->bands[IEEE80211_BAND_5GHZ];
 
+       iwl_leds_init(priv);
+
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3257,7 +3142,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
                }
        }
 
-       iwl_led_start(priv);
+       iwlagn_led_enable(priv);
 
 out:
        priv->is_open = 1;
@@ -3288,7 +3173,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct iwl_priv *priv = hw->priv;
 
@@ -3301,7 +3186,6 @@ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                dev_kfree_skb_any(skb);
 
        IWL_DEBUG_MACDUMP(priv, "leave\n");
-       return NETDEV_TX_OK;
 }
 
 void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3339,6 +3223,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
+       /*
+        * To support IBSS RSN, don't program group keys in IBSS; the
+        * hardware will then not attempt to decrypt the frames.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
        sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
        if (sta_id == IWL_INVALID_STATION)
                return -EINVAL;
@@ -3393,10 +3285,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size)
 {
        struct iwl_priv *priv = hw->priv;
        int ret = -EINVAL;
+       struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 
        IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
                     sta->addr, tid);
@@ -3451,11 +3345,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                }
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
+               /*
+                * If the limit is 0, then it wasn't initialised yet;
+                * use the default. We can do that since we take the
+                * minimum below, and we don't want to go above our
+                * default due to hardware restrictions.
+                */
+               if (sta_priv->max_agg_bufsize == 0)
+                       sta_priv->max_agg_bufsize =
+                               LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+               /*
+                * Even though in theory the peer could have different
+                * aggregation reorder buffer sizes for different sessions,
+                * our ucode doesn't allow for that and has a global limit
+                * for each station. Therefore, use the minimum of all the
+                * aggregation sessions and our default value.
+                */
+               sta_priv->max_agg_bufsize =
+                       min(sta_priv->max_agg_bufsize, buf_size);
+
                if (priv->cfg->ht_params &&
                    priv->cfg->ht_params->use_rts_for_aggregation) {
-                       struct iwl_station_priv *sta_priv =
-                               (void *) sta->drv_priv;
-
                        /*
                         * switch to RTS/CTS if it is the prefer protection
                         * method for HT traffic
@@ -3463,9 +3374,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 
                        sta_priv->lq_sta.lq.general_params.flags |=
                                LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
-                       iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
-                                       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
                }
+
+               sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+                       sta_priv->max_agg_bufsize;
+
+               iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+                               &sta_priv->lq_sta.lq, CMD_ASYNC, false);
                ret = 0;
                break;
        }
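/*
 * Worked example (illustrative; it assumes LINK_QUAL_AGG_FRAME_LIMIT_DEF is
 * 63 in this driver version): if the peer negotiates buf_size == 32, the
 * code above stores min(63, 32) == 32 in max_agg_bufsize and then programs
 * it into lq.agg_params.agg_frame_cnt_limit for the station.
 */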
@@ -3703,6 +3618,95 @@ done:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
+static void iwlagn_disable_roc(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+       struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwl_set_rxon_channel(priv, chan, ctx);
+       iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+
+       priv->_agn.hw_roc_channel = NULL;
+
+       iwlcore_commit_rxon(priv, ctx);
+
+       ctx->is_active = false;
+}
+
+static void iwlagn_bg_roc_done(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                                            _agn.hw_roc_work.work);
+
+       mutex_lock(&priv->mutex);
+       ieee80211_remain_on_channel_expired(priv->hw);
+       iwlagn_disable_roc(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+                                    struct ieee80211_channel *channel,
+                                    enum nl80211_channel_type channel_type,
+                                    int duration)
+{
+       struct iwl_priv *priv = hw->priv;
+       int err = 0;
+
+       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+               return -EOPNOTSUPP;
+
+       if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+                                       BIT(NL80211_IFTYPE_P2P_CLIENT)))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&priv->mutex);
+
+       if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
+           test_bit(STATUS_SCAN_HW, &priv->status)) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
+       priv->_agn.hw_roc_channel = channel;
+       priv->_agn.hw_roc_chantype = channel_type;
+       priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
+       iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+       queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
+                          msecs_to_jiffies(duration + 20));
+
+       msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
+       ieee80211_ready_on_channel(priv->hw);
+
+ out:
+       mutex_unlock(&priv->mutex);
+
+       return err;
+}
+
+static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+               return -EOPNOTSUPP;
+
+       cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
+
+       mutex_lock(&priv->mutex);
+       iwlagn_disable_roc(priv);
+       mutex_unlock(&priv->mutex);
+
+       return 0;
+}
+
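/*
 * Illustrative only: DIV_ROUND_UP(duration * 1000, 1024) above converts the
 * mac80211 duration from milliseconds to firmware time units (1 TU =
 * 1024 usec), so e.g. a 50 ms remain-on-channel request becomes
 * DIV_ROUND_UP(50000, 1024) = 49 TU.
 */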
 /*****************************************************************************
  *
  * driver setup and teardown
@@ -3724,6 +3728,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
        INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
        INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
        INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
+       INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
 
        iwl_setup_scan_deferred_work(priv);
 
@@ -3817,6 +3822,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
        priv->force_reset[IWL_FW_RESET].reset_duration =
                IWL_DELAY_NEXT_FORCE_FW_RELOAD;
 
+       priv->rx_statistics_jiffies = jiffies;
+
        /* Choose which receivers/antennas to use */
        if (priv->cfg->ops->hcmd->set_rxon_chain)
                priv->cfg->ops->hcmd->set_rxon_chain(priv,
@@ -3870,7 +3877,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
        kfree(priv->scan_cmd);
 }
 
-#ifdef CONFIG_IWL5000
 struct ieee80211_ops iwlagn_hw_ops = {
        .tx = iwlagn_mac_tx,
        .start = iwlagn_mac_start,
@@ -3892,14 +3898,17 @@ struct ieee80211_ops iwlagn_hw_ops = {
        .channel_switch = iwlagn_mac_channel_switch,
        .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwl_mac_tx_last_beacon,
+       .remain_on_channel = iwl_mac_remain_on_channel,
+       .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
+       .offchannel_tx = iwl_mac_offchannel_tx,
+       .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
 };
-#endif
 
 static void iwl_hw_detect(struct iwl_priv *priv)
 {
        priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
        priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
-       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+       priv->rev_id = priv->pci_dev->revision;
        IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
 }
 
@@ -3961,12 +3970,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (cfg->mod_params->disable_hw_scan) {
                dev_printk(KERN_DEBUG, &(pdev->dev),
                        "sw scan support is deprecated\n");
-#ifdef CONFIG_IWL5000
                iwlagn_hw_ops.hw_scan = NULL;
-#endif
-#ifdef CONFIG_IWL4965
-               iwl4965_hw_ops.hw_scan = NULL;
-#endif
        }
 
        hw = iwl_alloc_all(cfg);
@@ -4019,6 +4023,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
        priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
                BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_IWL_P2P
+       priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+               BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+#endif
        priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
        priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
        priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4266,6 +4274,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
         * we need to set STATUS_EXIT_PENDING bit.
         */
        set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       iwl_leds_exit(priv);
+
        if (priv->mac80211_registered) {
                ieee80211_unregister_hw(priv->hw);
                priv->mac80211_registered = 0;
@@ -4338,12 +4349,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 
 /* Hardware specific file defines the PCI IDs table for that hardware module */
 static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
-#ifdef CONFIG_IWL4965
-       {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
-       {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
-#endif /* CONFIG_IWL4965 */
-#ifdef CONFIG_IWL5000
-/* 5100 Series WiFi */
        {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
        {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
        {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4486,7 +4491,48 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
 
-#endif /* CONFIG_IWL5000 */
+/* 2x00 Series */
+       {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+
+/* 2x30 Series */
+       {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+/* 6x35 Series */
+       {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
+
+/* 200 Series */
+       {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
+
+/* 230 Series */
+       {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
 
        {0}
 };
@@ -4586,3 +4632,9 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
 MODULE_PARM_DESC(bt_ch_inhibition,
                 "Disable BT channel inhibition (default: enable)");
+
+module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
+MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
+
+module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
+MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
index da30358..20f8e41 100644
@@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg;
 extern struct iwl_cfg iwl100_bg_cfg;
 extern struct iwl_cfg iwl130_bgn_cfg;
 extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl200_bg_cfg;
+extern struct iwl_cfg iwl200_bgn_cfg;
+extern struct iwl_cfg iwl230_bg_cfg;
+extern struct iwl_cfg iwl230_bgn_cfg;
 
 extern struct iwl_mod_params iwlagn_mod_params;
 extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -110,8 +121,6 @@ void iwl_disable_ict(struct iwl_priv *priv);
 int iwl_alloc_isr_ict(struct iwl_priv *priv);
 void iwl_free_isr_ict(struct iwl_priv *priv);
 irqreturn_t iwl_isr_ict(int irq, void *data);
-bool iwl_good_ack_health(struct iwl_priv *priv,
-                        struct iwl_rx_packet *pkt);
 
 /* tx queue */
 void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -181,11 +190,7 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv);
 void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 int iwlagn_rxq_stop(struct iwl_priv *priv);
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwlagn_rx_reply_rx(struct iwl_priv *priv,
-                    struct iwl_rx_mem_buffer *rxb);
-void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-                        struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_handle(struct iwl_priv *priv);
+void iwl_setup_rx_handlers(struct iwl_priv *priv);
 
 /* tx */
 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -235,16 +240,6 @@ static inline bool iwl_is_tx_success(u32 status)
 
 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
 
-/* rx */
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb);
-bool iwl_good_plcp_health(struct iwl_priv *priv,
-                         struct iwl_rx_packet *pkt);
-void iwl_rx_statistics(struct iwl_priv *priv,
-                      struct iwl_rx_mem_buffer *rxb);
-void iwl_reply_statistics(struct iwl_priv *priv,
-                         struct iwl_rx_mem_buffer *rxb);
-
 /* scan */
 int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwlagn_post_scan(struct iwl_priv *priv);
@@ -330,8 +325,23 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
 int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
 void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
 
+/* notification wait support */
+void __acquires(wait_entry)
+iwlagn_init_notification_wait(struct iwl_priv *priv,
+                             struct iwl_notification_wait *wait_entry,
+                             void (*fn)(struct iwl_priv *priv,
+                                        struct iwl_rx_packet *pkt),
+                             u8 cmd);
+signed long __releases(wait_entry)
+iwlagn_wait_notification(struct iwl_priv *priv,
+                        struct iwl_notification_wait *wait_entry,
+                        unsigned long timeout);
+void __releases(wait_entry)
+iwlagn_remove_notification(struct iwl_priv *priv,
+                          struct iwl_notification_wait *wait_entry);
+
 /* mac80211 handlers (for 4965) */
-int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 int iwlagn_mac_start(struct ieee80211_hw *hw);
 void iwlagn_mac_stop(struct ieee80211_hw *hw);
 void iwlagn_configure_filter(struct ieee80211_hw *hw,
@@ -349,7 +359,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
 int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size);
 int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
index f893d4a..ca42ffa 100644
@@ -178,7 +178,6 @@ enum {
        REPLY_BT_COEX_PRIO_TABLE = 0xcc,
        REPLY_BT_COEX_PROT_ENV = 0xcd,
        REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
-       REPLY_BT_COEX_SCO = 0xcf,
 
        /* PAN commands */
        REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
        REPLY_WIPAN_WEPKEY = 0xb8,      /* use REPLY_WEPKEY structure */
        REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
        REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
+       REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
 
        REPLY_MAX = 0xff
 };
@@ -2477,7 +2477,7 @@ struct iwl_bt_cmd {
                                        IWLAGN_BT_VALID_BT4_TIMES | \
                                        IWLAGN_BT_VALID_3W_LUT)
 
-struct iwlagn_bt_cmd {
+struct iwl_basic_bt_cmd {
        u8 flags;
        u8 ledtime; /* unused */
        u8 max_kill;
@@ -2490,6 +2490,10 @@ struct iwlagn_bt_cmd {
        __le32 bt3_lookup_table[12];
        __le16 bt4_decision_time; /* unused */
        __le16 valid;
+};
+
+struct iwl6000_bt_cmd {
+       struct iwl_basic_bt_cmd basic;
        u8 prio_boost;
        /*
         * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
@@ -2499,6 +2503,18 @@ struct iwlagn_bt_cmd {
        __le16 rx_prio_boost;   /* SW boost of WiFi rx priority */
 };
 
+struct iwl2000_bt_cmd {
+       struct iwl_basic_bt_cmd basic;
+       __le32 prio_boost;
+       /*
+        * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
+        * when configuring the following patterns
+        */
+       u8 reserved;
+       u8 tx_prio_boost;       /* SW boost of WiFi tx priority */
+       __le16 rx_prio_boost;   /* SW boost of WiFi rx priority */
+};
+
 #define IWLAGN_BT_SCO_ACTIVE   cpu_to_le32(BIT(0))
 
 struct iwlagn_bt_sco_cmd {
@@ -2948,9 +2964,15 @@ struct iwl3945_scan_cmd {
        u8 data[0];
 } __packed;
 
+enum iwl_scan_flags {
+       /* BIT(0) currently unused */
+       IWL_SCAN_FLAGS_ACTION_FRAME_TX  = BIT(1),
+       /* bits 2-7 reserved */
+};
+
 struct iwl_scan_cmd {
        __le16 len;
-       u8 reserved0;
+       u8 scan_flags;          /* scan flags: see enum iwl_scan_flags */
        u8 channel_count;       /* # channels in channel list */
        __le16 quiet_time;      /* dwell only this # millisecs on quiet channel
                                 * (only for active scan) */
@@ -3082,6 +3104,13 @@ struct iwl4965_beacon_notif {
        __le32 ibss_mgr_status;
 } __packed;
 
+struct iwlagn_beacon_notif {
+       struct iwlagn_tx_resp beacon_notify_hdr;
+       __le32 low_tsf;
+       __le32 high_tsf;
+       __le32 ibss_mgr_status;
+} __packed;
+
 /*
  * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
  */
@@ -4143,6 +4172,10 @@ enum iwl_bt_coex_profile_traffic_load {
  */
 };
 
+#define BT_SESSION_ACTIVITY_1_UART_MSG         0x1
+#define BT_SESSION_ACTIVITY_2_UART_MSG         0x2
+
+/* BT UART message - Share Part (BT -> WiFi) */
 #define BT_UART_MSG_FRAME1MSGTYPE_POS          (0)
 #define BT_UART_MSG_FRAME1MSGTYPE_MSK          \
                (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
@@ -4227,9 +4260,12 @@ enum iwl_bt_coex_profile_traffic_load {
 #define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS    (0)
 #define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK    \
                (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS        (3)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK        \
-               (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
+#define BT_UART_MSG_FRAME7PAGE_POS             (3)
+#define BT_UART_MSG_FRAME7PAGE_MSK             \
+               (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
+#define BT_UART_MSG_FRAME7INQUIRY_POS          (4)
+#define BT_UART_MSG_FRAME7INQUIRY_MSK          \
+               (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
 #define BT_UART_MSG_FRAME7CONNECTABLE_POS      (5)
 #define BT_UART_MSG_FRAME7CONNECTABLE_MSK      \
                (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
@@ -4237,6 +4273,83 @@ enum iwl_bt_coex_profile_traffic_load {
 #define BT_UART_MSG_FRAME7RESERVED_MSK         \
                (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
 
+/* BT Session Activity 2 UART message (BT -> WiFi) */
+#define BT_UART_MSG_2_FRAME1RESERVED1_POS      (5)
+#define BT_UART_MSG_2_FRAME1RESERVED1_MSK      \
+               (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
+#define BT_UART_MSG_2_FRAME1RESERVED2_POS      (6)
+#define BT_UART_MSG_2_FRAME1RESERVED2_MSK      \
+               (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
+
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
+               (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
+#define BT_UART_MSG_2_FRAME2RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME2RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS  (0)
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK  \
+               (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS  (4)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK  \
+               (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
+#define BT_UART_MSG_2_FRAME3LEMASTER_POS       (5)
+#define BT_UART_MSG_2_FRAME3LEMASTER_MSK       \
+               (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
+#define BT_UART_MSG_2_FRAME3RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME3RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS  (0)
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK  \
+               (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_POS      (4)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK      \
+               (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
+#define BT_UART_MSG_2_FRAME4RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME4RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS      (0)
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK      \
+               (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
+               (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS    (5)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK    \
+               (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
+#define BT_UART_MSG_2_FRAME5RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME5RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
+               (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
+#define BT_UART_MSG_2_FRAME6RFU_POS            (5)
+#define BT_UART_MSG_2_FRAME6RFU_MSK            \
+               (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
+#define BT_UART_MSG_2_FRAME6RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME6RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
+               (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS     (3)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK     \
+               (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS     (4)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK     \
+               (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
+               (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
+#define BT_UART_MSG_2_FRAME7RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME7RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
+
 
 struct iwl_bt_uart_msg {
        u8 header;
@@ -4369,6 +4482,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
  * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
  */
 
+/*
+ * Minimum slot time in TU
+ */
+#define IWL_MIN_SLOT_TIME      20
+
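/*
 * Note (illustrative only): 20 TU is 20 * 1024 usec = 20.48 ms, which is why
 * the remain-on-channel path can treat this value as milliseconds in
 * msleep(IWL_MIN_SLOT_TIME) under the "TU is almost ms" approximation.
 */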
 /**
  * struct iwl_wipan_slot
  * @width: Time in TU
index efbde1f..6c30fa6 100644
 #include "iwl-helpers.h"
 
 
-MODULE_DESCRIPTION("iwl core");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
 /*
  * set bt_coex_active to true, uCode will do kill/defer
  * every time the priority line is asserted (BT is sending signals on the
@@ -65,15 +60,12 @@ MODULE_LICENSE("GPL");
  * default: bt_coex_active = true (BT_COEX_ENABLE)
  */
 bool bt_coex_active = true;
-EXPORT_SYMBOL_GPL(bt_coex_active);
 module_param(bt_coex_active, bool, S_IRUGO);
 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
 
 u32 iwl_debug_level;
-EXPORT_SYMBOL(iwl_debug_level);
 
 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwl_bcast_addr);
 
 
 /* This function both allocates and initializes hw and priv. */
@@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
 out:
        return hw;
 }
-EXPORT_SYMBOL(iwl_alloc_all);
 
 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -219,15 +210,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
                if (!is_channel_valid(ch))
                        continue;
 
-               if (is_channel_a_band(ch))
-                       sband =  &priv->bands[IEEE80211_BAND_5GHZ];
-               else
-                       sband =  &priv->bands[IEEE80211_BAND_2GHZ];
+               sband =  &priv->bands[ch->band];
 
                geo_ch = &sband->channels[sband->n_channels++];
 
                geo_ch->center_freq =
-                               ieee80211_channel_to_frequency(ch->channel);
+                       ieee80211_channel_to_frequency(ch->channel, ch->band);
                geo_ch->max_power = ch->max_power_avg;
                geo_ch->max_antenna_gain = 0xff;
                geo_ch->hw_value = ch->channel;
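/*
 * Illustrative only: with the band now passed explicitly,
 * ieee80211_channel_to_frequency() maps e.g. channel 6 on IEEE80211_BAND_2GHZ
 * to 2437 MHz and channel 36 on IEEE80211_BAND_5GHZ to 5180 MHz.
 */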
@@ -275,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
 
        return 0;
 }
-EXPORT_SYMBOL(iwlcore_init_geos);
 
 /*
  * iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -286,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
        kfree(priv->ieee_rates);
        clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
 }
-EXPORT_SYMBOL(iwlcore_free_geos);
 
 static bool iwl_is_channel_extension(struct iwl_priv *priv,
                                     enum ieee80211_band band,
@@ -331,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
                        le16_to_cpu(ctx->staging.channel),
                        ctx->ht.extension_chan_offset);
 }
-EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
 
 static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
 {
@@ -432,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
                                sizeof(ctx->timing), &ctx->timing);
 }
-EXPORT_SYMBOL(iwl_send_rxon_timing);
 
 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                           int hw_decrypt)
@@ -445,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
 
 }
-EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
 
 /* validate RXON structure is valid */
 int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -518,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        }
        return 0;
 }
-EXPORT_SYMBOL(iwl_check_rxon_cmd);
 
 /**
  * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -582,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_full_rxon_required);
 
 u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx)
@@ -596,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
        else
                return IWL_RATE_6M_PLCP;
 }
-EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
 
 static void _iwl_set_rxon_ht(struct iwl_priv *priv,
                             struct iwl_ht_config *ht_conf,
@@ -673,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
        for_each_context(priv, ctx)
                _iwl_set_rxon_ht(priv, ht_conf, ctx);
 }
-EXPORT_SYMBOL(iwl_set_rxon_ht);
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -714,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 
        return channel;
 }
-EXPORT_SYMBOL(iwl_get_single_channel_number);
 
 /**
  * iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -745,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_set_rxon_channel);
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx,
@@ -769,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
                ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
        }
 }
-EXPORT_SYMBOL(iwl_set_flags_for_band);
 
 /*
  * initialize rxon structure with default values from eeprom
@@ -841,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
        ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
        ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
 }
-EXPORT_SYMBOL(iwl_connection_init_rx_config);
 
 void iwl_set_rate(struct iwl_priv *priv)
 {
@@ -874,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
                   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
        }
 }
-EXPORT_SYMBOL(iwl_set_rate);
 
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
 {
@@ -894,35 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
                mutex_unlock(&priv->mutex);
        }
 }
-EXPORT_SYMBOL(iwl_chswitch_done);
-
-void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-       /*
-        * MULTI-FIXME
-        * See iwl_mac_channel_switch.
-        */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
-
-       if (priv->switch_rxon.switch_in_progress) {
-               if (!le32_to_cpu(csa->status) &&
-                   (csa->channel == priv->switch_rxon.channel)) {
-                       rxon->channel = csa->channel;
-                       ctx->staging.channel = csa->channel;
-                       IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-                             le16_to_cpu(csa->channel));
-                       iwl_chswitch_done(priv, true);
-               } else {
-                       IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-                             le16_to_cpu(csa->channel));
-                       iwl_chswitch_done(priv, false);
-               }
-       }
-}
-EXPORT_SYMBOL(iwl_rx_csa);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -944,13 +889,15 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
        IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
        IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
 }
-EXPORT_SYMBOL(iwl_print_rx_config_cmd);
 #endif
 /**
  * iwl_irq_handle_error - called for HW or SW error interrupt from card
  */
 void iwl_irq_handle_error(struct iwl_priv *priv)
 {
+       unsigned int reload_msec;
+       unsigned long reload_jiffies;
+
        /* Set the FW error flag -- cleared on iwl_down */
        set_bit(STATUS_FW_ERROR, &priv->status);
 
@@ -994,6 +941,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
         * commands by clearing the INIT status bit */
        clear_bit(STATUS_READY, &priv->status);
 
+       /*
+        * If the firmware keeps reloading, something is seriously wrong
+        * and the firmware cannot recover from it. Instead of retrying
+        * endlessly, which only fills the syslog and hangs the system,
+        * stop restarting here.
+        */
+       reload_jiffies = jiffies;
+       reload_msec = jiffies_to_msecs((long) reload_jiffies -
+                               (long) priv->reload_jiffies);
+       priv->reload_jiffies = reload_jiffies;
+       if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
+               priv->reload_count++;
+               if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
+                       IWL_ERR(priv, "BUG_ON, Stop restarting\n");
+                       return;
+               }
+       } else
+               priv->reload_count = 0;
+
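
/*
 * Minimal standalone sketch of the reload-throttling pattern added above
 * (illustrative only; the constants are placeholders, not the driver's
 * IWL_MIN_RELOAD_DURATION / IWL_MAX_CONTINUE_RELOAD_CNT values).
 */
#define EXAMPLE_MIN_GAP_MS      1000
#define EXAMPLE_MAX_BURST       4

static bool example_should_restart(unsigned long *last_error_jiffies,
                                   unsigned int *burst_count)
{
        unsigned long now = jiffies;
        unsigned int gap_ms = jiffies_to_msecs((long)now -
                                               (long)*last_error_jiffies);

        *last_error_jiffies = now;
        if (gap_ms <= EXAMPLE_MIN_GAP_MS) {
                /* errors arriving back to back: count them */
                if (++(*burst_count) >= EXAMPLE_MAX_BURST)
                        return false;   /* give up instead of looping forever */
        } else {
                *burst_count = 0;
        }
        return true;
}
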
        if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
                IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
                          "Restarting adapter due to uCode error.\n");
@@ -1002,7 +968,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
                        queue_work(priv->workqueue, &priv->restart);
        }
 }
-EXPORT_SYMBOL(iwl_irq_handle_error);
 
 static int iwl_apm_stop_master(struct iwl_priv *priv)
 {
@@ -1039,7 +1004,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
         */
        iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 }
-EXPORT_SYMBOL(iwl_apm_stop);
 
 
 /*
@@ -1154,13 +1118,14 @@ int iwl_apm_init(struct iwl_priv *priv)
 out:
        return ret;
 }
-EXPORT_SYMBOL(iwl_apm_init);
 
 
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
        int ret;
        s8 prev_tx_power;
+       bool defer;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
        lockdep_assert_held(&priv->mutex);
 
@@ -1188,10 +1153,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
        if (!iwl_is_ready_rf(priv))
                return -EIO;
 
-       /* scan complete use tx_power_next, need to be updated */
+       /* scan complete and commit_rxon use the tx_power_next value,
+        * so it always needs to be updated with the newest request */
        priv->tx_power_next = tx_power;
-       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
-               IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
+
+       /* do not set tx power when scanning or channel changing */
+       defer = test_bit(STATUS_SCANNING, &priv->status) ||
+               memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+       if (defer && !force) {
+               IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
                return 0;
        }
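/*
 * Illustrative only: the memcmp(&ctx->active, &ctx->staging, ...) above acts
 * as a cheap "RXON commit pending?" test: any staged but uncommitted change
 * (for example a channel switch in progress) makes the two structures differ,
 * so the TX power update is deferred and later picked up from tx_power_next
 * by commit_rxon, as the comment above notes.
 */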
 
@@ -1207,7 +1177,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
        }
        return ret;
 }
-EXPORT_SYMBOL(iwl_set_tx_power);
 
 void iwl_send_bt_config(struct iwl_priv *priv)
 {
@@ -1231,7 +1200,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
                             sizeof(struct iwl_bt_cmd), &bt_cmd))
                IWL_ERR(priv, "failed to send BT Coex Config\n");
 }
-EXPORT_SYMBOL(iwl_send_bt_config);
 
 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
 {
@@ -1249,46 +1217,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
                                        sizeof(struct iwl_statistics_cmd),
                                        &statistics_cmd);
 }
-EXPORT_SYMBOL(iwl_send_statistics_request);
-
-void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
-                          struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
-       IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
-                    sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
-
-void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
-                       "notification for %s:\n", len,
-                       get_cmd_string(pkt->hdr.cmd));
-       iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
-
-void iwl_rx_reply_error(struct iwl_priv *priv,
-                       struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
-               "seq 0x%04X ser 0x%08X\n",
-               le32_to_cpu(pkt->u.err_resp.error_type),
-               get_cmd_string(pkt->u.err_resp.cmd_id),
-               pkt->u.err_resp.cmd_id,
-               le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
-               le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_rx_reply_error);
 
 void iwl_clear_isr_stats(struct iwl_priv *priv)
 {
@@ -1340,7 +1268,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
        IWL_DEBUG_MAC80211(priv, "leave\n");
        return 0;
 }
-EXPORT_SYMBOL(iwl_mac_conf_tx);
 
 int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
 {
@@ -1348,7 +1275,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
 
        return priv->ibss_manager == IWL_IBSS_MANAGER;
 }
-EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
 
 static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
@@ -1403,9 +1329,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *tmp, *ctx = NULL;
        int err;
+       enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
 
        IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
-                          vif->type, vif->addr);
+                          viftype, vif->addr);
 
        mutex_lock(&priv->mutex);
 
@@ -1429,7 +1356,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
                        continue;
                }
 
-               if (!(possible_modes & BIT(vif->type)))
+               if (!(possible_modes & BIT(viftype)))
                        continue;
 
                /* have maybe usable context w/o interface */
@@ -1457,7 +1384,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        IWL_DEBUG_MAC80211(priv, "leave\n");
        return err;
 }
-EXPORT_SYMBOL(iwl_mac_add_interface);
 
 static void iwl_teardown_interface(struct iwl_priv *priv,
                                   struct ieee80211_vif *vif,
@@ -1510,7 +1436,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "leave\n");
 
 }
-EXPORT_SYMBOL(iwl_mac_remove_interface);
 
 int iwl_alloc_txq_mem(struct iwl_priv *priv)
 {
@@ -1525,14 +1450,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
        }
        return 0;
 }
-EXPORT_SYMBOL(iwl_alloc_txq_mem);
 
 void iwl_free_txq_mem(struct iwl_priv *priv)
 {
        kfree(priv->txq);
        priv->txq = NULL;
 }
-EXPORT_SYMBOL(iwl_free_txq_mem);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 
@@ -1571,7 +1494,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
        iwl_reset_traffic_log(priv);
        return 0;
 }
-EXPORT_SYMBOL(iwl_alloc_traffic_mem);
 
 void iwl_free_traffic_mem(struct iwl_priv *priv)
 {
@@ -1581,7 +1503,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
        kfree(priv->rx_traffic);
        priv->rx_traffic = NULL;
 }
-EXPORT_SYMBOL(iwl_free_traffic_mem);
 
 void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
                      u16 length, struct ieee80211_hdr *header)
@@ -1606,7 +1527,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
                        (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
        }
 }
-EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
 
 void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
                      u16 length, struct ieee80211_hdr *header)
@@ -1631,7 +1551,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
                        (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
        }
 }
-EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
 
 const char *get_mgmt_string(int cmd)
 {
@@ -1675,7 +1594,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
 {
        memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
        memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-       priv->led_tpt = 0;
 }
 
 /*
@@ -1768,9 +1686,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
                stats->data_cnt++;
                stats->data_bytes += len;
        }
-       iwl_leds_background(priv);
 }
-EXPORT_SYMBOL(iwl_update_stats);
 #endif
 
 static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -1909,7 +1825,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        mutex_unlock(&priv->mutex);
        return err;
 }
-EXPORT_SYMBOL(iwl_mac_change_interface);
 
 /*
  * On every watchdog tick we check (latest) time stamp. If it does not
@@ -1981,7 +1896,6 @@ void iwl_bg_watchdog(unsigned long data)
        mod_timer(&priv->watchdog, jiffies +
                  msecs_to_jiffies(IWL_WD_TICK(timeout)));
 }
-EXPORT_SYMBOL(iwl_bg_watchdog);
 
 void iwl_setup_watchdog(struct iwl_priv *priv)
 {
@@ -1993,7 +1907,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
        else
                del_timer(&priv->watchdog);
 }
-EXPORT_SYMBOL(iwl_setup_watchdog);
 
 /*
  * extended beacon time format
@@ -2019,7 +1932,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
 
        return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
 }
-EXPORT_SYMBOL(iwl_usecs_to_beacons);
 
 /* base is usually what we get from ucode with each received frame,
  * the same as HW timer counter counting down
@@ -2047,7 +1959,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
 
        return cpu_to_le32(res);
 }
-EXPORT_SYMBOL(iwl_add_beacon_time);
 
 #ifdef CONFIG_PM
 
@@ -2067,7 +1978,6 @@ int iwl_pci_suspend(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_pci_suspend);
 
 int iwl_pci_resume(struct device *device)
 {
@@ -2096,7 +2006,6 @@ int iwl_pci_resume(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_pci_resume);
 
 const struct dev_pm_ops iwl_pm_ops = {
        .suspend = iwl_pci_suspend,
@@ -2106,6 +2015,5 @@ const struct dev_pm_ops iwl_pm_ops = {
        .poweroff = iwl_pci_suspend,
        .restore = iwl_pci_resume,
 };
-EXPORT_SYMBOL(iwl_pm_ops);
 
 #endif /* CONFIG_PM */
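
The extended beacon time helpers above (iwl_usecs_to_beacons() and iwl_add_beacon_time()) pack a count of whole beacon intervals and a residual time into one 32-bit word, with the split point given by hw_params.beacon_time_tsf_bits (22 for AGN, per IWLAGN_EXT_BEACON_TIME_POS further down). A minimal sketch of the packing, with the hw_params-derived masking omitted:

/*
 * Illustrative sketch only, not part of the patch; the real helpers
 * also clamp quot and rem with masks derived from hw_params.
 */
static u32 pack_ext_beacon_time(u32 usec, u32 beacon_interval, u32 tsf_bits)
{
	u32 quot = usec / beacon_interval;	/* whole beacon intervals */
	u32 rem  = usec % beacon_interval;	/* usec into the current interval */

	return (quot << tsf_bits) + rem;	/* tsf_bits == 22 on AGN */
}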
index a347437..b316d83 100644
@@ -63,6 +63,8 @@
 #ifndef __iwl_core_h__
 #define __iwl_core_h__
 
+#include "iwl-dev.h"
+
 /************************
  * forward declarations *
  ************************/
@@ -210,12 +212,7 @@ struct iwl_lib_ops {
 
        /* temperature */
        struct iwl_temp_ops temp_ops;
-       /* check for plcp health */
-       bool (*check_plcp_health)(struct iwl_priv *priv,
-                                       struct iwl_rx_packet *pkt);
-       /* check for ack health */
-       bool (*check_ack_health)(struct iwl_priv *priv,
-                                       struct iwl_rx_packet *pkt);
+
        int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
        void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
 
@@ -227,8 +224,6 @@ struct iwl_lib_ops {
 
 struct iwl_led_ops {
        int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-       int (*on)(struct iwl_priv *priv);
-       int (*off)(struct iwl_priv *priv);
 };
 
 /* NIC specific ops */
@@ -263,6 +258,8 @@ struct iwl_mod_params {
        int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
        int antenna;            /* def: 0 = both antennas (use diversity) */
        int restart_fw;         /* def: 1 = restart firmware */
+       bool plcp_check;        /* def: true = enable plcp health check */
+       bool ack_check;         /* def: false = disable ack health check */
 };
 
 /*
@@ -307,7 +304,6 @@ struct iwl_base_params {
        u16 led_compensation;
        const bool broken_powersave;
        int chain_noise_num_beacons;
-       const bool supports_idle;
        bool adv_thermal_throttle;
        bool support_ct_kill_exit;
        const bool support_wimax_coexist;
@@ -342,6 +338,7 @@ struct iwl_bt_params {
        u8 ampdu_factor;
        u8 ampdu_density;
        bool bt_sco_disable;
+       bool bt_session_2;
 };
 /*
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -366,6 +363,7 @@ struct iwl_ht_params {
  * @adv_pm: advance power management
  * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
  * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -415,6 +413,7 @@ struct iwl_cfg {
        const bool adv_pm;
        const bool rx_with_siso_diversity;
        const bool internal_wimax_coex;
+       const bool iq_invert;
 };
 
 /***************************
@@ -444,10 +443,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
                                   struct iwl_rxon_context *ctx);
 void iwl_set_rate(struct iwl_priv *priv);
-int iwl_set_decrypted_flag(struct iwl_priv *priv,
-                          struct ieee80211_hdr *hdr,
-                          u32 decrypt_res,
-                          struct ieee80211_rx_status *stats);
 void iwl_irq_handle_error(struct iwl_priv *priv);
 int iwl_mac_add_interface(struct ieee80211_hw *hw,
                          struct ieee80211_vif *vif);
@@ -494,46 +489,21 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
 static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
                                    __le16 fc, u16 len)
 {
-       struct traffic_stats    *stats;
-
-       if (is_tx)
-               stats = &priv->tx_stats;
-       else
-               stats = &priv->rx_stats;
-
-       if (ieee80211_is_data(fc)) {
-               /* data */
-               stats->data_bytes += len;
-       }
-       iwl_leds_background(priv);
 }
 #endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
-                          struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_reply_error(struct iwl_priv *priv,
-                       struct iwl_rx_mem_buffer *rxb);
 
 /*****************************************************
 * RX
 ******************************************************/
 void iwl_cmd_queue_free(struct iwl_priv *priv);
+void iwl_cmd_queue_unmap(struct iwl_priv *priv);
 int iwl_rx_queue_alloc(struct iwl_priv *priv);
 void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
                                  struct iwl_rx_queue *q);
 int iwl_rx_queue_space(const struct iwl_rx_queue *q);
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                                         struct iwl_rx_mem_buffer *rxb);
-void iwl_recover_from_statistics(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt);
+
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 
 /* TX helpers */
 
@@ -546,6 +516,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
 void iwl_setup_watchdog(struct iwl_priv *priv);
 /*****************************************************
  * TX power
@@ -582,6 +553,10 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
                               struct ieee80211_vif *vif);
 void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
 void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif,
+                                  enum iwl_scan_type scan_type,
+                                  enum ieee80211_band band);
 
 /* For faster active scanning, scan will move to the next channel if fewer than
  * PLCP_QUIET_THRESH packets are heard on this channel within
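
iwl_scan_initiate() is now declared here with a scan type argument; the possible values (IWL_SCAN_NORMAL, IWL_SCAN_RADIO_RESET, IWL_SCAN_OFFCH_TX) appear in enum iwl_scan_type later in this patch. A hedged caller sketch, with locking (normally priv->mutex in the scan code) assumed rather than shown:

/* Hypothetical caller, for illustration only. */
static int example_start_normal_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif)
{
	return iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
				 IEEE80211_BAND_2GHZ);
}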
@@ -755,6 +730,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
        return priv->hw->wiphy->bands[band];
 }
 
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+       return priv->cfg->bt_params &&
+              priv->cfg->bt_params->advanced_bt_coexist;
+}
+
+static inline bool iwl_bt_statistics(struct iwl_priv *priv)
+{
+       return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
+}
+
 extern bool bt_coex_active;
 extern bool bt_siso_mode;
 
index b80bf7d..f52bc04 100644
 
 
 /* HW REV */
-#define CSR_HW_REV_TYPE_MSK            (0x00000F0)
+#define CSR_HW_REV_TYPE_MSK            (0x00001F0)
 #define CSR_HW_REV_TYPE_3945           (0x00000D0)
 #define CSR_HW_REV_TYPE_4965           (0x0000000)
 #define CSR_HW_REV_TYPE_5300           (0x0000020)
 #define CSR_HW_REV_TYPE_1000           (0x0000060)
 #define CSR_HW_REV_TYPE_6x00           (0x0000070)
 #define CSR_HW_REV_TYPE_6x50           (0x0000080)
-#define CSR_HW_REV_TYPE_6x50g2         (0x0000084)
-#define CSR_HW_REV_TYPE_6x00g2         (0x00000B0)
-#define CSR_HW_REV_TYPE_NONE           (0x00000F0)
+#define CSR_HW_REV_TYPE_6150           (0x0000084)
+#define CSR_HW_REV_TYPE_6x05          (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30          CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35          CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30          (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00          (0x0000100)
+#define CSR_HW_REV_TYPE_200           (0x0000110)
+#define CSR_HW_REV_TYPE_230           (0x0000120)
+#define CSR_HW_REV_TYPE_NONE           (0x00001F0)
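The type mask widens from 0x0F0 to 0x1F0 presumably because the new 2x00-family codes (0x100, 0x110, 0x120) set bit 8: masked with the old 0x0F0 they would collapse to 0x000, 0x010 and 0x020, colliding with or falling between the 4965 and 5300 codes, and CSR_HW_REV_TYPE_NONE moves from 0x0F0 to 0x1F0 to remain the all-ones value of the wider field.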
 
 /* EEPROM REG */
 #define CSR_EEPROM_REG_READ_VALID_MSK  (0x00000001)
 #define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6       (0x00000004)
 #define CSR_GP_DRIVER_REG_BIT_6050_1x2             (0x00000008)
 
+#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER       (0x00000080)
+
 /* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
 #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
index 6fe80b5..8842411 100644
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
        return ret;
 }
 
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
 static ssize_t iwl_dbgfs_sram_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
 {
-       u32 val;
+       u32 val = 0;
        char *buf;
        ssize_t ret;
-       int i;
+       int i = 0;
+       bool device_format = false;
+       int offset = 0;
+       int len = 0;
        int pos = 0;
+       int sram;
        struct iwl_priv *priv = file->private_data;
        size_t bufsz;
 
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
                else
                        priv->dbgfs_sram_len = priv->ucode_data.len;
        }
-       bufsz =  30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+       len = priv->dbgfs_sram_len;
+
+       if (len == -4) {
+               device_format = true;
+               len = 4;
+       }
+
+       bufsz =  50 + len * 4;
        buf = kmalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
+
        pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
-                       priv->dbgfs_sram_len);
+                        len);
        pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
                        priv->dbgfs_sram_offset);
-       for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
-               val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
-                                       priv->dbgfs_sram_len - i);
-               if (i < 4) {
-                       switch (i) {
-                       case 1:
-                               val &= BYTE1_MASK;
-                               break;
-                       case 2:
-                               val &= BYTE2_MASK;
-                               break;
-                       case 3:
-                               val &= BYTE3_MASK;
-                               break;
-                       }
+
+       /* adjust sram address since reads are only on even u32 boundaries */
+       offset = priv->dbgfs_sram_offset & 0x3;
+       sram = priv->dbgfs_sram_offset & ~0x3;
+
+       /* read the first u32 from sram */
+       val = iwl_read_targ_mem(priv, sram);
+
+       for (; len; len--) {
+               /* put the address at the start of every line */
+               if (i == 0)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%08X: ", sram + offset);
+
+               if (device_format)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%02x", (val >> (8 * (3 - offset))) & 0xff);
+               else
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%02x ", (val >> (8 * offset)) & 0xff);
+
+               /* if all bytes processed, read the next u32 from sram */
+               if (++offset == 4) {
+                       sram += 4;
+                       offset = 0;
+                       val = iwl_read_targ_mem(priv, sram);
                }
-               if (!(i % 16))
+
+               /* put in extra spaces and split lines for human readability */
+               if (++i == 16) {
+                       i = 0;
                        pos += scnprintf(buf + pos, bufsz - pos, "\n");
-               pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+               } else if (!(i & 7)) {
+                       pos += scnprintf(buf + pos, bufsz - pos, "   ");
+               } else if (!(i & 3)) {
+                       pos += scnprintf(buf + pos, bufsz - pos, " ");
+               }
        }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       if (i)
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
        if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
                priv->dbgfs_sram_offset = offset;
                priv->dbgfs_sram_len = len;
+       } else if (sscanf(buf, "%x", &offset) == 1) {
+               priv->dbgfs_sram_offset = offset;
+               priv->dbgfs_sram_len = -4;
        } else {
                priv->dbgfs_sram_offset = 0;
                priv->dbgfs_sram_len = 0;
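
Taken together with the read-side rewrite above, the sram debugfs file now accepts two write formats: "offset,length" selects a byte-wise hex dump (least significant byte of each word first, 16 bytes per address-prefixed line), while a bare "offset" stores the -4 sentinel that the read path turns into a single word printed most significant byte first (the device_format branch). As an illustration only, assuming the usual debugfs layout which this hunk does not show, writing "800000,40" would dump 0x40 bytes starting at SRAM offset 0x800000, and writing "800000" alone would dump just that one u32.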
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
-                                 size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char buf[256];
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "allow blinking: %s\n",
-                        (priv->allow_blinking) ? "True" : "False");
-       if (priv->allow_blinking) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "Led blinking rate: %u\n",
-                                priv->last_blink_rate);
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "Last blink time: %lu\n",
-                                priv->last_blink_time);
-       }
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
 static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
                                char __user *user_buf,
                                size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
 DEBUGFS_READ_FILE_OPS(status);
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_FILE_OPS(led);
 DEBUGFS_READ_FILE_OPS(thermal_throttling);
 DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
                         "last traffic notif: %d\n",
                priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
        pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
-                        "sco_active: %d, kill_ack_mask: %x, "
-                        "kill_cts_mask: %x\n",
-               priv->bt_ch_announce, priv->bt_sco_active,
-               priv->kill_ack_mask, priv->kill_cts_mask);
+                        "kill_ack_mask: %x, kill_cts_mask: %x\n",
+               priv->bt_ch_announce, priv->kill_ack_mask,
+               priv->kill_cts_mask);
 
        pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
        switch (priv->bt_traffic_load) {
@@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
        DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
        if (!priv->cfg->base_params->broken_powersave) {
                DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
                                 S_IWUSR | S_IRUSR);
@@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
                DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
        if (priv->cfg->base_params->ucode_tracing)
                DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
-       if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics)
+       if (iwl_bt_statistics(priv))
                DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
        DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
        DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
-       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+       if (iwl_advanced_bt_coexist(priv))
                DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
        if (priv->cfg->base_params->sensitivity_calib_by_driver)
                DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
@@ -1783,7 +1788,6 @@ err:
        iwl_dbgfs_unregister(priv);
        return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_dbgfs_register);
 
 /**
  * Remove the debugfs files and directories
@@ -1797,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
        debugfs_remove_recursive(priv->debugfs_dir);
        priv->debugfs_dir = NULL;
 }
-EXPORT_SYMBOL(iwl_dbgfs_unregister);
 
 
 
index 8dda678..68b953f 100644
@@ -34,6 +34,8 @@
 
 #include <linux/pci.h> /* for struct pci_device_id */
 #include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
 #include <net/ieee80211_radiotap.h>
 
 #include "iwl-eeprom.h"
 #include "iwl-prph.h"
 #include "iwl-fh.h"
 #include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
 #include "iwl-agn-hw.h"
 #include "iwl-led.h"
 #include "iwl-power.h"
 #include "iwl-agn-rs.h"
 #include "iwl-agn-tt.h"
 
+#define U32_PAD(n)             ((4-(n))&0x3)
+
 struct iwl_tx_queue;
 
 /* CT-KILL constants */
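
U32_PAD(n) gives the number of bytes needed to round n up to the next 4-byte boundary: (4 - 1) & 0x3 = 3, (4 - 2) & 0x3 = 2, (4 - 3) & 0x3 = 1, and both (4 - 4) & 0x3 and (4 - 0) & 0x3 are 0, so already-aligned lengths get no padding.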
@@ -136,7 +138,7 @@ struct iwl_queue {
                                * space more than this */
        int high_mark;         /* high watermark, stop queue if free
                                * space less than this */
-} __packed;
+};
 
 /* One for each TFD */
 struct iwl_tx_info {
@@ -507,6 +509,7 @@ struct iwl_station_priv {
        atomic_t pending_frames;
        bool client;
        bool asleep;
+       u8 max_agg_bufsize;
 };
 
 /**
@@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics {
        u32 unknown;
 };
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
 /* management statistics */
 enum iwl_mgmt_stats {
        MANAGEMENT_ASSOC_REQ = 0,
@@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats {
 };
 
 struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
        u32 mgmt[MANAGEMENT_MAX];
        u32 ctrl[CONTROL_MAX];
        u32 data_cnt;
        u64 data_bytes;
-};
-#else
-struct traffic_stats {
-       u64 data_bytes;
-};
 #endif
+};
 
 /*
  * iwl_switch_rxon: "channel switch" structure
@@ -1111,6 +1110,11 @@ struct iwl_event_log {
 /* BT Antenna Coupling Threshold (dB) */
 #define IWL_BT_ANTENNA_COUPLING_THRESHOLD      (35)
 
+/* Firmware reload counter and Timestamp */
+#define IWL_MIN_RELOAD_DURATION                1000 /* 1000 ms */
+#define IWL_MAX_CONTINUE_RELOAD_CNT    4
+
+
 enum iwl_reset {
        IWL_RF_RESET = 0,
        IWL_FW_RESET,
@@ -1139,6 +1143,33 @@ struct iwl_force_reset {
  */
 #define IWLAGN_EXT_BEACON_TIME_POS     22
 
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly, to wait for a
+ * notification declare it on the stack, and call
+ * iwlagn_init_notification_wait() with appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and to wait for that then
+ * call iwlagn_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+       struct list_head list;
+
+       void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
+
+       u8 cmd;
+       bool triggered;
+};
+
 enum iwl_rxon_context_id {
        IWL_RXON_CTX_BSS,
        IWL_RXON_CTX_PAN,
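
The kernel-doc above describes a one-shot wait protocol; a minimal caller sketch follows. The prototypes of iwlagn_init_notification_wait() and iwlagn_wait_notification() are not part of this hunk, so the argument lists and the command ID below are assumptions for illustration only.

/* Hypothetical callback run from the RX path with the awaited packet. */
static void example_notif_fn(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
	/* extract whatever is needed from pkt; called at most once */
}

static void example_wait_for_notif(struct iwl_priv *priv)
{
	struct iwl_notification_wait wait_entry;	/* lives on the stack */

	/* assumed argument order: (priv, entry, callback, command id) */
	iwlagn_init_notification_wait(priv, &wait_entry, example_notif_fn,
				      REPLY_ADD_STA /* placeholder cmd */);

	/* ... send the host command that triggers the notification ... */

	/* assumed argument order: (priv, entry, timeout in jiffies) */
	iwlagn_wait_notification(priv, &wait_entry, 2 * HZ);
}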
@@ -1199,6 +1230,12 @@ struct iwl_rxon_context {
        } ht;
 };
 
+enum iwl_scan_type {
+       IWL_SCAN_NORMAL,
+       IWL_SCAN_RADIO_RESET,
+       IWL_SCAN_OFFCH_TX,
+};
+
 struct iwl_priv {
 
        /* ieee device used by generic ieee processing code */
@@ -1230,12 +1267,16 @@ struct iwl_priv {
        /* track IBSS manager (last beacon) status */
        u32 ibss_manager;
 
-       /* storing the jiffies when the plcp error rate is received */
-       unsigned long plcp_jiffies;
+       /* jiffies when last recovery from statistics was performed */
+       unsigned long rx_statistics_jiffies;
 
        /* force reset */
        struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
 
+       /* firmware reload counter and timestamp */
+       unsigned long reload_jiffies;
+       int reload_count;
+
        /* we allocate array of iwl_channel_info for NIC's valid channels.
         *    Access via channel # using indirect index array */
        struct iwl_channel_info *channel_info;  /* channel info array */
@@ -1255,7 +1296,7 @@ struct iwl_priv {
        enum ieee80211_band scan_band;
        struct cfg80211_scan_request *scan_request;
        struct ieee80211_vif *scan_vif;
-       bool is_internal_short_scan;
+       enum iwl_scan_type scan_type;
        u8 scan_tx_ant[IEEE80211_NUM_BANDS];
        u8 mgmt_tx_ant;
 
@@ -1310,11 +1351,6 @@ struct iwl_priv {
        struct iwl_init_alive_resp card_alive_init;
        struct iwl_alive_resp card_alive;
 
-       unsigned long last_blink_time;
-       u8 last_blink_rate;
-       u8 allow_blinking;
-       u64 led_tpt;
-
        u16 active_rate;
 
        u8 start_calib;
@@ -1463,6 +1499,21 @@ struct iwl_priv {
                        struct iwl_bt_notif_statistics delta_statistics_bt;
                        struct iwl_bt_notif_statistics max_delta_bt;
 #endif
+
+                       /* notification wait support */
+                       struct list_head notif_waits;
+                       spinlock_t notif_wait_lock;
+                       wait_queue_head_t notif_waitq;
+
+                       /* remain-on-channel offload support */
+                       struct ieee80211_channel *hw_roc_channel;
+                       struct delayed_work hw_roc_work;
+                       enum nl80211_channel_type hw_roc_chantype;
+                       int hw_roc_duration;
+
+                       struct sk_buff *offchan_tx_skb;
+                       int offchan_tx_timeout;
+                       struct ieee80211_channel *offchan_tx_chan;
                } _agn;
 #endif
        };
@@ -1472,7 +1523,6 @@ struct iwl_priv {
        u8 bt_status;
        u8 bt_traffic_load, last_bt_traffic_load;
        bool bt_ch_announce;
-       bool bt_sco_active;
        bool bt_full_concurrent;
        bool bt_ant_couple_ok;
        __le32 kill_ack_mask;
@@ -1547,6 +1597,10 @@ struct iwl_priv {
        bool hw_ready;
 
        struct iwl_event_log event_log;
+
+       struct led_classdev led;
+       unsigned long blink_on, blink_off;
+       bool led_registered;
 }; /*iwl_priv */
 
 static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
index 358cfd7..833194a 100644
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
        BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
        return &priv->eeprom[offset];
 }
-EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
 
 static int iwl_init_otp_access(struct iwl_priv *priv)
 {
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
 {
        return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
 }
-EXPORT_SYMBOL(iwl_eeprom_query_addr);
 
 u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
 {
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
                return 0;
        return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
 }
-EXPORT_SYMBOL(iwl_eeprom_query16);
 
 /**
  * iwl_eeprom_init - read EEPROM contents
@@ -509,14 +506,12 @@ err:
 alloc_err:
        return ret;
 }
-EXPORT_SYMBOL(iwl_eeprom_init);
 
 void iwl_eeprom_free(struct iwl_priv *priv)
 {
        kfree(priv->eeprom);
        priv->eeprom = NULL;
 }
-EXPORT_SYMBOL(iwl_eeprom_free);
 
 static void iwl_init_band_reference(const struct iwl_priv *priv,
                        int eep_band, int *eeprom_ch_count,
@@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_init_channel_map);
 
 /*
  * iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
        kfree(priv->channel_info);
        priv->channel_count = 0;
 }
-EXPORT_SYMBOL(iwl_free_channel_map);
 
 /**
  * iwl_get_channel_info - Find driver's private channel info
@@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
 
        return NULL;
 }
-EXPORT_SYMBOL(iwl_get_channel_info);
index 9e6f313..98aa8af 100644
@@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
 #define EEPROM_6050_TX_POWER_VERSION    (4)
 #define EEPROM_6050_EEPROM_VERSION     (0x532)
 
-/* 6x50g2 Specific */
-#define EEPROM_6050G2_TX_POWER_VERSION    (6)
-#define EEPROM_6050G2_EEPROM_VERSION   (0x553)
+/* 6150 Specific */
+#define EEPROM_6150_TX_POWER_VERSION    (6)
+#define EEPROM_6150_EEPROM_VERSION     (0x553)
+
+/* 6x05 Specific */
+#define EEPROM_6005_TX_POWER_VERSION    (6)
+#define EEPROM_6005_EEPROM_VERSION     (0x709)
+
+/* 6x30 Specific */
+#define EEPROM_6030_TX_POWER_VERSION    (6)
+#define EEPROM_6030_EEPROM_VERSION     (0x709)
+
+/* 2x00 Specific */
+#define EEPROM_2000_TX_POWER_VERSION    (6)
+#define EEPROM_2000_EEPROM_VERSION     (0x805)
+
+/* 6x35 Specific */
+#define EEPROM_6035_TX_POWER_VERSION    (6)
+#define EEPROM_6035_EEPROM_VERSION     (0x753)
 
-/* 6x00g2 Specific */
-#define EEPROM_6000G2_TX_POWER_VERSION    (6)
-#define EEPROM_6000G2_EEPROM_VERSION   (0x709)
 
 /* OTP */
 /* lower blocks contain EEPROM image and calibration data */
@@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
 #define OTP_MAX_LL_ITEMS_1000          (3)     /* OTP blocks for 1000 */
 #define OTP_MAX_LL_ITEMS_6x00          (4)     /* OTP blocks for 6x00 */
 #define OTP_MAX_LL_ITEMS_6x50          (7)     /* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00          (4)     /* OTP blocks for 2x00 */
 
 /* 2.4 GHz */
 extern const u8 iwl_eeprom_band_1[14];
index c373b53..02499f6 100644
@@ -108,12 +108,12 @@ const char *get_cmd_string(u8 cmd)
                IWL_CMD(REPLY_WIPAN_WEPKEY);
                IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
                IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+               IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
        default:
                return "UNKNOWN";
 
        }
 }
-EXPORT_SYMBOL(get_cmd_string);
 
 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
 
@@ -252,7 +252,6 @@ out:
        mutex_unlock(&priv->sync_cmd_mutex);
        return ret;
 }
-EXPORT_SYMBOL(iwl_send_cmd_sync);
 
 int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
@@ -261,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
        return iwl_send_cmd_sync(priv, cmd);
 }
-EXPORT_SYMBOL(iwl_send_cmd);
 
 int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
 {
@@ -273,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
 
        return iwl_send_cmd_sync(priv, &cmd);
 }
-EXPORT_SYMBOL(iwl_send_cmd_pdu);
 
 int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
                           u8 id, u16 len, const void *data,
@@ -292,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
 
        return iwl_send_cmd_async(priv, &cmd);
 }
-EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
index 46ccdf4..d7f2a0b 100644
@@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO);
 MODULE_PARM_DESC(led_mode, "0=system default, "
                "1=On(RF On)/Off(RF Off), 2=blinking");
 
-static const struct {
-       u16 tpt;        /* Mb/s */
-       u8 on_time;
-       u8 off_time;
-} blink_tbl[] =
-{
-       {300, 25, 25},
-       {200, 40, 40},
-       {100, 55, 55},
-       {70, 65, 65},
-       {50, 75, 75},
-       {20, 85, 85},
-       {10, 95, 95},
-       {5, 110, 110},
-       {1, 130, 130},
-       {0, 167, 167},
-       /* SOLID_ON */
-       {-1, IWL_LED_SOLID, 0}
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+       { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+       { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+       { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+       { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+       { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+       { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+       { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+       { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+       { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+       { .throughput = 300 * 1024 - 1, .blink_time = 50 },
 };
 
-#define IWL_1MB_RATE (128 * 1024)
-#define IWL_LED_THRESHOLD (16)
-#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
-#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
-
 /*
  * Adjust led blink rate to compensate on a MAC Clock difference on every HW
  * Led blink rate analysis showed an average deviation of 0% on 3945,
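The driver-private blink_tbl above (Mb/s thresholds with separate on and off times) is replaced by a table for mac80211's throughput LED trigger: the same breakpoints (1, 5, 10, 20, 50, 70, 100, 200, 300) return scaled by 1024 with a single blink_time each, and ieee80211_create_tpt_led_trigger() in iwl_leds_init() below consumes it, which is also why led_tpt and iwl_leds_background() disappear elsewhere in this patch; the compensation helper described in this comment is kept and applied to the on/off values passed in by the trigger.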
@@ -97,133 +85,102 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
 }
 
 /* Set led pattern command */
-static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
+static int iwl_led_cmd(struct iwl_priv *priv,
+                      unsigned long on,
+                      unsigned long off)
 {
        struct iwl_led_cmd led_cmd = {
                .id = IWL_LED_LINK,
                .interval = IWL_DEF_LED_INTRVL
        };
+       int ret;
+
+       if (!test_bit(STATUS_READY, &priv->status))
+               return -EBUSY;
 
-       BUG_ON(idx > IWL_MAX_BLINK_TBL);
+       if (priv->blink_on == on && priv->blink_off == off)
+               return 0;
 
-       IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
+       IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
                        priv->cfg->base_params->led_compensation);
-       led_cmd.on =
-               iwl_blink_compensation(priv, blink_tbl[idx].on_time,
+       led_cmd.on = iwl_blink_compensation(priv, on,
                                priv->cfg->base_params->led_compensation);
-       led_cmd.off =
-               iwl_blink_compensation(priv, blink_tbl[idx].off_time,
+       led_cmd.off = iwl_blink_compensation(priv, off,
                                priv->cfg->base_params->led_compensation);
 
-       return priv->cfg->ops->led->cmd(priv, &led_cmd);
+       ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+       if (!ret) {
+               priv->blink_on = on;
+               priv->blink_off = off;
+       }
+       return ret;
 }
 
-int iwl_led_start(struct iwl_priv *priv)
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
 {
-       return priv->cfg->ops->led->on(priv);
-}
-EXPORT_SYMBOL(iwl_led_start);
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+       unsigned long on = 0;
 
-int iwl_led_associate(struct iwl_priv *priv)
-{
-       IWL_DEBUG_LED(priv, "Associated\n");
-       if (priv->cfg->led_mode == IWL_LED_BLINK)
-               priv->allow_blinking = 1;
-       priv->last_blink_time = jiffies;
+       if (brightness > 0)
+               on = IWL_LED_SOLID;
 
-       return 0;
+       iwl_led_cmd(priv, on, 0);
 }
-EXPORT_SYMBOL(iwl_led_associate);
 
-int iwl_led_disassociate(struct iwl_priv *priv)
+static int iwl_led_blink_set(struct led_classdev *led_cdev,
+                            unsigned long *delay_on,
+                            unsigned long *delay_off)
 {
-       priv->allow_blinking = 0;
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
 
-       return 0;
+       return iwl_led_cmd(priv, *delay_on, *delay_off);
 }
-EXPORT_SYMBOL(iwl_led_disassociate);
 
-/*
- * calculate blink rate according to last second Tx/Rx activities
- */
-static int iwl_get_blink_rate(struct iwl_priv *priv)
-{
-       int i;
-       /* count both tx and rx traffic to be able to
-        * handle traffic in either direction
-        */
-       u64 current_tpt = priv->tx_stats.data_bytes +
-                         priv->rx_stats.data_bytes;
-       s64 tpt = current_tpt - priv->led_tpt;
-
-       if (tpt < 0) /* wraparound */
-               tpt = -tpt;
-
-       IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
-               (long long)tpt,
-               (unsigned long long)current_tpt);
-       priv->led_tpt = current_tpt;
-
-       if (!priv->allow_blinking)
-               i = IWL_MAX_BLINK_TBL;
-       else
-               for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
-                       if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
-                               break;
-
-       IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
-       return i;
-}
-
-/*
- * this function called from handler. Since setting Led command can
- * happen very frequent we postpone led command to be called from
- * REPLY handler so we know ucode is up
- */
-void iwl_leds_background(struct iwl_priv *priv)
+void iwl_leds_init(struct iwl_priv *priv)
 {
-       u8 blink_idx;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-               priv->last_blink_time = 0;
-               return;
-       }
-       if (iwl_is_rfkill(priv)) {
-               priv->last_blink_time = 0;
-               return;
+       int mode = led_mode;
+       int ret;
+
+       if (mode == IWL_LED_DEFAULT)
+               mode = priv->cfg->led_mode;
+
+       priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+                                  wiphy_name(priv->hw->wiphy));
+       priv->led.brightness_set = iwl_led_brightness_set;
+       priv->led.blink_set = iwl_led_blink_set;
+       priv->led.max_brightness = 1;
+
+       switch (mode) {
+       case IWL_LED_DEFAULT:
+               WARN_ON(1);
+               break;
+       case IWL_LED_BLINK:
+               priv->led.default_trigger =
+                       ieee80211_create_tpt_led_trigger(priv->hw,
+                                       IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+                                       iwl_blink, ARRAY_SIZE(iwl_blink));
+               break;
+       case IWL_LED_RF_STATE:
+               priv->led.default_trigger =
+                       ieee80211_get_radio_led_name(priv->hw);
+               break;
        }
 
-       if (!priv->allow_blinking) {
-               priv->last_blink_time = 0;
-               if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
-                       priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
-                       iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
-               }
+       ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+       if (ret) {
+               kfree(priv->led.name);
                return;
        }
-       if (!priv->last_blink_time ||
-           !time_after(jiffies, priv->last_blink_time +
-                       msecs_to_jiffies(1000)))
-               return;
-
-       blink_idx = iwl_get_blink_rate(priv);
-
-       /* call only if blink rate change */
-       if (blink_idx != priv->last_blink_rate)
-               iwl_led_pattern(priv, blink_idx);
 
-       priv->last_blink_time = jiffies;
-       priv->last_blink_rate = blink_idx;
+       priv->led_registered = true;
 }
-EXPORT_SYMBOL(iwl_leds_background);
 
-void iwl_leds_init(struct iwl_priv *priv)
+void iwl_leds_exit(struct iwl_priv *priv)
 {
-       priv->last_blink_rate = 0;
-       priv->last_blink_time = 0;
-       priv->allow_blinking = 0;
-       if (led_mode != IWL_LED_DEFAULT &&
-           led_mode != priv->cfg->led_mode)
-               priv->cfg->led_mode = led_mode;
+       if (!priv->led_registered)
+               return;
+
+       led_classdev_unregister(&priv->led);
+       kfree(priv->led.name);
 }
-EXPORT_SYMBOL(iwl_leds_init);
index 9079b33..101eef1 100644
 struct iwl_priv;
 
 #define IWL_LED_SOLID 11
-#define IWL_LED_NAME_LEN 31
 #define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
 
 #define IWL_LED_ACTIVITY       (0<<1)
 #define IWL_LED_LINK           (1<<1)
 
-enum led_type {
-       IWL_LED_TRG_TX,
-       IWL_LED_TRG_RX,
-       IWL_LED_TRG_ASSOC,
-       IWL_LED_TRG_RADIO,
-       IWL_LED_TRG_MAX,
-};
-
 /*
  * LED mode
- *    IWL_LED_DEFAULT:  use system default
+ *    IWL_LED_DEFAULT:  use device default
  *    IWL_LED_RF_STATE: turn LED on/off based on RF state
  *                     LED ON  = RF ON
  *                     LED OFF = RF OFF
@@ -60,9 +51,6 @@ enum iwl_led_mode {
 };
 
 void iwl_leds_init(struct iwl_priv *priv);
-void iwl_leds_background(struct iwl_priv *priv);
-int iwl_led_start(struct iwl_priv *priv);
-int iwl_led_associate(struct iwl_priv *priv);
-int iwl_led_disassociate(struct iwl_priv *priv);
+void iwl_leds_exit(struct iwl_priv *priv);
 
 #endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
deleted file mode 100644
index bb1a742..0000000
+++ /dev/null
@@ -1,662 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-legacy.h"
-
-static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (!ctx->is_active)
-               return;
-
-       ctx->qos_data.def_qos_parm.qos_flags = 0;
-
-       if (ctx->qos_data.qos_active)
-               ctx->qos_data.def_qos_parm.qos_flags |=
-                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
-       if (ctx->ht.enabled)
-               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
-       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
-                     ctx->qos_data.qos_active,
-                     ctx->qos_data.def_qos_parm.qos_flags);
-
-       iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
-                              sizeof(struct iwl_qosparam_cmd),
-                              &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct iwl_priv *priv = hw->priv;
-       const struct iwl_channel_info *ch_info;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_channel *channel = conf->channel;
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct iwl_rxon_context *ctx;
-       unsigned long flags = 0;
-       int ret = 0;
-       u16 ch;
-       int scan_active = 0;
-       bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return -EOPNOTSUPP;
-
-       mutex_lock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-                                       channel->hw_value, changed);
-
-       if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
-                       test_bit(STATUS_SCANNING, &priv->status))) {
-               scan_active = 1;
-               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
-                      IEEE80211_CONF_CHANGE_CHANNEL)) {
-               /* mac80211 uses static for non-HT which is what we want */
-               priv->current_ht_config.smps = conf->smps_mode;
-
-               /*
-                * Recalculate chain counts.
-                *
-                * If monitor mode is enabled then mac80211 will
-                * set up the SM PS mode to OFF if an HT channel is
-                * configured.
-                */
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       for_each_context(priv, ctx)
-                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       /* during scanning mac80211 will delay channel setting until
-        * scan finish with changed = 0
-        */
-       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-               if (scan_active)
-                       goto set_ch_out;
-
-               ch = channel->hw_value;
-               ch_info = iwl_get_channel_info(priv, channel->band, ch);
-               if (!is_channel_valid(ch_info)) {
-                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
-                       ret = -EINVAL;
-                       goto set_ch_out;
-               }
-
-               spin_lock_irqsave(&priv->lock, flags);
-
-               for_each_context(priv, ctx) {
-                       /* Configure HT40 channels */
-                       if (ctx->ht.enabled != conf_is_ht(conf)) {
-                               ctx->ht.enabled = conf_is_ht(conf);
-                               ht_changed[ctx->ctxid] = true;
-                       }
-                       if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
-                       } else
-                               ctx->ht.is_40mhz = false;
-
-                       /*
-                        * Default to no protection. Protection mode will
-                        * later be set from BSS config in iwl_ht_conf
-                        */
-                       ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
-                       /* if we are switching from ht to 2.4 clear flags
-                        * from any ht related info since 2.4 does not
-                        * support ht */
-                       if ((le16_to_cpu(ctx->staging.channel) != ch))
-                               ctx->staging.flags = 0;
-
-                       iwl_set_rxon_channel(priv, channel, ctx);
-                       iwl_set_rxon_ht(priv, ht_conf);
-
-                       iwl_set_flags_for_band(priv, ctx, channel->band,
-                                              ctx->vif);
-               }
-
-               spin_unlock_irqrestore(&priv->lock, flags);
-
-               if (priv->cfg->ops->legacy->update_bcast_stations)
-                       ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
-               /* The list of supported rates and rate mask can be different
-                * for each band; since the band may have changed, reset
-                * the rate mask to what mac80211 lists */
-               iwl_set_rate(priv);
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_PS |
-                       IEEE80211_CONF_CHANGE_IDLE)) {
-               ret = iwl_power_update_mode(priv, false);
-               if (ret)
-                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
-                       priv->tx_power_user_lmt, conf->power_level);
-
-               iwl_set_tx_power(priv, conf->power_level, false);
-       }
-
-       if (!iwl_is_ready(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               goto out;
-       }
-
-       if (scan_active)
-               goto out;
-
-       for_each_context(priv, ctx) {
-               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
-                       iwlcore_commit_rxon(priv, ctx);
-               else
-                       IWL_DEBUG_INFO(priv,
-                               "Not re-sending same RXON configuration.\n");
-               if (ht_changed[ctx->ctxid])
-                       iwl_update_qos(priv, ctx);
-       }
-
-out:
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       mutex_unlock(&priv->mutex);
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       /* IBSS can only be the IWL_RXON_CTX_BSS context */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return;
-
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       spin_lock_irqsave(&priv->lock, flags);
-       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* new association get rid of ibss beacon skb */
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = NULL;
-
-       priv->timestamp = 0;
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl_scan_cancel_timeout(priv, 100);
-       if (!iwl_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               mutex_unlock(&priv->mutex);
-               return;
-       }
-
-       /* we are restarting association process
-        * clear RXON_FILTER_ASSOC_MSK bit
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwlcore_commit_rxon(priv, ctx);
-
-       iwl_set_rate(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_ht_conf(struct iwl_priv *priv,
-                       struct ieee80211_vif *vif)
-{
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct ieee80211_sta *sta;
-       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_ASSOC(priv, "enter:\n");
-
-       if (!ctx->ht.enabled)
-               return;
-
-       ctx->ht.protection =
-               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-       ctx->ht.non_gf_sta_present =
-               !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-       ht_conf->single_chain_sufficient = false;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               rcu_read_lock();
-               sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (sta) {
-                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-                       int maxstreams;
-
-                       maxstreams = (ht_cap->mcs.tx_params &
-                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-                       maxstreams += 1;
-
-                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
-                           (ht_cap->mcs.rx_mask[2] == 0))
-                               ht_conf->single_chain_sufficient = true;
-                       if (maxstreams <= 1)
-                               ht_conf->single_chain_sufficient = true;
-               } else {
-                       /*
-                        * If at all, this can only happen through a race
-                        * when the AP disconnects us while we're still
-                        * setting up the connection, in that case mac80211
-                        * will soon tell us about that.
-                        */
-                       ht_conf->single_chain_sufficient = true;
-               }
-               rcu_read_unlock();
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               ht_conf->single_chain_sufficient = true;
-               break;
-       default:
-               break;
-       }
-
-       IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_set_no_assoc(struct iwl_priv *priv,
-                                   struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       iwl_led_disassociate(priv);
-       /*
-        * inform the ucode that there is no longer an
-        * association and that no more packets should be
-        * sent
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       ctx->staging.assoc_id = 0;
-       iwlcore_commit_rxon(priv, ctx);
-}
-
-static void iwlcore_beacon_update(struct ieee80211_hw *hw,
-                                 struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       __le64 timestamp;
-       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
-       if (!skb)
-               return;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->beacon_ctx) {
-               IWL_ERR(priv, "update beacon but no beacon context!\n");
-               dev_kfree_skb(skb);
-               return;
-       }
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = skb;
-
-       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
-       priv->timestamp = le64_to_cpu(timestamp);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (!iwl_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-               return;
-       }
-
-       priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-       int ret;
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return;
-
-       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
-       if (!iwl_is_alive(priv))
-               return;
-
-       mutex_lock(&priv->mutex);
-
-       if (changes & BSS_CHANGED_QOS) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&priv->lock, flags);
-               ctx->qos_data.qos_active = bss_conf->qos;
-               iwl_update_qos(priv, ctx);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               /*
-                * the add_interface code must make sure we only ever
-                * have a single interface that could be beaconing at
-                * any time.
-                */
-               if (vif->bss_conf.enable_beacon)
-                       priv->beacon_ctx = ctx;
-               else
-                       priv->beacon_ctx = NULL;
-       }
-
-       if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
-               dev_kfree_skb(priv->beacon_skb);
-               priv->beacon_skb = ieee80211_beacon_get(hw, vif);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
-               iwl_send_rxon_timing(priv, ctx);
-
-       if (changes & BSS_CHANGED_BSSID) {
-               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
-               /*
-                * If there is currently a HW scan going on in the
-                * background then we need to cancel it else the RXON
-                * below/in post_associate will fail.
-                */
-               if (iwl_scan_cancel_timeout(priv, 100)) {
-                       IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
-                       IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
-                       mutex_unlock(&priv->mutex);
-                       return;
-               }
-
-               /* mac80211 only sets assoc when in STATION mode */
-               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-
-                       /* currently needed in a few places */
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-               } else {
-                       ctx->staging.filter_flags &=
-                               ~RXON_FILTER_ASSOC_MSK;
-               }
-
-       }
-
-       /*
-        * This needs to be after setting the BSSID in case
-        * mac80211 decides to do both changes at once because
-        * it will invoke post_associate.
-        */
-       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
-               iwlcore_beacon_update(hw, vif);
-
-       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
-                                  bss_conf->use_short_preamble);
-               if (bss_conf->use_short_preamble)
-                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-       }
-
-       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
-               IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
-                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
-               if (bss_conf->use_cts_prot)
-                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-       }
-
-       if (changes & BSS_CHANGED_BASIC_RATES) {
-               /* XXX use this information
-                *
-                * To do that, remove code from iwl_set_rate() and put something
-                * like this here:
-                *
-               if (A-band)
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates;
-               else
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates >> 4;
-                       ctx->staging.cck_basic_rates =
-                               bss_conf->basic_rates & 0xF;
-                */
-       }
-
-       if (changes & BSS_CHANGED_HT) {
-               iwl_ht_conf(priv, vif);
-
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       if (changes & BSS_CHANGED_ASSOC) {
-               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
-               if (bss_conf->assoc) {
-                       priv->timestamp = bss_conf->timestamp;
-
-                       iwl_led_associate(priv);
-
-                       if (!iwl_is_rfkill(priv))
-                               priv->cfg->ops->legacy->post_associate(priv);
-               } else
-                       iwl_set_no_assoc(priv, vif);
-       }
-
-       if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
-               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
-                                  changes);
-               ret = iwl_send_rxon_assoc(priv, ctx);
-               if (!ret) {
-                       /* Sync active_rxon with latest change. */
-                       memcpy((void *)&ctx->active,
-                               &ctx->staging,
-                               sizeof(struct iwl_rxon_cmd));
-               }
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               if (vif->bss_conf.enable_beacon) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-                       iwl_led_associate(priv);
-                       priv->cfg->ops->legacy->config_ap(priv);
-               } else
-                       iwl_set_no_assoc(priv, vif);
-       }
-
-       if (changes & BSS_CHANGED_IBSS) {
-               ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
-                                                       bss_conf->ibss_joined);
-               if (ret)
-                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
-                               bss_conf->ibss_joined ? "add" : "remove",
-                               bss_conf->bssid);
-       }
-
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_isr_legacy(int irq, void *data)
-{
-       struct iwl_priv *priv = data;
-       u32 inta, inta_mask;
-       u32 inta_fh;
-       unsigned long flags;
-       if (!priv)
-               return IRQ_NONE;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Disable (but don't clear!) interrupts here to avoid
-        *    back-to-back ISRs and sporadic interrupts from our NIC.
-        * If we have something to service, the tasklet will re-enable ints.
-        * If we *don't* have something, we'll re-enable before leaving here. */
-       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
-       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-       /* Discover which interrupts are active/pending */
-       inta = iwl_read32(priv, CSR_INT);
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
-       /* Ignore interrupt if there's nothing in NIC to service.
-        * This may be due to IRQ shared with another device,
-        * or due to sporadic interrupts thrown from our NIC. */
-       if (!inta && !inta_fh) {
-               IWL_DEBUG_ISR(priv,
-                       "Ignore interrupt, inta == 0, inta_fh == 0\n");
-               goto none;
-       }
-
-       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-               /* Hardware disappeared. It might have already raised
-                * an interrupt */
-               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-               goto unplugged;
-       }
-
-       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                     inta, inta_mask, inta_fh);
-
-       inta &= ~CSR_INT_BIT_SCD;
-
-       /* iwl_irq_tasklet() will service interrupts and re-enable them */
-       if (likely(inta || inta_fh))
-               tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_HANDLED;
-
-none:
-       /* re-enable interrupts here since we don't have anything to service. */
-       /* only Re-enable if disabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_enable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_isr_legacy);
-
-/*
- *  iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
- *  function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-                              struct ieee80211_tx_info *info,
-                              __le16 fc, __le32 *tx_flags)
-{
-       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-               *tx_flags |= TX_CMD_FLG_RTS_MSK;
-               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
-               if (!ieee80211_is_mgmt(fc))
-                       return;
-
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_AUTH):
-               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
-                       break;
-               }
-       } else if (info->control.rates[0].flags &
-                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
-               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-               *tx_flags |= TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
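The legacy interrupt handler above follows the usual shared-IRQ top-half pattern: mask the device, read the two status registers, bail out if nothing is pending (the line may be shared), treat an all-ones read as a vanished card, and otherwise defer the real work to a tasklet. The following standalone sketch condenses that flow; it is illustration only, with invented register names and stub helpers standing in for the driver's MMIO and tasklet APIs.

        /* shared_irq_sketch.c -- illustrative only, not driver code */
        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t regs[3];                     /* fake MMIO space */
        enum { REG_INT_MASK, REG_INT_STATUS, REG_FH_INT_STATUS };

        static uint32_t read_reg(int r)              { return regs[r]; }
        static void     write_reg(int r, uint32_t v) { regs[r] = v; }
        static void     schedule_bottom_half(void)   { puts("bottom half scheduled"); }
        static void     enable_interrupts(void)      { puts("interrupts re-enabled"); }

        /* Returns true when the interrupt was ours, false for a shared-line miss. */
        static bool shared_irq_top_half(void)
        {
                uint32_t inta, inta_fh;

                write_reg(REG_INT_MASK, 0);          /* mask, don't clear */
                inta    = read_reg(REG_INT_STATUS);
                inta_fh = read_reg(REG_FH_INT_STATUS);

                if (!inta && !inta_fh) {             /* nothing pending for us */
                        enable_interrupts();
                        return false;
                }
                if (inta == 0xFFFFFFFF)              /* card likely unplugged */
                        return true;

                schedule_bottom_half();              /* real work runs later */
                return true;
        }

        int main(void)
        {
                regs[REG_INT_STATUS] = 0x8;          /* pretend one cause fired */
                printf("handled: %d\n", shared_irq_top_half());
                return 0;
        }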
index 1eec18d..576795e 100644
@@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
        else
                cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (iwl_advanced_bt_coexist(priv)) {
                if (!priv->cfg->bt_params->bt_sco_disable)
                        cmd->flags |= IWL_POWER_BT_SCO_ENA;
                else
@@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
        else
                cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (iwl_advanced_bt_coexist(priv)) {
                if (!priv->cfg->bt_params->bt_sco_disable)
                        cmd->flags |= IWL_POWER_BT_SCO_ENA;
                else
@@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
 
        if (priv->cfg->base_params->broken_powersave)
                iwl_power_sleep_cam_cmd(priv, cmd);
-       else if (priv->cfg->base_params->supports_idle &&
-                priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+       else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
                iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
        else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
                 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
@@ -428,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_power_set_mode);
 
 int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 {
@@ -437,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
        iwl_power_build_cmd(priv, &cmd);
        return iwl_power_set_mode(priv, &cmd, force);
 }
-EXPORT_SYMBOL(iwl_power_update_mode);
 
 /* initialize to default */
 void iwl_power_initialize(struct iwl_priv *priv)
@@ -451,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
        memset(&priv->power_data.sleep_cmd, 0,
                sizeof(priv->power_data.sleep_cmd));
 }
-EXPORT_SYMBOL(iwl_power_initialize);
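The power-management hunks above fold the open-coded "priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist" test into a single iwl_advanced_bt_coexist() call. The helper's definition is not part of this diff; below is a minimal sketch under the assumption that it simply wraps that NULL-plus-flag check, with the driver structures trimmed down for illustration.

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        /* Trimmed-down stand-ins for the driver structures (illustration only). */
        struct iwl_bt_params { bool advanced_bt_coexist; };
        struct iwl_cfg       { const struct iwl_bt_params *bt_params; };
        struct iwl_priv      { const struct iwl_cfg *cfg; };

        /* Plausible shape of the helper the diff switches to: it folds the
         * "bt_params may be NULL" check and the flag test into one call. */
        static inline bool iwl_advanced_bt_coexist(const struct iwl_priv *priv)
        {
                return priv->cfg->bt_params &&
                       priv->cfg->bt_params->advanced_bt_coexist;
        }

        int main(void)
        {
                static const struct iwl_bt_params bt = { .advanced_bt_coexist = true };
                static const struct iwl_cfg with_bt = { .bt_params = &bt };
                static const struct iwl_cfg no_bt   = { .bt_params = NULL };
                struct iwl_priv p1 = { .cfg = &with_bt }, p2 = { .cfg = &no_bt };

                printf("%d %d\n", iwl_advanced_bt_coexist(&p1),
                                  iwl_advanced_bt_coexist(&p2));   /* prints: 1 0 */
                return 0;
        }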
index 87a6fd8..6f9a2fa 100644
@@ -29,6 +29,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <net/mac80211.h>
 #include <asm/unaligned.h>
 #include "iwl-eeprom.h"
 #include "iwl-sta.h"
 #include "iwl-io.h"
 #include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
 /*
  * Rx theory of operation
  *
@@ -118,7 +127,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
                s = 0;
        return s;
 }
-EXPORT_SYMBOL(iwl_rx_queue_space);
 
 /**
  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -170,7 +178,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
  exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
 
 int iwl_rx_queue_alloc(struct iwl_priv *priv)
 {
@@ -211,10 +218,105 @@ err_rb:
 err_bd:
        return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_rx_queue_alloc);
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+
+static void iwl_rx_reply_alive(struct iwl_priv *priv,
+                              struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_alive_resp *palive;
+       struct delayed_work *pwork;
+
+       palive = &pkt->u.alive_frame;
+
+       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+                      "0x%01X 0x%01X\n",
+                      palive->is_valid, palive->ver_type,
+                      palive->ver_subtype);
+
+       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+               memcpy(&priv->card_alive_init,
+                      &pkt->u.alive_frame,
+                      sizeof(struct iwl_init_alive_resp));
+               pwork = &priv->init_alive_start;
+       } else {
+               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+               memcpy(&priv->card_alive, &pkt->u.alive_frame,
+                      sizeof(struct iwl_alive_resp));
+               pwork = &priv->alive_start;
+       }
+
+       /* We delay the ALIVE response by 5ms to
+        * give the HW RF Kill time to activate... */
+       if (palive->is_valid == UCODE_VALID_OK)
+               queue_delayed_work(priv->workqueue, pwork,
+                                  msecs_to_jiffies(5));
+       else {
+               IWL_WARN(priv, "%s uCode did not respond OK.\n",
+                       (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
+                       "init" : "runtime");
+               /*
+                * If the init uCode fails to load, try loading it again.
+                * We should not get into this situation, but if it does
+                * happen, we must not move on to loading the "runtime"
+                * uCode without having properly calibrated the device.
+                */
+               if (palive->ver_subtype == INITIALIZE_SUBTYPE)
+                       priv->ucode_type = UCODE_NONE;
+               queue_work(priv->workqueue, &priv->restart);
+       }
+}
+
+static void iwl_rx_reply_error(struct iwl_priv *priv,
+                              struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+               "seq 0x%04X ser 0x%08X\n",
+               le32_to_cpu(pkt->u.err_resp.error_type),
+               get_cmd_string(pkt->u.err_resp.cmd_id),
+               pkt->u.err_resp.cmd_id,
+               le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+               le32_to_cpu(pkt->u.err_resp.error_info));
+}
+
+static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+       /*
+        * MULTI-FIXME
+        * See iwl_mac_channel_switch.
+        */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
+
+       if (priv->switch_rxon.switch_in_progress) {
+               if (!le32_to_cpu(csa->status) &&
+                   (csa->channel == priv->switch_rxon.channel)) {
+                       rxon->channel = csa->channel;
+                       ctx->staging.channel = csa->channel;
+                       IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+                             le16_to_cpu(csa->channel));
+                       iwl_chswitch_done(priv, true);
+               } else {
+                       IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+                             le16_to_cpu(csa->channel));
+                       iwl_chswitch_done(priv, false);
+               }
+       }
+}
 
 
-void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
                                          struct iwl_rx_mem_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -229,48 +331,494 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
 }
-EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
 
-void iwl_recover_from_statistics(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
+static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
+                                 struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+       IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+                    sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+
+static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                            struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+                       "notification for %s:\n", len,
+                       get_cmd_string(pkt->hdr.cmd));
+       iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+}
+
+static void iwl_rx_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_DEBUG
+       u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
+       u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+               "tsf:0x%.8x%.8x rate:%d\n",
+               status & TX_STATUS_MSK,
+               beacon->beacon_notify_hdr.failure_frame,
+               le32_to_cpu(beacon->ibss_mgr_status),
+               le32_to_cpu(beacon->high_tsf),
+               le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+       if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+               queue_work(priv->workqueue, &priv->beacon_update);
+}
+
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is low and the aggregated BA timeout retry count
+ * exceeds BA_TIMEOUT_MAX, reload the firmware to bring the system back to a
+ * normal operational state.
+ */
+static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
+{
+       int actual_delta, expected_delta, ba_timeout_delta;
+       struct statistics_tx *cur, *old;
+
+       if (priv->_agn.agg_tids_count)
+               return true;
+
+       if (iwl_bt_statistics(priv)) {
+               cur = &pkt->u.stats_bt.tx;
+               old = &priv->_agn.statistics_bt.tx;
+       } else {
+               cur = &pkt->u.stats.tx;
+               old = &priv->_agn.statistics.tx;
+       }
+
+       actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
+                      le32_to_cpu(old->actual_ack_cnt);
+       expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
+                        le32_to_cpu(old->expected_ack_cnt);
+
+       /* Values should not be negative, but we do not trust the firmware */
+       if (actual_delta <= 0 || expected_delta <= 0)
+               return true;
+
+       ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
+                          le32_to_cpu(old->agg.ba_timeout);
+
+       if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
+           ba_timeout_delta > BA_TIMEOUT_CNT) {
+               IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
+                               actual_delta, expected_delta, ba_timeout_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               /*
+                * This is ifdef'ed on DEBUGFS because otherwise the
+                * statistics aren't available. If DEBUGFS is set but
+                * DEBUG is not, these will just compile out.
+                */
+               IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
+                               priv->_agn.delta_statistics.tx.rx_detected_cnt);
+               IWL_DEBUG_RADIO(priv,
+                               "ack_or_ba_timeout_collision delta %d\n",
+                               priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
+#endif
+
+               if (ba_timeout_delta >= BA_TIMEOUT_MAX)
+                       return false;
+       }
+
+       return true;
+}
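Stripped of the driver plumbing, iwl_good_ack_health() reduces to two comparisons on counter deltas: the actual/expected ACK ratio against ACK_CNT_RATIO, and the BA timeout delta against BA_TIMEOUT_CNT and BA_TIMEOUT_MAX. Here is a standalone sketch with made-up counter values; only the threshold constants come from the code above.

        #include <stdbool.h>
        #include <stdio.h>

        #define ACK_CNT_RATIO  50   /* minimum actual/expected ACKs, in percent */
        #define BA_TIMEOUT_CNT  5   /* BA timeouts that make a low ratio suspicious */
        #define BA_TIMEOUT_MAX 16   /* BA timeouts that force a firmware reload */

        /* Returns false when the link looks unhealthy enough to reload firmware. */
        static bool ack_health_ok(int actual_delta, int expected_delta,
                                  int ba_timeout_delta)
        {
                if (actual_delta <= 0 || expected_delta <= 0)
                        return true;    /* don't trust garbage counters */

                if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
                    ba_timeout_delta > BA_TIMEOUT_CNT)
                        return ba_timeout_delta < BA_TIMEOUT_MAX;

                return true;
        }

        int main(void)
        {
                /* 40 of 100 expected ACKs seen plus 20 BA timeouts: unhealthy. */
                printf("%d\n", ack_health_ok(40, 100, 20));   /* prints 0 */
                /* 90 of 100 expected ACKs seen: healthy regardless of timeouts. */
                printf("%d\n", ack_health_ok(90, 100, 20));   /* prints 1 */
                return 0;
        }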
+
+/**
+ * iwl_good_plcp_health - checks the PLCP error rate.
+ *
+ * When the PLCP error rate exceeds the threshold, reset the radio
+ * to restore throughput.
+ */
+static bool iwl_good_plcp_health(struct iwl_priv *priv,
+                                struct iwl_rx_packet *pkt, unsigned int msecs)
 {
+       int delta;
+       int threshold = priv->cfg->base_params->plcp_delta_threshold;
+
+       if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+               IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+               return true;
+       }
+
+       if (iwl_bt_statistics(priv)) {
+               struct statistics_rx_bt *cur, *old;
+
+               cur = &pkt->u.stats_bt.rx;
+               old = &priv->_agn.statistics_bt.rx;
+
+               delta = le32_to_cpu(cur->ofdm.plcp_err) -
+                       le32_to_cpu(old->ofdm.plcp_err) +
+                       le32_to_cpu(cur->ofdm_ht.plcp_err) -
+                       le32_to_cpu(old->ofdm_ht.plcp_err);
+       } else {
+               struct statistics_rx *cur, *old;
+
+               cur = &pkt->u.stats.rx;
+               old = &priv->_agn.statistics.rx;
+
+               delta = le32_to_cpu(cur->ofdm.plcp_err) -
+                       le32_to_cpu(old->ofdm.plcp_err) +
+                       le32_to_cpu(cur->ofdm_ht.plcp_err) -
+                       le32_to_cpu(old->ofdm_ht.plcp_err);
+       }
+
+       /* Can be negative if the firmware reset the statistics */
+       if (delta <= 0)
+               return true;
+
+       if ((delta * 100 / msecs) > threshold) {
+               IWL_DEBUG_RADIO(priv,
+                               "plcp health threshold %u delta %d msecs %u\n",
+                               threshold, delta, msecs);
+               return false;
+       }
+
+       return true;
+}
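iwl_good_plcp_health() is a rate test: the summed OFDM and OFDM-HT PLCP error delta, scaled to a per-100-ms figure, is compared against the configured threshold. A standalone sketch with example numbers follows; the threshold value and the use of 0 to mean "check disabled" are placeholders, not the driver's actual constants.

        #include <stdbool.h>
        #include <stdio.h>

        /* Returns false when the PLCP error rate is high enough to reset the radio.
         * 'threshold' plays the role of base_params->plcp_delta_threshold, and a
         * value of 0 stands in here for the "check disabled" sentinel. */
        static bool plcp_health_ok(int plcp_err_delta, unsigned int msecs,
                                   int threshold)
        {
                if (threshold == 0)
                        return true;            /* check disabled */
                if (plcp_err_delta <= 0)
                        return true;            /* counters were reset */

                /* Scale the delta to errors per ~100 ms and compare. */
                return (plcp_err_delta * 100 / msecs) <= threshold;
        }

        int main(void)
        {
                /* 120 PLCP errors in 200 ms is 60 per 100 ms: above a threshold of 50. */
                printf("%d\n", plcp_health_ok(120, 200, 50));   /* prints 0 */
                printf("%d\n", plcp_health_ok(40, 200, 50));    /* prints 1 */
                return 0;
        }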
+
+static void iwl_recover_from_statistics(struct iwl_priv *priv,
+                                       struct iwl_rx_packet *pkt)
+{
+       const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
+       unsigned int msecs;
+       unsigned long stamp;
+
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
-       if (iwl_is_any_associated(priv)) {
-               if (priv->cfg->ops->lib->check_ack_health) {
-                       if (!priv->cfg->ops->lib->check_ack_health(
-                           priv, pkt)) {
-                               /*
-                                * low ack count detected
-                                * restart Firmware
-                                */
-                               IWL_ERR(priv, "low ack count detected, "
-                                       "restart firmware\n");
-                               if (!iwl_force_reset(priv, IWL_FW_RESET, false))
-                                       return;
-                       }
+
+       stamp = jiffies;
+       msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
+
+       /* Only gather statistics and update time stamp when not associated */
+       if (!iwl_is_any_associated(priv))
+               goto out;
+
+       /* Do not check/recover when we do not have enough statistics data */
+       if (msecs < 99)
+               return;
+
+       if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
+               IWL_ERR(priv, "low ack count detected, restart firmware\n");
+               if (!iwl_force_reset(priv, IWL_FW_RESET, false))
+                       return;
+       }
+
+       if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs))
+               iwl_force_reset(priv, IWL_RF_RESET, false);
+
+out:
+       if (iwl_bt_statistics(priv))
+               memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
+                       sizeof(priv->_agn.statistics_bt));
+       else
+               memcpy(&priv->_agn.statistics, &pkt->u.stats,
+                       sizeof(priv->_agn.statistics));
+
+       priv->rx_statistics_jiffies = stamp;
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ *   before the beacon arrives.  This measurement can be done only if we know
+ *   exactly when to expect beacons, therefore only when we're associated. */
+static void iwl_rx_calc_noise(struct iwl_priv *priv)
+{
+       struct statistics_rx_non_phy *rx_info;
+       int num_active_rx = 0;
+       int total_silence = 0;
+       int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+       int last_rx_noise;
+
+       if (iwl_bt_statistics(priv))
+               rx_info = &(priv->_agn.statistics_bt.rx.general.common);
+       else
+               rx_info = &(priv->_agn.statistics.rx.general);
+       bcn_silence_a =
+               le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+       bcn_silence_b =
+               le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+       bcn_silence_c =
+               le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+       if (bcn_silence_a) {
+               total_silence += bcn_silence_a;
+               num_active_rx++;
+       }
+       if (bcn_silence_b) {
+               total_silence += bcn_silence_b;
+               num_active_rx++;
+       }
+       if (bcn_silence_c) {
+               total_silence += bcn_silence_c;
+               num_active_rx++;
+       }
+
+       /* Average among active antennas */
+       if (num_active_rx)
+               last_rx_noise = (total_silence / num_active_rx) - 107;
+       else
+               last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+       IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+                       bcn_silence_a, bcn_silence_b, bcn_silence_c,
+                       last_rx_noise);
+}
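iwl_rx_calc_noise() averages the beacon-silence RSSI of whichever RX chains report a non-zero value and subtracts 107 to express the result in dBm. A standalone sketch with illustrative inputs is below; the IN_BAND_FILTER masking is omitted and the "not available" constant is a stand-in, not the driver's value.

        #include <stdio.h>

        #define NOISE_NOT_AVAILABLE (-127)   /* stand-in, not the driver constant */

        /* Average the silence measurements of the chains that actually reported
         * one (non-zero) and convert to dBm with the driver's -107 offset. */
        static int calc_noise_dbm(const unsigned silence[3])
        {
                int total = 0, active = 0;

                for (int i = 0; i < 3; i++) {
                        if (silence[i]) {
                                total += silence[i];
                                active++;
                        }
                }
                return active ? (total / active) - 107 : NOISE_NOT_AVAILABLE;
        }

        int main(void)
        {
                unsigned two_chains[3] = { 30, 26, 0 };   /* chain C inactive */
                printf("%d dBm\n", calc_noise_dbm(two_chains));   /* (28) - 107 = -79 */
                return 0;
        }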
+
+/*
+ *  Based on the assumption that all statistics counters are DWORDs.
+ *  FIXME: this function is for debugging only and does not handle
+ *  counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+                                       __le32 *stats)
+{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       int i, size;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+       struct statistics_general_common *general, *accum_general;
+       struct statistics_tx *tx, *accum_tx;
+
+       if (iwl_bt_statistics(priv)) {
+               prev_stats = (__le32 *)&priv->_agn.statistics_bt;
+               accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
+               size = sizeof(struct iwl_bt_notif_statistics);
+               general = &priv->_agn.statistics_bt.general.common;
+               accum_general = &priv->_agn.accum_statistics_bt.general.common;
+               tx = &priv->_agn.statistics_bt.tx;
+               accum_tx = &priv->_agn.accum_statistics_bt.tx;
+               delta = (u32 *)&priv->_agn.delta_statistics_bt;
+               max_delta = (u32 *)&priv->_agn.max_delta_bt;
+       } else {
+               prev_stats = (__le32 *)&priv->_agn.statistics;
+               accum_stats = (u32 *)&priv->_agn.accum_statistics;
+               size = sizeof(struct iwl_notif_statistics);
+               general = &priv->_agn.statistics.general.common;
+               accum_general = &priv->_agn.accum_statistics.general.common;
+               tx = &priv->_agn.statistics.tx;
+               accum_tx = &priv->_agn.accum_statistics.tx;
+               delta = (u32 *)&priv->_agn.delta_statistics;
+               max_delta = (u32 *)&priv->_agn.max_delta;
+       }
+       for (i = sizeof(__le32); i < size;
+            i += sizeof(__le32), stats++, prev_stats++, delta++,
+            max_delta++, accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta = (le32_to_cpu(*stats) -
+                               le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
                }
-               if (priv->cfg->ops->lib->check_plcp_health) {
-                       if (!priv->cfg->ops->lib->check_plcp_health(
-                           priv, pkt)) {
-                               /*
-                                * high plcp error detected
-                                * reset Radio
-                                */
-                               iwl_force_reset(priv, IWL_RF_RESET, false);
-                       }
+       }
+
+       /* reset accumulative statistics for "no-counter" type statistics */
+       accum_general->temperature = general->temperature;
+       accum_general->temperature_m = general->temperature_m;
+       accum_general->ttl_timestamp = general->ttl_timestamp;
+       accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
+       accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
+       accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
+#endif
+}
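iwl_accumulative_statistics() walks the whole statistics structure as a flat array of 32-bit counters, starting past the leading flags word, and accumulates per-field growth while tracking the largest single-step delta. Below is a host-endian sketch of the same walk over a toy counter block; note that the driver refreshes its "previous" snapshot separately, in iwl_recover_from_statistics().

        #include <stdint.h>
        #include <stdio.h>

        #define N_COUNTERS 4

        /* Walk current/previous snapshots as flat u32 arrays, accumulating the
         * per-counter growth and remembering the largest single-step delta.
         * (The driver does the same over __le32 fields, skipping the flags word.) */
        static void accumulate(const uint32_t *cur, const uint32_t *prev,
                               uint32_t *accum, uint32_t *max_delta)
        {
                for (int i = 0; i < N_COUNTERS; i++) {
                        if (cur[i] > prev[i]) {
                                uint32_t delta = cur[i] - prev[i];

                                accum[i] += delta;
                                if (delta > max_delta[i])
                                        max_delta[i] = delta;
                        }
                }
        }

        int main(void)
        {
                uint32_t prev[N_COUNTERS]  = { 10, 5, 0, 7 };
                uint32_t cur[N_COUNTERS]   = { 15, 5, 3, 6 };   /* last one went backwards */
                uint32_t accum[N_COUNTERS] = { 0 }, max_d[N_COUNTERS] = { 0 };

                accumulate(cur, prev, accum, max_d);
                printf("accum: %u %u %u %u\n",
                       accum[0], accum[1], accum[2], accum[3]);   /* accum: 5 0 3 0 */
                return 0;
        }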
+
+static void iwl_rx_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+       const int reg_recalib_period = 60;
+       int change;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       if (iwl_bt_statistics(priv)) {
+               IWL_DEBUG_RX(priv,
+                            "Statistics notification received (%d vs %d).\n",
+                            (int)sizeof(struct iwl_bt_notif_statistics),
+                            le32_to_cpu(pkt->len_n_flags) &
+                            FH_RSCSR_FRAME_SIZE_MSK);
+
+               change = ((priv->_agn.statistics_bt.general.common.temperature !=
+                          pkt->u.stats_bt.general.common.temperature) ||
+                          ((priv->_agn.statistics_bt.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+                          (pkt->u.stats_bt.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+       } else {
+               IWL_DEBUG_RX(priv,
+                            "Statistics notification received (%d vs %d).\n",
+                            (int)sizeof(struct iwl_notif_statistics),
+                            le32_to_cpu(pkt->len_n_flags) &
+                            FH_RSCSR_FRAME_SIZE_MSK);
+
+               change = ((priv->_agn.statistics.general.common.temperature !=
+                          pkt->u.stats.general.common.temperature) ||
+                          ((priv->_agn.statistics.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+                          (pkt->u.stats.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+       }
+
+       iwl_recover_from_statistics(priv, pkt);
+
+       set_bit(STATUS_STATISTICS, &priv->status);
+
+       /* Reschedule the statistics timer to occur in
+        * reg_recalib_period seconds to ensure we get a
+        * thermal update even if the uCode doesn't give
+        * us one */
+       mod_timer(&priv->statistics_periodic, jiffies +
+                 msecs_to_jiffies(reg_recalib_period * 1000));
+
+       if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+           (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+               iwl_rx_calc_noise(priv);
+               queue_work(priv->workqueue, &priv->run_time_calib_work);
+       }
+       if (priv->cfg->ops->lib->temp_ops.temperature && change)
+               priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+static void iwl_rx_reply_statistics(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               memset(&priv->_agn.accum_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_agn.delta_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_agn.max_delta, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_agn.accum_statistics_bt, 0,
+                       sizeof(struct iwl_bt_notif_statistics));
+               memset(&priv->_agn.delta_statistics_bt, 0,
+                       sizeof(struct iwl_bt_notif_statistics));
+               memset(&priv->_agn.max_delta_bt, 0,
+                       sizeof(struct iwl_bt_notif_statistics));
+#endif
+               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+       }
+       iwl_rx_statistics(priv, rxb);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl_rx_card_state_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+       unsigned long status = priv->status;
+
+       IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & CT_CARD_DISABLED) ?
+                         "Reached" : "Not reached");
+
+       if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+                    CT_CARD_DISABLED)) {
+
+               iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+                           CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+               iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+               if (!(flags & RXON_CARD_DISABLED)) {
+                       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+                       iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
                }
+               if (flags & CT_CARD_DISABLED)
+                       iwl_tt_enter_ct_kill(priv);
+       }
+       if (!(flags & CT_CARD_DISABLED))
+               iwl_tt_exit_ct_kill(priv);
+
+       if (flags & HW_CARD_DISABLED)
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+
+       if (!(flags & RXON_CARD_DISABLED))
+               iwl_scan_cancel(priv);
+
+       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+            test_bit(STATUS_RF_KILL_HW, &priv->status)))
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+                       test_bit(STATUS_RF_KILL_HW, &priv->status));
+       else
+               wake_up_interruptible(&priv->wait_command_queue);
+}
+
+static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_mem_buffer *rxb)
+
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_missed_beacon_notif *missed_beacon;
+
+       missed_beacon = &pkt->u.missed_beacon;
+       if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+           priv->missed_beacon_threshold) {
+               IWL_DEBUG_CALIB(priv,
+                   "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+                   le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+                   le32_to_cpu(missed_beacon->total_missed_becons),
+                   le32_to_cpu(missed_beacon->num_recvd_beacons),
+                   le32_to_cpu(missed_beacon->num_expected_beacons));
+               if (!test_bit(STATUS_SCANNING, &priv->status))
+                       iwl_init_sensitivity(priv);
        }
 }
-EXPORT_SYMBOL(iwl_recover_from_statistics);
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       priv->_agn.last_phy_res_valid = true;
+       memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+              sizeof(struct iwl_rx_phy_res));
+}
 
 /*
  * returns non-zero if packet should be dropped
  */
-int iwl_set_decrypted_flag(struct iwl_priv *priv,
-                          struct ieee80211_hdr *hdr,
-                          u32 decrypt_res,
-                          struct ieee80211_rx_status *stats)
+static int iwl_set_decrypted_flag(struct iwl_priv *priv,
+                                 struct ieee80211_hdr *hdr,
+                                 u32 decrypt_res,
+                                 struct ieee80211_rx_status *stats)
 {
        u16 fc = le16_to_cpu(hdr->frame_control);
 
@@ -315,4 +863,264 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
        }
        return 0;
 }
-EXPORT_SYMBOL(iwl_set_decrypted_flag);
+
+static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
+                                       struct ieee80211_hdr *hdr,
+                                       u16 len,
+                                       u32 ampdu_status,
+                                       struct iwl_rx_mem_buffer *rxb,
+                                       struct ieee80211_rx_status *stats)
+{
+       struct sk_buff *skb;
+       __le16 fc = hdr->frame_control;
+
+       /* We only process data packets if the interface is open */
+       if (unlikely(!priv->is_open)) {
+               IWL_DEBUG_DROP_LIMIT(priv,
+                   "Dropping packet while interface is not open.\n");
+               return;
+       }
+
+       /* In case of HW accelerated crypto and bad decryption, drop */
+       if (!priv->cfg->mod_params->sw_crypto &&
+           iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+               return;
+
+       skb = dev_alloc_skb(128);
+       if (!skb) {
+               IWL_ERR(priv, "dev_alloc_skb failed\n");
+               return;
+       }
+
+       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+       iwl_update_stats(priv, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(priv->hw, skb);
+       priv->alloc_rxb_page--;
+       rxb->page = NULL;
+}
+
+static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+       u32 decrypt_out = 0;
+
+       if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+                                       RX_RES_STATUS_STATION_FOUND)
+               decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+                               RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+       decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+       /* packet was not encrypted */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+                                       RX_RES_STATUS_SEC_TYPE_NONE)
+               return decrypt_out;
+
+       /* packet was encrypted with unknown alg */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+                                       RX_RES_STATUS_SEC_TYPE_ERR)
+               return decrypt_out;
+
+       /* decryption was not done in HW */
+       if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+                                       RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+               return decrypt_out;
+
+       switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               /* alg is CCM: check MIC only */
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+                       /* Bad MIC */
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+               break;
+
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+                       /* Bad TTAK */
+                       decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+                       break;
+               }
+               /* fall through if TTAK OK */
+       default:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+               break;
+       }
+
+       IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
+                                       decrypt_in, decrypt_out);
+
+       return decrypt_out;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+static void iwl_rx_reply_rx(struct iwl_priv *priv,
+                           struct iwl_rx_mem_buffer *rxb)
+{
+       struct ieee80211_hdr *header;
+       struct ieee80211_rx_status rx_status;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_rx_phy_res *phy_res;
+       __le32 rx_pkt_status;
+       struct iwl_rx_mpdu_res_start *amsdu;
+       u32 len;
+       u32 ampdu_status;
+       u32 rate_n_flags;
+
+       /**
+        * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+        *      REPLY_RX: physical layer info is in this buffer
+        *      REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+        *              command and cached in priv->last_phy_res
+        *
+        * Here we set up local variables depending on which command is
+        * received.
+        */
+       if (pkt->hdr.cmd == REPLY_RX) {
+               phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+                               + phy_res->cfg_phy_cnt);
+
+               len = le16_to_cpu(phy_res->byte_count);
+               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+                               phy_res->cfg_phy_cnt + len);
+               ampdu_status = le32_to_cpu(rx_pkt_status);
+       } else {
+               if (!priv->_agn.last_phy_res_valid) {
+                       IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+                       return;
+               }
+               phy_res = &priv->_agn.last_phy_res;
+               amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+               len = le16_to_cpu(amsdu->byte_count);
+               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+               ampdu_status = iwl_translate_rx_status(priv,
+                                               le32_to_cpu(rx_pkt_status));
+       }
+
+       if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+                               phy_res->cfg_phy_cnt);
+               return;
+       }
+
+       if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+           !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+                               le32_to_cpu(rx_pkt_status));
+               return;
+       }
+
+       /* This will be used in several places later */
+       rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+       /* rx_status carries information about the packet to mac80211 */
+       rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+                                              rx_status.band);
+       rx_status.rate_idx =
+               iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+       rx_status.flag = 0;
+
+       /* The TSF isn't reliable. To allow a smooth user experience, this
+        * workaround does not propagate it to mac80211. */
+       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+       priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+       rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
+
+       iwl_dbg_log_rx_data_frame(priv, len, header);
+       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+               rx_status.signal, (unsigned long long)rx_status.mactime);
+
+       /*
+        * "antenna number"
+        *
+        * It seems that the antenna field in the phy flags value
+        * is actually a bit field. Radiotap leaves this undefined and
+        * expects an actual antenna number, but I always get "7" for
+        * most legacy frames I receive, indicating that the same
+        * frame was received on all three RX chains.
+        *
+        * I think this field should be removed in favor of a
+        * new 802.11n radiotap field "RX chains" that is defined
+        * as a bitmask.
+        */
+       rx_status.antenna =
+               (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+       /* set the preamble flag if appropriate */
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       /* Set up the HT phy flags */
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               rx_status.flag |= RX_FLAG_HT;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               rx_status.flag |= RX_FLAG_40MHZ;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               rx_status.flag |= RX_FLAG_SHORT_GI;
+
+       iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+                                   rxb, &rx_status);
+}
+
+/**
+ * iwl_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ */
+void iwl_setup_rx_handlers(struct iwl_priv *priv)
+{
+       void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
+       handlers = priv->rx_handlers;
+
+       handlers[REPLY_ALIVE]                   = iwl_rx_reply_alive;
+       handlers[REPLY_ERROR]                   = iwl_rx_reply_error;
+       handlers[CHANNEL_SWITCH_NOTIFICATION]   = iwl_rx_csa;
+       handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
+       handlers[PM_SLEEP_NOTIFICATION]         = iwl_rx_pm_sleep_notif;
+       handlers[PM_DEBUG_STATISTIC_NOTIFIC]    = iwl_rx_pm_debug_statistics_notif;
+       handlers[BEACON_NOTIFICATION]           = iwl_rx_beacon_notif;
+
+       /*
+        * The same handler is used for both the REPLY to a discrete
+        * statistics request from the host as well as for the periodic
+        * statistics notifications (after received beacons) from the uCode.
+        */
+       handlers[REPLY_STATISTICS_CMD]          = iwl_rx_reply_statistics;
+       handlers[STATISTICS_NOTIFICATION]       = iwl_rx_statistics;
+
+       iwl_setup_rx_scan_handlers(priv);
+
+       handlers[CARD_STATE_NOTIFICATION]       = iwl_rx_card_state_notif;
+       handlers[MISSED_BEACONS_NOTIFICATION]   = iwl_rx_missed_beacon_notif;
+
+       /* Rx handlers */
+       handlers[REPLY_RX_PHY_CMD]              = iwl_rx_reply_rx_phy;
+       handlers[REPLY_RX_MPDU_CMD]             = iwl_rx_reply_rx;
+
+       /* block ack */
+       handlers[REPLY_COMPRESSED_BA]           = iwlagn_rx_reply_compressed_ba;
+
+       /* Set up hardware specific Rx handlers */
+       priv->cfg->ops->lib->rx_handler_setup(priv);
+}
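iwl_setup_rx_handlers() populates a function-pointer table indexed by uCode reply/notification ID, so the RX path can dispatch each received packet with a single table lookup. A minimal sketch of that dispatch pattern, with invented command IDs and payloads:

        #include <stdio.h>

        #define MAX_CMD 256

        struct pkt { unsigned char cmd; const char *payload; };

        typedef void (*rx_handler_t)(const struct pkt *p);

        static rx_handler_t handlers[MAX_CMD];

        static void handle_alive(const struct pkt *p) { printf("alive: %s\n", p->payload); }
        static void handle_error(const struct pkt *p) { printf("error: %s\n", p->payload); }

        /* Unclaimed IDs fall through to a default branch. */
        static void dispatch(const struct pkt *p)
        {
                if (handlers[p->cmd])
                        handlers[p->cmd](p);
                else
                        printf("unhandled cmd 0x%02x\n", p->cmd);
        }

        int main(void)
        {
                enum { CMD_ALIVE = 0x01, CMD_ERROR = 0x02 };   /* invented IDs */

                handlers[CMD_ALIVE] = handle_alive;
                handlers[CMD_ERROR] = handle_error;

                struct pkt a = { CMD_ALIVE, "ucode ok" }, b = { 0x42, "???" };
                dispatch(&a);
                dispatch(&b);
                return 0;
        }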
index 12d9363..3a4d9e6 100644
@@ -101,7 +101,7 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
                ieee80211_scan_completed(priv->hw, aborted);
        }
 
-       priv->is_internal_short_scan = false;
+       priv->scan_type = IWL_SCAN_NORMAL;
        priv->scan_vif = NULL;
        priv->scan_request = NULL;
 }
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
        queue_work(priv->workqueue, &priv->abort_scan);
        return 0;
 }
-EXPORT_SYMBOL(iwl_scan_cancel);
 
 /**
  * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
 
        return test_bit(STATUS_SCAN_HW, &priv->status);
 }
-EXPORT_SYMBOL(iwl_scan_cancel_timeout);
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
 static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -257,8 +255,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
        queue_work(priv->workqueue, &priv->scan_completed);
 
        if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
-           priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
+           iwl_advanced_bt_coexist(priv) &&
            priv->bt_status != scan_notif->bt_status) {
                if (scan_notif->bt_status) {
                        /* BT on */
@@ -289,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
        priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
                                        iwl_rx_scan_complete_notif;
 }
-EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
 
 inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
                                     enum ieee80211_band band,
@@ -302,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
                return IWL_ACTIVE_DWELL_TIME_24 +
                        IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
 }
-EXPORT_SYMBOL(iwl_get_active_dwell_time);
 
 u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
                               enum ieee80211_band band,
@@ -334,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 
        return passive;
 }
-EXPORT_SYMBOL(iwl_get_passive_dwell_time);
 
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
@@ -344,12 +338,11 @@ void iwl_init_scan_params(struct iwl_priv *priv)
        if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
                priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
 }
-EXPORT_SYMBOL(iwl_init_scan_params);
 
-static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
-                                         struct ieee80211_vif *vif,
-                                         bool internal,
-                                         enum ieee80211_band band)
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif,
+                                  enum iwl_scan_type scan_type,
+                                  enum ieee80211_band band)
 {
        int ret;
 
@@ -377,17 +370,19 @@ static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
        }
 
        IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
-                       internal ? "internal short " : "");
+                       scan_type == IWL_SCAN_NORMAL ? "" :
+                       scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
+                       "internal short ");
 
        set_bit(STATUS_SCANNING, &priv->status);
-       priv->is_internal_short_scan = internal;
+       priv->scan_type = scan_type;
        priv->scan_start = jiffies;
        priv->scan_band = band;
 
        ret = priv->cfg->ops->utils->request_scan(priv, vif);
        if (ret) {
                clear_bit(STATUS_SCANNING, &priv->status);
-               priv->is_internal_short_scan = false;
+               priv->scan_type = IWL_SCAN_NORMAL;
                return ret;
        }
 
@@ -412,7 +407,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
        mutex_lock(&priv->mutex);
 
        if (test_bit(STATUS_SCANNING, &priv->status) &&
-           !priv->is_internal_short_scan) {
+           priv->scan_type != IWL_SCAN_NORMAL) {
                IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
                ret = -EAGAIN;
                goto out_unlock;
@@ -426,11 +421,11 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
         * If an internal scan is in progress, just set
         * up the scan_request as per above.
         */
-       if (priv->is_internal_short_scan) {
+       if (priv->scan_type != IWL_SCAN_NORMAL) {
                IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
                ret = 0;
        } else
-               ret = iwl_scan_initiate(priv, vif, false,
+               ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
                                        req->channels[0]->band);
 
        IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -440,7 +435,6 @@ out_unlock:
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_mac_hw_scan);
 
 /*
  * internal short scan, this function should only be called while associated.
@@ -460,7 +454,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
 
        mutex_lock(&priv->mutex);
 
-       if (priv->is_internal_short_scan == true) {
+       if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
                IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
                goto unlock;
        }
@@ -470,7 +464,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
                goto unlock;
        }
 
-       if (iwl_scan_initiate(priv, NULL, true, priv->band))
+       if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
                IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
  unlock:
        mutex_unlock(&priv->mutex);
@@ -537,7 +531,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
 
        return (u16)len;
 }
-EXPORT_SYMBOL(iwl_fill_probe_req);
 
 static void iwl_bg_abort_scan(struct work_struct *work)
 {
@@ -558,8 +551,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
            container_of(work, struct iwl_priv, scan_completed);
        bool aborted;
 
-       IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
-                      priv->is_internal_short_scan ? "internal short " : "");
+       IWL_DEBUG_SCAN(priv, "Completed scan.\n");
 
        cancel_delayed_work(&priv->scan_check);
 
@@ -574,7 +566,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
                goto out_settings;
        }
 
-       if (priv->is_internal_short_scan && !aborted) {
+       if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) {
+               ieee80211_tx_status_irqsafe(priv->hw,
+                                           priv->_agn.offchan_tx_skb);
+               priv->_agn.offchan_tx_skb = NULL;
+       }
+
+       if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
                int err;
 
                /* Check if mac80211 requested scan during our internal scan */
@@ -582,7 +580,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
                        goto out_complete;
 
                /* If so request a new scan */
-               err = iwl_scan_initiate(priv, priv->scan_vif, false,
+               err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
                                        priv->scan_request->channels[0]->band);
                if (err) {
                        IWL_DEBUG_SCAN(priv,
@@ -622,7 +620,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
        INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
        INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
 }
-EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
 
 void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
 {
@@ -636,4 +633,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
                mutex_unlock(&priv->mutex);
        }
 }
-EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
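The scan changes above retire the is_internal_short_scan flag in favour of a three-way priv->scan_type. All three values (IWL_SCAN_NORMAL, IWL_SCAN_RADIO_RESET, IWL_SCAN_OFFCH_TX) appear in the hunks, but the enum definition itself lives in a header outside this diff. A plausible sketch of its shape, together with the debug-label selection mirrored from iwl_scan_initiate():

        #include <stdio.h>

        /* Plausible shape of the enum introduced by this series; the real
         * definition lives in a driver header that is not part of this diff. */
        enum iwl_scan_type {
                IWL_SCAN_NORMAL,        /* mac80211-requested scan */
                IWL_SCAN_RADIO_RESET,   /* internal short scan used to reset the radio */
                IWL_SCAN_OFFCH_TX,      /* off-channel TX scan */
        };

        /* Mirrors the debug-label selection in iwl_scan_initiate(). */
        static const char *scan_label(enum iwl_scan_type t)
        {
                return t == IWL_SCAN_NORMAL   ? "" :
                       t == IWL_SCAN_OFFCH_TX ? "offchan TX " :
                                                "internal short ";
        }

        int main(void)
        {
                printf("Starting %sscan...\n", scan_label(IWL_SCAN_RADIO_RESET));
                return 0;
        }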
index 49493d1..bc90a12 100644
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_send_add_sta);
 
 static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
                                   struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        return sta_id;
 
 }
-EXPORT_SYMBOL_GPL(iwl_prep_station);
 
 #define STA_WAIT_TIMEOUT (HZ/2)
 
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        *sta_id_r = sta_id;
        return ret;
 }
-EXPORT_SYMBOL(iwl_add_station_common);
 
 /**
  * iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -513,7 +510,6 @@ out_err:
        spin_unlock_irqrestore(&priv->sta_lock, flags);
        return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(iwl_remove_station);
 
 /**
  * iwl_clear_ucode_stations - clear ucode station table bits
@@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
        if (!cleared)
                IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
 }
-EXPORT_SYMBOL(iwl_clear_ucode_stations);
 
 /**
  * iwl_restore_stations() - Restore driver known stations to device
@@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        else
                IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
 }
-EXPORT_SYMBOL(iwl_restore_stations);
 
 void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
@@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                        priv->stations[sta_id].sta.sta.addr, ret);
        iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 }
-EXPORT_SYMBOL(iwl_reprogram_ap_sta);
 
 int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 {
@@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 
        return WEP_INVALID_OFFSET;
 }
-EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
 
 void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
 {
@@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
        }
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
-EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        }
        return ret;
 }
-EXPORT_SYMBOL(iwl_send_lq_cmd);
 
 int iwl_mac_sta_remove(struct ieee80211_hw *hw,
                       struct ieee80211_vif *vif,
@@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
        mutex_unlock(&priv->mutex);
        return ret;
 }
-EXPORT_SYMBOL(iwl_mac_sta_remove);
index 073b6ce..277c917 100644 (file)
@@ -84,7 +84,23 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
        }
        txq->need_update = 0;
 }
-EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+
+/**
+ * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->write_ptr != q->read_ptr) {
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+}
 
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
@@ -97,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
        struct device *dev = &priv->pci_dev->dev;
        int i;
 
-       if (q->n_bd == 0)
-               return;
-
-       /* first, empty all BD's */
-       for (; q->write_ptr != q->read_ptr;
-            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+       iwl_tx_queue_unmap(priv, txq_id);
 
        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -131,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_tx_queue_free);
 
 /**
- * iwl_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
  */
-void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 {
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
-       struct device *dev = &priv->pci_dev->dev;
        int i;
        bool huge = false;
 
        if (q->n_bd == 0)
                return;
 
-       for (; q->read_ptr != q->write_ptr;
-            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+       while (q->read_ptr != q->write_ptr) {
                /* we have no way to tell if it is a huge cmd ATM */
                i = get_cmd_index(q, q->read_ptr, 0);
 
-               if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+               if (txq->meta[i].flags & CMD_SIZE_HUGE)
                        huge = true;
-                       continue;
-               }
+               else
+                       pci_unmap_single(priv->pci_dev,
+                                        dma_unmap_addr(&txq->meta[i], mapping),
+                                        dma_unmap_len(&txq->meta[i], len),
+                                        PCI_DMA_BIDIRECTIONAL);
 
-               pci_unmap_single(priv->pci_dev,
-                                dma_unmap_addr(&txq->meta[i], mapping),
-                                dma_unmap_len(&txq->meta[i], len),
-                                PCI_DMA_BIDIRECTIONAL);
+               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
+
        if (huge) {
                i = q->n_window;
                pci_unmap_single(priv->pci_dev,
@@ -174,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
                                 dma_unmap_len(&txq->meta[i], len),
                                 PCI_DMA_BIDIRECTIONAL);
        }
+}
+
+/**
+ * iwl_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_cmd_queue_free(struct iwl_priv *priv)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       iwl_cmd_queue_unmap(priv);
 
        /* De-alloc array of command/tx buffers */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -193,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_cmd_queue_free);
 
 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
  * DMA services
@@ -233,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q)
                s = 0;
        return s;
 }
-EXPORT_SYMBOL(iwl_queue_space);
 
 
 /**
@@ -384,7 +401,6 @@ out_free_arrays:
 
        return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_tx_queue_init);
 
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
@@ -404,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);
 }
-EXPORT_SYMBOL(iwl_tx_queue_reset);
 
 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
 
@@ -641,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        }
        meta->flags = 0;
 }
-EXPORT_SYMBOL(iwl_tx_cmd_complete);
index 5a49822..ed57e44 100644 (file)
@@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
                        return -EINVAL;
                }
 
-               freq = ieee80211_channel_to_frequency(umac_bss->channel);
+               freq = ieee80211_channel_to_frequency(umac_bss->channel,
+                                                     band->band);
                channel = ieee80211_get_channel(wiphy, freq);
                signal = umac_bss->rssi * 100;
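The callers patched above and below all adapt to the same API change: ieee80211_channel_to_frequency() now takes the band alongside the channel number. A minimal sketch of the new calling convention, with a hypothetical helper name (assuming <net/cfg80211.h> provides the two-argument prototype, as in the hunks above):

#include <net/cfg80211.h>

/* Hypothetical helper: resolve a 2.4 GHz channel number to its
 * struct ieee80211_channel via the two-argument API. */
static struct ieee80211_channel *example_get_2ghz_channel(struct wiphy *wiphy,
                                                          int chan_no)
{
        int freq = ieee80211_channel_to_frequency(chan_no, IEEE80211_BAND_2GHZ);

        return ieee80211_get_channel(wiphy, freq);
}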
 
index a944893..9a57cf6 100644 (file)
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
        switch (le32_to_cpu(complete->status)) {
        case UMAC_ASSOC_COMPLETE_SUCCESS:
                chan = ieee80211_get_channel(wiphy,
-                       ieee80211_channel_to_frequency(complete->channel));
+                       ieee80211_channel_to_frequency(complete->channel,
+                               complete->band == UMAC_BAND_2GHZ ?
+                                       IEEE80211_BAND_2GHZ :
+                                       IEEE80211_BAND_5GHZ));
                if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
                        /* Associated to a unallowed channel, disassociate. */
                        __iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
                goto err;
        }
 
-       freq = ieee80211_channel_to_frequency(umac_bss->channel);
+       freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
        channel = ieee80211_get_channel(wiphy, freq);
        signal = umac_bss->rssi * 100;
 
index 698a1f7..30ef035 100644 (file)
@@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
                /* No channel, no luck */
                if (chan_no != -1) {
                        struct wiphy *wiphy = priv->wdev->wiphy;
-                       int freq = ieee80211_channel_to_frequency(chan_no);
+                       int freq = ieee80211_channel_to_frequency(chan_no,
+                                                       IEEE80211_BAND_2GHZ);
                        struct ieee80211_channel *channel =
                                ieee80211_get_channel(wiphy, freq);
 
@@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
        lbs_deb_enter(LBS_DEB_CFG80211);
 
        survey->channel = ieee80211_get_channel(wiphy,
-               ieee80211_channel_to_frequency(priv->channel));
+               ieee80211_channel_to_frequency(priv->channel,
+                                              IEEE80211_BAND_2GHZ));
 
        ret = lbs_get_rssi(priv, &signal, &noise);
        if (ret == 0) {
index 78c4da1..7e8a658 100644 (file)
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
        if (priv->current_addr[0] == 0xff)
                memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
 
-       memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
-       if (priv->mesh_dev)
-               memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
+       if (!priv->copied_hwaddr) {
+               memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
+               if (priv->mesh_dev)
+                       memcpy(priv->mesh_dev->dev_addr,
+                               priv->current_addr, ETH_ALEN);
+               priv->copied_hwaddr = 1;
+       }
 
 out:
        lbs_deb_leave(LBS_DEB_CMD);
index 18dd9a0..bc461eb 100644 (file)
@@ -90,6 +90,7 @@ struct lbs_private {
        void *card;
        u8 fw_ready;
        u8 surpriseremoved;
+       u8 setup_fw_on_resume;
        int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
        void (*reset_card) (struct lbs_private *priv);
        int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
        u32 fwcapinfo;
        u16 regioncode;
        u8 current_addr[ETH_ALEN];
+       u8 copied_hwaddr;
 
        /* Command download */
        u8 dnld_sent;
index 5eac135..6cb6935 100644 (file)
@@ -387,7 +387,7 @@ struct lbs_offset_value {
 struct mrvl_ie_domain_param_set {
        struct mrvl_ie_header header;
 
-       u8 country_code[3];
+       u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS];
 } __packed;
 
index 0060023..f6c2cd6 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/firmware.h>
 #include <linux/jiffies.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
-#include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/spi/libertas_spi.h>
 #include <linux/spi/spi.h>
 #include "dev.h"
 #include "if_spi.h"
 
+struct if_spi_packet {
+       struct list_head                list;
+       u16                             blen;
+       u8                              buffer[0] __attribute__((aligned(4)));
+};
+
 struct if_spi_card {
        struct spi_device               *spi;
        struct lbs_private              *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
        unsigned long                   spu_reg_delay;
 
        /* Handles all SPI communication (except for FW load) */
-       struct task_struct              *spi_thread;
-       int                             run_thread;
-
-       /* Used to wake up the spi_thread */
-       struct semaphore                spi_ready;
-       struct semaphore                spi_thread_terminated;
+       struct workqueue_struct         *workqueue;
+       struct work_struct              packet_work;
 
        u8                              cmd_buffer[IF_SPI_CMD_BUF_SIZE];
+
+       /* A buffer of incoming packets from libertas core.
+        * Since we can't sleep in hw_host_to_card, we have to buffer
+        * them. */
+       struct list_head                cmd_packet_list;
+       struct list_head                data_packet_list;
+
+       /* Protects cmd_packet_list and data_packet_list */
+       spinlock_t                      buffer_lock;
 };
 
 static void free_if_spi_card(struct if_spi_card *card)
 {
+       struct list_head *cursor, *next;
+       struct if_spi_packet *packet;
+
+       list_for_each_safe(cursor, next, &card->cmd_packet_list) {
+               packet = container_of(cursor, struct if_spi_packet, list);
+               list_del(&packet->list);
+               kfree(packet);
+       }
+       list_for_each_safe(cursor, next, &card->data_packet_list) {
+               packet = container_of(cursor, struct if_spi_packet, list);
+               list_del(&packet->list);
+               kfree(packet);
+       }
        spi_set_drvdata(card->spi, NULL);
        kfree(card);
 }
@@ -622,7 +644,7 @@ out:
 /*
  * SPI Transfer Thread
  *
- * The SPI thread handles all SPI transfers, so there is no need for a lock.
+ * The SPI worker handles all SPI transfers, so there is no need for a lock.
  */
 
 /* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
        return err;
 }
 
+/* Move data or a command from the host to the card. */
+static void if_spi_h2c(struct if_spi_card *card,
+                       struct if_spi_packet *packet, int type)
+{
+       int err = 0;
+       u16 int_type, port_reg;
+
+       switch (type) {
+       case MVMS_DAT:
+               int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
+               port_reg = IF_SPI_DATA_RDWRPORT_REG;
+               break;
+       case MVMS_CMD:
+               int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
+               port_reg = IF_SPI_CMD_RDWRPORT_REG;
+               break;
+       default:
+               lbs_pr_err("can't transfer buffer of type %d\n", type);
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* Write the data to the card */
+       err = spu_write(card, port_reg, packet->buffer, packet->blen);
+       if (err)
+               goto out;
+
+out:
+       kfree(packet);
+
+       if (err)
+               lbs_pr_err("%s: error %d\n", __func__, err);
+}
+
 /* Inform the host about a card event */
 static void if_spi_e2h(struct if_spi_card *card)
 {
@@ -766,71 +822,88 @@ out:
                lbs_pr_err("%s: error %d\n", __func__, err);
 }
 
-static int lbs_spi_thread(void *data)
+static void if_spi_host_to_card_worker(struct work_struct *work)
 {
        int err;
-       struct if_spi_card *card = data;
+       struct if_spi_card *card;
        u16 hiStatus;
+       unsigned long flags;
+       struct if_spi_packet *packet;
 
-       while (1) {
-               /* Wait to be woken up by one of two things.  First, our ISR
-                * could tell us that something happened on the WLAN.
-                * Secondly, libertas could call hw_host_to_card with more
-                * data, which we might be able to send.
-                */
-               do {
-                       err = down_interruptible(&card->spi_ready);
-                       if (!card->run_thread) {
-                               up(&card->spi_thread_terminated);
-                               do_exit(0);
-                       }
-               } while (err == -EINTR);
+       card = container_of(work, struct if_spi_card, packet_work);
 
-               /* Read the host interrupt status register to see what we
-                * can do. */
-               err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
-                                       &hiStatus);
-               if (err) {
-                       lbs_pr_err("I/O error\n");
+       lbs_deb_enter(LBS_DEB_SPI);
+
+       /* Read the host interrupt status register to see what we
+        * can do. */
+       err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
+                               &hiStatus);
+       if (err) {
+               lbs_pr_err("I/O error\n");
+               goto err;
+       }
+
+       if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
+               err = if_spi_c2h_cmd(card);
+               if (err)
                        goto err;
-               }
+       }
+       if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
+               err = if_spi_c2h_data(card);
+               if (err)
+                       goto err;
+       }
 
-               if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
-                       err = if_spi_c2h_cmd(card);
-                       if (err)
-                               goto err;
-               }
-               if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
-                       err = if_spi_c2h_data(card);
-                       if (err)
-                               goto err;
+       /* workaround: in PS mode, the card does not set the Command
+        * Download Ready bit, but it sets TX Download Ready. */
+       if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
+          (card->priv->psstate != PS_STATE_FULL_POWER &&
+           (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
+               /* This means two things. First of all,
+                * if there was a previous command sent, the card has
+                * successfully received it.
+                * Secondly, it is now ready to download another
+                * command.
+                */
+               lbs_host_to_card_done(card->priv);
+
+               /* Do we have any command packets from the host to
+                * send? */
+               packet = NULL;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               if (!list_empty(&card->cmd_packet_list)) {
+                       packet = (struct if_spi_packet *)(card->
+                                       cmd_packet_list.next);
+                       list_del(&packet->list);
                }
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
 
-               /* workaround: in PS mode, the card does not set the Command
-                * Download Ready bit, but it sets TX Download Ready. */
-               if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
-                  (card->priv->psstate != PS_STATE_FULL_POWER &&
-                   (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
-                       lbs_host_to_card_done(card->priv);
+               if (packet)
+                       if_spi_h2c(card, packet, MVMS_CMD);
+       }
+       if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
+               /* Do we have any data packets from the host to
+                * send? */
+               packet = NULL;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               if (!list_empty(&card->data_packet_list)) {
+                       packet = (struct if_spi_packet *)(card->
+                                       data_packet_list.next);
+                       list_del(&packet->list);
                }
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
 
-               if (hiStatus & IF_SPI_HIST_CARD_EVENT)
-                       if_spi_e2h(card);
+               if (packet)
+                       if_spi_h2c(card, packet, MVMS_DAT);
+       }
+       if (hiStatus & IF_SPI_HIST_CARD_EVENT)
+               if_spi_e2h(card);
 
 err:
-               if (err)
-                       lbs_pr_err("%s: got error %d\n", __func__, err);
-       }
-}
+       if (err)
+               lbs_pr_err("%s: got error %d\n", __func__, err);
 
-/* Block until lbs_spi_thread thread has terminated */
-static void if_spi_terminate_spi_thread(struct if_spi_card *card)
-{
-       /* It would be nice to use kthread_stop here, but that function
-        * can't wake threads waiting for a semaphore. */
-       card->run_thread = 0;
-       up(&card->spi_ready);
-       down(&card->spi_thread_terminated);
+       lbs_deb_leave(LBS_DEB_SPI);
 }
 
 /*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
                                u8 type, u8 *buf, u16 nb)
 {
        int err = 0;
+       unsigned long flags;
        struct if_spi_card *card = priv->card;
+       struct if_spi_packet *packet;
+       u16 blen;
 
        lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
 
-       nb = ALIGN(nb, 4);
+       if (nb == 0) {
+               lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
+               err = -EINVAL;
+               goto out;
+       }
+       blen = ALIGN(nb, 4);
+       packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
+       if (!packet) {
+               err = -ENOMEM;
+               goto out;
+       }
+       packet->blen = blen;
+       memcpy(packet->buffer, buf, nb);
+       memset(packet->buffer + nb, 0, blen - nb);
 
        switch (type) {
        case MVMS_CMD:
-               err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
+               priv->dnld_sent = DNLD_CMD_SENT;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               list_add_tail(&packet->list, &card->cmd_packet_list);
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
                break;
        case MVMS_DAT:
-               err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
+               priv->dnld_sent = DNLD_DATA_SENT;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               list_add_tail(&packet->list, &card->data_packet_list);
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
                break;
        default:
                lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
                break;
        }
 
+       /* Queue spi xfer work */
+       queue_work(card->workqueue, &card->packet_work);
+out:
        lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
        return err;
 }
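The reworked if_spi_host_to_card() above is the producer half of a standard "cannot sleep in this context" pattern: copy the buffer with GFP_ATOMIC, append it to a list under a spinlock, and let a workqueue perform the actual SPI transfer later. A stripped-down sketch of that pattern (all names below are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_pkt {
        struct list_head list;
        u16 len;
        u8 buf[];
};

struct example_ctx {
        struct list_head pending;       /* protected by lock */
        spinlock_t lock;
        struct workqueue_struct *wq;
        struct work_struct xfer_work;   /* drains 'pending' on the bus */
};

/* May run in atomic context: buffer the packet and defer the (possibly
 * sleeping) bus transfer to the workqueue. */
static int example_enqueue(struct example_ctx *ctx, const u8 *data, u16 len)
{
        struct example_pkt *pkt;
        unsigned long flags;

        pkt = kzalloc(sizeof(*pkt) + len, GFP_ATOMIC);
        if (!pkt)
                return -ENOMEM;
        pkt->len = len;
        memcpy(pkt->buf, data, len);

        spin_lock_irqsave(&ctx->lock, flags);
        list_add_tail(&pkt->list, &ctx->pending);
        spin_unlock_irqrestore(&ctx->lock, flags);

        queue_work(ctx->wq, &ctx->xfer_work);
        return 0;
}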
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
  * Host Interrupts
  *
  * Service incoming interrupts from the WLAN device. We can't sleep here, so
- * don't try to talk on the SPI bus, just wake up the SPI thread.
+ * don't try to talk on the SPI bus, just queue the SPI xfer work.
  */
 static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
 {
        struct if_spi_card *card = dev_id;
 
-       up(&card->spi_ready);
+       queue_work(card->workqueue, &card->packet_work);
+
        return IRQ_HANDLED;
 }
 
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
  * SPI callbacks
  */
 
-static int __devinit if_spi_probe(struct spi_device *spi)
+static int if_spi_init_card(struct if_spi_card *card)
 {
-       struct if_spi_card *card;
-       struct lbs_private *priv = NULL;
-       struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
-       int err = 0, i;
+       struct spi_device *spi = card->spi;
+       int err, i;
        u32 scratch;
-       struct sched_param param = { .sched_priority = 1 };
        const struct firmware *helper = NULL;
        const struct firmware *mainfw = NULL;
 
        lbs_deb_enter(LBS_DEB_SPI);
 
-       if (!pdata) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       if (pdata->setup) {
-               err = pdata->setup(spi);
-               if (err)
-                       goto out;
-       }
-
-       /* Allocate card structure to represent this specific device */
-       card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
-       if (!card) {
-               err = -ENOMEM;
-               goto out;
-       }
-       spi_set_drvdata(spi, card);
-       card->pdata = pdata;
-       card->spi = spi;
-       card->prev_xfer_time = jiffies;
-
-       sema_init(&card->spi_ready, 0);
-       sema_init(&card->spi_thread_terminated, 0);
-
-       /* Initialize the SPI Interface Unit */
-       err = spu_init(card, pdata->use_dummy_writes);
+       err = spu_init(card, card->pdata->use_dummy_writes);
        if (err)
-               goto free_card;
+               goto out;
        err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
        if (err)
-               goto free_card;
+               goto out;
 
-       /* Firmware load */
        err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
        if (err)
-               goto free_card;
+               goto out;
        if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
                lbs_deb_spi("Firmware is already loaded for "
                            "Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
                        lbs_pr_err("Unsupported chip_id: 0x%02x\n",
                                        card->card_id);
                        err = -ENODEV;
-                       goto free_card;
+                       goto out;
                }
 
                err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
                                        &mainfw);
                if (err) {
                        lbs_pr_err("failed to find firmware (%d)\n", err);
-                       goto free_card;
+                       goto out;
                }
 
                lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,14 +1035,67 @@ static int __devinit if_spi_probe(struct spi_device *spi)
                                spi->max_speed_hz);
                err = if_spi_prog_helper_firmware(card, helper);
                if (err)
-                       goto free_card;
+                       goto out;
                err = if_spi_prog_main_firmware(card, mainfw);
                if (err)
-                       goto free_card;
+                       goto out;
                lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
        }
 
        err = spu_set_interrupt_mode(card, 0, 1);
+       if (err)
+               goto out;
+
+out:
+       if (helper)
+               release_firmware(helper);
+       if (mainfw)
+               release_firmware(mainfw);
+
+       lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
+
+       return err;
+}
+
+static int __devinit if_spi_probe(struct spi_device *spi)
+{
+       struct if_spi_card *card;
+       struct lbs_private *priv = NULL;
+       struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+       int err = 0;
+
+       lbs_deb_enter(LBS_DEB_SPI);
+
+       if (!pdata) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (pdata->setup) {
+               err = pdata->setup(spi);
+               if (err)
+                       goto out;
+       }
+
+       /* Allocate card structure to represent this specific device */
+       card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
+       if (!card) {
+               err = -ENOMEM;
+               goto teardown;
+       }
+       spi_set_drvdata(spi, card);
+       card->pdata = pdata;
+       card->spi = spi;
+       card->prev_xfer_time = jiffies;
+
+       INIT_LIST_HEAD(&card->cmd_packet_list);
+       INIT_LIST_HEAD(&card->data_packet_list);
+       spin_lock_init(&card->buffer_lock);
+
+       /* Initialize the SPI Interface Unit and load the firmware */
+       err = if_spi_init_card(card);
        if (err)
                goto free_card;
 
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
        priv->fw_ready = 1;
 
        /* Initialize interrupt handling stuff. */
-       card->run_thread = 1;
-       card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread");
-       if (IS_ERR(card->spi_thread)) {
-               card->run_thread = 0;
-               err = PTR_ERR(card->spi_thread);
-               lbs_pr_err("error creating SPI thread: err=%d\n", err);
-               goto remove_card;
-       }
-       if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
-               lbs_pr_err("Error setting scheduler, using default.\n");
+       card->workqueue = create_workqueue("libertas_spi");
+       INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
 
        err = request_irq(spi->irq, if_spi_host_interrupt,
                        IRQF_TRIGGER_FALLING, "libertas_spi", card);
        if (err) {
                lbs_pr_err("can't get host irq line-- request_irq failed\n");
-               goto terminate_thread;
+               goto terminate_workqueue;
        }
 
-       /* poke the IRQ handler so that we don't miss the first interrupt */
-       up(&card->spi_ready);
-
        /* Start the card.
         * This will call register_netdev, and we'll start
         * getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
 
 release_irq:
        free_irq(spi->irq, card);
-terminate_thread:
-       if_spi_terminate_spi_thread(card);
-remove_card:
+terminate_workqueue:
+       flush_workqueue(card->workqueue);
+       destroy_workqueue(card->workqueue);
        lbs_remove_card(priv); /* will call free_netdev */
 free_card:
        free_if_spi_card(card);
+teardown:
+       if (pdata->teardown)
+               pdata->teardown(spi);
 out:
-       if (helper)
-               release_firmware(helper);
-       if (mainfw)
-               release_firmware(mainfw);
-
        lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
        return err;
 }
@@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
        lbs_remove_card(priv); /* will call free_netdev */
 
        free_irq(spi->irq, card);
-       if_spi_terminate_spi_thread(card);
+       flush_workqueue(card->workqueue);
+       destroy_workqueue(card->workqueue);
        if (card->pdata->teardown)
                card->pdata->teardown(spi);
        free_if_spi_card(card);
index 6836a6d..ca8149c 100644 (file)
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
        return 0;
 }
 
+/**
+ * @brief This function gets the HW spec from the firmware and sets
+ *        some basic parameters.
+ *
+ *  @param priv    A pointer to struct lbs_private structure
+ *  @return        0 or -1
+ */
+static int lbs_setup_firmware(struct lbs_private *priv)
+{
+       int ret = -1;
+       s16 curlevel = 0, minlevel = 0, maxlevel = 0;
+
+       lbs_deb_enter(LBS_DEB_FW);
+
+       /* Read MAC address from firmware */
+       memset(priv->current_addr, 0xff, ETH_ALEN);
+       ret = lbs_update_hw_spec(priv);
+       if (ret)
+               goto done;
+
+       /* Read power levels if available */
+       ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
+       if (ret == 0) {
+               priv->txpower_cur = curlevel;
+               priv->txpower_min = minlevel;
+               priv->txpower_max = maxlevel;
+       }
+
+       /* Send cmd to FW to enable 11D function */
+       ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
+       lbs_set_mac_control(priv);
+done:
+       lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+       return ret;
+}
+
 int lbs_suspend(struct lbs_private *priv)
 {
        int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
                        lbs_pr_err("deep sleep activation failed: %d\n", ret);
        }
 
-       lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_resume);
-
-/**
- * @brief This function gets the HW spec from the firmware and sets
- *        some basic parameters.
- *
- *  @param priv    A pointer to struct lbs_private structure
- *  @return       0 or -1
- */
-static int lbs_setup_firmware(struct lbs_private *priv)
-{
-       int ret = -1;
-       s16 curlevel = 0, minlevel = 0, maxlevel = 0;
-
-       lbs_deb_enter(LBS_DEB_FW);
-
-       /* Read MAC address from firmware */
-       memset(priv->current_addr, 0xff, ETH_ALEN);
-       ret = lbs_update_hw_spec(priv);
-       if (ret)
-               goto done;
-
-       /* Read power levels if available */
-       ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
-       if (ret == 0) {
-               priv->txpower_cur = curlevel;
-               priv->txpower_min = minlevel;
-               priv->txpower_max = maxlevel;
-       }
+       if (priv->setup_fw_on_resume)
+               ret = lbs_setup_firmware(priv);
 
-       /* Send cmd to FW to enable 11D function */
-       ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
-
-       lbs_set_mac_control(priv);
-done:
        lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
        return ret;
 }
+EXPORT_SYMBOL_GPL(lbs_resume);
 
 /**
  *  This function handles the timeout of command sending.
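The setup_fw_on_resume flag added to struct lbs_private is what the relocated lbs_setup_firmware() serves: an interface driver whose suspend path powers the card down can set it so that lbs_resume() re-runs the basic firmware setup, while the copied_hwaddr guard added earlier keeps the re-run from overwriting the netdev MAC address. A hedged sketch of how a bus driver might use it (the resume hook and drvdata layout are hypothetical, not part of this patch):

#include <linux/device.h>
#include "dev.h"        /* assumes the libertas private header is in scope */

/* Hypothetical bus-driver resume hook: the card lost power (and thus
 * firmware state) during suspend, so ask the core to redo basic setup. */
static int example_if_resume(struct device *dev)
{
        struct lbs_private *priv = dev_get_drvdata(dev);

        priv->setup_fw_on_resume = 1;
        return lbs_resume(priv);
}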
index acf3bf6..9d097b9 100644 (file)
@@ -918,7 +918,6 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
        struct mrvl_mesh_defaults defs;
-       int maxlen;
        int ret;
 
        ret = mesh_get_default_parameters(dev, &defs);
@@ -931,13 +930,11 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
                defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
        }
 
-       /* SSID not null terminated: reserve room for \0 + \n */
-       maxlen = defs.meshie.val.mesh_id_len + 2;
-       maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
+       memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len);
+       buf[defs.meshie.val.mesh_id_len] = '\n';
+       buf[defs.meshie.val.mesh_id_len + 1] = '\0';
 
-       defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
-
-       return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
+       return defs.meshie.val.mesh_id_len + 1;
 }
 
 /**
index 9278b3c..d400508 100644 (file)
@@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
        lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct lbtf_private *priv = hw->priv;
 
@@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         * there are no buffered multicast frames to send
         */
        ieee80211_stop_queues(priv->hw);
-       return NETDEV_TX_OK;
 }
 
 static void lbtf_tx_work(struct work_struct *work)
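lbtf_op_tx() above, mac80211_hwsim_tx() and mwl8k_txq_xmit() below all follow the mac80211 change that makes the .tx callback return void: there is no NETDEV_TX_OK/BUSY status to propagate any more, so a frame that cannot be handled is simply freed. A minimal sketch of the new shape (the private structure and queue handling are illustrative only):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

struct example_priv {                           /* hypothetical driver state */
        struct sk_buff_head tx_pending;
};

/* mac80211 .tx callback after the signature change: returns void, so a
 * frame the driver cannot accept is dropped here instead of reported. */
static void example_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct example_priv *priv = hw->priv;

        if (skb->len < sizeof(struct ieee80211_hdr)) {
                dev_kfree_skb(skb);
                return;
        }
        skb_queue_tail(&priv->tx_pending, skb); /* real drivers kick their TX path here */
}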
index 454f045..56f439d 100644 (file)
@@ -541,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 }
 
 
-static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        bool ack;
        struct ieee80211_tx_info *txi;
@@ -551,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (skb->len < 10) {
                /* Should not happen; just a sanity check for addr1 use */
                dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
        ack = mac80211_hwsim_tx_frame(hw, skb);
@@ -571,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
                txi->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(hw, skb);
-       return NETDEV_TX_OK;
 }
 
 
@@ -943,7 +942,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
 static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       enum ieee80211_ampdu_mlme_action action,
-                                      struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                                      struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                                      u8 buf_size)
 {
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
index 9ecf840..3695227 100644 (file)
@@ -232,6 +232,9 @@ struct mwl8k_priv {
        struct completion firmware_loading_complete;
 };
 
+#define MAX_WEP_KEY_LEN         13
+#define NUM_WEP_KEYS            4
+
 /* Per interface specific private data */
 struct mwl8k_vif {
        struct list_head list;
@@ -242,8 +245,21 @@ struct mwl8k_vif {
 
        /* Non AMPDU sequence number assigned by driver.  */
        u16 seqno;
+
+       /* Saved WEP keys */
+       struct {
+               u8 enabled;
+               u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
+       } wep_key_conf[NUM_WEP_KEYS];
+
+       /* BSSID */
+       u8 bssid[ETH_ALEN];
+
+       /* A flag to indicate whether HW crypto is enabled for this bssid */
+       bool is_hw_crypto_enabled;
 };
 #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
+#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
 
 struct mwl8k_sta {
        /* Index into station database. Returned by UPDATE_STADB.  */
@@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
 #define MWL8K_CMD_SET_RATEADAPT_MODE   0x0203
 #define MWL8K_CMD_BSS_START            0x1100          /* per-vif */
 #define MWL8K_CMD_SET_NEW_STN          0x1111          /* per-vif */
+#define MWL8K_CMD_UPDATE_ENCRYPTION    0x1122          /* per-vif */
 #define MWL8K_CMD_UPDATE_STADB         0x1123
 
 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
                MWL8K_CMDNAME(SET_RATEADAPT_MODE);
                MWL8K_CMDNAME(BSS_START);
                MWL8K_CMDNAME(SET_NEW_STN);
+               MWL8K_CMDNAME(UPDATE_ENCRYPTION);
                MWL8K_CMDNAME(UPDATE_STADB);
        default:
                snprintf(buf, bufsize, "0x%x", cmd);
@@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
                skb_pull(skb, sizeof(*tr) - hdrlen);
 }
 
-static inline void mwl8k_add_dma_header(struct sk_buff *skb)
+static void
+mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
 {
        struct ieee80211_hdr *wh;
        int hdrlen;
+       int reqd_hdrlen;
        struct mwl8k_dma_data *tr;
 
        /*
@@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
        wh = (struct ieee80211_hdr *)skb->data;
 
        hdrlen = ieee80211_hdrlen(wh->frame_control);
-       if (hdrlen != sizeof(*tr))
-               skb_push(skb, sizeof(*tr) - hdrlen);
+       reqd_hdrlen = sizeof(*tr);
+
+       if (hdrlen != reqd_hdrlen)
+               skb_push(skb, reqd_hdrlen - hdrlen);
 
        if (ieee80211_is_data_qos(wh->frame_control))
-               hdrlen -= 2;
+               hdrlen -= IEEE80211_QOS_CTL_LEN;
 
        tr = (struct mwl8k_dma_data *)skb->data;
        if (wh != &tr->wh)
@@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
         * payload".  That is, everything except for the 802.11 header.
         * This includes all crypto material including the MIC.
         */
-       tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr));
+       tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
 }
 
+static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *wh;
+       struct ieee80211_tx_info *tx_info;
+       struct ieee80211_key_conf *key_conf;
+       int data_pad;
+
+       wh = (struct ieee80211_hdr *)skb->data;
+
+       tx_info = IEEE80211_SKB_CB(skb);
+
+       key_conf = NULL;
+       if (ieee80211_is_data(wh->frame_control))
+               key_conf = tx_info->control.hw_key;
+
+       /*
+        * Make sure the packet header is in the DMA header format (4-address
+        * without QoS). mac80211 has already provided the necessary crypto
+        * padding between the header and the payload, but it does not add
+        * tail padding when HW crypto is enabled.
+        *
+        * We have the following trailer padding requirements:
+        * - WEP: 4 trailer bytes (ICV)
+        * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
+        * - CCMP: 8 trailer bytes (MIC)
+        */
+       data_pad = 0;
+       if (key_conf != NULL) {
+               switch (key_conf->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+                       data_pad = 4;
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       data_pad = 12;
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       data_pad = 8;
+                       break;
+               }
+       }
+       mwl8k_add_dma_header(skb, data_pad);
+}
 
 /*
  * Packet reception for 88w8366 AP firmware.
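The trailer sizes listed in the comment inside mwl8k_encapsulate_tx_frame() amount to a small lookup on the cipher suite; restated as a stand-alone helper for clarity (illustrative only, the driver computes data_pad inline as shown above):

#include <linux/ieee80211.h>
#include <linux/types.h>

/* Bytes of tail room the hardware fills in for each supported cipher. */
static int example_hw_crypto_tail_len(u32 cipher)
{
        switch (cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                return 4;               /* ICV */
        case WLAN_CIPHER_SUITE_TKIP:
                return 12;              /* 8-byte MIC + 4-byte ICV */
        case WLAN_CIPHER_SUITE_CCMP:
                return 8;               /* MIC */
        default:
                return 0;               /* no HW crypto trailer */
        }
}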
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
 
 #define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST    0x80
 
+/* 8366 AP rx_status bits */
+#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK          0x80
+#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR       0xFF
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR      0x02
+#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR       0x04
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR      0x08
+
 static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
 {
        struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
        } else {
                status->band = IEEE80211_BAND_2GHZ;
        }
-       status->freq = ieee80211_channel_to_frequency(rxd->channel);
+       status->freq = ieee80211_channel_to_frequency(rxd->channel,
+                                                     status->band);
 
        *qos = rxd->qos_control;
 
+       if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
+           (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
+           (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
+               status->flag |= RX_FLAG_MMIC_ERROR;
+
        return le16_to_cpu(rxd->pkt_len);
 }
 
@@ -876,6 +954,11 @@ struct mwl8k_rxd_sta {
 #define MWL8K_STA_RATE_INFO_MCS_FORMAT         0x0001
 
 #define MWL8K_STA_RX_CTRL_OWNED_BY_HOST                0x02
+#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR                0x04
+/* ICV=0 or MIC=1 */
+#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE         0x08
+/* Key is uploaded only in failure case */
+#define MWL8K_STA_RX_CTRL_KEY_INDEX                    0x30
 
 static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
 {
@@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
        } else {
                status->band = IEEE80211_BAND_2GHZ;
        }
-       status->freq = ieee80211_channel_to_frequency(rxd->channel);
+       status->freq = ieee80211_channel_to_frequency(rxd->channel,
+                                                     status->band);
 
        *qos = rxd->qos_control;
+       if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
+           (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
+               status->flag |= RX_FLAG_MMIC_ERROR;
 
        return le16_to_cpu(rxd->pkt_len);
 }
@@ -969,13 +1056,12 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
        }
        memset(rxq->rxd, 0, size);
 
-       rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
+       rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
        if (rxq->buf == NULL) {
                wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
                pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
                return -ENOMEM;
        }
-       memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
 
        for (i = 0; i < MWL8K_RX_DESCS; i++) {
                int desc_size;
@@ -1092,9 +1178,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
                ieee80211_queue_work(hw, &priv->finalize_join_worker);
 }
 
+static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
+                                                  u8 *bssid)
+{
+       struct mwl8k_vif *mwl8k_vif;
+
+       list_for_each_entry(mwl8k_vif,
+                           vif_list, list) {
+               if (memcmp(bssid, mwl8k_vif->bssid,
+                          ETH_ALEN) == 0)
+                       return mwl8k_vif;
+       }
+
+       return NULL;
+}
+
 static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 {
        struct mwl8k_priv *priv = hw->priv;
+       struct mwl8k_vif *mwl8k_vif = NULL;
        struct mwl8k_rx_queue *rxq = priv->rxq + index;
        int processed;
 
@@ -1104,6 +1206,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
                void *rxd;
                int pkt_len;
                struct ieee80211_rx_status status;
+               struct ieee80211_hdr *wh;
                __le16 qos;
 
                skb = rxq->buf[rxq->head].skb;
@@ -1130,8 +1233,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 
                rxq->rxd_count--;
 
-               skb_put(skb, pkt_len);
-               mwl8k_remove_dma_header(skb, qos);
+               wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
                /*
                 * Check for a pending join operation.  Save a
@@ -1141,6 +1243,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
                if (mwl8k_capture_bssid(priv, (void *)skb->data))
                        mwl8k_save_beacon(hw, skb);
 
+               if (ieee80211_has_protected(wh->frame_control)) {
+
+                       /* Check if hw crypto has been enabled for
+                        * this bss. If yes, set the status flags
+                        * accordingly
+                        */
+                       mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
+                                                               wh->addr1);
+
+                       if (mwl8k_vif != NULL &&
+                           mwl8k_vif->is_hw_crypto_enabled == true) {
+                               /*
+                                * When the firmware detects an
+                                * MMIC error, the payload is
+                                * dropped and only the 32-byte
+                                * mwl8k firmware header reaches
+                                * the host.
+                                *
+                                * We need to add four bytes of
+                                * key information; mac80211
+                                * expects keyidx to be set to 0
+                                * there so that it triggers the
+                                * countermeasures for the MMIC
+                                * failure.
+                                */
+                               if (status.flag & RX_FLAG_MMIC_ERROR) {
+                                       struct mwl8k_dma_data *tr;
+                                       tr = (struct mwl8k_dma_data *)skb->data;
+                                       memset((void *)&(tr->data), 0, 4);
+                                       pkt_len += 4;
+                               }
+
+                               if (!ieee80211_is_auth(wh->frame_control))
+                                       status.flag |= RX_FLAG_IV_STRIPPED |
+                                                      RX_FLAG_DECRYPTED |
+                                                      RX_FLAG_MMIC_STRIPPED;
+                       }
+               }
+
+               skb_put(skb, pkt_len);
+               mwl8k_remove_dma_header(skb, qos);
                memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
                ieee80211_rx_irqsafe(hw, skb);
 
@@ -1204,13 +1346,12 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
        }
        memset(txq->txd, 0, size);
 
-       txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
+       txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
        if (txq->skb == NULL) {
                wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
                pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
                return -ENOMEM;
        }
-       memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
 
        for (i = 0; i < MWL8K_TX_DESCS; i++) {
                struct mwl8k_tx_desc *tx_desc;
@@ -1392,6 +1533,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
 
                info = IEEE80211_SKB_CB(skb);
                ieee80211_tx_info_clear_status(info);
+
+               /* Rate control is happening in the firmware.
+                * Ensure no tx rate is being reported.
+                */
+               info->status.rates[0].idx = -1;
+               info->status.rates[0].count = 1;
+
                if (MWL8K_TXD_SUCCESS(status))
                        info->flags |= IEEE80211_TX_STAT_ACK;
 
@@ -1423,7 +1571,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
        txq->txd = NULL;
 }
 
-static int
+static void
 mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
@@ -1443,7 +1591,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        else
                qos = 0;
 
-       mwl8k_add_dma_header(skb);
+       if (priv->ap_fw)
+               mwl8k_encapsulate_tx_frame(skb);
+       else
+               mwl8k_add_dma_header(skb, 0);
+
        wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
        tx_info = IEEE80211_SKB_CB(skb);
@@ -1481,7 +1633,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
                wiphy_debug(hw->wiphy,
                            "failed to dma map skb, dropping TX frame.\n");
                dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
        spin_lock_bh(&priv->tx_lock);
@@ -1518,8 +1670,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        mwl8k_tx_start(priv);
 
        spin_unlock_bh(&priv->tx_lock);
-
-       return NETDEV_TX_OK;
 }
 
 
@@ -1974,8 +2124,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
        cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
        cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
        cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
-       for (i = 0; i < MWL8K_TX_QUEUES; i++)
-               cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
+
+       /*
+        * The mac80211 stack treats Q0 as the highest-priority queue and Q3
+        * as the lowest, while the firmware has it the other way around (Q3
+        * highest, Q0 lowest). Map mac80211 Q3 to firmware Q0, and so on, so
+        * that the priority is interpreted the right way by the firmware.
+        */
+       for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+               int j = MWL8K_TX_QUEUES - 1 - i;
+               cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
+       }
+
        cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
                                 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
                                 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
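The reversed index in the loop above is the whole fix: with MWL8K_TX_QUEUES equal to 4, mac80211 queue 0 (its highest priority) is pointed at firmware queue 3 (the firmware's highest), i.e. 0->3, 1->2, 2->1, 3->0. The same mapping as a stand-alone helper, purely for illustration (the driver keeps it inline):

#define EXAMPLE_TX_QUEUES 4     /* stands in for MWL8K_TX_QUEUES */

/* Map a mac80211 queue index (0 = highest priority) to the firmware
 * queue index (EXAMPLE_TX_QUEUES - 1 = highest priority). */
static inline int example_fw_txq_index(int mac80211_queue)
{
        return EXAMPLE_TX_QUEUES - 1 - mac80211_queue;
}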
@@ -3098,6 +3258,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
        return rc;
 }
 
+/*
+ * CMD_UPDATE_ENCRYPTION.
+ */
+
+#define MAX_ENCR_KEY_LENGTH    16
+#define MIC_KEY_LENGTH         8
+
+struct mwl8k_cmd_update_encryption {
+       struct mwl8k_cmd_pkt header;
+
+       __le32 action;
+       __le32 reserved;
+       __u8 mac_addr[6];
+       __u8 encr_type;
+
+} __attribute__((packed));
+
+struct mwl8k_cmd_set_key {
+       struct mwl8k_cmd_pkt header;
+
+       __le32 action;
+       __le32 reserved;
+       __le16 length;
+       __le16 key_type_id;
+       __le32 key_info;
+       __le32 key_id;
+       __le16 key_len;
+       __u8 key_material[MAX_ENCR_KEY_LENGTH];
+       __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
+       __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+       __le16 tkip_rsc_low;
+       __le32 tkip_rsc_high;
+       __le16 tkip_tsc_low;
+       __le32 tkip_tsc_high;
+       __u8 mac_addr[6];
+} __attribute__((packed));
+
+enum {
+       MWL8K_ENCR_ENABLE,
+       MWL8K_ENCR_SET_KEY,
+       MWL8K_ENCR_REMOVE_KEY,
+       MWL8K_ENCR_SET_GROUP_KEY,
+};
+
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP       0
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE   1
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP      4
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED     7
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES       8
+
+enum {
+       MWL8K_ALG_WEP,
+       MWL8K_ALG_TKIP,
+       MWL8K_ALG_CCMP,
+};
+
+#define MWL8K_KEY_FLAG_TXGROUPKEY      0x00000004
+#define MWL8K_KEY_FLAG_PAIRWISE                0x00000008
+#define MWL8K_KEY_FLAG_TSC_VALID       0x00000040
+#define MWL8K_KEY_FLAG_WEP_TXKEY       0x01000000
+#define MWL8K_KEY_FLAG_MICKEY_VALID    0x02000000
+
+static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
+                                             struct ieee80211_vif *vif,
+                                             u8 *addr,
+                                             u8 encr_type)
+{
+       struct mwl8k_cmd_update_encryption *cmd;
+       int rc;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+       cmd->header.length = cpu_to_le16(sizeof(*cmd));
+       cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
+       memcpy(cmd->mac_addr, addr, ETH_ALEN);
+       cmd->encr_type = encr_type;
+
+       rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+       kfree(cmd);
+
+       return rc;
+}
+
+static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
+                                               u8 *addr,
+                                               struct ieee80211_key_conf *key)
+{
+       cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+       cmd->header.length = cpu_to_le16(sizeof(*cmd));
+       cmd->length = cpu_to_le16(sizeof(*cmd) -
+                               offsetof(struct mwl8k_cmd_set_key, length));
+       cmd->key_id = cpu_to_le32(key->keyidx);
+       cmd->key_len = cpu_to_le16(key->keylen);
+       memcpy(cmd->mac_addr, addr, ETH_ALEN);
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
+               if (key->keyidx == 0)
+                       cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
+
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
+               cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+                       ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+                       : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+               cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
+                                               | MWL8K_KEY_FLAG_TSC_VALID);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
+               cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+                       ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+                       : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       return 0;
+}
+
+static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
+                                               struct ieee80211_vif *vif,
+                                               u8 *addr,
+                                               struct ieee80211_key_conf *key)
+{
+       struct mwl8k_cmd_set_key *cmd;
+       int rc;
+       int keymlen;
+       u32 action;
+       u8 idx;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+       if (rc < 0)
+               goto done;
+
+       idx = key->keyidx;
+
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+               action = MWL8K_ENCR_SET_KEY;
+       else
+               action = MWL8K_ENCR_SET_GROUP_KEY;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               if (!mwl8k_vif->wep_key_conf[idx].enabled) {
+                       memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
+                                               sizeof(*key) + key->keylen);
+                       mwl8k_vif->wep_key_conf[idx].enabled = 1;
+               }
+
+               keymlen = 0;
+               action = MWL8K_ENCR_SET_KEY;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               keymlen = key->keylen;
+               break;
+       default:
+               rc = -ENOTSUPP;
+               goto done;
+       }
+
+       memcpy(cmd->key_material, key->key, keymlen);
+       cmd->action = cpu_to_le32(action);
+
+       rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+       kfree(cmd);
+
+       return rc;
+}
+
+static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
+                                               struct ieee80211_vif *vif,
+                                               u8 *addr,
+                                               struct ieee80211_key_conf *key)
+{
+       struct mwl8k_cmd_set_key *cmd;
+       int rc;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+       if (rc < 0)
+               goto done;
+
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+           key->cipher == WLAN_CIPHER_SUITE_WEP104)
+               mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
+
+       cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
+
+       rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+       kfree(cmd);
+
+       return rc;
+}
+
+static int mwl8k_set_key(struct ieee80211_hw *hw,
+                        enum set_key_cmd cmd_param,
+                        struct ieee80211_vif *vif,
+                        struct ieee80211_sta *sta,
+                        struct ieee80211_key_conf *key)
+{
+       int rc = 0;
+       u8 encr_type;
+       u8 *addr;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+       if (vif->type == NL80211_IFTYPE_STATION)
+               return -EOPNOTSUPP;
+
+       if (sta == NULL)
+               addr = hw->wiphy->perm_addr;
+       else
+               addr = sta->addr;
+
+       if (cmd_param == SET_KEY) {
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
+               if (rc)
+                       goto out;
+
+               if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
+                               || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
+                       encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
+               else
+                       encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
+
+               rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
+                                                               encr_type);
+               if (rc)
+                       goto out;
+
+               mwl8k_vif->is_hw_crypto_enabled = true;
+
+       } else {
+               rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
+
+               if (rc)
+                       goto out;
+
+               mwl8k_vif->is_hw_crypto_enabled = false;
+
+       }
+out:
+       return rc;
+}
+
 /*
  * CMD_UPDATE_STADB.
  */
@@ -3310,22 +3738,19 @@ static void mwl8k_rx_poll(unsigned long data)
 /*
  * Core driver operations.
  */
-static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        int index = skb_get_queue_mapping(skb);
-       int rc;
 
        if (!priv->radio_on) {
                wiphy_debug(hw->wiphy,
                            "dropped TX frame since radio disabled\n");
                dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
-       rc = mwl8k_txq_xmit(hw, index, skb);
-
-       return rc;
+       mwl8k_txq_xmit(hw, index, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
@@ -3469,6 +3894,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
        mwl8k_vif->vif = vif;
        mwl8k_vif->macid = macid;
        mwl8k_vif->seqno = 0;
+       memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
+       mwl8k_vif->is_hw_crypto_enabled = false;
 
        /* Set the mac address.  */
        mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3528,9 +3955,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
                if (rc)
                        goto out;
 
-               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
-               if (!rc)
-                       rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+               if (rc)
+                       wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
+               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+               if (rc)
+                       wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
+
        } else {
                rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
                if (rc)
@@ -3866,18 +4297,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
 {
        struct mwl8k_priv *priv = hw->priv;
        int ret;
+       int i;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+       struct ieee80211_key_conf *key;
 
        if (!priv->ap_fw) {
                ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
                if (ret >= 0) {
                        MWL8K_STA(sta)->peer_id = ret;
-                       return 0;
+                       ret = 0;
                }
 
-               return ret;
+       } else {
+               ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
        }
 
-       return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
+       for (i = 0; i < NUM_WEP_KEYS; i++) {
+               key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
+               if (mwl8k_vif->wep_key_conf[i].enabled)
+                       mwl8k_set_key(hw, SET_KEY, vif, sta, key);
+       }
+       return ret;
 }
 
 static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3894,12 +4334,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                if (!priv->wmm_enabled)
                        rc = mwl8k_cmd_set_wmm_mode(hw, 1);
 
-               if (!rc)
-                       rc = mwl8k_cmd_set_edca_params(hw, queue,
+               if (!rc) {
+                       int q = MWL8K_TX_QUEUES - 1 - queue;
+                       rc = mwl8k_cmd_set_edca_params(hw, q,
                                                       params->cw_min,
                                                       params->cw_max,
                                                       params->aifs,
                                                       params->txop);
+               }
 
                mwl8k_fw_unlock(hw);
        }
@@ -3932,7 +4374,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
 static int
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   enum ieee80211_ampdu_mlme_action action,
-                  struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                  u8 buf_size)
 {
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
@@ -3955,6 +4398,7 @@ static const struct ieee80211_ops mwl8k_ops = {
        .bss_info_changed       = mwl8k_bss_info_changed,
        .prepare_multicast      = mwl8k_prepare_multicast,
        .configure_filter       = mwl8k_configure_filter,
+       .set_key                = mwl8k_set_key,
        .set_rts_threshold      = mwl8k_set_rts_threshold,
        .sta_add                = mwl8k_sta_add,
        .sta_remove             = mwl8k_sta_remove,
@@ -4332,7 +4776,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
        hw->queues = MWL8K_TX_QUEUES;
 
        /* Set rssi values to dBm */
-       hw->flags |= IEEE80211_HW_SIGNAL_DBM;
+       hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
        hw->vif_data_size = sizeof(struct mwl8k_vif);
        hw->sta_data_size = sizeof(struct mwl8k_sta);
 
index 86cb54c..e99ca1c 100644 (file)
@@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
 
        freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
        channel = ieee80211_get_channel(wiphy, freq);
+       if (!channel) {
+               printk(KERN_DEBUG "Invalid channel designation %04X(%04X)\n",
+                       le16_to_cpu(bss->a.channel), freq);
+               return; /* Then ignore it for now */
+       }
        timestamp = 0;
        capability = le16_to_cpu(bss->a.capabilities);
        beacon_interval = le16_to_cpu(bss->a.beacon_interv);
index 25f965f..0ec55b5 100644 (file)
@@ -43,9 +43,8 @@ config P54_SPI
        tristate "Prism54 SPI (stlc45xx) support"
        depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
        ---help---
-         This driver is for stlc4550 or stlc4560 based wireless chips.
-         This driver is experimental, untested and will probably only work on
-         Nokia's N800/N810 Portable Internet Tablet.
+         This driver is for stlc4550 or stlc4560 based wireless chips
+         such as Nokia's N800/N810 Portable Internet Tablet.
 
          If you choose to build a module, it'll be called p54spi.
 
index 35b09aa..13d750d 100644 (file)
@@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = {
        { .bitrate = 540, .hw_value = 11, },
 };
 
+static struct p54_rssi_db_entry p54_rssi_default = {
+       /*
+        * The defaults are taken from usb-logs of the
+        * vendor driver. So, they should be safe to
+        * use in case we can't get a match from the
+        * rssi <-> dBm conversion database.
+        */
+       .mul = 130,
+       .add = -398,
+};
+
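
These defaults feed the same conversion that p54_rssi_to_dbm() uses later in this patch, ((rssi * mul) / 64 + add) / 4. A worked example with the default pair, using an illustrative raw rssi value:

    /* Illustrative arithmetic only, not driver code. */
    int rssi = 100;
    int dbm  = ((rssi * 130) / 64 + (-398)) / 4;  /* (203 - 398) / 4 == -48 dBm */
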
 #define CHAN_HAS_CAL           BIT(0)
 #define CHAN_HAS_LIMIT         BIT(1)
 #define CHAN_HAS_CURVE         BIT(2)
@@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq)
        return -1;
 }
 
+static int same_band(u16 freq, u16 freq2)
+{
+       return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
+}
+
 static int p54_compare_channels(const void *_a,
                                const void *_b)
 {
        const struct p54_channel_entry *a = _a;
        const struct p54_channel_entry *b = _b;
 
-       return a->index - b->index;
+       return a->freq - b->freq;
+}
+
+static int p54_compare_rssichan(const void *_a,
+                               const void *_b)
+{
+       const struct p54_rssi_db_entry *a = _a;
+       const struct p54_rssi_db_entry *b = _b;
+
+       return a->freq - b->freq;
 }
 
 static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
@@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev,
 
        for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
                           (i < list->entries); i++) {
+               struct p54_channel_entry *chan = &list->channels[i];
 
-               if (list->channels[i].band != band)
+               if (chan->band != band)
                        continue;
 
-               if (list->channels[i].data != CHAN_HAS_ALL) {
-                       wiphy_err(dev->wiphy,
-                                 "%s%s%s is/are missing for channel:%d [%d MHz].\n",
-                                 (list->channels[i].data & CHAN_HAS_CAL ? "" :
+               if (chan->data != CHAN_HAS_ALL) {
+                       wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
+                                 "channel:%d [%d MHz].\n",
+                                 (chan->data & CHAN_HAS_CAL ? "" :
                                   " [iqauto calibration data]"),
-                                 (list->channels[i].data & CHAN_HAS_LIMIT ? "" :
+                                 (chan->data & CHAN_HAS_LIMIT ? "" :
                                   " [output power limits]"),
-                                 (list->channels[i].data & CHAN_HAS_CURVE ? "" :
+                                 (chan->data & CHAN_HAS_CURVE ? "" :
                                   " [curve data]"),
-                                 list->channels[i].index, list->channels[i].freq);
+                                 chan->index, chan->freq);
                        continue;
                }
 
-               tmp->channels[j].band = list->channels[i].band;
-               tmp->channels[j].center_freq = list->channels[i].freq;
+               tmp->channels[j].band = chan->band;
+               tmp->channels[j].center_freq = chan->freq;
                j++;
        }
 
@@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
                }
        }
 
-       /* sort the list by the channel index */
+       /* sort the channel list by frequency */
        sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
             p54_compare_channels, NULL);
 
@@ -410,33 +436,121 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
 static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
        "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
 
-static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
-                            u16 type)
+static int p54_parse_rssical(struct ieee80211_hw *dev,
+                            u8 *data, int len, u16 type)
 {
        struct p54_common *priv = dev->priv;
-       int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0;
-       int entry_size = sizeof(struct pda_rssi_cal_entry) + offset;
-       int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
-       int i;
+       struct p54_rssi_db_entry *entry;
+       size_t db_len, entries;
+       int offset = 0, i;
+
+       if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+               entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
+               if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
+                       wiphy_err(dev->wiphy, "rssical size mismatch.\n");
+                       goto err_data;
+               }
+       } else {
+               /*
+                * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
+                * have an empty two byte header.
+                */
+               if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
+                       offset += 2;
 
-       if (len != (entry_size * num_entries)) {
-               wiphy_err(dev->wiphy,
-                         "unknown rssi calibration data packing type:(%x) len:%d.\n",
-                         type, len);
+               entries = (len - offset) /
+                       sizeof(struct pda_rssi_cal_ext_entry);
 
-               print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE,
-                                    data, len);
+               if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
+                   entries <= 0) {
+                       wiphy_err(dev->wiphy, "invalid rssi database.\n");
+                       goto err_data;
+               }
+       }
 
-               wiphy_err(dev->wiphy, "please report this issue.\n");
-               return;
+       db_len = sizeof(*entry) * entries;
+       priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
+       if (!priv->rssi_db)
+               return -ENOMEM;
+
+       priv->rssi_db->offset = 0;
+       priv->rssi_db->entries = entries;
+       priv->rssi_db->entry_size = sizeof(*entry);
+       priv->rssi_db->len = db_len;
+
+       entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
+       if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+               struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];
+
+               for (i = 0; i < entries; i++) {
+                       entry[i].freq = le16_to_cpu(cal[i].freq);
+                       entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+                       entry[i].add = (s16) le16_to_cpu(cal[i].add);
+               }
+       } else {
+               struct pda_rssi_cal_entry *cal = (void *) &data[offset];
+
+               for (i = 0; i < entries; i++) {
+                       u16 freq;
+                       switch (i) {
+                       case IEEE80211_BAND_2GHZ:
+                               freq = 2437;
+                               break;
+                       case IEEE80211_BAND_5GHZ:
+                               freq = 5240;
+                               break;
+                       }
+
+                       entry[i].freq = freq;
+                       entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+                       entry[i].add = (s16) le16_to_cpu(cal[i].add);
+               }
        }
 
-       for (i = 0; i < num_entries; i++) {
-               struct pda_rssi_cal_entry *cal = data +
-                                                (offset + i * entry_size);
-               priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul);
-               priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add);
+       /* sort the list by channel frequency */
+       sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
+       return 0;
+
+err_data:
+       wiphy_err(dev->wiphy,
+                 "rssi calibration data packing type:(%x) len:%d.\n",
+                 type, len);
+
+       print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);
+
+       wiphy_err(dev->wiphy, "please report this issue.\n");
+       return -EINVAL;
+}
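
To make the size checks above concrete: struct pda_rssi_cal_ext_entry (added further down in this patch) holds three __le16 fields, i.e. 6 bytes per entry, and some EEPROMs prepend an empty two-byte header. A worked example with illustrative PDR lengths:

    len = 26, data starts with 0x0000  ->  offset = 2
    entries = (26 - 2) / 6 = 4         ->  accepted, 4 database entries
    len = 25                           ->  (25 - 2) % 6 != 0, rejected with -EINVAL
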
+
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
+{
+       struct p54_rssi_db_entry *entry;
+       int i, found = -1;
+
+       if (!priv->rssi_db)
+               return &p54_rssi_default;
+
+       entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
+       for (i = 0; i < priv->rssi_db->entries; i++) {
+               if (!same_band(freq, entry[i].freq))
+                       continue;
+
+               if (found == -1) {
+                       found = i;
+                       continue;
+               }
+
+               /* nearest match */
+               if (abs(freq - entry[i].freq) <
+                   abs(freq - entry[found].freq)) {
+                       found = i;
+                       continue;
+               } else {
+                       break;
+               }
        }
+
+       return found < 0 ? &p54_rssi_default : &entry[found];
 }
 
 static void p54_parse_default_country(struct ieee80211_hw *dev,
@@ -627,21 +741,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
                case PDR_RSSI_LINEAR_APPROXIMATION:
                case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
                case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
-                       p54_parse_rssical(dev, entry->data, data_len,
-                                         le16_to_cpu(entry->code));
+                       err = p54_parse_rssical(dev, entry->data, data_len,
+                                               le16_to_cpu(entry->code));
+                       if (err)
+                               goto err;
                        break;
-               case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: {
-                       __le16 *src = (void *) entry->data;
-                       s16 *dst = (void *) &priv->rssical_db;
+               case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
+                       struct pda_custom_wrapper *pda = (void *) entry->data;
+                       __le16 *src;
+                       u16 *dst;
                        int i;
 
-                       if (data_len != sizeof(priv->rssical_db)) {
-                               err = -EINVAL;
-                               goto err;
-                       }
-                       for (i = 0; i < sizeof(priv->rssical_db) /
-                                       sizeof(*src); i++)
+                       if (priv->rssi_db || data_len < sizeof(*pda))
+                               break;
+
+                       priv->rssi_db = p54_convert_db(pda, data_len);
+                       if (!priv->rssi_db)
+                               break;
+
+                       src = (void *) priv->rssi_db->data;
+                       dst = (void *) priv->rssi_db->data;
+
+                       for (i = 0; i < priv->rssi_db->entries; i++)
                                *(dst++) = (s16) le16_to_cpu(*(src++));
+
                        }
                        break;
                case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
@@ -717,6 +840,8 @@ good_eeprom:
                SET_IEEE80211_PERM_ADDR(dev, perm_addr);
        }
 
+       priv->cur_rssi = &p54_rssi_default;
+
        wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
                   dev->wiphy->perm_addr, priv->version,
                   p54_rf_chips[priv->rxhw]);
@@ -727,9 +852,11 @@ err:
        kfree(priv->iq_autocal);
        kfree(priv->output_limit);
        kfree(priv->curve_data);
+       kfree(priv->rssi_db);
        priv->iq_autocal = NULL;
        priv->output_limit = NULL;
        priv->curve_data = NULL;
+       priv->rssi_db = NULL;
 
        wiphy_err(dev->wiphy, "eeprom parse failed!\n");
        return err;
index 9051aef..afde72b 100644 (file)
@@ -81,6 +81,12 @@ struct pda_pa_curve_data {
        u8 data[0];
 } __packed;
 
+struct pda_rssi_cal_ext_entry {
+       __le16 freq;
+       __le16 mul;
+       __le16 add;
+} __packed;
+
 struct pda_rssi_cal_entry {
        __le16 mul;
        __le16 add;
@@ -179,6 +185,7 @@ struct pda_custom_wrapper {
 
 /* used by our modificated eeprom image */
 #define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM           0xDEAD
+#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2         0xCAFF
 #define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM    0xBEEF
 #define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM             0xB05D
 
index 92b9b1f..2fab7d2 100644 (file)
@@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
        union p54_scan_body_union *body;
        struct p54_scan_tail_rate *rate;
        struct pda_rssi_cal_entry *rssi;
+       struct p54_rssi_db_entry *rssi_data;
        unsigned int i;
        void *entry;
-       int band = priv->hw->conf.channel->band;
        __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq);
 
        skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
@@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
        }
 
        rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
-       rssi->mul = cpu_to_le16(priv->rssical_db[band].mul);
-       rssi->add = cpu_to_le16(priv->rssical_db[band].add);
+       rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
+       rssi->mul = cpu_to_le16(rssi_data->mul);
+       rssi->add = cpu_to_le16(rssi_data->add);
        if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
                /* Longbow frontend needs ever more */
                rssi = (void *) skb_put(skb, sizeof(*rssi));
-               rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn);
-               rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2);
+               rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
+               rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
        }
 
        if (priv->fw_var >= 0x509) {
@@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
        hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
 
        p54_tx(priv, skb);
+       priv->cur_rssi = rssi_data;
        return 0;
 
 err:
@@ -557,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv)
 {
        struct sk_buff *skb;
        struct p54_edcf *edcf;
+       u8 rtd;
 
        skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
                            P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
@@ -573,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv)
                edcf->sifs = 0x0a;
                edcf->eofpad = 0x06;
        }
+       /*
+        * calculate the extra round trip delay according to the
+        * formula from 802.11-2007 17.3.8.6.
+        */
+       rtd = 3 * priv->coverage_class;
+       edcf->slottime += rtd;
+       edcf->round_trip_delay = cpu_to_le16(rtd);
        /* (see prism54/isl_oid.h for further details) */
        edcf->frameburst = cpu_to_le16(0);
-       edcf->round_trip_delay = cpu_to_le16(0);
        edcf->flags = 0;
        memset(edcf->mapping, 0, sizeof(edcf->mapping));
        memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
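
The extra delay follows directly from the coverage class: 802.11-2007 17.3.8.6 adds 3 us of air propagation time per coverage class step, which is what rtd = 3 * priv->coverage_class computes here. A worked example:

    coverage_class = 10  ->  rtd = 3 * 10 = 30 us added to the slot time
    coverage_class = 31  ->  rtd = 93 us, the maximum allowed by the clamp in
                             p54_set_coverage_class() later in this patch
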
index 04b63ec..eb581ab 100644 (file)
@@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv);
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
@@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot,
 /* eeprom */
 int p54_download_eeprom(struct p54_common *priv, void *buf,
                        u16 offset, u16 len);
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
 
 /* utility */
 u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
index 622d27b..356e6bb 100644 (file)
@@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv,
         * to cancel the old beacon template by hand, instead the firmware
         * will release the previous one through the feedback mechanism.
         */
-       WARN_ON(p54_tx_80211(priv->hw, beacon));
+       p54_tx_80211(priv->hw, beacon);
        priv->tsf_high32 = 0;
        priv->tsf_low32 = 0;
 
@@ -524,6 +524,59 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
        return 0;
 }
 
+static unsigned int p54_flush_count(struct p54_common *priv)
+{
+       unsigned int total = 0, i;
+
+       BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));
+
+       /*
+        * Because the firmware has the sole control over any frames
+        * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
+        * don't really count as pending or active.
+        */
+       for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++)
+               total += priv->tx_stats[i].len;
+       return total;
+}
+
+static void p54_flush(struct ieee80211_hw *dev, bool drop)
+{
+       struct p54_common *priv = dev->priv;
+       unsigned int total, i;
+
+       /*
+        * Currently, it wouldn't really matter if we wait for one second
+        * or 15 minutes. But once someone gets around to completing the
+        * TODOs [cancel stuck frames / reset device] in p54_work, it will
+        * suddenly make sense to wait that long.
+        */
+       i = P54_STATISTICS_UPDATE * 2 / 20;
+
+       /*
+        * In this case no locking is required because as we speak the
+        * queues have already been stopped and no new frames can sneak
+        * up from behind.
+        */
+       while ((total = p54_flush_count(priv)) && i--) {
+               /* waste time */
+               msleep(20);
+       }
+
+       WARN(total, "tx flush timeout, unresponsive firmware");
+}
+
+static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
+{
+       struct p54_common *priv = dev->priv;
+
+       mutex_lock(&priv->conf_mutex);
+       /* support all coverage class values as in 802.11-2007 Table 7-27 */
+       priv->coverage_class = clamp_t(u8, coverage_class, 0, 31);
+       p54_set_edcf(priv);
+       mutex_unlock(&priv->conf_mutex);
+}
+
 static const struct ieee80211_ops p54_ops = {
        .tx                     = p54_tx_80211,
        .start                  = p54_start,
@@ -536,11 +589,13 @@ static const struct ieee80211_ops p54_ops = {
        .sta_remove             = p54_sta_add_remove,
        .set_key                = p54_set_key,
        .config                 = p54_config,
+       .flush                  = p54_flush,
        .bss_info_changed       = p54_bss_info_changed,
        .configure_filter       = p54_configure_filter,
        .conf_tx                = p54_conf_tx,
        .get_stats              = p54_get_stats,
        .get_survey             = p54_get_survey,
+       .set_coverage_class     = p54_set_coverage_class,
 };
 
 struct ieee80211_hw *p54_init_common(size_t priv_data_len)
@@ -611,7 +666,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
 
 int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
 {
-       struct p54_common *priv = dev->priv;
+       struct p54_common __maybe_unused *priv = dev->priv;
        int err;
 
        err = ieee80211_register_hw(dev);
@@ -642,10 +697,12 @@ void p54_free_common(struct ieee80211_hw *dev)
        kfree(priv->iq_autocal);
        kfree(priv->output_limit);
        kfree(priv->curve_data);
+       kfree(priv->rssi_db);
        kfree(priv->used_rxkeys);
        priv->iq_autocal = NULL;
        priv->output_limit = NULL;
        priv->curve_data = NULL;
+       priv->rssi_db = NULL;
        priv->used_rxkeys = NULL;
        ieee80211_free_hw(dev);
 }
index 43a3b2e..50730fc 100644 (file)
@@ -116,7 +116,8 @@ struct p54_edcf_queue_param {
        __le16 txop;
 } __packed;
 
-struct p54_rssi_linear_approximation {
+struct p54_rssi_db_entry {
+       u16 freq;
        s16 mul;
        s16 add;
        s16 longbow_unkn;
@@ -197,13 +198,14 @@ struct p54_common {
        u8 rx_diversity_mask;
        u8 tx_diversity_mask;
        unsigned int output_power;
+       struct p54_rssi_db_entry *cur_rssi;
        int noise;
        /* calibration, output power limit and rssi<->dBm conversion data */
        struct pda_iq_autocal_entry *iq_autocal;
        unsigned int iq_autocal_len;
        struct p54_cal_database *curve_data;
        struct p54_cal_database *output_limit;
-       struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
+       struct p54_cal_database *rssi_db;
        struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
 
        /* BBP/MAC state */
@@ -215,6 +217,7 @@ struct p54_common {
        u32 tsf_low32, tsf_high32;
        u32 basic_rate_mask;
        u16 aid;
+       u8 coverage_class;
        bool powersave_override;
        __le32 beacon_req_id;
        struct completion beacon_comp;
index 1eacba4..0494d7b 100644 (file)
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
        while (i != idx) {
                u16 len;
                struct sk_buff *skb;
+               dma_addr_t dma_addr;
                desc = &ring[i];
                len = le16_to_cpu(desc->len);
                skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 
                        len = priv->common.rx_mtu;
                }
+               dma_addr = le32_to_cpu(desc->host_addr);
+               pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+                       priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
                skb_put(skb, len);
 
                if (p54_rx(dev, skb)) {
-                       pci_unmap_single(priv->pdev,
-                                        le32_to_cpu(desc->host_addr),
-                                        priv->common.rx_mtu + 32,
-                                        PCI_DMA_FROMDEVICE);
+                       pci_unmap_single(priv->pdev, dma_addr,
+                               priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
                        rx_buf[i] = NULL;
-                       desc->host_addr = 0;
+                       desc->host_addr = cpu_to_le32(0);
                } else {
                        skb_trim(skb, 0);
+                       pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+                               priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
                        desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
                }
 
index d592cbd..0b7bfb0 100644 (file)
@@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = {
 0x03, 0x00, 0x00, 0x11,                /* PDR_ANTENNA_GAIN */
        0x08, 0x08, 0x08, 0x08,
 
-0x09, 0x00, 0xad, 0xde,                /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */
-       0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x0a, 0x00, 0xff, 0xca,                /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */
+       0x01, 0x00, 0x0a, 0x00,
+       0x00, 0x00, 0x0a, 0x00,
+               0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
 
 /* struct pda_custom_wrapper */
 0x10, 0x06, 0x5d, 0xb0,                /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
@@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = {
        0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
 
 0x02, 0x00, 0x00, 0x00,                /* PDR_END */
-       0x67, 0x99,
+       0xb6, 0x04,
 };
 
 #endif /* P54SPI_EEPROM_H */
index 21713a7..9b344a9 100644 (file)
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
        {USB_DEVICE(0x1413, 0x5400)},   /* Telsey 802.11g USB2.0 Adapter */
        {USB_DEVICE(0x1435, 0x0427)},   /* Inventel UR054G */
        {USB_DEVICE(0x1668, 0x1050)},   /* Actiontec 802UIG-1 */
+       {USB_DEVICE(0x1740, 0x1000)},   /* Senao NUB-350 */
        {USB_DEVICE(0x2001, 0x3704)},   /* DLink DWL-G122 rev A2 */
        {USB_DEVICE(0x2001, 0x3705)},   /* D-Link DWL-G120 rev C1 */
        {USB_DEVICE(0x413c, 0x5513)},   /* Dell WLA3310 USB Wireless Adapter */
index f618b96..7834c26 100644 (file)
@@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb)
 
 static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
 {
-       int band = priv->hw->conf.channel->band;
-
        if (priv->rxhw != 5) {
-               return ((rssi * priv->rssical_db[band].mul) / 64 +
-                        priv->rssical_db[band].add) / 4;
+               return ((rssi * priv->cur_rssi->mul) / 64 +
+                        priv->cur_rssi->add) / 4;
        } else {
                /*
                 * TODO: find the correct formula
@@ -369,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
        rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
        priv->tsf_low32 = tsf32;
 
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
                header_len += hdr->align[0];
@@ -698,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher)
        }
 }
 
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct p54_common *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -719,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
                            &hdr_flags, &aid, &burst_allowed);
 
        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
-               if (!IS_QOS_QUEUE(queue)) {
-                       dev_kfree_skb_any(skb);
-                       return NETDEV_TX_OK;
-               } else {
-                       return NETDEV_TX_BUSY;
-               }
+               dev_kfree_skb_any(skb);
+               return;
        }
 
        padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
@@ -867,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
        p54info->extra_len = extra_len;
 
        p54_tx(priv, skb);
-       return NETDEV_TX_OK;
 }
index 848cc2c..518542b 100644 (file)
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
        __le32 mode;
        int ret;
 
+       if (priv->device_type != RNDIS_BCM4320B)
+               return -ENOTSUPP;
+
        netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
                                enabled ? "enabled" : "disabled",
                                timeout);
index 6f383cd..f630552 100644 (file)
@@ -97,6 +97,18 @@ config RT2800PCI_RT35XX
          Support for these devices is non-functional at the moment and is
          intended for testers and developers.
 
+config RT2800PCI_RT53XX
+       bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       default n
+       ---help---
+         This adds support for rt53xx wireless chipset family to the
+         rt2800pci driver.
+         Supported chips: RT5390
+
+         Support for these devices is non-functional at the moment and is
+         intended for testers and developers.
+
 endif
 
 config RT2500USB
index 54ca49a..329f328 100644 (file)
@@ -46,7 +46,7 @@
  * These indirect registers work with busy bits,
  * and we will try maximal REGISTER_BUSY_COUNT times to access
  * the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
+ * between each attempt. When the busy bit is still set at that time,
  * the access attempt is considered to have failed,
  * and we will print an error.
  */
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
                 * Enable synchronisation.
                 */
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
-               rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, CSR14_TBCN, 1);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
        }
 
@@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue)
                rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow the tbtt tasklet to be scheduled.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
                rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, CSR14_TBCN, 0);
                rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+               /*
+                * Wait for possibly running tbtt tasklets.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
                break;
        default:
                break;
@@ -771,7 +779,7 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
        rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
        rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
        rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
-       rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
+       rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
        rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
        rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
 
@@ -787,13 +795,13 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
                           entry_priv->desc_dma);
        rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
 
-       entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
+       entry_priv = rt2x00dev->atim->entries[0].priv_data;
        rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
        rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
                           entry_priv->desc_dma);
        rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
 
-       entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
+       entry_priv = rt2x00dev->bcn->entries[0].priv_data;
        rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
        rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
                           entry_priv->desc_dma);
@@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
 static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_OFF) ||
-                  (state == STATE_RADIO_IRQ_OFF_ISR);
+       int mask = (state == STATE_RADIO_IRQ_OFF);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
                rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+               /*
+                * Enable tasklets.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
        }
 
        /*
         * Only toggle the interrupts bits we are going to use.
         * Non-checked interrupt bits are disabled by default.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
        rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
        rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
        rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
        rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
        rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished before
+                * disabling the interrupts.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+       }
 }
 
 static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2400pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt2400pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -1106,19 +1131,21 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
        rt2x00_desc_write(txd, 2, word);
 
        rt2x00_desc_read(txd, 3, &word);
-       rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
+       rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
        rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
        rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
-       rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
+       rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
        rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
        rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
        rt2x00_desc_write(txd, 3, word);
 
        rt2x00_desc_read(txd, 4, &word);
-       rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low);
+       rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
+                          txdesc->u.plcp.length_low);
        rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
        rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
-       rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high);
+       rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH,
+                          txdesc->u.plcp.length_high);
        rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
        rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
        rt2x00_desc_write(txd, 4, word);
@@ -1139,7 +1166,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
                           test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W0_RTS,
                           test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
-       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
        rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
                           test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
        rt2x00_desc_write(txd, 0, word);
@@ -1183,8 +1210,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
        /*
         * Enable beaconing again.
         */
-       rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
-       rt2x00_set_field32(&reg, CSR14_TBCN, 1);
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 }
@@ -1253,7 +1278,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
 static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
                             const enum data_queue_qid queue_idx)
 {
-       struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+       struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
        struct queue_entry_priv_pci *entry_priv;
        struct queue_entry *entry;
        struct txdone_entry_desc txdesc;
@@ -1289,57 +1314,68 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
        }
 }
 
-static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                      struct rt2x00_field32 irq_field)
 {
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
+       u32 reg;
 
        /*
-        * Handle interrupts, walk through all bits
-        * and run the tasks, the bits are checked in order of
-        * priority.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
 
-       /*
-        * 1 - Beacon timer expired interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
-               rt2x00lib_beacondone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
 
-       /*
-        * 2 - Rx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_RXDONE))
-               rt2x00pci_rxdone(rt2x00dev);
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
 
-       /*
-        * 3 - Atim ring transmit done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
-               rt2400pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2400pci_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       u32 reg;
 
        /*
-        * 4 - Priority ring transmit done interrupt.
+        * Handle all tx queues.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
-               rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2400pci_txdone(rt2x00dev, QID_ATIM);
+       rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2400pci_txdone(rt2x00dev, QID_AC_VI);
 
        /*
-        * 5 - Tx ring transmit done interrupt.
+        * Enable all TXDONE interrupts again.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
-               rt2400pci_txdone(rt2x00dev, QID_AC_VI);
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
-       return IRQ_HANDLED;
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+static void rt2400pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2400pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
 }
 
 static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg;
+       u32 reg, mask;
 
        /*
         * Get the interrupt sources & saved to local variable.
@@ -1354,14 +1390,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       /* Store irqvalues for use in the interrupt thread. */
-       rt2x00dev->irqvalue[0] = reg;
+       mask = reg;
 
-       /* Disable interrupts, will be enabled again in the interrupt thread. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_OFF_ISR);
+       /*
+        * Schedule tasklets for interrupt handling.
+        */
+       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
 
-       return IRQ_WAKE_THREAD;
+       if (rt2x00_get_field32(reg, CSR7_RXDONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+               /*
+                * Mask out all txdone interrupts.
+                */
+               rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+       }
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now,
+        * the tasklet will reenable the appropriate interrupts.
+        */
+       spin_lock(&rt2x00dev->irqmask_lock);
+
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       reg |= mask;
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock(&rt2x00dev->irqmask_lock);
+
+       return IRQ_HANDLED;
 }
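
The reworked handler follows the usual mask-and-defer pattern: the hard IRQ acknowledges the sources, masks the ones it hands off, and lets the tasklets do the work and unmask again under irqmask_lock. A generic sketch of that pattern, with placeholder register helpers that are not rt2x00 API:

    /* Illustrative only; reg_read/reg_write/IRQ_STATUS/IRQ_MASK and
     * struct example_dev are placeholders, not kernel or rt2x00 symbols. */
    static irqreturn_t example_isr(int irq, void *dev_instance)
    {
            struct example_dev *dev = dev_instance;
            u32 pending = reg_read(dev, IRQ_STATUS);

            if (!pending)
                    return IRQ_NONE;
            reg_write(dev, IRQ_STATUS, pending);            /* acknowledge */

            spin_lock(&dev->irqmask_lock);
            reg_write(dev, IRQ_MASK,
                      reg_read(dev, IRQ_MASK) | pending);   /* mask handed-off sources */
            spin_unlock(&dev->irqmask_lock);

            tasklet_schedule(&dev->work_tasklet);           /* bottom half unmasks again */
            return IRQ_HANDLED;
    }
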
 
 /*
@@ -1574,6 +1640,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
         */
        __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
        __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+       __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
 
        /*
         * Set the rssi offset.
@@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
 
 static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
        .irq_handler            = rt2400pci_interrupt,
-       .irq_handler_thread     = rt2400pci_interrupt_thread,
+       .txstatus_tasklet       = rt2400pci_txstatus_tasklet,
+       .tbtt_tasklet           = rt2400pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt2400pci_rxdone_tasklet,
        .probe_hw               = rt2400pci_probe_hw,
        .initialize             = rt2x00pci_initialize,
        .uninitialize           = rt2x00pci_uninitialize,
index a9ff26a..5827787 100644 (file)
@@ -293,7 +293,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
                                  struct rt2x00intf_conf *conf,
                                  const unsigned int flags)
 {
-       struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
+       struct data_queue *queue = rt2x00dev->bcn;
        unsigned int bcn_preload;
        u32 reg;
 
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
                 * Enable synchronisation.
                 */
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
-               rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, CSR14_TBCN, 1);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
        }
 
@@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue)
                rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow the tbtt tasklet to be scheduled.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
                rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, CSR14_TBCN, 0);
                rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+               /*
+                * Wait for possibly running tbtt tasklets.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
                break;
        default:
                break;
@@ -857,7 +865,7 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
        rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
        rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
        rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
-       rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
+       rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
        rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
        rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
 
@@ -873,13 +881,13 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
                           entry_priv->desc_dma);
        rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
 
-       entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
+       entry_priv = rt2x00dev->atim->entries[0].priv_data;
        rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
        rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
                           entry_priv->desc_dma);
        rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
 
-       entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
+       entry_priv = rt2x00dev->bcn->entries[0].priv_data;
        rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
        rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
                           entry_priv->desc_dma);
@@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
 static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_OFF) ||
-                  (state == STATE_RADIO_IRQ_OFF_ISR);
+       int mask = (state == STATE_RADIO_IRQ_OFF);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
                rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+               /*
+                * Enable tasklets.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
        }
 
        /*
         * Only toggle the interrupts bits we are going to use.
         * Non-checked interrupt bits are disabled by default.
         */
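+       /*
+        * The interrupt mask register (CSR8) is also modified from the
+        * interrupt handler and the tasklets, so serialize access with
+        * irqmask_lock.
+        */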
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
        rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
        rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
        rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
        rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
        rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+       }
 }
 
 static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt2500pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -1263,10 +1287,12 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
        rt2x00_desc_write(txd, 2, word);
 
        rt2x00_desc_read(txd, 3, &word);
-       rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
-       rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
-       rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low);
-       rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high);
+       rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
+       rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
+       rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW,
+                          txdesc->u.plcp.length_low);
+       rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH,
+                          txdesc->u.plcp.length_high);
        rt2x00_desc_write(txd, 3, word);
 
        rt2x00_desc_read(txd, 10, &word);
@@ -1291,7 +1317,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
        rt2x00_set_field32(&word, TXD_W0_OFDM,
                           (txdesc->rate_mode == RATE_MODE_OFDM));
        rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
-       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
        rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
                           test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
@@ -1337,8 +1363,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
        /*
         * Enable beaconing again.
         */
-       rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
-       rt2x00_set_field32(&reg, CSR14_TBCN, 1);
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 }
@@ -1386,7 +1410,7 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
 static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
                             const enum data_queue_qid queue_idx)
 {
-       struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+       struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
        struct queue_entry_priv_pci *entry_priv;
        struct queue_entry *entry;
        struct txdone_entry_desc txdesc;
@@ -1422,58 +1446,68 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
        }
 }
 
-static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                      struct rt2x00_field32 irq_field)
 {
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
+       u32 reg;
 
        /*
-        * Handle interrupts, walk through all bits
-        * and run the tasks, the bits are checked in order of
-        * priority.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
 
-       /*
-        * 1 - Beacon timer expired interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
-               rt2x00lib_beacondone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
 
-       /*
-        * 2 - Rx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_RXDONE))
-               rt2x00pci_rxdone(rt2x00dev);
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
 
-       /*
-        * 3 - Atim ring transmit done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
-               rt2500pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2500pci_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       u32 reg;
 
        /*
-        * 4 - Priority ring transmit done interrupt.
+        * Handle all tx queues.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
-               rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2500pci_txdone(rt2x00dev, QID_ATIM);
+       rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2500pci_txdone(rt2x00dev, QID_AC_VI);
 
        /*
-        * 5 - Tx ring transmit done interrupt.
+        * Enable all TXDONE interrupts again.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
-               rt2500pci_txdone(rt2x00dev, QID_AC_VI);
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
+
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
 
-       return IRQ_HANDLED;
+static void rt2500pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2500pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
 }
 
 static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg;
+       u32 reg, mask;
 
        /*
         * Get the interrupt sources and save them to a local variable.
@@ -1488,14 +1522,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       /* Store irqvalues for use in the interrupt thread. */
-       rt2x00dev->irqvalue[0] = reg;
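+       /*
+        * Start from the asserted interrupt sources: every source that is
+        * handed off to a tasklet below stays masked in CSR8 until the
+        * tasklet re-enables it.
+        */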
+       mask = reg;
 
-       /* Disable interrupts, will be enabled again in the interrupt thread. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_OFF_ISR);
+       /*
+        * Schedule tasklets for interrupt handling.
+        */
+       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
 
-       return IRQ_WAKE_THREAD;
+       if (rt2x00_get_field32(reg, CSR7_RXDONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+               /*
+                * Mask out all txdone interrupts.
+                */
+               rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+       }
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklet will re-enable the appropriate interrupts.
+        */
+       spin_lock(&rt2x00dev->irqmask_lock);
+
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       reg |= mask;
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock(&rt2x00dev->irqmask_lock);
+
+       return IRQ_HANDLED;
 }
 
 /*
@@ -1896,6 +1958,7 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
         */
        __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
        __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+       __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
 
        /*
         * Set the rssi offset.
@@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
 
 static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
        .irq_handler            = rt2500pci_interrupt,
-       .irq_handler_thread     = rt2500pci_interrupt_thread,
+       .txstatus_tasklet       = rt2500pci_txstatus_tasklet,
+       .tbtt_tasklet           = rt2500pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt2500pci_rxdone_tasklet,
        .probe_hw               = rt2500pci_probe_hw,
        .initialize             = rt2x00pci_initialize,
        .uninitialize           = rt2x00pci_uninitialize,
index 6b3b1de..979fe65 100644 (file)
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
                rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
 
                rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
-               rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
                rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
-               rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
                rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
        }
 
@@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                /* No support, but no error either */
                break;
        case STATE_DEEP_SLEEP:
@@ -1104,7 +1100,7 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
                           (txdesc->rate_mode == RATE_MODE_OFDM));
        rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
                           test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
-       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
        rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
        rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
        rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
@@ -1118,10 +1114,12 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
        rt2x00_desc_write(txd, 1, word);
 
        rt2x00_desc_read(txd, 2, &word);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
+                          txdesc->u.plcp.length_low);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
+                          txdesc->u.plcp.length_high);
        rt2x00_desc_write(txd, 2, word);
 
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1799,6 +1797,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
                __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
        }
        __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
+       __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
 
        /*
         * Set the rssi offset.
index 4c55e85..70b9abb 100644 (file)
@@ -51,6 +51,7 @@
  * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
  * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
  * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5390 2.4G 1T1R
  */
 #define RF2820                         0x0001
 #define RF2850                         0x0002
@@ -65,6 +66,7 @@
 #define RF3320                         0x000b
 #define RF3322                         0x000c
 #define RF3853                         0x000d
+#define RF5390                         0x5390
 
 /*
  * Chipset revisions.
@@ -77,6 +79,7 @@
 #define REV_RT3071E                    0x0211
 #define REV_RT3090E                    0x0211
 #define REV_RT3390E                    0x0211
+#define REV_RT5390F                    0x0502
 
 /*
  * Signal information.
 #define E2PROM_CSR_LOAD_STATUS         FIELD32(0x00000040)
 #define E2PROM_CSR_RELOAD              FIELD32(0x00000080)
 
+/*
+ * AUX_CTRL: Aux/PCI-E related configuration
+ */
+#define AUX_CTRL                       0x10c
+#define AUX_CTRL_WAKE_PCIE_EN          FIELD32(0x00000002)
+#define AUX_CTRL_FORCE_PCIE_CLK                FIELD32(0x00000400)
+
 /*
  * OPT_14: Unknown register used by rt3xxx devices.
  */
 
 /*
  * GPIO_CTRL_CFG:
+ * GPIOD: GPIO direction, 0: Output, 1: Input
  */
 #define GPIO_CTRL_CFG                  0x0228
 #define GPIO_CTRL_CFG_BIT0             FIELD32(0x00000001)
 #define GPIO_CTRL_CFG_BIT5             FIELD32(0x00000020)
 #define GPIO_CTRL_CFG_BIT6             FIELD32(0x00000040)
 #define GPIO_CTRL_CFG_BIT7             FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_BIT8             FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT0       FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT1       FIELD32(0x00000200)
+#define GPIO_CTRL_CFG_GPIOD_BIT2       FIELD32(0x00000400)
+#define GPIO_CTRL_CFG_GPIOD_BIT3       FIELD32(0x00000800)
+#define GPIO_CTRL_CFG_GPIOD_BIT4       FIELD32(0x00001000)
+#define GPIO_CTRL_CFG_GPIOD_BIT5       FIELD32(0x00002000)
+#define GPIO_CTRL_CFG_GPIOD_BIT6       FIELD32(0x00004000)
+#define GPIO_CTRL_CFG_GPIOD_BIT7       FIELD32(0x00008000)
 
 /*
  * MCU_CMD_CFG
 
 /*
  * US_CYC_CNT
+ * BT_MODE_EN: Bluetooth mode enable
+ * CLOCK CYCLE: Clock cycle count in 1us.
+ * PCI: 0x21, PCIe: 0x7d, USB: 0x1e
  */
 #define US_CYC_CNT                     0x02a4
+#define US_CYC_CNT_BT_MODE_EN          FIELD32(0x00000100)
 #define US_CYC_CNT_CLOCK_CYCLE         FIELD32(0x000000ff)
 
 /*
  */
 #define        RF_CSR_CFG                      0x0500
 #define RF_CSR_CFG_DATA                        FIELD32(0x000000ff)
-#define RF_CSR_CFG_REGNUM              FIELD32(0x00001f00)
+#define RF_CSR_CFG_REGNUM              FIELD32(0x00003f00)
 #define RF_CSR_CFG_WRITE               FIELD32(0x00010000)
 #define RF_CSR_CFG_BUSY                        FIELD32(0x00020000)
 
  * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
  * PROTECT_CTRL: Protection control frame type for CCK TX
  *               0:none, 1:RTS/CTS, 2:CTS-to-self
- * PROTECT_NAV: TXOP protection type for CCK TX
- *              0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV
+ * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV
  * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
  * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
  * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
 #define CCK_PROT_CFG                   0x1364
 #define CCK_PROT_CFG_PROTECT_RATE      FIELD32(0x0000ffff)
 #define CCK_PROT_CFG_PROTECT_CTRL      FIELD32(0x00030000)
-#define CCK_PROT_CFG_PROTECT_NAV       FIELD32(0x000c0000)
+#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define CCK_PROT_CFG_PROTECT_NAV_LONG  FIELD32(0x00080000)
 #define CCK_PROT_CFG_TX_OP_ALLOW_CCK   FIELD32(0x00100000)
 #define CCK_PROT_CFG_TX_OP_ALLOW_OFDM  FIELD32(0x00200000)
 #define CCK_PROT_CFG_TX_OP_ALLOW_MM20  FIELD32(0x00400000)
 #define OFDM_PROT_CFG                  0x1368
 #define OFDM_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define OFDM_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define OFDM_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define OFDM_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define MM20_PROT_CFG                  0x136c
 #define MM20_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define MM20_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define MM20_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define MM20_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define MM20_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define MM40_PROT_CFG                  0x1370
 #define MM40_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define MM40_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define MM40_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define MM40_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define MM40_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define GF20_PROT_CFG                  0x1374
 #define GF20_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define GF20_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define GF20_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define GF20_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define GF20_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define GF40_PROT_CFG                  0x1378
 #define GF40_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define GF40_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define GF40_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define GF40_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define GF40_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1697,11 +1725,14 @@ struct mac_iveiv_entry {
  */
 
 /*
- * BBP 1: TX Antenna & Power
- * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
- *     3 - increase tx power by 6dBm
+ * BBP 1: TX Antenna & Power Control
+ * POWER_CTRL:
+ * 0 - normal,
+ * 1 - drop tx power by 6dBm,
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
  */
-#define BBP1_TX_POWER                  FIELD8(0x07)
+#define BBP1_TX_POWER_CTRL             FIELD8(0x07)
 #define BBP1_TX_ANTENNA                        FIELD8(0x18)
 
 /*
@@ -1715,6 +1746,13 @@ struct mac_iveiv_entry {
  */
 #define BBP4_TX_BF                     FIELD8(0x01)
 #define BBP4_BANDWIDTH                 FIELD8(0x18)
+#define BBP4_MAC_IF_CTRL               FIELD8(0x40)
+
+/*
+ * BBP 109
+ */
+#define BBP109_TX0_POWER               FIELD8(0x0f)
+#define BBP109_TX1_POWER               FIELD8(0xf0)
 
 /*
  * BBP 138: Unknown
@@ -1724,6 +1762,11 @@ struct mac_iveiv_entry {
 #define BBP138_TX_DAC1                 FIELD8(0x20)
 #define BBP138_TX_DAC2                 FIELD8(0x40)
 
+/*
+ * BBP 152: Rx Ant
+ */
+#define BBP152_RX_DEFAULT_ANT          FIELD8(0x80)
+
 /*
  * RFCSR registers
  * The wordsize of the RFCSR is 8 bits.
@@ -1733,11 +1776,17 @@ struct mac_iveiv_entry {
  * RFCSR 1:
  */
 #define RFCSR1_RF_BLOCK_EN             FIELD8(0x01)
+#define RFCSR1_PLL_PD                  FIELD8(0x02)
 #define RFCSR1_RX0_PD                  FIELD8(0x04)
 #define RFCSR1_TX0_PD                  FIELD8(0x08)
 #define RFCSR1_RX1_PD                  FIELD8(0x10)
 #define RFCSR1_TX1_PD                  FIELD8(0x20)
 
+/*
+ * RFCSR 2:
+ */
+#define RFCSR2_RESCAL_EN               FIELD8(0x80)
+
 /*
  * RFCSR 6:
  */
@@ -1749,6 +1798,11 @@ struct mac_iveiv_entry {
  */
 #define RFCSR7_RF_TUNING               FIELD8(0x01)
 
+/*
+ * RFCSR 11:
+ */
+#define RFCSR11_R                      FIELD8(0x03)
+
 /*
  * RFCSR 12:
  */
@@ -1770,6 +1824,7 @@ struct mac_iveiv_entry {
 #define RFCSR17_TXMIXER_GAIN           FIELD8(0x07)
 #define RFCSR17_TX_LO1_EN              FIELD8(0x08)
 #define RFCSR17_R                      FIELD8(0x20)
+#define RFCSR17_CODE                   FIELD8(0x7f)
 
 /*
  * RFCSR 20:
@@ -1802,8 +1857,32 @@ struct mac_iveiv_entry {
 /*
  * RFCSR 30:
  */
+#define RFCSR30_TX_H20M                        FIELD8(0x02)
+#define RFCSR30_RX_H20M                        FIELD8(0x04)
+#define RFCSR30_RX_VCM                 FIELD8(0x18)
 #define RFCSR30_RF_CALIBRATION         FIELD8(0x80)
 
+/*
+ * RFCSR 31:
+ */
+#define RFCSR31_RX_AGC_FC              FIELD8(0x1f)
+#define RFCSR31_RX_H20M                        FIELD8(0x20)
+
+/*
+ * RFCSR 38:
+ */
+#define RFCSR38_RX_LO1_EN              FIELD8(0x20)
+
+/*
+ * RFCSR 39:
+ */
+#define RFCSR39_RX_LO2_EN              FIELD8(0x80)
+
+/*
+ * RFCSR 49:
+ */
+#define RFCSR49_TX                     FIELD8(0x3f)
+
 /*
  * RF registers
  */
@@ -1836,6 +1915,11 @@ struct mac_iveiv_entry {
  * The wordsize of the EEPROM is 16 bits.
  */
 
+/*
+ * Chip ID
+ */
+#define EEPROM_CHIP_ID                 0x0000
+
 /*
  * EEPROM Version
  */
@@ -1989,23 +2073,26 @@ struct mac_iveiv_entry {
 #define EEPROM_RSSI_A2_LNA_A2          FIELD16(0xff00)
 
 /*
- * EEPROM Maximum TX power values
+ * EEPROM EIRP Maximum TX power values (unit: dBm)
  */
-#define EEPROM_MAX_TX_POWER            0x0027
-#define EEPROM_MAX_TX_POWER_24GHZ      FIELD16(0x00ff)
-#define EEPROM_MAX_TX_POWER_5GHZ       FIELD16(0xff00)
+#define EEPROM_EIRP_MAX_TX_POWER       0x0027
+#define EEPROM_EIRP_MAX_TX_POWER_2GHZ  FIELD16(0x00ff)
+#define EEPROM_EIRP_MAX_TX_POWER_5GHZ  FIELD16(0xff00)
 
 /*
  * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
  * This is delta in 40MHZ.
- * VALUE: Tx Power dalta value (MAX=4)
+ * VALUE: Tx Power dalta value, MAX=4(unit: dbm)
  * TYPE: 1: Plus the delta value, 0: minus the delta value
- * TXPOWER: Enable:
+ * ENABLE: enable tx power compensation for 40BW
  */
 #define EEPROM_TXPOWER_DELTA           0x0028
-#define EEPROM_TXPOWER_DELTA_VALUE     FIELD16(0x003f)
-#define EEPROM_TXPOWER_DELTA_TYPE      FIELD16(0x0040)
-#define EEPROM_TXPOWER_DELTA_TXPOWER   FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_2G  FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE_2G   FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_5G  FIELD16(0x3f00)
+#define EEPROM_TXPOWER_DELTA_TYPE_5G   FIELD16(0x4000)
+#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000)
 
 /*
  * EEPROM TXPOWER 802.11BG
@@ -2058,6 +2145,7 @@ struct mac_iveiv_entry {
 #define MCU_LED_LED_POLARITY           0x54
 #define MCU_RADAR                      0x60
 #define MCU_BOOT_SIGNAL                        0x72
+#define MCU_ANT_SELECT                 0x73
 #define MCU_BBP_SIGNAL                 0x80
 #define MCU_POWER_SAVE                 0x83
 
@@ -2202,4 +2290,9 @@ struct mac_iveiv_entry {
 #define TXPOWER_A_TO_DEV(__txpower) \
        clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
 
+/*
+ * Board's maximum TX power limitation
+ */
+#define EIRP_MAX_TX_POWER_LIMIT        0x50
+
 #endif /* RT2800_H */
index 54917a2..2ee6ceb 100644 (file)
@@ -400,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
        if (rt2800_wait_csr_ready(rt2x00dev))
                return -EBUSY;
 
-       if (rt2x00_is_pci(rt2x00dev))
+       if (rt2x00_is_pci(rt2x00dev)) {
+               if (rt2x00_rt(rt2x00dev, RT5390)) {
+                       rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+                       rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+                       rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+                       rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+               }
                rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
+       }
 
        /*
         * Disable DMA, will be reenabled later when enabling
@@ -465,14 +472,15 @@ void rt2800_write_tx_data(struct queue_entry *entry,
                           test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
        rt2x00_set_field32(&word, TXWI_W0_AMPDU,
                           test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
-       rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
-       rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
-       rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+       rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY,
+                          txdesc->u.ht.mpdu_density);
+       rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop);
+       rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs);
        rt2x00_set_field32(&word, TXWI_W0_BW,
                           test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
        rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
                           test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
-       rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+       rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc);
        rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
        rt2x00_desc_write(txwi, 0, word);
 
@@ -481,7 +489,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
                           test_bit(ENTRY_TXD_ACK, &txdesc->flags));
        rt2x00_set_field32(&word, TXWI_W1_NSEQ,
                           test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
-       rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+       rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size);
        rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
                           test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
                           txdesc->key_idx : 0xff);
@@ -674,7 +682,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
         * confuse the rate control algorithm by providing clearly wrong
         * data.
         */
-       if (aggr == 1 && ampdu == 0 && real_mcs != mcs) {
+       if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
                skbdesc->tx_rate_idx = real_mcs;
                mcs = real_mcs;
        }
@@ -744,7 +752,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
                if (pid >= QID_RX)
                        continue;
 
-               queue = rt2x00queue_get_queue(rt2x00dev, pid);
+               queue = rt2x00queue_get_tx_queue(rt2x00dev, pid);
                if (unlikely(!queue))
                        continue;
 
@@ -773,13 +781,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        unsigned int beacon_base;
        unsigned int padding_len;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
        rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
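+       /*
+        * Keep the original BCN_TIME_CFG value so it can be restored if
+        * padding the beacon fails below.
+        */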
+       orig_reg = reg;
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
        rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
@@ -810,7 +819,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
         * Write entire beacon with TXWI and padding to register.
         */
        padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
-       skb_pad(entry->skb, padding_len);
+       if (padding_len && skb_pad(entry->skb, padding_len)) {
+               ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+               /* skb freed by skb_pad() on failure */
+               entry->skb = NULL;
+               rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
+               return;
+       }
+
        beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
        rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
                                   entry->skb->len + padding_len);
@@ -818,8 +834,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
        /*
         * Enable beaconing again.
         */
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
        rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
@@ -831,8 +845,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
 }
 EXPORT_SYMBOL_GPL(rt2800_write_beacon);
 
-static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
-                                      unsigned int beacon_base)
+static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
+                                               unsigned int beacon_base)
 {
        int i;
 
@@ -845,6 +859,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
                rt2800_register_write(rt2x00dev, beacon_base + i, 0);
 }
 
+void rt2800_clear_beacon(struct queue_entry *entry)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       u32 reg;
+
+       /*
+        * Disable beaconing while we are reloading the beacon data,
+        * otherwise we might be sending out invalid data.
+        */
+       rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+       /*
+        * Clear beacon.
+        */
+       rt2800_clear_beacon_register(rt2x00dev,
+                                    HW_BEACON_OFFSET(entry->entry_idx));
+
+       /*
+        * Enabled beaconing again.
+        */
+       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
+
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
 const struct rt2x00debug rt2800_rt2x00debug = {
        .owner  = THIS_MODULE,
@@ -1005,7 +1046,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
 
        memset(&wcid_entry, 0, sizeof(wcid_entry));
        if (crypto->cmd == SET_KEY)
-               memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+               memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
        rt2800_register_multiwrite(rt2x00dev, offset,
                                      &wcid_entry, sizeof(wcid_entry));
 }
@@ -1060,27 +1101,44 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
 }
 EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
 
+static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev)
+{
+       int idx;
+       u32 offset, reg;
+
+       /*
+        * Search for the first free pairwise key entry and return the
+        * corresponding index.
+        *
+        * Make sure the WCID starts _after_ the last possible shared key
+        * entry (>32).
+        *
+        * Since parts of the pairwise key table might be shared with
+        * the beacon frame buffers 6 & 7 we should only write into the
+        * first 222 entries.
+        */
+       for (idx = 33; idx <= 222; idx++) {
+               offset = MAC_WCID_ATTR_ENTRY(idx);
+               rt2800_register_read(rt2x00dev, offset, &reg);
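+               /*
+                * A cleared WCID attribute entry indicates an unused
+                * pairwise key slot.
+                */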
+               if (!reg)
+                       return idx;
+       }
+       return -1;
+}
+
 int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
                               struct rt2x00lib_crypto *crypto,
                               struct ieee80211_key_conf *key)
 {
        struct hw_key_entry key_entry;
        u32 offset;
+       int idx;
 
        if (crypto->cmd == SET_KEY) {
-               /*
-                * 1 pairwise key is possible per AID, this means that the AID
-                * equals our hw_key_idx. Make sure the WCID starts _after_ the
-                * last possible shared key entry.
-                *
-                * Since parts of the pairwise key table might be shared with
-                * the beacon frame buffers 6 & 7 we should only write into the
-                * first 222 entries.
-                */
-               if (crypto->aid > (222 - 32))
+               idx = rt2800_find_pairwise_keyslot(rt2x00dev);
+               if (idx < 0)
                        return -ENOSPC;
-
-               key->hw_key_idx = 32 + crypto->aid;
+               key->hw_key_idx = idx;
 
                memcpy(key_entry.key, crypto->key,
                       sizeof(key_entry.key));
@@ -1154,30 +1212,12 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
        bool update_bssid = false;
 
        if (flags & CONFIG_UPDATE_TYPE) {
-               /*
-                * Clear current synchronisation setup.
-                */
-               rt2800_clear_beacon(rt2x00dev,
-                                   HW_BEACON_OFFSET(intf->beacon->entry_idx));
                /*
                 * Enable synchronisation.
                 */
                rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
-                                  (conf->sync == TSF_SYNC_ADHOC ||
-                                   conf->sync == TSF_SYNC_AP_NONE));
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-               /*
-                * Enable pre tbtt interrupt for beaconing modes
-                */
-               rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
-               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
-                                  (conf->sync == TSF_SYNC_AP_NONE));
-               rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
        }
 
        if (flags & CONFIG_UPDATE_MAC) {
@@ -1361,10 +1401,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
 }
 EXPORT_SYMBOL_GPL(rt2800_config_erp);
 
+static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
+                                    enum antenna ant)
+{
+       u32 reg;
+       u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
+       u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
+
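+       /*
+        * Antenna selection is signalled either through the EEPROM clock
+        * (EESK) pin on PCI devices or via an MCU command on USB devices;
+        * GPIO bit 3 is configured as output and driven to match.
+        */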
+       if (rt2x00_is_pci(rt2x00dev)) {
+               rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+               rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
+               rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
+       } else if (rt2x00_is_usb(rt2x00dev))
+               rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
+                                  eesk_pin, 0);
+
+       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+}
+
 void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
 {
        u8 r1;
        u8 r3;
+       u16 eeprom;
 
        rt2800_bbp_read(rt2x00dev, 1, &r1);
        rt2800_bbp_read(rt2x00dev, 3, &r3);
@@ -1372,7 +1434,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        /*
         * Configure the TX antenna.
         */
-       switch ((int)ant->tx) {
+       switch (ant->tx_chain_num) {
        case 1:
                rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
                break;
@@ -1387,8 +1449,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        /*
         * Configure the RX antenna.
         */
-       switch ((int)ant->rx) {
+       switch (ant->rx_chain_num) {
        case 1:
+               if (rt2x00_rt(rt2x00dev, RT3070) ||
+                   rt2x00_rt(rt2x00dev, RT3090) ||
+                   rt2x00_rt(rt2x00dev, RT3390)) {
+                       rt2x00_eeprom_read(rt2x00dev,
+                                          EEPROM_NIC_CONF1, &eeprom);
+                       if (rt2x00_get_field16(eeprom,
+                                               EEPROM_NIC_CONF1_ANT_DIVERSITY))
+                               rt2800_set_ant_diversity(rt2x00dev,
+                                               rt2x00dev->default_ant.rx);
+               }
                rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
                break;
        case 2:
@@ -1434,13 +1506,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
 {
        rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
 
-       if (rt2x00dev->default_ant.tx == 1)
+       if (rt2x00dev->default_ant.tx_chain_num == 1)
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
 
-       if (rt2x00dev->default_ant.rx == 1) {
+       if (rt2x00dev->default_ant.rx_chain_num == 1) {
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
-       } else if (rt2x00dev->default_ant.rx == 2)
+       } else if (rt2x00dev->default_ant.rx_chain_num == 2)
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
 
        if (rf->channel > 14) {
@@ -1526,6 +1598,105 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
        rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
 }
 
+
+#define RT5390_POWER_BOUND     0x27
+#define RT5390_FREQ_OFFSET_BOUND       0x5f
+
+static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_conf *conf,
+                                        struct rf_channel *rf,
+                                        struct channel_info *info)
+{
+       u8 rfcsr;
+       u16 eeprom;
+
+       rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+       rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
+       rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+       if (info->default_power1 > RT5390_POWER_BOUND)
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+       rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+       if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
+                                 RT5390_FREQ_OFFSET_BOUND);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       if (rf->channel <= 14) {
+               int idx = rf->channel-1;
+
+               if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+                       if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+                               /* r55/r59 value array of channel 1~14 */
+                               static const char r55_bt_rev[] = {0x83, 0x83,
+                                       0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
+                                       0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
+                               static const char r59_bt_rev[] = {0x0e, 0x0e,
+                                       0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
+                                       0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
+
+                               rt2800_rfcsr_write(rt2x00dev, 55,
+                                                  r55_bt_rev[idx]);
+                               rt2800_rfcsr_write(rt2x00dev, 59,
+                                                  r59_bt_rev[idx]);
+                       } else {
+                               static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+                                       0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
+                                       0x88, 0x88, 0x86, 0x85, 0x84};
+
+                               rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
+                       }
+               } else {
+                       if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+                               static const char r55_nonbt_rev[] = {0x23, 0x23,
+                                       0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
+                                       0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
+                               static const char r59_nonbt_rev[] = {0x07, 0x07,
+                                       0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+                                       0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
+
+                               rt2800_rfcsr_write(rt2x00dev, 55,
+                                                  r55_nonbt_rev[idx]);
+                               rt2800_rfcsr_write(rt2x00dev, 59,
+                                                  r59_nonbt_rev[idx]);
+                       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+                               static const char r59_non_bt[] = {0x8f, 0x8f,
+                                       0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
+                                       0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
+
+                               rt2800_rfcsr_write(rt2x00dev, 59,
+                                                  r59_non_bt[idx]);
+                       }
+               }
+       }
+
+       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+       rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+}
+
 static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                                  struct ieee80211_conf *conf,
                                  struct rf_channel *rf,
@@ -1550,6 +1721,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
            rt2x00_rf(rt2x00dev, RF3052) ||
            rt2x00_rf(rt2x00dev, RF3320))
                rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
+       else if (rt2x00_rf(rt2x00dev, RF5390))
+               rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
        else
                rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
 
@@ -1562,12 +1735,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        rt2800_bbp_write(rt2x00dev, 86, 0);
 
        if (rf->channel <= 14) {
-               if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
-                       rt2800_bbp_write(rt2x00dev, 82, 0x62);
-                       rt2800_bbp_write(rt2x00dev, 75, 0x46);
-               } else {
-                       rt2800_bbp_write(rt2x00dev, 82, 0x84);
-                       rt2800_bbp_write(rt2x00dev, 75, 0x50);
+               if (!rt2x00_rt(rt2x00dev, RT5390)) {
+                       if (test_bit(CONFIG_EXTERNAL_LNA_BG,
+                                    &rt2x00dev->flags)) {
+                               rt2800_bbp_write(rt2x00dev, 82, 0x62);
+                               rt2800_bbp_write(rt2x00dev, 75, 0x46);
+                       } else {
+                               rt2800_bbp_write(rt2x00dev, 82, 0x84);
+                               rt2800_bbp_write(rt2x00dev, 75, 0x50);
+                       }
                }
        } else {
                rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@@ -1587,13 +1763,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        tx_pin = 0;
 
        /* Turn on unused PA or LNA when not using 1T or 1R */
-       if (rt2x00dev->default_ant.tx != 1) {
+       if (rt2x00dev->default_ant.tx_chain_num == 2) {
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
        }
 
        /* Turn on unused PA or LNA when not using 1T or 1R */
-       if (rt2x00dev->default_ant.rx != 1) {
+       if (rt2x00dev->default_ant.rx_chain_num == 2) {
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
        }
@@ -1637,30 +1813,116 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
 }
 
+static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
+                                     enum ieee80211_band band)
+{
+       u16 eeprom;
+       u8 comp_en;
+       u8 comp_type;
+       int comp_value;
+
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+
+       if (eeprom == 0xffff)
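+       /*
+        * Treat an all-ones (unprogrammed) EEPROM word as "no delta".
+        */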
+               return 0;
+
+       if (band == IEEE80211_BAND_2GHZ) {
+               comp_en = rt2x00_get_field16(eeprom,
+                                EEPROM_TXPOWER_DELTA_ENABLE_2G);
+               if (comp_en) {
+                       comp_type = rt2x00_get_field16(eeprom,
+                                          EEPROM_TXPOWER_DELTA_TYPE_2G);
+                       comp_value = rt2x00_get_field16(eeprom,
+                                           EEPROM_TXPOWER_DELTA_VALUE_2G);
+                       if (!comp_type)
+                               comp_value = -comp_value;
+               }
+       } else {
+               comp_en = rt2x00_get_field16(eeprom,
+                                EEPROM_TXPOWER_DELTA_ENABLE_5G);
+               if (comp_en) {
+                       comp_type = rt2x00_get_field16(eeprom,
+                                          EEPROM_TXPOWER_DELTA_TYPE_5G);
+                       comp_value = rt2x00_get_field16(eeprom,
+                                           EEPROM_TXPOWER_DELTA_VALUE_5G);
+                       if (!comp_type)
+                               comp_value = -comp_value;
+               }
+       }
+
+       return comp_value;
+}
+
+static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
+                                    int is_rate_b,
+                                    enum ieee80211_band band,
+                                    int power_level,
+                                    u8 txpower)
+{
+       u32 reg;
+       u16 eeprom;
+       u8 criterion;
+       u8 eirp_txpower;
+       u8 eirp_txpower_criterion;
+       u8 reg_limit;
+       int bw_comp = 0;
+
+       if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
+               return txpower;
+
+       if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+               bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+       if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
+               /*
+                * Check if the EIRP txpower exceeds the txpower limit.
+                * We use OFDM 6M as the criterion; its EIRP txpower
+                * is stored at EEPROM_EIRP_MAX_TX_POWER.
+                * .11b data rates need an additional 4dBm
+                * when calculating the EIRP txpower.
+                */
+               rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+               criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
+
+               rt2x00_eeprom_read(rt2x00dev,
+                                  EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+               if (band == IEEE80211_BAND_2GHZ)
+                       eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+                                                EEPROM_EIRP_MAX_TX_POWER_2GHZ);
+               else
+                       eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+                                                EEPROM_EIRP_MAX_TX_POWER_5GHZ);
+
+               eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
+                                      (is_rate_b ? 4 : 0) + bw_comp;
+
+               reg_limit = (eirp_txpower > power_level) ?
+                                       (eirp_txpower - power_level) : 0;
+       } else
+               reg_limit = 0;
+
+       return txpower + bw_comp - reg_limit;
+}
+
 static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
-                                 const int max_txpower)
+                                 struct ieee80211_conf *conf)
 {
        u8 txpower;
-       u8 max_value = (u8)max_txpower;
        u16 eeprom;
-       int i;
+       int i, is_rate_b;
        u32 reg;
        u8 r1;
        u32 offset;
+       enum ieee80211_band band = conf->channel->band;
+       int power_level = conf->power_level;
 
        /*
-        * set to normal tx power mode: +/- 0dBm
+        * set to normal bbp tx power control mode: +/- 0dBm
         */
        rt2800_bbp_read(rt2x00dev, 1, &r1);
-       rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+       rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
        rt2800_bbp_write(rt2x00dev, 1, r1);
-
-       /*
-        * The eeprom contains the tx power values for each rate. These
-        * values map to 100% tx power. Each 16bit word contains four tx
-        * power values and the order is the same as used in the TX_PWR_CFG
-        * registers.
-        */
        offset = TX_PWR_CFG_0;
 
        for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
@@ -1674,73 +1936,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
                rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
                                   &eeprom);
 
-               /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
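+               /*
+                * Only the first EEPROM tx power word (the CCK rates
+                * 1/2/5.5/11 Mbps) is treated as a .11b rate group.
+                */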
+               is_rate_b = i ? 0 : 1;
+               /*
+                * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
                 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE0);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
 
-               /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
+               /*
+                * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
                 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE1);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
 
-               /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS,
+               /*
+                * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
                 * TX_PWR_CFG_2: MCS6,  TX_PWR_CFG_3: MCS14,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE2);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
 
-               /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
+               /*
+                * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
                 * TX_PWR_CFG_2: MCS7,  TX_PWR_CFG_3: MCS15,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE3);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
 
                /* read the next four txpower values */
                rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
                                   &eeprom);
 
-               /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
+               is_rate_b = 0;
+               /*
+                * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
                 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE0);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
 
-               /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
+               /*
+                * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
                 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE1);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
 
-               /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
+               /*
+                * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
                 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE2);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
 
-               /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
+               /*
+                * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
                 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE3);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7,
-                                  min(txpower, max_value));
+               txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
 
                rt2800_register_write(rt2x00dev, offset, reg);
 
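Editor's note: the loop above now runs each per-rate EEPROM value through rt2800_compesate_txpower() instead of clamping it with min(txpower, max_value) before the TX_PWR_CFG_RATEx fields are written. As a rough illustration of that read-compensate-pack pattern, here is a small standalone C model; the 4-bit field packing, the limits and the sample values are assumptions for the sketch, not the driver's actual numbers.

#include <stdint.h>
#include <stdio.h>

/* Toy model only: per-rate compensation instead of a flat clamp.
 * Limits, field width and sample values are invented for illustration. */
static uint8_t compensate(uint8_t eeprom_power, int is_rate_b)
{
        uint8_t limit = is_rate_b ? 14 : 15;    /* pretend CCK rates get less headroom */

        return eeprom_power > limit ? limit : eeprom_power;
}

int main(void)
{
        uint8_t eeprom[8] = { 10, 12, 16, 9, 11, 15, 17, 8 };  /* fake per-rate bytes */
        uint32_t reg = 0;

        for (int i = 0; i < 8; i++) {
                uint8_t txpower = compensate(eeprom[i], i < 2); /* first two: "rate B" */

                reg |= (uint32_t)(txpower & 0xf) << (4 * i);    /* 4-bit field per rate */
        }
        printf("TX_PWR_CFG word: 0x%08x\n", (unsigned int)reg);
        return 0;
}

As far as the hunk shows, the compensation helper takes the rate class, band and requested power_level into account, which is why the old single max_value clamp could be dropped.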
@@ -1799,11 +2087,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
        /* Always recalculate LNA gain before changing configuration */
        rt2800_config_lna_gain(rt2x00dev, libconf);
 
-       if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+       if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
                rt2800_config_channel(rt2x00dev, libconf->conf,
                                      &libconf->rf, &libconf->channel);
+               rt2800_config_txpower(rt2x00dev, libconf->conf);
+       }
        if (flags & IEEE80211_CONF_CHANGE_POWER)
-               rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
+               rt2800_config_txpower(rt2x00dev, libconf->conf);
        if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
                rt2800_config_retry_limit(rt2x00dev, libconf);
        if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -1832,7 +2122,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3071) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
-                   rt2x00_rt(rt2x00dev, RT3390))
+                   rt2x00_rt(rt2x00dev, RT3390) ||
+                   rt2x00_rt(rt2x00dev, RT5390))
                        return 0x1c + (2 * rt2x00dev->lna_gain);
                else
                        return 0x2e + rt2x00dev->lna_gain;
@@ -1964,6 +2255,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
+       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
        } else {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2032,7 +2327,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2045,7 +2340,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2058,7 +2353,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2071,7 +2366,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2084,7 +2379,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2097,7 +2392,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2180,26 +2475,30 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
                                              wcid, sizeof(wcid));
 
-               rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
+               rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0);
                rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
        }
 
        /*
         * Clear all beacons
         */
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
 
        if (rt2x00_is_usb(rt2x00dev)) {
                rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
                rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
                rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+       } else if (rt2x00_is_pcie(rt2x00dev)) {
+               rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+               rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
+               rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
        }
 
        rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
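Editor's note: the new else-if branch programs the microsecond tick length for PCIe devices (125 bus clocks per microsecond) alongside the existing USB value (30). A hedged sketch of that selection; the enum and the "0 means leave the register alone" convention are illustrative only.

/* Sketch only: ticks of the reference clock per microsecond, by bus type. */
enum bus_type { BUS_USB, BUS_PCIE, BUS_SOC };

unsigned int us_cyc_cnt_for(enum bus_type bus)
{
        switch (bus) {
        case BUS_USB:
                return 30;      /* value written for USB devices in the hunk */
        case BUS_PCIE:
                return 125;     /* value added for PCIe devices */
        default:
                return 0;       /* SoC: US_CYC_CNT is not reprogrammed */
        }
}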
@@ -2335,15 +2634,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                     rt2800_wait_bbp_ready(rt2x00dev)))
                return -EACCES;
 
-       if (rt2800_is_305x_soc(rt2x00dev))
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_bbp_read(rt2x00dev, 4, &value);
+               rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
+               rt2800_bbp_write(rt2x00dev, 4, value);
+       }
+
+       if (rt2800_is_305x_soc(rt2x00dev) ||
+           rt2x00_rt(rt2x00dev, RT5390))
                rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
        rt2800_bbp_write(rt2x00dev, 65, 0x2c);
        rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
        if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
                rt2800_bbp_write(rt2x00dev, 69, 0x16);
                rt2800_bbp_write(rt2x00dev, 73, 0x12);
+       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_bbp_write(rt2x00dev, 69, 0x12);
+               rt2800_bbp_write(rt2x00dev, 73, 0x13);
+               rt2800_bbp_write(rt2x00dev, 75, 0x46);
+               rt2800_bbp_write(rt2x00dev, 76, 0x28);
+               rt2800_bbp_write(rt2x00dev, 77, 0x59);
        } else {
                rt2800_bbp_write(rt2x00dev, 69, 0x12);
                rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -2354,7 +2669,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3071) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
-           rt2x00_rt(rt2x00dev, RT3390)) {
+           rt2x00_rt(rt2x00dev, RT3390) ||
+           rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_bbp_write(rt2x00dev, 79, 0x13);
                rt2800_bbp_write(rt2x00dev, 80, 0x05);
                rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -2366,35 +2682,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        }
 
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
-       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+       else
+               rt2800_bbp_write(rt2x00dev, 83, 0x6a);
 
        if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
                rt2800_bbp_write(rt2x00dev, 84, 0x19);
+       else if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 84, 0x9a);
        else
                rt2800_bbp_write(rt2x00dev, 84, 0x99);
 
-       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 86, 0x38);
+       else
+               rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
-       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 92, 0x02);
+       else
+               rt2800_bbp_write(rt2x00dev, 92, 0x00);
 
        if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+           rt2x00_rt(rt2x00dev, RT5390) ||
            rt2800_is_305x_soc(rt2x00dev))
                rt2800_bbp_write(rt2x00dev, 103, 0xc0);
        else
                rt2800_bbp_write(rt2x00dev, 103, 0x00);
 
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 104, 0x92);
+
        if (rt2800_is_305x_soc(rt2x00dev))
                rt2800_bbp_write(rt2x00dev, 105, 0x01);
+       else if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 105, 0x3c);
        else
                rt2800_bbp_write(rt2x00dev, 105, 0x05);
-       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 106, 0x03);
+       else
+               rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 128, 0x12);
 
        if (rt2x00_rt(rt2x00dev, RT3071) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
-           rt2x00_rt(rt2x00dev, RT3390)) {
+           rt2x00_rt(rt2x00dev, RT3390) ||
+           rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_bbp_read(rt2x00dev, 138, &value);
 
                rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -2406,6 +2749,42 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 138, value);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               int ant, div_mode;
+
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+               div_mode = rt2x00_get_field16(eeprom,
+                                             EEPROM_NIC_CONF1_ANT_DIVERSITY);
+               ant = (div_mode == 3) ? 1 : 0;
+
+               /* check if this is a Bluetooth combo card */
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+               if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+                       u32 reg;
+
+                       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+                       if (ant == 0)
+                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+                       else if (ant == 1)
+                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
+                       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+               }
+
+               rt2800_bbp_read(rt2x00dev, 152, &value);
+               if (ant == 0)
+                       rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+               else
+                       rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+               rt2800_bbp_write(rt2x00dev, 152, value);
+
+               /* Init frequency calibration */
+               rt2800_bbp_write(rt2x00dev, 142, 1);
+               rt2800_bbp_write(rt2x00dev, 143, 57);
+       }
 
        for (i = 0; i < EEPROM_BBP_SIZE; i++) {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
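Editor's note: for RT5390 the default RX antenna now comes from the EEPROM antenna-diversity field, and on Bluetooth combo cards the matching GPIO line is driven as well. A standalone toy model of that selection follows; the struct and the GPIO bit meanings are assumptions made for this sketch, not the driver's types.

#include <stdint.h>

struct ant_setup {
        int rx_ant;             /* 0 = first antenna, 1 = second antenna */
        uint32_t gpio_ctrl;     /* bits 3/6 drive the switch on BT combo cards */
};

struct ant_setup pick_default_ant(unsigned int div_mode, int bt_combo)
{
        struct ant_setup s = { 0, 0 };

        /* Diversity mode 3 selects the second antenna, anything else the first. */
        s.rx_ant = (div_mode == 3) ? 1 : 0;

        if (bt_combo) {
                /* Drive exactly one of the two GPIO lines for the RF switch. */
                if (s.rx_ant == 0)
                        s.gpio_ctrl |= 1u << 3;
                else
                        s.gpio_ctrl |= 1u << 6;
        }
        return s;
}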
@@ -2436,6 +2815,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
        rt2800_bbp_write(rt2x00dev, 4, bbp);
 
+       rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
+       rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
        rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
        rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2491,18 +2874,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rt(rt2x00dev, RT3071) &&
            !rt2x00_rt(rt2x00dev, RT3090) &&
            !rt2x00_rt(rt2x00dev, RT3390) &&
+           !rt2x00_rt(rt2x00dev, RT5390) &&
            !rt2800_is_305x_soc(rt2x00dev))
                return 0;
 
        /*
         * Init RF calibration.
         */
-       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
-       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-       msleep(1);
-       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
-       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+               rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+               msleep(1);
+               rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
+               rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+       } else {
+               rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+               rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+               msleep(1);
+               rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+               rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+       }
 
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3071) ||
@@ -2510,7 +2903,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
                rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
                rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
-               rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
+               rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
                rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
                rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
                rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
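Editor's note: RT5390 pulses RFCSR2's RESCAL_EN bit for its RF calibration where the older chips pulse RFCSR30_RF_CALIBRATION, but the sequence is the same: set the bit, wait about a millisecond, clear it. A self-contained sketch of that pulse pattern, with dummy register accessors standing in for the rfcsr helpers:

#include <stdint.h>
#include <unistd.h>

/* Dummy register file standing in for the rfcsr read/write helpers. */
static uint8_t regs[64];
static uint8_t rf_read(int reg)              { return regs[reg]; }
static void    rf_write(int reg, uint8_t v)  { regs[reg] = v; }

/* Set a calibration bit, give the RF roughly 1 ms, then clear it again. */
void pulse_calibration_bit(int reg, uint8_t bit_mask)
{
        uint8_t val = rf_read(reg);

        rf_write(reg, val | bit_mask);                  /* RESCAL_EN / RF_CALIBRATION = 1 */
        usleep(1000);                                   /* msleep(1) in the hunk */
        rf_write(reg, val & (uint8_t)~bit_mask);        /* back to 0 */
}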
@@ -2593,6 +2986,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
                rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
                return 0;
+       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+               rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+               rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+               rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+               rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+               rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+               rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+
+               rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+               rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
+               rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+               rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+
+               rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+               rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+               rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+               rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+               rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+               rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
+               rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
+               rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
+               rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+               rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+
+               rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
+               rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+               rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
+               rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+               rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+               rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+
+               rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+               rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
        }
 
        if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -2602,12 +3076,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
        } else if (rt2x00_rt(rt2x00dev, RT3071) ||
                   rt2x00_rt(rt2x00dev, RT3090)) {
+               rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
                rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
                rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
                rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
 
-               rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
-
                rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
                rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
                if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2619,6 +3093,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                                rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
                }
                rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+               rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+               rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+               rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
        } else if (rt2x00_rt(rt2x00dev, RT3390)) {
                rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
                rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2642,21 +3120,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                        rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
        }
 
-       /*
-        * Set back to initial state
-        */
-       rt2800_bbp_write(rt2x00dev, 24, 0);
+       if (!rt2x00_rt(rt2x00dev, RT5390)) {
+               /*
+                * Set back to initial state
+                */
+               rt2800_bbp_write(rt2x00dev, 24, 0);
 
-       rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
-       rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+               rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+               rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
 
-       /*
-        * set BBP back to BW20
-        */
-       rt2800_bbp_read(rt2x00dev, 4, &bbp);
-       rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
-       rt2800_bbp_write(rt2x00dev, 4, bbp);
+               /*
+                * Set BBP back to BW20
+                */
+               rt2800_bbp_read(rt2x00dev, 4, &bbp);
+               rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+               rt2800_bbp_write(rt2x00dev, 4, bbp);
+       }
 
        if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
            rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2668,24 +3148,29 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
        rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
        rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
 
-       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
-       if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
-           rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
-           rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-               if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
-                       rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
-       }
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
-       if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
-               rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
-                                 rt2x00_get_field16(eeprom,
-                                                  EEPROM_TXMIXER_GAIN_BG_VAL));
-       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+       if (!rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+               if (rt2x00_rt(rt2x00dev, RT3070) ||
+                   rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+                   rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+                   rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+                       if (!test_bit(CONFIG_EXTERNAL_LNA_BG,
+                                     &rt2x00dev->flags))
+                               rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+               }
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+               if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+                       rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+                                       rt2x00_get_field16(eeprom,
+                                               EEPROM_TXMIXER_GAIN_BG_VAL));
+               rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+       }
 
        if (rt2x00_rt(rt2x00dev, RT3090)) {
                rt2800_bbp_read(rt2x00dev, 138, &bbp);
 
+       /* Turn off unused DAC1 and ADC1 to reduce power consumption */
                rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
                if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
                        rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
@@ -2719,10 +3204,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
        }
 
-       if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+       if (rt2x00_rt(rt2x00dev, RT3070)) {
                rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
-               if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
-                   rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+               if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
                        rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
                else
                        rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2732,6 +3216,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+               rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+               rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+               rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+               rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+               rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+       }
+
        return 0;
 }
 
@@ -2810,10 +3308,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
 
        rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
        rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-       rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
        rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-       rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
-       rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
        rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
 
        /* Wait for DMA, ignore error */
@@ -2823,9 +3318,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
        rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
-       rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
-       rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
 }
 EXPORT_SYMBOL_GPL(rt2800_disable_radio);
 
@@ -2986,13 +3478,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
                                   default_lna_gain);
        rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
-       if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
-               rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
-       if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
-               rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
-       rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -3009,10 +3494,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
 
        /*
-        * Identify RF chipset.
+        * Identify RF chipset by EEPROM value
+        * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
+        * RT53xx: defined in "EEPROM_CHIP_ID" field
         */
-       value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
        rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+       if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
+       else
+               value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
 
        rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
                        value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
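Editor's note: RT53xx parts report their RF identity in the EEPROM_CHIP_ID word rather than in EEPROM_NIC_CONF0_RF_TYPE, keyed off the chipset field of MAC_CSR0. A minimal sketch of that selection; the function and its parameters are invented, only the rule and the chipset constant come from the patch.

#include <stdint.h>

#define RT5390_CHIPSET 0x5390   /* chipset value, as defined later in this diff */

uint16_t rf_identity(uint16_t mac_csr0_chipset,
                     uint16_t eeprom_chip_id,
                     uint16_t eeprom_nic_conf0_rf_type)
{
        if (mac_csr0_chipset == RT5390_CHIPSET)
                return eeprom_chip_id;          /* RT53xx */
        return eeprom_nic_conf0_rf_type;        /* RT28xx/RT30xx */
}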
@@ -3024,7 +3514,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rt(rt2x00dev, RT3071) &&
            !rt2x00_rt(rt2x00dev, RT3090) &&
            !rt2x00_rt(rt2x00dev, RT3390) &&
-           !rt2x00_rt(rt2x00dev, RT3572)) {
+           !rt2x00_rt(rt2x00dev, RT3572) &&
+           !rt2x00_rt(rt2x00dev, RT5390)) {
                ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
                return -ENODEV;
        }
@@ -3038,7 +3529,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rf(rt2x00dev, RF3021) &&
            !rt2x00_rf(rt2x00dev, RF3022) &&
            !rt2x00_rf(rt2x00dev, RF3052) &&
-           !rt2x00_rf(rt2x00dev, RF3320)) {
+           !rt2x00_rf(rt2x00dev, RF3320) &&
+           !rt2x00_rf(rt2x00dev, RF5390)) {
                ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
                return -ENODEV;
        }
@@ -3046,11 +3538,35 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Identify default antenna configuration.
         */
-       rt2x00dev->default_ant.tx =
+       rt2x00dev->default_ant.tx_chain_num =
            rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
-       rt2x00dev->default_ant.rx =
+       rt2x00dev->default_ant.rx_chain_num =
            rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
 
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+
+       if (rt2x00_rt(rt2x00dev, RT3070) ||
+           rt2x00_rt(rt2x00dev, RT3090) ||
+           rt2x00_rt(rt2x00dev, RT3390)) {
+               value = rt2x00_get_field16(eeprom,
+                               EEPROM_NIC_CONF1_ANT_DIVERSITY);
+               switch (value) {
+               case 0:
+               case 1:
+               case 2:
+                       rt2x00dev->default_ant.tx = ANTENNA_A;
+                       rt2x00dev->default_ant.rx = ANTENNA_A;
+                       break;
+               case 3:
+                       rt2x00dev->default_ant.tx = ANTENNA_A;
+                       rt2x00dev->default_ant.rx = ANTENNA_B;
+                       break;
+               }
+       } else {
+               rt2x00dev->default_ant.tx = ANTENNA_A;
+               rt2x00dev->default_ant.rx = ANTENNA_A;
+       }
+
        /*
         * Read frequency offset and RF programming sequence.
         */
@@ -3084,6 +3600,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
 #endif /* CONFIG_RT2X00_LIB_LEDS */
 
+       /*
+        * Check if the device supports the EIRP tx power limit feature.
+        */
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+       if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
+                                       EIRP_MAX_TX_POWER_LIMIT)
+               __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
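Editor's note: the EIRP check reads EEPROM_EIRP_MAX_TX_POWER and only sets CONFIG_SUPPORT_POWER_LIMIT when the 2.4 GHz value is below EIRP_MAX_TX_POWER_LIMIT, so an erased (0xffff) word never enables the feature. A hedged sketch of that predicate; the limit is passed in as a parameter because the real constant is defined elsewhere in the driver.

#include <stdbool.h>
#include <stdint.h>

bool supports_eirp_power_limit(uint16_t eeprom_eirp_2ghz, uint16_t limit)
{
        /* An erased word (0xffff) or anything at/above the limit disables it. */
        return eeprom_eirp_2ghz < limit;
}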
@@ -3236,7 +3761,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        char *default_power1;
        char *default_power2;
        unsigned int i;
-       unsigned short max_power;
        u16 eeprom;
 
        /*
@@ -3303,7 +3827,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                   rt2x00_rf(rt2x00dev, RF2020) ||
                   rt2x00_rf(rt2x00dev, RF3021) ||
                   rt2x00_rf(rt2x00dev, RF3022) ||
-                  rt2x00_rf(rt2x00dev, RF3320)) {
+                  rt2x00_rf(rt2x00dev, RF3320) ||
+                  rt2x00_rf(rt2x00dev, RF5390)) {
                spec->num_channels = 14;
                spec->channels = rf_vals_3x;
        } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3361,26 +3886,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        spec->channels_info = info;
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
-       max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
        default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
        default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
 
        for (i = 0; i < 14; i++) {
-               info[i].max_power = max_power;
-               info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
-               info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
+               info[i].default_power1 = default_power1[i];
+               info[i].default_power2 = default_power2[i];
        }
 
        if (spec->num_channels > 14) {
-               max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
                default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
                default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
 
                for (i = 14; i < spec->num_channels; i++) {
-                       info[i].max_power = max_power;
-                       info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
-                       info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
+                       info[i].default_power1 = default_power1[i];
+                       info[i].default_power2 = default_power2[i];
                }
        }
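Editor's note: with the EEPROM_MAX_TX_POWER clamping gone, the per-channel defaults are now stored as the raw EEPROM bytes and converted later in the txpower compensation path. A small sketch of the copy loop, with an invented stand-in for struct channel_info reduced to the two fields used here:

#include <stdint.h>

struct channel_power {
        uint8_t default_power1;         /* raw EEPROM byte, converted later */
        uint8_t default_power2;
};

void fill_channel_powers(struct channel_power *info, const uint8_t *power1,
                         const uint8_t *power2, int num_channels)
{
        for (int i = 0; i < num_channels; i++) {
                info[i].default_power1 = power1[i];
                info[i].default_power2 = power2[i];
        }
}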
 
@@ -3472,7 +3992,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
        if (queue_idx >= 4)
                return 0;
 
-       queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+       queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
 
        /* Update WMM TXOP register */
        offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -3530,7 +4050,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
 
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                       u8 buf_size)
 {
        int ret = 0;
 
index e3c995a..0c92d86 100644 (file)
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
 void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
 
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+void rt2800_clear_beacon(struct queue_entry *entry);
 
 extern const struct rt2x00debug rt2800_rt2x00debug;
 
@@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
 u64 rt2800_get_tsf(struct ieee80211_hw *hw);
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                       u8 buf_size);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
                      struct survey_info *survey);
 
index aa97971..808073a 100644 (file)
@@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue)
                rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow beacon tasklets to be scheduled for periodic
+                * beacon updates.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+               tasklet_enable(&rt2x00dev->pretbtt_tasklet);
+
                rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+               rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+               rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
                break;
        default:
                break;
@@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+               rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+               rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+               /*
+                * Wait for tbtt tasklets to finish.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
+               tasklet_disable(&rt2x00dev->pretbtt_tasklet);
                break;
        default:
                break;
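Editor's note: starting and stopping the beacon queue now also enables and disables the TBTT/pre-TBTT tasklets and the PRE_TBTT hardware timer, so no beacon bottom half can run while the queue is down. A toy state model of that pairing; plain booleans stand in for tasklet_enable()/tasklet_disable() and the INT_TIMER_EN register bit.

#include <stdbool.h>

struct beacon_state {
        bool queue_running;
        bool tbtt_bh_enabled;
        bool pre_tbtt_bh_enabled;
        bool pre_tbtt_timer_on;
};

void beacon_queue_start(struct beacon_state *s)
{
        s->tbtt_bh_enabled = true;      /* tasklet_enable() before beacons start */
        s->pre_tbtt_bh_enabled = true;
        s->queue_running = true;
        s->pre_tbtt_timer_on = true;    /* pre-TBTT timer armed last */
}

void beacon_queue_stop(struct beacon_state *s)
{
        s->queue_running = false;
        s->pre_tbtt_timer_on = false;   /* stop generating pre-TBTT interrupts */
        s->tbtt_bh_enabled = false;     /* tasklet_disable() waits for a running bh */
        s->pre_tbtt_bh_enabled = false;
}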
@@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_ON) ||
-                  (state == STATE_RADIO_IRQ_ON_ISR);
+       int mask = (state == STATE_RADIO_IRQ_ON);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        if (state == STATE_RADIO_IRQ_ON) {
                rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
                rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+               /*
+                * Enable tasklets. The beacon related tasklets are
+                * enabled when the beacon queue is started.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
+               tasklet_enable(&rt2x00dev->autowake_tasklet);
        }
 
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
        rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
        rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
        rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
        rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
        rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished before
+                * disabling the interrupts.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+               tasklet_disable(&rt2x00dev->autowake_tasklet);
+       }
 }
 
 static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -452,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
        rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
 
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+               rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+               rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+               rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+       }
+
        rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
 
        rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -475,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
 
 static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
-       u32 reg;
-
-       rt2800_disable_radio(rt2x00dev);
-
-       rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
-
-       rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
-       rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
-       rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
-       rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+       if (rt2x00_is_soc(rt2x00dev)) {
+               rt2800_disable_radio(rt2x00dev);
+               rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+               rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
+       }
 }
 
 static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
                               enum dev_state state)
 {
-       /*
-        * Always put the device to sleep (even when we intend to wakeup!)
-        * if the device is booting and wasn't asleep it will return
-        * failure when attempting to wakeup.
-        */
-       rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
-
        if (state == STATE_AWAKE) {
-               rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+               rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
                rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+       } else if (state == STATE_SLEEP) {
+               rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
+               rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
+               rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
        }
 
        return 0;
@@ -538,9 +570,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt2800pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -652,6 +682,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+               /*
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
+                */
+               rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -690,7 +726,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
 
        while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
                qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
-               if (qid >= QID_RX) {
+               if (unlikely(qid >= QID_RX)) {
                        /*
                         * Unknown queue, this shouldn't happen. Just drop
                         * this tx status.
@@ -700,7 +736,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
                        break;
                }
 
-               queue = rt2x00queue_get_queue(rt2x00dev, qid);
+               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
                if (unlikely(queue == NULL)) {
                        /*
                         * The queue is NULL, this shouldn't happen. Stop
@@ -711,7 +747,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
                        break;
                }
 
-               if (rt2x00queue_empty(queue)) {
+               if (unlikely(rt2x00queue_empty(queue))) {
                        /*
                         * The queue is empty. Stop processing here
                         * and drop the tx status.
@@ -726,45 +762,59 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
        }
 }
 
-static void rt2800pci_txstatus_tasklet(unsigned long data)
+static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                      struct rt2x00_field32 irq_field)
 {
-       rt2800pci_txdone((struct rt2x00_dev *)data);
-}
-
-static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
-{
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
+       u32 reg;
 
        /*
-        * 1 - Pre TBTT interrupt.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
-               rt2x00lib_pretbtt(rt2x00dev);
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
+       rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 1);
+       rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
 
-       /*
-        * 2 - Beacondone interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
-               rt2x00lib_beacondone(rt2x00dev);
+static void rt2800pci_txstatus_tasklet(unsigned long data)
+{
+       rt2800pci_txdone((struct rt2x00_dev *)data);
 
        /*
-        * 3 - Rx ring done interrupt.
+        * No need to enable the tx status interrupt here as we always
+        * leave it enabled to minimize the possibility of a tx status
+        * register overflow. See comment in interrupt handler.
         */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
-               rt2x00pci_rxdone(rt2x00dev);
+}
 
-       /*
-        * 4 - Auto wakeup interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
-               rt2800pci_wakeup(rt2x00dev);
+static void rt2800pci_pretbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_pretbtt(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
+
+static void rt2800pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
+static void rt2800pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
 
-       return IRQ_HANDLED;
+static void rt2800pci_autowake_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2800pci_wakeup(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
 }
 
 static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
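Editor's note: each interrupt source now gets its own tasklet, and every tasklet (except tx status) ends by re-enabling its source through a locked read-modify-write of INT_MASK_CSR. A standalone model of that re-enable helper, using a pthread mutex as a stand-in for the driver's irqmask spinlock:

#include <pthread.h>
#include <stdint.h>

/* One source bit is switched back on under a lock, mirroring the
 * spin_lock_irq()-protected read-modify-write of INT_MASK_CSR in the hunk. */
static pthread_mutex_t irqmask_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t int_mask_csr;           /* stand-in for the hardware register */

void enable_interrupt(uint32_t source_bit)
{
        pthread_mutex_lock(&irqmask_lock);
        int_mask_csr |= source_bit;     /* rt2x00_set_field32(&reg, field, 1) */
        pthread_mutex_unlock(&irqmask_lock);
}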
@@ -785,7 +835,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
         *
         * Furthermore we don't disable the TX_FIFO_STATUS
         * interrupt here but leave it enabled so that the TX_STA_FIFO
-        * can also be read while the interrupt thread gets executed.
+        * can also be read while the tx status tasklet gets executed.
         *
         * Since we have only one producer and one consumer we don't
         * need to lock the kfifo.
@@ -810,8 +860,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
 static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg;
-       irqreturn_t ret = IRQ_HANDLED;
+       u32 reg, mask;
 
        /* Read status and ACK all interrupts */
        rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -823,38 +872,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
-               rt2800pci_txstatus_interrupt(rt2x00dev);
+       /*
+        * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+        * for interrupts and interrupt masks, we can just use the value of
+        * INT_SOURCE_CSR to create the interrupt mask.
+        */
+       mask = ~reg;
 
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) ||
-           rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) ||
-           rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
-           rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+               rt2800pci_txstatus_interrupt(rt2x00dev);
                /*
-                * All other interrupts are handled in the interrupt thread.
-                * Store irqvalue for use in the interrupt thread.
+                * Never disable the TX_FIFO_STATUS interrupt.
                 */
-               rt2x00dev->irqvalue[0] = reg;
+               rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+       }
 
-               /*
-                * Disable interrupts, will be enabled again in the
-                * interrupt thread.
-               */
-               rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                                     STATE_RADIO_IRQ_OFF_ISR);
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+               tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
 
-               /*
-                * Leave the TX_FIFO_STATUS interrupt enabled to not lose any
-                * tx status reports.
-                */
-               rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-               rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
-               rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
 
-               ret = IRQ_WAKE_THREAD;
-       }
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
 
-       return ret;
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+               tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklet will reenable the appropriate interrupts.
+        */
+       spin_lock(&rt2x00dev->irqmask_lock);
+       rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       reg &= mask;
+       rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock(&rt2x00dev->irqmask_lock);
+
+       return IRQ_HANDLED;
 }
 
 /*
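Editor's note: because INT_SOURCE_CSR and INT_MASK_CSR share a bit layout, the top half can derive the new mask directly from the acknowledged sources: every source that had a tasklet scheduled stays masked until that tasklet finishes, while TX_FIFO_STATUS is forced back on so no tx status report is lost. A hedged model of that computation; the bit positions are invented for the example.

#include <stdint.h>

#define INT_TX_FIFO_STATUS (1u << 0)
#define INT_PRE_TBTT       (1u << 1)
#define INT_TBTT           (1u << 2)
#define INT_RX_DONE        (1u << 3)
#define INT_AUTO_WAKEUP    (1u << 4)

/* Pending sources stay masked until their tasklet re-enables them;
 * TX_FIFO_STATUS is never masked so tx status reports keep flowing. */
uint32_t new_irq_mask(uint32_t current_mask, uint32_t pending)
{
        uint32_t mask = ~pending;       /* same bit layout as the mask register */

        if (pending & INT_TX_FIFO_STATUS)
                mask |= INT_TX_FIFO_STATUS;

        return current_mask & mask;     /* reg &= mask in the handler */
}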
@@ -922,6 +977,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (!modparam_nohwcrypt)
                __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
        __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
+       __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
 
        /*
         * Set the rssi offset.
@@ -969,8 +1025,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
 
 static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .irq_handler            = rt2800pci_interrupt,
-       .irq_handler_thread     = rt2800pci_interrupt_thread,
-       .txstatus_tasklet       = rt2800pci_txstatus_tasklet,
+       .txstatus_tasklet       = rt2800pci_txstatus_tasklet,
+       .pretbtt_tasklet        = rt2800pci_pretbtt_tasklet,
+       .tbtt_tasklet           = rt2800pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt2800pci_rxdone_tasklet,
+       .autowake_tasklet       = rt2800pci_autowake_tasklet,
        .probe_hw               = rt2800pci_probe_hw,
        .get_firmware_name      = rt2800pci_get_firmware_name,
        .check_firmware         = rt2800_check_firmware,
@@ -990,6 +1049,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .write_tx_desc          = rt2800pci_write_tx_desc,
        .write_tx_data          = rt2800_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
+       .clear_beacon           = rt2800_clear_beacon,
        .fill_rxdone            = rt2800pci_fill_rxdone,
        .config_shared_key      = rt2800_config_shared_key,
        .config_pairwise_key    = rt2800_config_pairwise_key,
@@ -1065,11 +1125,16 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
        { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
 #endif
 #ifdef CONFIG_RT2800PCI_RT35XX
+       { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+       { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
+#ifdef CONFIG_RT2800PCI_RT53XX
+       { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
 #endif
        { 0, }
 };
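The rt2800pci interrupt rework above follows a pattern that recurs later for rt61pci: the hard ISR only reads the interrupt source register, schedules one tasklet per pending event, and masks the serviced sources under irqmask_lock; each tasklet re-enables its own source once it has run. Below is a minimal kernel-style sketch of that shape, not the driver's actual code; the device struct, register offsets and mask polarity (1 = masked here, unlike the Ralink registers) are hypothetical.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>

struct foo_dev {
        void __iomem *csr;
        spinlock_t irqmask_lock;        /* protects the IRQ mask register */
        struct tasklet_struct rxdone_tasklet;
};

#define FOO_INT_SOURCE  0x00            /* write-1-to-clear source register */
#define FOO_INT_MASK    0x04            /* hypothetical: 1 = source masked  */
#define FOO_INT_RXDONE  BIT(0)

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
        struct foo_dev *dev = dev_instance;
        u32 source = readl(dev->csr + FOO_INT_SOURCE);

        if (!source)
                return IRQ_NONE;        /* shared line, not our device */

        writel(source, dev->csr + FOO_INT_SOURCE);      /* acknowledge */

        if (source & FOO_INT_RXDONE)
                tasklet_schedule(&dev->rxdone_tasklet);

        /* Mask every source we just handed to a tasklet. */
        spin_lock(&dev->irqmask_lock);
        writel(readl(dev->csr + FOO_INT_MASK) | source,
               dev->csr + FOO_INT_MASK);
        spin_unlock(&dev->irqmask_lock);

        return IRQ_HANDLED;
}

static void foo_rxdone_tasklet(unsigned long data)
{
        struct foo_dev *dev = (struct foo_dev *)data;

        /* ... process completed RX descriptors ... */

        /* Unmask RXDONE again; IRQs are disabled across the lock because
         * the hard ISR above takes the same lock. */
        spin_lock_irq(&dev->irqmask_lock);
        writel(readl(dev->csr + FOO_INT_MASK) & ~FOO_INT_RXDONE,
               dev->csr + FOO_INT_MASK);
        spin_unlock_irq(&dev->irqmask_lock);
}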
index b97a4a5..f1a9214 100644 (file)
@@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                /* No support, but no error either */
                break;
        case STATE_DEEP_SLEEP:
@@ -486,6 +484,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+               /*
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
+                */
+               rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -561,6 +565,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
                __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
        __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
        __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
+       __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
 
        /*
         * Set the rssi offset.
@@ -633,6 +638,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
        .write_tx_desc          = rt2800usb_write_tx_desc,
        .write_tx_data          = rt2800usb_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
+       .clear_beacon           = rt2800_clear_beacon,
        .get_tx_data_len        = rt2800usb_get_tx_data_len,
        .fill_rxdone            = rt2800usb_fill_rxdone,
        .config_shared_key      = rt2800_config_shared_key,
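Both probe_hw hunks above advertise the new HT TX descriptor format by setting DRIVER_REQUIRE_HT_TX_DESC in rt2x00dev->flags, which the queue code later checks with test_bit. A small illustrative sketch of that non-atomic capability-flag idiom; the enum values and device struct below are made up, not rt2x00's.

#include <linux/bitops.h>

enum bar_flags {
        BAR_SUPPORT_HW_CRYPTO,
        BAR_REQUIRE_HT_TX_DESC,
};

struct bar_dev {
        unsigned long flags;            /* one bit per enum bar_flags value */
};

static void bar_probe_hw(struct bar_dev *dev, bool has_ht)
{
        /* __set_bit() is the non-atomic variant; that is fine during probe,
         * before any other context can touch dev->flags. */
        __set_bit(BAR_SUPPORT_HW_CRYPTO, &dev->flags);
        if (has_ht)
                __set_bit(BAR_REQUIRE_HT_TX_DESC, &dev->flags);
}

static bool bar_wants_ht_txdesc(struct bar_dev *dev)
{
        return test_bit(BAR_REQUIRE_HT_TX_DESC, &dev->flags);
}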
index 84aaf39..a3940d7 100644 (file)
@@ -189,6 +189,7 @@ struct rt2x00_chip {
 #define RT3572         0x3572
 #define RT3593         0x3593  /* PCIe */
 #define RT3883         0x3883  /* WSOC */
+#define RT5390         0x5390  /* 2.4GHz */
 
        u16 rf;
        u16 rev;
@@ -225,6 +226,8 @@ struct channel_info {
 struct antenna_setup {
        enum antenna rx;
        enum antenna tx;
+       u8 rx_chain_num;
+       u8 tx_chain_num;
 };
 
 /*
@@ -368,6 +371,7 @@ struct rt2x00_intf {
         * dedicated beacon entry.
         */
        struct queue_entry *beacon;
+       bool enable_beacon;
 
        /*
         * Actions that needed rescheduling.
@@ -463,7 +467,6 @@ struct rt2x00lib_crypto {
        const u8 *address;
 
        u32 bssidx;
-       u32 aid;
 
        u8 key[16];
        u8 tx_mic[8];
@@ -510,15 +513,14 @@ struct rt2x00lib_ops {
         */
        irq_handler_t irq_handler;
 
-       /*
-        * Threaded Interrupt handlers.
-        */
-       irq_handler_t irq_handler_thread;
-
        /*
         * TX status tasklet handler.
         */
        void (*txstatus_tasklet) (unsigned long data);
+       void (*pretbtt_tasklet) (unsigned long data);
+       void (*tbtt_tasklet) (unsigned long data);
+       void (*rxdone_tasklet) (unsigned long data);
+       void (*autowake_tasklet) (unsigned long data);
 
        /*
         * Device init handlers.
@@ -573,6 +575,7 @@ struct rt2x00lib_ops {
                               struct txentry_desc *txdesc);
        void (*write_beacon) (struct queue_entry *entry,
                              struct txentry_desc *txdesc);
+       void (*clear_beacon) (struct queue_entry *entry);
        int (*get_tx_data_len) (struct queue_entry *entry);
 
        /*
@@ -658,12 +661,15 @@ enum rt2x00_flags {
        DRIVER_REQUIRE_L2PAD,
        DRIVER_REQUIRE_TXSTATUS_FIFO,
        DRIVER_REQUIRE_TASKLET_CONTEXT,
+       DRIVER_REQUIRE_SW_SEQNO,
+       DRIVER_REQUIRE_HT_TX_DESC,
 
        /*
         * Driver features
         */
        CONFIG_SUPPORT_HW_BUTTON,
        CONFIG_SUPPORT_HW_CRYPTO,
+       CONFIG_SUPPORT_POWER_LIMIT,
        DRIVER_SUPPORT_CONTROL_FILTERS,
        DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
        DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
@@ -788,10 +794,12 @@ struct rt2x00_dev {
         *  - Open ap interface count.
         *  - Open sta interface count.
         *  - Association count.
+        *  - Beaconing enabled count.
         */
        unsigned int intf_ap_count;
        unsigned int intf_sta_count;
        unsigned int intf_associated;
+       unsigned int intf_beaconing;
 
        /*
         * Link quality
@@ -857,6 +865,13 @@ struct rt2x00_dev {
         */
        struct ieee80211_low_level_stats low_level_stats;
 
+       /**
+        * Work queue for all work which should not be placed
+        * on the mac80211 workqueue (because of dependencies
+        * between various work structures).
+        */
+       struct workqueue_struct *workqueue;
+
        /*
         * Scheduled work.
         * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -872,26 +887,19 @@ struct rt2x00_dev {
        struct work_struct txdone_work;
 
        /*
-        * Data queue arrays for RX, TX and Beacon.
-        * The Beacon array also contains the Atim queue
-        * if that is supported by the device.
+        * Data queue arrays for RX, TX, Beacon and ATIM.
         */
        unsigned int data_queues;
        struct data_queue *rx;
        struct data_queue *tx;
        struct data_queue *bcn;
+       struct data_queue *atim;
 
        /*
         * Firmware image.
         */
        const struct firmware *fw;
 
-       /*
-        * Interrupt values, stored between interrupt service routine
-        * and interrupt thread routine.
-        */
-       u32 irqvalue[2];
-
        /*
         * FIFO for storing tx status reports between isr and tasklet.
         */
@@ -901,6 +909,15 @@ struct rt2x00_dev {
         * Tasklet for processing tx status reports (rt2800pci).
         */
        struct tasklet_struct txstatus_tasklet;
+       struct tasklet_struct pretbtt_tasklet;
+       struct tasklet_struct tbtt_tasklet;
+       struct tasklet_struct rxdone_tasklet;
+       struct tasklet_struct autowake_tasklet;
+
+       /*
+        * Protect the interrupt mask register.
+        */
+       spinlock_t irqmask_lock;
 };
 
 /*
@@ -1046,12 +1063,24 @@ void rt2x00queue_map_txskb(struct queue_entry *entry);
 void rt2x00queue_unmap_skb(struct queue_entry *entry);
 
 /**
- * rt2x00queue_get_queue - Convert queue index to queue pointer
+ * rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  * @queue: rt2x00 queue index (see &enum data_queue_qid).
+ *
+ * Returns NULL for non tx queues.
  */
-struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
-                                        const enum data_queue_qid queue);
+static inline struct data_queue *
+rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
+                        const enum data_queue_qid queue)
+{
+       if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
+               return &rt2x00dev->tx[queue];
+
+       if (queue == QID_ATIM)
+               return rt2x00dev->atim;
+
+       return NULL;
+}
 
 /**
  * rt2x00queue_get_entry - Get queue entry where the given index points to.
@@ -1168,7 +1197,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry);
 /*
  * mac80211 handlers.
  */
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 int rt2x00mac_start(struct ieee80211_hw *hw);
 void rt2x00mac_stop(struct ieee80211_hw *hw);
 int rt2x00mac_add_interface(struct ieee80211_hw *hw,
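The rt2x00.h hunk above replaces the exported rt2x00queue_get_queue() with the inline rt2x00queue_get_tx_queue(), which resolves only TX queue indices plus QID_ATIM and returns NULL for everything else. A hedged usage example of the new helper follows; the wrapper function is invented for illustration and assumes the declarations from rt2x00.h.

/* Illustrative caller only, not part of the driver: map a qid to a
 * data queue and fail cleanly when it is not a TX queue. */
static int example_pick_tx_queue(struct rt2x00_dev *rt2x00dev,
                                 enum data_queue_qid qid,
                                 struct data_queue **queue)
{
        *queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
        if (unlikely(!*queue))
                return -EINVAL; /* QID_RX, QID_BEACON or out of range */

        return 0;
}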
index 9597a03..9de9dbe 100644 (file)
@@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
                return;
 
        if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
-               rt2x00queue_update_beacon(rt2x00dev, vif, true);
+               rt2x00queue_update_beacon(rt2x00dev, vif);
 }
 
 static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
            vif->type != NL80211_IFTYPE_WDS)
                return;
 
-       rt2x00queue_update_beacon(rt2x00dev, vif, true);
+       /*
+        * Update the beacon without locking. This is safe on PCI devices
+        * as they only update the beacon periodically here. This should
+        * never be called for USB devices.
+        */
+       WARN_ON(rt2x00_is_usb(rt2x00dev));
+       rt2x00queue_update_beacon_locked(rt2x00dev, vif);
 }
 
 void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
                return;
 
        /* send buffered bc/mc frames out for every bssid */
-       ieee80211_iterate_active_interfaces(rt2x00dev->hw,
-                                           rt2x00lib_bc_buffer_iter,
-                                           rt2x00dev);
+       ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+                                                  rt2x00lib_bc_buffer_iter,
+                                                  rt2x00dev);
        /*
         * Devices with pre tbtt interrupt don't need to update the beacon
         * here as they will fetch the next beacon directly prior to
@@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
                return;
 
        /* fetch next beacon */
-       ieee80211_iterate_active_interfaces(rt2x00dev->hw,
-                                           rt2x00lib_beaconupdate_iter,
-                                           rt2x00dev);
+       ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+                                                  rt2x00lib_beaconupdate_iter,
+                                                  rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
 
@@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
                return;
 
        /* fetch next beacon */
-       ieee80211_iterate_active_interfaces(rt2x00dev->hw,
-                                           rt2x00lib_beaconupdate_iter,
-                                           rt2x00dev);
+       ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+                                                  rt2x00lib_beaconupdate_iter,
+                                                  rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
 
@@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
                              const int channel, const int tx_power,
                              const int value)
 {
-       entry->center_freq = ieee80211_channel_to_frequency(channel);
+       /* XXX: this assumption about the band is wrong for 802.11j */
+       entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       entry->center_freq = ieee80211_channel_to_frequency(channel,
+                                                           entry->band);
        entry->hw_value = value;
        entry->max_power = tx_power;
        entry->max_antenna_gain = 0xff;
@@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
                                     GFP_KERNEL);
                if (status)
                        return status;
+       }
 
-               /* tasklet for processing the tx status reports. */
-               if (rt2x00dev->ops->lib->txstatus_tasklet)
-                       tasklet_init(&rt2x00dev->txstatus_tasklet,
-                                    rt2x00dev->ops->lib->txstatus_tasklet,
-                                    (unsigned long)rt2x00dev);
-
+       /*
+        * Initialize tasklets if used by the driver. Tasklets are
+        * disabled until the interrupts are turned on. The driver
+        * has to handle that.
+        */
+#define RT2X00_TASKLET_INIT(taskletname) \
+       if (rt2x00dev->ops->lib->taskletname) { \
+               tasklet_init(&rt2x00dev->taskletname, \
+                            rt2x00dev->ops->lib->taskletname, \
+                            (unsigned long)rt2x00dev); \
+               tasklet_disable(&rt2x00dev->taskletname); \
        }
 
+       RT2X00_TASKLET_INIT(txstatus_tasklet);
+       RT2X00_TASKLET_INIT(pretbtt_tasklet);
+       RT2X00_TASKLET_INIT(tbtt_tasklet);
+       RT2X00_TASKLET_INIT(rxdone_tasklet);
+       RT2X00_TASKLET_INIT(autowake_tasklet);
+
+#undef RT2X00_TASKLET_INIT
+
        /*
         * Register HW.
         */
@@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 {
        int retval = -ENOMEM;
 
+       spin_lock_init(&rt2x00dev->irqmask_lock);
        mutex_init(&rt2x00dev->csr_mutex);
 
        set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                    BIT(NL80211_IFTYPE_WDS);
 
        /*
-        * Initialize configuration work.
+        * Initialize work.
         */
+       rt2x00dev->workqueue =
+           alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
+       if (!rt2x00dev->workqueue) {
+               retval = -ENOMEM;
+               goto exit;
+       }
+
        INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
 
        /*
@@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
        cancel_work_sync(&rt2x00dev->intf_work);
        cancel_work_sync(&rt2x00dev->rxdone_work);
        cancel_work_sync(&rt2x00dev->txdone_work);
+       destroy_workqueue(rt2x00dev->workqueue);
 
        /*
         * Free the tx status fifo.
@@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
         * Kill the tx status tasklet.
         */
        tasklet_kill(&rt2x00dev->txstatus_tasklet);
+       tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+       tasklet_kill(&rt2x00dev->tbtt_tasklet);
+       tasklet_kill(&rt2x00dev->rxdone_tasklet);
+       tasklet_kill(&rt2x00dev->autowake_tasklet);
 
        /*
         * Uninitialize device.
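rt2x00lib_probe_hw() above wraps tasklet setup in a local RT2X00_TASKLET_INIT() macro so that each handler is only initialized when the driver actually provides it, and is left disabled until the IRQ path enables it. A minimal standalone sketch of the same conditional init/disable pattern; the structures and names are simplified stand-ins, not the library's.

#include <linux/interrupt.h>

struct baz_ops {
        void (*rxdone_tasklet)(unsigned long data);
        void (*tbtt_tasklet)(unsigned long data);
};

struct baz_dev {
        const struct baz_ops *ops;
        struct tasklet_struct rxdone_tasklet;
        struct tasklet_struct tbtt_tasklet;
};

static void baz_init_tasklets(struct baz_dev *dev)
{
/* Initialize a tasklet only if the driver supplies a handler, and keep
 * it disabled until interrupts are switched on. */
#define BAZ_TASKLET_INIT(name)                                          \
        do {                                                            \
                if (dev->ops->name) {                                   \
                        tasklet_init(&dev->name, dev->ops->name,        \
                                     (unsigned long)dev);               \
                        tasklet_disable(&dev->name);                    \
                }                                                       \
        } while (0)

        BAZ_TASKLET_INIT(rxdone_tasklet);
        BAZ_TASKLET_INIT(tbtt_tasklet);

#undef BAZ_TASKLET_INIT
}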
index b7ad46e..ae1219d 100644 (file)
@@ -38,12 +38,12 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
 
        if (tx_info->control.sta)
-               txdesc->mpdu_density =
+               txdesc->u.ht.mpdu_density =
                    tx_info->control.sta->ht_cap.ampdu_density;
 
-       txdesc->ba_size = 7;    /* FIXME: What value is needed? */
+       txdesc->u.ht.ba_size = 7;       /* FIXME: What value is needed? */
 
-       txdesc->stbc =
+       txdesc->u.ht.stbc =
            (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
 
        /*
@@ -51,25 +51,24 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
         * mcs rate to be used
         */
        if (txrate->flags & IEEE80211_TX_RC_MCS) {
-               txdesc->mcs = txrate->idx;
+               txdesc->u.ht.mcs = txrate->idx;
 
                /*
                 * MIMO PS should be set to 1 for STAs using dynamic SM PS
                 * when using more than one tx stream (>MCS7).
                 */
-               if (tx_info->control.sta && txdesc->mcs > 7 &&
+               if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
                    ((tx_info->control.sta->ht_cap.cap &
                      IEEE80211_HT_CAP_SM_PS) >>
                     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
                    WLAN_HT_CAP_SM_PS_DYNAMIC)
                        __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
        } else {
-               txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+               txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-                       txdesc->mcs |= 0x08;
+                       txdesc->u.ht.mcs |= 0x08;
        }
 
-
        /*
         * This frame is eligible for an AMPDU, however, don't aggregate
         * frames that are intended to probe a specific tx rate.
@@ -78,14 +77,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
            !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
 
-       /*
-        * Determine HT Mix/Greenfield rate mode
-        */
-       if (txrate->flags & IEEE80211_TX_RC_MCS)
-               txdesc->rate_mode = RATE_MODE_HT_MIX;
-       if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
-               txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
-
        /*
         * Set 40Mhz mode if necessary (for legacy rates this will
         * duplicate the frame to both channels).
@@ -106,11 +97,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
         * for frames not transmitted with TXOP_HTTXOP
         */
        if (ieee80211_is_mgmt(hdr->frame_control))
-               txdesc->txop = TXOP_BACKOFF;
+               txdesc->u.ht.txop = TXOP_BACKOFF;
        else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
-               txdesc->txop = TXOP_SIFS;
+               txdesc->u.ht.txop = TXOP_SIFS;
        else
-               txdesc->txop = TXOP_HTTXOP;
+               txdesc->u.ht.txop = TXOP_HTTXOP;
 }
 
 u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
index a105c50..2d94cba 100644 (file)
@@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                               bool local);
 
 /**
- * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
+ * rt2x00queue_update_beacon - Send new beacon from mac80211
+ *     to hardware. Handles locking by itself (mutex).
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  * @vif: Interface for which the beacon should be updated.
- * @enable_beacon: Enable beaconing
  */
 int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
-                             struct ieee80211_vif *vif,
-                             const bool enable_beacon);
+                             struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
+ *     to hardware. Caller needs to ensure locking.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+                                    struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_clear_beacon - Clear beacon in hardware
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be cleared.
+ */
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+                            struct ieee80211_vif *vif);
 
 /**
  * rt2x00queue_index_inc - Index incrementation function
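The new rt2x00queue_update_beacon()/rt2x00queue_update_beacon_locked() pair above follows the usual _locked-suffix convention: the _locked variant assumes the caller already handles serialization (as the PCI pre-TBTT path does), while the plain variant is a thin wrapper that takes and drops the mutex. A generic sketch of that convention with invented names:

#include <linux/mutex.h>

struct qux {
        struct mutex lock;
        int value;
};

/* Caller is responsible for serialization (usually by holding q->lock). */
static int qux_update_locked(struct qux *q, int value)
{
        q->value = value;
        return 0;
}

/* Convenience wrapper for callers that have not taken the lock yet. */
static int qux_update(struct qux *q, int value)
{
        int ret;

        mutex_lock(&q->lock);
        ret = qux_update_locked(q, value);
        mutex_unlock(&q->lock);

        return ret;
}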
index bfda60e..c975b0a 100644 (file)
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
            !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
                return;
 
-       schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+       ieee80211_queue_delayed_work(rt2x00dev->hw,
+                                    &link->watchdog_work, WATCHDOG_INTERVAL);
 }
 
 void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
        rt2x00dev->ops->lib->watchdog(rt2x00dev);
 
        if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
-               schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+               ieee80211_queue_delayed_work(rt2x00dev->hw,
+                                            &link->watchdog_work,
+                                            WATCHDOG_INTERVAL);
 }
 
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
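The watchdog hunk above switches from schedule_delayed_work() to ieee80211_queue_delayed_work(), so the work lands on mac80211's workqueue and is flushed and suspended together with the rest of the stack. A small sketch of a self-rearming watchdog built around that call; the device struct and interval below are illustrative only.

#include <linux/workqueue.h>
#include <net/mac80211.h>

#define DEMO_WATCHDOG_INTERVAL  round_jiffies_relative(HZ)

struct demo_dev {
        struct ieee80211_hw *hw;
        struct delayed_work watchdog_work;
        bool present;
};

static void demo_watchdog(struct work_struct *work)
{
        struct demo_dev *dev =
                container_of(work, struct demo_dev, watchdog_work.work);

        /* ... check queues, kick stuck DMA, etc. ... */

        if (dev->present)
                ieee80211_queue_delayed_work(dev->hw, &dev->watchdog_work,
                                             DEMO_WATCHDOG_INTERVAL);
}

static void demo_start_watchdog(struct demo_dev *dev)
{
        INIT_DELAYED_WORK(&dev->watchdog_work, demo_watchdog);
        ieee80211_queue_delayed_work(dev->hw, &dev->watchdog_work,
                                     DEMO_WATCHDOG_INTERVAL);
}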
index f3da051..661c6ba 100644 (file)
@@ -99,7 +99,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -116,13 +116,13 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto exit_fail;
 
        /*
-        * Determine which queue to put packet on.
+        * Use the ATIM queue if appropriate and present.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
            test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
-               queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
-       else
-               queue = rt2x00queue_get_queue(rt2x00dev, qid);
+               qid = QID_ATIM;
+
+       queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
        if (unlikely(!queue)) {
                ERROR(rt2x00dev,
                      "Attempt to send packet over invalid queue %d.\n"
@@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         * either RTS or CTS-to-self frame and handles everything
         * inside the hardware.
         */
-       if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
-                                               IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
-           !rt2x00dev->ops->hw->set_rts_threshold) {
+       if (!rt2x00dev->ops->hw->set_rts_threshold &&
+           (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
+                                               IEEE80211_TX_RC_USE_CTS_PROTECT))) {
                if (rt2x00queue_available(queue) <= 1)
                        goto exit_fail;
 
@@ -149,18 +149,17 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                        goto exit_fail;
        }
 
-       if (rt2x00queue_write_tx_frame(queue, skb, false))
+       if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
                goto exit_fail;
 
        if (rt2x00queue_threshold(queue))
                rt2x00queue_pause_queue(queue);
 
-       return NETDEV_TX_OK;
+       return;
 
  exit_fail:
        ieee80211_stop_queue(rt2x00dev->hw, qid);
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_tx);
 
@@ -191,7 +190,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct rt2x00_intf *intf = vif_to_intf(vif);
-       struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
+       struct data_queue *queue = rt2x00dev->bcn;
        struct queue_entry *entry = NULL;
        unsigned int i;
 
@@ -519,11 +518,9 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        crypto.cmd = cmd;
 
-       if (sta) {
-               /* some drivers need the AID */
-               crypto.aid = sta->aid;
+       if (sta)
                crypto.address = sta->addr;
-       } else
+       else
                crypto.address = bcast_addr;
 
        if (crypto.cipher == CIPHER_TKIP)
@@ -617,11 +614,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                                      bss_conf->bssid);
 
        /*
-        * Update the beacon.
+        * Update the beacon. This is only required on USB devices. PCI
+        * devices fetch beacons periodically.
         */
-       if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED))
-               rt2x00queue_update_beacon(rt2x00dev, vif,
-                                         bss_conf->enable_beacon);
+       if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+               rt2x00queue_update_beacon(rt2x00dev, vif);
+
+       /*
+        * Start/stop beaconing.
+        */
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               if (!bss_conf->enable_beacon && intf->enable_beacon) {
+                       rt2x00queue_clear_beacon(rt2x00dev, vif);
+                       rt2x00dev->intf_beaconing--;
+                       intf->enable_beacon = false;
+
+                       if (rt2x00dev->intf_beaconing == 0) {
+                               /*
+                                * Last beaconing interface disabled
+                                * -> stop beacon queue.
+                                */
+                               mutex_lock(&intf->beacon_skb_mutex);
+                               rt2x00queue_stop_queue(rt2x00dev->bcn);
+                               mutex_unlock(&intf->beacon_skb_mutex);
+                       }
+
+               } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+                       rt2x00dev->intf_beaconing++;
+                       intf->enable_beacon = true;
+
+                       if (rt2x00dev->intf_beaconing == 1) {
+                               /*
+                                * First beaconing interface enabled
+                                * -> start beacon queue.
+                                */
+                               mutex_lock(&intf->beacon_skb_mutex);
+                               rt2x00queue_start_queue(rt2x00dev->bcn);
+                               mutex_unlock(&intf->beacon_skb_mutex);
+                       }
+               }
+       }
 
        /*
         * When the association status has changed we must reset the link
@@ -657,7 +690,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_queue *queue;
 
-       queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+       queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
        if (unlikely(!queue))
                return -EINVAL;
 
index ace0b66..4dd82b0 100644 (file)
@@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
        /*
         * Register interrupt handler.
         */
-       status = request_threaded_irq(rt2x00dev->irq,
-                                     rt2x00dev->ops->lib->irq_handler,
-                                     rt2x00dev->ops->lib->irq_handler_thread,
-                                     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+       status = request_irq(rt2x00dev->irq,
+                            rt2x00dev->ops->lib->irq_handler,
+                            IRQF_SHARED, rt2x00dev->name, rt2x00dev);
        if (status) {
                ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
                      rt2x00dev->irq, status);
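The rt2x00pci hunk above drops request_threaded_irq() in favour of plain request_irq(), since all deferred work now runs in tasklets. A minimal sketch of registering and releasing a shared interrupt line this way; the handler, pending check and device struct are placeholders, not driver code.

#include <linux/interrupt.h>
#include <linux/kernel.h>

struct pdev_priv {
        int irq;
        char name[16];
};

static bool pdev_irq_pending(struct pdev_priv *priv)
{
        return true;    /* placeholder: read the device's status register */
}

static irqreturn_t pdev_interrupt(int irq, void *dev_instance)
{
        struct pdev_priv *priv = dev_instance;

        /* On a shared line, report IRQ_NONE when this device did not
         * raise the interrupt so the other handlers get a chance. */
        if (!pdev_irq_pending(priv))
                return IRQ_NONE;

        /* ... acknowledge and handle the event ... */
        return IRQ_HANDLED;
}

static int pdev_setup_irq(struct pdev_priv *priv)
{
        int ret;

        ret = request_irq(priv->irq, pdev_interrupt, IRQF_SHARED,
                          priv->name, priv);
        if (ret)
                pr_err("IRQ %d allocation failed (error %d)\n",
                       priv->irq, ret);
        return ret;
}

static void pdev_free_irq(struct pdev_priv *priv)
{
        free_irq(priv->irq, priv);      /* must pass the same dev_id cookie */
}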
index ca82b3a..4b3c70e 100644 (file)
@@ -221,14 +221,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
        unsigned long irqflags;
 
-       if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
-           unlikely(!tx_info->control.vif))
+       if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+               return;
+
+       __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+
+       if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
                return;
 
        /*
-        * Hardware should insert sequence counter.
-        * FIXME: We insert a software sequence counter first for
-        * hardware that doesn't support hardware sequence counting.
+        * The hardware is not able to insert a sequence number. Assign a
+        * software generated one here.
         *
         * This is wrong because beacons are not getting sequence
         * numbers assigned properly.
@@ -246,7 +249,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
 
        spin_unlock_irqrestore(&intf->seqlock, irqflags);
 
-       __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 }
 
 static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
@@ -260,6 +262,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
        unsigned int duration;
        unsigned int residual;
 
+       /*
+        * Determine with what IFS priority this frame should be sent.
+        * Set ifs to IFS_SIFS when this is not the first fragment,
+        * or this fragment came after RTS/CTS.
+        */
+       if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+               txdesc->u.plcp.ifs = IFS_BACKOFF;
+       else
+               txdesc->u.plcp.ifs = IFS_SIFS;
+
        /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
        data_length = entry->skb->len + 4;
        data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
@@ -268,12 +280,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
-       txdesc->signal = hwrate->plcp;
-       txdesc->service = 0x04;
+       txdesc->u.plcp.signal = hwrate->plcp;
+       txdesc->u.plcp.service = 0x04;
 
        if (hwrate->flags & DEV_RATE_OFDM) {
-               txdesc->length_high = (data_length >> 6) & 0x3f;
-               txdesc->length_low = data_length & 0x3f;
+               txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
+               txdesc->u.plcp.length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
@@ -288,18 +300,18 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
                         * Check if we need to set the Length Extension
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
-                               txdesc->service |= 0x80;
+                               txdesc->u.plcp.service |= 0x80;
                }
 
-               txdesc->length_high = (duration >> 8) & 0xff;
-               txdesc->length_low = duration & 0xff;
+               txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
+               txdesc->u.plcp.length_low = duration & 0xff;
 
                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-                       txdesc->signal |= 0x08;
+                       txdesc->u.plcp.signal |= 0x08;
        }
 }
 
@@ -309,9 +321,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
-       struct ieee80211_rate *rate =
-           ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
-       const struct rt2x00_rate *hwrate;
+       struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+       struct ieee80211_rate *rate;
+       const struct rt2x00_rate *hwrate = NULL;
 
        memset(txdesc, 0, sizeof(*txdesc));
 
@@ -365,42 +377,42 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 
        /*
         * Beacons and probe responses require the tsf timestamp
-        * to be inserted into the frame, except for a frame that has been injected
-        * through a monitor interface. This latter is needed for testing a
-        * monitor interface.
+        * to be inserted into the frame.
         */
-       if ((ieee80211_is_beacon(hdr->frame_control) ||
-           ieee80211_is_probe_resp(hdr->frame_control)) &&
-           (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
+       if (ieee80211_is_beacon(hdr->frame_control) ||
+           ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 
-       /*
-        * Determine with what IFS priority this frame should be send.
-        * Set ifs to IFS_SIFS when the this is not the first fragment,
-        * or this fragment came after RTS/CTS.
-        */
        if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
-           !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
+           !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
-               txdesc->ifs = IFS_BACKOFF;
-       } else
-               txdesc->ifs = IFS_SIFS;
 
        /*
         * Determine rate modulation.
         */
-       hwrate = rt2x00_get_rate(rate->hw_value);
-       txdesc->rate_mode = RATE_MODE_CCK;
-       if (hwrate->flags & DEV_RATE_OFDM)
-               txdesc->rate_mode = RATE_MODE_OFDM;
+       if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+               txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
+       else if (txrate->flags & IEEE80211_TX_RC_MCS)
+               txdesc->rate_mode = RATE_MODE_HT_MIX;
+       else {
+               rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
+               hwrate = rt2x00_get_rate(rate->hw_value);
+               if (hwrate->flags & DEV_RATE_OFDM)
+                       txdesc->rate_mode = RATE_MODE_OFDM;
+               else
+                       txdesc->rate_mode = RATE_MODE_CCK;
+       }
 
        /*
         * Apply TX descriptor handling by components
         */
        rt2x00crypto_create_tx_descriptor(entry, txdesc);
-       rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
        rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
-       rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
+
+       if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
+               rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
+       else
+               rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
 }
 
 static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -566,13 +578,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
        return 0;
 }
 
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
-                             struct ieee80211_vif *vif,
-                             const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+                            struct ieee80211_vif *vif)
 {
        struct rt2x00_intf *intf = vif_to_intf(vif);
-       struct skb_frame_desc *skbdesc;
-       struct txentry_desc txdesc;
 
        if (unlikely(!intf->beacon))
                return -ENOBUFS;
@@ -584,17 +593,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
         */
        rt2x00queue_free_skb(intf->beacon);
 
-       if (!enable_beacon) {
-               rt2x00queue_stop_queue(intf->beacon->queue);
-               mutex_unlock(&intf->beacon_skb_mutex);
-               return 0;
-       }
+       /*
+        * Clear beacon (single bssid devices don't need to clear the beacon
+        * since the beacon queue will get stopped anyway).
+        */
+       if (rt2x00dev->ops->lib->clear_beacon)
+               rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+       mutex_unlock(&intf->beacon_skb_mutex);
+
+       return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+                                    struct ieee80211_vif *vif)
+{
+       struct rt2x00_intf *intf = vif_to_intf(vif);
+       struct skb_frame_desc *skbdesc;
+       struct txentry_desc txdesc;
+
+       if (unlikely(!intf->beacon))
+               return -ENOBUFS;
+
+       /*
+        * Clean up the beacon skb.
+        */
+       rt2x00queue_free_skb(intf->beacon);
 
        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
-       if (!intf->beacon->skb) {
-               mutex_unlock(&intf->beacon_skb_mutex);
+       if (!intf->beacon->skb)
                return -ENOMEM;
-       }
 
        /*
         * Copy all TX descriptor information into txdesc,
@@ -611,13 +639,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
        skbdesc->entry = intf->beacon;
 
        /*
-        * Send beacon to hardware and enable beacon genaration..
+        * Send beacon to hardware.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 
+       return 0;
+
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+                             struct ieee80211_vif *vif)
+{
+       struct rt2x00_intf *intf = vif_to_intf(vif);
+       int ret;
+
+       mutex_lock(&intf->beacon_skb_mutex);
+       ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
        mutex_unlock(&intf->beacon_skb_mutex);
 
-       return 0;
+       return ret;
 }
 
 void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -665,29 +705,6 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 
-struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
-                                        const enum data_queue_qid queue)
-{
-       int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
-
-       if (queue == QID_RX)
-               return rt2x00dev->rx;
-
-       if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
-               return &rt2x00dev->tx[queue];
-
-       if (!rt2x00dev->bcn)
-               return NULL;
-
-       if (queue == QID_BEACON)
-               return &rt2x00dev->bcn[0];
-       else if (queue == QID_ATIM && atim)
-               return &rt2x00dev->bcn[1];
-
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
-
 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
 {
@@ -885,7 +902,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
         * The queue flush has failed...
         */
        if (unlikely(!rt2x00queue_empty(queue)))
-               WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
+               WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
 
        /*
         * Restore the queue to the previous status
@@ -1063,7 +1080,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
                goto exit;
 
        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
-               status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
+               status = rt2x00queue_alloc_entries(rt2x00dev->atim,
                                                   rt2x00dev->ops->atim);
                if (status)
                        goto exit;
@@ -1137,6 +1154,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
+       rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
 
        /*
         * Initialize queue parameters.
@@ -1153,9 +1171,9 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);
 
-       rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
+       rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
        if (req_atim)
-               rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);
+               rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
 
        return 0;
 }
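rt2x00queue_allocate() above lays all data_queue structures out in a single array: RX at index 0, the TX queues next, then the beacon queue, and finally the optional ATIM queue, with rt2x00dev->atim left NULL when the driver does not need one. A small sketch of the same single-allocation layout, using invented types instead of the driver's:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_queue { unsigned int qid; };

struct demo_dev {
        unsigned int tx_queues;
        struct demo_queue *queues;      /* backing array */
        struct demo_queue *rx, *tx, *bcn, *atim;
};

static int demo_alloc_queues(struct demo_dev *dev, bool want_atim)
{
        /* 1 RX + N TX + 1 beacon + optional ATIM, in one allocation. */
        unsigned int count = 1 + dev->tx_queues + 1 + (want_atim ? 1 : 0);

        dev->queues = kcalloc(count, sizeof(*dev->queues), GFP_KERNEL);
        if (!dev->queues)
                return -ENOMEM;

        dev->rx   = &dev->queues[0];
        dev->tx   = &dev->queues[1];
        dev->bcn  = &dev->queues[1 + dev->tx_queues];
        dev->atim = want_atim ? &dev->queues[2 + dev->tx_queues] : NULL;

        return 0;
}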
index fab8e26..0c8b0c6 100644 (file)
@@ -305,20 +305,27 @@ struct txentry_desc {
        u16 length;
        u16 header_length;
 
-       u16 length_high;
-       u16 length_low;
-       u16 signal;
-       u16 service;
-
-       u16 mcs;
-       u16 stbc;
-       u16 ba_size;
-       u16 rate_mode;
-       u16 mpdu_density;
+       union {
+               struct {
+                       u16 length_high;
+                       u16 length_low;
+                       u16 signal;
+                       u16 service;
+                       enum ifs ifs;
+               } plcp;
+
+               struct {
+                       u16 mcs;
+                       u8 stbc;
+                       u8 ba_size;
+                       u8 mpdu_density;
+                       enum txop txop;
+               } ht;
+       } u;
+
+       enum rate_modulation rate_mode;
 
        short retry_limit;
-       short ifs;
-       short txop;
 
        enum cipher cipher;
        u16 key_idx;
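The txentry_desc hunk above moves the PLCP and HT fields into a union: a frame is described either by legacy PLCP parameters or by HT parameters, never both, and rate_mode selects which branch is valid. A compact sketch of the same idea with simplified field sets and invented names:

#include <linux/types.h>

enum demo_rate_mode { DEMO_RATE_CCK, DEMO_RATE_OFDM, DEMO_RATE_HT };

struct demo_txdesc {
        enum demo_rate_mode rate_mode;  /* selects the valid union branch */
        union {
                struct {                /* legacy CCK/OFDM frames */
                        u16 signal;
                        u16 service;
                        u16 length_low;
                        u16 length_high;
                } plcp;
                struct {                /* HT (802.11n) frames */
                        u16 mcs;
                        u8  stbc;
                        u8  ba_size;
                } ht;
        } u;
};

static void demo_fill_txdesc(struct demo_txdesc *desc, bool is_ht)
{
        if (is_ht) {
                desc->rate_mode = DEMO_RATE_HT;
                desc->u.ht.mcs = 7;             /* example value */
        } else {
                desc->rate_mode = DEMO_RATE_OFDM;
                desc->u.plcp.signal = 0x0b;     /* example PLCP signal */
        }
}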
index e8259ae..6f867ee 100644 (file)
@@ -85,8 +85,6 @@ enum dev_state {
        STATE_RADIO_OFF,
        STATE_RADIO_IRQ_ON,
        STATE_RADIO_IRQ_OFF,
-       STATE_RADIO_IRQ_ON_ISR,
-       STATE_RADIO_IRQ_OFF_ISR,
 };
 
 /*
index 1a9937d..fbe735f 100644 (file)
@@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
         * Schedule the delayed work for reading the TX status
         * from the device.
         */
-       ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+       queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 }
 
 static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
         * Schedule the delayed work for reading the RX status
         * from the device.
         */
-       ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+       queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
 }
 
 static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
@@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
                 * Schedule the completion handler manually, when this
                 * worker function runs, it should cleanup the queue.
                 */
-               ieee80211_queue_work(queue->rt2x00dev->hw, completion);
+               queue_work(queue->rt2x00dev->workqueue, completion);
 
                /*
                 * Wait for a little while to give the driver
@@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
        WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
                " invoke forced tx handler\n", queue->qid);
 
-       ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
+       queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
 }
 
 void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
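The rt2x00usb hunks above move RX/TX completion work from mac80211's workqueue onto the driver-private ordered workqueue created in rt2x00lib_probe_dev(), avoiding ordering dependencies with mac80211's own work items. A short sketch of that lifecycle with placeholder names; only the workqueue calls mirror the diff.

#include <linux/workqueue.h>
#include <linux/errno.h>

struct wq_dev {
        struct workqueue_struct *wq;
        struct work_struct rxdone_work;
};

static void wq_dev_rxdone(struct work_struct *work)
{
        /* ... drain completed RX transfers ... */
}

static int wq_dev_probe(struct wq_dev *dev, const char *name)
{
        /* Ordered: at most one work item runs at a time, in queue order. */
        dev->wq = alloc_ordered_workqueue(name, 0);
        if (!dev->wq)
                return -ENOMEM;

        INIT_WORK(&dev->rxdone_work, wq_dev_rxdone);
        return 0;
}

/* From a completion callback one would then do:
 *      queue_work(dev->wq, &dev->rxdone_work);
 */

static void wq_dev_remove(struct wq_dev *dev)
{
        cancel_work_sync(&dev->rxdone_work);
        destroy_workqueue(dev->wq);
}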
index 8de44dd..77e8113 100644 (file)
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
                                struct rt2x00intf_conf *conf,
                                const unsigned int flags)
 {
-       unsigned int beacon_base;
        u32 reg;
 
        if (flags & CONFIG_UPDATE_TYPE) {
-               /*
-                * Clear current synchronisation setup.
-                * For the Beacon base registers, we only need to clear
-                * the first byte since that byte contains the VALID and OWNER
-                * bits which (when set to 0) will invalidate the entire beacon.
-                */
-               beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
-               rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
-
                /*
                 * Enable synchronisation.
                 */
                rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
                rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
        }
 
@@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue)
                rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow the tbtt tasklet to be scheduled.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
                rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
                rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
                rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+               /*
+                * Wait for possibly running tbtt tasklets.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
                break;
        default:
                break;
@@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
 static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                               enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_OFF) ||
-                  (state == STATE_RADIO_IRQ_OFF_ISR);
+       int mask = (state == STATE_RADIO_IRQ_OFF);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
 
                rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
                rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
+
+               /*
+                * Enable tasklets.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
+               tasklet_enable(&rt2x00dev->autowake_tasklet);
        }
 
        /*
         * Only toggle the interrupts bits we are going to use.
         * Non-checked interrupt bits are disabled by default.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
        rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
        rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
        rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
        rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
        rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+               tasklet_disable(&rt2x00dev->autowake_tasklet);
+       }
 }
 
 static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt61pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt61pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -1882,10 +1898,12 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
        rt2x00_desc_write(txd, 1, word);
 
        rt2x00_desc_read(txd, 2, &word);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
+                          txdesc->u.plcp.length_low);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
+                          txdesc->u.plcp.length_high);
        rt2x00_desc_write(txd, 2, word);
 
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1930,7 +1948,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
                           test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W0_OFDM,
                           (txdesc->rate_mode == RATE_MODE_OFDM));
-       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
        rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
                           test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@@ -1962,13 +1980,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
        struct queue_entry_priv_pci *entry_priv = entry->priv_data;
        unsigned int beacon_base;
        unsigned int padding_len;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
        rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       orig_reg = reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1986,7 +2005,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
         * Write entire beacon with descriptor and padding to register.
         */
        padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
-       skb_pad(entry->skb, padding_len);
+       if (padding_len && skb_pad(entry->skb, padding_len)) {
+               ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+               /* skb freed by skb_pad() on failure */
+               entry->skb = NULL;
+               rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+               return;
+       }
+
        beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
        rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
                                      entry_priv->desc, TXINFO_SIZE);
@@ -2002,8 +2028,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
         */
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
 
-       rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
-       rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -2014,6 +2038,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
        entry->skb = NULL;
 }
 
+static void rt61pci_clear_beacon(struct queue_entry *entry)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       u32 reg;
+
+       /*
+        * Disable beaconing while we are reloading the beacon data,
+        * otherwise we might be sending out invalid data.
+        */
+       rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+       rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+       /*
+        * Clear beacon.
+        */
+       rt2x00pci_register_write(rt2x00dev,
+                                HW_BEACON_OFFSET(entry->entry_idx), 0);
+
+       /*
+        * Enable beaconing again.
+        */
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+       rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
 /*
  * RX control handlers
  */
@@ -2078,9 +2128,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
                /*
-                * FIXME: Legacy driver indicates that the frame does
-                * contain the Michael Mic. Unfortunately, in rt2x00
-                * the MIC seems to be missing completely...
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
 
@@ -2143,7 +2192,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
                 * queue identification number.
                 */
                type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
-               queue = rt2x00queue_get_queue(rt2x00dev, type);
+               queue = rt2x00queue_get_tx_queue(rt2x00dev, type);
                if (unlikely(!queue))
                        continue;
 
@@ -2211,61 +2260,77 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
        rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
 
-static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance)
+static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                    struct rt2x00_field32 irq_field)
 {
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
-       u32 reg_mcu = rt2x00dev->irqvalue[1];
+       u32 reg;
 
        /*
-        * Handle interrupts, walk through all bits
-        * and run the tasks, the bits are checked in order of
-        * priority.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
 
-       /*
-        * 1 - Rx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
-               rt2x00pci_rxdone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
 
-       /*
-        * 2 - Tx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
-               rt61pci_txdone(rt2x00dev);
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
 
-       /*
-        * 3 - Handle MCU command done.
-        */
-       if (reg_mcu)
-               rt2x00pci_register_write(rt2x00dev,
-                                        M2H_CMD_DONE_CSR, 0xffffffff);
+static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
+                                        struct rt2x00_field32 irq_field)
+{
+       u32 reg;
 
        /*
-        * 4 - MCU Autowakeup interrupt.
+        * Enable a single MCU interrupt. The interrupt mask register
+        * access needs locking.
         */
-       if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
-               rt61pci_wakeup(rt2x00dev);
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
 
-       /*
-        * 5 - Beacon done interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
-               rt2x00lib_beacondone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
-       return IRQ_HANDLED;
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+static void rt61pci_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt61pci_txdone(rt2x00dev);
+       rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
+}
+
+static void rt61pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
+}
+
+static void rt61pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
 }
 
+static void rt61pci_autowake_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt61pci_wakeup(rt2x00dev);
+       rt2x00pci_register_write(rt2x00dev,
+                                M2H_CMD_DONE_CSR, 0xffffffff);
+       rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
+}
 
 static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg_mcu;
-       u32 reg;
+       u32 reg_mcu, mask_mcu;
+       u32 reg, mask;
 
        /*
         * Get the interrupt sources & saved to local variable.
@@ -2283,14 +2348,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       /* Store irqvalues for use in the interrupt thread. */
-       rt2x00dev->irqvalue[0] = reg;
-       rt2x00dev->irqvalue[1] = reg_mcu;
+       /*
+        * Schedule tasklets for interrupt handling.
+        */
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+       if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+               tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+       /*
+        * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+        * for interrupts and interrupt masks we can just use the value of
+        * INT_SOURCE_CSR to create the interrupt mask.
+        */
+       mask = reg;
+       mask_mcu = reg_mcu;
 
-       /* Disable interrupts, will be enabled again in the interrupt thread. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_OFF_ISR);
-       return IRQ_WAKE_THREAD;
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now,
+        * the tasklet will reenable the appropriate interrupts.
+        */
+       spin_lock(&rt2x00dev->irqmask_lock);
+
+       rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       reg |= mask;
+       rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+
+       rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+       reg |= mask_mcu;
+       rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+       spin_unlock(&rt2x00dev->irqmask_lock);
+
+       return IRQ_HANDLED;
 }
 
 /*
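The hunks above split the rt61pci IRQ path in two: the hard handler reads and acks the interrupt sources, masks exactly the bits it saw (in this hardware a mask bit set to 1 disables the source), and schedules one tasklet per event; each tasklet then does its work in softirq context and unmasks only its own source. A compressed sketch of that pattern, using made-up names (my_dev, MY_INT_*, my_read/my_write) rather than the rt2x00 helpers:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    /* my_read()/my_write() and the MY_INT_* bits stand in for an rt61-style
     * source/mask register pair; they are assumptions, not real kernel APIs. */
    static irqreturn_t my_isr(int irq, void *dev_instance)
    {
            struct my_dev *dev = dev_instance;
            u32 source = my_read(dev, MY_INT_SOURCE);       /* read + ack */

            if (!source)
                    return IRQ_NONE;

            if (source & MY_INT_RXDONE)
                    tasklet_schedule(&dev->rx_tasklet);

            /* Disable only the sources that now have work pending. */
            spin_lock(&dev->irqmask_lock);
            my_write(dev, MY_INT_MASK, my_read(dev, MY_INT_MASK) | source);
            spin_unlock(&dev->irqmask_lock);

            return IRQ_HANDLED;
    }

    static void my_rx_tasklet(unsigned long data)
    {
            struct my_dev *dev = (struct my_dev *)data;

            my_handle_rx(dev);                              /* the real work */

            /* Softirq context: take the lock with IRQs disabled, then unmask. */
            spin_lock_irq(&dev->irqmask_lock);
            my_write(dev, MY_INT_MASK,
                     my_read(dev, MY_INT_MASK) & ~MY_INT_RXDONE);
            spin_unlock_irq(&dev->irqmask_lock);
    }

The tasklet itself would be set up once at probe time with tasklet_init(&dev->rx_tasklet, my_rx_tasklet, (unsigned long)dev).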
@@ -2819,7 +2916,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
        if (queue_idx >= 4)
                return 0;
 
-       queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+       queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
 
        /* Update WMM TXOP register */
        offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -2884,7 +2981,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
 
 static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
        .irq_handler            = rt61pci_interrupt,
-       .irq_handler_thread     = rt61pci_interrupt_thread,
+       .txstatus_tasklet       = rt61pci_txstatus_tasklet,
+       .tbtt_tasklet           = rt61pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt61pci_rxdone_tasklet,
+       .autowake_tasklet       = rt61pci_autowake_tasklet,
        .probe_hw               = rt61pci_probe_hw,
        .get_firmware_name      = rt61pci_get_firmware_name,
        .check_firmware         = rt61pci_check_firmware,
@@ -2903,6 +3003,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
        .stop_queue             = rt61pci_stop_queue,
        .write_tx_desc          = rt61pci_write_tx_desc,
        .write_beacon           = rt61pci_write_beacon,
+       .clear_beacon           = rt61pci_clear_beacon,
        .fill_rxdone            = rt61pci_fill_rxdone,
        .config_shared_key      = rt61pci_config_shared_key,
        .config_pairwise_key    = rt61pci_config_pairwise_key,
index 0b4e859..02f1148 100644 (file)
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
                                struct rt2x00intf_conf *conf,
                                const unsigned int flags)
 {
-       unsigned int beacon_base;
        u32 reg;
 
        if (flags & CONFIG_UPDATE_TYPE) {
-               /*
-                * Clear current synchronisation setup.
-                * For the Beacon base registers we only need to clear
-                * the first byte since that byte contains the VALID and OWNER
-                * bits which (when set to 0) will invalidate the entire beacon.
-                */
-               beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
-               rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
-
                /*
                 * Enable synchronisation.
                 */
                rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
                rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
        }
 
@@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt73usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                /* No support, but no error either */
                break;
        case STATE_DEEP_SLEEP:
@@ -1488,7 +1474,7 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
                           test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W0_OFDM,
                           (txdesc->rate_mode == RATE_MODE_OFDM));
-       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+       rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
        rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
                           test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@@ -1513,10 +1499,12 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
        rt2x00_desc_write(txd, 1, word);
 
        rt2x00_desc_read(txd, 2, &word);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
-       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
+                          txdesc->u.plcp.length_low);
+       rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
+                          txdesc->u.plcp.length_high);
        rt2x00_desc_write(txd, 2, word);
 
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
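In the descriptor writer above, the PLCP signal/service/length fields (and the IFS field earlier in the hunk) move under txdesc->u.plcp: rate-mode-specific data sits in a union so that only the variant matching the selected rate is filled in per frame. A rough picture of such a layout; apart from the u.plcp member visible in the diff, the names below are assumptions, not the actual rt2x00 definition:

    #include <linux/types.h>

    struct example_txdesc {
            unsigned long flags;
            u8 rate_mode;                   /* selects the valid union member */
            union {
                    struct {                /* legacy CCK/OFDM (PLCP) rates */
                            u8 signal;
                            u8 service;
                            u8 length_low;
                            u8 length_high;
                            u8 ifs;
                    } plcp;
                    struct {                /* HT rates (assumed member) */
                            u8 mcs;
                            u8 ba_size;
                    } ht;
            } u;
    };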
@@ -1547,13 +1535,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        unsigned int beacon_base;
        unsigned int padding_len;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
        rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       orig_reg = reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1577,7 +1566,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
         * Write entire beacon with descriptor and padding to register.
         */
        padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
-       skb_pad(entry->skb, padding_len);
+       if (padding_len && skb_pad(entry->skb, padding_len)) {
+               ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+               /* skb freed by skb_pad() on failure */
+               entry->skb = NULL;
+               rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+               return;
+       }
+
        beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
        rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
                                      entry->skb->len + padding_len);
@@ -1590,8 +1586,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
         */
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
 
-       rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
-       rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
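The new error path leans on the skb_pad() contract: it blanks the requested tail bytes without changing skb->len, returns non-zero only on allocation failure, and in that case it has already freed the skb, so the caller just drops its own reference and restores the beacon-generation bit it cleared earlier. The same pattern in isolation (hypothetical_upload() is a stand-in for the register multiwrite):

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    static int pad_and_upload(struct sk_buff *skb)
    {
            unsigned int padding = roundup(skb->len, 4) - skb->len;

            if (padding && skb_pad(skb, padding)) {
                    /* skb_pad() already freed the skb; only report failure. */
                    return -ENOMEM;
            }

            /* skb->data now holds skb->len + padding valid bytes,
             * with the padding zeroed. */
            return hypothetical_upload(skb->data, skb->len + padding);
    }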
@@ -1602,6 +1596,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
        entry->skb = NULL;
 }
 
+static void rt73usb_clear_beacon(struct queue_entry *entry)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       unsigned int beacon_base;
+       u32 reg;
+
+       /*
+        * Disable beaconing while we are reloading the beacon data,
+        * otherwise we might be sending out invalid data.
+        */
+       rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+       /*
+        * Clear beacon.
+        */
+       beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+       rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
+
+       /*
+        * Enable beaconing again.
+        */
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
 static int rt73usb_get_tx_data_len(struct queue_entry *entry)
 {
        int length;
@@ -1698,9 +1719,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
                /*
-                * FIXME: Legacy driver indicates that the frame does
-                * contain the Michael Mic. Unfortunately, in rt2x00
-                * the MIC seems to be missing completely...
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
 
@@ -2229,7 +2249,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
        if (queue_idx >= 4)
                return 0;
 
-       queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+       queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
 
        /* Update WMM TXOP register */
        offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -2313,6 +2333,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
        .flush_queue            = rt2x00usb_flush_queue,
        .write_tx_desc          = rt73usb_write_tx_desc,
        .write_beacon           = rt73usb_write_beacon,
+       .clear_beacon           = rt73usb_clear_beacon,
        .get_tx_data_len        = rt73usb_get_tx_data_len,
        .fill_rxdone            = rt73usb_fill_rxdone,
        .config_shared_key      = rt73usb_config_shared_key,
@@ -2446,6 +2467,7 @@ static struct usb_device_id rt73usb_device_table[] = {
        { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
+       { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
        /* Qcom */
        { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
index 5851cbc..80db5ca 100644 (file)
@@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                        rx_status.freq = dev->conf.channel->center_freq;
                        rx_status.band = dev->conf.channel->band;
                        rx_status.mactime = le64_to_cpu(entry->tsft);
-                       rx_status.flag |= RX_FLAG_TSFT;
+                       rx_status.flag |= RX_FLAG_MACTIME_MPDU;
                        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 
@@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
-
-       return 0;
 }
 
 void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work)
        struct ieee80211_hw *dev = vif_priv->dev;
        struct ieee80211_mgmt *mgmt;
        struct sk_buff *skb;
-       int err = 0;
 
        /* don't overflow the tx ring */
        if (ieee80211_queue_stopped(dev, 0))
@@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       err = rtl8180_tx(dev, skb);
-       WARN_ON(err);
+       rtl8180_tx(dev, skb);
 
 resched:
        /*
index 6b82cac..1e0be14 100644 (file)
@@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb)
        }
 }
 
-static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct rtl8187_priv *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
                kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
        flags = skb->len;
@@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
                kfree_skb(skb);
        }
        usb_free_urb(urb);
-
-       return NETDEV_TX_OK;
 }
 
 static void rtl8187_rx_cb(struct urb *urb)
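rtl8187_tx() (like rtl8180_tx() above) becomes void because mac80211's .tx callback no longer returns a status: once invoked, the driver owns the skb on every path and must free it itself on errors instead of returning NETDEV_TX_OK. Minimal shape of such a handler (example_priv and my_queue_frame() are made up):

    #include <net/mac80211.h>

    static void example_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
    {
            struct example_priv *priv = hw->priv;

            if (my_queue_frame(priv, skb) < 0)
                    dev_kfree_skb_any(skb); /* error path still consumes skb */
    }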
@@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb)
        rx_status.rate_idx = rate;
        rx_status.freq = dev->conf.channel->center_freq;
        rx_status.band = dev->conf.channel->band;
-       rx_status.flag |= RX_FLAG_TSFT;
+       rx_status.flag |= RX_FLAG_MACTIME_MPDU;
        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
@@ -871,23 +869,35 @@ static void rtl8187_work(struct work_struct *work)
        /* The RTL8187 returns the retry count through register 0xFFFA. In
         * addition, it appears to be a cumulative retry count, not the
         * value for the current TX packet. When multiple TX entries are
-        * queued, the retry count will be valid for the last one in the queue.
-        * The "error" should not matter for purposes of rate setting. */
+        * waiting in the queue, the retry count will be the total for all.
+        * The "error" may matter for purposes of rate setting, but there is
+        * no other choice with this hardware.
+        */
        struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
                                    work.work);
        struct ieee80211_tx_info *info;
        struct ieee80211_hw *dev = priv->dev;
        static u16 retry;
        u16 tmp;
+       u16 avg_retry;
+       int length;
 
        mutex_lock(&priv->conf_mutex);
        tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA);
+       length = skb_queue_len(&priv->b_tx_status.queue);
+       if (unlikely(!length))
+               length = 1;
+       if (unlikely(tmp < retry))
+               tmp = retry;
+       avg_retry = (tmp - retry) / length;
        while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
                struct sk_buff *old_skb;
 
                old_skb = skb_dequeue(&priv->b_tx_status.queue);
                info = IEEE80211_SKB_CB(old_skb);
-               info->status.rates[0].count = tmp - retry + 1;
+               info->status.rates[0].count = avg_retry + 1;
+               if (info->status.rates[0].count > RETRY_COUNT)
+                       info->flags &= ~IEEE80211_TX_STAT_ACK;
                ieee80211_tx_status_irqsafe(dev, old_skb);
        }
        retry = tmp;
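Since register 0xFFFA only exposes a cumulative retry total, the per-frame count is approximated by spreading the delta since the last read across every frame still waiting for TX status; the two guards avoid a divide-by-zero when the queue is empty and a bogus huge value when the counter drops below the cached total. The arithmetic on its own:

    #include <linux/types.h>

    static u16 average_retries(u16 current_total, u16 previous_total,
                               unsigned int pending_frames)
    {
            if (!pending_frames)
                    pending_frames = 1;             /* avoid division by zero */
            if (current_total < previous_total)
                    current_total = previous_total; /* counter reset: clamp */

            return (current_total - previous_total) / pending_frames;
    }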
@@ -933,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
                rtl818x_iowrite32(priv, &priv->map->TX_CONF,
                                  RTL818X_TX_CONF_HW_SEQNUM |
                                  RTL818X_TX_CONF_DISREQQSIZE |
-                                 (7 << 8  /* short retry limit */) |
-                                 (7 << 0  /* long retry limit */) |
+                                 (RETRY_COUNT << 8  /* short retry limit */) |
+                                 (RETRY_COUNT << 0  /* long retry limit */) |
                                  (7 << 21 /* MAX TX DMA */));
                rtl8187_init_urbs(dev);
                rtl8187b_init_status_urb(dev);
@@ -1378,6 +1388,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
        dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                     IEEE80211_HW_SIGNAL_DBM |
                     IEEE80211_HW_RX_INCLUDES_FCS;
+       /* Initialize rate-control variables */
+       dev->max_rates = 1;
+       dev->max_rate_tries = RETRY_COUNT;
 
        eeprom.data = dev;
        eeprom.register_read = rtl8187_eeprom_register_read;
index 0d7b142..f1cc907 100644 (file)
@@ -35,6 +35,8 @@
 #define RFKILL_MASK_8187_89_97 0x2
 #define RFKILL_MASK_8198       0x4
 
+#define RETRY_COUNT            7
+
 struct rtl8187_rx_info {
        struct urb *urb;
        struct ieee80211_hw *dev;
index 7f6573f..ce49e0c 100644 (file)
@@ -1,15 +1,33 @@
 config RTL8192CE
-       tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter"
-       depends on MAC80211 && EXPERIMENTAL
+       tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
+       depends on MAC80211 && PCI && EXPERIMENTAL
        select FW_LOADER
        select RTLWIFI
+       select RTL8192C_COMMON
        ---help---
        This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
        wireless network adapters.
 
        If you choose to build it as a module, it will be called rtl8192ce
 
+config RTL8192CU
+       tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
+       depends on MAC80211 && USB && EXPERIMENTAL
+       select FW_LOADER
+       select RTLWIFI
+       select RTL8192C_COMMON
+       ---help---
+       This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
+       wireless network adapters.
+
+       If you choose to build it as a module, it will be called rtl8192cu
+
 config RTLWIFI
        tristate
-       depends on RTL8192CE
+       depends on RTL8192CE || RTL8192CU
+       default m
+
+config RTL8192C_COMMON
+       tristate
+       depends on RTL8192CE || RTL8192CU
        default m
index 2a7a438..ec9393f 100644 (file)
@@ -5,9 +5,22 @@ rtlwifi-objs   :=              \
                core.o          \
                debug.o         \
                efuse.o         \
-               pci.o           \
                ps.o            \
                rc.o            \
                regd.o
 
+rtl8192c_common-objs +=
+
+ifneq ($(CONFIG_PCI),)
+rtlwifi-objs   += pci.o
+endif
+
+ifneq ($(CONFIG_USB),)
+rtlwifi-objs   += usb.o
+endif
+
+obj-$(CONFIG_RTL8192C_COMMON)  += rtl8192c/
 obj-$(CONFIG_RTL8192CE)                += rtl8192ce/
+obj-$(CONFIG_RTL8192CU)                += rtl8192cu/
+
+ccflags-y += -D__CHECK_ENDIAN__
index cf0b73e..bb0c781 100644 (file)
@@ -144,7 +144,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
                ht_cap->mcs.rx_mask[1] = 0xFF;
                ht_cap->mcs.rx_mask[4] = 0x01;
 
-               ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+               ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
        } else if (get_rf_type(rtlphy) == RF_1T1R) {
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
@@ -153,7 +153,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
                ht_cap->mcs.rx_mask[1] = 0x00;
                ht_cap->mcs.rx_mask[4] = 0x01;
 
-               ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+               ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
        }
 }
 
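rx_highest is a __le16 field (struct ieee80211_mcs_info), so storing a host-order constant is an endianness bug on big-endian machines and a sparse warning everywhere; values must cross that boundary through cpu_to_le16()/le16_to_cpu(). In isolation:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_caps {
            __le16 rx_highest;      /* stored little-endian */
    };

    static void set_rx_highest(struct example_caps *caps, u16 value)
    {
            caps->rx_highest = cpu_to_le16(value);  /* host order -> LE */
    }

    static u16 get_rx_highest(const struct example_caps *caps)
    {
            return le16_to_cpu(caps->rx_highest);   /* LE -> host order */
    }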
@@ -283,13 +283,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
        rtlmac->hw = hw;
 
        /* <2> rate control register */
-       if (rtl_rate_control_register()) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("rtl: Unable to register rtl_rc,"
-                         "use default RC !!\n"));
-       } else {
-               hw->rate_control_algorithm = "rtl_rc";
-       }
+       hw->rate_control_algorithm = "rtl_rc";
 
        /*
         * <3> init CRDA must come after init
@@ -325,8 +319,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
 
 void rtl_deinit_core(struct ieee80211_hw *hw)
 {
-        /*RC*/
-       rtl_rate_control_unregister();
 }
 
 void rtl_init_rx_config(struct ieee80211_hw *hw)
@@ -399,21 +391,21 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
        u8 rate_flag = info->control.rates[0].flags;
 
        /* Common Settings */
-       tcb_desc->b_rts_stbc = false;
-       tcb_desc->b_cts_enable = false;
+       tcb_desc->rts_stbc = false;
+       tcb_desc->cts_enable = false;
        tcb_desc->rts_sc = 0;
-       tcb_desc->b_rts_bw = false;
-       tcb_desc->b_rts_use_shortpreamble = false;
-       tcb_desc->b_rts_use_shortgi = false;
+       tcb_desc->rts_bw = false;
+       tcb_desc->rts_use_shortpreamble = false;
+       tcb_desc->rts_use_shortgi = false;
 
        if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                /* Use CTS-to-SELF in protection mode. */
-               tcb_desc->b_rts_enable = true;
-               tcb_desc->b_cts_enable = true;
+               tcb_desc->rts_enable = true;
+               tcb_desc->cts_enable = true;
                tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
        } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
                /* Use RTS-CTS in protection mode. */
-               tcb_desc->b_rts_enable = true;
+               tcb_desc->rts_enable = true;
                tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
        }
 
@@ -429,7 +421,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
                if (mac->opmode == NL80211_IFTYPE_STATION)
                        tcb_desc->ratr_index = 0;
                else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-                       if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
+                       if (tcb_desc->multicast || tcb_desc->broadcast) {
                                tcb_desc->hw_rate =
                                    rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
                                tcb_desc->use_driver_rate = 1;
@@ -439,7 +431,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
                }
        }
 
-       if (rtlpriv->dm.b_useramask) {
+       if (rtlpriv->dm.useramask) {
                /* TODO: we will differentiate adhoc and station in the future */
                tcb_desc->mac_id = 0;
 
@@ -461,19 +453,19 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-       tcb_desc->b_packet_bw = false;
+       tcb_desc->packet_bw = false;
 
        if (!mac->bw_40 || !mac->ht_enable)
                return;
 
-       if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
+       if (tcb_desc->multicast || tcb_desc->broadcast)
                return;
 
        /* use legacy rate, shall use 20MHz */
        if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
                return;
 
-       tcb_desc->b_packet_bw = true;
+       tcb_desc->packet_bw = true;
 }
 
 static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
@@ -498,7 +490,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
        struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
        struct ieee80211_rate *txrate;
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
 
        memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
 
@@ -545,9 +537,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
                }
 
                if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
-                       tcb_desc->b_multicast = 1;
+                       tcb_desc->multicast = 1;
                else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
-                       tcb_desc->b_broadcast = 1;
+                       tcb_desc->broadcast = 1;
 
                _rtl_txrate_selectmode(hw, tcb_desc);
                _rtl_query_bandwidth_mode(hw, tcb_desc);
@@ -570,7 +562,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
 
        if (ieee80211_is_auth(fc)) {
                RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
@@ -587,7 +579,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
        u8 category;
 
@@ -632,7 +624,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u16 ether_type;
        u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
        const struct iphdr *ip;
@@ -646,7 +638,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
        ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
                              SNAP_SIZE + PROTOC_TYPE_SIZE);
        ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
-       ether_type = ntohs(ether_type);
 
        if (ETH_P_IP == ether_type) {
                if (IPPROTO_UDP == ip->protocol) {
@@ -690,7 +681,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                }
 
                return true;
-       } else if (0x86DD == ether_type) {
+       } else if (ETH_P_IPV6 == ether_type) {
+               /* IPv6 */
                return true;
        }
 
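The fc conversions are dropped here and in the surrounding helpers because the ieee80211_is_*() predicates in <linux/ieee80211.h> take frame_control in its on-air __le16 form and do the masking internally; keeping fc as __le16 avoids a needless byte swap and the matching sparse warnings. For example:

    #include <linux/ieee80211.h>
    #include <linux/skbuff.h>

    static bool frame_is_auth(struct sk_buff *skb)
    {
            struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
            __le16 fc = hdr->frame_control;   /* keep the on-air byte order */

            return ieee80211_is_auth(fc);     /* helper masks internally */
    }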
@@ -777,10 +769,10 @@ void rtl_watchdog_wq_callback(void *data)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-       bool b_busytraffic = false;
-       bool b_higher_busytraffic = false;
-       bool b_higher_busyrxtraffic = false;
-       bool b_higher_busytxtraffic = false;
+       bool busytraffic = false;
+       bool higher_busytraffic = false;
+       bool higher_busyrxtraffic = false;
+       bool higher_busytxtraffic = false;
 
        u8 idx = 0;
        u32 rx_cnt_inp4eriod = 0;
@@ -788,7 +780,7 @@ void rtl_watchdog_wq_callback(void *data)
        u32 aver_rx_cnt_inperiod = 0;
        u32 aver_tx_cnt_inperiod = 0;
 
-       bool benter_ps = false;
+       bool enter_ps = false;
 
        if (is_hal_stop(rtlhal))
                return;
@@ -832,29 +824,29 @@ void rtl_watchdog_wq_callback(void *data)
 
                /* (2) check traffic busy */
                if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
-                       b_busytraffic = true;
+                       busytraffic = true;
 
                /* Higher Tx/Rx data. */
                if (aver_rx_cnt_inperiod > 4000 ||
                    aver_tx_cnt_inperiod > 4000) {
-                       b_higher_busytraffic = true;
+                       higher_busytraffic = true;
 
                        /* Extremely high Rx data. */
                        if (aver_rx_cnt_inperiod > 5000)
-                               b_higher_busyrxtraffic = true;
+                               higher_busyrxtraffic = true;
                        else
-                               b_higher_busytxtraffic = false;
+                               higher_busytxtraffic = false;
                }
 
                if (((rtlpriv->link_info.num_rx_inperiod +
                      rtlpriv->link_info.num_tx_inperiod) > 8) ||
                    (rtlpriv->link_info.num_rx_inperiod > 2))
-                       benter_ps = false;
+                       enter_ps = false;
                else
-                       benter_ps = true;
+                       enter_ps = true;
 
                /* LeisurePS only work in infra mode. */
-               if (benter_ps)
+               if (enter_ps)
                        rtl_lps_enter(hw);
                else
                        rtl_lps_leave(hw);
@@ -863,9 +855,9 @@ void rtl_watchdog_wq_callback(void *data)
        rtlpriv->link_info.num_rx_inperiod = 0;
        rtlpriv->link_info.num_tx_inperiod = 0;
 
-       rtlpriv->link_info.b_busytraffic = b_busytraffic;
-       rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
-       rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
+       rtlpriv->link_info.busytraffic = busytraffic;
+       rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
+       rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
 
 }
 
@@ -945,11 +937,16 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
 
 static int __init rtl_core_module_init(void)
 {
+       if (rtl_rate_control_register())
+               printk(KERN_ERR "rtlwifi: Unable to register rtl_rc, "
+                      "using default RC!\n");
        return 0;
 }
 
 static void __exit rtl_core_module_exit(void)
 {
+        /*RC*/
+       rtl_rate_control_unregister();
 }
 
 module_init(rtl_core_module_init);
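Registering the "rtl_rc" rate-control algorithm is a once-per-module action (presumably a thin wrapper around mac80211's ieee80211_rate_control_register()), so it moves out of the per-device rtl_init_core() path into the module init/exit hooks, paired so the algorithm is unregistered exactly once on unload. The bare pattern, with hypothetical wrappers:

    #include <linux/module.h>

    /* my_rate_control_register()/unregister() are made-up wrapper names. */
    static int __init example_core_init(void)
    {
            if (my_rate_control_register())
                    pr_err("example: rate control registration failed, "
                           "falling back to the default algorithm\n");
            return 0;       /* the module still loads either way */
    }

    static void __exit example_core_exit(void)
    {
            my_rate_control_unregister();
    }

    module_init(example_core_init);
    module_exit(example_core_exit);
    MODULE_LICENSE("GPL");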
index 3de5a14..0430453 100644 (file)
@@ -30,6 +30,7 @@
 #define __RTL_BASE_H__
 
 #define RTL_DUMMY_OFFSET       0
+#define RTL_RX_DESC_SIZE       24
 #define RTL_DUMMY_UNIT         8
 #define RTL_TX_DUMMY_SIZE      (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
 #define RTL_TX_DESC_SIZE       32
 #define FRAME_OFFSET_SEQUENCE          22
 #define FRAME_OFFSET_ADDRESS4          24
 
-#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val)                \
-       WRITEEF2BYTE(_hdr, _val)
-#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val)     \
-       WRITEEF1BYTE(_hdr, _val)
-#define SET_80211_HDR_PWR_MGNT(_hdr, _val)             \
-       SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
-#define SET_80211_HDR_TO_DS(_hdr, _val)                        \
-       SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
 
 #define SET_80211_PS_POLL_AID(_hdr, _val)              \
-       WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val)
+       (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
 #define SET_80211_PS_POLL_BSSID(_hdr, _val)            \
-       CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val))
+       memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
 #define SET_80211_PS_POLL_TA(_hdr, _val)               \
-       CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val))
+       memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN)
 
 #define SET_80211_HDR_DURATION(_hdr, _val)     \
-       WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val)
+       (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
 #define SET_80211_HDR_ADDRESS1(_hdr, _val)     \
-       CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
+       memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN)
 #define SET_80211_HDR_ADDRESS2(_hdr, _val)     \
-       CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val))
+       memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN)
 #define SET_80211_HDR_ADDRESS3(_hdr, _val)     \
-       CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
-#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val)  \
-       WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
-
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val)     \
-       WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
-       WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
-#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
-       WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
-#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr)   \
-       READEF2BYTE(((u8 *)(__phdr)) + 34)
-#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
-       WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
-#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
-       SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
-       (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+       memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN)
 
 int rtl_init_core(struct ieee80211_hw *hw);
 void rtl_deinit_core(struct ieee80211_hw *hw);
index d6a924a..e4f4aee 100644 (file)
@@ -82,7 +82,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_unlock(&rtlpriv->locks.conf_mutex);
 }
 
-static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -97,11 +97,10 @@ static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        rtlpriv->intf_ops->adapter_tx(hw, skb);
 
-       return NETDEV_TX_OK;
+       return;
 
 err_free:
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 static int rtl_op_add_interface(struct ieee80211_hw *hw,
@@ -434,9 +433,9 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
        aci = _rtl_get_hal_qnum(queue);
        mac->ac[aci].aifs = param->aifs;
-       mac->ac[aci].cw_min = param->cw_min;
-       mac->ac[aci].cw_max = param->cw_max;
-       mac->ac[aci].tx_op = param->txop;
+       mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
+       mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
+       mac->ac[aci].tx_op = cpu_to_le16(param->txop);
        memcpy(&mac->edca_param[aci], param, sizeof(*param));
        rtlpriv->cfg->ops->set_qos(hw, aci);
        return 0;
@@ -552,6 +551,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
                         ("BSS_CHANGED_HT\n"));
 
+               rcu_read_lock();
                sta = ieee80211_find_sta(mac->vif, mac->bssid);
 
                if (sta) {
@@ -564,6 +564,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                mac->current_ampdu_factor =
                                    sta->ht_cap.ampdu_factor;
                }
+               rcu_read_unlock();
 
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
                                              (u8 *) (&mac->max_mss_density));
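ieee80211_find_sta() returns an RCU-protected pointer, so both the lookup and every dereference of the result have to sit inside rcu_read_lock()/rcu_read_unlock(), and the pointer must not be stashed for use after the unlock; any values needed later are copied out inside the critical section, as done here with the A-MPDU parameters. The pattern on its own:

    #include <linux/rcupdate.h>
    #include <net/mac80211.h>

    static u8 read_ampdu_density(struct ieee80211_vif *vif, const u8 *bssid)
    {
            struct ieee80211_sta *sta;
            u8 density = 0;

            rcu_read_lock();
            sta = ieee80211_find_sta(vif, bssid);
            if (sta)
                    density = sta->ht_cap.ampdu_density; /* copy under RCU */
            rcu_read_unlock();

            return density;
    }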
@@ -615,6 +616,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                else
                        mac->mode = WIRELESS_MODE_G;
 
+               rcu_read_lock();
                sta = ieee80211_find_sta(mac->vif, mac->bssid);
 
                if (sta) {
@@ -649,6 +651,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                 */
                        }
                }
+               rcu_read_unlock();
 
                /*mac80211 just give us CCK rates any time
                 *So we add G rate in basic rates when
@@ -666,7 +669,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
                                        (u8 *) (&basic_rates));
 
-                       if (rtlpriv->dm.b_useramask)
+                       if (rtlpriv->dm.useramask)
                                rtlpriv->cfg->ops->update_rate_mask(hw, 0);
                        else
                                rtlpriv->cfg->ops->update_rate_table(hw);
@@ -681,7 +684,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
         */
        if (changed & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
-                       if (ppsc->b_fwctrl_lps) {
+                       if (ppsc->fwctrl_lps) {
                                u8 mstatus = RT_MEDIA_CONNECT;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                      HW_VAR_H2C_FW_JOINBSSRPT,
@@ -689,7 +692,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                ppsc->report_linked = true;
                        }
                } else {
-                       if (ppsc->b_fwctrl_lps) {
+                       if (ppsc->fwctrl_lps) {
                                u8 mstatus = RT_MEDIA_DISCONNECT;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                      HW_VAR_H2C_FW_JOINBSSRPT,
@@ -748,7 +751,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
 static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                              u8 buf_size)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -817,7 +821,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
                /* fix fwlps issue */
                rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
 
-               if (rtlpriv->dm.b_useramask)
+               if (rtlpriv->dm.useramask)
                        rtlpriv->cfg->ops->update_rate_mask(hw, 0);
                else
                        rtlpriv->cfg->ops->update_rate_table(hw);
index 08bdec2..e4aa868 100644 (file)
 #define COMP_MAC80211          BIT(26)
 #define COMP_REGD                      BIT(27)
 #define COMP_CHAN                      BIT(28)
+#define COMP_USB                       BIT(29)
 
 /*--------------------------------------------------------------
                Define the rt_print components
index b8433f3..4f92cba 100644 (file)
@@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
 }
 
 static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
-                               u8 efuse_data, u8 offset, int *bcontinual,
-                               u8 *write_state, struct pgpkt_struct target_pkt,
-                               int *repeat_times, int *bresult, u8 word_en)
+                       u8 efuse_data, u8 offset, int *bcontinual,
+                       u8 *write_state, struct pgpkt_struct *target_pkt,
+                       int *repeat_times, int *bresult, u8 word_en)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct pgpkt_struct tmp_pkt;
@@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
        tmp_pkt.word_en = tmp_header & 0x0F;
        tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
 
-       if (tmp_pkt.offset != target_pkt.offset) {
-               efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+       if (tmp_pkt.offset != target_pkt->offset) {
+               *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
                *write_state = PG_STATE_HEADER;
        } else {
                for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
                }
 
                if (bdataempty == false) {
-                       efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+                       *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
                        *write_state = PG_STATE_HEADER;
                } else {
                        match_word_en = 0x0F;
-                       if (!((target_pkt.word_en & BIT(0)) |
+                       if (!((target_pkt->word_en & BIT(0)) |
                             (tmp_pkt.word_en & BIT(0))))
                                match_word_en &= (~BIT(0));
 
-                       if (!((target_pkt.word_en & BIT(1)) |
+                       if (!((target_pkt->word_en & BIT(1)) |
                             (tmp_pkt.word_en & BIT(1))))
                                match_word_en &= (~BIT(1));
 
-                       if (!((target_pkt.word_en & BIT(2)) |
+                       if (!((target_pkt->word_en & BIT(2)) |
                             (tmp_pkt.word_en & BIT(2))))
                                match_word_en &= (~BIT(2));
 
-                       if (!((target_pkt.word_en & BIT(3)) |
+                       if (!((target_pkt->word_en & BIT(3)) |
                             (tmp_pkt.word_en & BIT(3))))
                                match_word_en &= (~BIT(3));
 
@@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
                                badworden = efuse_word_enable_data_write(
                                                            hw, *efuse_addr + 1,
                                                            tmp_pkt.word_en,
-                                                           target_pkt.data);
+                                                           target_pkt->data);
 
                                if (0x0F != (badworden & 0x0F)) {
                                        u8 reorg_offset = offset;
@@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
                                }
 
                                tmp_word_en = 0x0F;
-                               if ((target_pkt.word_en & BIT(0)) ^
+                               if ((target_pkt->word_en & BIT(0)) ^
                                    (match_word_en & BIT(0)))
                                        tmp_word_en &= (~BIT(0));
 
-                               if ((target_pkt.word_en & BIT(1)) ^
+                               if ((target_pkt->word_en & BIT(1)) ^
                                    (match_word_en & BIT(1)))
                                        tmp_word_en &= (~BIT(1));
 
-                               if ((target_pkt.word_en & BIT(2)) ^
+                               if ((target_pkt->word_en & BIT(2)) ^
                                        (match_word_en & BIT(2)))
                                        tmp_word_en &= (~BIT(2));
 
-                               if ((target_pkt.word_en & BIT(3)) ^
+                               if ((target_pkt->word_en & BIT(3)) ^
                                    (match_word_en & BIT(3)))
                                        tmp_word_en &= (~BIT(3));
 
                                if ((tmp_word_en & 0x0F) != 0x0F) {
                                        *efuse_addr = efuse_get_current_size(hw);
-                                       target_pkt.offset = offset;
-                                       target_pkt.word_en = tmp_word_en;
+                                       target_pkt->offset = offset;
+                                       target_pkt->word_en = tmp_word_en;
                                } else
                                        *bcontinual = false;
                                *write_state = PG_STATE_HEADER;
@@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
                                }
                        } else {
                                *efuse_addr += (2 * tmp_word_cnts) + 1;
-                               target_pkt.offset = offset;
-                               target_pkt.word_en = word_en;
+                               target_pkt->offset = offset;
+                               target_pkt->word_en = word_en;
                                *write_state = PG_STATE_HEADER;
                        }
                }
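efuse_write_data_case1() used to take target_pkt by value, so its updates to offset and word_en only changed a local copy and were lost when the function returned; passing a pointer lets the state machine in efuse_pg_packet_write() see those updates. The difference reduced to its core:

    #include <linux/types.h>

    struct pkt {
            u8 offset;
            u8 word_en;
    };

    /* By value: the caller's pkt is unchanged after this returns. */
    static void adjust_by_value(struct pkt p, u8 new_word_en)
    {
            p.word_en = new_word_en;        /* modifies a local copy only */
    }

    /* By pointer: the caller observes the update. */
    static void adjust_by_pointer(struct pkt *p, u8 new_word_en)
    {
            p->word_en = new_word_en;
    }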
@@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
                                efuse_write_data_case1(hw, &efuse_addr,
                                                       efuse_data, offset,
                                                       &bcontinual,
-                                                      &write_state, target_pkt,
+                                                      &write_state, &target_pkt,
                                                       &repeat_times, &bresult,
                                                       word_en);
                        else
@@ -1169,21 +1169,3 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
        return word_cnts;
 }
 
-void efuse_reset_loader(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 tmp_u2b;
-
-       tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
-       rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
-                      (tmp_u2b & ~(BIT(12))));
-       udelay(10000);
-       rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
-                      (tmp_u2b | BIT(12)));
-       udelay(10000);
-}
-
-bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype)
-{
-       return true;
-}
index 2d39a4d..47774dd 100644 (file)
@@ -117,8 +117,5 @@ extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
 extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
 extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
 extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
-extern bool efuse_program_map(struct ieee80211_hw *hw,
-                             char *p_filename, u8 tabletype);
-extern void efuse_reset_loader(struct ieee80211_hw *hw);
 
 #endif
index 0fa36aa..9cd7703 100644 (file)
@@ -50,7 +50,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
        u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
 
        ppsc->reg_rfps_level = 0;
-       ppsc->b_support_aspm = 0;
+       ppsc->support_aspm = 0;
 
        /*Update PCI ASPM setting */
        ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -115,29 +115,29 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
        switch (rtlpci->const_support_pciaspm) {
        case 0:{
                        /*Not support ASPM. */
-                       bool b_support_aspm = false;
-                       ppsc->b_support_aspm = b_support_aspm;
+                       bool support_aspm = false;
+                       ppsc->support_aspm = support_aspm;
                        break;
                }
        case 1:{
                        /*Support ASPM. */
-                       bool b_support_aspm = true;
-                       bool b_support_backdoor = true;
-                       ppsc->b_support_aspm = b_support_aspm;
+                       bool support_aspm = true;
+                       bool support_backdoor = true;
+                       ppsc->support_aspm = support_aspm;
 
                        /*if(priv->oem_id == RT_CID_TOSHIBA &&
                           !priv->ndis_adapter.amd_l1_patch)
-                          b_support_backdoor = false; */
+                          support_backdoor = false; */
 
-                       ppsc->b_support_backdoor = b_support_backdoor;
+                       ppsc->support_backdoor = support_backdoor;
 
                        break;
                }
        case 2:
                /*ASPM value set by chipset. */
                if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
-                       bool b_support_aspm = true;
-                       ppsc->b_support_aspm = b_support_aspm;
+                       bool support_aspm = true;
+                       ppsc->support_aspm = support_aspm;
                }
                break;
        default:
@@ -476,9 +476,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
 
                skb = __skb_dequeue(&ring->queue);
                pci_unmap_single(rtlpci->pdev,
-                                le32_to_cpu(rtlpriv->cfg->ops->
+                                rtlpriv->cfg->ops->
                                             get_desc((u8 *) entry, true,
-                                                     HW_DESC_TXBUFF_ADDR)),
+                                                     HW_DESC_TXBUFF_ADDR),
                                 skb->len, PCI_DMA_TODEVICE);
 
                RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
@@ -557,7 +557,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        return;
                } else {
                        struct ieee80211_hdr *hdr;
-                       u16 fc;
+                       __le16 fc;
                        struct sk_buff *new_skb = NULL;
 
                        rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
@@ -583,9 +583,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                         */
 
                        hdr = (struct ieee80211_hdr *)(skb->data);
-                       fc = le16_to_cpu(hdr->frame_control);
+                       fc = hdr->frame_control;
 
-                       if (!stats.b_crc) {
+                       if (!stats.crc) {
                                memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                                       sizeof(rx_status));
 
@@ -619,6 +619,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                        struct sk_buff *uskb = NULL;
                                        u8 *pdata;
                                        uskb = dev_alloc_skb(skb->len + 128);
+                                       if (!uskb) {
+                                               RT_TRACE(rtlpriv,
+                                                       (COMP_INTR | COMP_RECV),
+                                                       DBG_EMERG,
+                                                       ("can't alloc rx skb\n"));
+                                               goto done;
+                                       }
                                        memcpy(IEEE80211_SKB_RXCB(uskb),
                                                        &rx_status,
                                                        sizeof(rx_status));
@@ -641,7 +648,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
                        if (unlikely(!new_skb)) {
                                RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-                                        DBG_DMESG,
+                                        DBG_EMERG,
                                         ("can't alloc skb for rx\n"));
                                goto done;
                        }
@@ -659,7 +666,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 
                }
 done:
-               bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
+               bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
                tmp_one = 1;
                rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
                                            HW_DESC_RXBUFF_ADDR,
@@ -683,75 +690,6 @@ done:
 
 }
 
-void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       int prio;
-
-       for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
-               struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
-
-               while (skb_queue_len(&ring->queue)) {
-                       struct rtl_tx_desc *entry = &ring->desc[ring->idx];
-                       struct sk_buff *skb;
-                       struct ieee80211_tx_info *info;
-                       u8 own;
-
-                       /*
-                        *beacon packet will only use the first
-                        *descriptor defautly, and the own may not
-                        *be cleared by the hardware, and
-                        *beacon will free in prepare beacon
-                        */
-                       if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||
-                           prio == HCCA_QUEUE)
-                               break;
-
-                       own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,
-                                                              true,
-                                                              HW_DESC_OWN);
-
-                       if (own)
-                               break;
-
-                       skb = __skb_dequeue(&ring->queue);
-                       pci_unmap_single(rtlpci->pdev,
-                                        le32_to_cpu(rtlpriv->cfg->ops->
-                                                    get_desc((u8 *) entry,
-                                                    true,
-                                                    HW_DESC_TXBUFF_ADDR)),
-                                        skb->len, PCI_DMA_TODEVICE);
-
-                       ring->idx = (ring->idx + 1) % ring->entries;
-
-                       info = IEEE80211_SKB_CB(skb);
-                       ieee80211_tx_info_clear_status(info);
-
-                       info->flags |= IEEE80211_TX_STAT_ACK;
-                       /*info->status.rates[0].count = 1; */
-
-                       ieee80211_tx_status_irqsafe(hw, skb);
-
-                       if ((ring->entries - skb_queue_len(&ring->queue))
-                           == 2 && prio != BEACON_QUEUE) {
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                        ("more desc left, wake "
-                                         "skb_queue@%d,ring->idx = %d,"
-                                         "skb_queue_len = 0x%d\n",
-                                         prio, ring->idx,
-                                         skb_queue_len(&ring->queue)));
-
-                               ieee80211_wake_queue(hw,
-                                                    skb_get_queue_mapping
-                                                    (skb));
-                       }
-
-                       skb = NULL;
-               }
-       }
-}
-
 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 {
        struct ieee80211_hw *hw = dev_id;
@@ -952,17 +890,17 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
        rtlhal->hw = hw;
        rtlpci->pdev = pdev;
 
-       ppsc->b_inactiveps = false;
-       ppsc->b_leisure_ps = true;
-       ppsc->b_fwctrl_lps = true;
-       ppsc->b_reg_fwctrl_lps = 3;
+       ppsc->inactiveps = false;
+       ppsc->leisure_ps = true;
+       ppsc->fwctrl_lps = true;
+       ppsc->reg_fwctrl_lps = 3;
        ppsc->reg_max_lps_awakeintvl = 5;
 
-       if (ppsc->b_reg_fwctrl_lps == 1)
+       if (ppsc->reg_fwctrl_lps == 1)
                ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
-       else if (ppsc->b_reg_fwctrl_lps == 2)
+       else if (ppsc->reg_fwctrl_lps == 2)
                ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
-       else if (ppsc->b_reg_fwctrl_lps == 3)
+       else if (ppsc->reg_fwctrl_lps == 3)
                ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
 
        /*Tx/Rx related var */
@@ -1017,9 +955,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
                 ("queue:%d, ring_addr:%p\n", prio, ring));
 
        for (i = 0; i < entries; i++) {
-               nextdescaddress = cpu_to_le32((u32) dma +
-                                             ((i + 1) % entries) *
-                                             sizeof(*ring));
+               nextdescaddress = (u32) dma + ((i + 1) % entries) *
+                                             sizeof(*ring);
 
                rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
                                            true, HW_DESC_TX_NEXTDESC_ADDR,
@@ -1066,9 +1003,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
                        struct sk_buff *skb =
                            dev_alloc_skb(rtlpci->rxbuffersize);
                        u32 bufferaddress;
-                       entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
                        if (!skb)
                                return 0;
+                       entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
 
                        /*skb->dev = dev; */
 
@@ -1083,7 +1020,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
                                           rtlpci->rxbuffersize,
                                           PCI_DMA_FROMDEVICE);
 
-                       bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
+                       bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
                        rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
                                                    HW_DESC_RXBUFF_ADDR,
                                                    (u8 *)&bufferaddress);
@@ -1114,9 +1051,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
                struct sk_buff *skb = __skb_dequeue(&ring->queue);
 
                pci_unmap_single(rtlpci->pdev,
-                                le32_to_cpu(rtlpriv->cfg->
+                                rtlpriv->cfg->
                                             ops->get_desc((u8 *) entry, true,
-                                                  HW_DESC_TXBUFF_ADDR)),
+                                                  HW_DESC_TXBUFF_ADDR),
                                 skb->len, PCI_DMA_TODEVICE);
                kfree_skb(skb);
                ring->idx = (ring->idx + 1) % ring->entries;
@@ -1248,11 +1185,11 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
                                    __skb_dequeue(&ring->queue);
 
                                pci_unmap_single(rtlpci->pdev,
-                                                le32_to_cpu(rtlpriv->cfg->ops->
+                                                rtlpriv->cfg->ops->
                                                         get_desc((u8 *)
                                                         entry,
                                                         true,
-                                                        HW_DESC_TXBUFF_ADDR)),
+                                                        HW_DESC_TXBUFF_ADDR),
                                                 skb->len, PCI_DMA_TODEVICE);
                                kfree_skb(skb);
                                ring->idx = (ring->idx + 1) % ring->entries;
@@ -1266,7 +1203,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
        return 0;
 }
 
-unsigned int _rtl_mac_to_hwqueue(u16 fc,
+static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
                unsigned int mac80211_queue_index)
 {
        unsigned int hw_queue_index;
@@ -1305,7 +1242,7 @@ out:
        return hw_queue_index;
 }
 
-int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1316,7 +1253,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        unsigned int queue_index, hw_queue;
        unsigned long flags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u8 *pda_addr = hdr->addr1;
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        /*ssn */
@@ -1422,7 +1359,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        return 0;
 }
 
-void rtl_pci_deinit(struct ieee80211_hw *hw)
+static void rtl_pci_deinit(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1437,7 +1374,7 @@ void rtl_pci_deinit(struct ieee80211_hw *hw)
 
 }
 
-int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        int err;
@@ -1454,7 +1391,7 @@ int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
        return 1;
 }
 
-int rtl_pci_start(struct ieee80211_hw *hw)
+static int rtl_pci_start(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1489,7 +1426,7 @@ int rtl_pci_start(struct ieee80211_hw *hw)
        return 0;
 }
 
-void rtl_pci_stop(struct ieee80211_hw *hw)
+static void rtl_pci_stop(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1540,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
        struct pci_dev *bridge_pdev = pdev->bus->self;
        u16 venderid;
        u16 deviceid;
-       u8 revisionid;
        u16 irqline;
        u8 tmp;
 
        venderid = pdev->vendor;
        deviceid = pdev->device;
-       pci_read_config_byte(pdev, 0x8, &revisionid);
        pci_read_config_word(pdev, 0x3C, &irqline);
 
        if (deviceid == RTL_PCI_8192_DID ||
@@ -1557,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
            deviceid == RTL_PCI_8173_DID ||
            deviceid == RTL_PCI_8172_DID ||
            deviceid == RTL_PCI_8171_DID) {
-               switch (revisionid) {
+               switch (pdev->revision) {
                case RTL_PCI_REVISION_ID_8192PCIE:
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                                 ("8192 PCI-E is found - "
@@ -1831,7 +1766,7 @@ fail3:
        ieee80211_free_hw(hw);
 
        if (rtlpriv->io.pci_mem_start != 0)
-               pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+               pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
 
 fail2:
        pci_release_regions(pdev);
@@ -1881,7 +1816,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
        }
 
        if (rtlpriv->io.pci_mem_start != 0) {
-               pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+               pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
                pci_release_regions(pdev);
        }
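
The pci.c hunks above make the bus entry points static, drop the manual config-space read of the revision register in favour of the value the PCI core already caches, and annotate the mapped BAR with __iomem before handing it to pci_iounmap(). A minimal sketch of the revision-ID idiom, using a hypothetical example_probe() callback that is not part of this driver:

#include <linux/pci.h>

/* Sketch only, not part of the patch: the PCI core caches the revision ID
 * at enumeration time, so pdev->revision replaces a raw config read of
 * offset 0x08 (PCI_REVISION_ID). */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u8 rev = pdev->revision;

	dev_info(&pdev->dev, "chip revision 0x%02x\n", rev);
	return 0;
}
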
 
index d36a669..0caa814 100644 (file)
@@ -244,34 +244,34 @@ int rtl_pci_resume(struct pci_dev *pdev);
 
 static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-       return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr);
+       return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-       return readw((u8 *) rtlpriv->io.pci_mem_start + addr);
+       return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-       return readl((u8 *) rtlpriv->io.pci_mem_start + addr);
+       return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
 {
-       writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+       writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void pci_write16_async(struct rtl_priv *rtlpriv,
                                     u32 addr, u16 val)
 {
-       writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+       writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void pci_write32_async(struct rtl_priv *rtlpriv,
                                     u32 addr, u32 val)
 {
-       writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+       writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
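
The accessor changes above keep readb()/writel() but cast pci_mem_start to a __iomem pointer at every call site (and drop the redundant 0xff mask in pci_read8_sync). A sketch of the same idea with the qualifier carried in the type itself, assuming a hypothetical example_dev structure:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical device structure: keeping the mapped BAR __iomem-qualified
 * from the start avoids per-call casts and lets sparse verify every access. */
struct example_dev {
	void __iomem *mmio;	/* as returned by pci_iomap()/ioremap() */
};

static inline u8 example_read8(struct example_dev *edev, u32 offset)
{
	return readb(edev->mmio + offset);
}

static inline void example_write32(struct example_dev *edev, u32 offset, u32 val)
{
	writel(val, edev->mmio + offset);
}
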
index d2326c1..6b7e217 100644 (file)
@@ -86,7 +86,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        enum rf_pwrstate rtstate;
-       bool b_actionallowed = false;
+       bool actionallowed = false;
        u16 rfwait_cnt = 0;
        unsigned long flag;
 
@@ -139,13 +139,13 @@ no_protect:
                ppsc->rfoff_reason &= (~changesource);
 
                if ((changesource == RF_CHANGE_BY_HW) &&
-                   (ppsc->b_hwradiooff == true)) {
-                       ppsc->b_hwradiooff = false;
+                   (ppsc->hwradiooff == true)) {
+                       ppsc->hwradiooff = false;
                }
 
                if (!ppsc->rfoff_reason) {
                        ppsc->rfoff_reason = 0;
-                       b_actionallowed = true;
+                       actionallowed = true;
                }
 
                break;
@@ -153,17 +153,17 @@ no_protect:
        case ERFOFF:
 
                if ((changesource == RF_CHANGE_BY_HW)
-                   && (ppsc->b_hwradiooff == false)) {
-                       ppsc->b_hwradiooff = true;
+                   && (ppsc->hwradiooff == false)) {
+                       ppsc->hwradiooff = true;
                }
 
                ppsc->rfoff_reason |= changesource;
-               b_actionallowed = true;
+               actionallowed = true;
                break;
 
        case ERFSLEEP:
                ppsc->rfoff_reason |= changesource;
-               b_actionallowed = true;
+               actionallowed = true;
                break;
 
        default:
@@ -172,7 +172,7 @@ no_protect:
                break;
        }
 
-       if (b_actionallowed)
+       if (actionallowed)
                rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
 
        if (!protect_or_not) {
@@ -181,7 +181,7 @@ no_protect:
                spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
        }
 
-       return b_actionallowed;
+       return actionallowed;
 }
 EXPORT_SYMBOL(rtl_ps_set_rf_state);
 
@@ -191,7 +191,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 
-       ppsc->b_swrf_processing = true;
+       ppsc->swrf_processing = true;
 
        if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
                if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
@@ -213,7 +213,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
                }
        }
 
-       ppsc->b_swrf_processing = false;
+       ppsc->swrf_processing = false;
 }
 
 void rtl_ips_nic_off_wq_callback(void *data)
@@ -239,13 +239,13 @@ void rtl_ips_nic_off_wq_callback(void *data)
        if (rtlpriv->sec.being_setkey)
                return;
 
-       if (ppsc->b_inactiveps) {
+       if (ppsc->inactiveps) {
                rtstate = ppsc->rfpwr_state;
 
                /*
                 *Do not enter IPS in the following conditions:
                 *(1) RF is already OFF or Sleep
-                *(2) b_swrf_processing (indicates the IPS is still under going)
+                *(2) swrf_processing (indicates an IPS transition is still in progress)
                 *(3) Connected (only disconnected can trigger IPS)
                 *(4) IBSS (send Beacon)
                 *(5) AP mode (send Beacon)
@@ -253,14 +253,14 @@ void rtl_ips_nic_off_wq_callback(void *data)
                 */
 
                if (rtstate == ERFON &&
-                   !ppsc->b_swrf_processing &&
+                   !ppsc->swrf_processing &&
                    (mac->link_state == MAC80211_NOLINK) &&
                    !mac->act_scanning) {
                        RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
                                 ("IPSEnter(): Turn off RF.\n"));
 
                        ppsc->inactive_pwrstate = ERFOFF;
-                       ppsc->b_in_powersavemode = true;
+                       ppsc->in_powersavemode = true;
 
                        /*rtl_pci_reset_trx_ring(hw); */
                        _rtl_ps_inactive_ps(hw);
@@ -290,15 +290,15 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
 
        spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
 
-       if (ppsc->b_inactiveps) {
+       if (ppsc->inactiveps) {
                rtstate = ppsc->rfpwr_state;
 
                if (rtstate != ERFON &&
-                   !ppsc->b_swrf_processing &&
+                   !ppsc->swrf_processing &&
                    ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
 
                        ppsc->inactive_pwrstate = ERFON;
-                       ppsc->b_in_powersavemode = false;
+                       ppsc->in_powersavemode = false;
 
                        _rtl_ps_inactive_ps(hw);
                }
@@ -370,9 +370,9 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
         *   mode and set RPWM to turn RF on.
         */
 
-       if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) &&
+       if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) &&
             ppsc->report_linked) {
-               bool b_fw_current_inps;
+               bool fw_current_inps;
                if (ppsc->dot11_psmode == EACTIVE) {
                        RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                                 ("FW LPS leave ps_mode:%x\n",
@@ -385,11 +385,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_H2C_FW_PWRMODE,
                                        (u8 *) (&fw_pwrmode));
-                       b_fw_current_inps = false;
+                       fw_current_inps = false;
 
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_FW_PSMODE_STATUS,
-                                       (u8 *) (&b_fw_current_inps));
+                                       (u8 *) (&fw_current_inps));
 
                } else {
                        if (rtl_get_fwlps_doze(hw)) {
@@ -398,10 +398,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
                                                 ppsc->fwctrl_psmode));
 
                                rpwm_val = 0x02;        /* RF off */
-                               b_fw_current_inps = true;
+                               fw_current_inps = true;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_FW_PSMODE_STATUS,
-                                               (u8 *) (&b_fw_current_inps));
+                                               (u8 *) (&fw_current_inps));
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_H2C_FW_PWRMODE,
                                                (u8 *) (&ppsc->fwctrl_psmode));
@@ -425,13 +425,13 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        unsigned long flag;
 
-       if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps))
+       if (!(ppsc->fwctrl_lps && ppsc->leisure_ps))
                return;
 
        if (rtlpriv->sec.being_setkey)
                return;
 
-       if (rtlpriv->link_info.b_busytraffic)
+       if (rtlpriv->link_info.busytraffic)
                return;
 
        /*sleep only after 10s of being linked, so DHCP and the 4-way handshake have time to finish */
@@ -446,7 +446,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
 
        spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
 
-       if (ppsc->b_leisure_ps) {
+       if (ppsc->leisure_ps) {
                /* Idle for a while if we connect to AP a while ago. */
                if (mac->cnt_after_linked >= 2) {
                        if (ppsc->dot11_psmode == EACTIVE) {
@@ -470,7 +470,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
 
        spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
 
-       if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) {
+       if (ppsc->fwctrl_lps && ppsc->leisure_ps) {
                if (ppsc->dot11_psmode != EACTIVE) {
 
                        /*FIX ME */
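
The ps.c hunks above are mostly the b_-prefix rename, but the comment in rtl_ips_nic_off_wq_callback() spells out when inactive power save may be entered. The same gate, written as a hypothetical helper (driver headers assumed), for illustration only:

/* Hypothetical helper, driver headers assumed; rtl_ips_nic_off_wq_callback()
 * performs the same checks inline before switching the RF off. */
static bool example_can_enter_ips(struct rtl_ps_ctl *ppsc, struct rtl_mac *mac)
{
	return ppsc->inactiveps &&			/* IPS enabled at all */
	       ppsc->rfpwr_state == ERFON &&		/* RF not already off/asleep */
	       !ppsc->swrf_processing &&		/* no RF switch in flight */
	       mac->link_state == MAC80211_NOLINK &&	/* not associated */
	       !mac->act_scanning;			/* not scanning */
}
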
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
new file mode 100644 (file)
index 0000000..aee42d7
--- /dev/null
@@ -0,0 +1,9 @@
+rtl8192c-common-objs :=                \
+               main.o          \
+               dm_common.o     \
+               fw_common.o     \
+               phy_common.o
+
+obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
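
ccflags-y += -D__CHECK_ENDIAN__ does not change the compiled objects; it only matters when the files are run through sparse (for example make C=2 M=drivers/net/wireless/rtlwifi), where __le16/__le32 become restricted bitwise types. A small illustration of what that catches, in the spirit of the frame_control change to __le16 in pci.c above:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustration only: under sparse with __CHECK_ENDIAN__, __le32 is a
 * restricted type, so returning it as a plain u32 without le32_to_cpu()
 * would be reported as an endianness violation. */
static inline u32 example_le32_to_cpu(__le32 hw_value)
{
	return le32_to_cpu(hw_value);
}
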
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
new file mode 100644 (file)
index 0000000..bb02327
--- /dev/null
@@ -0,0 +1,1398 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "dm_common.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
+       0x7f8001fe,
+       0x788001e2,
+       0x71c001c7,
+       0x6b8001ae,
+       0x65400195,
+       0x5fc0017f,
+       0x5a400169,
+       0x55400155,
+       0x50800142,
+       0x4c000130,
+       0x47c0011f,
+       0x43c0010f,
+       0x40000100,
+       0x3c8000f2,
+       0x390000e4,
+       0x35c000d7,
+       0x32c000cb,
+       0x300000c0,
+       0x2d4000b5,
+       0x2ac000ab,
+       0x288000a2,
+       0x26000098,
+       0x24000090,
+       0x22000088,
+       0x20000080,
+       0x1e400079,
+       0x1c800072,
+       0x1b00006c,
+       0x19800066,
+       0x18000060,
+       0x16c0005b,
+       0x15800056,
+       0x14400051,
+       0x1300004c,
+       0x12000048,
+       0x11000044,
+       0x10000040,
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+       {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
+       {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
+       {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
+       {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
+       {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
+       {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
+       {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
+       {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
+       {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
+       {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
+       {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
+       {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
+       {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
+       {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
+       {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
+       {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
+       {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
+       {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
+       {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
+       {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+       {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+       {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
+       {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
+       {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
+       {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
+       {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
+       {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
+       {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
+       {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
+       {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
+       {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
+       {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
+       {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+       {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
+       {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
+       {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
+       {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
+       {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
+       {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
+       {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
+       {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
+       {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
+       {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
+       {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
+       {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
+       {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
+       {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
+       {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
+       {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
+       {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
+       {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
+       {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
+       {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+       {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+       {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
+       {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
+       {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+       {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+       {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
+       {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+       {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+       {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
+};
+
+static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
+{
+       dm_digtable.dig_enable_flag = true;
+       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+       dm_digtable.cur_igvalue = 0x20;
+       dm_digtable.pre_igvalue = 0x0;
+       dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+       dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+       dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+       dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+       dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+       dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+       dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+       dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+       dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+       dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+       dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+       dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+       dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
+       dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+}
+
+static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       long rssi_val_min = 0;
+
+       if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
+           (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
+               if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
+                       rssi_val_min =
+                           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
+                            rtlpriv->dm.undecorated_smoothed_pwdb) ?
+                           rtlpriv->dm.undecorated_smoothed_pwdb :
+                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+               else
+                       rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+       } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
+                  dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+               rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+       } else if (dm_digtable.curmultista_connectstate ==
+                  DIG_MULTISTA_CONNECT) {
+               rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+       }
+
+       return (u8) rssi_val_min;
+}
+
+static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+       u32 ret_value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+       falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
+
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+       falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
+       falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
+
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+       falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+       falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+           falsealm_cnt->cnt_rate_illegal +
+           falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+
+       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
+       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+       falsealm_cnt->cnt_cck_fail = ret_value;
+
+       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+       falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+       falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
+                                falsealm_cnt->cnt_rate_illegal +
+                                falsealm_cnt->cnt_crc8_fail +
+                                falsealm_cnt->cnt_mcs_fail +
+                                falsealm_cnt->cnt_cck_fail);
+
+       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+                 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+                 falsealm_cnt->cnt_parity_fail,
+                 falsealm_cnt->cnt_rate_illegal,
+                 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+                 falsealm_cnt->cnt_ofdm_fail,
+                 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
+}
+
+static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value_igi = dm_digtable.cur_igvalue;
+
+       if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+               value_igi--;
+       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
+               value_igi += 0;
+       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
+               value_igi++;
+       else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
+               value_igi += 2;
+       if (value_igi > DM_DIG_FA_UPPER)
+               value_igi = DM_DIG_FA_UPPER;
+       else if (value_igi < DM_DIG_FA_LOWER)
+               value_igi = DM_DIG_FA_LOWER;
+       if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+               value_igi = 0x32;
+
+       dm_digtable.cur_igvalue = value_igi;
+       rtl92c_dm_write_dig(hw);
+}
+
+static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
+               if ((dm_digtable.backoff_val - 2) <
+                   dm_digtable.backoff_val_range_min)
+                       dm_digtable.backoff_val =
+                           dm_digtable.backoff_val_range_min;
+               else
+                       dm_digtable.backoff_val -= 2;
+       } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
+               if ((dm_digtable.backoff_val + 2) >
+                   dm_digtable.backoff_val_range_max)
+                       dm_digtable.backoff_val =
+                           dm_digtable.backoff_val_range_max;
+               else
+                       dm_digtable.backoff_val += 2;
+       }
+
+       if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
+           dm_digtable.rx_gain_range_max)
+               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
+       else if ((dm_digtable.rssi_val_min + 10 -
+                 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
+               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
+       else
+               dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
+                   dm_digtable.backoff_val;
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("rssi_val_min = %x backoff_val %x\n",
+                 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
+
+       rtl92c_dm_write_dig(hw);
+}
+
+static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
+{
+       static u8 binitialized; /* initialized to false */
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+       bool multi_sta = false;
+
+       if (mac->opmode == NL80211_IFTYPE_ADHOC)
+               multi_sta = true;
+
+       if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
+                                    DIG_STA_DISCONNECT)) {
+               binitialized = false;
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+               return;
+       } else if (binitialized == false) {
+               binitialized = true;
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+               dm_digtable.cur_igvalue = 0x20;
+               rtl92c_dm_write_dig(hw);
+       }
+
+       if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+               if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
+                   (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
+
+                       if (dm_digtable.dig_ext_port_stage ==
+                           DIG_EXT_PORT_STAGE_2) {
+                               dm_digtable.cur_igvalue = 0x20;
+                               rtl92c_dm_write_dig(hw);
+                       }
+
+                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
+               } else if (rssi_strength > dm_digtable.rssi_highthresh) {
+                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
+                       rtl92c_dm_ctrl_initgain_by_fa(hw);
+               }
+       } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+               dm_digtable.cur_igvalue = 0x20;
+               rtl92c_dm_write_dig(hw);
+       }
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("curmultista_connectstate = "
+                 "%x dig_ext_port_stage %x\n",
+                 dm_digtable.curmultista_connectstate,
+                 dm_digtable.dig_ext_port_stage));
+}
+
+static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("presta_connectstate = %x,"
+                 " cursta_connectctate = %x\n",
+                 dm_digtable.presta_connectstate,
+                 dm_digtable.cursta_connectctate));
+
+       if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
+           || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
+           || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+
+               if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
+                       dm_digtable.rssi_val_min =
+                           rtl92c_dm_initial_gain_min_pwdb(hw);
+                       rtl92c_dm_ctrl_initgain_by_rssi(hw);
+               }
+       } else {
+               dm_digtable.rssi_val_min = 0;
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+               dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+               dm_digtable.cur_igvalue = 0x20;
+               dm_digtable.pre_igvalue = 0;
+               rtl92c_dm_write_dig(hw);
+       }
+}
+
+static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+               dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+
+               if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+                       if (dm_digtable.rssi_val_min <= 25)
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_LowRssi;
+                       else
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_HighRssi;
+               } else {
+                       if (dm_digtable.rssi_val_min <= 20)
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_LowRssi;
+                       else
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_HighRssi;
+               }
+       } else {
+               dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+       }
+
+       if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
+               if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+                       if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
+                               dm_digtable.cur_cck_fa_state =
+                                   CCK_FA_STAGE_High;
+                       else
+                               dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
+
+                       if (dm_digtable.pre_cck_fa_state !=
+                           dm_digtable.cur_cck_fa_state) {
+                               if (dm_digtable.cur_cck_fa_state ==
+                                   CCK_FA_STAGE_Low)
+                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+                                                     0x83);
+                               else
+                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+                                                     0xcd);
+
+                               dm_digtable.pre_cck_fa_state =
+                                   dm_digtable.cur_cck_fa_state;
+                       }
+
+                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
+
+                       if (IS_92C_SERIAL(rtlhal->version))
+                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+                                             MASKBYTE2, 0xd7);
+               } else {
+                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+
+                       if (IS_92C_SERIAL(rtlhal->version))
+                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+                                             MASKBYTE2, 0xd3);
+               }
+               dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
+       }
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
+}
+
+static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       if (mac->act_scanning == true)
+               return;
+
+       if ((mac->link_state > MAC80211_NOLINK) &&
+           (mac->link_state < MAC80211_LINKED))
+               dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
+       else if (mac->link_state >= MAC80211_LINKED)
+               dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
+       else
+               dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+
+       rtl92c_dm_initial_gain_sta(hw);
+       rtl92c_dm_initial_gain_multi_sta(hw);
+       rtl92c_dm_cck_packet_detection_thresh(hw);
+
+       dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
+
+}
+
+static void rtl92c_dm_dig(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->dm.dm_initialgain_enable == false)
+               return;
+       if (dm_digtable.dig_enable_flag == false)
+               return;
+
+       rtl92c_dm_ctrl_initgain_by_twoport(hw);
+
+}
+
+static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dynamic_txpower_enable = false;
+
+       rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+       rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+                ("cur_igvalue = 0x%x, "
+                 "pre_igvalue = 0x%x, backoff_val = %d\n",
+                 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
+                 dm_digtable.backoff_val));
+
+       if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
+               rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+                             dm_digtable.cur_igvalue);
+               rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+                             dm_digtable.cur_igvalue);
+
+               dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
+       }
+}
+EXPORT_SYMBOL(rtl92c_dm_write_dig);
+
+static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+       u8 h2c_parameter[3] = { 0 };
+
+       return;
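+       /*
+        * NOTE: the unconditional return above means the pwdb bookkeeping
+        * and the H2C_RSSI_REPORT command below are never executed.
+        */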
+
+       if (tmpentry_max_pwdb != 0) {
+               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
+                   tmpentry_max_pwdb;
+       } else {
+               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+       }
+
+       if (tmpentry_min_pwdb != 0xff) {
+               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
+                   tmpentry_min_pwdb;
+       } else {
+               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+       }
+
+       h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+       h2c_parameter[0] = 0;
+
+       rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+}
+
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       rtlpriv->dm.current_turbo_edca = false;
+       rtlpriv->dm.is_any_nonbepkts = false;
+       rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
+
+static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       static u64 last_txok_cnt;
+       static u64 last_rxok_cnt;
+       u64 cur_txok_cnt;
+       u64 cur_rxok_cnt;
+       u32 edca_be_ul = 0x5ea42b;
+       u32 edca_be_dl = 0x5ea42b;
+
+       if (mac->opmode == NL80211_IFTYPE_ADHOC)
+               goto dm_checkedcaturbo_exit;
+
+       if (mac->link_state != MAC80211_LINKED) {
+               rtlpriv->dm.current_turbo_edca = false;
+               return;
+       }
+
+       if (!mac->ht_enable) {  /*FIX MERGE */
+               if (!(edca_be_ul & 0xffff0000))
+                       edca_be_ul |= 0x005e0000;
+
+               if (!(edca_be_dl & 0xffff0000))
+                       edca_be_dl |= 0x005e0000;
+       }
+
+       if ((!rtlpriv->dm.is_any_nonbepkts) &&
+           (!rtlpriv->dm.disable_framebursting)) {
+               cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+               cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+               if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+                       if (!rtlpriv->dm.is_cur_rdlstate ||
+                           !rtlpriv->dm.current_turbo_edca) {
+                               rtl_write_dword(rtlpriv,
+                                               REG_EDCA_BE_PARAM,
+                                               edca_be_dl);
+                               rtlpriv->dm.is_cur_rdlstate = true;
+                       }
+               } else {
+                       if (rtlpriv->dm.is_cur_rdlstate ||
+                           !rtlpriv->dm.current_turbo_edca) {
+                               rtl_write_dword(rtlpriv,
+                                               REG_EDCA_BE_PARAM,
+                                               edca_be_ul);
+                               rtlpriv->dm.is_cur_rdlstate = false;
+                       }
+               }
+               rtlpriv->dm.current_turbo_edca = true;
+       } else {
+               if (rtlpriv->dm.current_turbo_edca) {
+                       u8 tmp = AC0_BE;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_AC_PARAM,
+                                                     (u8 *) (&tmp));
+                       rtlpriv->dm.current_turbo_edca = false;
+               }
+       }
+
+dm_checkedcaturbo_exit:
+       rtlpriv->dm.is_any_nonbepkts = false;
+       last_txok_cnt = rtlpriv->stats.txbytesunicast;
+       last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
+                                                            *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 thermalvalue, delta, delta_lck, delta_iqk;
+       long ele_a, ele_d, temp_cck, val_x, value32;
+       long val_y, ele_c;
+       u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
+       int i;
+       bool is2t = IS_92C_SERIAL(rtlhal->version);
+       u8 txpwr_level[2] = {0, 0};
+       u8 ofdm_min_index = 6, rf;
+
+       rtlpriv->dm.txpower_trackingInit = true;
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
+
+       thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+                 "eeprom_thermalmeter 0x%x\n",
+                 thermalvalue, rtlpriv->dm.thermalvalue,
+                 rtlefuse->eeprom_thermalmeter));
+
+       rtl92c_phy_ap_calibrate(hw, (thermalvalue -
+                                    rtlefuse->eeprom_thermalmeter));
+       if (is2t)
+               rf = 2;
+       else
+               rf = 1;
+
+       if (thermalvalue) {
+               ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                     MASKDWORD) & MASKOFDM_D;
+
+               for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+                       if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+                               ofdm_index_old[0] = (u8) i;
+
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                       ("Initial pathA ele_d reg0x%x = 0x%lx, "
+                                        "ofdm_index=0x%x\n",
+                                        ROFDM0_XATXIQIMBALANCE,
+                                        ele_d, ofdm_index_old[0]));
+                               break;
+                       }
+               }
+
+               if (is2t) {
+                       ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+                                             MASKDWORD) & MASKOFDM_D;
+
+                       for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+                               if (ele_d == (ofdmswing_table[i] &
+                                   MASKOFDM_D)) {
+                                       ofdm_index_old[1] = (u8) i;
+
+                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+                                          DBG_LOUD,
+                                          ("Initial pathB ele_d reg0x%x = "
+                                          "0x%lx, ofdm_index=0x%x\n",
+                                          ROFDM0_XBTXIQIMBALANCE, ele_d,
+                                          ofdm_index_old[1]));
+                                       break;
+                               }
+                       }
+               }
+
+               temp_cck =
+                   rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
+
+               for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+                       if (rtlpriv->dm.cck_inch14) {
+                               if (memcmp((void *)&temp_cck,
+                                          (void *)&cckswing_table_ch14[i][2],
+                                          4) == 0) {
+                                       cck_index_old = (u8) i;
+
+                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+                                                DBG_LOUD,
+                                                ("Initial reg0x%x = 0x%lx, "
+                                                 "cck_index=0x%x, ch 14 %d\n",
+                                                 RCCK0_TXFILTER2, temp_cck,
+                                                 cck_index_old,
+                                                 rtlpriv->dm.cck_inch14));
+                                       break;
+                               }
+                       } else {
+                               if (memcmp((void *)&temp_cck,
+                                          (void *)
+                                          &cckswing_table_ch1ch13[i][2],
+                                          4) == 0) {
+                                       cck_index_old = (u8) i;
+
+                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+                                                DBG_LOUD,
+                                                ("Initial reg0x%x = 0x%lx, "
+                                                 "cck_index=0x%x, ch14 %d\n",
+                                                 RCCK0_TXFILTER2, temp_cck,
+                                                 cck_index_old,
+                                                 rtlpriv->dm.cck_inch14));
+                                       break;
+                               }
+                       }
+               }
+
+               if (!rtlpriv->dm.thermalvalue) {
+                       rtlpriv->dm.thermalvalue =
+                           rtlefuse->eeprom_thermalmeter;
+                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
+                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+                       for (i = 0; i < rf; i++)
+                               rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+                       rtlpriv->dm.cck_index = cck_index_old;
+               }
+
+               delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue) :
+                   (rtlpriv->dm.thermalvalue - thermalvalue);
+
+               delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+                   (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+
+               delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+                   (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                       ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+                        "eeprom_thermalmeter 0x%x delta 0x%x "
+                        "delta_lck 0x%x delta_iqk 0x%x\n",
+                        thermalvalue, rtlpriv->dm.thermalvalue,
+                        rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+                        delta_iqk));
+
+               if (delta_lck > 1) {
+                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
+                       rtl92c_phy_lc_calibrate(hw);
+               }
+
+               if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+                       if (thermalvalue > rtlpriv->dm.thermalvalue) {
+                               for (i = 0; i < rf; i++)
+                                       rtlpriv->dm.ofdm_index[i] -= delta;
+                               rtlpriv->dm.cck_index -= delta;
+                       } else {
+                               for (i = 0; i < rf; i++)
+                                       rtlpriv->dm.ofdm_index[i] += delta;
+                               rtlpriv->dm.cck_index += delta;
+                       }
+
+                       if (is2t) {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("temp OFDM_A_index=0x%x, "
+                                         "OFDM_B_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         rtlpriv->dm.ofdm_index[0],
+                                         rtlpriv->dm.ofdm_index[1],
+                                         rtlpriv->dm.cck_index));
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("temp OFDM_A_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         rtlpriv->dm.ofdm_index[0],
+                                         rtlpriv->dm.cck_index));
+                       }
+
+                       if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+                               for (i = 0; i < rf; i++)
+                                       ofdm_index[i] =
+                                           rtlpriv->dm.ofdm_index[i]
+                                           + 1;
+                               cck_index = rtlpriv->dm.cck_index + 1;
+                       } else {
+                               for (i = 0; i < rf; i++)
+                                       ofdm_index[i] =
+                                           rtlpriv->dm.ofdm_index[i];
+                               cck_index = rtlpriv->dm.cck_index;
+                       }
+
+                       for (i = 0; i < rf; i++) {
+                               if (txpwr_level[i] >= 0 &&
+                                   txpwr_level[i] <= 26) {
+                                       if (thermalvalue >
+                                           rtlefuse->eeprom_thermalmeter) {
+                                               if (delta < 5)
+                                                       ofdm_index[i] -= 1;
+
+                                               else
+                                                       ofdm_index[i] -= 2;
+                                       } else if (delta > 5 && thermalvalue <
+                                                  rtlefuse->
+                                                  eeprom_thermalmeter) {
+                                               ofdm_index[i] += 1;
+                                       }
+                               } else if (txpwr_level[i] >= 27 &&
+                                          txpwr_level[i] <= 32
+                                          && thermalvalue >
+                                          rtlefuse->eeprom_thermalmeter) {
+                                       if (delta < 5)
+                                               ofdm_index[i] -= 1;
+
+                                       else
+                                               ofdm_index[i] -= 2;
+                               } else if (txpwr_level[i] >= 32 &&
+                                          txpwr_level[i] <= 38 &&
+                                          thermalvalue >
+                                          rtlefuse->eeprom_thermalmeter
+                                          && delta > 5) {
+                                       ofdm_index[i] -= 1;
+                               }
+                       }
+
+                       if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
+                               if (thermalvalue >
+                                   rtlefuse->eeprom_thermalmeter) {
+                                       if (delta < 5)
+                                               cck_index -= 1;
+
+                                       else
+                                               cck_index -= 2;
+                               } else if (delta > 5 && thermalvalue <
+                                          rtlefuse->eeprom_thermalmeter) {
+                                       cck_index += 1;
+                               }
+                       } else if (txpwr_level[i] >= 27 &&
+                                  txpwr_level[i] <= 32 &&
+                                  thermalvalue >
+                                  rtlefuse->eeprom_thermalmeter) {
+                               if (delta < 5)
+                                       cck_index -= 1;
+
+                               else
+                                       cck_index -= 2;
+                       } else if (txpwr_level[i] >= 32 &&
+                                  txpwr_level[i] <= 38 &&
+                                  thermalvalue > rtlefuse->eeprom_thermalmeter
+                                  && delta > 5) {
+                               cck_index -= 1;
+                       }
+
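+                       /* Clamp the new indices to the swing table bounds. */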
+                       for (i = 0; i < rf; i++) {
+                               if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
+                                       ofdm_index[i] = OFDM_TABLE_SIZE - 1;
+
+                               else if (ofdm_index[i] < ofdm_min_index)
+                                       ofdm_index[i] = ofdm_min_index;
+                       }
+
+                       if (cck_index > CCK_TABLE_SIZE - 1)
+                               cck_index = CCK_TABLE_SIZE - 1;
+                       else if (cck_index < 0)
+                               cck_index = 0;
+
+                       if (is2t) {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("new OFDM_A_index=0x%x, "
+                                         "OFDM_B_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         ofdm_index[0], ofdm_index[1],
+                                         cck_index));
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("new OFDM_A_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         ofdm_index[0], cck_index));
+                       }
+               }
+
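+               /*
+                * Write the new swing values when tracking is enabled and
+                * the thermal delta is non-zero, folding in the stored IQ
+                * calibration results (reg_e94/reg_e9c) when present.
+                */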
+               if (rtlpriv->dm.txpower_track_control && delta != 0) {
+                       ele_d =
+                           (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+                       val_x = rtlphy->reg_e94;
+                       val_y = rtlphy->reg_e9c;
+
+                       if (val_x != 0) {
+                               if ((val_x & 0x00000200) != 0)
+                                       val_x = val_x | 0xFFFFFC00;
+                               ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+                               if ((val_y & 0x00000200) != 0)
+                                       val_y = val_y | 0xFFFFFC00;
+                               ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+                               value32 = (ele_d << 22) |
+                                   ((ele_c & 0x3F) << 16) | ele_a;
+
+                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                             MASKDWORD, value32);
+
+                               value32 = (ele_c & 0x000003C0) >> 6;
+                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+                                             value32);
+
+                               value32 = ((val_x * ele_d) >> 7) & 0x01;
+                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                             BIT(31), value32);
+
+                               value32 = ((val_y * ele_d) >> 7) & 0x01;
+                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                             BIT(29), value32);
+                       } else {
+                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                             MASKDWORD,
+                                             ofdmswing_table[ofdm_index[0]]);
+
+                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+                                             0x00);
+                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                             BIT(31) | BIT(29), 0x00);
+                       }
+
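+                       /* Program the CCK swing; ch14 uses its own table. */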
+                       if (!rtlpriv->dm.cck_inch14) {
+                               rtl_write_byte(rtlpriv, 0xa22,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [0]);
+                               rtl_write_byte(rtlpriv, 0xa23,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [1]);
+                               rtl_write_byte(rtlpriv, 0xa24,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [2]);
+                               rtl_write_byte(rtlpriv, 0xa25,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [3]);
+                               rtl_write_byte(rtlpriv, 0xa26,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [4]);
+                               rtl_write_byte(rtlpriv, 0xa27,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [5]);
+                               rtl_write_byte(rtlpriv, 0xa28,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [6]);
+                               rtl_write_byte(rtlpriv, 0xa29,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [7]);
+                       } else {
+                               rtl_write_byte(rtlpriv, 0xa22,
+                                              cckswing_table_ch14[cck_index]
+                                              [0]);
+                               rtl_write_byte(rtlpriv, 0xa23,
+                                              cckswing_table_ch14[cck_index]
+                                              [1]);
+                               rtl_write_byte(rtlpriv, 0xa24,
+                                              cckswing_table_ch14[cck_index]
+                                              [2]);
+                               rtl_write_byte(rtlpriv, 0xa25,
+                                              cckswing_table_ch14[cck_index]
+                                              [3]);
+                               rtl_write_byte(rtlpriv, 0xa26,
+                                              cckswing_table_ch14[cck_index]
+                                              [4]);
+                               rtl_write_byte(rtlpriv, 0xa27,
+                                              cckswing_table_ch14[cck_index]
+                                              [5]);
+                               rtl_write_byte(rtlpriv, 0xa28,
+                                              cckswing_table_ch14[cck_index]
+                                              [6]);
+                               rtl_write_byte(rtlpriv, 0xa29,
+                                              cckswing_table_ch14[cck_index]
+                                              [7]);
+                       }
+
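+                       /* Repeat for path B (reg_eb4/reg_ebc) on 2T parts. */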
+                       if (is2t) {
+                               ele_d = (ofdmswing_table[ofdm_index[1]] &
+                                        0xFFC00000) >> 22;
+
+                               val_x = rtlphy->reg_eb4;
+                               val_y = rtlphy->reg_ebc;
+
+                               if (val_x != 0) {
+                                       if ((val_x & 0x00000200) != 0)
+                                               val_x = val_x | 0xFFFFFC00;
+                                       ele_a = ((val_x * ele_d) >> 8) &
+                                           0x000003FF;
+
+                                       if ((val_y & 0x00000200) != 0)
+                                               val_y = val_y | 0xFFFFFC00;
+                                       ele_c = ((val_y * ele_d) >> 8) &
+                                           0x000003FF;
+
+                                       value32 = (ele_d << 22) |
+                                           ((ele_c & 0x3F) << 16) | ele_a;
+                                       rtl_set_bbreg(hw,
+                                                     ROFDM0_XBTXIQIMBALANCE,
+                                                     MASKDWORD, value32);
+
+                                       value32 = (ele_c & 0x000003C0) >> 6;
+                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+                                                     MASKH4BITS, value32);
+
+                                       value32 = ((val_x * ele_d) >> 7) & 0x01;
+                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                                     BIT(27), value32);
+
+                                       value32 = ((val_y * ele_d) >> 7) & 0x01;
+                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                                     BIT(25), value32);
+                               } else {
+                                       rtl_set_bbreg(hw,
+                                                     ROFDM0_XBTXIQIMBALANCE,
+                                                     MASKDWORD,
+                                                     ofdmswing_table[ofdm_index
+                                                                     [1]]);
+                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+                                                     MASKH4BITS, 0x00);
+                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                                     BIT(27) | BIT(25), 0x00);
+                               }
+
+                       }
+               }
+
+               if (delta_iqk > 3) {
+                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+                       rtl92c_phy_iq_calibrate(hw, false);
+               }
+
+               if (rtlpriv->dm.txpower_track_control)
+                       rtlpriv->dm.thermalvalue = thermalvalue;
+       }
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+}
+
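+/* Mark thermal-meter based Tx power tracking enabled but uninitialised. */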
+static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
+                                               struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.txpower_tracking = true;
+       rtlpriv->dm.txpower_trackingInit = false;
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                ("pMgntInfo->txpower_tracking = %d\n",
+                 rtlpriv->dm.txpower_tracking));
+}
+
+static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+       rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
+}
+
+static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
+{
+       rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
+}
+
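+/*
+ * Alternate between triggering the RF thermal meter and, on the next
+ * watchdog pass, running the tracking callback on the new reading.
+ */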
+static void rtl92c_dm_check_txpower_tracking_thermal_meter(
+                                               struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       static u8 tm_trigger;
+
+       if (!rtlpriv->dm.txpower_tracking)
+               return;
+
+       if (!tm_trigger) {
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
+                             0x60);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        ("Trigger 92S Thermal Meter!!\n"));
+               tm_trigger = 1;
+               return;
+       } else {
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        ("Schedule TxPowerTracking direct call!!\n"));
+               rtl92c_dm_txpower_tracking_directcall(hw);
+               tm_trigger = 0;
+       }
+}
+
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+       rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
+
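+/* Rate mask is driver-controlled only when dm_type is DM_TYPE_BYDRIVER. */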
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+       p_ra->ratr_state = DM_RATR_STA_INIT;
+       p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+       if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+               rtlpriv->dm.useramask = true;
+       else
+               rtlpriv->dm.useramask = false;
+}
+EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
+
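+/*
+ * Map the smoothed RSSI to a HIGH/MIDDLE/LOW state using thresholds that
+ * depend on the previous state (hysteresis), and push a new rate mask to
+ * the hardware only when the state changes.
+ */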
+static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rate_adaptive *p_ra = &(rtlpriv->ra);
+       u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+
+       if (is_hal_stop(rtlhal)) {
+               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                        ("<---- driver is going to unload\n"));
+               return;
+       }
+
+       if (!rtlpriv->dm.useramask) {
+               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                       ("<---- driver does not control rate adaptive mask\n"));
+               return;
+       }
+
+       if (mac->link_state == MAC80211_LINKED) {
+
+               switch (p_ra->pre_ratr_state) {
+               case DM_RATR_STA_HIGH:
+                       high_rssithresh_for_ra = 50;
+                       low_rssithresh_for_ra = 20;
+                       break;
+               case DM_RATR_STA_MIDDLE:
+                       high_rssithresh_for_ra = 55;
+                       low_rssithresh_for_ra = 20;
+                       break;
+               case DM_RATR_STA_LOW:
+                       high_rssithresh_for_ra = 50;
+                       low_rssithresh_for_ra = 25;
+                       break;
+               default:
+                       high_rssithresh_for_ra = 50;
+                       low_rssithresh_for_ra = 20;
+                       break;
+               }
+
+               if (rtlpriv->dm.undecorated_smoothed_pwdb >
+                   (long)high_rssithresh_for_ra)
+                       p_ra->ratr_state = DM_RATR_STA_HIGH;
+               else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+                        (long)low_rssithresh_for_ra)
+                       p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+               else
+                       p_ra->ratr_state = DM_RATR_STA_LOW;
+
+               if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                ("RSSI = %ld\n",
+                                 rtlpriv->dm.undecorated_smoothed_pwdb));
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                ("PreState = %d, CurState = %d\n",
+                                 p_ra->pre_ratr_state, p_ra->ratr_state));
+
+                       rtlpriv->cfg->ops->update_rate_mask(hw,
+                                       p_ra->ratr_state);
+
+                       p_ra->pre_ratr_state = p_ra->ratr_state;
+               }
+       }
+}
+
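+/* Reset BB power-save state: CCA and RF modes unknown, no RSSI yet. */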
+static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+       dm_pstable.pre_ccastate = CCA_MAX;
+       dm_pstable.cur_ccasate = CCA_MAX;
+       dm_pstable.pre_rfstate = RF_MAX;
+       dm_pstable.cur_rfstate = RF_MAX;
+       dm_pstable.rssi_val_min = 0;
+}
+
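+/*
+ * Select 1R or 2R CCA from the minimum RSSI with hysteresis (enter 1R
+ * at >= 35, fall back to 2R at <= 30) and reprogram the Rx path on a
+ * change.
+ */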
+static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (dm_pstable.rssi_val_min != 0) {
+               if (dm_pstable.pre_ccastate == CCA_2R) {
+                       if (dm_pstable.rssi_val_min >= 35)
+                               dm_pstable.cur_ccasate = CCA_1R;
+                       else
+                               dm_pstable.cur_ccasate = CCA_2R;
+               } else {
+                       if (dm_pstable.rssi_val_min <= 30)
+                               dm_pstable.cur_ccasate = CCA_2R;
+                       else
+                               dm_pstable.cur_ccasate = CCA_1R;
+               }
+       } else {
+               dm_pstable.cur_ccasate = CCA_MAX;
+       }
+
+       if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
+               if (dm_pstable.cur_ccasate == CCA_1R) {
+                       if (get_rf_type(rtlphy) == RF_2T2R) {
+                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+                                             MASKBYTE0, 0x13);
+                               rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
+                       } else {
+                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+                                             MASKBYTE0, 0x23);
+                               rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
+                       }
+               } else {
+                       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
+                                     0x33);
+                       rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
+               }
+               dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
+       }
+
+       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
+                                              (dm_pstable.cur_ccasate ==
+                                               0) ? "1RCCA" : "2RCCA"));
+}
+
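+/*
+ * RF save mode: latch the default register fields on first use, then
+ * switch between RF_SAVE and RF_NORMAL register settings based on the
+ * minimum RSSI (with hysteresis), unless normal mode is forced.
+ */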
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
+{
+       static u8 initialize;
+       static u32 reg_874, reg_c70, reg_85c, reg_a74;
+
+       if (initialize == 0) {
+               reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                        MASKDWORD) & 0x1CC000) >> 14;
+
+               reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+                                        MASKDWORD) & BIT(3)) >> 3;
+
+               reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+                                        MASKDWORD) & 0xFF000000) >> 24;
+
+               reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+
+               initialize = 1;
+       }
+
+       if (!bforce_in_normal) {
+               if (dm_pstable.rssi_val_min != 0) {
+                       if (dm_pstable.pre_rfstate == RF_NORMAL) {
+                               if (dm_pstable.rssi_val_min >= 30)
+                                       dm_pstable.cur_rfstate = RF_SAVE;
+                               else
+                                       dm_pstable.cur_rfstate = RF_NORMAL;
+                       } else {
+                               if (dm_pstable.rssi_val_min <= 25)
+                                       dm_pstable.cur_rfstate = RF_NORMAL;
+                               else
+                                       dm_pstable.cur_rfstate = RF_SAVE;
+                       }
+               } else {
+                       dm_pstable.cur_rfstate = RF_MAX;
+               }
+       } else {
+               dm_pstable.cur_rfstate = RF_NORMAL;
+       }
+
+       if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
+               if (dm_pstable.cur_rfstate == RF_SAVE) {
+                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                     0x1C0000, 0x2);
+                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
+                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+                                     0xFF000000, 0x63);
+                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                     0xC000, 0x2);
+                       rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
+                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
+               } else {
+                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                     0x1CC000, reg_874);
+                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
+                                     reg_c70);
+                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
+                                     reg_85c);
+                       rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+               }
+
+               dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
+       }
+}
+EXPORT_SYMBOL(rtl92c_dm_rf_saving);
+
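+/*
+ * Pick the RSSI source for BB power saving from the link/operating mode,
+ * then run the 1R CCA logic on 92C-series chips only.
+ */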
+static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (((mac->link_state == MAC80211_NOLINK)) &&
+           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+               dm_pstable.rssi_val_min = 0;
+               RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+                        ("Not connected to any\n"));
+       }
+
+       if (mac->link_state == MAC80211_LINKED) {
+               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                       dm_pstable.rssi_val_min =
+                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+                       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+                                ("AP Client PWDB = 0x%lx\n",
+                                 dm_pstable.rssi_val_min));
+               } else {
+                       dm_pstable.rssi_val_min =
+                           rtlpriv->dm.undecorated_smoothed_pwdb;
+                       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+                                ("STA Default Port PWDB = 0x%lx\n",
+                                 dm_pstable.rssi_val_min));
+               }
+       } else {
+               dm_pstable.rssi_val_min =
+                   rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+               RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+                        ("AP Ext Port PWDB = 0x%lx\n",
+                         dm_pstable.rssi_val_min));
+       }
+
+       if (IS_92C_SERIAL(rtlhal->version))
+               rtl92c_dm_1r_cca(hw);
+}
+
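+/* Initialise every DM module with the driver (not firmware) in control. */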
+void rtl92c_dm_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+       rtl92c_dm_diginit(hw);
+       rtl92c_dm_init_dynamic_txpower(hw);
+       rtl92c_dm_init_edca_turbo(hw);
+       rtl92c_dm_init_rate_adaptive_mask(hw);
+       rtl92c_dm_initialize_txpower_tracking(hw);
+       rtl92c_dm_init_dynamic_bb_powersaving(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_init);
+
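+/*
+ * Periodic DM worker: run the dynamic mechanisms only while the RF is on,
+ * the firmware reports the radio awake and no RF change is in progress.
+ */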
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool fw_current_inpsmode = false;
+       bool fw_ps_awake = true;
+
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                     (u8 *) (&fw_current_inpsmode));
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+                                     (u8 *) (&fw_ps_awake));
+
+       if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
+                                            fw_ps_awake)
+           && (!ppsc->rfchange_inprogress)) {
+               rtl92c_dm_pwdb_monitor(hw);
+               rtl92c_dm_dig(hw);
+               rtl92c_dm_false_alarm_counter_statistics(hw);
+               rtl92c_dm_dynamic_bb_powersaving(hw);
+               rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
+               rtl92c_dm_check_txpower_tracking(hw);
+               rtl92c_dm_refresh_rate_adaptive_mask(hw);
+               rtl92c_dm_check_edca_turbo(hw);
+       }
+}
+EXPORT_SYMBOL(rtl92c_dm_watchdog);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
new file mode 100644 (file)
index 0000000..b9cbb0a
--- /dev/null
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef        __RTL92COMMON_DM_H__
+#define __RTL92COMMON_DM_H__
+
+#include "../wifi.h"
+#include "../rtl8192ce/def.h"
+#include "../rtl8192ce/reg.h"
+#include "fw_common.h"
+
+#define HAL_DM_DIG_DISABLE                     BIT(0)
+#define HAL_DM_HIPWR_DISABLE                   BIT(1)
+
+#define OFDM_TABLE_LENGTH                      37
+#define CCK_TABLE_LENGTH                       33
+
+#define OFDM_TABLE_SIZE                                37
+#define CCK_TABLE_SIZE                         33
+
+#define BW_AUTO_SWITCH_HIGH_LOW                        25
+#define BW_AUTO_SWITCH_LOW_HIGH                        30
+
+#define DM_DIG_THRESH_HIGH                     40
+#define DM_DIG_THRESH_LOW                      35
+
+#define DM_FALSEALARM_THRESH_LOW               400
+#define DM_FALSEALARM_THRESH_HIGH              1000
+
+#define DM_DIG_MAX                             0x3e
+#define DM_DIG_MIN                             0x1e
+
+#define DM_DIG_FA_UPPER                                0x32
+#define DM_DIG_FA_LOWER                                0x20
+#define DM_DIG_FA_TH0                          0x20
+#define DM_DIG_FA_TH1                          0x100
+#define DM_DIG_FA_TH2                          0x200
+
+#define DM_DIG_BACKOFF_MAX                     12
+#define DM_DIG_BACKOFF_MIN                     -4
+#define DM_DIG_BACKOFF_DEFAULT                 10
+
+#define RXPATHSELECTION_SS_TH_lOW              30
+#define RXPATHSELECTION_DIFF_TH                        18
+
+#define DM_RATR_STA_INIT                       0
+#define DM_RATR_STA_HIGH                       1
+#define DM_RATR_STA_MIDDLE                     2
+#define DM_RATR_STA_LOW                                3
+
+#define CTS2SELF_THVAL                         30
+#define REGC38_TH                              20
+
+#define WAIOTTHVal                             25
+
+#define TXHIGHPWRLEVEL_NORMAL                  0
+#define TXHIGHPWRLEVEL_LEVEL1                  1
+#define TXHIGHPWRLEVEL_LEVEL2                  2
+#define TXHIGHPWRLEVEL_BT1                     3
+#define TXHIGHPWRLEVEL_BT2                     4
+
+#define DM_TYPE_BYFW                           0
+#define DM_TYPE_BYDRIVER                       1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2                74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1                67
+
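+/* Dynamic BB power-save state: current/previous CCA and RF modes. */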
+struct ps_t {
+       u8 pre_ccastate;
+       u8 cur_ccasate;
+       u8 pre_rfstate;
+       u8 cur_rfstate;
+       long rssi_val_min;
+};
+
+struct dig_t {
+       u8 dig_enable_flag;
+       u8 dig_ext_port_stage;
+       u32 rssi_lowthresh;
+       u32 rssi_highthresh;
+       u32 fa_lowthresh;
+       u32 fa_highthresh;
+       u8 cursta_connectctate;
+       u8 presta_connectstate;
+       u8 curmultista_connectstate;
+       u8 pre_igvalue;
+       u8 cur_igvalue;
+       char backoff_val;
+       char backoff_val_range_max;
+       char backoff_val_range_min;
+       u8 rx_gain_range_max;
+       u8 rx_gain_range_min;
+       u8 rssi_val_min;
+       u8 pre_cck_pd_state;
+       u8 cur_cck_pd_state;
+       u8 pre_cck_fa_state;
+       u8 cur_cck_fa_state;
+       u8 pre_ccastate;
+       u8 cur_ccasate;
+};
+
+struct swat_t {
+       u8 failure_cnt;
+       u8 try_flag;
+       u8 stop_trying;
+       long pre_rssi;
+       long trying_threshold;
+       u8 cur_antenna;
+       u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+       DIG_TYPE_THRESH_HIGH = 0,
+       DIG_TYPE_THRESH_LOW = 1,
+       DIG_TYPE_BACKOFF = 2,
+       DIG_TYPE_RX_GAIN_MIN = 3,
+       DIG_TYPE_RX_GAIN_MAX = 4,
+       DIG_TYPE_ENABLE = 5,
+       DIG_TYPE_DISABLE = 6,
+       DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+       CCK_PD_STAGE_LowRssi = 0,
+       CCK_PD_STAGE_HighRssi = 1,
+       CCK_FA_STAGE_Low = 2,
+       CCK_FA_STAGE_High = 3,
+       CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+       CCA_1R = 0,
+       CCA_2R = 1,
+       CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+       RF_SAVE = 0,
+       RF_NORMAL = 1,
+       RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+       ANS_ANTENNA_B = 1,
+       ANS_ANTENNA_A = 2,
+       ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+       DIG_EXT_PORT_STAGE_0 = 0,
+       DIG_EXT_PORT_STAGE_1 = 1,
+       DIG_EXT_PORT_STAGE_2 = 2,
+       DIG_EXT_PORT_STAGE_3 = 3,
+       DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+       DIG_STA_DISCONNECT = 0,
+       DIG_STA_CONNECT = 1,
+       DIG_STA_BEFORE_CONNECT = 2,
+       DIG_MULTISTA_DISCONNECT = 3,
+       DIG_MULTISTA_CONNECT = 4,
+       DIG_CONNECT_MAX
+};
+
+extern struct dig_t dm_digtable;
+void rtl92c_dm_init(struct ieee80211_hw *hw);
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+
+#endif
 #include "../wifi.h"
 #include "../pci.h"
 #include "../base.h"
-#include "reg.h"
-#include "def.h"
-#include "fw.h"
-#include "table.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "fw_common.h"
 
 static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
 {
@@ -133,17 +132,15 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       bool is_version_b;
        u8 *bufferPtr = (u8 *) buffer;
 
        RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
 
-       is_version_b = IS_CHIP_VER_B(version);
-       if (is_version_b) {
+       if (IS_CHIP_VER_B(version)) {
                u32 pageNums, remainSize;
                u32 page, offset;
 
-               if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
+               if (IS_HARDWARE_TYPE_8192CE(rtlhal))
                        _rtl92c_fill_dummy(bufferPtr, &size);
 
                pageNums = size / FW_8192C_PAGE_SIZE;
@@ -231,14 +228,14 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        u32 fwsize;
        int err;
        enum version_8192c version = rtlhal->version;
+       const struct firmware *firmware;
 
-       const struct firmware *firmware = NULL;
-
+       printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n",
+              rtlpriv->cfg->fw_name);
        err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
                               rtlpriv->io.dev);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("Failed to request firmware!\n"));
+               printk(KERN_ERR "rtl8192cu: Firmware loading failed\n");
                return 1;
        }
 
@@ -281,6 +278,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
 
        return 0;
 }
+EXPORT_SYMBOL(rtl92c_download_fw);
 
 static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
 {
@@ -318,12 +316,12 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
 
        while (true) {
                spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
-               if (rtlhal->b_h2c_setinprogress) {
+               if (rtlhal->h2c_setinprogress) {
                        RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
                                 ("H2C set in progress! Wait to set.."
                                  "element_id(%d).\n", element_id));
 
-                       while (rtlhal->b_h2c_setinprogress) {
+                       while (rtlhal->h2c_setinprogress) {
                                spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
                                                       flag);
                                h2c_waitcounter++;
@@ -339,7 +337,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
                        }
                        spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
                } else {
-                       rtlhal->b_h2c_setinprogress = true;
+                       rtlhal->h2c_setinprogress = true;
                        spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
                        break;
                }
@@ -495,7 +493,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
        }
 
        spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
-       rtlhal->b_h2c_setinprogress = false;
+       rtlhal->h2c_setinprogress = false;
        spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
 
        RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
@@ -507,7 +505,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        u32 tmp_cmdbuf[2];
 
-       if (rtlhal->bfw_ready == false) {
+       if (rtlhal->fw_ready == false) {
                RT_ASSERT(false, ("return H2C cmd because of Fw "
                                  "download fail!!!\n"));
                return;
@@ -519,6 +517,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
 
        return;
 }
+EXPORT_SYMBOL(rtl92c_fill_h2c_cmd);
 
 void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
 {
@@ -539,6 +538,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
                u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
        }
 }
+EXPORT_SYMBOL(rtl92c_firmware_selfreset);
 
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
 {
@@ -559,39 +559,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
        rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
 
 }
-
-static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
-                                   struct sk_buff *skb)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       struct rtl8192_tx_ring *ring;
-       struct rtl_tx_desc *pdesc;
-       u8 own;
-       unsigned long flags;
-       struct sk_buff *pskb = NULL;
-
-       ring = &rtlpci->tx_ring[BEACON_QUEUE];
-
-       pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
-               kfree_skb(pskb);
-
-       spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
-
-       pdesc = &ring->desc[0];
-       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
-
-       rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
-
-       __skb_queue_tail(&ring->queue, skb);
-
-       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
-       rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
-
-       return true;
-}
+EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
 
 #define BEACON_PG              0 /*->1*/
 #define PSPOLL_PG              2
@@ -776,7 +744,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
        memcpy((u8 *) skb_put(skb, totalpacketlen),
               &reserved_page_packet, totalpacketlen);
 
-       rtstatus = _rtl92c_cmd_send_packet(hw, skb);
+       rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb);
 
        if (rtstatus)
                b_dlok = true;
@@ -793,6 +761,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
 }
+EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
 
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
 {
@@ -802,3 +771,4 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
 
        rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
 }
+EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
new file mode 100644 (file)
index 0000000..2f624fc
--- /dev/null
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+
+
+MODULE_AUTHOR("lizhaoming      <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Georgia         <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang       <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger    <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
new file mode 100644 (file)
index 0000000..a702282
--- /dev/null
@@ -0,0 +1,2042 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "dm_common.h"
+#include "phy_common.h"
+
+/* Define macro to shorten lines */
+#define MCS_TXPWR      mcs_txpwrlevel_origoffset
+
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 returnvalue, originalvalue, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+                                              "bitmask(%#x)\n", regaddr,
+                                              bitmask));
+       originalvalue = rtl_read_dword(rtlpriv, regaddr);
+       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+       returnvalue = (originalvalue & bitmask) >> bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+                                              "Addr[0x%x]=0x%x\n", bitmask,
+                                              regaddr, originalvalue));
+
+       return returnvalue;
+
+}
+EXPORT_SYMBOL(rtl92c_phy_query_bb_reg);
+
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+                          u32 regaddr, u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 originalvalue, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+                                              " data(%#x)\n", regaddr, bitmask,
+                                              data));
+
+       if (bitmask != MASKDWORD) {
+               originalvalue = rtl_read_dword(rtlpriv, regaddr);
+               bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+               data = ((originalvalue & (~bitmask)) | (data << bitshift));
+       }
+
+       rtl_write_dword(rtlpriv, regaddr, data);
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+                                              " data(%#x)\n", regaddr, bitmask,
+                                              data));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
+
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+                                        enum radio_path rfpath, u32 offset)
+{
+       RT_ASSERT(false, ("deprecated!\n"));
+       return 0;
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
+
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+                                          enum radio_path rfpath, u32 offset,
+                                          u32 data)
+{
+       RT_ASSERT(false, ("deprecated!\n"));
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
+
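+/*
+ * Read an RF register over the HSSI/LSSI serial interface; the readback
+ * register depends on whether PI mode is enabled for this path.
+ */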
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+                                     enum radio_path rfpath, u32 offset)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+       u32 newoffset;
+       u32 tmplong, tmplong2;
+       u8 rfpi_enable = 0;
+       u32 retvalue;
+
+       offset &= 0x3f;
+       newoffset = offset;
+       if (RT_CANNOT_IO(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
+               return 0xFFFFFFFF;
+       }
+       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+       if (rfpath == RF90_PATH_A)
+               tmplong2 = tmplong;
+       else
+               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+       tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+           (newoffset << 23) | BLSSIREADEDGE;
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+                     tmplong & (~BLSSIREADEDGE));
+       mdelay(1);
+       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+       mdelay(1);
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+                     tmplong | BLSSIREADEDGE);
+       mdelay(1);
+       if (rfpath == RF90_PATH_A)
+               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+                                                BIT(8));
+       else if (rfpath == RF90_PATH_B)
+               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+                                                BIT(8));
+       if (rfpi_enable)
+               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+                                        BLSSIREADBACKDATA);
+       else
+               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+                                        BLSSIREADBACKDATA);
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+                                              rfpath, pphyreg->rflssi_readback,
+                                              retvalue));
+       return retvalue;
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
+
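+/* Write an RF register: 6-bit offset and 20-bit data in one 3-wire word. */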
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+                                       enum radio_path rfpath, u32 offset,
+                                       u32 data)
+{
+       u32 data_and_addr;
+       u32 newoffset;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+       if (RT_CANNOT_IO(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
+               return;
+       }
+       offset &= 0x3f;
+       newoffset = offset;
+       data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+                                              rfpath, pphyreg->rf3wire_offset,
+                                              data_and_addr));
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
+
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+{
+       u32 i;
+
+       for (i = 0; i <= 31; i++) {
+               if (((bitmask >> i) & 0x1) == 1)
+                       break;
+       }
+       return i;
+}
+EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+
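+/* Configure the baseband for a single Tx path (1T parts). */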
+static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+{
+       rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+       rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
+       rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
+       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
+       rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
+}
+
+bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtlpriv->cfg->ops->phy_rf6052_config(hw);
+}
+EXPORT_SYMBOL(rtl92c_phy_rf_config);
+
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       bool rtstatus;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+       rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+                                                BASEBAND_CONFIG_PHY_REG);
+       if (rtstatus != true) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+               return false;
+       }
+       if (rtlphy->rf_type == RF_1T2R) {
+               _rtl92c_phy_bb_config_1t(hw);
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+       }
+       if (rtlefuse->autoload_failflag == false) {
+               rtlphy->pwrgroup_cnt = 0;
+               rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
+                                                  BASEBAND_CONFIG_PHY_REG);
+       }
+       if (rtstatus != true) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+               return false;
+       }
+       rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+                                                BASEBAND_CONFIG_AGC_TAB);
+       if (rtstatus != true) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+               return false;
+       }
+       rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+                                               RFPGA0_XA_HSSIPARAMETER2,
+                                               0x200));
+       return true;
+}
+EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
+
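+/*
+ * Cache the per-rate Tx power offsets parsed from the PHY_REG_PG table,
+ * one power group per pass through the full register set.
+ */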
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+                                                  u32 regaddr, u32 bitmask,
+                                                  u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (regaddr == RTXAGC_A_RATE18_06) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
+       }
+       if (regaddr == RTXAGC_A_RATE54_24) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
+       }
+       if (regaddr == RTXAGC_A_CCK1_MCS32) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
+       }
+       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
+       }
+       if (regaddr == RTXAGC_A_MCS03_MCS00) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
+       }
+       if (regaddr == RTXAGC_A_MCS07_MCS04) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
+       }
+       if (regaddr == RTXAGC_A_MCS11_MCS08) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
+       }
+       if (regaddr == RTXAGC_A_MCS15_MCS12) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
+       }
+       if (regaddr == RTXAGC_B_RATE18_06) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
+       }
+       if (regaddr == RTXAGC_B_RATE54_24) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
+       }
+
+       if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
+       }
+
+       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS03_MCS00) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS07_MCS04) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS11_MCS08) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS15_MCS12) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));
+
+               rtlphy->pwrgroup_cnt++;
+       }
+}
+EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
+
+void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->default_initialgain[0] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[1] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[2] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[3] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Default initial gain (c50=0x%x, "
+                 "c58=0x%x, c60=0x%x, c68=0x%x\n",
+                 rtlphy->default_initialgain[0],
+                 rtlphy->default_initialgain[1],
+                 rtlphy->default_initialgain[2],
+                 rtlphy->default_initialgain[3]));
+
+       rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+                                              ROFDM0_RXDETECTOR3, MASKBYTE0);
+       rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+                                             ROFDM0_RXDETECTOR2, MASKDWORD);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Default framesync (0x%x) = 0x%x\n",
+                 ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
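+/*
+ * Build the per-RF-path register map in rtlphy->phyreg_def[]: for paths A-D
+ * this records which BB registers hold the RF interface controls, 3-wire and
+ * LSSI parameters, gain stage, HSSI parameters, switch control, AGC core,
+ * RX/TX IQ imbalance, AFE and LSSI read-back offsets.
+ */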
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+           RFPGA0_XA_LSSIPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+           RFPGA0_XB_LSSIPARAMETER;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
+           RFPGA0_XAB_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
+           RFPGA0_XAB_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
+           RFPGA0_XCD_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
+           RFPGA0_XCD_SWITCHCONTROL;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
+           ROFDM0_XARXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
+           ROFDM0_XBRXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
+           ROFDM0_XCRXIQIMBANLANCE;
+       rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
+           ROFDM0_XDRXIQIMBALANCE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+       rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+       rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
+           ROFDM0_XATXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
+           ROFDM0_XBTXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
+           ROFDM0_XCTXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
+           ROFDM0_XDTXIQIMBALANCE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
+           RFPGA0_XA_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
+           RFPGA0_XB_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
+           RFPGA0_XC_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
+           RFPGA0_XD_LSSIREADBACK;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
+           TRANSCEIVEA_HSPI_READBACK;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
+           TRANSCEIVEB_HSPI_READBACK;
+
+}
+EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
+
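+/*
+ * Return the current transmit power in dBm: the CCK index, the OFDM index
+ * plus the legacy/HT power difference (as 11g) and the plain OFDM index
+ * (as 11n) are each converted to dBm and the largest value is reported.
+ */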
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 txpwr_level;
+       long txpwr_dbm;
+
+       txpwr_level = rtlphy->cur_cck_txpwridx;
+       txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
+                                                WIRELESS_MODE_B, txpwr_level);
+       txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
+           rtlefuse->legacy_ht_txpowerdiff;
+       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+                                        WIRELESS_MODE_G,
+                                        txpwr_level) > txpwr_dbm)
+               txpwr_dbm =
+                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+                                                txpwr_level);
+       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+                                        WIRELESS_MODE_N_24G,
+                                        txpwr_level) > txpwr_dbm)
+               txpwr_dbm =
+                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+                                                txpwr_level);
+       *powerlevel = txpwr_dbm;
+}
+
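+/*
+ * Fetch the per-channel CCK and HT40 power indices for both RF paths from
+ * the efuse tables; the 1S or 2S HT40 table is chosen from the RF type.
+ */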
+static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+                                     u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 index = (channel - 1);
+
+       cckpowerlevel[RF90_PATH_A] =
+           rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+       cckpowerlevel[RF90_PATH_B] =
+           rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+       if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
+               ofdmpowerlevel[RF90_PATH_A] =
+                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+               ofdmpowerlevel[RF90_PATH_B] =
+                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+       } else if (get_rf_type(rtlphy) == RF_2T2R) {
+               ofdmpowerlevel[RF90_PATH_A] =
+                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+               ofdmpowerlevel[RF90_PATH_B] =
+                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+       }
+}
+
+static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
+                                        u8 channel, u8 *cckpowerlevel,
+                                        u8 *ofdmpowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+       rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
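+/*
+ * Program the transmit power for @channel.  The CCK and OFDM indices are
+ * read from the efuse tables, remembered as the current indices and written
+ * to the RF6052 through the chip-specific callbacks.  Nothing is done when
+ * the power table did not come from the eeprom/efuse.
+ */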
+void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+       u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+       if (!rtlefuse->txpwr_fromeprom)
+               return;
+       _rtl92c_get_txpower_index(hw, channel,
+                                 &cckpowerlevel[0], &ofdmpowerlevel[0]);
+       _rtl92c_ccxpower_index_check(hw,
+                                    channel, &cckpowerlevel[0],
+                                    &ofdmpowerlevel[0]);
+       rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+       rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0],
+                                                      channel);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_txpower_level);
+
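+/*
+ * Apply a requested power level in dBm: convert it back to CCK and OFDM
+ * indices, write the same index into every channel entry of the
+ * efuse-derived tables and reprogram the current channel.
+ */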
+bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 idx;
+       u8 rf_path;
+
+       u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+                                                     WIRELESS_MODE_B,
+                                                     power_indbm);
+       u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+                                                      WIRELESS_MODE_N_24G,
+                                                      power_indbm);
+       if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
+               ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
+       else
+               ofdmtxpwridx = 0;
+       RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
+                ("%ld dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+                 power_indbm, ccktxpwridx, ofdmtxpwridx));
+       for (idx = 0; idx < 14; idx++) {
+               for (rf_path = 0; rf_path < 2; rf_path++) {
+                       rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
+                       rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
+                           ofdmtxpwridx;
+                       rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
+                           ofdmtxpwridx;
+               }
+       }
+       rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+       return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
+
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
+{
+}
+EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
+
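+/*
+ * dBm -> power index conversion: index = (dBm - offset) * 2, clamped to
+ * [0, MAX_TXPWR_IDX_NMODE_92S], where offset is -7 dBm for 11b and -8 dBm
+ * for 11g/11n.
+ */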
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+                               enum wireless_mode wirelessmode,
+                               long power_indbm)
+{
+       u8 txpwridx;
+       long offset;
+
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               offset = -7;
+               break;
+       case WIRELESS_MODE_G:
+       case WIRELESS_MODE_N_24G:
+               offset = -8;
+               break;
+       default:
+               offset = -8;
+               break;
+       }
+
+       if ((power_indbm - offset) > 0)
+               txpwridx = (u8) ((power_indbm - offset) * 2);
+       else
+               txpwridx = 0;
+
+       if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
+               txpwridx = MAX_TXPWR_IDX_NMODE_92S;
+
+       return txpwridx;
+}
+EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx);
+
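+/*
+ * The inverse conversion: dBm = index / 2 + offset, with offset -7 for 11b
+ * and -8 otherwise.
+ */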
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+                                 enum wireless_mode wirelessmode,
+                                 u8 txpwridx)
+{
+       long offset;
+       long pwrout_dbm;
+
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               offset = -7;
+               break;
+       case WIRELESS_MODE_G:
+       case WIRELESS_MODE_N_24G:
+               offset = -8;
+               break;
+       default:
+               offset = -8;
+               break;
+       }
+       pwrout_dbm = txpwridx / 2 + offset;
+       return pwrout_dbm;
+}
+EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
+
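+/*
+ * Pause (SCAN_OPT_BACKUP) or resume (SCAN_OPT_RESTORE) the dynamic
+ * mechanisms around a scan by sending the matching IO command to the HAL.
+ */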
+void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       enum io_type iotype;
+
+       if (!is_hal_stop(rtlhal)) {
+               switch (operation) {
+               case SCAN_OPT_BACKUP:
+                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+
+                       break;
+               case SCAN_OPT_RESTORE:
+                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("Unknown Scan Backup operation.\n"));
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
+
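+/*
+ * Request a channel-bandwidth change.  Nothing happens while another change
+ * is still in flight.  The register work is delegated to the chip-specific
+ * phy_set_bw_mode_callback(); if the HAL is stopped or IO is blocked the
+ * previous bandwidth is restored and the request is dropped.
+ */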
+void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 tmp_bw = rtlphy->current_chan_bw;
+
+       if (rtlphy->set_bwmode_inprogress)
+               return;
+       rtlphy->set_bwmode_inprogress = true;
+       if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
+               rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
+       else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        ("FALSE driver sleep or unload\n"));
+               rtlphy->set_bwmode_inprogress = false;
+               rtlphy->current_chan_bw = tmp_bw;
+       }
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bw_mode);
+
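+/*
+ * Drive the channel-switch state machine: keep calling
+ * _rtl92c_phy_sw_chnl_step_by_step(), honouring any delay it requests,
+ * until it reports that the switch is complete.
+ */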
+void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 delay;
+
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+                ("switch to channel%d\n", rtlphy->current_channel));
+       if (is_hal_stop(rtlhal))
+               return;
+       do {
+               if (!rtlphy->sw_chnl_inprogress)
+                       break;
+               if (!_rtl92c_phy_sw_chnl_step_by_step
+                   (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
+                    &rtlphy->sw_chnl_step, &delay)) {
+                       if (delay > 0)
+                               mdelay(delay);
+                       else
+                               continue;
+               } else
+                       rtlphy->sw_chnl_inprogress = false;
+               break;
+       } while (true);
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
+
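+/*
+ * Start a switch to rtlphy->current_channel.  Returns 0 without doing
+ * anything if a channel switch or bandwidth change is already in progress;
+ * otherwise the state machine is reset and, unless the HAL is stopped or IO
+ * is blocked, the switch is carried out immediately via the callback.
+ */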
+u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlphy->sw_chnl_inprogress)
+               return 0;
+       if (rtlphy->set_bwmode_inprogress)
+               return 0;
+       RT_ASSERT((rtlphy->current_channel <= 14),
+                 ("WIRELESS_MODE_G but channel>14"));
+       rtlphy->sw_chnl_inprogress = true;
+       rtlphy->sw_chnl_stage = 0;
+       rtlphy->sw_chnl_step = 0;
+       if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+               rtl92c_phy_sw_chnl_callback(hw);
+               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+                        ("sw_chnl_inprogress false schedule workitem\n"));
+               rtlphy->sw_chnl_inprogress = false;
+       } else {
+               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+                        ("sw_chnl_inprogress false driver sleep or"
+                         " unload\n"));
+               rtlphy->sw_chnl_inprogress = false;
+       }
+       return 1;
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
+
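+/*
+ * Execute one step of the three-stage channel-switch command list: stage 0
+ * runs the common pre-commands (set tx power), stage 1 the RF-dependent
+ * commands (write the channel into RF_CHNLBW on every path), stage 2 the
+ * post-commands.  Returns true once the final CMDID_END is reached,
+ * otherwise stores the time to wait before the next step in *delay.
+ */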
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+                                            u8 channel, u8 *stage, u8 *step,
+                                            u32 *delay)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+       u32 precommoncmdcnt;
+       struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+       u32 postcommoncmdcnt;
+       struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+       u32 rfdependcmdcnt;
+       struct swchnlcmd *currentcmd = NULL;
+       u8 rfpath;
+       u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+       precommoncmdcnt = 0;
+       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT,
+                                        CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
+       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+
+       postcommoncmdcnt = 0;
+
+       _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+                                        MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+
+       rfdependcmdcnt = 0;
+
+       RT_ASSERT((channel >= 1 && channel <= 14),
+                 ("illegal channel for Zebra: %d\n", channel));
+
+       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+                                        RF_CHNLBW, channel, 10);
+
+       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
+                                        0);
+
+       do {
+               switch (*stage) {
+               case 0:
+                       currentcmd = &precommoncmd[*step];
+                       break;
+               case 1:
+                       currentcmd = &rfdependcmd[*step];
+                       break;
+               case 2:
+                       currentcmd = &postcommoncmd[*step];
+                       break;
+               }
+
+               if (currentcmd->cmdid == CMDID_END) {
+                       if ((*stage) == 2) {
+                               return true;
+                       } else {
+                               (*stage)++;
+                               (*step) = 0;
+                               continue;
+                       }
+               }
+
+               switch (currentcmd->cmdid) {
+               case CMDID_SET_TXPOWEROWER_LEVEL:
+                       rtl92c_phy_set_txpower_level(hw, channel);
+                       break;
+               case CMDID_WRITEPORT_ULONG:
+                       rtl_write_dword(rtlpriv, currentcmd->para1,
+                                       currentcmd->para2);
+                       break;
+               case CMDID_WRITEPORT_USHORT:
+                       rtl_write_word(rtlpriv, currentcmd->para1,
+                                      (u16) currentcmd->para2);
+                       break;
+               case CMDID_WRITEPORT_UCHAR:
+                       rtl_write_byte(rtlpriv, currentcmd->para1,
+                                      (u8) currentcmd->para2);
+                       break;
+               case CMDID_RF_WRITEREG:
+                       for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+                               rtlphy->rfreg_chnlval[rfpath] =
+                                   ((rtlphy->rfreg_chnlval[rfpath] &
+                                     0xfffffc00) | currentcmd->para2);
+
+                               rtl_set_rfreg(hw, (enum radio_path)rfpath,
+                                             currentcmd->para1,
+                                             RFREG_OFFSET_MASK,
+                                             rtlphy->rfreg_chnlval[rfpath]);
+                       }
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("switch case not processed\n"));
+                       break;
+               }
+
+               break;
+       } while (true);
+
+       (*delay) = currentcmd->msdelay;
+       (*step)++;
+       return false;
+}
+
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+                                            u32 cmdtableidx, u32 cmdtablesz,
+                                            enum swchnlcmd_id cmdid,
+                                            u32 para1, u32 para2, u32 msdelay)
+{
+       struct swchnlcmd *pcmd;
+
+       if (cmdtable == NULL) {
+               RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+               return false;
+       }
+
+       if (cmdtableidx >= cmdtablesz)
+               return false;
+
+       pcmd = cmdtable + cmdtableidx;
+       pcmd->cmdid = cmdid;
+       pcmd->para1 = para1;
+       pcmd->para2 = para2;
+       pcmd->msdelay = msdelay;
+       return true;
+}
+
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
+{
+       return true;
+}
+EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath);
+
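+/*
+ * Run the one-shot IQ calibration for path A (optionally with the path B
+ * settings loaded as well), then inspect the result registers: bit 0 of the
+ * return value is set when the first (0xe94/0xe9c) check passes, bit 1 when
+ * the second (0xea4/0xeac) check also passes.
+ */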
+static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+       u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+       u8 result = 0x00;
+
+       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
+       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+       rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
+                     config_pathb ? 0x28160202 : 0x28160502);
+
+       if (config_pathb) {
+               rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+               rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
+       }
+
+       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+       mdelay(IQK_DELAY_TIME);
+
+       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+       reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+       reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+       reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+       if (!(reg_eac & BIT(28)) &&
+           (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+           (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+               result |= 0x01;
+       else
+               return result;
+
+       if (!(reg_eac & BIT(27)) &&
+           (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+           (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+               result |= 0x02;
+       return result;
+}
+
+static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+       u8 result = 0x00;
+
+       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+       mdelay(IQK_DELAY_TIME);
+       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+       reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+       reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+       reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+       reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+       if (!(reg_eac & BIT(31)) &&
+           (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+           (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+               result |= 0x01;
+       else
+               return result;
+
+       if (!(reg_eac & BIT(30)) &&
+           (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+           (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+               result |= 0x02;
+       return result;
+}
+
+static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+                                              bool iqk_ok, long result[][8],
+                                              u8 final_candidate, bool btxonly)
+{
+       u32 oldval_0, x, tx0_a, reg;
+       long y, tx0_c;
+
+       if (final_candidate == 0xFF)
+               return;
+       else if (iqk_ok) {
+               oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                         MASKDWORD) >> 22) & 0x3FF;
+               x = result[final_candidate][0];
+               if ((x & 0x00000200) != 0)
+                       x = x | 0xFFFFFC00;
+               tx0_a = (x * oldval_0) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+                             ((x * oldval_0 >> 7) & 0x1));
+               y = result[final_candidate][1];
+               if ((y & 0x00000200) != 0)
+                       y = y | 0xFFFFFC00;
+               tx0_c = (y * oldval_0) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+                             ((tx0_c & 0x3C0) >> 6));
+               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+                             (tx0_c & 0x3F));
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+                             ((y * oldval_0 >> 7) & 0x1));
+               if (btxonly)
+                       return;
+               reg = result[final_candidate][2];
+               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+               reg = result[final_candidate][3] & 0x3F;
+               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+               reg = (result[final_candidate][3] >> 6) & 0xF;
+               rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+       }
+}
+
+static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
+                                              bool iqk_ok, long result[][8],
+                                              u8 final_candidate, bool btxonly)
+{
+       u32 oldval_1, x, tx1_a, reg;
+       long y, tx1_c;
+
+       if (final_candidate == 0xFF)
+               return;
+       else if (iqk_ok) {
+               oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+                                         MASKDWORD) >> 22) & 0x3FF;
+               x = result[final_candidate][4];
+               if ((x & 0x00000200) != 0)
+                       x = x | 0xFFFFFC00;
+               tx1_a = (x * oldval_1) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
+                             ((x * oldval_1 >> 7) & 0x1));
+               y = result[final_candidate][5];
+               if ((y & 0x00000200) != 0)
+                       y = y | 0xFFFFFC00;
+               tx1_c = (y * oldval_1) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
+                             ((tx1_c & 0x3C0) >> 6));
+               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
+                             (tx1_c & 0x3F));
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
+                             ((y * oldval_1 >> 7) & 0x1));
+               if (btxonly)
+                       return;
+               reg = result[final_candidate][6];
+               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+               reg = result[final_candidate][7] & 0x3F;
+               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+               reg = (result[final_candidate][7] >> 6) & 0xF;
+               rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+       }
+}
+
+static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
+                                           u32 *addareg, u32 *addabackup,
+                                           u32 registernum)
+{
+       u32 i;
+
+       for (i = 0; i < registernum; i++)
+               addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+
+static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
+                                          u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+               macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+       macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+
+static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
+                                             u32 *addareg, u32 *addabackup,
+                                             u32 regiesternum)
+{
+       u32 i;
+
+       for (i = 0; i < regiesternum; i++)
+               rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+
+static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
+                                            u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+               rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+       rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
+                                    u32 *addareg, bool is_patha_on, bool is2t)
+{
+       u32 path_on;
+       u32 i;
+
+       path_on = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+       if (!is2t) {
+               path_on = 0x0bdb25a0;
+               rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+       } else {
+               rtl_set_bbreg(hw, addareg[0], MASKDWORD, path_on);
+       }
+
+       for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+               rtl_set_bbreg(hw, addareg[i], MASKDWORD, path_on);
+}
+
+static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+                                               u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
+       for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+               rtl_write_byte(rtlpriv, macreg[i],
+                              (u8) (macbackup[i] & (~BIT(3))));
+       rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+
+static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+
+static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+       u32 mode;
+
+       mode = pi_mode ? 0x01000100 : 0x01000000;
+       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+
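+/*
+ * Compare calibration runs c1 and c2 entry by entry.  A difference above
+ * MAX_TOLERANCE either selects the non-zero run as a fallback candidate
+ * (when the other run returned zeros for that register pair) or sets a bit
+ * in the similarity bitmap; result[3] is then filled with whatever values
+ * are still considered usable.  Returns true only when the two runs agree
+ * everywhere.
+ */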
+static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
+                                          long result[][8], u8 c1, u8 c2)
+{
+       u32 i, j, diff, simularity_bitmap, bound;
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       u8 final_candidate[2] = { 0xFF, 0xFF };
+       bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+       if (is2t)
+               bound = 8;
+       else
+               bound = 4;
+
+       simularity_bitmap = 0;
+
+       for (i = 0; i < bound; i++) {
+               diff = (result[c1][i] > result[c2][i]) ?
+                   (result[c1][i] - result[c2][i]) :
+                   (result[c2][i] - result[c1][i]);
+
+               if (diff > MAX_TOLERANCE) {
+                       if ((i == 2 || i == 6) && !simularity_bitmap) {
+                               if (result[c1][i] + result[c1][i + 1] == 0)
+                                       final_candidate[(i / 4)] = c2;
+                               else if (result[c2][i] + result[c2][i + 1] == 0)
+                                       final_candidate[(i / 4)] = c1;
+                               else
+                                       simularity_bitmap = simularity_bitmap |
+                                           (1 << i);
+                       } else
+                               simularity_bitmap =
+                                   simularity_bitmap | (1 << i);
+               }
+       }
+
+       if (simularity_bitmap == 0) {
+               for (i = 0; i < (bound / 4); i++) {
+                       if (final_candidate[i] != 0xFF) {
+                               for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+                                       result[3][j] =
+                                           result[final_candidate[i]][j];
+                               bresult = false;
+                       }
+               }
+               return bresult;
+       } else if (!(simularity_bitmap & 0x0F)) {
+               for (i = 0; i < 4; i++)
+                       result[3][i] = result[c1][i];
+               return false;
+       } else if (!(simularity_bitmap & 0xF0) && is2t) {
+               for (i = 4; i < 8; i++)
+                       result[3][i] = result[c1][i];
+               return false;
+       } else {
+               return false;
+       }
+
+}
+
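+/*
+ * One pass (t = 0, 1, 2) of the IQ calibration.  The first pass saves the
+ * ADDA and MAC register state; every pass switches the ADDA path on, forces
+ * PI mode if needed, runs the path A (and, for 2T parts, path B) IQK with up
+ * to two retries and stores the raw 0xe94..0xecc readings in result[t][].
+ * The modified BB registers are put back at the end, and the saved ADDA/MAC
+ * state is reloaded on the later passes.
+ */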
+static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
+                                    long result[][8], u8 t, bool is2t)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 i;
+       u8 patha_ok, pathb_ok;
+       u32 adda_reg[IQK_ADDA_REG_NUM] = {
+               0x85c, 0xe6c, 0xe70, 0xe74,
+               0xe78, 0xe7c, 0xe80, 0xe84,
+               0xe88, 0xe8c, 0xed0, 0xed4,
+               0xed8, 0xedc, 0xee0, 0xeec
+       };
+
+       u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+               0x522, 0x550, 0x551, 0x040
+       };
+
+       const u32 retrycount = 2;
+
+       u32 bbvalue;
+
+       if (t == 0) {
+               bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+
+               _rtl92c_phy_save_adda_registers(hw, adda_reg,
+                                               rtlphy->adda_backup, 16);
+               _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
+                                              rtlphy->iqk_mac_backup);
+       }
+       _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
+       if (t == 0) {
+               rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+                                                  RFPGA0_XA_HSSIPARAMETER1,
+                                                  BIT(8));
+       }
+       if (!rtlphy->rfpi_enable)
+               _rtl92c_phy_pi_mode_switch(hw, true);
+       if (t == 0) {
+               rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
+               rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
+               rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
+       }
+       rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+       rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+       rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+       if (is2t) {
+               rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
+       }
+       _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
+                                           rtlphy->iqk_mac_backup);
+       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
+       if (is2t)
+               rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
+       for (i = 0; i < retrycount; i++) {
+               patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
+               if (patha_ok == 0x03) {
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       break;
+               } else if (i == (retrycount - 1) && patha_ok == 0x01)
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94,
+                                                     MASKDWORD) & 0x3FF0000) >>
+                                                     16;
+               result[t][1] =
+                   (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
+
+       }
+
+       if (is2t) {
+               _rtl92c_phy_path_a_standby(hw);
+               _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
+               for (i = 0; i < retrycount; i++) {
+                       pathb_ok = _rtl92c_phy_path_b_iqk(hw);
+                       if (pathb_ok == 0x03) {
+                               result[t][4] = (rtl_get_bbreg(hw,
+                                                     0xeb4,
+                                                     MASKDWORD) &
+                                               0x3FF0000) >> 16;
+                               result[t][5] =
+                                   (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+                                    0x3FF0000) >> 16;
+                               result[t][6] =
+                                   (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
+                                    0x3FF0000) >> 16;
+                               result[t][7] =
+                                   (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
+                                    0x3FF0000) >> 16;
+                               break;
+                       } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+                               result[t][4] = (rtl_get_bbreg(hw,
+                                                     0xeb4,
+                                                     MASKDWORD) &
+                                               0x3FF0000) >> 16;
+                       }
+                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+               }
+       }
+       rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
+       rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
+       rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+       if (is2t)
+               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+       if (t != 0) {
+               if (!rtlphy->rfpi_enable)
+                       _rtl92c_phy_pi_mode_switch(hw, false);
+               _rtl92c_phy_reload_adda_registers(hw, adda_reg,
+                                                 rtlphy->adda_backup, 16);
+               _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
+                                                rtlphy->iqk_mac_backup);
+       }
+}
+
+static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
+                                    char delta, bool is2t)
+{
+       /* This routine is deliberately dummied out for later fixes */
+#if 0
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+       u32 reg_d[PATH_NUM];
+       u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
+
+       u32 bb_backup[APK_BB_REG_NUM];
+       u32 bb_reg[APK_BB_REG_NUM] = {
+               0x904, 0xc04, 0x800, 0xc08, 0x874
+       };
+       u32 bb_ap_mode[APK_BB_REG_NUM] = {
+               0x00000020, 0x00a05430, 0x02040000,
+               0x000800e4, 0x00204000
+       };
+       u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
+               0x00000020, 0x00a05430, 0x02040000,
+               0x000800e4, 0x22204000
+       };
+
+       u32 afe_backup[APK_AFE_REG_NUM];
+       u32 afe_reg[APK_AFE_REG_NUM] = {
+               0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
+               0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
+               0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
+               0xeec
+       };
+
+       u32 mac_backup[IQK_MAC_REG_NUM];
+       u32 mac_reg[IQK_MAC_REG_NUM] = {
+               0x522, 0x550, 0x551, 0x040
+       };
+
+       u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
+               {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
+       };
+
+       u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
+               {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
+       };
+
+       u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
+               {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
+       };
+
+       u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
+               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
+       };
+
+       u32 afe_on_off[PATH_NUM] = {
+               0x04db25a4, 0x0b1b25a4
+       };
+
+       u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
+
+       u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
+
+       u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
+
+       u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
+
+       const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
+               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
+       };
+
+       const u32 apk_normal_setting_value_1[13] = {
+               0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
+               0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
+               0x12680000, 0x00880000, 0x00880000
+       };
+
+       const u32 apk_normal_setting_value_2[16] = {
+               0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
+               0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
+               0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
+               0x00050006
+       };
+
+       u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
+
+       long bb_offset, delta_v, delta_offset;
+
+       if (!is2t)
+               pathbound = 1;
+
+       for (index = 0; index < PATH_NUM; index++) {
+               apk_offset[index] = apk_normal_offset[index];
+               apk_value[index] = apk_normal_value[index];
+               afe_on_off[index] = 0x6fdb25a4;
+       }
+
+       for (index = 0; index < APK_BB_REG_NUM; index++) {
+               for (path = 0; path < pathbound; path++) {
+                       apk_rf_init_value[path][index] =
+                           apk_normal_rf_init_value[path][index];
+                       apk_rf_value_0[path][index] =
+                           apk_normal_rf_value_0[path][index];
+               }
+               bb_ap_mode[index] = bb_normal_ap_mode[index];
+
+               apkbound = 6;
+       }
+
+       for (index = 0; index < APK_BB_REG_NUM; index++) {
+               if (index == 0)
+                       continue;
+               bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
+       }
+
+       _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
+
+       _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
+
+       for (path = 0; path < pathbound; path++) {
+               if (path == RF90_PATH_A) {
+                       offset = 0xb00;
+                       for (index = 0; index < 11; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+
+                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+                       offset = 0xb68;
+                       for (; index < 13; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+                       offset = 0xb00;
+                       for (index = 0; index < 16; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_2
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+               } else if (path == RF90_PATH_B) {
+                       offset = 0xb70;
+                       for (index = 0; index < 10; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+                       rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
+                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+                       offset = 0xb68;
+                       index = 11;
+                       for (; index < 13; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+                       offset = 0xb60;
+                       for (index = 0; index < 16; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_2
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+               }
+
+               reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
+                                           0xd, MASKDWORD);
+
+               for (index = 0; index < APK_AFE_REG_NUM; index++)
+                       rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
+                                     afe_on_off[path]);
+
+               if (path == RF90_PATH_A) {
+                       for (index = 0; index < APK_BB_REG_NUM; index++) {
+                               if (index == 0)
+                                       continue;
+                               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
+                                             bb_ap_mode[index]);
+                       }
+               }
+
+               _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
+
+               if (path == 0) {
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
+               } else {
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
+                                     0x10000);
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+                                     0x1000f);
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+                                     0x20103);
+               }
+
+               delta_offset = ((delta + 14) / 2);
+               if (delta_offset < 0)
+                       delta_offset = 0;
+               else if (delta_offset > 12)
+                       delta_offset = 12;
+
+               for (index = 0; index < APK_BB_REG_NUM; index++) {
+                       if (index != 1)
+                               continue;
+
+                       tmpreg = apk_rf_init_value[path][index];
+
+                       if (!rtlefuse->apk_thermalmeterignore) {
+                               bb_offset = (tmpreg & 0xF0000) >> 16;
+
+                               if (!(tmpreg & BIT(15)))
+                                       bb_offset = -bb_offset;
+
+                               delta_v =
+                                   apk_delta_mapping[index][delta_offset];
+
+                               bb_offset += delta_v;
+
+                               if (bb_offset < 0) {
+                                       tmpreg = tmpreg & (~BIT(15));
+                                       bb_offset = -bb_offset;
+                               } else {
+                                       tmpreg = tmpreg | BIT(15);
+                               }
+
+                               tmpreg =
+                                   (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
+                       }
+
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
+                                     MASKDWORD, 0x8992e);
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
+                                     MASKDWORD, apk_rf_value_0[path][index]);
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+                                     MASKDWORD, tmpreg);
+
+                       i = 0;
+                       do {
+                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
+                               rtl_set_bbreg(hw, apk_offset[path],
+                                             MASKDWORD, apk_value[0]);
+                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
+                                       ("PHY_APCalibrate() offset 0x%x "
+                                        "value 0x%x\n",
+                                        apk_offset[path],
+                                        rtl_get_bbreg(hw, apk_offset[path],
+                                                      MASKDWORD)));
+
+                               mdelay(3);
+
+                               rtl_set_bbreg(hw, apk_offset[path],
+                                             MASKDWORD, apk_value[1]);
+                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
+                                       ("PHY_APCalibrate() offset 0x%x "
+                                        "value 0x%x\n",
+                                        apk_offset[path],
+                                        rtl_get_bbreg(hw, apk_offset[path],
+                                                      MASKDWORD)));
+
+                               mdelay(20);
+
+                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+
+                               if (path == RF90_PATH_A)
+                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
+                                                              0x03E00000);
+                               else
+                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
+                                                              0xF8000000);
+
+                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
+                                       ("PHY_APCalibrate() offset "
+                                        "0xbd8[25:21] %x\n", tmpreg));
+
+                               i++;
+
+                       } while (tmpreg > apkbound && i < 4);
+
+                       apk_result[path][index] = tmpreg;
+               }
+       }
+
+       _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
+
+       for (index = 0; index < APK_BB_REG_NUM; index++) {
+               if (index == 0)
+                       continue;
+               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
+       }
+
+       _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
+
+       for (path = 0; path < pathbound; path++) {
+               rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+                             MASKDWORD, reg_d[path]);
+
+               if (path == RF90_PATH_B) {
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+                                     0x1000f);
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+                                     0x20101);
+               }
+
+               if (apk_result[path][1] > 6)
+                       apk_result[path][1] = 6;
+       }
+
+       for (path = 0; path < pathbound; path++) {
+               rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
+                             ((apk_result[path][1] << 15) |
+                              (apk_result[path][1] << 10) |
+                              (apk_result[path][1] << 5) |
+                              apk_result[path][1]));
+
+               if (path == RF90_PATH_A)
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+                                     ((apk_result[path][1] << 15) |
+                                      (apk_result[path][1] << 10) |
+                                      (0x00 << 5) | 0x05));
+               else
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+                                     ((apk_result[path][1] << 15) |
+                                      (apk_result[path][1] << 10) |
+                                      (0x02 << 5) | 0x05));
+
+               rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
+                             ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
+                              0x08));
+
+       }
+
+       rtlphy->apk_done = true;
+#endif
+}
+
+static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+                                         bool bmain, bool is2t)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (is_hal_stop(rtlhal)) {
+               rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
+               rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+       }
+       if (is2t) {
+               if (bmain)
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(6), 0x1);
+               else
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(6), 0x2);
+       } else {
+               if (bmain)
+                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
+               else
+                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
+
+       }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
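+/*
+ * Top-level IQ calibration.  With recovery=true the previously saved BB
+ * registers (rtlphy->iqk_bb_backup) are simply restored.  Otherwise up to
+ * three calibration passes are run; once two passes agree, one of them is
+ * chosen as the final candidate (falling back to the merged result[3] set,
+ * or to neutral defaults, when none agree) and its values are programmed
+ * through the fill_iqk_matrix helpers.
+ */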
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       long result[4][8];
+       u8 i, final_candidate;
+       bool patha_ok, pathb_ok;
+       long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+           reg_ecc, reg_tmp = 0;
+       bool is12simular, is13simular, is23simular;
+       bool start_conttx = false, singletone = false;
+       u32 iqk_bb_reg[10] = {
+               ROFDM0_XARXIQIMBALANCE,
+               ROFDM0_XBRXIQIMBALANCE,
+               ROFDM0_ECCATHRESHOLD,
+               ROFDM0_AGCRSSITABLE,
+               ROFDM0_XATXIQIMBALANCE,
+               ROFDM0_XBTXIQIMBALANCE,
+               ROFDM0_XCTXIQIMBALANCE,
+               ROFDM0_XCTXAFE,
+               ROFDM0_XDTXAFE,
+               ROFDM0_RXIQEXTANTA
+       };
+
+       if (recovery) {
+               _rtl92c_phy_reload_adda_registers(hw,
+                                                 iqk_bb_reg,
+                                                 rtlphy->iqk_bb_backup, 10);
+               return;
+       }
+       if (start_conttx || singletone)
+               return;
+       for (i = 0; i < 8; i++) {
+               result[0][i] = 0;
+               result[1][i] = 0;
+               result[2][i] = 0;
+               result[3][i] = 0;
+       }
+       final_candidate = 0xff;
+       patha_ok = false;
+       pathb_ok = false;
+       is12simular = false;
+       is23simular = false;
+       is13simular = false;
+       for (i = 0; i < 3; i++) {
+               if (IS_92C_SERIAL(rtlhal->version))
+                       _rtl92c_phy_iq_calibrate(hw, result, i, true);
+               else
+                       _rtl92c_phy_iq_calibrate(hw, result, i, false);
+               if (i == 1) {
+                       is12simular = _rtl92c_phy_simularity_compare(hw,
+                                                                    result, 0,
+                                                                    1);
+                       if (is12simular) {
+                               final_candidate = 0;
+                               break;
+                       }
+               }
+               if (i == 2) {
+                       is13simular = _rtl92c_phy_simularity_compare(hw,
+                                                                    result, 0,
+                                                                    2);
+                       if (is13simular) {
+                               final_candidate = 0;
+                               break;
+                       }
+                       is23simular = _rtl92c_phy_simularity_compare(hw,
+                                                                    result, 1,
+                                                                    2);
+                       if (is23simular)
+                               final_candidate = 1;
+                       else {
+                               for (i = 0; i < 8; i++)
+                                       reg_tmp += result[3][i];
+
+                               if (reg_tmp != 0)
+                                       final_candidate = 3;
+                               else
+                                       final_candidate = 0xFF;
+                       }
+               }
+       }
+       for (i = 0; i < 4; i++) {
+               reg_e94 = result[i][0];
+               reg_e9c = result[i][1];
+               reg_ea4 = result[i][2];
+               reg_eac = result[i][3];
+               reg_eb4 = result[i][4];
+               reg_ebc = result[i][5];
+               reg_ec4 = result[i][6];
+               reg_ecc = result[i][7];
+       }
+       if (final_candidate != 0xff) {
+               rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
+               rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
+               reg_ea4 = result[final_candidate][2];
+               reg_eac = result[final_candidate][3];
+               rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
+               rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
+               reg_ec4 = result[final_candidate][6];
+               reg_ecc = result[final_candidate][7];
+               patha_ok = pathb_ok = true;
+       } else {
+               rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
+               rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
+       }
+       if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+               _rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+                                                  final_candidate,
+                                                  (reg_ea4 == 0));
+       if (IS_92C_SERIAL(rtlhal->version)) {
+               if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
+                       _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok,
+                                                          result,
+                                                          final_candidate,
+                                                          (reg_ec4 == 0));
+       }
+       _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
+                                       rtlphy->iqk_bb_backup, 10);
+}
+EXPORT_SYMBOL(rtl92c_phy_iq_calibrate);
+
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool start_conttx = false, singletone = false;
+
+       if (start_conttx || singletone)
+               return;
+       if (IS_92C_SERIAL(rtlhal->version))
+               rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);
+       else
+               rtlpriv->cfg->ops->phy_lc_calibrate(hw, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
+
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlphy->apk_done)
+               return;
+       if (IS_92C_SERIAL(rtlhal->version))
+               _rtl92c_phy_ap_calibrate(hw, delta, true);
+       else
+               _rtl92c_phy_ap_calibrate(hw, delta, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_ap_calibrate);
+
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (IS_92C_SERIAL(rtlhal->version))
+               _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
+       else
+               _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch);
+
+bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool postprocessing = false;
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+                 iotype, rtlphy->set_io_inprogress));
+       do {
+               switch (iotype) {
+               case IO_CMD_RESUME_DM_BY_SCAN:
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                                ("[IO CMD] Resume DM after scan.\n"));
+                       postprocessing = true;
+                       break;
+               case IO_CMD_PAUSE_DM_BY_SCAN:
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                                ("[IO CMD] Pause DM before scan.\n"));
+                       postprocessing = true;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("switch case not processed\n"));
+                       break;
+               }
+       } while (false);
+       if (postprocessing && !rtlphy->set_io_inprogress) {
+               rtlphy->set_io_inprogress = true;
+               rtlphy->current_io_type = iotype;
+       } else {
+               return false;
+       }
+       rtl92c_phy_set_io(hw);
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+       return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
+
+void rtl92c_phy_set_io(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+                 rtlphy->current_io_type, rtlphy->set_io_inprogress));
+       switch (rtlphy->current_io_type) {
+       case IO_CMD_RESUME_DM_BY_SCAN:
+               dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+               rtl92c_dm_write_dig(hw);
+               rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+               break;
+       case IO_CMD_PAUSE_DM_BY_SCAN:
+               rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+               dm_digtable.cur_igvalue = 0x17;
+               rtl92c_dm_write_dig(hw);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not processed\n"));
+               break;
+       }
+       rtlphy->set_io_inprogress = false;
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                ("<---(%#x)\n", rtlphy->current_io_type));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io);
+
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+EXPORT_SYMBOL(rtl92ce_phy_set_rf_on);
+
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+       u32 u4b_tmp;
+       u8 delay = 5;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+       u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+       while (u4b_tmp != 0 && delay > 0) {
+               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+               u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+               delay--;
+       }
+       if (delay == 0) {
+               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+                        ("Switch RF timeout!\n"));
+               return;
+       }
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep);
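The file above ends with the shared calibration entry points exported by phy_common.c (rtl92c_phy_iq_calibrate, rtl92c_phy_lc_calibrate, rtl92c_phy_ap_calibrate and rtl92c_phy_set_rfpath_switch); the PCI and USB front ends are expected to drive them after hardware init. A minimal caller sketch, assuming only the prototypes from phy_common.h below -- the function name and call order here are illustrative and not part of this patch:

/* Illustrative only -- not code from this series.  Assumes an initialized
 * ieee80211_hw and the declarations in rtl8192c/phy_common.h.
 */
static void example_run_initial_calibration(struct ieee80211_hw *hw)
{
	/* Select the main antenna path ('true' = main). */
	rtl92c_phy_set_rfpath_switch(hw, true);

	/* Run a full IQ calibration; passing 'true' instead would only
	 * restore the baseband registers saved by a previous run. */
	rtl92c_phy_iq_calibrate(hw, false);

	/* LC calibration, then AP (thermal) calibration with a zero
	 * thermal delta as a neutral starting value. */
	rtl92c_phy_lc_calibrate(hw);
	rtl92c_phy_ap_calibrate(hw, 0);
}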
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
new file mode 100644 (file)
index 0000000..53ffb09
--- /dev/null
@@ -0,0 +1,246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_PHY_H__
+#define __RTL92C_PHY_H__
+
+#define MAX_PRECMD_CNT                 16
+#define MAX_RFDEPENDCMD_CNT            16
+#define MAX_POSTCMD_CNT                        16
+
+#define MAX_DOZE_WAITING_TIMES_9x      64
+
+#define RT_CANNOT_IO(hw)               false
+#define HIGHPOWER_RADIOA_ARRAYLEN      22
+
+#define MAX_TOLERANCE                  5
+#define        IQK_DELAY_TIME                  1
+
+#define        APK_BB_REG_NUM                  5
+#define        APK_AFE_REG_NUM                 16
+#define        APK_CURVE_REG_NUM               4
+#define        PATH_NUM                        2
+
+#define LOOP_LIMIT                     5
+#define MAX_STALL_TIME                 50
+#define AntennaDiversityValue          0x80
+#define MAX_TXPWR_IDX_NMODE_92S                63
+#define Reset_Cnt_Limit                        3
+
+#define IQK_ADDA_REG_NUM               16
+#define IQK_MAC_REG_NUM                        4
+
+#define RF90_PATH_MAX                  2
+
+#define CT_OFFSET_MAC_ADDR             0x16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX       0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX    0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF        0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET  0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET  0x72
+
+#define CT_OFFSET_CHANNEL_PLAH         0x75
+#define CT_OFFSET_THERMAL_METER                0x78
+#define CT_OFFSET_RF_OPTION            0x79
+#define CT_OFFSET_VERSION              0x7E
+#define CT_OFFSET_CUSTOMER_ID          0x7F
+
+#define RTL92C_MAX_PATH_NUM            2
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER        255
+enum swchnlcmd_id {
+       CMDID_END,
+       CMDID_SET_TXPOWEROWER_LEVEL,
+       CMDID_BBREGWRITE10,
+       CMDID_WRITEPORT_ULONG,
+       CMDID_WRITEPORT_USHORT,
+       CMDID_WRITEPORT_UCHAR,
+       CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+       enum swchnlcmd_id cmdid;
+       u32 para1;
+       u32 para2;
+       u32 msdelay;
+};
+
+enum hw90_block_e {
+       HW90_BLOCK_MAC = 0,
+       HW90_BLOCK_PHY0 = 1,
+       HW90_BLOCK_PHY1 = 2,
+       HW90_BLOCK_RF = 3,
+       HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+       BASEBAND_CONFIG_PHY_REG = 0,
+       BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+       RA_OFFSET_LEGACY_OFDM1,
+       RA_OFFSET_LEGACY_OFDM2,
+       RA_OFFSET_HT_OFDM1,
+       RA_OFFSET_HT_OFDM2,
+       RA_OFFSET_HT_OFDM3,
+       RA_OFFSET_HT_OFDM4,
+       RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+       ANTENNA_NONE,
+       ANTENNA_D,
+       ANTENNA_C,
+       ANTENNA_CD,
+       ANTENNA_B,
+       ANTENNA_BD,
+       ANTENNA_BC,
+       ANTENNA_BCD,
+       ANTENNA_A,
+       ANTENNA_AD,
+       ANTENNA_AC,
+       ANTENNA_ACD,
+       ANTENNA_AB,
+       ANTENNA_ABD,
+       ANTENNA_ABC,
+       ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+       u32 r_tx_antenna:4;
+       u32 r_ant_l:4;
+       u32 r_ant_non_ht:4;
+       u32 r_ant_ht1:4;
+       u32 r_ant_ht2:4;
+       u32 r_ant_ht_s1:4;
+       u32 r_ant_non_ht_s1:4;
+       u32 ofdm_txsc:2;
+       u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+       u8 r_cckrx_enable_2:2;
+       u8 r_cckrx_enable:2;
+       u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+       u8 mac_addr[ETH_ALEN];
+       u8 cck_tx_power_idx[6];
+       u8 ht40_1s_tx_power_idx[6];
+       u8 ht40_2s_tx_power_idx_diff[3];
+       u8 ht20_tx_power_idx_diff[3];
+       u8 ofdm_tx_power_idx_diff[3];
+       u8 ht40_max_power_offset[3];
+       u8 ht20_max_power_offset[3];
+       u8 channel_plan;
+       u8 thermal_meter;
+       u8 rf_option[5];
+       u8 version;
+       u8 oem_id;
+       u8 regulatory;
+};
+
+struct tx_power_struct {
+       u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 legacy_ht_txpowerdiff;
+       u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 pwrgroup_cnt;
+       u32 mcs_original_offset[4][16];
+};
+
+extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
+                                  u32 regaddr, u32 bitmask);
+extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+                                 u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+                                  enum radio_path rfpath, u32 regaddr,
+                                  u32 bitmask);
+extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+                                 enum radio_path rfpath, u32 regaddr,
+                                 u32 bitmask, u32 data);
+extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                                enum radio_path rfpath);
+extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                        long *powerlevel);
+extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+                                         long power_indbm);
+extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
+                                            u8 operation);
+extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+                                  enum nl80211_channel_type ch_type);
+extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
+                                        u16 beaconinterval);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
+                                             u32 rfpath);
+extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                         enum rf_pwrstate rfpwr_state);
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+                                 enum wireless_mode wirelessmode,
+                                 u8 txpwridx);
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+                               enum wireless_mode wirelessmode,
+                               long power_indbm);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+                                            u32 cmdtableidx, u32 cmdtablesz,
+                                            enum swchnlcmd_id cmdid, u32 para1,
+                                            u32 para2, u32 msdelay);
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+                                            u8 channel, u8 *stage, u8 *step,
+                                            u32 *delay);
+
+#endif
index 0f0be7c..c0cb0cf 100644 (file)
@@ -1,6 +1,5 @@
 rtl8192ce-objs :=              \
                dm.o            \
-               fw.o            \
                hw.o            \
                led.o           \
                phy.o           \
@@ -10,3 +9,5 @@ rtl8192ce-objs :=              \
                trx.o
 
 obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
+
+ccflags-y += -D__CHECK_ENDIAN__
index 83cd648..2f577c8 100644 (file)
 #define CHIP_92C                       0x01
 #define CHIP_88C                       0x00
 
+/* Add vendor information into chip version definition.
+ * Add UMC B-Cut and RTL8723 chip info definition.
+ *
+ * BIT 7       Reserved
+ * BIT 6       UMC BCut
+ * BIT 5       Manufacturer (TSMC/UMC)
+ * BIT 4       TEST/NORMAL
+ * BIT 3       8723 Version
+ * BIT 2       8723?
+ * BIT 1       1T2R?
+ * BIT 0       88C/92C
+ */
+
 enum version_8192c {
        VERSION_A_CHIP_92C = 0x01,
        VERSION_A_CHIP_88C = 0x00,
        VERSION_B_CHIP_92C = 0x11,
        VERSION_B_CHIP_88C = 0x10,
+       VERSION_TEST_CHIP_88C = 0x00,
+       VERSION_TEST_CHIP_92C = 0x01,
+       VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
+       VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
+       VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
+       VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
+       VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
+       VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
+       VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
+       VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
+       VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
+       VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
+       VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
        VERSION_UNKNOWN = 0x88,
 };
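As documented in the comment above, the version byte packs chip family, RF configuration and manufacturing data into individual bits. A small decode sketch under that assumption -- the EXAMPLE_* masks below are invented for illustration and are not definitions from this patch:

/* Masks derived from the bit-layout comment above; illustrative only. */
#define EXAMPLE_BIT_92C        0x01    /* BIT 0: 88C/92C          */
#define EXAMPLE_BIT_1T2R       0x02    /* BIT 1: 1T2R             */
#define EXAMPLE_BIT_8723       0x04    /* BIT 2: 8723             */
#define EXAMPLE_BIT_NORMAL     0x10    /* BIT 4: TEST/NORMAL      */
#define EXAMPLE_BIT_UMC        0x20    /* BIT 5: manufacturer UMC */
#define EXAMPLE_BIT_UMC_B_CUT  0x40    /* BIT 6: UMC B-cut        */

static inline bool example_is_normal_umc_bcut_92c(u8 version)
{
	/* e.g. VERSION_NORMAL_UMC_CHIP_92C_B_CUT (0x71) matches. */
	return (version & EXAMPLE_BIT_NORMAL) &&
	       (version & EXAMPLE_BIT_UMC_B_CUT) &&
	       (version & EXAMPLE_BIT_92C);
}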
 
@@ -254,4 +280,122 @@ struct h2c_cmd_8192c {
        u8 *p_cmdbuffer;
 };
 
+static inline u8 _rtl92c_get_chnl_group(u8 chnl)
+{
+       u8 group = 0;
+
+       if (chnl < 3)
+               group = 0;
+       else if (chnl < 9)
+               group = 1;
+       else
+               group = 2;
+
+       return group;
+}
+
+/* NOTE: reference to rtl8192c_rates struct */
+static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
+                                      u8 desc_rate, bool first_ampdu)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int rate_idx = 0;
+
+       if (first_ampdu) {
+               if (!isHT) {
+                       switch (desc_rate) {
+                       case DESC92C_RATE1M:
+                               rate_idx = 0;
+                               break;
+                       case DESC92C_RATE2M:
+                               rate_idx = 1;
+                               break;
+                       case DESC92C_RATE5_5M:
+                               rate_idx = 2;
+                               break;
+                       case DESC92C_RATE11M:
+                               rate_idx = 3;
+                               break;
+                       case DESC92C_RATE6M:
+                               rate_idx = 4;
+                               break;
+                       case DESC92C_RATE9M:
+                               rate_idx = 5;
+                               break;
+                       case DESC92C_RATE12M:
+                               rate_idx = 6;
+                               break;
+                       case DESC92C_RATE18M:
+                               rate_idx = 7;
+                               break;
+                       case DESC92C_RATE24M:
+                               rate_idx = 8;
+                               break;
+                       case DESC92C_RATE36M:
+                               rate_idx = 9;
+                               break;
+                       case DESC92C_RATE48M:
+                               rate_idx = 10;
+                               break;
+                       case DESC92C_RATE54M:
+                               rate_idx = 11;
+                               break;
+                       default:
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+                                        ("Rate %d is not supported, set to "
+                                         "1M rate.\n", desc_rate));
+                               rate_idx = 0;
+                               break;
+                       }
+               } else {
+                       rate_idx = 11;
+               }
+               return rate_idx;
+       }
+       switch (desc_rate) {
+       case DESC92C_RATE1M:
+               rate_idx = 0;
+               break;
+       case DESC92C_RATE2M:
+               rate_idx = 1;
+               break;
+       case DESC92C_RATE5_5M:
+               rate_idx = 2;
+               break;
+       case DESC92C_RATE11M:
+               rate_idx = 3;
+               break;
+       case DESC92C_RATE6M:
+               rate_idx = 4;
+               break;
+       case DESC92C_RATE9M:
+               rate_idx = 5;
+               break;
+       case DESC92C_RATE12M:
+               rate_idx = 6;
+               break;
+       case DESC92C_RATE18M:
+               rate_idx = 7;
+               break;
+       case DESC92C_RATE24M:
+               rate_idx = 8;
+               break;
+       case DESC92C_RATE36M:
+               rate_idx = 9;
+               break;
+       case DESC92C_RATE48M:
+               rate_idx = 10;
+               break;
+       case DESC92C_RATE54M:
+               rate_idx = 11;
+               break;
+       /* TODO: How to map MCS rates? */
+       /* NOTE: reference to __ieee80211_rx */
+       default:
+               rate_idx = 11;
+               break;
+       }
+       return rate_idx;
+}
+
 #endif
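The inline _rtl92c_rate_mapping() helper added above translates a hardware RX descriptor rate into an index into the driver's legacy rate table (0 = 1M ... 11 = 54M), with HT/MCS rates collapsed to index 11 for now. A hypothetical consumer in an RX path might look like the sketch below -- the function and variable names are placeholders, not code from this series:

/* Illustrative use of _rtl92c_rate_mapping(); rx_status is mac80211's
 * struct ieee80211_rx_status for the received frame. */
static void example_fill_rx_rate(struct ieee80211_hw *hw,
				 struct ieee80211_rx_status *rx_status,
				 bool is_ht, u8 desc_rate, bool first_ampdu)
{
	rx_status->rate_idx = _rtl92c_rate_mapping(hw, is_ht, desc_rate,
						   first_ampdu);
}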
index 62e7c64..7d76504 100644 (file)
 #include "def.h"
 #include "phy.h"
 #include "dm.h"
-#include "fw.h"
 
-struct dig_t dm_digtable;
-static struct ps_t dm_pstable;
-
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
-       0x7f8001fe,
-       0x788001e2,
-       0x71c001c7,
-       0x6b8001ae,
-       0x65400195,
-       0x5fc0017f,
-       0x5a400169,
-       0x55400155,
-       0x50800142,
-       0x4c000130,
-       0x47c0011f,
-       0x43c0010f,
-       0x40000100,
-       0x3c8000f2,
-       0x390000e4,
-       0x35c000d7,
-       0x32c000cb,
-       0x300000c0,
-       0x2d4000b5,
-       0x2ac000ab,
-       0x288000a2,
-       0x26000098,
-       0x24000090,
-       0x22000088,
-       0x20000080,
-       0x1e400079,
-       0x1c800072,
-       0x1b00006c,
-       0x19800066,
-       0x18000060,
-       0x16c0005b,
-       0x15800056,
-       0x14400051,
-       0x1300004c,
-       0x12000048,
-       0x11000044,
-       0x10000040,
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
-       {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
-       {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
-       {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
-       {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
-       {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
-       {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
-       {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
-       {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
-       {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
-       {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
-       {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
-       {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
-       {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
-       {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
-       {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
-       {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
-       {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
-       {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
-       {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
-       {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
-       {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
-       {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
-       {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
-       {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
-       {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
-       {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
-       {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
-       {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
-       {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
-       {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
-       {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
-       {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
-       {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
-       {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
-       {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
-       {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
-       {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
-       {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
-       {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
-       {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
-       {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
-       {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
-       {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
-       {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
-       {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
-       {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
-       {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
-       {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
-       {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
-       {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
-       {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
-       {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
-       {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
-       {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
-       {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
-       {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
-       {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
-       {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
-       {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
-       {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
-       {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
-       {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
-};
-
-static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
-{
-       dm_digtable.dig_enable_flag = true;
-       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-       dm_digtable.cur_igvalue = 0x20;
-       dm_digtable.pre_igvalue = 0x0;
-       dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-       dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
-       dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
-       dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
-       dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
-       dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
-       dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
-       dm_digtable.rx_gain_range_max = DM_DIG_MAX;
-       dm_digtable.rx_gain_range_min = DM_DIG_MIN;
-       dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-       dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
-       dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
-       dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
-       dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
-}
-
-static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       long rssi_val_min = 0;
-
-       if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
-           (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
-               if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
-                       rssi_val_min =
-                           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
-                            rtlpriv->dm.undecorated_smoothed_pwdb) ?
-                           rtlpriv->dm.undecorated_smoothed_pwdb :
-                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-               else
-                       rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-       } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
-                  dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
-               rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-       } else if (dm_digtable.curmultista_connectstate ==
-                  DIG_MULTISTA_CONNECT) {
-               rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-       }
-
-       return (u8) rssi_val_min;
-}
-
-static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
-{
-       u32 ret_value;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
-
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
-       falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
-
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
-       falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
-       falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
-
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
-       falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
-       falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
-           falsealm_cnt->cnt_rate_illegal +
-           falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
-
-       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
-       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
-       falsealm_cnt->cnt_cck_fail = ret_value;
-
-       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
-       falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
-       falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
-                                falsealm_cnt->cnt_rate_illegal +
-                                falsealm_cnt->cnt_crc8_fail +
-                                falsealm_cnt->cnt_mcs_fail +
-                                falsealm_cnt->cnt_cck_fail);
-
-       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
-       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
-       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
-       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
-                 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
-                 falsealm_cnt->cnt_parity_fail,
-                 falsealm_cnt->cnt_rate_illegal,
-                 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
-                 falsealm_cnt->cnt_ofdm_fail,
-                 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
-}
-
-static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value_igi = dm_digtable.cur_igvalue;
-
-       if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
-               value_igi--;
-       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
-               value_igi += 0;
-       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
-               value_igi++;
-       else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
-               value_igi += 2;
-       if (value_igi > DM_DIG_FA_UPPER)
-               value_igi = DM_DIG_FA_UPPER;
-       else if (value_igi < DM_DIG_FA_LOWER)
-               value_igi = DM_DIG_FA_LOWER;
-       if (rtlpriv->falsealm_cnt.cnt_all > 10000)
-               value_igi = 0x32;
-
-       dm_digtable.cur_igvalue = value_igi;
-       rtl92c_dm_write_dig(hw);
-}
-
-static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
-               if ((dm_digtable.backoff_val - 2) <
-                   dm_digtable.backoff_val_range_min)
-                       dm_digtable.backoff_val =
-                           dm_digtable.backoff_val_range_min;
-               else
-                       dm_digtable.backoff_val -= 2;
-       } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
-               if ((dm_digtable.backoff_val + 2) >
-                   dm_digtable.backoff_val_range_max)
-                       dm_digtable.backoff_val =
-                           dm_digtable.backoff_val_range_max;
-               else
-                       dm_digtable.backoff_val += 2;
-       }
-
-       if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
-           dm_digtable.rx_gain_range_max)
-               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
-       else if ((dm_digtable.rssi_val_min + 10 -
-                 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
-               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
-       else
-               dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
-                   dm_digtable.backoff_val;
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("rssi_val_min = %x backoff_val %x\n",
-                 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
-
-       rtl92c_dm_write_dig(hw);
-}
-
-static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
-{
-       static u8 binitialized; /* initialized to false */
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-       bool b_multi_sta = false;
-
-       if (mac->opmode == NL80211_IFTYPE_ADHOC)
-               b_multi_sta = true;
-
-       if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate !=
-                                      DIG_STA_DISCONNECT)) {
-               binitialized = false;
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-               return;
-       } else if (binitialized == false) {
-               binitialized = true;
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
-               dm_digtable.cur_igvalue = 0x20;
-               rtl92c_dm_write_dig(hw);
-       }
-
-       if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
-               if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
-                   (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
-
-                       if (dm_digtable.dig_ext_port_stage ==
-                           DIG_EXT_PORT_STAGE_2) {
-                               dm_digtable.cur_igvalue = 0x20;
-                               rtl92c_dm_write_dig(hw);
-                       }
-
-                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
-               } else if (rssi_strength > dm_digtable.rssi_highthresh) {
-                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
-                       rtl92c_dm_ctrl_initgain_by_fa(hw);
-               }
-       } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
-               dm_digtable.cur_igvalue = 0x20;
-               rtl92c_dm_write_dig(hw);
-       }
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("curmultista_connectstate = "
-                 "%x dig_ext_port_stage %x\n",
-                 dm_digtable.curmultista_connectstate,
-                 dm_digtable.dig_ext_port_stage));
-}
-
-static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("presta_connectstate = %x,"
-                 " cursta_connectctate = %x\n",
-                 dm_digtable.presta_connectstate,
-                 dm_digtable.cursta_connectctate));
-
-       if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
-           || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
-           || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-
-               if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
-                       dm_digtable.rssi_val_min =
-                           rtl92c_dm_initial_gain_min_pwdb(hw);
-                       rtl92c_dm_ctrl_initgain_by_rssi(hw);
-               }
-       } else {
-               dm_digtable.rssi_val_min = 0;
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-               dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-               dm_digtable.cur_igvalue = 0x20;
-               dm_digtable.pre_igvalue = 0;
-               rtl92c_dm_write_dig(hw);
-       }
-}
-
-static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-               dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
-
-               if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
-                       if (dm_digtable.rssi_val_min <= 25)
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_LowRssi;
-                       else
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_HighRssi;
-               } else {
-                       if (dm_digtable.rssi_val_min <= 20)
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_LowRssi;
-                       else
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_HighRssi;
-               }
-       } else {
-               dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
-       }
-
-       if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
-               if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
-                       if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
-                               dm_digtable.cur_cck_fa_state =
-                                   CCK_FA_STAGE_High;
-                       else
-                               dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
-
-                       if (dm_digtable.pre_cck_fa_state !=
-                           dm_digtable.cur_cck_fa_state) {
-                               if (dm_digtable.cur_cck_fa_state ==
-                                   CCK_FA_STAGE_Low)
-                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
-                                                     0x83);
-                               else
-                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
-                                                     0xcd);
-
-                               dm_digtable.pre_cck_fa_state =
-                                   dm_digtable.cur_cck_fa_state;
-                       }
-
-                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
-
-                       if (IS_92C_SERIAL(rtlhal->version))
-                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
-                                             MASKBYTE2, 0xd7);
-               } else {
-                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
-                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
-
-                       if (IS_92C_SERIAL(rtlhal->version))
-                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
-                                             MASKBYTE2, 0xd3);
-               }
-               dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
-       }
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
-}
-
-static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
-{
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
-       if (mac->act_scanning == true)
-               return;
-
-       if ((mac->link_state > MAC80211_NOLINK) &&
-           (mac->link_state < MAC80211_LINKED))
-               dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
-       else if (mac->link_state >= MAC80211_LINKED)
-               dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
-       else
-               dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-
-       rtl92c_dm_initial_gain_sta(hw);
-       rtl92c_dm_initial_gain_multi_sta(hw);
-       rtl92c_dm_cck_packet_detection_thresh(hw);
-
-       dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
-
-}
-
-static void rtl92c_dm_dig(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       if (rtlpriv->dm.b_dm_initialgain_enable == false)
-               return;
-       if (dm_digtable.dig_enable_flag == false)
-               return;
-
-       rtl92c_dm_ctrl_initgain_by_twoport(hw);
-
-}
-
-static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.bdynamic_txpower_enable = false;
-
-       rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
-       rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
-}
-
-static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        long undecorated_smoothed_pwdb;
 
-       if (!rtlpriv->dm.bdynamic_txpower_enable)
+       if (!rtlpriv->dm.dynamic_txpower_enable)
                return;
 
        if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -583,891 +111,3 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
 
        rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
 }
-
-void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
-                ("cur_igvalue = 0x%x, "
-                 "pre_igvalue = 0x%x, backoff_val = %d\n",
-                 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
-                 dm_digtable.backoff_val));
-
-       if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
-               rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
-                             dm_digtable.cur_igvalue);
-               rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
-                             dm_digtable.cur_igvalue);
-
-               dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
-       }
-}
-
-static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
-       u8 h2c_parameter[3] = { 0 };
-
-       return;
-
-       if (tmpentry_max_pwdb != 0) {
-               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
-                   tmpentry_max_pwdb;
-       } else {
-               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
-       }
-
-       if (tmpentry_min_pwdb != 0xff) {
-               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
-                   tmpentry_min_pwdb;
-       } else {
-               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
-       }
-
-       h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
-       h2c_parameter[0] = 0;
-
-       rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
-}
-
-void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       rtlpriv->dm.bcurrent_turbo_edca = false;
-       rtlpriv->dm.bis_any_nonbepkts = false;
-       rtlpriv->dm.bis_cur_rdlstate = false;
-}
-
-static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       static u64 last_txok_cnt;
-       static u64 last_rxok_cnt;
-       u64 cur_txok_cnt;
-       u64 cur_rxok_cnt;
-       u32 edca_be_ul = 0x5ea42b;
-       u32 edca_be_dl = 0x5ea42b;
-
-       if (mac->opmode == NL80211_IFTYPE_ADHOC)
-               goto dm_checkedcaturbo_exit;
-
-       if (mac->link_state != MAC80211_LINKED) {
-               rtlpriv->dm.bcurrent_turbo_edca = false;
-               return;
-       }
-
-       if (!mac->ht_enable) {  /*FIX MERGE */
-               if (!(edca_be_ul & 0xffff0000))
-                       edca_be_ul |= 0x005e0000;
-
-               if (!(edca_be_dl & 0xffff0000))
-                       edca_be_dl |= 0x005e0000;
-       }
-
-       if ((!rtlpriv->dm.bis_any_nonbepkts) &&
-           (!rtlpriv->dm.b_disable_framebursting)) {
-               cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
-               cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
-               if (cur_rxok_cnt > 4 * cur_txok_cnt) {
-                       if (!rtlpriv->dm.bis_cur_rdlstate ||
-                           !rtlpriv->dm.bcurrent_turbo_edca) {
-                               rtl_write_dword(rtlpriv,
-                                               REG_EDCA_BE_PARAM,
-                                               edca_be_dl);
-                               rtlpriv->dm.bis_cur_rdlstate = true;
-                       }
-               } else {
-                       if (rtlpriv->dm.bis_cur_rdlstate ||
-                           !rtlpriv->dm.bcurrent_turbo_edca) {
-                               rtl_write_dword(rtlpriv,
-                                               REG_EDCA_BE_PARAM,
-                                               edca_be_ul);
-                               rtlpriv->dm.bis_cur_rdlstate = false;
-                       }
-               }
-               rtlpriv->dm.bcurrent_turbo_edca = true;
-       } else {
-               if (rtlpriv->dm.bcurrent_turbo_edca) {
-                       u8 tmp = AC0_BE;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_AC_PARAM,
-                                                     (u8 *) (&tmp));
-                       rtlpriv->dm.bcurrent_turbo_edca = false;
-               }
-       }
-
-dm_checkedcaturbo_exit:
-       rtlpriv->dm.bis_any_nonbepkts = false;
-       last_txok_cnt = rtlpriv->stats.txbytesunicast;
-       last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
-}
-
-static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
-                                                            *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 thermalvalue, delta, delta_lck, delta_iqk;
-       long ele_a, ele_d, temp_cck, val_x, value32;
-       long val_y, ele_c;
-       u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
-       int i;
-       bool is2t = IS_92C_SERIAL(rtlhal->version);
-       u8 txpwr_level[2] = {0, 0};
-       u8 ofdm_min_index = 6, rf;
-
-       rtlpriv->dm.btxpower_trackingInit = true;
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
-
-       thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
-
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
-                 "eeprom_thermalmeter 0x%x\n",
-                 thermalvalue, rtlpriv->dm.thermalvalue,
-                 rtlefuse->eeprom_thermalmeter));
-
-       rtl92c_phy_ap_calibrate(hw, (thermalvalue -
-                                    rtlefuse->eeprom_thermalmeter));
-       if (is2t)
-               rf = 2;
-       else
-               rf = 1;
-
-       if (thermalvalue) {
-               ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                     MASKDWORD) & MASKOFDM_D;
-
-               for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
-                       if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
-                               ofdm_index_old[0] = (u8) i;
-
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                       ("Initial pathA ele_d reg0x%x = 0x%lx, "
-                                        "ofdm_index=0x%x\n",
-                                        ROFDM0_XATXIQIMBALANCE,
-                                        ele_d, ofdm_index_old[0]));
-                               break;
-                       }
-               }
-
-               if (is2t) {
-                       ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
-                                             MASKDWORD) & MASKOFDM_D;
-
-                       for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
-                               if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
-                                       ofdm_index_old[1] = (u8) i;
-
-                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
-                                          DBG_LOUD,
-                                          ("Initial pathB ele_d reg0x%x = "
-                                          "0x%lx, ofdm_index=0x%x\n",
-                                          ROFDM0_XBTXIQIMBALANCE, ele_d,
-                                          ofdm_index_old[1]));
-                                       break;
-                               }
-                       }
-               }
-
-               temp_cck =
-                   rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
-
-               for (i = 0; i < CCK_TABLE_LENGTH; i++) {
-                       if (rtlpriv->dm.b_cck_inch14) {
-                               if (memcmp((void *)&temp_cck,
-                                          (void *)&cckswing_table_ch14[i][2],
-                                          4) == 0) {
-                                       cck_index_old = (u8) i;
-
-                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
-                                                DBG_LOUD,
-                                                ("Initial reg0x%x = 0x%lx, "
-                                                 "cck_index=0x%x, ch 14 %d\n",
-                                                 RCCK0_TXFILTER2, temp_cck,
-                                                 cck_index_old,
-                                                 rtlpriv->dm.b_cck_inch14));
-                                       break;
-                               }
-                       } else {
-                               if (memcmp((void *)&temp_cck,
-                                          (void *)
-                                          &cckswing_table_ch1ch13[i][2],
-                                          4) == 0) {
-                                       cck_index_old = (u8) i;
-
-                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
-                                                DBG_LOUD,
-                                                ("Initial reg0x%x = 0x%lx, "
-                                                 "cck_index=0x%x, ch14 %d\n",
-                                                 RCCK0_TXFILTER2, temp_cck,
-                                                 cck_index_old,
-                                                 rtlpriv->dm.b_cck_inch14));
-                                       break;
-                               }
-                       }
-               }
-
-               if (!rtlpriv->dm.thermalvalue) {
-                       rtlpriv->dm.thermalvalue =
-                           rtlefuse->eeprom_thermalmeter;
-                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
-                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
-                       for (i = 0; i < rf; i++)
-                               rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
-                       rtlpriv->dm.cck_index = cck_index_old;
-               }
-
-               delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
-                   (thermalvalue - rtlpriv->dm.thermalvalue) :
-                   (rtlpriv->dm.thermalvalue - thermalvalue);
-
-               delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
-                   (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
-                   (rtlpriv->dm.thermalvalue_lck - thermalvalue);
-
-               delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
-                   (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
-                   (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
-
-               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                       ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
-                        "eeprom_thermalmeter 0x%x delta 0x%x "
-                        "delta_lck 0x%x delta_iqk 0x%x\n",
-                        thermalvalue, rtlpriv->dm.thermalvalue,
-                        rtlefuse->eeprom_thermalmeter, delta, delta_lck,
-                        delta_iqk));
-
-               if (delta_lck > 1) {
-                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
-                       rtl92c_phy_lc_calibrate(hw);
-               }
-
-               if (delta > 0 && rtlpriv->dm.txpower_track_control) {
-                       if (thermalvalue > rtlpriv->dm.thermalvalue) {
-                               for (i = 0; i < rf; i++)
-                                       rtlpriv->dm.ofdm_index[i] -= delta;
-                               rtlpriv->dm.cck_index -= delta;
-                       } else {
-                               for (i = 0; i < rf; i++)
-                                       rtlpriv->dm.ofdm_index[i] += delta;
-                               rtlpriv->dm.cck_index += delta;
-                       }
-
-                       if (is2t) {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("temp OFDM_A_index=0x%x, "
-                                         "OFDM_B_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         rtlpriv->dm.ofdm_index[0],
-                                         rtlpriv->dm.ofdm_index[1],
-                                         rtlpriv->dm.cck_index));
-                       } else {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("temp OFDM_A_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         rtlpriv->dm.ofdm_index[0],
-                                         rtlpriv->dm.cck_index));
-                       }
-
-                       if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
-                               for (i = 0; i < rf; i++)
-                                       ofdm_index[i] =
-                                           rtlpriv->dm.ofdm_index[i]
-                                           + 1;
-                               cck_index = rtlpriv->dm.cck_index + 1;
-                       } else {
-                               for (i = 0; i < rf; i++)
-                                       ofdm_index[i] =
-                                           rtlpriv->dm.ofdm_index[i];
-                               cck_index = rtlpriv->dm.cck_index;
-                       }
-
-                       for (i = 0; i < rf; i++) {
-                               if (txpwr_level[i] >= 0 &&
-                                   txpwr_level[i] <= 26) {
-                                       if (thermalvalue >
-                                           rtlefuse->eeprom_thermalmeter) {
-                                               if (delta < 5)
-                                                       ofdm_index[i] -= 1;
-
-                                               else
-                                                       ofdm_index[i] -= 2;
-                                       } else if (delta > 5 && thermalvalue <
-                                                  rtlefuse->
-                                                  eeprom_thermalmeter) {
-                                               ofdm_index[i] += 1;
-                                       }
-                               } else if (txpwr_level[i] >= 27 &&
-                                          txpwr_level[i] <= 32
-                                          && thermalvalue >
-                                          rtlefuse->eeprom_thermalmeter) {
-                                       if (delta < 5)
-                                               ofdm_index[i] -= 1;
-
-                                       else
-                                               ofdm_index[i] -= 2;
-                               } else if (txpwr_level[i] >= 32 &&
-                                          txpwr_level[i] <= 38 &&
-                                          thermalvalue >
-                                          rtlefuse->eeprom_thermalmeter
-                                          && delta > 5) {
-                                       ofdm_index[i] -= 1;
-                               }
-                       }
-
-                       if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
-                               if (thermalvalue >
-                                   rtlefuse->eeprom_thermalmeter) {
-                                       if (delta < 5)
-                                               cck_index -= 1;
-
-                                       else
-                                               cck_index -= 2;
-                               } else if (delta > 5 && thermalvalue <
-                                          rtlefuse->eeprom_thermalmeter) {
-                                       cck_index += 1;
-                               }
-                       } else if (txpwr_level[i] >= 27 &&
-                                  txpwr_level[i] <= 32 &&
-                                  thermalvalue >
-                                  rtlefuse->eeprom_thermalmeter) {
-                               if (delta < 5)
-                                       cck_index -= 1;
-
-                               else
-                                       cck_index -= 2;
-                       } else if (txpwr_level[i] >= 32 &&
-                                  txpwr_level[i] <= 38 &&
-                                  thermalvalue > rtlefuse->eeprom_thermalmeter
-                                  && delta > 5) {
-                               cck_index -= 1;
-                       }
-
-                       for (i = 0; i < rf; i++) {
-                               if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
-                                       ofdm_index[i] = OFDM_TABLE_SIZE - 1;
-
-                               else if (ofdm_index[i] < ofdm_min_index)
-                                       ofdm_index[i] = ofdm_min_index;
-                       }
-
-                       if (cck_index > CCK_TABLE_SIZE - 1)
-                               cck_index = CCK_TABLE_SIZE - 1;
-                       else if (cck_index < 0)
-                               cck_index = 0;
-
-                       if (is2t) {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("new OFDM_A_index=0x%x, "
-                                         "OFDM_B_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         ofdm_index[0], ofdm_index[1],
-                                         cck_index));
-                       } else {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("new OFDM_A_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         ofdm_index[0], cck_index));
-                       }
-               }
-
-               if (rtlpriv->dm.txpower_track_control && delta != 0) {
-                       ele_d =
-                           (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
-                       val_x = rtlphy->reg_e94;
-                       val_y = rtlphy->reg_e9c;
-
-                       if (val_x != 0) {
-                               if ((val_x & 0x00000200) != 0)
-                                       val_x = val_x | 0xFFFFFC00;
-                               ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
-
-                               if ((val_y & 0x00000200) != 0)
-                                       val_y = val_y | 0xFFFFFC00;
-                               ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
-
-                               value32 = (ele_d << 22) |
-                                   ((ele_c & 0x3F) << 16) | ele_a;
-
-                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                             MASKDWORD, value32);
-
-                               value32 = (ele_c & 0x000003C0) >> 6;
-                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
-                                             value32);
-
-                               value32 = ((val_x * ele_d) >> 7) & 0x01;
-                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                             BIT(31), value32);
-
-                               value32 = ((val_y * ele_d) >> 7) & 0x01;
-                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                             BIT(29), value32);
-                       } else {
-                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                             MASKDWORD,
-                                             ofdmswing_table[ofdm_index[0]]);
-
-                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
-                                             0x00);
-                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                             BIT(31) | BIT(29), 0x00);
-                       }
-
-                       if (!rtlpriv->dm.b_cck_inch14) {
-                               rtl_write_byte(rtlpriv, 0xa22,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [0]);
-                               rtl_write_byte(rtlpriv, 0xa23,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [1]);
-                               rtl_write_byte(rtlpriv, 0xa24,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [2]);
-                               rtl_write_byte(rtlpriv, 0xa25,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [3]);
-                               rtl_write_byte(rtlpriv, 0xa26,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [4]);
-                               rtl_write_byte(rtlpriv, 0xa27,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [5]);
-                               rtl_write_byte(rtlpriv, 0xa28,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [6]);
-                               rtl_write_byte(rtlpriv, 0xa29,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [7]);
-                       } else {
-                               rtl_write_byte(rtlpriv, 0xa22,
-                                              cckswing_table_ch14[cck_index]
-                                              [0]);
-                               rtl_write_byte(rtlpriv, 0xa23,
-                                              cckswing_table_ch14[cck_index]
-                                              [1]);
-                               rtl_write_byte(rtlpriv, 0xa24,
-                                              cckswing_table_ch14[cck_index]
-                                              [2]);
-                               rtl_write_byte(rtlpriv, 0xa25,
-                                              cckswing_table_ch14[cck_index]
-                                              [3]);
-                               rtl_write_byte(rtlpriv, 0xa26,
-                                              cckswing_table_ch14[cck_index]
-                                              [4]);
-                               rtl_write_byte(rtlpriv, 0xa27,
-                                              cckswing_table_ch14[cck_index]
-                                              [5]);
-                               rtl_write_byte(rtlpriv, 0xa28,
-                                              cckswing_table_ch14[cck_index]
-                                              [6]);
-                               rtl_write_byte(rtlpriv, 0xa29,
-                                              cckswing_table_ch14[cck_index]
-                                              [7]);
-                       }
-
-                       if (is2t) {
-                               ele_d = (ofdmswing_table[ofdm_index[1]] &
-                                        0xFFC00000) >> 22;
-
-                               val_x = rtlphy->reg_eb4;
-                               val_y = rtlphy->reg_ebc;
-
-                               if (val_x != 0) {
-                                       if ((val_x & 0x00000200) != 0)
-                                               val_x = val_x | 0xFFFFFC00;
-                                       ele_a = ((val_x * ele_d) >> 8) &
-                                           0x000003FF;
-
-                                       if ((val_y & 0x00000200) != 0)
-                                               val_y = val_y | 0xFFFFFC00;
-                                       ele_c = ((val_y * ele_d) >> 8) &
-                                           0x00003FF;
-
-                                       value32 = (ele_d << 22) |
-                                           ((ele_c & 0x3F) << 16) | ele_a;
-                                       rtl_set_bbreg(hw,
-                                                     ROFDM0_XBTXIQIMBALANCE,
-                                                     MASKDWORD, value32);
-
-                                       value32 = (ele_c & 0x000003C0) >> 6;
-                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
-                                                     MASKH4BITS, value32);
-
-                                       value32 = ((val_x * ele_d) >> 7) & 0x01;
-                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                                     BIT(27), value32);
-
-                                       value32 = ((val_y * ele_d) >> 7) & 0x01;
-                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                                     BIT(25), value32);
-                               } else {
-                                       rtl_set_bbreg(hw,
-                                                     ROFDM0_XBTXIQIMBALANCE,
-                                                     MASKDWORD,
-                                                     ofdmswing_table[ofdm_index
-                                                                     [1]]);
-                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
-                                                     MASKH4BITS, 0x00);
-                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                                     BIT(27) | BIT(25), 0x00);
-                               }
-
-                       }
-               }
-
-               if (delta_iqk > 3) {
-                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
-                       rtl92c_phy_iq_calibrate(hw, false);
-               }
-
-               if (rtlpriv->dm.txpower_track_control)
-                       rtlpriv->dm.thermalvalue = thermalvalue;
-       }
-
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
-
-}
-
-static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
-                                               struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.btxpower_tracking = true;
-       rtlpriv->dm.btxpower_trackingInit = false;
-
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                ("pMgntInfo->btxpower_tracking = %d\n",
-                 rtlpriv->dm.btxpower_tracking));
-}
-
-static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
-{
-       rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
-}
-
-static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
-{
-       rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
-}
-
-static void rtl92c_dm_check_txpower_tracking_thermal_meter(
-                                               struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       static u8 tm_trigger;
-
-       if (!rtlpriv->dm.btxpower_tracking)
-               return;
-
-       if (!tm_trigger) {
-               rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
-                             0x60);
-               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                        ("Trigger 92S Thermal Meter!!\n"));
-               tm_trigger = 1;
-               return;
-       } else {
-               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                        ("Schedule TxPowerTracking direct call!!\n"));
-               rtl92c_dm_txpower_tracking_directcall(hw);
-               tm_trigger = 0;
-       }
-}
-
-void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
-{
-       rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
-}
-
-void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rate_adaptive *p_ra = &(rtlpriv->ra);
-
-       p_ra->ratr_state = DM_RATR_STA_INIT;
-       p_ra->pre_ratr_state = DM_RATR_STA_INIT;
-
-       if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
-               rtlpriv->dm.b_useramask = true;
-       else
-               rtlpriv->dm.b_useramask = false;
-
-}
-
-static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       struct rate_adaptive *p_ra = &(rtlpriv->ra);
-       u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
-
-       if (is_hal_stop(rtlhal)) {
-               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                        ("<---- driver is going to unload\n"));
-               return;
-       }
-
-       if (!rtlpriv->dm.b_useramask) {
-               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                       ("<---- driver does not control rate adaptive mask\n"));
-               return;
-       }
-
-       if (mac->link_state == MAC80211_LINKED) {
-
-               switch (p_ra->pre_ratr_state) {
-               case DM_RATR_STA_HIGH:
-                       high_rssithresh_for_ra = 50;
-                       low_rssithresh_for_ra = 20;
-                       break;
-               case DM_RATR_STA_MIDDLE:
-                       high_rssithresh_for_ra = 55;
-                       low_rssithresh_for_ra = 20;
-                       break;
-               case DM_RATR_STA_LOW:
-                       high_rssithresh_for_ra = 50;
-                       low_rssithresh_for_ra = 25;
-                       break;
-               default:
-                       high_rssithresh_for_ra = 50;
-                       low_rssithresh_for_ra = 20;
-                       break;
-               }
-
-               if (rtlpriv->dm.undecorated_smoothed_pwdb >
-                   (long)high_rssithresh_for_ra)
-                       p_ra->ratr_state = DM_RATR_STA_HIGH;
-               else if (rtlpriv->dm.undecorated_smoothed_pwdb >
-                        (long)low_rssithresh_for_ra)
-                       p_ra->ratr_state = DM_RATR_STA_MIDDLE;
-               else
-                       p_ra->ratr_state = DM_RATR_STA_LOW;
-
-               if (p_ra->pre_ratr_state != p_ra->ratr_state) {
-                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                                ("RSSI = %ld\n",
-                                 rtlpriv->dm.undecorated_smoothed_pwdb));
-                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                                ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
-                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                                ("PreState = %d, CurState = %d\n",
-                                 p_ra->pre_ratr_state, p_ra->ratr_state));
-
-                       rtlpriv->cfg->ops->update_rate_mask(hw,
-                                       p_ra->ratr_state);
-
-                       p_ra->pre_ratr_state = p_ra->ratr_state;
-               }
-       }
-}
-
-static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
-{
-       dm_pstable.pre_ccastate = CCA_MAX;
-       dm_pstable.cur_ccasate = CCA_MAX;
-       dm_pstable.pre_rfstate = RF_MAX;
-       dm_pstable.cur_rfstate = RF_MAX;
-       dm_pstable.rssi_val_min = 0;
-}
-
-static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       if (dm_pstable.rssi_val_min != 0) {
-               if (dm_pstable.pre_ccastate == CCA_2R) {
-                       if (dm_pstable.rssi_val_min >= 35)
-                               dm_pstable.cur_ccasate = CCA_1R;
-                       else
-                               dm_pstable.cur_ccasate = CCA_2R;
-               } else {
-                       if (dm_pstable.rssi_val_min <= 30)
-                               dm_pstable.cur_ccasate = CCA_2R;
-                       else
-                               dm_pstable.cur_ccasate = CCA_1R;
-               }
-       } else {
-               dm_pstable.cur_ccasate = CCA_MAX;
-       }
-
-       if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
-               if (dm_pstable.cur_ccasate == CCA_1R) {
-                       if (get_rf_type(rtlphy) == RF_2T2R) {
-                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
-                                             MASKBYTE0, 0x13);
-                               rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
-                       } else {
-                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
-                                             MASKBYTE0, 0x23);
-                               rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
-                       }
-               } else {
-                       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
-                                     0x33);
-                       rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
-               }
-               dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
-       }
-
-       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
-                                              (dm_pstable.cur_ccasate ==
-                                               0) ? "1RCCA" : "2RCCA"));
-}
-
-void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
-{
-       static u8 initialize;
-       static u32 reg_874, reg_c70, reg_85c, reg_a74;
-
-       if (initialize == 0) {
-               reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                        MASKDWORD) & 0x1CC000) >> 14;
-
-               reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
-                                        MASKDWORD) & BIT(3)) >> 3;
-
-               reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
-                                        MASKDWORD) & 0xFF000000) >> 24;
-
-               reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
-
-               initialize = 1;
-       }
-
-       if (!bforce_in_normal) {
-               if (dm_pstable.rssi_val_min != 0) {
-                       if (dm_pstable.pre_rfstate == RF_NORMAL) {
-                               if (dm_pstable.rssi_val_min >= 30)
-                                       dm_pstable.cur_rfstate = RF_SAVE;
-                               else
-                                       dm_pstable.cur_rfstate = RF_NORMAL;
-                       } else {
-                               if (dm_pstable.rssi_val_min <= 25)
-                                       dm_pstable.cur_rfstate = RF_NORMAL;
-                               else
-                                       dm_pstable.cur_rfstate = RF_SAVE;
-                       }
-               } else {
-                       dm_pstable.cur_rfstate = RF_MAX;
-               }
-       } else {
-               dm_pstable.cur_rfstate = RF_NORMAL;
-       }
-
-       if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
-               if (dm_pstable.cur_rfstate == RF_SAVE) {
-                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                     0x1C0000, 0x2);
-                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
-                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
-                                     0xFF000000, 0x63);
-                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                     0xC000, 0x2);
-                       rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
-                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
-                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
-               } else {
-                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                     0x1CC000, reg_874);
-                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
-                                     reg_c70);
-                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
-                                     reg_85c);
-                       rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
-                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
-               }
-
-               dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
-       }
-}
-
-static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (((mac->link_state == MAC80211_NOLINK)) &&
-           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
-               dm_pstable.rssi_val_min = 0;
-               RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                        ("Not connected to any\n"));
-       }
-
-       if (mac->link_state == MAC80211_LINKED) {
-               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-                       dm_pstable.rssi_val_min =
-                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-                       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                                ("AP Client PWDB = 0x%lx\n",
-                                 dm_pstable.rssi_val_min));
-               } else {
-                       dm_pstable.rssi_val_min =
-                           rtlpriv->dm.undecorated_smoothed_pwdb;
-                       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                                ("STA Default Port PWDB = 0x%lx\n",
-                                 dm_pstable.rssi_val_min));
-               }
-       } else {
-               dm_pstable.rssi_val_min =
-                   rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-
-               RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                        ("AP Ext Port PWDB = 0x%lx\n",
-                         dm_pstable.rssi_val_min));
-       }
-
-       if (IS_92C_SERIAL(rtlhal->version))
-               rtl92c_dm_1r_cca(hw);
-}
-
-void rtl92c_dm_init(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
-       rtl92c_dm_diginit(hw);
-       rtl92c_dm_init_dynamic_txpower(hw);
-       rtl92c_dm_init_edca_turbo(hw);
-       rtl92c_dm_init_rate_adaptive_mask(hw);
-       rtl92c_dm_initialize_txpower_tracking(hw);
-       rtl92c_dm_init_dynamic_bb_powersaving(hw);
-}
-
-void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       bool b_fw_current_inpsmode = false;
-       bool b_fw_ps_awake = true;
-
-       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
-                                     (u8 *) (&b_fw_current_inpsmode));
-       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
-                                     (u8 *) (&b_fw_ps_awake));
-
-       if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) &&
-                                            b_fw_ps_awake)
-           && (!ppsc->rfchange_inprogress)) {
-               rtl92c_dm_pwdb_monitor(hw);
-               rtl92c_dm_dig(hw);
-               rtl92c_dm_false_alarm_counter_statistics(hw);
-               rtl92c_dm_dynamic_bb_powersaving(hw);
-               rtl92c_dm_dynamic_txpower(hw);
-               rtl92c_dm_check_txpower_tracking(hw);
-               rtl92c_dm_refresh_rate_adaptive_mask(hw);
-               rtl92c_dm_check_edca_turbo(hw);
-       }
-}
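
The hunk above removes rtl92c_dm_txpower_tracking_callback_thermalmeter() and its wrappers: the routine reads the thermal meter, forms a symmetric delta against the stored reference, steps the OFDM/CCK swing indices by that delta in the direction of the temperature change, clamps them to the table bounds, and retriggers LC calibration when delta_lck > 1 and IQ calibration when delta_iqk > 3. A minimal standalone sketch of just the delta-and-clamp step (table size and minimum index are placeholder values, not the driver's):

#include <stdio.h>

#define OFDM_TABLE_SIZE	37	/* placeholder; the real swing tables live in the driver */
#define OFDM_MIN_INDEX	6	/* placeholder lower bound */

/* Symmetric distance between the current and the stored thermal reading. */
static unsigned int thermal_delta(unsigned int now, unsigned int last)
{
	return (now > last) ? (now - last) : (last - now);
}

/* Step a swing-table index down by 'delta' when the current reading is above
 * the reference (up otherwise), then clamp it to the valid range -- the same
 * adjust-and-clamp done per RF path at the end of the removed callback. */
static int track_index(int index, unsigned int now, unsigned int ref,
		       unsigned int delta)
{
	if (now > ref)
		index -= (int)delta;
	else
		index += (int)delta;

	if (index > OFDM_TABLE_SIZE - 1)
		index = OFDM_TABLE_SIZE - 1;
	else if (index < OFDM_MIN_INDEX)
		index = OFDM_MIN_INDEX;
	return index;
}

int main(void)
{
	unsigned int ref = 0x1a, now = 0x1e;
	unsigned int delta = thermal_delta(now, ref);

	printf("delta=%u, index 20 -> %d\n", delta, track_index(20, now, ref, delta));
	return 0;
}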
index 463439e..36302eb 100644 (file)
@@ -192,5 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
 void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
 void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
 void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
 
 #endif
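
dm.h keeps the rate-adaptive entry points (rtl92c_dm_init_rate_adaptive_mask, rtl92c_dm_check_txpower_tracking) and gains a prototype for rtl92ce_dm_dynamic_txpower. The rate-adaptive refresh removed above classifies the smoothed RSSI with thresholds that depend on the previous state; a compact user-space sketch of that hysteresis, with placeholder enum names standing in for the driver's DM_RATR_STA_* values:

#include <stdio.h>

enum ratr_state { RATR_STA_HIGH, RATR_STA_MIDDLE, RATR_STA_LOW };

/* Same thresholds as rtl92c_dm_refresh_rate_adaptive_mask(): a station
 * already in the MIDDLE band needs RSSI > 55 to be promoted, one already
 * in the LOW band needs RSSI > 25 to leave it; everything else uses 50/20. */
static enum ratr_state classify_rssi(long rssi, enum ratr_state prev)
{
	long high = 50, low = 20;

	if (prev == RATR_STA_MIDDLE)
		high = 55;
	else if (prev == RATR_STA_LOW)
		low = 25;

	if (rssi > high)
		return RATR_STA_HIGH;
	if (rssi > low)
		return RATR_STA_MIDDLE;
	return RATR_STA_LOW;
}

int main(void)
{
	enum ratr_state state = RATR_STA_MIDDLE;
	long samples[] = { 52, 56, 48, 23, 19 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		state = classify_rssi(samples[i], state);
		printf("rssi=%ld -> state=%d\n", samples[i], (int)state);
	}
	return 0;
}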
index 1c41a0c..05477f4 100644 (file)
@@ -37,7 +37,6 @@
 #include "def.h"
 #include "phy.h"
 #include "dm.h"
-#include "fw.h"
 #include "led.h"
 #include "hw.h"
 
@@ -124,7 +123,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_FW_PSMODE_STATUS:
-               *((bool *) (val)) = ppsc->b_fw_current_inpsmode;
+               *((bool *) (val)) = ppsc->fw_current_inpsmode;
                break;
        case HW_VAR_CORRECT_TSF:{
                u64 tsf;
@@ -173,15 +172,15 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_BASIC_RATE:{
-                       u16 b_rate_cfg = ((u16 *) val)[0];
+                       u16 rate_cfg = ((u16 *) val)[0];
                        u8 rate_index = 0;
-                       b_rate_cfg = b_rate_cfg & 0x15f;
-                       b_rate_cfg |= 0x01;
-                       rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
+                       rate_cfg &= 0x15f;
+                       rate_cfg |= 0x01;
+                       rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
                        rtl_write_byte(rtlpriv, REG_RRSR + 1,
-                                      (b_rate_cfg >> 8)&0xff);
-                       while (b_rate_cfg > 0x1) {
-                               b_rate_cfg = (b_rate_cfg >> 1);
+                                      (rate_cfg >> 8)&0xff);
+                       while (rate_cfg > 0x1) {
+                               rate_cfg = (rate_cfg >> 1);
                                rate_index++;
                        }
                        rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
@@ -318,15 +317,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
        case HW_VAR_AC_PARAM:{
                        u8 e_aci = *((u8 *) val);
-                       u32 u4b_ac_param = 0;
+                       u32 u4b_ac_param;
+                       u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+                       u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+                       u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
 
-                       u4b_ac_param |= (u32) mac->ac[e_aci].aifs;
-                       u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min
+                       u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+                       u4b_ac_param |= ((u32)cw_min
                                         & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
-                       u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max &
+                       u4b_ac_param |= ((u32)cw_max &
                                         0xF) << AC_PARAM_ECW_MAX_OFFSET;
-                       u4b_ac_param |= (u32) mac->ac[e_aci].tx_op
-                           << AC_PARAM_TXOP_LIMIT_OFFSET;
+                       u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
 
                        RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
                                 ("queue:%x, ac_param:%x\n", e_aci,
@@ -469,12 +470,12 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_FW_PSMODE_STATUS:
-               ppsc->b_fw_current_inpsmode = *((bool *) val);
+               ppsc->fw_current_inpsmode = *((bool *) val);
                break;
        case HW_VAR_H2C_FW_JOINBSSRPT:{
                        u8 mstatus = (*(u8 *) val);
                        u8 tmp_regcr, tmp_reg422;
-                       bool b_recover = false;
+                       bool recover = false;
 
                        if (mstatus == RT_MEDIA_CONNECT) {
                                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
@@ -491,7 +492,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                    rtl_read_byte(rtlpriv,
                                                  REG_FWHW_TXQ_CTRL + 2);
                                if (tmp_reg422 & BIT(6))
-                                       b_recover = true;
+                                       recover = true;
                                rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
                                               tmp_reg422 & (~BIT(6)));
 
@@ -500,7 +501,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
                                _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
 
-                               if (b_recover) {
+                               if (recover) {
                                        rtl_write_byte(rtlpriv,
                                                       REG_FWHW_TXQ_CTRL + 2,
                                                       tmp_reg422);
@@ -868,7 +869,7 @@ static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
        rtl_write_word(rtlpriv, 0x350, 0x870c);
        rtl_write_byte(rtlpriv, 0x352, 0x1);
 
-       if (ppsc->b_support_backdoor)
+       if (ppsc->support_backdoor)
                rtl_write_byte(rtlpriv, 0x349, 0x1b);
        else
                rtl_write_byte(rtlpriv, 0x349, 0x03);
@@ -940,15 +941,15 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
                         ("Failed to download FW. Init HW "
                          "without FW now..\n"));
                err = 1;
-               rtlhal->bfw_ready = false;
+               rtlhal->fw_ready = false;
                return err;
        } else {
-               rtlhal->bfw_ready = true;
+               rtlhal->fw_ready = true;
        }
 
        rtlhal->last_hmeboxnum = 0;
-       rtl92c_phy_mac_config(hw);
-       rtl92c_phy_bb_config(hw);
+       rtl92ce_phy_mac_config(hw);
+       rtl92ce_phy_bb_config(hw);
        rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
        rtl92c_phy_rf_config(hw);
        rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
@@ -1170,21 +1171,20 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
        u32 u4b_ac_param;
+       u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
+       u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
+       u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
 
        rtl92c_dm_init_edca_turbo(hw);
-
        u4b_ac_param = (u32) mac->ac[aci].aifs;
-       u4b_ac_param |=
-           ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
-       u4b_ac_param |=
-           ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
-       u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET;
+       u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
+       u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
+       u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
        RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
                 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
-                 aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min,
-                 mac->ac[aci].cw_max, mac->ac[aci].tx_op));
+                 aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
+                 cw_max, tx_op));
        switch (aci) {
        case AC1_BK:
                rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
@@ -1237,7 +1237,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
-       if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready)
+       if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
                rtl92c_firmware_selfreset(hw);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
        rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
@@ -1335,19 +1335,6 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
        rtl92ce_enable_interrupt(hw);
 }
 
-static u8 _rtl92c_get_chnl_group(u8 chnl)
-{
-       u8 group;
-
-       if (chnl < 3)
-               group = 0;
-       else if (chnl < 9)
-               group = 1;
-       else
-               group = 2;
-       return group;
-}
-
 static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
                                                 bool autoload_fail,
                                                 u8 *hwinfo)
@@ -1568,7 +1555,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
        rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
 
        if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
-               rtlefuse->b_apk_thermalmeterignore = true;
+               rtlefuse->apk_thermalmeterignore = true;
 
        rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
        RTPRINT(rtlpriv, FINIT, INIT_TxPower,
@@ -1625,7 +1612,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
 
        rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
        rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
-       rtlefuse->b_txpwr_fromeprom = true;
+       rtlefuse->txpwr_fromeprom = true;
        rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -1668,7 +1655,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
 
        switch (rtlhal->oem_id) {
        case RT_CID_819x_HP:
-               pcipriv->ledctl.bled_opendrain = true;
+               pcipriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819x_Lenovo:
        case RT_CID_DEFAULT:
@@ -1693,10 +1680,10 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
 
        rtlhal->version = _rtl92ce_read_chip_version(hw);
        if (get_rf_type(rtlphy) == RF_1T1R)
-               rtlpriv->dm.brfpath_rxenable[0] = true;
+               rtlpriv->dm.rfpath_rxenable[0] = true;
        else
-               rtlpriv->dm.brfpath_rxenable[0] =
-                   rtlpriv->dm.brfpath_rxenable[1] = true;
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
                                                rtlhal->version));
        tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
@@ -1725,18 +1712,18 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
        u32 ratr_value = (u32) mac->basic_rates;
-       u8 *p_mcsrate = mac->mcs;
+       u8 *mcsrate = mac->mcs;
        u8 ratr_index = 0;
-       u8 b_nmode = mac->ht_enable;
+       u8 nmode = mac->ht_enable;
        u8 mimo_ps = 1;
        u16 shortgi_rate;
        u32 tmp_ratr_value;
-       u8 b_curtxbw_40mhz = mac->bw_40;
-       u8 b_curshortgi_40mhz = mac->sgi_40;
-       u8 b_curshortgi_20mhz = mac->sgi_20;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
        enum wireless_mode wirelessmode = mac->mode;
 
-       ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12;
+       ratr_value |= ((*(u16 *) (mcsrate))) << 12;
 
        switch (wirelessmode) {
        case WIRELESS_MODE_B:
@@ -1750,7 +1737,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
                break;
        case WIRELESS_MODE_N_24G:
        case WIRELESS_MODE_N_5G:
-               b_nmode = 1;
+               nmode = 1;
                if (mimo_ps == 0) {
                        ratr_value &= 0x0007F005;
                } else {
@@ -1776,9 +1763,8 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
 
        ratr_value &= 0x0FFFFFFF;
 
-       if (b_nmode && ((b_curtxbw_40mhz &&
-                        b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
-                                                b_curshortgi_20mhz))) {
+       if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz &&
+                      curshortgi_20mhz))) {
 
                ratr_value |= 0x10000000;
                tmp_ratr_value = (ratr_value >> 12);
@@ -1806,11 +1792,11 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
        u32 ratr_bitmap = (u32) mac->basic_rates;
        u8 *p_mcsrate = mac->mcs;
        u8 ratr_index;
-       u8 b_curtxbw_40mhz = mac->bw_40;
-       u8 b_curshortgi_40mhz = mac->sgi_40;
-       u8 b_curshortgi_20mhz = mac->sgi_20;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
        enum wireless_mode wirelessmode = mac->mode;
-       bool b_shortgi = false;
+       bool shortgi = false;
        u8 rate_mask[5];
        u8 macid = 0;
        u8 mimops = 1;
@@ -1852,7 +1838,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
                } else {
                        if (rtlphy->rf_type == RF_1T2R ||
                            rtlphy->rf_type == RF_1T1R) {
-                               if (b_curtxbw_40mhz) {
+                               if (curtxbw_40mhz) {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x000f0000;
                                        else if (rssi_level == 2)
@@ -1868,7 +1854,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
                                                ratr_bitmap &= 0x000ff005;
                                }
                        } else {
-                               if (b_curtxbw_40mhz) {
+                               if (curtxbw_40mhz) {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x0f0f0000;
                                        else if (rssi_level == 2)
@@ -1886,13 +1872,13 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
                        }
                }
 
-               if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
-                   (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
+               if ((curtxbw_40mhz && curshortgi_40mhz) ||
+                   (!curtxbw_40mhz && curshortgi_20mhz)) {
 
                        if (macid == 0)
-                               b_shortgi = true;
+                               shortgi = true;
                        else if (macid == 1)
-                               b_shortgi = false;
+                               shortgi = false;
                }
                break;
        default:
@@ -1906,9 +1892,9 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
        }
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 ("ratr_bitmap :%x\n", ratr_bitmap));
-       *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
-                                      (ratr_index << 28));
-       rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
+       *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+                                      (ratr_index << 28);
+       rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
                                                 "ratr_val:%x, %x:%x:%x:%x:%x\n",
                                                 ratr_index, ratr_bitmap,
@@ -1940,13 +1926,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
        u8 u1tmp;
-       bool b_actuallyset = false;
+       bool actuallyset = false;
        unsigned long flag;
 
        if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
                return false;
 
-       if (ppsc->b_swrf_processing)
+       if (ppsc->swrf_processing)
                return false;
 
        spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
@@ -1972,24 +1958,24 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
        u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
        e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
 
-       if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+       if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
                RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                         ("GPIOChangeRF  - HW Radio ON, RF ON\n"));
 
                e_rfpowerstate_toset = ERFON;
-               ppsc->b_hwradiooff = false;
-               b_actuallyset = true;
-       } else if ((ppsc->b_hwradiooff == false)
+               ppsc->hwradiooff = false;
+               actuallyset = true;
+       } else if ((ppsc->hwradiooff == false)
                   && (e_rfpowerstate_toset == ERFOFF)) {
                RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                         ("GPIOChangeRF  - HW Radio OFF, RF OFF\n"));
 
                e_rfpowerstate_toset = ERFOFF;
-               ppsc->b_hwradiooff = true;
-               b_actuallyset = true;
+               ppsc->hwradiooff = true;
+               actuallyset = true;
        }
 
-       if (b_actuallyset) {
+       if (actuallyset) {
                if (e_rfpowerstate_toset == ERFON) {
                        if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
                            RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
@@ -2028,7 +2014,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
        }
 
        *valid = 1;
-       return !ppsc->b_hwradiooff;
+       return !ppsc->hwradiooff;
 
 }
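
Both the HW_VAR_AC_PARAM case and rtl92ce_set_qos() above now read cw_min/cw_max/tx_op through le16_to_cpu() before packing the 32-bit EDCA parameter word, and the TXOP shift constant is renamed to AC_PARAM_TXOP_OFFSET. A self-contained sketch of the packing; the numeric offsets here are assumptions standing in for the driver's AC_PARAM_* constants:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout: AIFS in the low byte, ECWmin/ECWmax as nibbles in the
 * second byte, TXOP limit in the upper half-word.  The driver takes the real
 * offsets (AC_PARAM_ECW_MIN_OFFSET and friends) from its reg.h. */
#define ECW_MIN_OFFSET	8
#define ECW_MAX_OFFSET	12
#define TXOP_OFFSET	16

static uint32_t pack_ac_param(uint8_t aifs, uint16_t cw_min,
			      uint16_t cw_max, uint16_t tx_op)
{
	uint32_t v = aifs;

	v |= (uint32_t)(cw_min & 0xF) << ECW_MIN_OFFSET;
	v |= (uint32_t)(cw_max & 0xF) << ECW_MAX_OFFSET;
	v |= (uint32_t)tx_op << TXOP_OFFSET;
	return v;	/* written to the REG_EDCA_*_PARAM register as one dword */
}

int main(void)
{
	/* example inputs only */
	printf("0x%08x\n", pack_ac_param(0x25, 4, 10, 0));
	return 0;
}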
 
index 305c819..a3dfdb6 100644 (file)
@@ -30,6 +30,8 @@
 #ifndef __RTL92CE_HW_H__
 #define __RTL92CE_HW_H__
 
+#define H2C_RA_MASK    6
+
 void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -53,5 +55,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
 void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
                     u8 *p_macaddr, bool is_group, u8 enc_algo,
                     bool is_wepkey, bool clear_all);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+                        u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
 
 #endif
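
hw.h now exposes H2C_RA_MASK and the rtl92c_fill_h2c_cmd()/firmware helpers used by the rate-mask path: the hw.c hunk above builds a 5-byte payload holding the 28-bit rate bitmap with the RATR index in the top nibble, plus a MAC-ID/short-GI byte. A sketch of that packing; the explicit little-endian byte order is an assumption (the driver stores the 32-bit word directly into the array), and the final fill_h2c_cmd() call is presumed from the added prototype rather than shown in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Bytes 0-3: (ratr_bitmap & 0x0fffffff) | (ratr_index << 28), assumed LE.
 * Byte 4:    macid | 0x20 when short GI is usable | a fixed 0x80 flag.
 * The result would be handed to the firmware as an H2C_RA_MASK command. */
static void pack_ra_mask(uint8_t out[5], uint32_t ratr_bitmap,
			 uint8_t ratr_index, uint8_t macid, int shortgi)
{
	uint32_t word = (ratr_bitmap & 0x0fffffff) | ((uint32_t)ratr_index << 28);

	out[0] = word & 0xff;
	out[1] = (word >> 8) & 0xff;
	out[2] = (word >> 16) & 0xff;
	out[3] = (word >> 24) & 0xff;
	out[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
}

int main(void)
{
	uint8_t buf[5];

	pack_ra_mask(buf, 0x000ff005, 2, 0, 1);
	for (int i = 0; i < 5; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}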
index 78a0569..7b1da8d 100644 (file)
@@ -57,7 +57,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                         ("switch case not process\n"));
                break;
        }
-       pled->b_ledon = true;
+       pled->ledon = true;
 }
 
 void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -76,7 +76,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.bled_opendrain == true)
+               if (pcipriv->ledctl.led_opendrain == true)
                        rtl_write_byte(rtlpriv, REG_LEDCFG2,
                                       (ledcfg | BIT(1) | BIT(5) | BIT(6)));
                else
@@ -92,7 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                         ("switch case not process\n"));
                break;
        }
-       pled->b_ledon = false;
+       pled->ledon = false;
 }
 
 void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
index 4504411..d0541e8 100644 (file)
 #include "../ps.h"
 #include "reg.h"
 #include "def.h"
+#include "hw.h"
 #include "phy.h"
 #include "rf.h"
 #include "dm.h"
 #include "table.h"
 
-static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset);
-static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data);
-static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset);
-static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data);
-static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
-static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
-                                                 u8 configtype);
-static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
-                                                   u8 configtype);
-static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
-                                            u32 cmdtableidx, u32 cmdtablesz,
-                                            enum swchnlcmd_id cmdid, u32 para1,
-                                            u32 para2, u32 msdelay);
-static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
-                                            u8 channel, u8 *stage, u8 *step,
-                                            u32 *delay);
-static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
-                                      enum wireless_mode wirelessmode,
-                                      long power_indbm);
-static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
-                                             enum radio_path rfpath);
-static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
-                                        enum wireless_mode wirelessmode,
-                                        u8 txpwridx);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 returnvalue, originalvalue, bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
-                                              "bitmask(%#x)\n", regaddr,
-                                              bitmask));
-       originalvalue = rtl_read_dword(rtlpriv, regaddr);
-       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
-       returnvalue = (originalvalue & bitmask) >> bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
-                                              "Addr[0x%x]=0x%x\n", bitmask,
-                                              regaddr, originalvalue));
-
-       return returnvalue;
-
-}
-
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
-                          u32 regaddr, u32 bitmask, u32 data)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 originalvalue, bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
-                                              " data(%#x)\n", regaddr, bitmask,
-                                              data));
-
-       if (bitmask != MASKDWORD) {
-               originalvalue = rtl_read_dword(rtlpriv, regaddr);
-               bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
-               data = ((originalvalue & (~bitmask)) | (data << bitshift));
-       }
-
-       rtl_write_dword(rtlpriv, regaddr, data);
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
-                                              " data(%#x)\n", regaddr, bitmask,
-                                              data));
-
-}
-
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
                            enum radio_path rfpath, u32 regaddr, u32 bitmask)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -149,7 +73,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
        return readback_value;
 }
 
-void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
                           enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data)
 {
@@ -197,137 +121,25 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
                                               bitmask, data, rfpath));
 }
 
-static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset)
-{
-       RT_ASSERT(false, ("deprecated!\n"));
-       return 0;
-}
-
-static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data)
-{
-       RT_ASSERT(false, ("deprecated!\n"));
-}
-
-static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-       u32 newoffset;
-       u32 tmplong, tmplong2;
-       u8 rfpi_enable = 0;
-       u32 retvalue;
-
-       offset &= 0x3f;
-       newoffset = offset;
-       if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
-               return 0xFFFFFFFF;
-       }
-       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
-       if (rfpath == RF90_PATH_A)
-               tmplong2 = tmplong;
-       else
-               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
-       tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
-           (newoffset << 23) | BLSSIREADEDGE;
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
-                     tmplong & (~BLSSIREADEDGE));
-       mdelay(1);
-       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
-       mdelay(1);
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
-                     tmplong | BLSSIREADEDGE);
-       mdelay(1);
-       if (rfpath == RF90_PATH_A)
-               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
-                                                BIT(8));
-       else if (rfpath == RF90_PATH_B)
-               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
-                                                BIT(8));
-       if (rfpi_enable)
-               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
-                                        BLSSIREADBACKDATA);
-       else
-               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
-                                        BLSSIREADBACKDATA);
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
-                                              rfpath, pphyreg->rflssi_readback,
-                                              retvalue));
-       return retvalue;
-}
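/*
 * Note on the serial read above: the 6-bit RF register offset is placed at
 * bit 23 of the target path's HSSI parameter-2 register together with the
 * LSSI read-edge bit, the read edge on the path-A parameter-2 register is
 * pulsed low and then high with 1 ms settling delays, and the result is
 * fetched from either the PI or the SI readback register depending on
 * whether RF PI mode is enabled (bit 8 of the HSSI parameter-1 register).
 */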
-
-static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data)
-{
-       u32 data_and_addr;
-       u32 newoffset;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
-       if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
-               return;
-       }
-       offset &= 0x3f;
-       newoffset = offset;
-       data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
-       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
-                                              rfpath, pphyreg->rf3wire_offset,
-                                              data_and_addr));
-}
-
-static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
-{
-       u32 i;
-
-       for (i = 0; i <= 31; i++) {
-               if (((bitmask >> i) & 0x1) == 1)
-                       break;
-       }
-       return i;
-}
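/*
 * The helper above returns the position of the least-significant set bit of
 * a mask; the query/set BB-register paths use it for a masked
 * read-modify-write.  A minimal standalone sketch of that pattern
 * (illustrative only, plain uint32_t in place of the driver's register
 * accessors; the mask is assumed to be non-zero):
 */
#include <stdint.h>

static uint32_t mask_shift(uint32_t mask)
{
        uint32_t i;

        for (i = 0; i < 32; i++)
                if ((mask >> i) & 0x1)
                        break;
        return i;
}

/* Extract a bit field: (reg & mask) >> shift, as in the query path. */
static uint32_t field_get(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> mask_shift(mask);
}

/* Update a bit field: clear the masked bits, then OR in the shifted value. */
static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t val)
{
        return (reg & ~mask) | (val << mask_shift(mask));
}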
-
-static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
-{
-       rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
-       rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
-       rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
-       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
-       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
-       rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
-}
-
-bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        bool is92c = IS_92C_SERIAL(rtlhal->version);
-       bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw);
+       bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
 
        if (is92c)
                rtl_write_byte(rtlpriv, 0x14, 0x71);
        return rtstatus;
 }
 
-bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
 {
        bool rtstatus = true;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u16 regval;
        u32 regvaldw;
-       u8 b_reg_hwparafile = 1;
+       u8 reg_hwparafile = 1;
 
        _rtl92c_phy_init_bb_rf_register_definition(hw);
        regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
@@ -342,56 +154,12 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
        regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
        rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
-       if (b_reg_hwparafile == 1)
+       if (reg_hwparafile == 1)
                rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
        return rtstatus;
 }
 
-bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
-{
-       return rtl92c_phy_rf6052_config(hw);
-}
-
-static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       bool rtstatus;
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
-       rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
-                                                BASEBAND_CONFIG_PHY_REG);
-       if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
-               return false;
-       }
-       if (rtlphy->rf_type == RF_1T2R) {
-               _rtl92c_phy_bb_config_1t(hw);
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
-       }
-       if (rtlefuse->autoload_failflag == false) {
-               rtlphy->pwrgroup_cnt = 0;
-               rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
-                                                  BASEBAND_CONFIG_PHY_REG);
-       }
-       if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
-               return false;
-       }
-       rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
-                                                BASEBAND_CONFIG_AGC_TAB);
-       if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
-               return false;
-       }
-       rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
-                                               RFPGA0_XA_HSSIPARAMETER2,
-                                               0x200));
-       return true;
-}
-
-static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u32 i;
@@ -408,11 +176,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
        return true;
 }
 
-void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
-{
-}
-
-static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                                  u8 configtype)
 {
        int i;
@@ -456,7 +220,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                  phy_regarray_table[i],
                                  phy_regarray_table[i + 1]));
                }
-               rtl92c_phy_config_bb_external_pa(hw);
        } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
                for (i = 0; i < agctab_arraylen; i = i + 2) {
                        rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
@@ -472,175 +235,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
        return true;
 }
 
-static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
-                                                  u32 regaddr, u32 bitmask,
-                                                  u32 data)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       if (regaddr == RTXAGC_A_RATE18_06) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][0]));
-       }
-       if (regaddr == RTXAGC_A_RATE54_24) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][1]));
-       }
-       if (regaddr == RTXAGC_A_CCK1_MCS32) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][6]));
-       }
-       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][7]));
-       }
-       if (regaddr == RTXAGC_A_MCS03_MCS00) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][2]));
-       }
-       if (regaddr == RTXAGC_A_MCS07_MCS04) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][3]));
-       }
-       if (regaddr == RTXAGC_A_MCS11_MCS08) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][4]));
-       }
-       if (regaddr == RTXAGC_A_MCS15_MCS12) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][5]));
-       }
-       if (regaddr == RTXAGC_B_RATE18_06) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][8]));
-       }
-       if (regaddr == RTXAGC_B_RATE54_24) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][9]));
-       }
-
-       if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][14]));
-       }
-
-       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][15]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS03_MCS00) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][10]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS07_MCS04) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][11]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS11_MCS08) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][12]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS15_MCS12) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][13]));
-
-               rtlphy->pwrgroup_cnt++;
-       }
-}
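/*
 * The routine above captures the per-rate TXAGC values written from the
 * PHY_REG_PG table: each recognised register address (and, for the shared
 * CCK register RTXAGC_B_CCK11_A_CCK2_11, its bitmask) selects one of the
 * slots 0-15 of mcs_txpwrlevel_origoffset for the current power group, and
 * pwrgroup_cnt is advanced once the final register, RTXAGC_B_MCS15_MCS12,
 * has been stored.
 */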
-
-static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
                                                    u8 configtype)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -679,13 +274,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
        return true;
 }
 
-static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
-                                             enum radio_path rfpath)
-{
-       return true;
-}
-
-bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath)
 {
 
@@ -740,7 +329,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                udelay(1);
                        }
                }
-               _rtl92c_phy_config_rf_external_pa(hw, rfpath);
                break;
        case RF90_PATH_B:
                for (i = 0; i < radiob_arraylen; i = i + 2) {
@@ -776,346 +364,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        return true;
 }
 
-void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->default_initialgain[0] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
-       rtlphy->default_initialgain[1] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
-       rtlphy->default_initialgain[2] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
-       rtlphy->default_initialgain[3] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                ("Default initial gain (c50=0x%x, "
-                 "c58=0x%x, c60=0x%x, c68=0x%x\n",
-                 rtlphy->default_initialgain[0],
-                 rtlphy->default_initialgain[1],
-                 rtlphy->default_initialgain[2],
-                 rtlphy->default_initialgain[3]));
-
-       rtlphy->framesync = (u8) rtl_get_bbreg(hw,
-                                              ROFDM0_RXDETECTOR3, MASKBYTE0);
-       rtlphy->framesync_c34 = rtl_get_bbreg(hw,
-                                             ROFDM0_RXDETECTOR2, MASKDWORD);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                ("Default framesync (0x%x) = 0x%x\n",
-                 ROFDM0_RXDETECTOR3, rtlphy->framesync));
-}
-
-static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
-           RFPGA0_XA_LSSIPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
-           RFPGA0_XB_LSSIPARAMETER;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
-       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
-       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
-           RFPGA0_XAB_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
-           RFPGA0_XAB_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
-           RFPGA0_XCD_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
-           RFPGA0_XCD_SWITCHCONTROL;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
-           ROFDM0_XARXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
-           ROFDM0_XBRXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
-           ROFDM0_XCRXIQIMBANLANCE;
-       rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
-           ROFDM0_XDRXIQIMBALANCE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
-       rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
-       rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
-           ROFDM0_XATXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
-           ROFDM0_XBTXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
-           ROFDM0_XCTXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
-           ROFDM0_XDTXIQIMBALANCE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
-           RFPGA0_XA_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
-           RFPGA0_XB_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
-           RFPGA0_XC_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
-           RFPGA0_XD_LSSIREADBACK;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
-           TRANSCEIVEA_HSPI_READBACK;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
-           TRANSCEIVEB_HSPI_READBACK;
-
-}
-
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 txpwr_level;
-       long txpwr_dbm;
-
-       txpwr_level = rtlphy->cur_cck_txpwridx;
-       txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
-                                                WIRELESS_MODE_B, txpwr_level);
-       txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
-           rtlefuse->legacy_ht_txpowerdiff;
-       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
-                                        WIRELESS_MODE_G,
-                                        txpwr_level) > txpwr_dbm)
-               txpwr_dbm =
-                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
-                                                txpwr_level);
-       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
-       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
-                                        WIRELESS_MODE_N_24G,
-                                        txpwr_level) > txpwr_dbm)
-               txpwr_dbm =
-                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
-                                                txpwr_level);
-       *powerlevel = txpwr_dbm;
-}
-
-static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
-                                     u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 index = (channel - 1);
-
-       cckpowerlevel[RF90_PATH_A] =
-           rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
-       cckpowerlevel[RF90_PATH_B] =
-           rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
-       if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
-               ofdmpowerlevel[RF90_PATH_A] =
-                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
-               ofdmpowerlevel[RF90_PATH_B] =
-                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
-       } else if (get_rf_type(rtlphy) == RF_2T2R) {
-               ofdmpowerlevel[RF90_PATH_A] =
-                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
-               ofdmpowerlevel[RF90_PATH_B] =
-                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
-       }
-}
-
-static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
-                                        u8 channel, u8 *cckpowerlevel,
-                                        u8 *ofdmpowerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
-       rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
-}
-
-void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
-{
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 cckpowerlevel[2], ofdmpowerlevel[2];
-
-       if (rtlefuse->b_txpwr_fromeprom == false)
-               return;
-       _rtl92c_get_txpower_index(hw, channel,
-                                 &cckpowerlevel[0], &ofdmpowerlevel[0]);
-       _rtl92c_ccxpower_index_check(hw,
-                                    channel, &cckpowerlevel[0],
-                                    &ofdmpowerlevel[0]);
-       rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
-       rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
-}
-
-bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 idx;
-       u8 rf_path;
-
-       u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
-                                                     WIRELESS_MODE_B,
-                                                     power_indbm);
-       u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
-                                                      WIRELESS_MODE_N_24G,
-                                                      power_indbm);
-       if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
-               ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
-       else
-               ofdmtxpwridx = 0;
-       RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
-                ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
-                 power_indbm, ccktxpwridx, ofdmtxpwridx));
-       for (idx = 0; idx < 14; idx++) {
-               for (rf_path = 0; rf_path < 2; rf_path++) {
-                       rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
-                       rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
-                           ofdmtxpwridx;
-                       rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
-                           ofdmtxpwridx;
-               }
-       }
-       rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
-       return true;
-}
-
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
-{
-}
-
-static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
-                                      enum wireless_mode wirelessmode,
-                                      long power_indbm)
-{
-       u8 txpwridx;
-       long offset;
-
-       switch (wirelessmode) {
-       case WIRELESS_MODE_B:
-               offset = -7;
-               break;
-       case WIRELESS_MODE_G:
-       case WIRELESS_MODE_N_24G:
-               offset = -8;
-               break;
-       default:
-               offset = -8;
-               break;
-       }
-
-       if ((power_indbm - offset) > 0)
-               txpwridx = (u8) ((power_indbm - offset) * 2);
-       else
-               txpwridx = 0;
-
-       if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
-               txpwridx = MAX_TXPWR_IDX_NMODE_92S;
-
-       return txpwridx;
-}
-
-static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
-                                        enum wireless_mode wirelessmode,
-                                        u8 txpwridx)
-{
-       long offset;
-       long pwrout_dbm;
-
-       switch (wirelessmode) {
-       case WIRELESS_MODE_B:
-               offset = -7;
-               break;
-       case WIRELESS_MODE_G:
-       case WIRELESS_MODE_N_24G:
-               offset = -8;
-               break;
-       default:
-               offset = -8;
-               break;
-       }
-       pwrout_dbm = txpwridx / 2 + offset;
-       return pwrout_dbm;
-}
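/*
 * The two conversions above are affine maps between the hardware power
 * index and dBm: index = (dBm - offset) * 2 and dBm = index / 2 + offset,
 * with offset -7 for 802.11b (CCK) and -8 otherwise, and the index clamped
 * to MAX_TXPWR_IDX_NMODE_92S.  A standalone sketch of the arithmetic
 * (illustrative only; the enum and clamp parameter stand in for the
 * driver's wireless_mode and limit):
 */
#include <stdint.h>

enum sketch_mode { SKETCH_MODE_B, SKETCH_MODE_G, SKETCH_MODE_N_24G };

static long sketch_offset(enum sketch_mode m)
{
        return (m == SKETCH_MODE_B) ? -7 : -8;
}

static uint8_t sketch_dbm_to_idx(enum sketch_mode m, long dbm, uint8_t max_idx)
{
        long idx = (dbm - sketch_offset(m)) * 2;

        if (idx < 0)
                idx = 0;
        if (idx > max_idx)
                idx = max_idx;
        return (uint8_t)idx;
}

static long sketch_idx_to_dbm(enum sketch_mode m, uint8_t idx)
{
        return idx / 2 + sketch_offset(m);
}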
-
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       enum io_type iotype;
-
-       if (!is_hal_stop(rtlhal)) {
-               switch (operation) {
-               case SCAN_OPT_BACKUP:
-                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-
-                       break;
-               case SCAN_OPT_RESTORE:
-                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("Unknown Scan Backup operation.\n"));
-                       break;
-               }
-       }
-}
-
-void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1183,656 +432,18 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
 }
 
-void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
-                           enum nl80211_channel_type ch_type)
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
 {
+       u8 tmpreg;
+       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       u8 tmp_bw = rtlphy->current_chan_bw;
 
-       if (rtlphy->set_bwmode_inprogress)
-               return;
-       rtlphy->set_bwmode_inprogress = true;
-       if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
-               rtl92c_phy_set_bw_mode_callback(hw);
-       else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                        ("FALSE driver sleep or unload\n"));
-               rtlphy->set_bwmode_inprogress = false;
-               rtlphy->current_chan_bw = tmp_bw;
-       }
-}
+       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
 
-void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u32 delay;
-
-       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
-                ("switch to channel%d\n", rtlphy->current_channel));
-       if (is_hal_stop(rtlhal))
-               return;
-       do {
-               if (!rtlphy->sw_chnl_inprogress)
-                       break;
-               if (!_rtl92c_phy_sw_chnl_step_by_step
-                   (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
-                    &rtlphy->sw_chnl_step, &delay)) {
-                       if (delay > 0)
-                               mdelay(delay);
-                       else
-                               continue;
-               } else
-                       rtlphy->sw_chnl_inprogress = false;
-               break;
-       } while (true);
-       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
-}
-
-u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (rtlphy->sw_chnl_inprogress)
-               return 0;
-       if (rtlphy->set_bwmode_inprogress)
-               return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 ("WIRELESS_MODE_G but channel>14"));
-       rtlphy->sw_chnl_inprogress = true;
-       rtlphy->sw_chnl_stage = 0;
-       rtlphy->sw_chnl_step = 0;
-       if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
-               rtl92c_phy_sw_chnl_callback(hw);
-               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        ("sw_chnl_inprogress false, schedule workitem\n"));

-               rtlphy->sw_chnl_inprogress = false;
-       } else {
-               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        ("sw_chnl_inprogress false driver sleep or"
-                         " unload\n"));
-               rtlphy->sw_chnl_inprogress = false;
-       }
-       return 1;
-}
-
-static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
-                                            u8 channel, u8 *stage, u8 *step,
-                                            u32 *delay)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
-       u32 precommoncmdcnt;
-       struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
-       u32 postcommoncmdcnt;
-       struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
-       u32 rfdependcmdcnt;
-       struct swchnlcmd *currentcmd = NULL;
-       u8 rfpath;
-       u8 num_total_rfpath = rtlphy->num_total_rfpath;
-
-       precommoncmdcnt = 0;
-       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
-                                        MAX_PRECMD_CNT,
-                                        CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
-       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
-                                        MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
-
-       postcommoncmdcnt = 0;
-
-       _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
-                                        MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
-
-       rfdependcmdcnt = 0;
-
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 ("illegal channel for Zebra: %d\n", channel));
-
-       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
-                                        MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
-                                        RF_CHNLBW, channel, 10);
-
-       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
-                                        MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
-                                        0);
-
-       do {
-               switch (*stage) {
-               case 0:
-                       currentcmd = &precommoncmd[*step];
-                       break;
-               case 1:
-                       currentcmd = &rfdependcmd[*step];
-                       break;
-               case 2:
-                       currentcmd = &postcommoncmd[*step];
-                       break;
-               }
-
-               if (currentcmd->cmdid == CMDID_END) {
-                       if ((*stage) == 2) {
-                               return true;
-                       } else {
-                               (*stage)++;
-                               (*step) = 0;
-                               continue;
-                       }
-               }
-
-               switch (currentcmd->cmdid) {
-               case CMDID_SET_TXPOWEROWER_LEVEL:
-                       rtl92c_phy_set_txpower_level(hw, channel);
-                       break;
-               case CMDID_WRITEPORT_ULONG:
-                       rtl_write_dword(rtlpriv, currentcmd->para1,
-                                       currentcmd->para2);
-                       break;
-               case CMDID_WRITEPORT_USHORT:
-                       rtl_write_word(rtlpriv, currentcmd->para1,
-                                      (u16) currentcmd->para2);
-                       break;
-               case CMDID_WRITEPORT_UCHAR:
-                       rtl_write_byte(rtlpriv, currentcmd->para1,
-                                      (u8) currentcmd->para2);
-                       break;
-               case CMDID_RF_WRITEREG:
-                       for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
-                               rtlphy->rfreg_chnlval[rfpath] =
-                                   ((rtlphy->rfreg_chnlval[rfpath] &
-                                     0xfffffc00) | currentcmd->para2);
-
-                               rtl_set_rfreg(hw, (enum radio_path)rfpath,
-                                             currentcmd->para1,
-                                             RFREG_OFFSET_MASK,
-                                             rtlphy->rfreg_chnlval[rfpath]);
-                       }
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("switch case not processed\n"));
-                       break;
-               }
-
-               break;
-       } while (true);
-
-       (*delay) = currentcmd->msdelay;
-       (*step)++;
-       return false;
-}
-
-static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
-                                            u32 cmdtableidx, u32 cmdtablesz,
-                                            enum swchnlcmd_id cmdid,
-                                            u32 para1, u32 para2, u32 msdelay)
-{
-       struct swchnlcmd *pcmd;
-
-       if (cmdtable == NULL) {
-               RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
-               return false;
-       }
-
-       if (cmdtableidx >= cmdtablesz)
-               return false;
-
-       pcmd = cmdtable + cmdtableidx;
-       pcmd->cmdid = cmdid;
-       pcmd->para1 = para1;
-       pcmd->para2 = para2;
-       pcmd->msdelay = msdelay;
-       return true;
-}
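/*
 * The channel switch above is table driven: the step-by-step routine fills
 * a pre-common table (set the TX power for the target channel), an
 * RF-dependent table (write the channel number into the low bits of
 * RF_CHNLBW on every RF path, with a 10 ms post-delay) and a post-common
 * table (only the terminator), each ended by CMDID_END; it then executes
 * one command per invocation, handing the requested delay back through
 * *delay and advancing *stage/*step until the last stage completes.
 */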
-
-bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
-{
-       return true;
-}
-
-static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
-{
-       u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
-       u8 result = 0x00;
-
-       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
-       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
-       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
-       rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
-                     config_pathb ? 0x28160202 : 0x28160502);
-
-       if (config_pathb) {
-               rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
-               rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
-       }
-
-       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
-       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
-       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
-
-       mdelay(IQK_DELAY_TIME);
-
-       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
-       reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
-       reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
-       reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
-
-       if (!(reg_eac & BIT(28)) &&
-           (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
-           (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
-               result |= 0x01;
-       else
-               return result;
-
-       if (!(reg_eac & BIT(27)) &&
-           (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
-           (((reg_eac & 0x03FF0000) >> 16) != 0x36))
-               result |= 0x02;
-       return result;
-}
-
-static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
-{
-       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
-       u8 result = 0x00;
-
-       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
-       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
-       mdelay(IQK_DELAY_TIME);
-       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
-       reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
-       reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
-       reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
-       reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
-       if (!(reg_eac & BIT(31)) &&
-           (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
-           (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
-               result |= 0x01;
-       else
-               return result;
-
-       if (!(reg_eac & BIT(30)) &&
-           (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
-           (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
-               result |= 0x02;
-       return result;
-}
-
-static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
-                                              bool b_iqk_ok, long result[][8],
-                                              u8 final_candidate, bool btxonly)
-{
-       u32 oldval_0, x, tx0_a, reg;
-       long y, tx0_c;
-
-       if (final_candidate == 0xFF)
-               return;
-       else if (b_iqk_ok) {
-               oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                         MASKDWORD) >> 22) & 0x3FF;
-               x = result[final_candidate][0];
-               if ((x & 0x00000200) != 0)
-                       x = x | 0xFFFFFC00;
-               tx0_a = (x * oldval_0) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
-                             ((x * oldval_0 >> 7) & 0x1));
-               y = result[final_candidate][1];
-               if ((y & 0x00000200) != 0)
-                       y = y | 0xFFFFFC00;
-               tx0_c = (y * oldval_0) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
-                             ((tx0_c & 0x3C0) >> 6));
-               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
-                             (tx0_c & 0x3F));
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
-                             ((y * oldval_0 >> 7) & 0x1));
-               if (btxonly)
-                       return;
-               reg = result[final_candidate][2];
-               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
-               reg = result[final_candidate][3] & 0x3F;
-               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
-               reg = (result[final_candidate][3] >> 6) & 0xF;
-               rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
-       }
-}
-
-static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
-                                              bool b_iqk_ok, long result[][8],
-                                              u8 final_candidate, bool btxonly)
-{
-       u32 oldval_1, x, tx1_a, reg;
-       long y, tx1_c;
-
-       if (final_candidate == 0xFF)
-               return;
-       else if (b_iqk_ok) {
-               oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
-                                         MASKDWORD) >> 22) & 0x3FF;
-               x = result[final_candidate][4];
-               if ((x & 0x00000200) != 0)
-                       x = x | 0xFFFFFC00;
-               tx1_a = (x * oldval_1) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
-                             ((x * oldval_1 >> 7) & 0x1));
-               y = result[final_candidate][5];
-               if ((y & 0x00000200) != 0)
-                       y = y | 0xFFFFFC00;
-               tx1_c = (y * oldval_1) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
-                             ((tx1_c & 0x3C0) >> 6));
-               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
-                             (tx1_c & 0x3F));
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
-                             ((y * oldval_1 >> 7) & 0x1));
-               if (btxonly)
-                       return;
-               reg = result[final_candidate][6];
-               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
-               reg = result[final_candidate][7] & 0x3F;
-               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
-               reg = (result[final_candidate][7] >> 6) & 0xF;
-               rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
-       }
-}
-
-static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
-                                           u32 *addareg, u32 *addabackup,
-                                           u32 registernum)
-{
-       u32 i;
-
-       for (i = 0; i < registernum; i++)
-               addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
-}
-
-static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
-                                          u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
-               macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
-       macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
-static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
-                                             u32 *addareg, u32 *addabackup,
-                                             u32 regiesternum)
-{
-       u32 i;
-
-       for (i = 0; i < regiesternum; i++)
-               rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
-}
-
-static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
-                                            u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
-               rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
-       rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
-}
-
-static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
-                                    u32 *addareg, bool is_patha_on, bool is2t)
-{
-       u32 pathOn;
-       u32 i;
-
-       pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
-       if (false == is2t) {
-               pathOn = 0x0bdb25a0;
-               rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
-       } else {
-               rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
-       }
-
-       for (i = 1; i < IQK_ADDA_REG_NUM; i++)
-               rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
-}
-
-static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
-                                               u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       rtl_write_byte(rtlpriv, macreg[0], 0x3F);
-
-       for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
-               rtl_write_byte(rtlpriv, macreg[i],
-                              (u8) (macbackup[i] & (~BIT(3))));
-       rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
-static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
-{
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
-       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-}
-
-static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
-{
-       u32 mode;
-
-       mode = pi_mode ? 0x01000100 : 0x01000000;
-       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
-       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
-}
-
-static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
-                                          long result[][8], u8 c1, u8 c2)
-{
-       u32 i, j, diff, simularity_bitmap, bound;
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       u8 final_candidate[2] = { 0xFF, 0xFF };
-       bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
-
-       if (is2t)
-               bound = 8;
-       else
-               bound = 4;
-
-       simularity_bitmap = 0;
-
-       for (i = 0; i < bound; i++) {
-               diff = (result[c1][i] > result[c2][i]) ?
-                   (result[c1][i] - result[c2][i]) :
-                   (result[c2][i] - result[c1][i]);
-
-               if (diff > MAX_TOLERANCE) {
-                       if ((i == 2 || i == 6) && !simularity_bitmap) {
-                               if (result[c1][i] + result[c1][i + 1] == 0)
-                                       final_candidate[(i / 4)] = c2;
-                               else if (result[c2][i] + result[c2][i + 1] == 0)
-                                       final_candidate[(i / 4)] = c1;
-                               else
-                                       simularity_bitmap = simularity_bitmap |
-                                           (1 << i);
-                       } else
-                               simularity_bitmap =
-                                   simularity_bitmap | (1 << i);
-               }
-       }
-
-       if (simularity_bitmap == 0) {
-               for (i = 0; i < (bound / 4); i++) {
-                       if (final_candidate[i] != 0xFF) {
-                               for (j = i * 4; j < (i + 1) * 4 - 2; j++)
-                                       result[3][j] =
-                                           result[final_candidate[i]][j];
-                               bresult = false;
-                       }
-               }
-               return bresult;
-       } else if (!(simularity_bitmap & 0x0F)) {
-               for (i = 0; i < 4; i++)
-                       result[3][i] = result[c1][i];
-               return false;
-       } else if (!(simularity_bitmap & 0xF0) && is2t) {
-               for (i = 4; i < 8; i++)
-                       result[3][i] = result[c1][i];
-               return false;
-       } else {
-               return false;
-       }
-
-}
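/*
 * The comparison above checks two IQK result rows element by element
 * against MAX_TOLERANCE, records disagreements in a bitmap, and either
 * merges an agreeing candidate into row 3 or reports failure.  A reduced
 * sketch of the element-wise part only (illustrative; the special-case
 * candidate selection for entries 2 and 6 is omitted):
 */
#include <stddef.h>

static unsigned long iqk_diff_bitmap(const long *a, const long *b,
                                     size_t n, long tol)
{
        unsigned long bitmap = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                long d = (a[i] > b[i]) ? a[i] - b[i] : b[i] - a[i];

                if (d > tol)
                        bitmap |= 1UL << i;
        }
        return bitmap;
}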
-
-static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
-                                    long result[][8], u8 t, bool is2t)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u32 i;
-       u8 patha_ok, pathb_ok;
-       u32 adda_reg[IQK_ADDA_REG_NUM] = {
-               0x85c, 0xe6c, 0xe70, 0xe74,
-               0xe78, 0xe7c, 0xe80, 0xe84,
-               0xe88, 0xe8c, 0xed0, 0xed4,
-               0xed8, 0xedc, 0xee0, 0xeec
-       };
-
-       u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
-               0x522, 0x550, 0x551, 0x040
-       };
-
-       const u32 retrycount = 2;
-
-       u32 bbvalue;
-
-       if (t == 0) {
-               bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
-
-               _rtl92c_phy_save_adda_registers(hw, adda_reg,
-                                               rtlphy->adda_backup, 16);
-               _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
-                                              rtlphy->iqk_mac_backup);
-       }
-       _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
-       if (t == 0) {
-               rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw,
-                                                  RFPGA0_XA_HSSIPARAMETER1,
-                                                  BIT(8));
-       }
-       if (!rtlphy->b_rfpi_enable)
-               _rtl92c_phy_pi_mode_switch(hw, true);
-       if (t == 0) {
-               rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
-               rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
-               rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
-       }
-       rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
-       rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
-       rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
-       if (is2t) {
-               rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
-               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
-       }
-       _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
-                                           rtlphy->iqk_mac_backup);
-       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
-       if (is2t)
-               rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
-       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
-       for (i = 0; i < retrycount; i++) {
-               patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
-               if (patha_ok == 0x03) {
-                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       break;
-               } else if (i == (retrycount - 1) && patha_ok == 0x01)
-                       result[t][0] = (rtl_get_bbreg(hw, 0xe94,
-                                                     MASKDWORD) & 0x3FF0000) >>
-                                                     16;
-               result[t][1] =
-                   (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
-
-       }
-
-       if (is2t) {
-               _rtl92c_phy_path_a_standby(hw);
-               _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
-               for (i = 0; i < retrycount; i++) {
-                       pathb_ok = _rtl92c_phy_path_b_iqk(hw);
-                       if (pathb_ok == 0x03) {
-                               result[t][4] = (rtl_get_bbreg(hw,
-                                                     0xeb4,
-                                                     MASKDWORD) &
-                                               0x3FF0000) >> 16;
-                               result[t][5] =
-                                   (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
-                                    0x3FF0000) >> 16;
-                               result[t][6] =
-                                   (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
-                                    0x3FF0000) >> 16;
-                               result[t][7] =
-                                   (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
-                                    0x3FF0000) >> 16;
-                               break;
-                       } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
-                               result[t][4] = (rtl_get_bbreg(hw,
-                                                     0xeb4,
-                                                     MASKDWORD) &
-                                               0x3FF0000) >> 16;
-                       }
-                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-               }
-       }
-       rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
-       rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
-       rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
-       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
-       if (is2t)
-               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
-       if (t != 0) {
-               if (!rtlphy->b_rfpi_enable)
-                       _rtl92c_phy_pi_mode_switch(hw, false);
-               _rtl92c_phy_reload_adda_registers(hw, adda_reg,
-                                                 rtlphy->adda_backup, 16);
-               _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
-                                                rtlphy->iqk_mac_backup);
-       }
-}
-
-static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
-{
-       u8 tmpreg;
-       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
-
-       if ((tmpreg & 0x70) != 0)
-               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
-       else
-               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       if ((tmpreg & 0x70) != 0)
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+       else
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
 
        if ((tmpreg & 0x70) != 0) {
                rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
@@ -1866,666 +477,6 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
        }
 }
 
-static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
-                                    char delta, bool is2t)
-{
-       /* This routine is deliberately dummied out for later fixes */
-#if 0
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
-       u32 reg_d[PATH_NUM];
-       u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
-
-       u32 bb_backup[APK_BB_REG_NUM];
-       u32 bb_reg[APK_BB_REG_NUM] = {
-               0x904, 0xc04, 0x800, 0xc08, 0x874
-       };
-       u32 bb_ap_mode[APK_BB_REG_NUM] = {
-               0x00000020, 0x00a05430, 0x02040000,
-               0x000800e4, 0x00204000
-       };
-       u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
-               0x00000020, 0x00a05430, 0x02040000,
-               0x000800e4, 0x22204000
-       };
-
-       u32 afe_backup[APK_AFE_REG_NUM];
-       u32 afe_reg[APK_AFE_REG_NUM] = {
-               0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
-               0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
-               0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
-               0xeec
-       };
-
-       u32 mac_backup[IQK_MAC_REG_NUM];
-       u32 mac_reg[IQK_MAC_REG_NUM] = {
-               0x522, 0x550, 0x551, 0x040
-       };
-
-       u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
-               {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
-       };
-
-       u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
-               {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
-       };
-
-       u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
-               {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
-       };
-
-       u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
-               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
-       };
-
-       u32 afe_on_off[PATH_NUM] = {
-               0x04db25a4, 0x0b1b25a4
-       };
-
-       u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
-
-       u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
-
-       u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
-
-       u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
-
-       const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
-               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
-       };
-
-       const u32 apk_normal_setting_value_1[13] = {
-               0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
-               0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
-               0x12680000, 0x00880000, 0x00880000
-       };
-
-       const u32 apk_normal_setting_value_2[16] = {
-               0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
-               0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
-               0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
-               0x00050006
-       };
-
-       const u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
-
-       long bb_offset, delta_v, delta_offset;
-
-       if (!is2t)
-               pathbound = 1;
-
-       for (index = 0; index < PATH_NUM; index++) {
-               apk_offset[index] = apk_normal_offset[index];
-               apk_value[index] = apk_normal_value[index];
-               afe_on_off[index] = 0x6fdb25a4;
-       }
-
-       for (index = 0; index < APK_BB_REG_NUM; index++) {
-               for (path = 0; path < pathbound; path++) {
-                       apk_rf_init_value[path][index] =
-                           apk_normal_rf_init_value[path][index];
-                       apk_rf_value_0[path][index] =
-                           apk_normal_rf_value_0[path][index];
-               }
-               bb_ap_mode[index] = bb_normal_ap_mode[index];
-
-               apkbound = 6;
-       }
-
-       for (index = 0; index < APK_BB_REG_NUM; index++) {
-               if (index == 0)
-                       continue;
-               bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
-       }
-
-       _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
-
-       _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
-
-       for (path = 0; path < pathbound; path++) {
-               if (path == RF90_PATH_A) {
-                       offset = 0xb00;
-                       for (index = 0; index < 11; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-
-                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
-
-                       offset = 0xb68;
-                       for (; index < 13; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
-
-                       offset = 0xb00;
-                       for (index = 0; index < 16; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_2
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-               } else if (path == RF90_PATH_B) {
-                       offset = 0xb70;
-                       for (index = 0; index < 10; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-                       rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
-                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
-
-                       offset = 0xb68;
-                       index = 11;
-                       for (; index < 13; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
-
-                       offset = 0xb60;
-                       for (index = 0; index < 16; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_2
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-               }
-
-               reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
-                                           0xd, MASKDWORD);
-
-               for (index = 0; index < APK_AFE_REG_NUM; index++)
-                       rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
-                                     afe_on_off[path]);
-
-               if (path == RF90_PATH_A) {
-                       for (index = 0; index < APK_BB_REG_NUM; index++) {
-                               if (index == 0)
-                                       continue;
-                               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
-                                             bb_ap_mode[index]);
-                       }
-               }
-
-               _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
-
-               if (path == 0) {
-                       rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
-               } else {
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
-                                     0x10000);
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
-                                     0x1000f);
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
-                                     0x20103);
-               }
-
-               delta_offset = ((delta + 14) / 2);
-               if (delta_offset < 0)
-                       delta_offset = 0;
-               else if (delta_offset > 12)
-                       delta_offset = 12;
-
-               for (index = 0; index < APK_BB_REG_NUM; index++) {
-                       if (index != 1)
-                               continue;
-
-                       tmpreg = apk_rf_init_value[path][index];
-
-                       if (!rtlefuse->b_apk_thermalmeterignore) {
-                               bb_offset = (tmpreg & 0xF0000) >> 16;
-
-                               if (!(tmpreg & BIT(15)))
-                                       bb_offset = -bb_offset;
-
-                               delta_v =
-                                   apk_delta_mapping[index][delta_offset];
-
-                               bb_offset += delta_v;
-
-                               if (bb_offset < 0) {
-                                       tmpreg = tmpreg & (~BIT(15));
-                                       bb_offset = -bb_offset;
-                               } else {
-                                       tmpreg = tmpreg | BIT(15);
-                               }
-
-                               tmpreg =
-                                   (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
-                       }
-
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
-                                     MASKDWORD, 0x8992e);
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
-                                     MASKDWORD, apk_rf_value_0[path][index]);
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
-                                     MASKDWORD, tmpreg);
-
-                       i = 0;
-                       do {
-                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
-                               rtl_set_bbreg(hw, apk_offset[path],
-                                             MASKDWORD, apk_value[0]);
-                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
-                                       ("PHY_APCalibrate() offset 0x%x "
-                                        "value 0x%x\n",
-                                        apk_offset[path],
-                                        rtl_get_bbreg(hw, apk_offset[path],
-                                                      MASKDWORD)));
-
-                               mdelay(3);
-
-                               rtl_set_bbreg(hw, apk_offset[path],
-                                             MASKDWORD, apk_value[1]);
-                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
-                                       ("PHY_APCalibrate() offset 0x%x "
-                                        "value 0x%x\n",
-                                        apk_offset[path],
-                                        rtl_get_bbreg(hw, apk_offset[path],
-                                                      MASKDWORD)));
-
-                               mdelay(20);
-
-                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-
-                               if (path == RF90_PATH_A)
-                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
-                                                              0x03E00000);
-                               else
-                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
-                                                              0xF8000000);
-
-                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
-                                       ("PHY_APCalibrate() offset "
-                                        "0xbd8[25:21] %x\n", tmpreg));
-
-                               i++;
-
-                       } while (tmpreg > apkbound && i < 4);
-
-                       apk_result[path][index] = tmpreg;
-               }
-       }
-
-       _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
-
-       for (index = 0; index < APK_BB_REG_NUM; index++) {
-               if (index == 0)
-                       continue;
-               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
-       }
-
-       _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
-
-       for (path = 0; path < pathbound; path++) {
-               rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
-                             MASKDWORD, reg_d[path]);
-
-               if (path == RF90_PATH_B) {
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
-                                     0x1000f);
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
-                                     0x20101);
-               }
-
-               if (apk_result[path][1] > 6)
-                       apk_result[path][1] = 6;
-       }
-
-       for (path = 0; path < pathbound; path++) {
-               rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
-                             ((apk_result[path][1] << 15) |
-                              (apk_result[path][1] << 10) |
-                              (apk_result[path][1] << 5) |
-                              apk_result[path][1]));
-
-               if (path == RF90_PATH_A)
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
-                                     ((apk_result[path][1] << 15) |
-                                      (apk_result[path][1] << 10) |
-                                      (0x00 << 5) | 0x05));
-               else
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
-                                     ((apk_result[path][1] << 15) |
-                                      (apk_result[path][1] << 10) |
-                                      (0x02 << 5) | 0x05));
-
-               rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
-                             ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
-                              0x08));
-
-       }
-
-       rtlphy->b_apk_done = true;
-#endif
-}
-
-static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
-                                         bool bmain, bool is2t)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (is_hal_stop(rtlhal)) {
-               rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
-               rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
-       }
-       if (is2t) {
-               if (bmain)
-                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
-                                     BIT(5) | BIT(6), 0x1);
-               else
-                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
-                                     BIT(5) | BIT(6), 0x2);
-       } else {
-               if (bmain)
-                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
-               else
-                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
-
-       }
-}
-
-#undef IQK_ADDA_REG_NUM
-#undef IQK_DELAY_TIME
-
-void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       long result[4][8];
-       u8 i, final_candidate;
-       bool b_patha_ok, b_pathb_ok;
-       long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
-           reg_ecc, reg_tmp = 0;
-       bool is12simular, is13simular, is23simular;
-       bool b_start_conttx = false, b_singletone = false;
-       u32 iqk_bb_reg[10] = {
-               ROFDM0_XARXIQIMBALANCE,
-               ROFDM0_XBRXIQIMBALANCE,
-               ROFDM0_ECCATHRESHOLD,
-               ROFDM0_AGCRSSITABLE,
-               ROFDM0_XATXIQIMBALANCE,
-               ROFDM0_XBTXIQIMBALANCE,
-               ROFDM0_XCTXIQIMBALANCE,
-               ROFDM0_XCTXAFE,
-               ROFDM0_XDTXAFE,
-               ROFDM0_RXIQEXTANTA
-       };
-
-       if (b_recovery) {
-               _rtl92c_phy_reload_adda_registers(hw,
-                                                 iqk_bb_reg,
-                                                 rtlphy->iqk_bb_backup, 10);
-               return;
-       }
-       if (b_start_conttx || b_singletone)
-               return;
-       for (i = 0; i < 8; i++) {
-               result[0][i] = 0;
-               result[1][i] = 0;
-               result[2][i] = 0;
-               result[3][i] = 0;
-       }
-       final_candidate = 0xff;
-       b_patha_ok = false;
-       b_pathb_ok = false;
-       is12simular = false;
-       is23simular = false;
-       is13simular = false;
-       for (i = 0; i < 3; i++) {
-               if (IS_92C_SERIAL(rtlhal->version))
-                       _rtl92c_phy_iq_calibrate(hw, result, i, true);
-               else
-                       _rtl92c_phy_iq_calibrate(hw, result, i, false);
-               if (i == 1) {
-                       is12simular = _rtl92c_phy_simularity_compare(hw,
-                                                                    result, 0,
-                                                                    1);
-                       if (is12simular) {
-                               final_candidate = 0;
-                               break;
-                       }
-               }
-               if (i == 2) {
-                       is13simular = _rtl92c_phy_simularity_compare(hw,
-                                                                    result, 0,
-                                                                    2);
-                       if (is13simular) {
-                               final_candidate = 0;
-                               break;
-                       }
-                       is23simular = _rtl92c_phy_simularity_compare(hw,
-                                                                    result, 1,
-                                                                    2);
-                       if (is23simular)
-                               final_candidate = 1;
-                       else {
-                               for (i = 0; i < 8; i++)
-                                       reg_tmp += result[3][i];
-
-                               if (reg_tmp != 0)
-                                       final_candidate = 3;
-                               else
-                                       final_candidate = 0xFF;
-                       }
-               }
-       }
-       for (i = 0; i < 4; i++) {
-               reg_e94 = result[i][0];
-               reg_e9c = result[i][1];
-               reg_ea4 = result[i][2];
-               reg_eac = result[i][3];
-               reg_eb4 = result[i][4];
-               reg_ebc = result[i][5];
-               reg_ec4 = result[i][6];
-               reg_ecc = result[i][7];
-       }
-       if (final_candidate != 0xff) {
-               rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
-               rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
-               reg_ea4 = result[final_candidate][2];
-               reg_eac = result[final_candidate][3];
-               rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
-               rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
-               reg_ec4 = result[final_candidate][6];
-               reg_ecc = result[final_candidate][7];
-               b_patha_ok = b_pathb_ok = true;
-       } else {
-               rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
-               rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
-       }
-       if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
-               _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result,
-                                                  final_candidate,
-                                                  (reg_ea4 == 0));
-       if (IS_92C_SERIAL(rtlhal->version)) {
-               if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
-                       _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok,
-                                                          result,
-                                                          final_candidate,
-                                                          (reg_ec4 == 0));
-       }
-       _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
-                                       rtlphy->iqk_bb_backup, 10);
-}
-
-void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       bool b_start_conttx = false, b_singletone = false;
-
-       if (b_start_conttx || b_singletone)
-               return;
-       if (IS_92C_SERIAL(rtlhal->version))
-               _rtl92c_phy_lc_calibrate(hw, true);
-       else
-               _rtl92c_phy_lc_calibrate(hw, false);
-}
-
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (rtlphy->b_apk_done)
-               return;
-       if (IS_92C_SERIAL(rtlhal->version))
-               _rtl92c_phy_ap_calibrate(hw, delta, true);
-       else
-               _rtl92c_phy_ap_calibrate(hw, delta, false);
-}
-
-void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (IS_92C_SERIAL(rtlhal->version))
-               _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
-       else
-               _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
-}
-
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       bool b_postprocessing = false;
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
-                 iotype, rtlphy->set_io_inprogress));
-       do {
-               switch (iotype) {
-               case IO_CMD_RESUME_DM_BY_SCAN:
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                                ("[IO CMD] Resume DM after scan.\n"));
-                       b_postprocessing = true;
-                       break;
-               case IO_CMD_PAUSE_DM_BY_SCAN:
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                                ("[IO CMD] Pause DM before scan.\n"));
-                       b_postprocessing = true;
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("switch case not process\n"));
-                       break;
-               }
-       } while (false);
-       if (b_postprocessing && !rtlphy->set_io_inprogress) {
-               rtlphy->set_io_inprogress = true;
-               rtlphy->current_io_type = iotype;
-       } else {
-               return false;
-       }
-       rtl92c_phy_set_io(hw);
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
-       return true;
-}
-
-void rtl92c_phy_set_io(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                ("--->Cmd(%#x), set_io_inprogress(%d)\n",
-                 rtlphy->current_io_type, rtlphy->set_io_inprogress));
-       switch (rtlphy->current_io_type) {
-       case IO_CMD_RESUME_DM_BY_SCAN:
-               dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
-               rtl92c_dm_write_dig(hw);
-               rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
-               break;
-       case IO_CMD_PAUSE_DM_BY_SCAN:
-               rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
-               dm_digtable.cur_igvalue = 0x17;
-               rtl92c_dm_write_dig(hw);
-               break;
-       default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("switch case not process\n"));
-               break;
-       }
-       rtlphy->set_io_inprogress = false;
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                ("<---(%#x)\n", rtlphy->current_io_type));
-}
-
-void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
-}
-
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
-       u32 u4b_tmp;
-       u8 delay = 5;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
-       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
-       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-       u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
-       while (u4b_tmp != 0 && delay > 0) {
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
-               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-               u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
-               delay--;
-       }
-       if (delay == 0) {
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
-               RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
-                        ("Switch RF timeout !!!.\n"));
-               return;
-       }
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
 static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                            enum rf_pwrstate rfpwr_state)
 {
@@ -2648,7 +599,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                  jiffies_to_msecs(jiffies -
                                                   ppsc->last_awake_jiffies)));
                        ppsc->last_sleep_jiffies = jiffies;
-                       _rtl92ce_phy_set_rf_sleep(hw);
+                       _rtl92c_phy_set_rf_sleep(hw);
                        break;
                }
        default:
@@ -2663,7 +614,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
        return bresult;
 }
 
-bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                   enum rf_pwrstate rfpwr_state)
 {
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
index ca4daee..a37267e 100644
@@ -57,8 +57,6 @@
 #define IQK_MAC_REG_NUM                        4
 
 #define RF90_PATH_MAX                  2
-#define CHANNEL_MAX_NUMBER             14
-#define CHANNEL_GROUP_MAX              3
 
 #define CT_OFFSET_MAC_ADDR             0X16
 
@@ -78,9 +76,7 @@
 #define CT_OFFSET_CUSTOMER_ID          0x7F
 
 #define RTL92C_MAX_PATH_NUM            2
-#define CHANNEL_MAX_NUMBER             14
-#define CHANNEL_GROUP_MAX              3
-
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER        255
 enum swchnlcmd_id {
        CMDID_END,
        CMDID_SET_TXPOWEROWER_LEVEL,
@@ -195,11 +191,11 @@ extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
 extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
                                   enum radio_path rfpath, u32 regaddr,
                                   u32 bitmask);
-extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
                                  enum radio_path rfpath, u32 regaddr,
                                  u32 bitmask, u32 data);
 extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
 extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
 extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
                                                 enum radio_path rfpath);
@@ -227,11 +223,32 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
                                              u32 rfpath);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                          enum rf_pwrstate rfpwr_state);
-void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+                                     enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+                                        enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+                                       enum radio_path rfpath, u32 offset,
+                                       u32 data);
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+                                                  u32 regaddr, u32 bitmask,
+                                                  u32 data);
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+                                          enum radio_path rfpath, u32 offset,
+                                          u32 data);
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+                                                  u32 regaddr, u32 bitmask,
+                                                  u32 data);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
 
 #endif
index 875d514..b0868a6 100644
 #define REG_LEDCFG3                            0x004F
 #define REG_FSIMR                              0x0050
 #define REG_FSISR                              0x0054
-
+#define REG_HSIMR                              0x0058
+#define REG_HSISR                              0x005c
+
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2                    0x0060
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2                      0x0062
+/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL                    0x0068
 #define REG_MCUFWDL                            0x0080
 
 #define REG_HMEBOX_EXT_0                       0x0088
@@ -79,6 +87,7 @@
 #define REG_PCIE_MIO_INTD                      0x00E8
 #define REG_HPON_FSM                           0x00EC
 #define REG_SYS_CFG                            0x00F0
+#define REG_GPIO_OUTSTS                                0x00F4  /* For RTL8723 only.*/
 
 #define REG_CR                                 0x0100
 #define REG_PBP                                        0x0104
 #define REG_RDG_PIFS                           0x0513
 #define REG_SIFS_CTX                           0x0514
 #define REG_SIFS_TRX                           0x0516
+#define REG_SIFS_CCK                           0x0514
+#define REG_SIFS_OFDM                          0x0516
 #define REG_AGGR_BREAK_TIME                    0x051A
 #define REG_SLOT                               0x051B
 #define REG_TX_PTCL_CTRL                       0x0520
 #define REG_MAC_SPEC_SIFS                      0x063A
 #define REG_RESP_SIFS_CCK                      0x063C
 #define REG_RESP_SIFS_OFDM                     0x063E
+/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS                           0x063C
+/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS                           0x063E
 #define REG_ACKTO                              0x0640
 #define REG_CTS2TO                             0x0641
 #define REG_EIFS                               0x0642
 #define        STOPBE                                  BIT(1)
 #define        STOPBK                                  BIT(0)
 
-#define        RCR_APPFCS                              BIT(31)
+#define        RCR_APP_FCS                             BIT(31)
 #define        RCR_APP_MIC                             BIT(30)
 #define        RCR_APP_ICV                             BIT(29)
+#define        RCR_APP_PHYSTS                          BIT(28)
 #define        RCR_APP_PHYST_RXFF                      BIT(28)
 #define        RCR_APP_BA_SSN                          BIT(27)
 #define        RCR_ENMBID                              BIT(24)
 
 #define        BOOT_FROM_EEPROM                        BIT(4)
 #define        EEPROM_EN                               BIT(5)
+#define        EEPROMSEL                               BOOT_FROM_EEPROM
 
 #define AFE_BGEN                               BIT(0)
 #define AFE_MBEN                               BIT(1)
 #define BD_MAC2                                        BIT(9)
 #define BD_MAC1                                        BIT(10)
 #define IC_MACPHY_MODE                         BIT(11)
+#define BT_FUNC                                        BIT(16)
+#define VENDOR_ID                              BIT(19)
 #define PAD_HWPD_IDN                           BIT(22)
 #define TRP_VAUX_EN                            BIT(23)
 #define TRP_BT_EN                              BIT(24)
 #define BD_HCI_SEL                             BIT(26)
 #define TYPE_ID                                        BIT(27)
 
+/* REG_GPIO_OUTSTS (For RTL8723 only) */
+#define        EFS_HCI_SEL                             (BIT(0)|BIT(1))
+#define        PAD_HCI_SEL                             (BIT(2)|BIT(3))
+#define        HCI_SEL                                 (BIT(4)|BIT(5))
+#define        PKG_SEL_HCI                             BIT(6)
+#define        FEN_GPS                                 BIT(7)
+#define        FEN_BT                                  BIT(8)
+#define        FEN_WL                                  BIT(9)
+#define        FEN_PCI                                 BIT(10)
+#define        FEN_USB                                 BIT(11)
+#define        BTRF_HWPDN_N                            BIT(12)
+#define        WLRF_HWPDN_N                            BIT(13)
+#define        PDN_BT_N                                BIT(14)
+#define        PDN_GPS_N                               BIT(15)
+#define        BT_CTL_HWPDN                            BIT(16)
+#define        GPS_CTL_HWPDN                           BIT(17)
+#define        PPHY_SUSB                               BIT(20)
+#define        UPHY_SUSB                               BIT(21)
+#define        PCI_SUSEN                               BIT(22)
+#define        USB_SUSEN                               BIT(23)
+#define        RF_RL_ID                        (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
 #define CHIP_VER_RTL_MASK                      0xF000
 #define CHIP_VER_RTL_SHIFT                     12
 
 #define _RARF_RC7(x)                           (((x) & 0x1F) << 16)
 #define _RARF_RC8(x)                           (((x) & 0x1F) << 24)
 
-#define AC_PARAM_TXOP_LIMIT_OFFSET             16
+#define AC_PARAM_TXOP_OFFSET                   16
 #define AC_PARAM_ECW_MAX_OFFSET                        12
 #define AC_PARAM_ECW_MIN_OFFSET                        8
 #define AC_PARAM_AIFS_OFFSET                   0
 
 #define        HAL_8192C_HW_GPIO_WPS_BIT               BIT(2)
 
+/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+/* Enable GPIO[9] as WiFi HW PDn source */
+#define        WL_HWPDN_EN                             BIT(0)
+/* WiFi HW PDn polarity control */
+#define        WL_HWPDN_SL                             BIT(1)
+/* WiFi function enable */
+#define        WL_FUNC_EN                              BIT(2)
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define        WL_HWROF_EN                             BIT(3)
+/* Enable GPIO[11] as BT HW PDn source */
+#define        BT_HWPDN_EN                             BIT(16)
+/* BT HW PDn polarity control */
+#define        BT_HWPDN_SL                             BIT(17)
+/* BT function enable */
+#define        BT_FUNC_EN                              BIT(18)
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define        BT_HWROF_EN                             BIT(19)
+/* Enable GPIO[10] as GPS HW PDn source */
+#define        GPS_HWPDN_EN                            BIT(20)
+/* GPS HW PDn polarity control */
+#define        GPS_HWPDN_SL                            BIT(21)
+/* GPS function enable */
+#define        GPS_FUNC_EN                             BIT(22)
+
 #define        RPMAC_RESET                             0x100
 #define        RPMAC_TXSTART                           0x104
 #define        RPMAC_TXLEGACYSIG                       0x108
 #define        BTXHTSTBC                               0x30
 #define        BTXHTADVANCECODING                      0x40
 #define        BTXHTSHORTGI                            0x80
-#define        BTXHTNUMBERHT_LT        F               0x300
+#define        BTXHTNUMBERHT_LTF                       0x300
 #define        BTXHTCRC8                               0x3fc00
 #define        BCOUNTERRESET                           0x10000
 #define        BNUMOFOFDMTX                            0xffff
index ffd8e04..669b116 100644
@@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
        }
 }
 
-void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                       u8 *ppowerlevel)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -410,7 +410,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
        }
 }
 
-void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
                                        u8 *ppowerlevel, u8 channel)
 {
        u32 writeVal[2], powerBase0[2], powerBase1[2];
@@ -430,7 +430,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
        }
 }
 
-bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -484,11 +484,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
 
                switch (rfpath) {
                case RF90_PATH_A:
-                       rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
+                       rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
                                        (enum radio_path) rfpath);
                        break;
                case RF90_PATH_B:
-                       rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
+                       rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
                                        (enum radio_path) rfpath);
                        break;
                case RF90_PATH_C:
index d3014f9..3aa520c 100644
@@ -40,5 +40,8 @@ extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                              u8 *ppowerlevel);
 extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
                                               u8 *ppowerlevel, u8 channel);
-extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+
 #endif
index b366e88..b1cc4d4 100644
@@ -37,6 +37,7 @@
 #include "phy.h"
 #include "dm.h"
 #include "hw.h"
+#include "rf.h"
 #include "sw.h"
 #include "trx.h"
 #include "led.h"
@@ -46,13 +47,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtlpriv->dm.b_dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.b_disable_framebursting = 0;;
+       rtlpriv->dm.disable_framebursting = 0;
        rtlpriv->dm.thermalvalue = 0;
        rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
 
-       rtlpci->receive_config = (RCR_APPFCS |
+       rtlpci->receive_config = (RCR_APP_FCS |
                                  RCR_AMF |
                                  RCR_ADF |
                                  RCR_APP_MIC |
@@ -122,7 +123,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .switch_channel = rtl92c_phy_sw_chnl,
        .dm_watchdog = rtl92c_dm_watchdog,
        .scan_operation_backup = rtl92c_phy_scan_operation_backup,
-       .set_rf_power_state = rtl92c_phy_set_rf_power_state,
+       .set_rf_power_state = rtl92ce_phy_set_rf_power_state,
        .led_control = rtl92ce_led_control,
        .set_desc = rtl92ce_set_desc,
        .get_desc = rtl92ce_get_desc,
@@ -133,8 +134,17 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .deinit_sw_leds = rtl92ce_deinit_sw_leds,
        .get_bbreg = rtl92c_phy_query_bb_reg,
        .set_bbreg = rtl92c_phy_set_bb_reg,
-       .get_rfreg = rtl92c_phy_query_rf_reg,
-       .set_rfreg = rtl92c_phy_set_rf_reg,
+       .get_rfreg = rtl92ce_phy_query_rf_reg,
+       .set_rfreg = rtl92ce_phy_set_rf_reg,
+       .cmd_send_packet = _rtl92c_cmd_send_packet,
+       .phy_rf6052_config = rtl92ce_phy_rf6052_config,
+       .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
+       .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
+       .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
+       .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
+       .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
+       .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
+       .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
 };
 
 static struct rtl_mod_params rtl92ce_mod_params = {
index de1198c..36e6576 100644
 int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
 void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
 void rtl92c_init_var_map(struct ieee80211_hw *hw);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+                            struct sk_buff *skb);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                        u8 *ppowerlevel, u8 channel);
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                                 u8 configtype);
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                   u8 configtype);
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
 
 #endif
index bf5852f..aa2b581 100644
@@ -36,7 +36,7 @@
 #include "trx.h"
 #include "led.h"
 
-static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc,
+static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc,
                                                          unsigned int
                                                          skb_queue)
 {
@@ -245,24 +245,24 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                                       struct rtl_stats *pstats,
                                       struct rx_desc_92c *pdesc,
                                       struct rx_fwinfo_92c *p_drvinfo,
-                                      bool bpacket_match_bssid,
-                                      bool bpacket_toself,
-                                      bool b_packet_beacon)
+                                      bool packet_match_bssid,
+                                      bool packet_toself,
+                                      bool packet_beacon)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct phy_sts_cck_8192s_t *cck_buf;
        s8 rx_pwr_all, rx_pwr[4];
-       u8 rf_rx_num, evm, pwdb_all;
+       u8 evm, pwdb_all, rf_rx_num = 0;
        u8 i, max_spatial_stream;
-       u32 rssi, total_rssi;
+       u32 rssi, total_rssi = 0;
        bool is_cck_rate;
 
        is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
-       pstats->b_packet_matchbssid = bpacket_match_bssid;
-       pstats->b_packet_toself = bpacket_toself;
-       pstats->b_is_cck = is_cck_rate;
-       pstats->b_packet_beacon = b_packet_beacon;
-       pstats->b_is_cck = is_cck_rate;
+       pstats->packet_matchbssid = packet_match_bssid;
+       pstats->packet_toself = packet_toself;
+       pstats->is_cck = is_cck_rate;
+       pstats->packet_beacon = packet_beacon;
+       pstats->is_cck = is_cck_rate;
        pstats->rx_mimo_signalquality[0] = -1;
        pstats->rx_mimo_signalquality[1] = -1;
 
@@ -315,7 +315,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                pstats->rx_pwdb_all = pwdb_all;
                pstats->recvsignalpower = rx_pwr_all;
 
-               if (bpacket_match_bssid) {
+               if (packet_match_bssid) {
                        u8 sq;
                        if (pstats->rx_pwdb_all > 40)
                                sq = 100;
@@ -334,10 +334,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                        pstats->rx_mimo_signalquality[1] = -1;
                }
        } else {
-               rtlpriv->dm.brfpath_rxenable[0] =
-                   rtlpriv->dm.brfpath_rxenable[1] = true;
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
                for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
-                       if (rtlpriv->dm.brfpath_rxenable[i])
+                       if (rtlpriv->dm.rfpath_rxenable[i])
                                rf_rx_num++;
 
                        rx_pwr[i] =
@@ -347,7 +347,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                        rtlpriv->stats.rx_snr_db[i] =
                            (long)(p_drvinfo->rxsnr[i] / 2);
 
-                       if (bpacket_match_bssid)
+                       if (packet_match_bssid)
                                pstats->rx_mimo_signalstrength[i] = (u8) rssi;
                }
 
@@ -366,7 +366,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                for (i = 0; i < max_spatial_stream; i++) {
                        evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
 
-                       if (bpacket_match_bssid) {
+                       if (packet_match_bssid) {
                                if (i == 0)
                                        pstats->signalquality =
                                            (u8) (evm & 0xff);
@@ -393,7 +393,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
        u8 rfpath;
        u32 last_rssi, tmpval;
 
-       if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+       if (pstats->packet_toself || pstats->packet_beacon) {
                rtlpriv->stats.rssi_calculate_cnt++;
 
                if (rtlpriv->stats.ui_rssi.total_num++ >=
@@ -421,7 +421,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
                pstats->rssi = rtlpriv->stats.signal_strength;
        }
 
-       if (!pstats->b_is_cck && pstats->b_packet_toself) {
+       if (!pstats->is_cck && pstats->packet_toself) {
                for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
                     rfpath++) {
 
@@ -463,7 +463,7 @@ static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
                                               struct rtl_stats *pstats)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       int weighting;
+       int weighting = 0;
 
        if (rtlpriv->stats.recv_signal_power == 0)
                rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
@@ -493,7 +493,7 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
                    rtlpriv->dm.undecorated_smoothed_pwdb;
        }
 
-       if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+       if (pstats->packet_toself || pstats->packet_beacon) {
                if (undecorated_smoothed_pwdb < 0)
                        undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
 
@@ -525,7 +525,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
        u32 last_evm, n_spatialstream, tmpval;
 
        if (pstats->signalquality != 0) {
-               if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+               if (pstats->packet_toself || pstats->packet_beacon) {
 
                        if (rtlpriv->stats.ui_link_quality.total_num++ >=
                            PHY_LINKQUALITY_SLID_WIN_MAX) {
@@ -595,8 +595,8 @@ static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
                                     struct rtl_stats *pcurrent_stats)
 {
 
-       if (!pcurrent_stats->b_packet_matchbssid &&
-           !pcurrent_stats->b_packet_beacon)
+       if (!pcurrent_stats->packet_matchbssid &&
+           !pcurrent_stats->packet_beacon)
                return;
 
        _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
@@ -617,34 +617,36 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        u8 *tmp_buf;
        u8 *praddr;
        u8 *psaddr;
-       u16 fc, type;
-       bool b_packet_matchbssid, b_packet_toself, b_packet_beacon;
+       __le16 fc;
+       u16 type, c_fc;
+       bool packet_matchbssid, packet_toself, packet_beacon;
 
        tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
 
        hdr = (struct ieee80211_hdr *)tmp_buf;
-       fc = le16_to_cpu(hdr->frame_control);
+       fc = hdr->frame_control;
+       c_fc = le16_to_cpu(fc);
        type = WLAN_FC_GET_TYPE(fc);
        praddr = hdr->addr1;
        psaddr = hdr->addr2;
 
-       b_packet_matchbssid =
+       packet_matchbssid =
            ((IEEE80211_FTYPE_CTL != type) &&
             (!compare_ether_addr(mac->bssid,
-                                 (fc & IEEE80211_FCTL_TODS) ?
-                                 hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+                                 (c_fc & IEEE80211_FCTL_TODS) ?
+                                 hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
                                  hdr->addr2 : hdr->addr3)) &&
-            (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv));
+            (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
 
-       b_packet_toself = b_packet_matchbssid &&
+       packet_toself = packet_matchbssid &&
            (!compare_ether_addr(praddr, rtlefuse->dev_addr));
 
        if (ieee80211_is_beacon(fc))
-               b_packet_beacon = true;
+               packet_beacon = true;
 
        _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
-                                  b_packet_matchbssid, b_packet_toself,
-                                  b_packet_beacon);
+                                  packet_matchbssid, packet_toself,
+                                  packet_beacon);
 
        _rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
 }
@@ -662,14 +664,14 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
        stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
            RX_DRV_INFO_SIZE_UNIT;
        stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
-       stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
-       stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
-       stats->b_hwerror = (stats->b_crc | stats->b_icv);
+       stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+       stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+       stats->hwerror = (stats->crc | stats->icv);
        stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
        stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
-       stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
-       stats->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
-       stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+       stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+       stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+       stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
                                   && (GET_RX_DESC_FAGGR(pdesc) == 1));
        stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
        stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
@@ -689,7 +691,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
        if (GET_RX_DESC_RXHT(pdesc))
                rx_status->flag |= RX_FLAG_HT;
 
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        if (stats->decrypted)
                rx_status->flag |= RX_FLAG_DECRYPTED;
@@ -727,27 +729,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       bool b_defaultadapter = true;
-
-       struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
-
+       bool defaultadapter = true;
+       struct ieee80211_sta *sta;
        u8 *pdesc = (u8 *) pdesc_tx;
        struct rtl_tcb_desc tcb_desc;
        u8 *qc = ieee80211_get_qos_ctl(hdr);
        u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        u16 seq_number;
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u8 rate_flag = info->control.rates[0].flags;
 
        enum rtl_desc_qsel fw_qsel =
-           _rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control),
-                                           queue_index);
+           _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
 
-       bool b_firstseg = ((hdr->seq_ctrl &
-                           cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+       bool firstseg = ((hdr->seq_ctrl &
+                         cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
 
-       bool b_lastseg = ((hdr->frame_control &
-                          cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+       bool lastseg = ((hdr->frame_control &
+                        cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
 
        dma_addr_t mapping = pci_map_single(rtlpci->pdev,
                                            skb->data, skb->len,
@@ -759,7 +758,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
 
        CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
 
-       if (b_firstseg) {
+       if (firstseg) {
                SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
 
                SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
@@ -774,25 +773,25 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                }
                SET_TX_DESC_SEQ(pdesc, seq_number);
 
-               SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable &&
+               SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable &&
                                                !tcb_desc.
-                                               b_cts_enable) ? 1 : 0));
+                                               cts_enable) ? 1 : 0));
                SET_TX_DESC_HW_RTS_ENABLE(pdesc,
-                                         ((tcb_desc.b_rts_enable
-                                           || tcb_desc.b_cts_enable) ? 1 : 0));
-               SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0));
-               SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0));
+                                         ((tcb_desc.rts_enable
+                                           || tcb_desc.cts_enable) ? 1 : 0));
+               SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+               SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
 
                SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
                SET_TX_DESC_RTS_BW(pdesc, 0);
                SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
                SET_TX_DESC_RTS_SHORT(pdesc,
                                      ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
-                                     (tcb_desc.b_rts_use_shortpreamble ? 1 : 0)
-                                     : (tcb_desc.b_rts_use_shortgi ? 1 : 0)));
+                                     (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+                                     : (tcb_desc.rts_use_shortgi ? 1 : 0)));
 
                if (mac->bw_40) {
-                       if (tcb_desc.b_packet_bw) {
+                       if (tcb_desc.packet_bw) {
                                SET_TX_DESC_DATA_BW(pdesc, 1);
                                SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
                        } else {
@@ -811,10 +810,13 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_LINIP(pdesc, 0);
                SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
 
+               rcu_read_lock();
+               sta = ieee80211_find_sta(mac->vif, mac->bssid);
                if (sta) {
                        u8 ampdu_density = sta->ht_cap.ampdu_density;
                        SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
                }
+               rcu_read_unlock();
 
                if (info->control.hw_key) {
                        struct ieee80211_key_conf *keyconf =
@@ -854,14 +856,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                }
        }
 
-       SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
-       SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
+       SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+       SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
 
        SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
 
        SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
 
-       if (rtlpriv->dm.b_useramask) {
+       if (rtlpriv->dm.useramask) {
                SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
                SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
        } else {
@@ -869,16 +871,16 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
        }
 
-       if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps &&
-           ppsc->b_fwctrl_lps) {
+       if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+           ppsc->fwctrl_lps) {
                SET_TX_DESC_HWSEQ_EN(pdesc, 1);
                SET_TX_DESC_PKT_ID(pdesc, 8);
 
-               if (!b_defaultadapter)
+               if (!defaultadapter)
                        SET_TX_DESC_QOS(pdesc, 1);
        }
 
-       SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
+       SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
 
        if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
            is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
@@ -889,8 +891,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
 }
 
 void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
-                            u8 *pdesc, bool b_firstseg,
-                            bool b_lastseg, struct sk_buff *skb)
+                            u8 *pdesc, bool firstseg,
+                            bool lastseg, struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -901,11 +903,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
                                            PCI_DMA_TODEVICE);
 
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
 
        CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
 
-       if (b_firstseg)
+       if (firstseg)
                SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
 
        SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
@@ -1029,3 +1031,36 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
                               BIT(0) << (hw_queue));
        }
 }
+
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+                            struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring;
+       struct rtl_tx_desc *pdesc;
+       u8 own;
+       unsigned long flags;
+       struct sk_buff *pskb = NULL;
+
+       ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+       spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+       pskb = __skb_dequeue(&ring->queue);
+       if (pskb)
+               kfree_skb(pskb);
+
+       pdesc = &ring->desc[0];
+       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+       rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+       __skb_queue_tail(&ring->queue, skb);
+
+       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+       rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+       return true;
+}
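
A hedged sketch of how a caller might hand a command/firmware frame to the helper added above; the example_* name, the allocation pattern and the payload are illustrative assumptions, not taken from this patch:

/* Illustration only -- not part of the patch. Assumes dev_alloc_skb()
 * and skb_put() from the kernel networking core are in scope.
 */
static bool example_send_cmd_frame(struct ieee80211_hw *hw,
                                   const u8 *payload, u32 len)
{
        struct sk_buff *skb = dev_alloc_skb(len);

        if (!skb)
                return false;

        memcpy(skb_put(skb, len), payload, len);

        /* _rtl92c_cmd_send_packet() frees any stale skb left on the
         * beacon queue, fills the command descriptor, queues the new
         * skb and then kicks TX polling on BEACON_QUEUE.
         */
        return _rtl92c_cmd_send_packet(hw, skb);
}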
index 53d0e0a..803adcc 100644 (file)
 #define USB_HWDESC_HEADER_LEN                  32
 #define CRCLENGTH                              4
 
+/* Extract a bit field from a descriptor word: take a pointer to a le32
+ * word, convert it to host ordering, right-shift it by __shift bits and
+ * mask off the low __mask bits to obtain the field value.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask)            \
+       ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+       BIT_LEN_MASK_32(__mask))
+
+/* Write a bit field in a descriptor word: convert the le32 word to
+ * host ordering, clear the __len bits that start at __shift, insert
+ * __val into that field, and store the result back in le32 ordering.
+ * The value in memory therefore always stays little-endian; host
+ * ordering is used only for the intermediate clear and set steps.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)     \
+       (*(__le32 *)(__pdesc) =                                 \
+       (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) &     \
+       (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) |                \
+       (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))))
+
+/* macros to read/write various fields in RX or TX descriptors */
+
 #define SET_TX_DESC_PKT_SIZE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
 #define SET_TX_DESC_OFFSET(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
 #define SET_TX_DESC_BMC(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
 #define SET_TX_DESC_HTC(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
 #define SET_TX_DESC_LAST_SEG(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
 #define SET_TX_DESC_FIRST_SEG(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
 #define SET_TX_DESC_LINIP(__pdesc, __val)              \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
 #define SET_TX_DESC_NO_ACM(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
 #define SET_TX_DESC_GF(__pdesc, __val)                 \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
 #define SET_TX_DESC_OWN(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
 
 #define GET_TX_DESC_PKT_SIZE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+       SHIFT_AND_MASK_LE(__pdesc, 0, 16)
 #define GET_TX_DESC_OFFSET(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+       SHIFT_AND_MASK_LE(__pdesc, 16, 8)
 #define GET_TX_DESC_BMC(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 24, 1)
 #define GET_TX_DESC_HTC(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 25, 1)
 #define GET_TX_DESC_LAST_SEG(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 26, 1)
 #define GET_TX_DESC_FIRST_SEG(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 27, 1)
 #define GET_TX_DESC_LINIP(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 28, 1)
 #define GET_TX_DESC_NO_ACM(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 29, 1)
 #define GET_TX_DESC_GF(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 30, 1)
 #define GET_TX_DESC_OWN(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 31, 1)
 
 #define SET_TX_DESC_MACID(__pdesc, __val)              \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
 #define SET_TX_DESC_AGG_BREAK(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
 #define SET_TX_DESC_BK(__pdesc, __val)                 \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
 #define SET_TX_DESC_RDG_ENABLE(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
 #define SET_TX_DESC_QUEUE_SEL(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
 #define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val)        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
 #define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val)       \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
 #define SET_TX_DESC_PIFS(__pdesc, __val)               \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
 #define SET_TX_DESC_RATE_ID(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
 #define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val)        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
 #define SET_TX_DESC_EN_DESC_ID(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
 #define SET_TX_DESC_SEC_TYPE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
 #define SET_TX_DESC_PKT_OFFSET(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
 
 #define GET_TX_DESC_MACID(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
 #define GET_TX_DESC_AGG_ENABLE(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
 #define GET_TX_DESC_AGG_BREAK(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
 #define GET_TX_DESC_RDG_ENABLE(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
 #define GET_TX_DESC_QUEUE_SEL(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
 #define GET_TX_DESC_RDG_NAV_EXT(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
 #define GET_TX_DESC_LSIG_TXOP_EN(__pdesc)              \
-       LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
 #define GET_TX_DESC_PIFS(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
 #define GET_TX_DESC_RATE_ID(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
 #define GET_TX_DESC_NAV_USE_HDR(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
 #define GET_TX_DESC_EN_DESC_ID(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
 #define GET_TX_DESC_SEC_TYPE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
+       SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
 #define GET_TX_DESC_PKT_OFFSET(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
+       SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
 
 #define SET_TX_DESC_RTS_RC(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
 #define SET_TX_DESC_DATA_RC(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
 #define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
 #define SET_TX_DESC_MORE_FRAG(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
 #define SET_TX_DESC_RAW(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
 #define SET_TX_DESC_CCX(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
 #define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
 #define SET_TX_DESC_ANTSEL_A(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
 #define SET_TX_DESC_ANTSEL_B(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
 #define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
 #define SET_TX_DESC_TX_ANTL(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
 #define SET_TX_DESC_TX_ANT_HT(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
 
 #define GET_TX_DESC_RTS_RC(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
+       SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
 #define GET_TX_DESC_DATA_RC(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
+       SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
 #define GET_TX_DESC_BAR_RTY_TH(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
 #define GET_TX_DESC_MORE_FRAG(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
 #define GET_TX_DESC_RAW(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
 #define GET_TX_DESC_CCX(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
 #define GET_TX_DESC_AMPDU_DENSITY(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
+       SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
 #define GET_TX_DESC_ANTSEL_A(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
 #define GET_TX_DESC_ANTSEL_B(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
 #define GET_TX_DESC_TX_ANT_CCK(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
 #define GET_TX_DESC_TX_ANTL(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
 #define GET_TX_DESC_TX_ANT_HT(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
 
 #define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
 #define SET_TX_DESC_TAIL_PAGE(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
 #define SET_TX_DESC_SEQ(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
 #define SET_TX_DESC_PKT_ID(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
 
 #define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
+       SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
 #define GET_TX_DESC_TAIL_PAGE(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
+       SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
 #define GET_TX_DESC_SEQ(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
+       SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
 #define GET_TX_DESC_PKT_ID(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+12, 28, 4)
+       SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
 
 #define SET_TX_DESC_RTS_RATE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
 #define SET_TX_DESC_AP_DCFE(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
 #define SET_TX_DESC_QOS(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
 #define SET_TX_DESC_HWSEQ_EN(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
 #define SET_TX_DESC_USE_RATE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
 #define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
 #define SET_TX_DESC_DISABLE_FB(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
 #define SET_TX_DESC_CTS2SELF(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
 #define SET_TX_DESC_RTS_ENABLE(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
 #define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
 #define SET_TX_DESC_PORT_ID(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
 #define SET_TX_DESC_WAIT_DCTS(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
 #define SET_TX_DESC_CTS2AP_EN(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
 #define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
 #define SET_TX_DESC_TX_STBC(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
 #define SET_TX_DESC_DATA_SHORT(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
 #define SET_TX_DESC_DATA_BW(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
 #define SET_TX_DESC_RTS_SHORT(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
 #define SET_TX_DESC_RTS_BW(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
 #define SET_TX_DESC_RTS_SC(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
 #define SET_TX_DESC_RTS_STBC(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
 
 #define GET_TX_DESC_RTS_RATE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
 #define GET_TX_DESC_AP_DCFE(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
 #define GET_TX_DESC_QOS(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
 #define GET_TX_DESC_HWSEQ_EN(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
 #define GET_TX_DESC_USE_RATE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
 #define GET_TX_DESC_DISABLE_RTS_FB(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
 #define GET_TX_DESC_DISABLE_FB(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
 #define GET_TX_DESC_CTS2SELF(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
 #define GET_TX_DESC_RTS_ENABLE(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
 #define GET_TX_DESC_HW_RTS_ENABLE(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
 #define GET_TX_DESC_PORT_ID(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
 #define GET_TX_DESC_WAIT_DCTS(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
 #define GET_TX_DESC_CTS2AP_EN(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
 #define GET_TX_DESC_TX_SUB_CARRIER(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
 #define GET_TX_DESC_TX_STBC(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
 #define GET_TX_DESC_DATA_SHORT(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
 #define GET_TX_DESC_DATA_BW(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
 #define GET_TX_DESC_RTS_SHORT(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
 #define GET_TX_DESC_RTS_BW(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
 #define GET_TX_DESC_RTS_SC(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
 #define GET_TX_DESC_RTS_STBC(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
 
 #define SET_TX_DESC_TX_RATE(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
 #define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)       \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
 #define SET_TX_DESC_CCX_TAG(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
 #define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
 #define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
 #define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
 #define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val)   \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
 #define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
 
 #define GET_TX_DESC_TX_RATE(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
+       SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
 #define GET_TX_DESC_DATA_SHORTGI(__pdesc)              \
-       LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
 #define GET_TX_DESC_CCX_TAG(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
+       SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
 #define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc)                \
-       LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
+       SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
 #define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
+       SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
 #define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc)                \
-       LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
+       SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
 #define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc)          \
-       LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
+       SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
 #define GET_TX_DESC_USB_TXAGG_NUM(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
+       SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
 
 #define SET_TX_DESC_TXAGC_A(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
 #define SET_TX_DESC_TXAGC_B(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
 #define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val)                \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
 #define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val)                \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
 #define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
 #define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
 #define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
 #define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val)   \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
 
 #define GET_TX_DESC_TXAGC_A(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
 #define GET_TX_DESC_TXAGC_B(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
+       SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
 #define GET_TX_DESC_USE_MAX_LEN(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
+       SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
 #define GET_TX_DESC_MAX_AGG_NUM(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
+       SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
 #define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
 #define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
 #define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
 #define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc)          \
-       LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
 
 #define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
 #define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
 #define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
 #define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
 #define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
 
 #define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+       SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
 #define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+28, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
 #define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+28, 20, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
 #define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+28, 24, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
 #define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+28, 28, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
 
 #define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
 #define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
 
 #define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
 #define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc)       \
-       LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
 
 #define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
 #define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
 
 #define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
 #define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc)       \
-       LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
 
 #define GET_RX_DESC_PKT_LEN(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+       SHIFT_AND_MASK_LE(__pdesc, 0, 14)
 #define GET_RX_DESC_CRC32(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 14, 1)
 #define GET_RX_DESC_ICV(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 15, 1)
 #define GET_RX_DESC_DRV_INFO_SIZE(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc, 16, 4)
 #define GET_RX_DESC_SECURITY(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+       SHIFT_AND_MASK_LE(__pdesc, 20, 3)
 #define GET_RX_DESC_QOS(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 23, 1)
 #define GET_RX_DESC_SHIFT(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+       SHIFT_AND_MASK_LE(__pdesc, 24, 2)
 #define GET_RX_DESC_PHYST(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 26, 1)
 #define GET_RX_DESC_SWDEC(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 27, 1)
 #define GET_RX_DESC_LS(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 28, 1)
 #define GET_RX_DESC_FS(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 29, 1)
 #define GET_RX_DESC_EOR(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 30, 1)
 #define GET_RX_DESC_OWN(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 31, 1)
 
 #define SET_RX_DESC_PKT_LEN(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
 #define SET_RX_DESC_EOR(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
 #define SET_RX_DESC_OWN(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
 
 #define GET_RX_DESC_MACID(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
 #define GET_RX_DESC_TID(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+4, 5, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
 #define GET_RX_DESC_HWRSVD(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+4, 9, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
 #define GET_RX_DESC_PAGGR(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
 #define GET_RX_DESC_FAGGR(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
 #define GET_RX_DESC_A1_FIT(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
 #define GET_RX_DESC_A2_FIT(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
 #define GET_RX_DESC_PAM(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
 #define GET_RX_DESC_PWR(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
 #define GET_RX_DESC_MD(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
 #define GET_RX_DESC_MF(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
 #define GET_RX_DESC_TYPE(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+       SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
 #define GET_RX_DESC_MC(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
 #define GET_RX_DESC_BC(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
 #define GET_RX_DESC_SEQ(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+       SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
 #define GET_RX_DESC_FRAG(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+       SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
 #define GET_RX_DESC_NEXT_PKT_LEN(__pdesc)              \
-       LE_BITS_TO_4BYTE(__pdesc+8, 16, 14)
+       SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
 #define GET_RX_DESC_NEXT_IND(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+8, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
 #define GET_RX_DESC_RSVD(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+8, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
 
 #define GET_RX_DESC_RXMCS(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
+       SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
 #define GET_RX_DESC_RXHT(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
 #define GET_RX_DESC_SPLCP(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
 #define GET_RX_DESC_BW(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
 #define GET_RX_DESC_HTC(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
 #define GET_RX_DESC_HWPC_ERR(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+12, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
 #define GET_RX_DESC_HWPC_IND(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+12, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
 #define GET_RX_DESC_IV0(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+12, 16, 16)
+       SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
 
 #define GET_RX_DESC_IV1(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
 #define GET_RX_DESC_TSFL(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
 
 #define GET_RX_DESC_BUFF_ADDR(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
 #define GET_RX_DESC_BUFF_ADDR64(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
 
 #define SET_RX_DESC_BUFF_ADDR(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
 #define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val)        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
 
 #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)      \
 do {                                                   \
@@ -711,4 +735,6 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
 void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
                             bool b_firstseg, bool b_lastseg,
                             struct sk_buff *skb);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
 #endif
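
For reference, a minimal sketch of how the endian-safe accessors described in the comments above are meant to be used when a descriptor is filled. It relies on the SET_TX_DESC_*/GET_TX_DESC_* wrappers and CLEAR_PCI_TX_DESC_CONTENT() from this header, assumes struct tx_desc_92c is defined elsewhere in the driver, and is an illustration only, not part of the patch; the example_* name is hypothetical.

/* Illustration only -- not part of the patch. Assumes the descriptor
 * macros above and the kernel's le32 helpers are in scope.
 */
static void example_fill_tx_desc(u8 *pdesc, struct sk_buff *skb)
{
        /* Zero all descriptor words before writing individual fields. */
        CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));

        /* Each SET_* macro read-modify-writes one le32 word in place:
         * the word is converted to host order, the field is cleared and
         * replaced via SET_BITS_OFFSET_LE(), and the result is stored
         * back in little-endian form, so the layout stays correct on
         * big-endian hosts as well.
         */
        SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);    /* word 0, bits 0..15 */
        SET_TX_DESC_FIRST_SEG(pdesc, 1);                /* word 0, bit 27 */
        SET_TX_DESC_LAST_SEG(pdesc, 1);                 /* word 0, bit 26 */
        SET_TX_DESC_OWN(pdesc, 1);                      /* hand the descriptor to the NIC */

        /* GET_* reverses the operation via SHIFT_AND_MASK_LE(). */
        WARN_ON(GET_TX_DESC_PKT_SIZE(pdesc) != (skb->len & 0xffff));
}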
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
new file mode 100644 (file)
index 0000000..ad2de6b
--- /dev/null
@@ -0,0 +1,14 @@
+rtl8192cu-objs :=              \
+               dm.o            \
+               hw.o            \
+               led.o           \
+               mac.o           \
+               phy.o           \
+               rf.o            \
+               sw.o            \
+               table.o         \
+               trx.o
+
+obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
new file mode 100644 (file)
index 0000000..c54940e
--- /dev/null
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/def.h"
+
+/*-------------------------------------------------------------------------
+ *     Chip specific
+ *-------------------------------------------------------------------------*/
+#define CHIP_8723                      BIT(2) /* RTL8723 With BT feature */
+#define CHIP_8723_DRV_REV              BIT(3) /* RTL8723 Driver Revised */
+#define NORMAL_CHIP                    BIT(4)
+#define CHIP_VENDOR_UMC                        BIT(5)
+#define CHIP_VENDOR_UMC_B_CUT          BIT(6)
+
+#define IS_NORMAL_CHIP(version)                \
+       (((version) & NORMAL_CHIP) ? true : false)
+
+#define IS_8723_SERIES(version)                \
+       (((version) & CHIP_8723) ? true : false)
+
+#define IS_92C_1T2R(version)           \
+       (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+
+#define IS_VENDOR_UMC(version)         \
+       (((version) & CHIP_VENDOR_UMC) ? true : false)
+
+#define IS_VENDOR_UMC_A_CUT(version)   \
+       (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
+       false : true) : false)
+
+#define IS_VENDOR_8723_A_CUT(version)  \
+       (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
+       false : true) : false)
+
+#define CHIP_BONDING_92C_1T2R  0x1
+#define CHIP_BONDING_IDENTIFIER(_value)        (((_value) >> 22) & 0x3)
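
A hedged illustration of how the chip-version bits above combine; the sample value and the example_* name are made up for the sketch and are not part of the patch:

/* Illustration only. A version word with NORMAL_CHIP and CHIP_VENDOR_UMC
 * set, but neither BIT(6) nor BIT(7), classifies as a normal UMC A-cut
 * part that is not an RTL8723.
 */
static void example_check_version(void)
{
        u32 version = NORMAL_CHIP | CHIP_VENDOR_UMC;    /* hypothetical sample */

        WARN_ON(!IS_NORMAL_CHIP(version));
        WARN_ON(!IS_VENDOR_UMC(version));
        WARN_ON(!IS_VENDOR_UMC_A_CUT(version));
        WARN_ON(IS_8723_SERIES(version));
}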
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
new file mode 100644 (file)
index 0000000..f311bae
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       long undecorated_smoothed_pwdb;
+
+       if (!rtlpriv->dm.dynamic_txpower_enable)
+               return;
+
+       if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+               return;
+       }
+
+       if ((mac->link_state < MAC80211_LINKED) &&
+           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+                        ("Not connected to any\n"));
+
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+
+               rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+               return;
+       }
+
+       if (mac->link_state >= MAC80211_LINKED) {
+               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                       undecorated_smoothed_pwdb =
+                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                                ("AP Client PWDB = 0x%lx\n",
+                                 undecorated_smoothed_pwdb));
+               } else {
+                       undecorated_smoothed_pwdb =
+                           rtlpriv->dm.undecorated_smoothed_pwdb;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                                ("STA Default Port PWDB = 0x%lx\n",
+                                 undecorated_smoothed_pwdb));
+               }
+       } else {
+               undecorated_smoothed_pwdb =
+                   rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("AP Ext Port PWDB = 0x%lx\n",
+                         undecorated_smoothed_pwdb));
+       }
+
+       if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+       } else if ((undecorated_smoothed_pwdb <
+                   (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+                  (undecorated_smoothed_pwdb >=
+                   TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+       } else if (undecorated_smoothed_pwdb <
+                  (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("TXHIGHPWRLEVEL_NORMAL\n"));
+       }
+
+       if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
+                         rtlphy->current_channel));
+               rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+       }
+
+       rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
new file mode 100644 (file)
index 0000000..7f966c6
--- /dev/null
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
new file mode 100644 (file)
index 0000000..9444e76
--- /dev/null
@@ -0,0 +1,2504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../usb.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+
+static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+
+       rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH;
+       rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
+       if (IS_HIGHT_PA(rtlefuse->board_type)) {
+               rtlphy->hwparam_tables[PHY_REG_PG].length =
+                       RTL8192CUPHY_REG_Array_PG_HPLength;
+               rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+                       RTL8192CUPHY_REG_Array_PG_HP;
+       } else {
+               rtlphy->hwparam_tables[PHY_REG_PG].length =
+                       RTL8192CUPHY_REG_ARRAY_PGLENGTH;
+               rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+                       RTL8192CUPHY_REG_ARRAY_PG;
+       }
+       /* 2T */
+       rtlphy->hwparam_tables[PHY_REG_2T].length =
+                       RTL8192CUPHY_REG_2TARRAY_LENGTH;
+       rtlphy->hwparam_tables[PHY_REG_2T].pdata =
+                       RTL8192CUPHY_REG_2TARRAY;
+       rtlphy->hwparam_tables[RADIOA_2T].length =
+                       RTL8192CURADIOA_2TARRAYLENGTH;
+       rtlphy->hwparam_tables[RADIOA_2T].pdata =
+                       RTL8192CURADIOA_2TARRAY;
+       rtlphy->hwparam_tables[RADIOB_2T].length =
+                       RTL8192CURADIOB_2TARRAYLENGTH;
+       rtlphy->hwparam_tables[RADIOB_2T].pdata =
+                       RTL8192CU_RADIOB_2TARRAY;
+       rtlphy->hwparam_tables[AGCTAB_2T].length =
+                       RTL8192CUAGCTAB_2TARRAYLENGTH;
+       rtlphy->hwparam_tables[AGCTAB_2T].pdata =
+                       RTL8192CUAGCTAB_2TARRAY;
+       /* 1T */
+       if (IS_HIGHT_PA(rtlefuse->board_type)) {
+               rtlphy->hwparam_tables[PHY_REG_1T].length =
+                       RTL8192CUPHY_REG_1T_HPArrayLength;
+               rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+                       RTL8192CUPHY_REG_1T_HPArray;
+               rtlphy->hwparam_tables[RADIOA_1T].length =
+                       RTL8192CURadioA_1T_HPArrayLength;
+               rtlphy->hwparam_tables[RADIOA_1T].pdata =
+                       RTL8192CURadioA_1T_HPArray;
+               rtlphy->hwparam_tables[RADIOB_1T].length =
+                       RTL8192CURADIOB_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[RADIOB_1T].pdata =
+                       RTL8192CU_RADIOB_1TARRAY;
+               rtlphy->hwparam_tables[AGCTAB_1T].length =
+                       RTL8192CUAGCTAB_1T_HPArrayLength;
+               rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+                       Rtl8192CUAGCTAB_1T_HPArray;
+       } else {
+               rtlphy->hwparam_tables[PHY_REG_1T].length =
+                        RTL8192CUPHY_REG_1TARRAY_LENGTH;
+               rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+                       RTL8192CUPHY_REG_1TARRAY;
+               rtlphy->hwparam_tables[RADIOA_1T].length =
+                       RTL8192CURADIOA_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[RADIOA_1T].pdata =
+                       RTL8192CU_RADIOA_1TARRAY;
+               rtlphy->hwparam_tables[RADIOB_1T].length =
+                       RTL8192CURADIOB_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[RADIOB_1T].pdata =
+                       RTL8192CU_RADIOB_1TARRAY;
+               rtlphy->hwparam_tables[AGCTAB_1T].length =
+                       RTL8192CUAGCTAB_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+                       RTL8192CUAGCTAB_1TARRAY;
+       }
+}
+
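+/* Parse the TX power calibration data (per-channel-group CCK and HT40
+ * levels, HT20/OFDM diffs, power-group limits, regulatory option, TSSI and
+ * thermal meter) out of the efuse/EEPROM shadow map, falling back to the
+ * EEPROM_DEFAULT_* values when autoload failed. */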
+static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+                                                bool autoload_fail,
+                                                u8 *hwinfo)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 rf_path, index, tempval;
+       u16 i;
+
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 3; i++) {
+                       if (!autoload_fail) {
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_cck[rf_path][i] =
+                                   hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+                                   hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
+                                          i];
+                       } else {
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_cck[rf_path][i] =
+                                   EEPROM_DEFAULT_TXPOWERLEVEL;
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+                                   EEPROM_DEFAULT_TXPOWERLEVEL;
+                       }
+               }
+       }
+       for (i = 0; i < 3; i++) {
+               if (!autoload_fail)
+                       tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
+               else
+                       tempval = EEPROM_DEFAULT_HT40_2SDIFF;
+               rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+                   (tempval & 0xf);
+               rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+                   ((tempval & 0xf0) >> 4);
+       }
+       for (rf_path = 0; rf_path < 2; rf_path++)
+               for (i = 0; i < 3; i++)
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
+                                i, rtlefuse->
+                                eeprom_chnlarea_txpwr_cck[rf_path][i]));
+       for (rf_path = 0; rf_path < 2; rf_path++)
+               for (i = 0; i < 3; i++)
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->
+                                eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+       for (rf_path = 0; rf_path < 2; rf_path++)
+               for (i = 0; i < 3; i++)
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->
+                                eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+                                [i]));
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 14; i++) {
+                       index = _rtl92c_get_chnl_group((u8) i);
+                       rtlefuse->txpwrlevel_cck[rf_path][i] =
+                           rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
+                       rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+                           rtlefuse->
+                           eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
+                       if ((rtlefuse->
+                            eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+                            rtlefuse->
+                            eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+                           > 0) {
+                               rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+                                   rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_1s[rf_path]
+                                   [index] - rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+                                   [index];
+                       } else {
+                               rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
+                       }
+               }
+               for (i = 0; i < 14; i++) {
+                       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                               ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
+                                "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
+                                rtlefuse->txpwrlevel_cck[rf_path][i],
+                                rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+                                rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+               }
+       }
+       for (i = 0; i < 3; i++) {
+               if (!autoload_fail) {
+                       rtlefuse->eeprom_pwrlimit_ht40[i] =
+                           hwinfo[EEPROM_TXPWR_GROUP + i];
+                       rtlefuse->eeprom_pwrlimit_ht20[i] =
+                           hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
+               } else {
+                       rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
+                       rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
+               }
+       }
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 14; i++) {
+                       index = _rtl92c_get_chnl_group((u8) i);
+                       if (rf_path == RF90_PATH_A) {
+                               rtlefuse->pwrgroup_ht20[rf_path][i] =
+                                   (rtlefuse->eeprom_pwrlimit_ht20[index]
+                                    & 0xf);
+                               rtlefuse->pwrgroup_ht40[rf_path][i] =
+                                   (rtlefuse->eeprom_pwrlimit_ht40[index]
+                                    & 0xf);
+                       } else if (rf_path == RF90_PATH_B) {
+                               rtlefuse->pwrgroup_ht20[rf_path][i] =
+                                   ((rtlefuse->eeprom_pwrlimit_ht20[index]
+                                     & 0xf0) >> 4);
+                               rtlefuse->pwrgroup_ht40[rf_path][i] =
+                                   ((rtlefuse->eeprom_pwrlimit_ht40[index]
+                                     & 0xf0) >> 4);
+                       }
+                       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                               ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->pwrgroup_ht20[rf_path][i]));
+                       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                               ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->pwrgroup_ht40[rf_path][i]));
+               }
+       }
+       for (i = 0; i < 14; i++) {
+               index = _rtl92c_get_chnl_group((u8) i);
+               if (!autoload_fail)
+                       tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
+               else
+                       tempval = EEPROM_DEFAULT_HT20_DIFF;
+               rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
+               rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
+                   ((tempval >> 4) & 0xF);
+               if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
+                       rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
+               if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
+                       rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
+               index = _rtl92c_get_chnl_group((u8) i);
+               if (!autoload_fail)
+                       tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
+               else
+                       tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+               rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
+               rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
+                   ((tempval >> 4) & 0xF);
+       }
+       rtlefuse->legacy_ht_txpowerdiff =
+           rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+       if (!autoload_fail)
+               rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
+       else
+               rtlefuse->eeprom_regulatory = 0;
+       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+               ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+       if (!autoload_fail) {
+               rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
+               rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
+       } else {
+               rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
+               rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
+       }
+       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+               ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+                rtlefuse->eeprom_tssi[RF90_PATH_A],
+                rtlefuse->eeprom_tssi[RF90_PATH_B]));
+       if (!autoload_fail)
+               tempval = hwinfo[EEPROM_THERMAL_METER];
+       else
+               tempval = EEPROM_DEFAULT_THERMALMETER;
+       rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
+       if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+           rtlefuse->eeprom_thermalmeter > 0x1c)
+               rtlefuse->eeprom_thermalmeter = 0x12;
+       if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
+               rtlefuse->apk_thermalmeterignore = true;
+       rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+               ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+}
+
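+/* Decode the board type from the RF option bytes (bits [7:5] of
+ * EEPROM_RF_OPT1 on normal chips, EEPROM_RF_OPT4 on test chips) and flag
+ * boards that use an external PA. */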
+static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
+{
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 boardType;
+
+       if (IS_NORMAL_CHIP(rtlhal->version)) {
+               boardType = ((contents[EEPROM_RF_OPT1]) &
+                           BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
+       } else {
+               boardType = contents[EEPROM_RF_OPT4];
+               boardType &= BOARD_TYPE_TEST_MASK;
+       }
+       rtlefuse->board_type = boardType;
+       if (IS_HIGHT_PA(rtlefuse->board_type))
+               rtlefuse->external_pa = 1;
+       printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);
+
+#ifdef CONFIG_ANTENNA_DIVERSITY
+       /* Antenna Diversity setting. */
+       if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
+               rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
+       else
+               rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
+
+       printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
+              rtl_efuse->antenna_cfg);
+#endif
+}
+
+#ifdef CONFIG_BT_COEXIST
+static void _update_bt_param(_adapter *padapter)
+{
+       struct btcoexist_priv    *pbtpriv = &(padapter->halpriv.bt_coexist);
+       struct registry_priv    *registry_par = &padapter->registrypriv;
+       if (2 != registry_par->bt_iso) {
+               /* 0:Low, 1:High, 2:From Efuse */
+               pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
+       }
+       if (registry_par->bt_sco == 1) {
+               /* 0:Idle, 1:Non-SCO, 2:SCO, 3:From Counter, 4:Busy,
+                * 5:OtherBusy */
+               pbtpriv->BT_Service = BT_OtherAction;
+       } else if (registry_par->bt_sco == 2) {
+               pbtpriv->BT_Service = BT_SCO;
+       } else if (registry_par->bt_sco == 4) {
+               pbtpriv->BT_Service = BT_Busy;
+       } else if (registry_par->bt_sco == 5) {
+               pbtpriv->BT_Service = BT_OtherBusy;
+       } else {
+               pbtpriv->BT_Service = BT_Idle;
+       }
+       pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
+       pbtpriv->bCOBT = _TRUE;
+       pbtpriv->BtEdcaUL = 0;
+       pbtpriv->BtEdcaDL = 0;
+       pbtpriv->BtRssiState = 0xff;
+       pbtpriv->bInitSet = _FALSE;
+       pbtpriv->bBTBusyTraffic = _FALSE;
+       pbtpriv->bBTTrafficModeSet = _FALSE;
+       pbtpriv->bBTNonTrafficModeSet = _FALSE;
+       pbtpriv->CurrentState = 0;
+       pbtpriv->PreviousState = 0;
+       printk(KERN_INFO "rtl8192cu: BT Coexistence = %s\n",
+              (pbtpriv->BT_Coexist == _TRUE) ? "enabled" : "disabled");
+       if (pbtpriv->BT_Coexist) {
+               if (pbtpriv->BT_Ant_Num == Ant_x2)
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "Ant_Num = Antx2\n");
+               else if (pbtpriv->BT_Ant_Num == Ant_x1)
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "Ant_Num = Antx1\n");
+               switch (pbtpriv->BT_CoexistType) {
+               case BT_2Wire:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_2Wire\n");
+                       break;
+               case BT_ISSC_3Wire:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_ISSC_3Wire\n");
+                       break;
+               case BT_Accel:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_Accel\n");
+                       break;
+               case BT_CSR_BC4:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_CSR_BC4\n");
+                       break;
+               case BT_CSR_BC8:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_CSR_BC8\n");
+                       break;
+               case BT_RTL8756:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_RTL8756\n");
+                       break;
+               default:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = Unknown\n");
+                       break;
+               }
+               printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n",
+                      pbtpriv->BT_Ant_isolation);
+               switch (pbtpriv->BT_Service) {
+               case BT_OtherAction:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+                              "BT_OtherAction\n");
+                       break;
+               case BT_SCO:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+                              "BT_SCO\n");
+                       break;
+               case BT_Busy:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+                              "BT_Busy\n");
+                       break;
+               case BT_OtherBusy:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+                              "BT_OtherBusy\n");
+                       break;
+               default:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+                              "BT_Idle\n");
+                       break;
+               }
+               printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
+                      pbtpriv->BT_RadioSharedType);
+       }
+}
+
+#define GET_BT_COEXIST(priv) (&priv->bt_coexist)
+
+static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
+                                               u8 *contents,
+                                               bool bautoloadfailed)
+{
+       HAL_DATA_TYPE   *pHalData = GET_HAL_DATA(Adapter);
+       bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
+       struct btcoexist_priv    *pbtpriv = &pHalData->bt_coexist;
+       u8      rf_opt4;
+
+       _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
+       if (AutoloadFail) {
+               pbtpriv->BT_Coexist = _FALSE;
+               pbtpriv->BT_CoexistType = BT_2Wire;
+               pbtpriv->BT_Ant_Num = Ant_x2;
+               pbtpriv->BT_Ant_isolation = 0;
+               pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
+               return;
+       }
+       if (isNormal) {
+               if (pHalData->BoardType == BOARD_USB_COMBO)
+                       pbtpriv->BT_Coexist = _TRUE;
+               else
+                       pbtpriv->BT_Coexist = ((PROMContent[EEPROM_RF_OPT3] &
+                                             0x20) >> 5); /* bit[5] */
+               rf_opt4 = PROMContent[EEPROM_RF_OPT4];
+               pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
+               pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
+               pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
+               pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
+       } else {
+               pbtpriv->BT_Coexist = (PROMContent[EEPROM_RF_OPT4] >> 4) ?
+                                      _TRUE : _FALSE;
+       }
+       _update_bt_param(Adapter);
+}
+#endif
+
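+/* Read the efuse/EEPROM shadow map and extract the adapter identity:
+ * MAC address, VID/DID, version, channel plan, customer (OEM) ID and the
+ * TX power data. Returns early when the EEPROM ID check fails. */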
+static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u16 i, usvalue;
+       u8 hwinfo[HWSET_MAX_SIZE] = {0};
+       u16 eeprom_id;
+
+       if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+               rtl_efuse_shadow_map_update(hw);
+               memcpy((void *)hwinfo,
+                      (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+                      HWSET_MAX_SIZE);
+       } else if (rtlefuse->epromtype == EEPROM_93C46) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("RTL819X did not boot from EEPROM, check it!!"));
+       }
+       RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
+                     hwinfo, HWSET_MAX_SIZE);
+       eeprom_id = *((u16 *)&hwinfo[0]);
+       if (eeprom_id != RTL8190_EEPROM_ID) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+               rtlefuse->autoload_failflag = true;
+       } else {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+               rtlefuse->autoload_failflag = false;
+       }
+       if (rtlefuse->autoload_failflag == true)
+               return;
+       for (i = 0; i < 6; i += 2) {
+               usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+               *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+       }
+       printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
+       _rtl92cu_read_txpower_info_from_hwpg(hw,
+                                          rtlefuse->autoload_failflag, hwinfo);
+       rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+       rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                (" VID = 0x%02x PID = 0x%02x\n",
+                rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
+       rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+       rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+       rtlefuse->txpwr_fromeprom = true;
+       rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+       if (rtlhal->oem_id == RT_CID_DEFAULT) {
+               switch (rtlefuse->eeprom_oemid) {
+               case EEPROM_CID_DEFAULT:
+                       if (rtlefuse->eeprom_did == 0x8176) {
+                               if ((rtlefuse->eeprom_svid == 0x103C &&
+                                    rtlefuse->eeprom_smid == 0x1629))
+                                       rtlhal->oem_id = RT_CID_819x_HP;
+                               else
+                                       rtlhal->oem_id = RT_CID_DEFAULT;
+                       } else {
+                               rtlhal->oem_id = RT_CID_DEFAULT;
+                       }
+                       break;
+               case EEPROM_CID_TOSHIBA:
+                       rtlhal->oem_id = RT_CID_TOSHIBA;
+                       break;
+               case EEPROM_CID_QMI:
+                       rtlhal->oem_id = RT_CID_819x_QMI;
+                       break;
+               case EEPROM_CID_WHQL:
+               default:
+                       rtlhal->oem_id = RT_CID_DEFAULT;
+                       break;
+               }
+       }
+       _rtl92cu_read_board_type(hw, hwinfo);
+#ifdef CONFIG_BT_COEXIST
+       _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
+                                           rtlefuse->autoload_failflag);
+#endif
+}
+
+static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       switch (rtlhal->oem_id) {
+       case RT_CID_819x_HP:
+               usb_priv->ledctl.led_opendrain = true;
+               break;
+       case RT_CID_819x_Lenovo:
+       case RT_CID_DEFAULT:
+       case RT_CID_TOSHIBA:
+       case RT_CID_CCX:
+       case RT_CID_819x_Acer:
+       case RT_CID_WHQL:
+       default:
+               break;
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}
+
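+/* Top-level EEPROM/efuse probe: determine the boot source from REG_9346CR,
+ * record the autoload status, then read the adapter info and apply any
+ * customer-specific behavior. Only runs on normal-chip versions. */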
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
+{
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 tmp_u1b;
+
+       if (!IS_NORMAL_CHIP(rtlhal->version))
+               return;
+       tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+       rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ?
+                              EEPROM_93C46 : EEPROM_BOOT_EFUSE;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
+                (tmp_u1b & EEPROMSEL) ? "EEPROM" : "EFUSE"));
+       rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
+                (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
+       _rtl92cu_read_adapter_info(hw);
+       _rtl92cu_hal_customized_behavior(hw);
+       return;
+}
+
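+/* Power-on sequence: wait for autoload to finish, unlock the ISO/CLK/power
+ * control registers, put the SPS into PWM mode, enable LDO12 and the MAC
+ * (APFM_ONMAC), and finally turn on the DMA/WMAC/schedule/security blocks
+ * in REG_CR. Returns -ENODEV if either polling loop times out. */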
+static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int             status = 0;
+       u16             value16;
+       u8              value8;
+       /*  polling autoload done. */
+       u32     pollingCount = 0;
+
+       do {
+               if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                                ("Autoload Done!\n"));
+                       break;
+               }
+               if (pollingCount++ > 100) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                                ("Polling REG_APS_FSMCO[PFM_ALDN] timed"
+                                " out!\n"));
+                       return -ENODEV;
+               }
+       } while (true);
+       /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+       /* Power on when re-enter from IPS/Radio off/card disable */
+       /* enable SPS into PWM mode */
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+       udelay(100);
+       value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+       if (0 == (value8 & LDV12_EN)) {
+               value8 |= LDV12_EN;
+               rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
+                        value8));
+               udelay(100);
+               value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
+               value8 &= ~ISO_MD2PP;
+               rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
+       }
+       /*  auto enable WLAN */
+       pollingCount = 0;
+       value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+       value16 |= APFM_ONMAC;
+       rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
+       do {
+               if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
+                       printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
+                       break;
+               }
+               if (pollingCount++ > 100) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                                ("Polling REG_APS_FSMCO[APFM_ONMAC] timed"
+                                " out!\n"));
+                       return -ENODEV;
+               }
+       } while (true);
+       /* Enable Radio ,GPIO ,and LED function */
+       rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812);
+       /* release RF digital isolation */
+       value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+       value16 &= ~ISO_DIOR;
+       rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
+       /* Reconsider when to do this operation after asking HWSD. */
+       pollingCount = 0;
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
+                                               REG_APSD_CTRL) & ~BIT(6)));
+       do {
+               pollingCount++;
+       } while ((pollingCount < 200) &&
+                (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
+       /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+       value16 = rtl_read_word(rtlpriv,  REG_CR);
+       value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+                   PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC);
+       rtl_write_word(rtlpriv, REG_CR, value16);
+       return status;
+}
+
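+/* Partition the TX packet buffer pages between the public queue and the
+ * high/normal/low queues according to chip version, WMM mode and the
+ * selected out endpoints, then program REG_RQPN (and REG_RQPN_NPQ). */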
+static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
+                                             bool wmm_enable,
+                                             u8 out_ep_num,
+                                             u8 queue_sel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
+       u32 outEPNum = (u32)out_ep_num;
+       u32 numHQ = 0;
+       u32 numLQ = 0;
+       u32 numNQ = 0;
+       u32 numPubQ;
+       u32 value32;
+       u8 value8;
+       u32 txQPageNum, txQPageUnit, txQRemainPage;
+
+       if (!wmm_enable) {
+               numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
+                         CHIP_A_PAGE_NUM_PUBQ;
+               txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
+
+               txQPageUnit = txQPageNum/outEPNum;
+               txQRemainPage = txQPageNum % outEPNum;
+               if (queue_sel & TX_SELE_HQ)
+                       numHQ = txQPageUnit;
+               if (queue_sel & TX_SELE_LQ)
+                       numLQ = txQPageUnit;
+               /* The HIGH priority queue is always present in a 2 out-ep
+                * configuration; any remainder pages go to the high queue. */
+               if ((outEPNum > 1) && (txQRemainPage))
+                       numHQ += txQRemainPage;
+               /* NOTE: This step must be done before writing REG_RQPN. */
+               if (isChipN) {
+                       if (queue_sel & TX_SELE_NQ)
+                               numNQ = txQPageUnit;
+                       value8 = (u8)_NPQ(numNQ);
+                       rtl_write_byte(rtlpriv,  REG_RQPN_NPQ, value8);
+               }
+       } else {
+               /* For WMM, the number of out-eps must be >= 2. */
+               numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
+                         WMM_CHIP_A_PAGE_NUM_PUBQ;
+               if (queue_sel & TX_SELE_HQ) {
+                       numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
+                               WMM_CHIP_A_PAGE_NUM_HPQ;
+               }
+               if (queue_sel & TX_SELE_LQ) {
+                       numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
+                               WMM_CHIP_A_PAGE_NUM_LPQ;
+               }
+               /* NOTE: This step must be done before writing REG_RQPN. */
+               if (isChipN) {
+                       if (queue_sel & TX_SELE_NQ)
+                               numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
+                       value8 = (u8)_NPQ(numNQ);
+                       rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+               }
+       }
+       /* TX DMA */
+       value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+       rtl_write_dword(rtlpriv, REG_RQPN, value32);
+}
+
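+/* Set the TX packet buffer boundaries for the beacon/management/loopback
+ * queues, the RX FIFO boundary, and the page size register (REG_PBP). */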
+static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8      txpktbuf_bndy;
+       u8      value8;
+
+       if (!wmm_enable)
+               txpktbuf_bndy = TX_PAGE_BOUNDARY;
+       else /* for WMM */
+               txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
+                                               ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+                                               : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy);
+       rtl_write_word(rtlpriv,  (REG_TRXFF_BNDY + 2), 0x27FF);
+       value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128);
+       rtl_write_byte(rtlpriv, REG_PBP, value8);
+}
+
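+/* Map the BE/BK/VI/VO/management/high TX queues onto DMA queues by
+ * updating the queue-mapping fields of REG_TRXDMA_CTRL. */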
+static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
+                                           u16 bkQ, u16 viQ, u16 voQ,
+                                           u16 mgtQ, u16 hiQ)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
+
+       value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+                  _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+                  _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+       rtl_write_word(rtlpriv,  REG_TRXDMA_CTRL, value16);
+}
+
+static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
+                                                   bool wmm_enable,
+                                                   u8 queue_sel)
+{
+       u16 uninitialized_var(value);
+
+       switch (queue_sel) {
+       case TX_SELE_HQ:
+               value = QUEUE_HIGH;
+               break;
+       case TX_SELE_LQ:
+               value = QUEUE_LOW;
+               break;
+       case TX_SELE_NQ:
+               value = QUEUE_NORMAL;
+               break;
+       default:
+               WARN_ON(1); /* Shall not reach here! */
+               break;
+       }
+       _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
+                                       value, value);
+       printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
+                                                               bool wmm_enable,
+                                                               u8 queue_sel)
+{
+       u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+       u16 uninitialized_var(valueHi);
+       u16 uninitialized_var(valueLow);
+
+       switch (queue_sel) {
+       case (TX_SELE_HQ | TX_SELE_LQ):
+               valueHi = QUEUE_HIGH;
+               valueLow = QUEUE_LOW;
+               break;
+       case (TX_SELE_NQ | TX_SELE_LQ):
+               valueHi = QUEUE_NORMAL;
+               valueLow = QUEUE_LOW;
+               break;
+       case (TX_SELE_HQ | TX_SELE_NQ):
+               valueHi = QUEUE_HIGH;
+               valueLow = QUEUE_NORMAL;
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+       if (!wmm_enable) {
+               beQ = valueLow;
+               bkQ = valueLow;
+               viQ = valueHi;
+               voQ = valueHi;
+               mgtQ = valueHi;
+               hiQ = valueHi;
+       } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
+               beQ = valueHi;
+               bkQ = valueLow;
+               viQ = valueLow;
+               voQ = valueHi;
+               mgtQ = valueHi;
+               hiQ = valueHi;
+       }
+       _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+       printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
+                                                     bool wmm_enable,
+                                                     u8 queue_sel)
+{
+       u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (!wmm_enable) { /* typical setting */
+               beQ     = QUEUE_LOW;
+               bkQ     = QUEUE_LOW;
+               viQ     = QUEUE_NORMAL;
+               voQ     = QUEUE_HIGH;
+               mgtQ    = QUEUE_HIGH;
+               hiQ     = QUEUE_HIGH;
+       } else { /* for WMM */
+               beQ     = QUEUE_LOW;
+               bkQ     = QUEUE_NORMAL;
+               viQ     = QUEUE_NORMAL;
+               voQ     = QUEUE_HIGH;
+               mgtQ    = QUEUE_HIGH;
+               hiQ     = QUEUE_HIGH;
+       }
+       _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                ("Tx queue select: 0x%02x\n", queue_sel));
+}
+
+static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
+                                              bool wmm_enable,
+                                              u8 out_ep_num,
+                                              u8 queue_sel)
+{
+       switch (out_ep_num) {
+       case 1:
+               _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
+                                                       queue_sel);
+               break;
+       case 2:
+               _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
+                                                       queue_sel);
+               break;
+       case 3:
+               _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
+                                                         queue_sel);
+               break;
+       default:
+               WARN_ON(1); /* Shall not reach here! */
+               break;
+       }
+}
+
+static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
+                                              bool wmm_enable,
+                                              u8 out_ep_num,
+                                              u8 queue_sel)
+{
+       u8      hq_sele = 0; /* stays 0 (all low) if queue_sel is unexpected */
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       switch (out_ep_num) {
+       case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
+               if (!wmm_enable) /* typical setting */
+                       hq_sele =  HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
+                                  HQSEL_HIQ;
+               else    /* for WMM */
+                       hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
+                                 HQSEL_HIQ;
+               break;
+       case 1:
+               if (TX_SELE_LQ == queue_sel) {
+                       /* map all endpoint to Low queue */
+                       hq_sele = 0;
+               } else if (TX_SELE_HQ == queue_sel) {
+                       /* map all endpoint to High queue */
+                       hq_sele =  HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ |
+                                  HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ;
+               }
+               break;
+       default:
+               WARN_ON(1); /* Shall not reach here! */
+               break;
+       }
+       rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                ("Tx queue select: 0x%02x\n", hq_sele));
+}
+
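+/* Dispatch queue-priority setup to the normal-chip (N) or test-chip (T)
+ * variant depending on the chip version. */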
+static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
+                                               bool wmm_enable,
+                                               u8 out_ep_num,
+                                               u8 queue_sel)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
+                                                  queue_sel);
+       else
+               _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
+                                                  queue_sel);
+}
+
+static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
+{
+}
+
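+/* Program the default RX filters: the RCR receive configuration, accept
+ * all multicast addresses and all management/data frames, and reject all
+ * control frames. */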
+static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
+{
+       u16                     value16;
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS |
+                     RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+                     RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+       rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+       /* Accept all multicast addresses */
+       rtl_write_dword(rtlpriv,  REG_MAR, 0xFFFFFFFF);
+       rtl_write_dword(rtlpriv,  REG_MAR + 4, 0xFFFFFFFF);
+       /* Accept all management frames */
+       value16 = 0xFFFF;
+       rtl92c_set_mgt_filter(hw, value16);
+       /* Reject all control frame - default value is 0 */
+       rtl92c_set_ctrl_filter(hw, 0x0);
+       /* Accept all data frames */
+       value16 = 0xFFFF;
+       rtl92c_set_data_filter(hw, value16);
+}
+
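+/* MAC-level initialization: power on, build the LLT table, reserve TX
+ * buffer pages, set queue priorities, and program the default RX filter,
+ * EDCA and aggregation parameters. WMM is left disabled here for now. */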
+static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+       int err = 0;
+       u32     boundary = 0;
+       u8 wmm_enable = false; /* TODO */
+       u8 out_ep_nums = rtlusb->out_ep_nums;
+       u8 queue_sel = rtlusb->out_queue_sel;
+       err = _rtl92cu_init_power_on(hw);
+
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                       ("Failed to init power on!\n"));
+               return err;
+       }
+       if (!wmm_enable) {
+               boundary = TX_PAGE_BOUNDARY;
+       } else { /* for WMM */
+               boundary = (IS_NORMAL_CHIP(rtlhal->version))
+                                       ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+                                       : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+       }
+       if (false == rtl92c_init_llt_table(hw, boundary)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                       ("Failed to init LLT Table!\n"));
+               return -EINVAL;
+       }
+       _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
+                                         queue_sel);
+       _rtl92c_init_trx_buffer(hw, wmm_enable);
+       _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
+                                    queue_sel);
+       /* Get Rx PHY status in order to report RSSI and others. */
+       rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
+       rtl92c_init_interrupt(hw);
+       rtl92c_init_network_type(hw);
+       _rtl92cu_init_wmac_setting(hw);
+       rtl92c_init_adaptive_ctrl(hw);
+       rtl92c_init_edca(hw);
+       rtl92c_init_rate_fallback(hw);
+       rtl92c_init_retry_function(hw);
+       _rtl92cu_init_usb_aggregation(hw);
+       rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
+       rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
+       rtl92c_init_beacon_parameters(hw, rtlhal->version);
+       rtl92c_init_ampdu_aggregation(hw);
+       rtl92c_init_beacon_max_error(hw, true);
+       return err;
+}
+
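+/* Enable hardware TX encryption / RX decryption unless software crypto is
+ * requested, optionally forcing the default key, and push the resulting
+ * SECR value through HW_VAR_WPA_CONFIG. */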
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 sec_reg_value = 0x0;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+                 rtlpriv->sec.pairwise_enc_algorithm,
+                 rtlpriv->sec.group_enc_algorithm));
+       if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                        ("using sw crypto, skipping hw security config\n"));
+               return;
+       }
+       sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+       if (rtlpriv->sec.use_defaultkey) {
+               sec_reg_value |= SCR_TxUseDK;
+               sec_reg_value |= SCR_RxUseDK;
+       }
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+       rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+       RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+                ("The SECR-value %x\n", sec_reg_value));
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       /* Fix for MAC loopback mode failure. */
+       rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+       rtl_write_byte(rtlpriv, 0x15, 0xe9);
+       /* HW SEQ CTRL: changed from 0x0 to 0xFF by tynli; HW sequence
+        * numbering is enabled by default. */
+       rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+       /* fixed USB interface interference issue */
+       rtl_write_byte(rtlpriv, 0xfe40, 0xe0);
+       rtl_write_byte(rtlpriv, 0xfe41, 0x8d);
+       rtl_write_byte(rtlpriv, 0xfe42, 0x80);
+       rtlusb->reg_bcn_ctrl_val = 0x18;
+       rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _InitPABias(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 pa_setting;
+
+       /* FIXED PA current issue */
+       pa_setting = efuse_read_1byte(hw, 0x1FA);
+       if (!(pa_setting & BIT(0))) {
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406);
+       }
+       if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) &&
+           IS_92C_SERIAL(rtlhal->version)) {
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406);
+       }
+       if (!(pa_setting & BIT(4))) {
+               pa_setting = rtl_read_byte(rtlpriv, 0x16);
+               pa_setting &= 0x0F;
+               rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90);
+       }
+}
+
+static void _InitAntenna_Selection(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_ANTENNA_DIVERSITY
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (pHalData->AntDivCfg == 0)
+               return;
+
+       if (rtlphy->rf_type == RF_1T1R) {
+               rtl_write_dword(rtlpriv, REG_LEDCFG0,
+                               rtl_read_dword(rtlpriv,
+                               REG_LEDCFG0)|BIT(23));
+               rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+               if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
+                   Antenna_A)
+                       pHalData->CurAntenna = Antenna_A;
+               else
+                       pHalData->CurAntenna = Antenna_B;
+       }
+#endif
+}
+
+static void _dump_registers(struct ieee80211_hw *hw)
+{
+}
+
+static void _update_mac_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
+       mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+       mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+       mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+}
+
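+/* Full hardware bring-up: init the MAC, download the firmware, configure
+ * MAC/BB/RF from the parameter tables, reset the CAM, enable hardware
+ * security, run the IQ/LC calibrations and start the dynamic mechanisms. */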
+int rtl92cu_hw_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       int err = 0;
+       static bool iqk_initialized;
+
+       rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+       err = _rtl92cu_init_mac(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+               return err;
+       }
+       err = rtl92c_download_fw(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        ("Failed to download FW. Init HW without FW now..\n"));
+               err = 1;
+               rtlhal->fw_ready = false;
+               return err;
+       } else {
+               rtlhal->fw_ready = true;
+       }
+       rtlhal->last_hmeboxnum = 0; /* h2c */
+       _rtl92cu_phy_param_tab_init(hw);
+       rtl92cu_phy_mac_config(hw);
+       rtl92cu_phy_bb_config(hw);
+       rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+       rtl92c_phy_rf_config(hw);
+       if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+           !IS_92C_SERIAL(rtlhal->version)) {
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+       }
+       rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+                                                RF_CHNLBW, RFREG_OFFSET_MASK);
+       rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+                                                RF_CHNLBW, RFREG_OFFSET_MASK);
+       rtl92cu_bb_block_on(hw);
+       rtl_cam_reset_all_entry(hw);
+       rtl92cu_enable_hw_security_config(hw);
+       ppsc->rfpwr_state = ERFON;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+       if (ppsc->rfpwr_state == ERFON) {
+               rtl92c_phy_set_rfpath_switch(hw, 1);
+               if (iqk_initialized) {
+                       rtl92c_phy_iq_calibrate(hw, false);
+               } else {
+                       rtl92c_phy_iq_calibrate(hw, false);
+                       iqk_initialized = true;
+               }
+               rtl92c_dm_check_txpower_tracking(hw);
+               rtl92c_phy_lc_calibrate(hw);
+       }
+       _rtl92cu_hw_configure(hw);
+       _InitPABias(hw);
+       _InitAntenna_Selection(hw);
+       _update_mac_setting(hw);
+       rtl92c_dm_init(hw);
+       _dump_registers(hw);
+       return err;
+}
+
+static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+/**************************************
+a.     TXPAUSE 0x522[7:0] = 0xFF       Pause MAC TX queue
+b.     RF path 0 offset 0x00 = 0x00    disable RF
+c.     APSD_CTRL 0x600[7:0] = 0x40
+d.     SYS_FUNC_EN 0x02[7:0] = 0x16    reset BB state machine
+e.     SYS_FUNC_EN 0x02[7:0] = 0x14    reset BB state machine
+***************************************/
+       u8 eRFPath = 0, value8 = 0;
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
+
+       value8 |= APSDOFF;
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
+       value8 = 0;
+       value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
+       value8 &= (~FEN_BB_GLB_RSTn);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
+}
+
+static void  _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlhal->fw_version <=  0x20) {
+               /*****************************
+               f. MCUFWDL 0x80[7:0]=0          reset MCU ready status
+               g. SYS_FUNC_EN 0x02[10]= 0      reset MCU reg, (8051 reset)
+               h. SYS_FUNC_EN 0x02[15-12]= 5   reset MAC reg, DCORE
+               i. SYS_FUNC_EN 0x02[10]= 1      enable MCU reg, (8051 enable)
+               ******************************/
+               u16 valu16 = 0;
+
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+               valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+               rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 &
+                              (~FEN_CPUEN))); /* reset MCU ,8051 */
+               valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF;
+               rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+                             (FEN_HWPDN|FEN_ELDR))); /* reset MAC */
+               valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+               rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+                              FEN_CPUEN)); /* enable MCU ,8051 */
+       } else {
+               u8 retry_cnts = 0;
+
+               /* IF fw in RAM code, do reset */
+               if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
+                       /* reset MCU ready status */
+                       rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+                       if (rtlhal->fw_ready) {
+                               /* 8051 reset by self */
+                               rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
+                               while ((retry_cnts++ < 100) &&
+                                      (FEN_CPUEN & rtl_read_word(rtlpriv,
+                                      REG_SYS_FUNC_EN))) {
+                                       udelay(50);
+                               }
+                               if (retry_cnts >= 100) {
+                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                               ("#####=> 8051 reset failed!.."
+                                               ".......................\n"));
+                                       /* if 8051 reset fail, reset MAC. */
+                                       rtl_write_byte(rtlpriv,
+                                                      REG_SYS_FUNC_EN + 1,
+                                                      0x50);
+                                       udelay(100);
+                               }
+                       }
+               }
+               /* Reset MAC and Enable 8051 */
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+       }
+       if (bWithoutHWSM) {
+               /*****************************
+                 Without HW auto state machine
+               g.SYS_CLKR 0x08[15:0] = 0x30A3          disable MAC clock
+               h.AFE_PLL_CTRL 0x28[7:0] = 0x80         disable AFE PLL
+               i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F     gated AFE DIG_CLOCK
+               j.SYS_ISO_CTRL 0x00[7:0] = 0xF9         isolate digital to PON
+               ******************************/
+               rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+               rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
+               rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F);
+               rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9);
+       }
+}
+
+static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*****************************
+k. SYS_FUNC_EN 0x03[7:0] = 0x44                disable ELDR function
+l. SYS_CLKR 0x08[15:0] = 0x3083                disable ELDR clock
+m. SYS_ISO_CTRL 0x01[7:0] = 0x83       isolate ELDR to PON
+******************************/
+       rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+       rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
+}
+
+static void _DisableGPIO(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+/***************************************
+j. GPIO_PIN_CTRL 0x44[31:0]=0x000
+k. Value = GPIO_PIN_CTRL[7:0]
+l.  GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level
+m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
+n. LEDCFG 0x4C[15:0] = 0x8080
+***************************************/
+       u8      value8;
+       u16     value16;
+       u32     value32;
+
+       /* 1. Disable GPIO[7:0] */
+       rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000);
+       value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
+       value8 = (u8) (value32&0x000000FF);
+       value32 |= ((value8<<8) | 0x00FF0000);
+       rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32);
+       /* 2. Disable GPIO[10:8] */
+       rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00);
+       value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F;
+       value8 = (u8) (value16&0x000F);
+       value16 |= ((value8<<4) | 0x0780);
+       rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16);
+       /* 3. Disable LED0 & 1 */
+       rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
+}
+
+static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u16 value16 = 0;
+       u8 value8 = 0;
+
+       if (bWithoutHWSM) {
+               /*****************************
+               n. LDOA15_CTRL 0x20[7:0] = 0x04  disable A15 power
+               o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
+               r. When the driver calls disable, the ASIC will turn off the
+                  remaining clocks automatically
+               ******************************/
+               rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+               value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+               value8 &= (~LDV12_EN);
+               rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+       }
+
+/*****************************
+h. SPS0_CTRL 0x11[7:0] = 0x23          enter PFM mode
+i. APS_FSMCO 0x04[15:0] = 0x4802       set USB suspend
+******************************/
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+       value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+       rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16);
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
+}
+
+static void _CardDisableHWSM(struct ieee80211_hw *hw)
+{
+       /* ==== RF Off Sequence ==== */
+       _DisableRFAFEAndResetBB(hw);
+       /* ==== Reset digital sequence   ====== */
+       _ResetDigitalProcedure1(hw, false);
+       /*  ==== Pull GPIO PIN to balance level and LED control ====== */
+       _DisableGPIO(hw);
+       /* ==== Disable analog sequence === */
+       _DisableAnalog(hw, false);
+}
+
+static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
+{
+       /*==== RF Off Sequence ==== */
+       _DisableRFAFEAndResetBB(hw);
+       /*  ==== Reset digital sequence   ====== */
+       _ResetDigitalProcedure1(hw, true);
+       /*  ==== Pull GPIO PIN to balance level and LED control ====== */
+       _DisableGPIO(hw);
+       /*  ==== Reset digital sequence   ====== */
+       _ResetDigitalProcedure2(hw);
+       /*  ==== Disable analog sequence === */
+       _DisableAnalog(hw, true);
+}
+
+static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+                                     u8 set_bits, u8 clear_bits)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
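+       /* Keep a software shadow of REG_BCN_CTRL so updates do not require
+        * reading the register back. */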
+       rtlusb->reg_bcn_ctrl_val |= set_bits;
+       rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
+       rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 tmp1byte = 0;
+
+       if (IS_NORMAL_CHIP(rtlhal->version)) {
+               tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                              tmp1byte & (~BIT(6)));
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+               tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+               tmp1byte &= ~(BIT(0));
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE,
+                              rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6));
+       }
+}
+
+static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 tmp1byte = 0;
+
+       if (IS_NORMAL_CHIP(rtlhal->version)) {
+               tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                              tmp1byte | BIT(6));
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+               tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+               tmp1byte |= BIT(0);
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE,
+                              rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6)));
+       }
+}
+
+static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1));
+       else
+               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+}
+
+static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0);
+       else
+               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+}
+
+static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
+                                    enum nl80211_iftype type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+       enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
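+       /* MSR[1:0] selects the network type; clear those bits here and set
+        * them below according to the interface type. */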
+       bt_msr &= 0xfc;
+       rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+       if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
+           NL80211_IFTYPE_STATION) {
+               _rtl92cu_stop_tx_beacon(hw);
+               _rtl92cu_enable_bcn_sub_func(hw);
+       } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+               _rtl92cu_resume_tx_beacon(hw);
+               _rtl92cu_disable_bcn_sub_func(hw);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
+                        "STATUS:No such media status(%x).\n", type));
+       }
+       switch (type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               bt_msr |= MSR_NOLINK;
+               ledaction = LED_CTL_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to NO LINK!\n"));
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               bt_msr |= MSR_ADHOC;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to Ad Hoc!\n"));
+               break;
+       case NL80211_IFTYPE_STATION:
+               bt_msr |= MSR_INFRA;
+               ledaction = LED_CTL_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to STA!\n"));
+               break;
+       case NL80211_IFTYPE_AP:
+               bt_msr |= MSR_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to AP!\n"));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Network type %d not supported!\n", type));
+               goto error_out;
+       }
+       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtlpriv->cfg->ops->led_control(hw, ledaction);
+       if ((bt_msr & 0xfc) == MSR_AP)
+               rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+       else
+               rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+       return 0;
+error_out:
+       return 1;
+}
+
+void rtl92cu_card_disable(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       enum nl80211_iftype opmode;
+
+       mac->link_state = MAC80211_NOLINK;
+       opmode = NL80211_IFTYPE_UNSPECIFIED;
+       _rtl92cu_set_media_status(hw, opmode);
+       rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+       if (rtlusb->disableHWSM)
+               _CardDisableHWSM(hw);
+       else
+               _CardDisableWithoutHWSM(hw);
+}
+
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+       /* dummy routine needed for callback from rtl_op_configure_filter() */
+}
+
+/*========================================================================== */
+
+static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
+                             enum nl80211_iftype type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool filterout_non_associated_bssid = false;
+
+       switch (type) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_STATION:
+               filterout_non_associated_bssid = true;
+               break;
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_AP:
+       default:
+               break;
+       }
+       if (filterout_non_associated_bssid) {
+               if (IS_NORMAL_CHIP(rtlhal->version)) {
+                       switch (rtlphy->current_io_type) {
+                       case IO_CMD_RESUME_DM_BY_SCAN:
+                               reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
+                               /* enable update TSF */
+                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+                               break;
+                       case IO_CMD_PAUSE_DM_BY_SCAN:
+                               reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
+                               /* disable update TSF */
+                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+                               break;
+                       }
+               } else {
+                       reg_rcr |= (RCR_CBSSID);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                                     (u8 *)(&reg_rcr));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
+               }
+       } else {
+               if (IS_NORMAL_CHIP(rtlhal->version)) {
+                       reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                                     (u8 *)(&reg_rcr));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+               } else {
+                       reg_rcr &= (~RCR_CBSSID);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                                     (u8 *)(&reg_rcr));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
+               }
+       }
+}
+
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+       if (_rtl92cu_set_media_status(hw, type))
+               return -EOPNOTSUPP;
+       _rtl92cu_set_check_bssid(hw, type);
+       return 0;
+}
+
+static void _InitBeaconParameters(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+       /* TODO: Remove these magic numbers */
+       rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
+       rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
+       rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+       /* Change beacon AIFS to the largest number because the test chip does
+        * not perform contention before sending a beacon. */
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+       else
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
+                                   bool Linked)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00);
+       rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F);
+}
+
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 bcn_interval, atim_window;
+       u32 value32;
+
+       bcn_interval = mac->beacon_interval;
+       atim_window = 2;        /*FIX MERGE */
+       rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+       rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+       _InitBeaconParameters(hw);
+       rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+       /*
+        * Force beacon frame transmission even after receiving a beacon frame
+        * from another ad hoc STA.
+        *
+        * Reset the TSF timer to zero, added by Roger. 2008.06.24
+        */
+       value32 = rtl_read_dword(rtlpriv, REG_TCR);
+       value32 &= ~TSFRST;
+       rtl_write_dword(rtlpriv, REG_TCR, value32);
+       value32 |= TSFRST;
+       rtl_write_dword(rtlpriv, REG_TCR, value32);
+       RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
+                ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+                value32));
+       /* TODO: Modify later (find the right parameters)
+        * NOTE: Fix the test chip's bug (contention window randomness) */
+       if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
+           (mac->opmode == NL80211_IFTYPE_AP)) {
+               rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
+               rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
+       }
+       _beacon_function_enable(hw, true, true);
+}
+
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 bcn_interval = mac->beacon_interval;
+
+       RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+                ("beacon_interval:%d\n", bcn_interval));
+       rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+}
+
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+                                  u32 add_msr, u32 rm_msr)
+{
+}
+
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       switch (variable) {
+       case HW_VAR_RCR:
+               *((u32 *)(val)) = mac->rx_conf;
+               break;
+       case HW_VAR_RF_STATE:
+               *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+               break;
+       case HW_VAR_FWLPS_RF_ON:{
+                       enum rf_pwrstate rfState;
+                       u32 val_rcr;
+
+                       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+                                                     (u8 *)(&rfState));
+                       if (rfState == ERFOFF) {
+                               *((bool *) (val)) = true;
+                       } else {
+                               val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+                               val_rcr &= 0x00070000;
+                               if (val_rcr)
+                                       *((bool *) (val)) = false;
+                               else
+                                       *((bool *) (val)) = true;
+                       }
+                       break;
+               }
+       case HW_VAR_FW_PSMODE_STATUS:
+               *((bool *) (val)) = ppsc->fw_current_inpsmode;
+               break;
+       case HW_VAR_CORRECT_TSF:{
+                       u64 tsf;
+                       u32 *ptsf_low = (u32 *)&tsf;
+                       u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+                       *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+                       *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+                       *((u64 *)(val)) = tsf;
+                       break;
+               }
+       case HW_VAR_MGT_FILTER:
+               *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+               break;
+       case HW_VAR_CTRL_FILTER:
+               *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+               break;
+       case HW_VAR_DATA_FILTER:
+               *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       }
+}
+
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       enum wireless_mode wirelessmode = mac->mode;
+       u8 idx = 0;
+
+       switch (variable) {
+       case HW_VAR_ETHER_ADDR:{
+                       for (idx = 0; idx < ETH_ALEN; idx++) {
+                               rtl_write_byte(rtlpriv, (REG_MACID + idx),
+                                              val[idx]);
+                       }
+                       break;
+               }
+       case HW_VAR_BASIC_RATE:{
+                       u16 rate_cfg = ((u16 *) val)[0];
+                       u8 rate_index = 0;
+
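+                       /* Program the basic-rate bitmap into RRSR and derive
+                        * the highest basic rate as the initial RTS rate. */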
+                       rate_cfg &= 0x15f;
+                       /* TODO */
+                       /* if (mac->current_network.vender == HT_IOT_PEER_CISCO
+                        *     && ((rate_cfg & 0x150) == 0)) {
+                        *        rate_cfg |= 0x010;
+                        * } */
+                       rate_cfg |= 0x01;
+                       rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+                       rtl_write_byte(rtlpriv, REG_RRSR + 1,
+                                      (rate_cfg >> 8) & 0xff);
+                       while (rate_cfg > 0x1) {
+                               rate_cfg >>= 1;
+                               rate_index++;
+                       }
+                       rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+                                      rate_index);
+                       break;
+               }
+       case HW_VAR_BSSID:{
+                       for (idx = 0; idx < ETH_ALEN; idx++) {
+                               rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+                                              val[idx]);
+                       }
+                       break;
+               }
+       case HW_VAR_SIFS:{
+                       rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]);
+                       rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("HW_VAR_SIFS\n"));
+                       break;
+               }
+       case HW_VAR_SLOT_TIME:{
+                       u8 e_aci;
+                       u8 QOS_MODE = 1;
+
+                       rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("HW_VAR_SLOT_TIME %x\n", val[0]));
+                       if (QOS_MODE) {
+                               for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+                                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                               HW_VAR_AC_PARAM,
+                                                               (u8 *)(&e_aci));
+                       } else {
+                               u8 sifstime = 0;
+                               u8      u1bAIFS;
+
+                               if (IS_WIRELESS_MODE_A(wirelessmode) ||
+                                   IS_WIRELESS_MODE_N_24G(wirelessmode) ||
+                                   IS_WIRELESS_MODE_N_5G(wirelessmode))
+                                       sifstime = 16;
+                               else
+                                       sifstime = 10;
+                               u1bAIFS = sifstime + (2 *  val[0]);
+                               rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
+                                              u1bAIFS);
+                               rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
+                                              u1bAIFS);
+                               rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
+                                              u1bAIFS);
+                               rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
+                                              u1bAIFS);
+                       }
+                       break;
+               }
+       case HW_VAR_ACK_PREAMBLE:{
+                       u8 reg_tmp;
+                       u8 short_preamble = (bool) (*(u8 *) val);
+                       reg_tmp = 0;
+                       if (short_preamble)
+                               reg_tmp |= 0x80;
+                       rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+                       break;
+               }
+       case HW_VAR_AMPDU_MIN_SPACE:{
+                       u8 min_spacing_to_set;
+                       u8 sec_min_space;
+
+                       min_spacing_to_set = *((u8 *) val);
+                       if (min_spacing_to_set <= 7) {
+                               switch (rtlpriv->sec.pairwise_enc_algorithm) {
+                               case NO_ENCRYPTION:
+                               case AESCCMP_ENCRYPTION:
+                                       sec_min_space = 0;
+                                       break;
+                               case WEP40_ENCRYPTION:
+                               case WEP104_ENCRYPTION:
+                               case TKIP_ENCRYPTION:
+                                       sec_min_space = 6;
+                                       break;
+                               default:
+                                       sec_min_space = 7;
+                                       break;
+                               }
+                               if (min_spacing_to_set < sec_min_space)
+                                       min_spacing_to_set = sec_min_space;
+                               mac->min_space_cfg = ((mac->min_space_cfg &
+                                                    0xf8) |
+                                                    min_spacing_to_set);
+                               *val = min_spacing_to_set;
+                               RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                       ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+                                       mac->min_space_cfg));
+                               rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+                                              mac->min_space_cfg);
+                       }
+                       break;
+               }
+       case HW_VAR_SHORTGI_DENSITY:{
+                       u8 density_to_set;
+
+                       density_to_set = *((u8 *) val);
+                       density_to_set &= 0x1f;
+                       mac->min_space_cfg &= 0x07;
+                       mac->min_space_cfg |= (density_to_set << 3);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+                                 mac->min_space_cfg));
+                       rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+                                      mac->min_space_cfg);
+                       break;
+               }
+       case HW_VAR_AMPDU_FACTOR:{
+                       u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+                       u8 factor_toset;
+                       u8 *p_regtoset = NULL;
+                       u8 index = 0;
+
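+                       /* Convert the AMPDU factor into an aggregation length
+                        * limit and clamp both nibbles of each REG_AGGLEN_LMT
+                        * byte to it. */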
+                       p_regtoset = regtoset_normal;
+                       factor_toset = *((u8 *) val);
+                       if (factor_toset <= 3) {
+                               factor_toset = (1 << (factor_toset + 2));
+                               if (factor_toset > 0xf)
+                                       factor_toset = 0xf;
+                               for (index = 0; index < 4; index++) {
+                                       if ((p_regtoset[index] & 0xf0) >
+                                           (factor_toset << 4))
+                                               p_regtoset[index] =
+                                                    (p_regtoset[index] & 0x0f)
+                                                    | (factor_toset << 4);
+                                       if ((p_regtoset[index] & 0x0f) >
+                                            factor_toset)
+                                               p_regtoset[index] =
+                                                    (p_regtoset[index] & 0xf0)
+                                                    | (factor_toset);
+                                       rtl_write_byte(rtlpriv,
+                                                      (REG_AGGLEN_LMT + index),
+                                                      p_regtoset[index]);
+                               }
+                               RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                        ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
+                                         factor_toset));
+                       }
+                       break;
+               }
+       case HW_VAR_AC_PARAM:{
+                       u8 e_aci = *((u8 *) val);
+                       u32 u4b_ac_param;
+                       u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+                       u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+                       u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
+
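+                       /* Pack AIFS, ECWmin, ECWmax and TXOP into one 32-bit
+                        * EDCA parameter word for the per-AC register. */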
+                       u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+                       u4b_ac_param |= (u32) ((cw_min & 0xF) <<
+                                        AC_PARAM_ECW_MIN_OFFSET);
+                       u4b_ac_param |= (u32) ((cw_max & 0xF) <<
+                                        AC_PARAM_ECW_MAX_OFFSET);
+                       u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("queue:%x, ac_param:%x\n", e_aci,
+                                 u4b_ac_param));
+                       switch (e_aci) {
+                       case AC1_BK:
+                               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       case AC0_BE:
+                               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       case AC2_VI:
+                               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       case AC3_VO:
+                               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       default:
+                               RT_ASSERT(false, ("HW_VAR_AC_PARAM: invalid"
+                                         " aci: %d!\n", e_aci));
+                               break;
+                       }
+                       if (rtlusb->acm_method != eAcmWay2_SW)
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                        HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+                       break;
+               }
+       case HW_VAR_ACM_CTRL:{
+                       u8 e_aci = *((u8 *) val);
+                       union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
+                                                       (&(mac->ac[0].aifs));
+                       u8 acm = p_aci_aifsn->f.acm;
+                       u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+                       acm_ctrl =
+                           acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
+                       if (acm) {
+                               switch (e_aci) {
+                               case AC0_BE:
+                                       acm_ctrl |= AcmHw_BeqEn;
+                                       break;
+                               case AC2_VI:
+                                       acm_ctrl |= AcmHw_ViqEn;
+                                       break;
+                               case AC3_VO:
+                                       acm_ctrl |= AcmHw_VoqEn;
+                                       break;
+                               default:
+                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                                ("HW_VAR_ACM_CTRL acm set "
+                                                 "failed: eACI is %d\n",
+                                                 e_aci));
+                                       break;
+                               }
+                       } else {
+                               switch (e_aci) {
+                               case AC0_BE:
+                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       break;
+                               case AC2_VI:
+                                       acm_ctrl &= (~AcmHw_ViqEn);
+                                       break;
+                               case AC3_VO:
+                                       acm_ctrl &= (~AcmHw_VoqEn);
+                                       break;
+                               default:
+                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                                ("switch case not process\n"));
+                                       break;
+                               }
+                       }
+                       RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+                                ("[HW_VAR_ACM_CTRL] Write 0x%X\n",
+                                 acm_ctrl));
+                       rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+                       break;
+               }
+       case HW_VAR_RCR:{
+                       rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+                       mac->rx_conf = ((u32 *) (val))[0];
+                       RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
+                                ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
+                       break;
+               }
+       case HW_VAR_RETRY_LIMIT:{
+                       u8 retry_limit = ((u8 *) (val))[0];
+
+                       rtl_write_word(rtlpriv, REG_RL,
+                                      retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+                                      retry_limit << RETRY_LIMIT_LONG_SHIFT);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
+                                "ETRY_LIMIT(0x%08x)\n", retry_limit));
+                       break;
+               }
+       case HW_VAR_DUAL_TSF_RST:
+               rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+               break;
+       case HW_VAR_EFUSE_BYTES:
+               rtlefuse->efuse_usedbytes = *((u16 *) val);
+               break;
+       case HW_VAR_EFUSE_USAGE:
+               rtlefuse->efuse_usedpercentage = *((u8 *) val);
+               break;
+       case HW_VAR_IO_CMD:
+               rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
+               break;
+       case HW_VAR_WPA_CONFIG:
+               rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+               break;
+       case HW_VAR_SET_RPWM:{
+                       u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
+
+                       if (rpwm_val & BIT(7))
+                               rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+                                              (*(u8 *)val));
+                       else
+                               rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+                                              ((*(u8 *)val) | BIT(7)));
+                       break;
+               }
+       case HW_VAR_H2C_FW_PWRMODE:{
+                       u8 psmode = (*(u8 *) val);
+
+                       if ((psmode != FW_PS_ACTIVE_MODE) &&
+                          (!IS_92C_SERIAL(rtlhal->version)))
+                               rtl92c_dm_rf_saving(hw, true);
+                       rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+                       break;
+               }
+       case HW_VAR_FW_PSMODE_STATUS:
+               ppsc->fw_current_inpsmode = *((bool *) val);
+               break;
+       case HW_VAR_H2C_FW_JOINBSSRPT:{
+                       u8 mstatus = (*(u8 *) val);
+                       u8 tmp_reg422;
+                       bool recover = false;
+
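+                       /* On connect, download the reserved-page packets with
+                        * beacon control temporarily adjusted, restoring
+                        * BIT(6) of REG_FWHW_TXQ_CTRL + 2 if it was set. */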
+                       if (mstatus == RT_MEDIA_CONNECT) {
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                        HW_VAR_AID, NULL);
+                               rtl_write_byte(rtlpriv, REG_CR + 1, 0x03);
+                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+                               tmp_reg422 = rtl_read_byte(rtlpriv,
+                                                       REG_FWHW_TXQ_CTRL + 2);
+                               if (tmp_reg422 & BIT(6))
+                                       recover = true;
+                               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                                              tmp_reg422 & (~BIT(6)));
+                               rtl92c_set_fw_rsvdpagepkt(hw, 0);
+                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+                               if (recover)
+                                       rtl_write_byte(rtlpriv,
+                                                REG_FWHW_TXQ_CTRL + 2,
+                                               tmp_reg422 | BIT(6));
+                               rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+                       }
+                       rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+                       break;
+               }
+       case HW_VAR_AID:{
+                       u16 u2btmp;
+
+                       u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+                       u2btmp &= 0xC000;
+                       rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+                                      (u2btmp | mac->assoc_id));
+                       break;
+               }
+       case HW_VAR_CORRECT_TSF:{
+                       u8 btype_ibss = ((u8 *) (val))[0];
+
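+                       /* Pause beacon TX (IBSS only) while the 64-bit TSF
+                        * is written, then resume it. */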
+                       if (btype_ibss)
+                               _rtl92cu_stop_tx_beacon(hw);
+                       _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+                       rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
+                                       0xffffffff));
+                       rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+                                       (u32)((mac->tsf >> 32) & 0xffffffff));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+                       if (btype_ibss)
+                               _rtl92cu_resume_tx_beacon(hw);
+                       break;
+               }
+       case HW_VAR_MGT_FILTER:
+               rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+               break;
+       case HW_VAR_CTRL_FILTER:
+               rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+               break;
+       case HW_VAR_DATA_FILTER:
+               rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+                                                       "not process\n"));
+               break;
+       }
+}
+
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 ratr_value = (u32) mac->basic_rates;
+       u8 *mcsrate = mac->mcs;
+       u8 ratr_index = 0;
+       u8 nmode = mac->ht_enable;
+       u8 mimo_ps = 1;
+       u16 shortgi_rate = 0;
+       u32 tmp_ratr_value = 0;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
+       enum wireless_mode wirelessmode = mac->mode;
+
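+       /* Legacy basic rates occupy bits 0-11; the 16-bit MCS map is placed
+        * at bit 12 and above before mode-specific masking. */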
+       ratr_value |= ((*(u16 *) (mcsrate))) << 12;
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               if (ratr_value & 0x0000000c)
+                       ratr_value &= 0x0000000d;
+               else
+                       ratr_value &= 0x0000000f;
+               break;
+       case WIRELESS_MODE_G:
+               ratr_value &= 0x00000FF5;
+               break;
+       case WIRELESS_MODE_N_24G:
+       case WIRELESS_MODE_N_5G:
+               nmode = 1;
+               if (mimo_ps == 0) {
+                       ratr_value &= 0x0007F005;
+               } else {
+                       u32 ratr_mask;
+
+                       if (get_rf_type(rtlphy) == RF_1T2R ||
+                           get_rf_type(rtlphy) == RF_1T1R)
+                               ratr_mask = 0x000ff005;
+                       else
+                               ratr_mask = 0x0f0ff005;
+                       if (curtxbw_40mhz)
+                               ratr_mask |= 0x00000010;
+                       ratr_value &= ratr_mask;
+               }
+               break;
+       default:
+               if (rtlphy->rf_type == RF_1T2R)
+                       ratr_value &= 0x000ff0ff;
+               else
+                       ratr_value &= 0x0f0ff0ff;
+               break;
+       }
+       ratr_value &= 0x0FFFFFFF;
+       if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+           (!curtxbw_40mhz && curshortgi_20mhz))) {
+               ratr_value |= 0x10000000;
+               tmp_ratr_value = (ratr_value >> 12);
+               for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+                       if ((1 << shortgi_rate) & tmp_ratr_value)
+                               break;
+               }
+               shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+                              (shortgi_rate << 4) | (shortgi_rate);
+       }
+       rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
+                REG_ARFR0)));
+}
+
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 ratr_bitmap = (u32) mac->basic_rates;
+       u8 *p_mcsrate = mac->mcs;
+       u8 ratr_index = 0;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
+       enum wireless_mode wirelessmode = mac->mode;
+       bool shortgi = false;
+       u8 rate_mask[5];
+       u8 macid = 0;
+       u8 mimops = 1;
+
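+       /* Build the 5-byte H2C rate-mask payload: a masked rate bitmap with
+        * the RA table index in the top nibble, followed by the macid with
+        * the short-GI flag. */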
+       ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               ratr_index = RATR_INX_WIRELESS_B;
+               if (ratr_bitmap & 0x0000000c)
+                       ratr_bitmap &= 0x0000000d;
+               else
+                       ratr_bitmap &= 0x0000000f;
+               break;
+       case WIRELESS_MODE_G:
+               ratr_index = RATR_INX_WIRELESS_GB;
+               if (rssi_level == 1)
+                       ratr_bitmap &= 0x00000f00;
+               else if (rssi_level == 2)
+                       ratr_bitmap &= 0x00000ff0;
+               else
+                       ratr_bitmap &= 0x00000ff5;
+               break;
+       case WIRELESS_MODE_A:
+               ratr_index = RATR_INX_WIRELESS_A;
+               ratr_bitmap &= 0x00000ff0;
+               break;
+       case WIRELESS_MODE_N_24G:
+       case WIRELESS_MODE_N_5G:
+               ratr_index = RATR_INX_WIRELESS_NGB;
+               if (mimops == 0) {
+                       if (rssi_level == 1)
+                               ratr_bitmap &= 0x00070000;
+                       else if (rssi_level == 2)
+                               ratr_bitmap &= 0x0007f000;
+                       else
+                               ratr_bitmap &= 0x0007f005;
+               } else {
+                       if (rtlphy->rf_type == RF_1T2R ||
+                           rtlphy->rf_type == RF_1T1R) {
+                               if (curtxbw_40mhz) {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x000f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x000ff000;
+                                       else
+                                               ratr_bitmap &= 0x000ff015;
+                               } else {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x000f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x000ff000;
+                                       else
+                                               ratr_bitmap &= 0x000ff005;
+                               }
+                       } else {
+                               if (curtxbw_40mhz) {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x0f0f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x0f0ff000;
+                                       else
+                                               ratr_bitmap &= 0x0f0ff015;
+                               } else {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x0f0f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x0f0ff000;
+                                       else
+                                               ratr_bitmap &= 0x0f0ff005;
+                               }
+                       }
+               }
+               if ((curtxbw_40mhz && curshortgi_40mhz) ||
+                   (!curtxbw_40mhz && curshortgi_20mhz)) {
+                       if (macid == 0)
+                               shortgi = true;
+                       else if (macid == 1)
+                               shortgi = false;
+               }
+               break;
+       default:
+               ratr_index = RATR_INX_WIRELESS_NGB;
+               if (rtlphy->rf_type == RF_1T2R)
+                       ratr_bitmap &= 0x000ff0ff;
+               else
+                       ratr_bitmap &= 0x0f0ff0ff;
+               break;
+       }
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
+                ratr_bitmap));
+       *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
+                                     ratr_index << 28);
+       rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+                                               "ratr_val:%x, %x:%x:%x:%x:%x\n",
+                                               ratr_index, ratr_bitmap,
+                                               rate_mask[0], rate_mask[1],
+                                               rate_mask[2], rate_mask[3],
+                                               rate_mask[4]));
+       rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+}
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 sifs_timer;
+
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+                                     (u8 *)&mac->slot_time);
+       if (!mac->ht_enable)
+               sifs_timer = 0x0a0a;
+       else
+               sifs_timer = 0x0e0e;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+       u8 u1tmp = 0;
+       bool actuallyset = false;
+       unsigned long flag = 0;
+       /* to do - usb autosuspend */
+       u8 usb_autosuspend = 0;
+
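+       /* Sample the hardware radio on/off state either from HSISR bit 7
+        * (power-down mode) or from bit 3 of REG_GPIO_IO_SEL, then update
+        * the power-save state accordingly. */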
+       if (ppsc->swrf_processing)
+               return false;
+       spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+       if (ppsc->rfchange_inprogress) {
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+               return false;
+       } else {
+               ppsc->rfchange_inprogress = true;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+       }
+       cur_rfstate = ppsc->rfpwr_state;
+       if (usb_autosuspend) {
+               /* to do................... */
+       } else {
+               if (ppsc->pwrdown_mode) {
+                       u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
+                       e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
+                                              ERFOFF : ERFON;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+                                ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
+               } else {
+                       rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
+                                      rtl_read_byte(rtlpriv,
+                                      REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+                       u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+                       e_rfpowerstate_toset  = (u1tmp & BIT(3)) ?
+                                                ERFON : ERFOFF;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+                               ("GPIO_IN=%02x\n", u1tmp));
+               }
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
+                        e_rfpowerstate_toset));
+       }
+       if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF  - HW "
+                        "Radio ON, RF ON\n"));
+               ppsc->hwradiooff = false;
+               actuallyset = true;
+       } else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset  ==
+                   ERFOFF)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF  - HW"
+                        " Radio OFF\n"));
+               ppsc->hwradiooff = true;
+               actuallyset = true;
+       } else {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
+                        ("pHalData->bHwRadioOff and eRfPowerStateToSet do not"
+                        " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet "
+                        "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
+       }
+       if (actuallyset) {
+               ppsc->hwradiooff = 1;
+               if (e_rfpowerstate_toset == ERFON) {
+                       if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM) &&
+                            RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
+                               RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+                       else if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_PCI_D3)
+                                && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3))
+                               RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+               }
+               spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+               /* For the power-down module, we need to enable the register
+                * block control register at 0x1c, then set the power-down
+                * control bits BIT4 and BIT15 of register 0x04 to 1.
+                */
+               if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
+                       /* Enable register area 0x0-0xc. */
+                       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+                       if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
+                               /*
+                                * We should configure HW PDn source for WiFi
+                                * ONLY, and then our HW will be set in
+                                * power-down mode if PDn source from all
+                                * functions are configured.
+                                */
+                               u1tmp = rtl_read_byte(rtlpriv,
+                                                     REG_MULTI_FUNC_CTRL);
+                               rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
+                                              (u1tmp|WL_HWPDN_EN));
+                       } else {
+                               rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
+                       }
+               }
+               if (e_rfpowerstate_toset == ERFOFF) {
+                       if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
+                               RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+                       else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+                               RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+               }
+       } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
+               /* Enter D3 or ASPM after GPIO had been done. */
+               if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+               else if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_PCI_D3)
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+               spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+       } else {
+               spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+       }
+       *valid = 1;
+       return !ppsc->hwradiooff;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
new file mode 100644 (file)
index 0000000..62af555
--- /dev/null
@@ -0,0 +1,116 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_HW_H__
+#define __RTL92CU_HW_H__
+
+#define H2C_RA_MASK    6
+
+#define LLT_POLLING_LLT_THRESHOLD              20
+#define LLT_POLLING_READY_TIMEOUT_COUNT                100
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER                255
+
+#define RX_PAGE_SIZE_REG_VALUE                 PBP_128
+/* Note: the pages are divided equally among the queues other than the
+ * public queue! */
+#define TX_TOTAL_PAGE_NUMBER                   0xF8
+#define TX_PAGE_BOUNDARY                       (TX_TOTAL_PAGE_NUMBER + 1)
+
+
+#define CHIP_B_PAGE_NUM_PUBQ                   0xE7
+
+/* For Test Chip Setting
+ * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
+#define CHIP_A_PAGE_NUM_PUBQ                   0x7E
+
+
+/* For Chip A Setting */
+#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER                0xF5
+#define WMM_CHIP_A_TX_PAGE_BOUNDARY            \
+       (WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_A_PAGE_NUM_PUBQ               0xA3
+#define WMM_CHIP_A_PAGE_NUM_HPQ                        0x29
+#define WMM_CHIP_A_PAGE_NUM_LPQ                        0x29
+
+
+
+/* Note: For Chip B setting, modify later */
+#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER                0xF5
+#define WMM_CHIP_B_TX_PAGE_BOUNDARY            \
+       (WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_B_PAGE_NUM_PUBQ               0xB0
+#define WMM_CHIP_B_PAGE_NUM_HPQ                        0x29
+#define WMM_CHIP_B_PAGE_NUM_LPQ                        0x1C
+#define WMM_CHIP_B_PAGE_NUM_NPQ                        0x1C
+
+#define BOARD_TYPE_NORMAL_MASK                 0xE0
+#define BOARD_TYPE_TEST_MASK                   0x0F
+
+/* should be renamed and moved to another file */
+enum _BOARD_TYPE_8192CUSB {
+       BOARD_USB_DONGLE                = 0,    /* USB dongle */
+       BOARD_USB_High_PA               = 1,    /* USB dongle - high power PA */
+       BOARD_MINICARD                  = 2,    /* Minicard */
+       BOARD_USB_SOLO                  = 3,    /* USB solo-Slim module */
+       BOARD_USB_COMBO                 = 4,    /* USB Combo-Slim module */
+};
+
+#define IS_HIGHT_PA(boardtype)         \
+       ((boardtype == BOARD_USB_High_PA) ? true : false)
+
+#define RTL92C_DRIVER_INFO_SIZE                                4
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw);
+int rtl92cu_hw_init(struct ieee80211_hw *hw);
+void rtl92cu_card_disable(struct ieee80211_hw *hw);
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+                                  u32 add_msr, u32 rm_msr);
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+u8 _rtl92c_get_chnl_group(u8 chnl);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+                        u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
new file mode 100644 (file)
index 0000000..332c743
--- /dev/null
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl92cu_init_led(struct ieee80211_hw *hw,
+                             struct rtl_led *pled, enum rtl_led_pin ledpin)
+{
+       pled->hw = hw;
+       pled->ledpin = ledpin;
+       pled->ledon = false;
+}
+
+static void _rtl92cu_deInit_led(struct rtl_led *pled)
+{
+}
+
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+       u8 ledcfg;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+                ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+       ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+       switch (pled->ledpin) {
+       case LED_PIN_GPIO0:
+               break;
+       case LED_PIN_LED0:
+               rtl_write_byte(rtlpriv,
+                              REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
+               break;
+       case LED_PIN_LED1:
+               rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not processed\n"));
+               break;
+       }
+       pled->ledon = true;
+}
+
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+       u8 ledcfg;
+
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+                ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+       ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+       switch (pled->ledpin) {
+       case LED_PIN_GPIO0:
+               break;
+       case LED_PIN_LED0:
+               ledcfg &= 0xf0;
+               if (usbpriv->ledctl.led_opendrain)
+                       rtl_write_byte(rtlpriv, REG_LEDCFG2,
+                                      (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+               else
+                       rtl_write_byte(rtlpriv, REG_LEDCFG2,
+                                      (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+               break;
+       case LED_PIN_LED1:
+               ledcfg &= 0x0f;
+               rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not processed\n"));
+               break;
+       }
+       pled->ledon = false;
+}
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
+{
+       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+       _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
+       _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
+{
+       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+       _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
+       _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
+}
+
+static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
+                                   enum led_ctl_mode ledaction)
+{
+}
+
+void rtl92cu_led_control(struct ieee80211_hw *hw,
+                       enum led_ctl_mode ledaction)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+           (ledaction == LED_CTL_TX ||
+            ledaction == LED_CTL_RX ||
+            ledaction == LED_CTL_SITE_SURVEY ||
+            ledaction == LED_CTL_LINK ||
+            ledaction == LED_CTL_NO_LINK ||
+            ledaction == LED_CTL_START_TO_LINK ||
+            ledaction == LED_CTL_POWER_ON)) {
+               return;
+       }
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d.\n",
+                               ledaction));
+       _rtl92cu_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
new file mode 100644 (file)
index 0000000..decaee4
--- /dev/null
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_LED_H__
+#define __RTL92CU_LED_H__
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
new file mode 100644 (file)
index 0000000..f8514cb
--- /dev/null
@@ -0,0 +1,1144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+****************************************************************************/
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../cam.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+/* macro to shorten lines */
+
+#define LINK_Q ui_link_quality
+#define RX_EVM rx_evm_percentage
+#define RX_SIGQ        rx_mimo_signalquality
+
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       enum version_8192c chip_version = VERSION_UNKNOWN;
+       u32 value32;
+
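+       /* Decode REG_SYS_CFG: test vs. normal chip, 88C/92C, TSMC/UMC
+        * vendor, cut revision and 8723 variants. */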
+       value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+       if (value32 & TRP_VAUX_EN) {
+               chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
+                              VERSION_TEST_CHIP_88C;
+       } else {
+               /* Normal mass production chip. */
+               chip_version = NORMAL_CHIP;
+               chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
+               chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
+               /* RTL8723 with BT function. */
+               chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
+               if (IS_VENDOR_UMC(chip_version))
+                       chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
+                                        CHIP_VENDOR_UMC_B_CUT : 0);
+               if (IS_92C_SERIAL(chip_version)) {
+                       value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
+                       chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
+                                CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
+               } else if (IS_8723_SERIES(chip_version)) {
+                       value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
+                       chip_version |= ((value32 & RF_RL_ID) ?
+                                         CHIP_8723_DRV_REV : 0);
+               }
+       }
+       rtlhal->version  = (enum version_8192c)chip_version;
+       switch (rtlhal->version) {
+       case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+               break;
+       case VERSION_NORMAL_TSMC_CHIP_92C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
+               break;
+       case VERSION_NORMAL_TSMC_CHIP_88C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+                       "92C_1T2R_A_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+                       "92C_A_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_88C_A_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_92C_1T2R_B_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_92C_B_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_88C_B_CUT.\n"));
+               break;
+       case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+                       "_8723_1T1R_A_CUT.\n"));
+               break;
+       case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+                       "_8723_1T1R_B_CUT.\n"));
+               break;
+       case VERSION_TEST_CHIP_92C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
+               break;
+       case VERSION_TEST_CHIP_88C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: unknown.\n"));
+               break;
+       }
+       if (IS_92C_SERIAL(rtlhal->version))
+               rtlphy->rf_type =
+                        (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
+       else
+               rtlphy->rf_type = RF_1T1R;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+                 "RF_2T2R" : "RF_1T1R"));
+       if (get_rf_type(rtlphy) == RF_1T1R)
+               rtlpriv->dm.rfpath_rxenable[0] = true;
+       else
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+                                               rtlhal->version));
+}
+
+/**
+ * rtl92c_llt_write - LLT table write access
+ * @hw: ieee80211 hardware context
+ * @address: LLT logical address
+ * @data: LLT data content
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       bool status = true;
+       long count = 0;
+       u32 value = _LLT_INIT_ADDR(address) |
+           _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
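+       /* Issue the write, then poll REG_LLT_INIT until the op field
+        * returns to _LLT_NO_ACTIVE or the polling threshold is reached. */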
+       rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+       do {
+               value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+               if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+                       break;
+               if (count > POLLING_LLT_THRESHOLD) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("Failed to poll LLT write done at"
+                                " address %d! _LLT_OP_VALUE(%x)\n",
+                                address, _LLT_OP_VALUE(value)));
+                       status = false;
+                       break;
+               }
+       } while (++count);
+       return status;
+}
+/**
+ * rtl92c_init_llt_table - Initialize the LLT table
+ * @hw: ieee80211 hardware context
+ * @boundary: TX page boundary; pages below it are chained as the normal
+ *            TX packet buffer, the remaining pages as a ring buffer
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
+{
+       bool rst = true;
+       u32     i;
+
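+       /* Chain pages 0 .. boundary-2 so that each entry points to the
+        * next one; these pages form the normal TX packet buffer. */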
+       for (i = 0; i < (boundary - 1); i++) {
+               rst = rtl92c_llt_write(hw, i, i + 1);
+               if (!rst) {
+                       printk(KERN_ERR "===> %s #1 fail\n", __func__);
+                       return rst;
+               }
+       }
+       /* end of list */
+       rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
+       if (!rst) {
+               printk(KERN_ERR "===> %s #2 fail\n", __func__);
+               return rst;
+       }
+       /* Chain the remaining pages into a ring buffer.
+        * The ring buffer is used as the beacon buffer when this MAC is
+        * configured for dual-MAC transfer; otherwise it is used as the
+        * local loopback buffer.
+        */
+       for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
+               rst = rtl92c_llt_write(hw, i, (i + 1));
+               if (!rst) {
+                       printk(KERN_ERR "===> %s #3 fail\n", __func__);
+                       return rst;
+               }
+       }
+       /* Let last entry point to the start entry of ring buffer */
+       rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
+       if (!rst) {
+               printk(KERN_ERR "===> %s #4 fail\n", __func__);
+               return rst;
+       }
+       return rst;
+}
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+                    u8 *p_macaddr, bool is_group, u8 enc_algo,
+                    bool is_wepkey, bool clear_all)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 *macaddr = p_macaddr;
+       u32 entry_id = 0;
+       bool is_pairwise = false;
+       static u8 cam_const_addr[4][6] = {
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+       };
+       static u8 cam_const_broad[] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+
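+       /* clear_all wipes the first CAM entries and the cached software
+        * keys; otherwise map the cipher to a CAM algorithm and program
+        * (or delete) a single CAM entry for the pairwise or group key. */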
+       if (clear_all) {
+               u8 idx = 0;
+               u8 cam_offset = 0;
+               u8 clear_number = 5;
+
+               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+               for (idx = 0; idx < clear_number; idx++) {
+                       rtl_cam_mark_invalid(hw, cam_offset + idx);
+                       rtl_cam_empty_entry(hw, cam_offset + idx);
+                       if (idx < 5) {
+                               memset(rtlpriv->sec.key_buf[idx], 0,
+                                      MAX_KEY_LEN);
+                               rtlpriv->sec.key_len[idx] = 0;
+                       }
+               }
+       } else {
+               switch (enc_algo) {
+               case WEP40_ENCRYPTION:
+                       enc_algo = CAM_WEP40;
+                       break;
+               case WEP104_ENCRYPTION:
+                       enc_algo = CAM_WEP104;
+                       break;
+               case TKIP_ENCRYPTION:
+                       enc_algo = CAM_TKIP;
+                       break;
+               case AESCCMP_ENCRYPTION:
+                       enc_algo = CAM_AES;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                               ("illegal switch case\n"));
+                       enc_algo = CAM_TKIP;
+                       break;
+               }
+               if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+                       macaddr = cam_const_addr[key_index];
+                       entry_id = key_index;
+               } else {
+                       if (is_group) {
+                               macaddr = cam_const_broad;
+                               entry_id = key_index;
+                       } else {
+                               key_index = PAIRWISE_KEYIDX;
+                               entry_id = CAM_PAIRWISE_KEY_POSITION;
+                               is_pairwise = true;
+                       }
+               }
+               if (rtlpriv->sec.key_len[key_index] == 0) {
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                ("delete one entry\n"));
+                       rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+               } else {
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+                                ("The insert KEY length is %d\n",
+                                 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+                                ("The insert KEY  is %x %x\n",
+                                 rtlpriv->sec.key_buf[0][0],
+                                 rtlpriv->sec.key_buf[0][1]));
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                ("add one entry\n"));
+                       if (is_pairwise) {
+                               RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+                                             "Pairwise key content: ",
+                                             rtlpriv->sec.pairwise_key,
+                                             rtlpriv->sec.
+                                             key_len[PAIRWISE_KEYIDX]);
+                               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                        ("set pairwise key\n"));
+
+                               rtl_cam_add_one_entry(hw, macaddr, key_index,
+                                               entry_id, enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.
+                                               key_buf[key_index]);
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                        ("set group key\n"));
+                               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                                       rtl_cam_add_one_entry(hw,
+                                               rtlefuse->dev_addr,
+                                               PAIRWISE_KEYIDX,
+                                               CAM_PAIRWISE_KEY_POSITION,
+                                               enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.key_buf
+                                               [entry_id]);
+                               }
+                               rtl_cam_add_one_entry(hw, macaddr, key_index,
+                                               entry_id, enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.key_buf[entry_id]);
+                       }
+               }
+       }
+}
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
+}
+
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
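+       /* Program HIMR/HIMRE from the PCI or USB interrupt mask,
+        * depending on the interface type. */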
+       if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+               rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
+                               0xFFFFFFFF);
+               rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
+                               0xFFFFFFFF);
+               rtlpci->irq_enabled = true;
+       } else {
+               rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
+                               0xFFFFFFFF);
+               rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
+                               0xFFFFFFFF);
+               rtlusb->irq_enabled = true;
+       }
+}
+
+void rtl92c_init_interrupt(struct ieee80211_hw *hw)
+{
+       rtl92c_enable_interrupt(hw);
+}
+
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
+       rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
+       if (IS_HARDWARE_TYPE_8192CE(rtlhal))
+               rtlpci->irq_enabled = false;
+       else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+               rtlusb->irq_enabled = false;
+}
+
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 u4b_ac_param;
+
+       rtl92c_dm_init_edca_turbo(hw);
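+       /* Pack AIFS, ECWmin, ECWmax and TXOP into a single EDCA parameter
+        * register value for the selected access category. */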
+       u4b_ac_param = (u32) mac->ac[aci].aifs;
+       u4b_ac_param |=
+           ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
+           AC_PARAM_ECW_MIN_OFFSET;
+       u4b_ac_param |=
+           ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
+           AC_PARAM_ECW_MAX_OFFSET;
+       u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
+                        AC_PARAM_TXOP_OFFSET;
+       RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
+                ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
+       switch (aci) {
+       case AC1_BK:
+               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
+               break;
+       case AC0_BE:
+               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
+               break;
+       case AC2_VI:
+               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
+               break;
+       case AC3_VO:
+               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
+               break;
+       default:
+               RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+               break;
+       }
+}
+
+/*-------------------------------------------------------------------------
+ * HW MAC Address
+ *-------------------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
+{
+       u32 i;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       for (i = 0 ; i < ETH_ALEN ; i++)
+               rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
+               "%02X-%02X-%02X\n",
+               rtl_read_byte(rtlpriv, REG_MACID),
+               rtl_read_byte(rtlpriv, REG_MACID+1),
+               rtl_read_byte(rtlpriv, REG_MACID+2),
+               rtl_read_byte(rtlpriv, REG_MACID+3),
+               rtl_read_byte(rtlpriv, REG_MACID+4),
+               rtl_read_byte(rtlpriv, REG_MACID+5)));
+}
+
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
+}
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+       u8 value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       switch (type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               value = NT_NO_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to NO LINK!\n"));
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               value = NT_LINK_AD_HOC;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to Ad Hoc!\n"));
+               break;
+       case NL80211_IFTYPE_STATION:
+               value = NT_LINK_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to STA!\n"));
+               break;
+       case NL80211_IFTYPE_AP:
+               value = NT_AS_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to AP!\n"));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Network type %d not supported!\n", type));
+               return -EOPNOTSUPP;
+       }
+       rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+       return 0;
+}
+
+void rtl92c_init_network_type(struct ieee80211_hw *hw)
+{
+       rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
+}
+
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
+{
+       u16     value16;
+       u32     value32;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* Response Rate Set */
+       value32 = rtl_read_dword(rtlpriv, REG_RRSR);
+       value32 &= ~RATE_BITMAP_ALL;
+       value32 |= RATE_RRSR_CCK_ONLY_1M;
+       rtl_write_dword(rtlpriv, REG_RRSR, value32);
+       /* SIFS (used in NAV) */
+       value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+       rtl_write_word(rtlpriv,  REG_SPEC_SIFS, value16);
+       /* Retry Limit */
+       value16 = _LRL(0x30) | _SRL(0x30);
+       rtl_write_dword(rtlpriv,  REG_RL, value16);
+}
+
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* Set Data Auto Rate Fallback Retry Count register. */
+       rtl_write_dword(rtlpriv,  REG_DARFRC, 0x00000000);
+       rtl_write_dword(rtlpriv,  REG_DARFRC+4, 0x10080404);
+       rtl_write_dword(rtlpriv,  REG_RARFRC, 0x04030201);
+       rtl_write_dword(rtlpriv,  REG_RARFRC+4, 0x08070605);
+}
+
+static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+                               u8 ctx_sifs)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
+       rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
+}
+
+static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+                                u8 ctx_sifs)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
+       rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
+}
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+                           u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
+{
+       /* sequence: VO, VI, BE, BK ==> the same as 92C hardware design.
+        * reference: enum nl80211_txq_q or the ieee80211_set_wmm_default() function.
+        */
+       u32 value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       value = (u32)aifs;
+       value |= ((u32)cw_min & 0xF) << 8;
+       value |= ((u32)cw_max & 0xF) << 12;
+       value |= (u32)txop << 16;
+       /* 92C hardware register sequence is the same as queue number. */
+       rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
+}
+
+void rtl92c_init_edca(struct ieee80211_hw *hw)
+{
+       u16 value16;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* Disable EDCCA count down, to reduce collisions and retries */
+       value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+       value16 |= DIS_EDCA_CNT_DWN;
+       rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
+       /* Update SIFS timing; the reference value is
+        * pHalData->SifsTime = 0x0e0e0a0a. */
+       rtl92c_set_cck_sifs(hw, 0xa, 0xa);
+       rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
+       /* Set CCK/OFDM SIFS to be 10us. */
+       rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
+       rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
+       rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
+       rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
+       /* TXOP */
+       rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
+       rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
+       rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
+       rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
+       /* PIFS */
+       rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+       /* AGGR BREAK TIME Register */
+       rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+       rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+       rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
+       rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
+}
+
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
+       rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+       /* Init AMPDU aggregation number; tuned for TX throughput. */
+       rtl_write_word(rtlpriv, 0x4CA, 0x0708);
+}
+
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+}
+
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
+       rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+       rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+void rtl92c_init_retry_function(struct ieee80211_hw *hw)
+{
+       u8      value8;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
+       value8 |= EN_AMPDU_RTY_NEW;
+       rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
+       /* Set ACK timeout */
+       rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+}
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+                                  enum version_8192c version)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+       rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
+       rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+       else
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
+}
+
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
+
+       rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
+}
+
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+}
+
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
+}
+
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+}
+
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
+}
+
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_word(rtlpriv,  REG_RXFLTMAP2);
+}
+
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
+}
+/*==============================================================*/
+
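+/* Map antenna power in dBm to a 0..100 percentage: out-of-range values
+ * (<= -100 or >= 20) give 0, 0..19 gives 100, otherwise 100 + antpower. */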
+static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+{
+       if ((antpower <= -100) || (antpower >= 20))
+               return 0;
+       else if (antpower >= 0)
+               return 100;
+       else
+               return 100 + antpower;
+}
+
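+/* Convert a (negative) EVM value in dB to a 0..100 percentage: clamp to
+ * [-33, 0], negate and scale by 3, rounding 99 up to 100. */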
+static u8 _rtl92c_evm_db_to_percentage(char value)
+{
+       char ret_val;
+
+       ret_val = value;
+       if (ret_val >= 0)
+               ret_val = 0;
+       if (ret_val <= -33)
+               ret_val = -33;
+       ret_val = 0 - ret_val;
+       ret_val *= 3;
+       if (ret_val == 99)
+               ret_val = 100;
+       return ret_val;
+}
+
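+/* Translate a 0..100 signal strength index into an approximate dBm value
+ * in the range -95 .. -45. */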
+static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
+                                    u8 signal_strength_index)
+{
+       long signal_power;
+
+       signal_power = (long)((signal_strength_index + 1) >> 1);
+       signal_power -= 95;
+       return signal_power;
+}
+
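+/* Piecewise-linear remapping of the raw 0..100 signal value: low readings
+ * are boosted and high readings are compressed toward 100. */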
+static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+               long currsig)
+{
+       long retsig;
+
+       if (currsig >= 61 && currsig <= 100)
+               retsig = 90 + ((currsig - 60) / 4);
+       else if (currsig >= 41 && currsig <= 60)
+               retsig = 78 + ((currsig - 40) / 2);
+       else if (currsig >= 31 && currsig <= 40)
+               retsig = 66 + (currsig - 30);
+       else if (currsig >= 21 && currsig <= 30)
+               retsig = 54 + (currsig - 20);
+       else if (currsig >= 5 && currsig <= 20)
+               retsig = 42 + (((currsig - 5) * 2) / 3);
+       else if (currsig == 4)
+               retsig = 36;
+       else if (currsig == 3)
+               retsig = 27;
+       else if (currsig == 2)
+               retsig = 18;
+       else if (currsig == 1)
+               retsig = 9;
+       else
+               retsig = currsig;
+       return retsig;
+}
+
+static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+                                     struct rtl_stats *pstats,
+                                     struct rx_desc_92c *pdesc,
+                                     struct rx_fwinfo_92c *p_drvinfo,
+                                     bool packet_match_bssid,
+                                     bool packet_toself,
+                                     bool packet_beacon)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct phy_sts_cck_8192s_t *cck_buf;
+       s8 rx_pwr_all = 0, rx_pwr[4];
+       u8 rf_rx_num = 0, evm, pwdb_all;
+       u8 i, max_spatial_stream;
+       u32 rssi, total_rssi = 0;
+       bool in_powersavemode = false;
+       bool is_cck_rate;
+
+       is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+       pstats->packet_matchbssid = packet_match_bssid;
+       pstats->packet_toself = packet_toself;
+       pstats->is_cck = is_cck_rate;
+       pstats->packet_beacon = packet_beacon;
+       pstats->RX_SIGQ[0] = -1;
+       pstats->RX_SIGQ[1] = -1;
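+       /* CCK frames: derive the RX power from the CCK AGC report; the
+        * decoding differs depending on whether CCK high power is enabled. */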
+       if (is_cck_rate) {
+               u8 report, cck_highpwr;
+               cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
+               if (!in_powersavemode)
+                       cck_highpwr = rtlphy->cck_high_power;
+               else
+                       cck_highpwr = false;
+               if (!cck_highpwr) {
+                       u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+                       report = cck_buf->cck_agc_rpt & 0xc0;
+                       report = report >> 6;
+                       switch (report) {
+                       case 0x3:
+                               rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+                               break;
+                       case 0x2:
+                               rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+                               break;
+                       case 0x1:
+                               rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+                               break;
+                       case 0x0:
+                               rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+                               break;
+                       }
+               } else {
+                       u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+                       report = p_drvinfo->cfosho[0] & 0x60;
+                       report = report >> 5;
+                       switch (report) {
+                       case 0x3:
+                               rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       case 0x2:
+                               rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       case 0x1:
+                               rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       case 0x0:
+                               rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       }
+               }
+               pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+               pstats->rx_pwdb_all = pwdb_all;
+               pstats->recvsignalpower = rx_pwr_all;
+               if (packet_match_bssid) {
+                       u8 sq;
+                       if (pstats->rx_pwdb_all > 40)
+                               sq = 100;
+                       else {
+                               sq = cck_buf->sq_rpt;
+                               if (sq > 64)
+                                       sq = 0;
+                               else if (sq < 20)
+                                       sq = 100;
+                               else
+                                       sq = ((64 - sq) * 100) / 44;
+                       }
+                       pstats->signalquality = sq;
+                       pstats->RX_SIGQ[0] = sq;
+                       pstats->RX_SIGQ[1] = -1;
+               }
+       } else {
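+               /* OFDM/HT frames: accumulate the per-path RSSI from the
+                * gain reports and convert per-stream EVM into signal
+                * quality. */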
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
+               for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
+                       if (rtlpriv->dm.rfpath_rxenable[i])
+                               rf_rx_num++;
+                       rx_pwr[i] =
+                           ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+                       rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+                       total_rssi += rssi;
+                       rtlpriv->stats.rx_snr_db[i] =
+                           (long)(p_drvinfo->rxsnr[i] / 2);
+
+                       if (packet_match_bssid)
+                               pstats->rx_mimo_signalstrength[i] = (u8) rssi;
+               }
+               rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+               pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+               pstats->rx_pwdb_all = pwdb_all;
+               pstats->rxpower = rx_pwr_all;
+               pstats->recvsignalpower = rx_pwr_all;
+               if (GET_RX_DESC_RX_MCS(pdesc) &&
+                   GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
+                   GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
+                       max_spatial_stream = 2;
+               else
+                       max_spatial_stream = 1;
+               for (i = 0; i < max_spatial_stream; i++) {
+                       evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+                       if (packet_match_bssid) {
+                               if (i == 0)
+                                       pstats->signalquality =
+                                           (u8) (evm & 0xff);
+                               pstats->RX_SIGQ[i] =
+                                   (u8) (evm & 0xff);
+                       }
+               }
+       }
+       if (is_cck_rate)
+               pstats->signalstrength =
+                   (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+       else if (rf_rx_num != 0)
+               pstats->signalstrength =
+                   (u8) (_rtl92c_signal_scale_mapping
+                         (hw, total_rssi /= rf_rx_num));
+}
+
+static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
+               struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 rfpath;
+       u32 last_rssi, tmpval;
+
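+       /* Maintain a sliding-window average of the UI RSSI and a smoothed
+        * per-path RSSI percentage (RX_SMOOTH_FACTOR weighting). */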
+       if (pstats->packet_toself || pstats->packet_beacon) {
+               rtlpriv->stats.rssi_calculate_cnt++;
+               if (rtlpriv->stats.ui_rssi.total_num++ >=
+                   PHY_RSSI_SLID_WIN_MAX) {
+                       rtlpriv->stats.ui_rssi.total_num =
+                           PHY_RSSI_SLID_WIN_MAX;
+                       last_rssi =
+                           rtlpriv->stats.ui_rssi.elements[rtlpriv->
+                                                          stats.ui_rssi.index];
+                       rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+               }
+               rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
+               rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
+                                       index++] = pstats->signalstrength;
+               if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+                       rtlpriv->stats.ui_rssi.index = 0;
+               tmpval = rtlpriv->stats.ui_rssi.total_val /
+                   rtlpriv->stats.ui_rssi.total_num;
+               rtlpriv->stats.signal_strength =
+                   _rtl92c_translate_todbm(hw, (u8) tmpval);
+               pstats->rssi = rtlpriv->stats.signal_strength;
+       }
+       if (!pstats->is_cck && pstats->packet_toself) {
+               for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+                    rfpath++) {
+                       if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
+                               continue;
+                       if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   pstats->rx_mimo_signalstrength[rfpath];
+                       }
+                       if (pstats->rx_mimo_signalstrength[rfpath] >
+                           rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   ((rtlpriv->stats.
+                                     rx_rssi_percentage[rfpath] *
+                                     (RX_SMOOTH_FACTOR - 1)) +
+                                    (pstats->rx_mimo_signalstrength[rfpath])) /
+                                   (RX_SMOOTH_FACTOR);
+
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   rtlpriv->stats.rx_rssi_percentage[rfpath] +
+                                   1;
+                       } else {
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   ((rtlpriv->stats.
+                                     rx_rssi_percentage[rfpath] *
+                                     (RX_SMOOTH_FACTOR - 1)) +
+                                    (pstats->rx_mimo_signalstrength[rfpath])) /
+                                   (RX_SMOOTH_FACTOR);
+                       }
+               }
+       }
+}
+
+static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
+                                              struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int weighting = 0;
+
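+       /* Weighted running average (5:1) of the received signal power,
+        * biased by +/-5 toward the direction of change. */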
+       if (rtlpriv->stats.recv_signal_power == 0)
+               rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+       if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+               weighting = 5;
+       else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+               weighting = (-5);
+       rtlpriv->stats.recv_signal_power =
+           (rtlpriv->stats.recv_signal_power * 5 +
+            pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
+               struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       long undecorated_smoothed_pwdb = 0;
+
+       if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+               return;
+       } else {
+               undecorated_smoothed_pwdb =
+                   rtlpriv->dm.undecorated_smoothed_pwdb;
+       }
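+       /* Smooth the PWDB of frames addressed to us (or beacons) with the
+        * same RX_SMOOTH_FACTOR running average. */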
+       if (pstats->packet_toself || pstats->packet_beacon) {
+               if (undecorated_smoothed_pwdb < 0)
+                       undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+               if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+                       undecorated_smoothed_pwdb =
+                           (((undecorated_smoothed_pwdb) *
+                             (RX_SMOOTH_FACTOR - 1)) +
+                            (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+                       undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
+                           + 1;
+               } else {
+                       undecorated_smoothed_pwdb =
+                           (((undecorated_smoothed_pwdb) *
+                             (RX_SMOOTH_FACTOR - 1)) +
+                            (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+               }
+               rtlpriv->dm.undecorated_smoothed_pwdb =
+                   undecorated_smoothed_pwdb;
+               _rtl92c_update_rxsignalstatistics(hw, pstats);
+       }
+}
+
+static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
+                                            struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 last_evm = 0, n_stream, tmpval;
+
+       if (pstats->signalquality != 0) {
+               if (pstats->packet_toself || pstats->packet_beacon) {
+                       if (rtlpriv->stats.LINK_Q.total_num++ >=
+                           PHY_LINKQUALITY_SLID_WIN_MAX) {
+                               rtlpriv->stats.LINK_Q.total_num =
+                                   PHY_LINKQUALITY_SLID_WIN_MAX;
+                               last_evm =
+                                   rtlpriv->stats.LINK_Q.elements
+                                   [rtlpriv->stats.LINK_Q.index];
+                               rtlpriv->stats.LINK_Q.total_val -=
+                                   last_evm;
+                       }
+                       rtlpriv->stats.LINK_Q.total_val +=
+                           pstats->signalquality;
+                       rtlpriv->stats.LINK_Q.elements
+                          [rtlpriv->stats.LINK_Q.index++] =
+                           pstats->signalquality;
+                       if (rtlpriv->stats.LINK_Q.index >=
+                           PHY_LINKQUALITY_SLID_WIN_MAX)
+                               rtlpriv->stats.LINK_Q.index = 0;
+                       tmpval = rtlpriv->stats.LINK_Q.total_val /
+                           rtlpriv->stats.LINK_Q.total_num;
+                       rtlpriv->stats.signal_quality = tmpval;
+                       rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+                       for (n_stream = 0; n_stream < 2;
+                            n_stream++) {
+                               if (pstats->RX_SIGQ[n_stream] != -1) {
+                                       if (!rtlpriv->stats.RX_EVM[n_stream]) {
+                                               rtlpriv->stats.RX_EVM[n_stream]
+                                                = pstats->RX_SIGQ[n_stream];
+                                       }
+                                       rtlpriv->stats.RX_EVM[n_stream] =
+                                           ((rtlpriv->stats.RX_EVM
+                                           [n_stream] *
+                                           (RX_SMOOTH_FACTOR - 1)) +
+                                           (pstats->RX_SIGQ
+                                           [n_stream] * 1)) /
+                                           (RX_SMOOTH_FACTOR);
+                               }
+                       }
+               }
+       }
+}
+
+static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
+                                    u8 *buffer,
+                                    struct rtl_stats *pcurrent_stats)
+{
+       if (!pcurrent_stats->packet_matchbssid &&
+           !pcurrent_stats->packet_beacon)
+               return;
+       _rtl92c_process_ui_rssi(hw, pcurrent_stats);
+       _rtl92c_process_pwdb(hw, pcurrent_stats);
+       _rtl92c_process_LINK_Q(hw, pcurrent_stats);
+}
+
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+                                              struct sk_buff *skb,
+                                              struct rtl_stats *pstats,
+                                              struct rx_desc_92c *pdesc,
+                                              struct rx_fwinfo_92c *p_drvinfo)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct ieee80211_hdr *hdr;
+       u8 *tmp_buf;
+       u8 *praddr;
+       u8 *psaddr;
+       __le16 fc;
+       u16 type, cpu_fc;
+       bool packet_matchbssid, packet_toself, packet_beacon = false;
+
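+       /* Locate the 802.11 header behind the driver-info area, classify
+        * the frame (matches our BSSID / addressed to us / beacon) and run
+        * the PHY status processing. */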
+       tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+       hdr = (struct ieee80211_hdr *)tmp_buf;
+       fc = hdr->frame_control;
+       cpu_fc = le16_to_cpu(fc);
+       type = WLAN_FC_GET_TYPE(fc);
+       praddr = hdr->addr1;
+       psaddr = hdr->addr2;
+       packet_matchbssid =
+           ((IEEE80211_FTYPE_CTL != type) &&
+            (!compare_ether_addr(mac->bssid,
+                         (cpu_fc & IEEE80211_FCTL_TODS) ?
+                         hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
+                         hdr->addr2 : hdr->addr3)) &&
+            (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+
+       packet_toself = packet_matchbssid &&
+           (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+       if (ieee80211_is_beacon(fc))
+               packet_beacon = true;
+       _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+                                  packet_matchbssid, packet_toself,
+                                  packet_beacon);
+       _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
new file mode 100644 (file)
index 0000000..298fdb7
--- /dev/null
@@ -0,0 +1,180 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_MAC_H__
+#define __RTL92C_MAC_H__
+
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER                255
+#define DRIVER_EARLY_INT_TIME                          0x05
+#define BCN_DMA_ATIME_INT_TIME                         0x02
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw);
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary);
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+                    u8 *p_macaddr, bool is_group, u8 enc_algo,
+                    bool is_wepkey, bool clear_all);
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
+
+
+/*---------------------------------------------------------------
+ *     Hardware init functions
+ *---------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
+void rtl92c_init_interrupt(struct ieee80211_hw *hw);
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92c_init_network_type(struct ieee80211_hw *hw);
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw);
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw);
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw, u16 queue, u16 txop,
+                            u8 ecwmax, u8 ecwmin, u8 aifs);
+
+void rtl92c_init_edca(struct ieee80211_hw *hw);
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
+void rtl92c_init_retry_function(struct ieee80211_hw *hw);
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+                                  enum version_8192c version);
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
+
+/* For filter */
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
+
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
+
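+/* True when the RX descriptor reports a legacy CCK rate (1, 2, 5.5 or 11M) */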
+#define RX_HAL_IS_CCK_RATE(_pdesc)\
+       (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
+        GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
+        GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
+        GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
+
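+/* PHY status ("driver info") block that follows the RX descriptor */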
+struct rx_fwinfo_92c {
+       u8 gain_trsw[4];
+       u8 pwdb_all;
+       u8 cfosho[4];
+       u8 cfotail[4];
+       char rxevm[2];
+       char rxsnr[4];
+       u8 pdsnr[2];
+       u8 csi_current[2];
+       u8 csi_target[2];
+       u8 sigevm;
+       u8 max_ex_pwr;
+       u8 ex_intf_flag:1;
+       u8 sgi_en:1;
+       u8 rxsc:2;
+       u8 reserve:4;
+} __packed;
+
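+/* Hardware RX descriptor layout, one bit-field group per 32-bit word */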
+struct rx_desc_92c {
+       u32 length:14;
+       u32 crc32:1;
+       u32 icverror:1;
+       u32 drv_infosize:4;
+       u32 security:3;
+       u32 qos:1;
+       u32 shift:2;
+       u32 phystatus:1;
+       u32 swdec:1;
+       u32 lastseg:1;
+       u32 firstseg:1;
+       u32 eor:1;
+       u32 own:1;
+       u32 macid:5;    /* word 1 */
+       u32 tid:4;
+       u32 hwrsvd:5;
+       u32 paggr:1;
+       u32 faggr:1;
+       u32 a1_fit:4;
+       u32 a2_fit:4;
+       u32 pam:1;
+       u32 pwr:1;
+       u32 moredata:1;
+       u32 morefrag:1;
+       u32 type:2;
+       u32 mc:1;
+       u32 bc:1;
+       u32 seq:12;     /* word 2 */
+       u32 frag:4;
+       u32 nextpktlen:14;
+       u32 nextind:1;
+       u32 rsvd:1;
+       u32 rxmcs:6;    /* word 3 */
+       u32 rxht:1;
+       u32 amsdu:1;
+       u32 splcp:1;
+       u32 bandwidth:1;
+       u32 htc:1;
+       u32 tcpchk_rpt:1;
+       u32 ipcchk_rpt:1;
+       u32 tcpchk_valid:1;
+       u32 hwpcerr:1;
+       u32 hwpcind:1;
+       u32 iv0:16;
+       u32 iv1;        /* word 4 */
+       u32 tsfl;       /* word 5 */
+       u32 bufferaddress;      /* word 6 */
+       u32 bufferaddress64;    /* word 7 */
+} __packed;
+
+enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc,
+                                                 unsigned int skb_queue);
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+                                     struct sk_buff *skb,
+                                     struct rtl_stats *pstats,
+                                     struct rx_desc_92c *pdesc,
+                                     struct rx_fwinfo_92c *p_drvinfo);
+
+/*---------------------------------------------------------------
+ *     Card disable functions
+ *---------------------------------------------------------------*/
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
new file mode 100644 (file)
index 0000000..4e020e6
--- /dev/null
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+
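+/* Read an RF register and return the field selected by @bitmask */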
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 original_value, readback_value, bitshift;
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+                                              "rfpath(%#x), bitmask(%#x)\n",
+                                              regaddr, rfpath, bitmask));
+       if (rtlphy->rf_mode != RF_OP_BY_FW) {
+               original_value = _rtl92c_phy_rf_serial_read(hw,
+                                                           rfpath, regaddr);
+       } else {
+               original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+                                                              rfpath, regaddr);
+       }
+       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+       readback_value = (original_value & bitmask) >> bitshift;
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                ("regaddr(%#x), rfpath(%#x), "
+                 "bitmask(%#x), original_value(%#x)\n",
+                 regaddr, rfpath, bitmask, original_value));
+       return readback_value;
+}
+
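+/*
+ * Write an RF register.  With a partial @bitmask the current value is read
+ * back first and only the selected bits are replaced (read-modify-write),
+ * using either the direct serial path or the firmware-mediated one.
+ */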
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath,
+                          u32 regaddr, u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 original_value, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+                 regaddr, bitmask, data, rfpath));
+       if (rtlphy->rf_mode != RF_OP_BY_FW) {
+               if (bitmask != RFREG_OFFSET_MASK) {
+                       original_value = _rtl92c_phy_rf_serial_read(hw,
+                                                                   rfpath,
+                                                                   regaddr);
+                       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+                       data =
+                           ((original_value & (~bitmask)) |
+                            (data << bitshift));
+               }
+               _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
+       } else {
+               if (bitmask != RFREG_OFFSET_MASK) {
+                       original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+                                                                      rfpath,
+                                                                      regaddr);
+                       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+                       data =
+                           ((original_value & (~bitmask)) |
+                            (data << bitshift));
+               }
+               _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
+       }
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+                                              "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+                                              regaddr, bitmask, data, rfpath));
+}
+
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
+{
+       bool rtstatus;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool is92c = IS_92C_SERIAL(rtlhal->version);
+
+       rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
+       if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
+               rtl_write_byte(rtlpriv, 0x14, 0x71);
+       return rtstatus;
+}
+
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
+{
+       bool rtstatus = true;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u16 regval;
+       u8 b_reg_hwparafile = 1;
+
+       _rtl92c_phy_init_bb_rf_register_definition(hw);
+       regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+       rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) |
+                      BIT(0) | BIT(1));
+       rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+       rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+       rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+       if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
+                              FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+       } else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
+                              FEN_BB_GLB_RSTn | FEN_BBRSTB);
+               rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+       }
+       rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+       if (b_reg_hwparafile == 1)
+               rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
+       return rtstatus;
+}
+
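+/* Program the MAC registers from the (address, value) pairs in the MAC_REG table */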
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 i;
+       u32 arraylength;
+       u32 *ptrarray;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+       arraylength = rtlphy->hwparam_tables[MAC_REG].length;
+       ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+       for (i = 0; i < arraylength; i = i + 2)
+               rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+       return true;
+}
+
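+/*
+ * Load either the PHY_REG or the AGC table (1T/2T variant as appropriate)
+ * into the baseband; entries 0xf9..0xfe in the table request extra delays.
+ */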
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                                 u8 configtype)
+{
+       int i;
+       u32 *phy_regarray_table;
+       u32 *agctab_array_table;
+       u16 phy_reg_arraylen, agctab_arraylen;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (IS_92C_SERIAL(rtlhal->version)) {
+               agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length;
+               agctab_array_table =  rtlphy->hwparam_tables[AGCTAB_2T].pdata;
+               phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length;
+               phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata;
+       } else {
+               agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length;
+               agctab_array_table =  rtlphy->hwparam_tables[AGCTAB_1T].pdata;
+               phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length;
+               phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata;
+       }
+       if (configtype == BASEBAND_CONFIG_PHY_REG) {
+               for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+                       if (phy_regarray_table[i] == 0xfe)
+                               mdelay(50);
+                       else if (phy_regarray_table[i] == 0xfd)
+                               mdelay(5);
+                       else if (phy_regarray_table[i] == 0xfc)
+                               mdelay(1);
+                       else if (phy_regarray_table[i] == 0xfb)
+                               udelay(50);
+                       else if (phy_regarray_table[i] == 0xfa)
+                               udelay(5);
+                       else if (phy_regarray_table[i] == 0xf9)
+                               udelay(1);
+                       rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+                                     phy_regarray_table[i + 1]);
+                       udelay(1);
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                ("The phy_regarray_table[0] is %x"
+                                 " Rtl819XPHY_REGArray[1] is %x\n",
+                                 phy_regarray_table[i],
+                                 phy_regarray_table[i + 1]));
+               }
+       } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+               for (i = 0; i < agctab_arraylen; i = i + 2) {
+                       rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
+                                     agctab_array_table[i + 1]);
+                       udelay(1);
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                ("The agctab_array_table[0] is "
+                                 "%x Rtl819XPHY_REGArray[1] is %x\n",
+                                 agctab_array_table[i],
+                                 agctab_array_table[i + 1]));
+               }
+       }
+       return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                   u8 configtype)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       int i;
+       u32 *phy_regarray_table_pg;
+       u16 phy_regarray_pg_len;
+
+       rtlphy->pwrgroup_cnt = 0;
+       phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length;
+       phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
+       if (configtype == BASEBAND_CONFIG_PHY_REG) {
+               for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+                       if (phy_regarray_table_pg[i] == 0xfe)
+                               mdelay(50);
+                       else if (phy_regarray_table_pg[i] == 0xfd)
+                               mdelay(5);
+                       else if (phy_regarray_table_pg[i] == 0xfc)
+                               mdelay(1);
+                       else if (phy_regarray_table_pg[i] == 0xfb)
+                               udelay(50);
+                       else if (phy_regarray_table_pg[i] == 0xfa)
+                               udelay(5);
+                       else if (phy_regarray_table_pg[i] == 0xf9)
+                               udelay(1);
+                       _rtl92c_store_pwrIndex_diffrate_offset(hw,
+                                                 phy_regarray_table_pg[i],
+                                                 phy_regarray_table_pg[i + 1],
+                                                 phy_regarray_table_pg[i + 2]);
+               }
+       } else {
+               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                        ("configtype != BaseBand_Config_PHY_REG\n"));
+       }
+       return true;
+}
+
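+/* Store the per-rate power-group offsets parsed from the PHY_REG_PG table */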
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath)
+{
+       int i;
+       u32 *radioa_array_table;
+       u32 *radiob_array_table;
+       u16 radioa_arraylen, radiob_arraylen;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (IS_92C_SERIAL(rtlhal->version)) {
+               radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length;
+               radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
+               radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
+               radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+       } else {
+               radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
+               radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
+               radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
+               radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+       switch (rfpath) {
+       case RF90_PATH_A:
+               for (i = 0; i < radioa_arraylen; i = i + 2) {
+                       if (radioa_array_table[i] == 0xfe)
+                               mdelay(50);
+                       else if (radioa_array_table[i] == 0xfd)
+                               mdelay(5);
+                       else if (radioa_array_table[i] == 0xfc)
+                               mdelay(1);
+                       else if (radioa_array_table[i] == 0xfb)
+                               udelay(50);
+                       else if (radioa_array_table[i] == 0xfa)
+                               udelay(5);
+                       else if (radioa_array_table[i] == 0xf9)
+                               udelay(1);
+                       else {
+                               rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+                                             RFREG_OFFSET_MASK,
+                                             radioa_array_table[i + 1]);
+                               udelay(1);
+                       }
+               }
+               break;
+       case RF90_PATH_B:
+               for (i = 0; i < radiob_arraylen; i = i + 2) {
+                       if (radiob_array_table[i] == 0xfe) {
+                               mdelay(50);
+                       } else if (radiob_array_table[i] == 0xfd)
+                               mdelay(5);
+                       else if (radiob_array_table[i] == 0xfc)
+                               mdelay(1);
+                       else if (radiob_array_table[i] == 0xfb)
+                               udelay(50);
+                       else if (radiob_array_table[i] == 0xfa)
+                               udelay(5);
+                       else if (radiob_array_table[i] == 0xf9)
+                               udelay(1);
+                       else {
+                               rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
+                                             RFREG_OFFSET_MASK,
+                                             radiob_array_table[i + 1]);
+                               udelay(1);
+                       }
+               }
+               break;
+       case RF90_PATH_C:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       case RF90_PATH_D:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       }
+       return true;
+}
+
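+/* Apply a pending 20/40 MHz bandwidth change to the MAC, baseband and RF6052 */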
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u8 reg_bw_opmode;
+       u8 reg_prsr_rsc;
+
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+                ("Switch to %s bandwidth\n",
+                 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+                 "20MHz" : "40MHz"));
+       if (is_hal_stop(rtlhal)) {
+               rtlphy->set_bwmode_inprogress = false;
+               return;
+       }
+       reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+       reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+       switch (rtlphy->current_chan_bw) {
+       case HT_CHANNEL_WIDTH_20:
+               reg_bw_opmode |= BW_OPMODE_20MHZ;
+               rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+               rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+               reg_prsr_rsc =
+                   (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
+               rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+               break;
+       }
+       switch (rtlphy->current_chan_bw) {
+       case HT_CHANNEL_WIDTH_20:
+               rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+               rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+               rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+               rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+               rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+                             (mac->cur_40_prime_sc >> 1));
+               rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+               rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
+               rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+                             (mac->cur_40_prime_sc ==
+                              HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+               break;
+       }
+       rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+       rtlphy->set_bwmode_inprogress = false;
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       mutex_lock(&rtlpriv->io.bb_mutex);
+       rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+       rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+       mutex_unlock(&rtlpriv->io.bb_mutex);
+}
+
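+/*
+ * LC calibration: pause TX (or park the active RF path(s)), start the
+ * calibration via RF register 0x18, wait 100 ms, then restore the
+ * previous TX/RF state.
+ */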
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+       u8 tmpreg;
+       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+       if ((tmpreg & 0x70) != 0)
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+       else
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+       if ((tmpreg & 0x70) != 0) {
+               rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+               if (is2t)
+                       rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+                                                 MASK12BITS);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+                             (rf_a_mode & 0x8FFFF) | 0x10000);
+               if (is2t)
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+                                     (rf_b_mode & 0x8FFFF) | 0x10000);
+       }
+       lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
+       mdelay(100);
+       if ((tmpreg & 0x70) != 0) {
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+               if (is2t)
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+                                     rf_b_mode);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+       }
+}
+
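+/*
+ * Switch the RF into the requested power state (ERFON/ERFOFF/ERFSLEEP),
+ * waiting for busy TX queues to drain and updating LED/NIC state on the way.
+ */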
+bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                           enum rf_pwrstate rfpwr_state)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool bresult = true;
+       u8 i, queue_id;
+       struct rtl8192_tx_ring *ring = NULL;
+
+       ppsc->set_rfpowerstate_inprogress = true;
+       switch (rfpwr_state) {
+       case ERFON:
+               if ((ppsc->rfpwr_state == ERFOFF) &&
+                   RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+                       bool rtstatus;
+                       u32 InitializeCount = 0;
+
+                       do {
+                               InitializeCount++;
+                               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                        ("IPS Set eRf nic enable\n"));
+                               rtstatus = rtl_ps_enable_nic(hw);
+                       } while ((rtstatus != true)
+                                && (InitializeCount < 10));
+                       RT_CLEAR_PS_LEVEL(ppsc,
+                                         RT_RF_OFF_LEVL_HALT_NIC);
+               } else {
+                       RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                ("Set ERFON sleeped:%d ms\n",
+                                 jiffies_to_msecs(jiffies -
+                                                  ppsc->
+                                                  last_sleep_jiffies)));
+                       ppsc->last_awake_jiffies = jiffies;
+                       rtl92ce_phy_set_rf_on(hw);
+               }
+               if (mac->link_state == MAC80211_LINKED) {
+                       rtlpriv->cfg->ops->led_control(hw,
+                                                      LED_CTL_LINK);
+               } else {
+                       rtlpriv->cfg->ops->led_control(hw,
+                                                      LED_CTL_NO_LINK);
+               }
+               break;
+       case ERFOFF:
+               for (queue_id = 0, i = 0;
+                    queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+                       ring = &pcipriv->dev.tx_ring[queue_id];
+                       if (skb_queue_len(&ring->queue) == 0 ||
+                               queue_id == BEACON_QUEUE) {
+                               queue_id++;
+                               continue;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("eRf Off/Sleep: %d times "
+                                         "TcbBusyQueue[%d] "
+                                         "=%d before doze!\n", (i + 1),
+                                         queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               udelay(10);
+                               i++;
+                       }
+                       if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("\nERFOFF: %d times "
+                                         "TcbBusyQueue[%d] = %d !\n",
+                                         MAX_DOZE_WAITING_TIMES_9x,
+                                         queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               break;
+                       }
+               }
+               if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+                       RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                ("IPS Set eRf nic disable\n"));
+                       rtl_ps_disable_nic(hw);
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+               } else {
+                       if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+                               rtlpriv->cfg->ops->led_control(hw,
+                                                        LED_CTL_NO_LINK);
+                       } else {
+                               rtlpriv->cfg->ops->led_control(hw,
+                                                        LED_CTL_POWER_OFF);
+                       }
+               }
+               break;
+       case ERFSLEEP:
+               if (ppsc->rfpwr_state == ERFOFF)
+                       break;
+               for (queue_id = 0, i = 0;
+                    queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+                       ring = &pcipriv->dev.tx_ring[queue_id];
+                       if (skb_queue_len(&ring->queue) == 0) {
+                               queue_id++;
+                               continue;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("eRf Off/Sleep: %d times "
+                                         "TcbBusyQueue[%d] =%d before "
+                                         "doze!\n", (i + 1), queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               udelay(10);
+                               i++;
+                       }
+                       if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("\n ERFSLEEP: %d times "
+                                         "TcbBusyQueue[%d] = %d !\n",
+                                         MAX_DOZE_WAITING_TIMES_9x,
+                                         queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               break;
+                       }
+               }
+               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                        ("Set ERFSLEEP awaked:%d ms\n",
+                         jiffies_to_msecs(jiffies -
+                                          ppsc->last_awake_jiffies)));
+               ppsc->last_sleep_jiffies = jiffies;
+               _rtl92c_phy_set_rf_sleep(hw);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               bresult = false;
+               break;
+       }
+       if (bresult)
+               ppsc->rfpwr_state = rfpwr_state;
+       ppsc->set_rfpowerstate_inprogress = false;
+       return bresult;
+}
+
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state)
+{
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool bresult = false;
+
+       if (rfpwr_state == ppsc->rfpwr_state)
+               return bresult;
+       bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state);
+       return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
new file mode 100644 (file)
index 0000000..0629955
--- /dev/null
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/phy.h"
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw);
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
new file mode 100644 (file)
index 0000000..7f1be61
--- /dev/null
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
new file mode 100644 (file)
index 0000000..1c79c22
--- /dev/null
@@ -0,0 +1,493 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
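+/* Select 20 or 40 MHz operation in the RF6052 channel/bandwidth register */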
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       switch (bandwidth) {
+       case HT_CHANNEL_WIDTH_20:
+               rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+                                            0xfffff3ff) | 0x0400);
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+                             rtlphy->rfreg_chnlval[0]);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+                                            0xfffff3ff));
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+                             rtlphy->rfreg_chnlval[0]);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("unknown bandwidth: %#X\n", bandwidth));
+               break;
+       }
+}
+
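+/*
+ * Program CCK TX power for both RF paths, applying scan and dynamic
+ * high-power overrides and clamping each byte to RF6052_MAX_TX_PWR.
+ */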
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
+       bool turbo_scanoff = false;
+       u8 idx1, idx2;
+       u8 *ptr;
+
+       if (rtlhal->interface == INTF_PCI) {
+               if (rtlefuse->eeprom_regulatory != 0)
+                       turbo_scanoff = true;
+       } else {
+               if ((rtlefuse->eeprom_regulatory != 0) ||
+                   (rtlefuse->external_pa))
+                       turbo_scanoff = true;
+       }
+       if (mac->act_scanning == true) {
+               tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+               tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+               if (turbo_scanoff) {
+                       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+                               tx_agc[idx1] = ppowerlevel[idx1] |
+                                   (ppowerlevel[idx1] << 8) |
+                                   (ppowerlevel[idx1] << 16) |
+                                   (ppowerlevel[idx1] << 24);
+                               if (rtlhal->interface == INTF_USB) {
+                                       if (tx_agc[idx1] > 0x20 &&
+                                           rtlefuse->external_pa)
+                                               tx_agc[idx1] = 0x20;
+                               }
+                       }
+               }
+       } else {
+               if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                   TXHIGHPWRLEVEL_LEVEL1) {
+                       tx_agc[RF90_PATH_A] = 0x10101010;
+                       tx_agc[RF90_PATH_B] = 0x10101010;
+               } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                          TXHIGHPWRLEVEL_LEVEL2) {
+                       tx_agc[RF90_PATH_A] = 0x00000000;
+                       tx_agc[RF90_PATH_B] = 0x00000000;
+               } else {
+                       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+                               tx_agc[idx1] = ppowerlevel[idx1] |
+                                   (ppowerlevel[idx1] << 8) |
+                                   (ppowerlevel[idx1] << 16) |
+                                   (ppowerlevel[idx1] << 24);
+                       }
+                       if (rtlefuse->eeprom_regulatory == 0) {
+                               tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][6]) +
+                                       (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][7] <<  8);
+                               tx_agc[RF90_PATH_A] += tmpval;
+                               tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][14]) +
+                                       (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][15] << 24);
+                               tx_agc[RF90_PATH_B] += tmpval;
+                       }
+               }
+       }
+       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+               ptr = (u8 *) (&(tx_agc[idx1]));
+               for (idx2 = 0; idx2 < 4; idx2++) {
+                       if (*ptr > RF6052_MAX_TX_PWR)
+                               *ptr = RF6052_MAX_TX_PWR;
+                       ptr++;
+               }
+       }
+       tmpval = tx_agc[RF90_PATH_A] & 0xff;
+       rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_A_CCK1_MCS32));
+
+       tmpval = tx_agc[RF90_PATH_A] >> 8;
+       if (mac->mode == WIRELESS_MODE_B)
+               tmpval = tmpval & 0xff00ffff;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK11_A_CCK2_11));
+       tmpval = tx_agc[RF90_PATH_B] >> 24;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK11_A_CCK2_11));
+       tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK1_55_MCS32));
+}
+
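+/* Build per-path OFDM and MCS power-base words from the configured power levels */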
+static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
+                                     u8 *ppowerlevel, u8 channel,
+                                     u32 *ofdmbase, u32 *mcsbase)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u32 powerBase0, powerBase1;
+       u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
+       u8 i, powerlevel[2];
+
+       for (i = 0; i < 2; i++) {
+               powerlevel[i] = ppowerlevel[i];
+               legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+               powerBase0 = powerlevel[i] + legacy_pwrdiff;
+               powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+                   (powerBase0 << 8) | powerBase0;
+               *(ofdmbase + i) = powerBase0;
+               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                       (" [OFDM power base index rf(%c) = 0x%x]\n",
+                        ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+       }
+       for (i = 0; i < 2; i++) {
+               if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+                       ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+                       powerlevel[i] += ht20_pwrdiff;
+               }
+               powerBase1 = powerlevel[i];
+               powerBase1 = (powerBase1 << 24) |
+                   (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+               *(mcsbase + i) = powerBase1;
+               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                       (" [MCS power base index rf(%c) = 0x%x]\n",
+                        ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+       }
+}
+
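+/*
+ * Combine the power base with the per-channel-group offsets selected by the
+ * EEPROM regulatory mode, then apply any dynamic high-power/BT adjustments.
+ */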
+static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+                                                      u8 channel, u8 index,
+                                                      u32 *powerBase0,
+                                                      u32 *powerBase1,
+                                                      u32 *p_outwriteval)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 i, chnlgroup = 0, pwr_diff_limit[4];
+       u32 writeVal, customer_limit, rf;
+
+       for (rf = 0; rf < 2; rf++) {
+               switch (rtlefuse->eeprom_regulatory) {
+               case 0:
+                       chnlgroup = 0;
+                       writeVal = rtlphy->mcs_txpwrlevel_origoffset
+                           [chnlgroup][index + (rf ? 8 : 0)]
+                           + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("RTK better performance,writeVal(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               case 1:
+                       if (rtlphy->pwrgroup_cnt == 1)
+                               chnlgroup = 0;
+                       if (rtlphy->pwrgroup_cnt >= 3) {
+                               if (channel <= 3)
+                                       chnlgroup = 0;
+                               else if (channel >= 4 && channel <= 9)
+                                       chnlgroup = 1;
+                               else if (channel > 9)
+                                       chnlgroup = 2;
+                               if (rtlphy->current_chan_bw ==
+                                   HT_CHANNEL_WIDTH_20)
+                                       chnlgroup++;
+                               else
+                                       chnlgroup += 4;
+                       }
+                       writeVal = rtlphy->mcs_txpwrlevel_origoffset
+                                       [chnlgroup][index +
+                                       (rf ? 8 : 0)] +
+                                       ((index < 2) ? powerBase0[rf] :
+                                       powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Realtek regulatory, 20MHz, "
+                               "writeVal(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               case 2:
+                       writeVal = ((index < 2) ? powerBase0[rf] :
+                                  powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Better regulatory,writeVal(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               case 3:
+                       chnlgroup = 0;
+                       if (rtlphy->current_chan_bw ==
+                           HT_CHANNEL_WIDTH_20_40) {
+                               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                                       ("customer's limit, 40MHzrf(%c) = "
+                                       "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+                                       rtlefuse->pwrgroup_ht40[rf]
+                                       [channel - 1]));
+                       } else {
+                               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                                       ("customer's limit, 20MHz rf(%c) = "
+                                       "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+                                       rtlefuse->pwrgroup_ht20[rf]
+                                       [channel - 1]));
+                       }
+                       for (i = 0; i < 4; i++) {
+                               pwr_diff_limit[i] =
+                                   (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+                                   [chnlgroup][index + (rf ? 8 : 0)]
+                                   & (0x7f << (i * 8))) >> (i * 8));
+                               if (rtlphy->current_chan_bw ==
+                                   HT_CHANNEL_WIDTH_20_40) {
+                                       if (pwr_diff_limit[i] >
+                                           rtlefuse->pwrgroup_ht40[rf]
+                                               [channel - 1])
+                                               pwr_diff_limit[i] = rtlefuse->
+                                                   pwrgroup_ht40[rf]
+                                                   [channel - 1];
+                               } else {
+                                       if (pwr_diff_limit[i] >
+                                           rtlefuse->pwrgroup_ht20[rf]
+                                               [channel - 1])
+                                               pwr_diff_limit[i] =
+                                                   rtlefuse->pwrgroup_ht20[rf]
+                                                   [channel - 1];
+                               }
+                       }
+                       customer_limit = (pwr_diff_limit[3] << 24) |
+                           (pwr_diff_limit[2] << 16) |
+                           (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Customer's limit rf(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), customer_limit));
+                       writeVal = customer_limit + ((index < 2) ?
+                                  powerBase0[rf] : powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Customer, writeVal rf(%c)= 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               default:
+                       chnlgroup = 0;
+                       writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+                                  [index + (rf ? 8 : 0)] + ((index < 2) ?
+                                  powerBase0[rf] : powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
+                               "performance, writeValrf(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               }
+               if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                   TXHIGHPWRLEVEL_LEVEL1)
+                       writeVal = 0x14141414;
+               else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                        TXHIGHPWRLEVEL_LEVEL2)
+                       writeVal = 0x00000000;
+               if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+                       writeVal = writeVal - 0x06060606;
+               /* TXHIGHPWRLEVEL_BT2 leaves writeVal unchanged */
+               *(p_outwriteval + rf) = writeVal;
+       }
+}
+
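+/* Clamp the per-rate power bytes and write them into the OFDM/MCS TXAGC registers */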
+static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+                                        u8 index, u32 *pValue)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u16 regoffset_a[6] = {
+               RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+               RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+               RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+       };
+       u16 regoffset_b[6] = {
+               RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+               RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+               RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+       };
+       u8 i, rf, pwr_val[4];
+       u32 writeVal;
+       u16 regoffset;
+
+       for (rf = 0; rf < 2; rf++) {
+               writeVal = pValue[rf];
+               for (i = 0; i < 4; i++) {
+                       pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
+                                         (i * 8));
+                       if (pwr_val[i] > RF6052_MAX_TX_PWR)
+                               pwr_val[i] = RF6052_MAX_TX_PWR;
+               }
+               writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+                   (pwr_val[1] << 8) | pwr_val[0];
+               if (rf == 0)
+                       regoffset = regoffset_a[index];
+               else
+                       regoffset = regoffset_b[index];
+               rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                       ("Set 0x%x = %08x\n", regoffset, writeVal));
+               if (((get_rf_type(rtlphy) == RF_2T2R) &&
+                    (regoffset == RTXAGC_A_MCS15_MCS12 ||
+                     regoffset == RTXAGC_B_MCS15_MCS12)) ||
+                   ((get_rf_type(rtlphy) != RF_2T2R) &&
+                    (regoffset == RTXAGC_A_MCS07_MCS04 ||
+                     regoffset == RTXAGC_B_MCS07_MCS04))) {
+                       writeVal = pwr_val[3];
+                       if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+                           regoffset == RTXAGC_A_MCS07_MCS04)
+                               regoffset = 0xc90;
+                       if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+                           regoffset == RTXAGC_B_MCS07_MCS04)
+                               regoffset = 0xc98;
+                       for (i = 0; i < 3; i++) {
+                               writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+                               rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+                                             (u8)writeVal);
+                       }
+               }
+       }
+}
+
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel)
+{
+       u32 writeVal[2], powerBase0[2], powerBase1[2];
+       u8 index = 0;
+
+       rtl92c_phy_get_power_base(hw, ppowerlevel,
+                                 channel, &powerBase0[0], &powerBase1[0]);
+       for (index = 0; index < 6; index++) {
+               _rtl92c_get_txpower_writeval_by_regulatory(hw,
+                                                          channel, index,
+                                                          &powerBase0[0],
+                                                          &powerBase1[0],
+                                                          &writeVal[0]);
+               _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+       }
+}
+
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool rtstatus = true;
+       u8 b_reg_hwparafile = 1;
+
+       if (rtlphy->rf_type == RF_1T1R)
+               rtlphy->num_total_rfpath = 1;
+       else
+               rtlphy->num_total_rfpath = 2;
+       if (b_reg_hwparafile == 1)
+               rtstatus = _rtl92c_phy_rf6052_config_parafile(hw);
+       return rtstatus;
+}
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 u4_regvalue = 0;
+       u8 rfpath;
+       bool rtstatus = true;
+       struct bb_reg_def *pphyreg;
+
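+       /* For each active RF path: save the RF_ENV control bits, enable the
+        * software 3-wire interface, load the path's radio register table
+        * from the header file, then restore the saved RF_ENV setting.
+        */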
+       for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+               pphyreg = &rtlphy->phyreg_def[rfpath];
+               switch (rfpath) {
+               case RF90_PATH_A:
+               case RF90_PATH_C:
+                       u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+                                                   BRFSI_RFENV);
+                       break;
+               case RF90_PATH_B:
+               case RF90_PATH_D:
+                       u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+                                                   BRFSI_RFENV << 16);
+                       break;
+               }
+               rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+               udelay(1);
+               rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+               udelay(1);
+               rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+                             B3WIREADDREAALENGTH, 0x0);
+               udelay(1);
+               rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+               udelay(1);
+               switch (rfpath) {
+               case RF90_PATH_A:
+                       rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+                                       (enum radio_path) rfpath);
+                       break;
+               case RF90_PATH_B:
+                       rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+                                       (enum radio_path) rfpath);
+                       break;
+               case RF90_PATH_C:
+                       break;
+               case RF90_PATH_D:
+                       break;
+               }
+               switch (rfpath) {
+               case RF90_PATH_A:
+               case RF90_PATH_C:
+                       rtl_set_bbreg(hw, pphyreg->rfintfs,
+                                     BRFSI_RFENV, u4_regvalue);
+                       break;
+               case RF90_PATH_B:
+               case RF90_PATH_D:
+                       rtl_set_bbreg(hw, pphyreg->rfintfs,
+                                     BRFSI_RFENV << 16, u4_regvalue);
+                       break;
+               }
+               if (!rtstatus) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                ("Radio[%d] Fail!!", rfpath));
+                       goto phy_rf_cfg_fail;
+               }
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+       return rtstatus;
+phy_rf_cfg_fail:
+       return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
new file mode 100644 (file)
index 0000000..86c2728
--- /dev/null
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_RF_H__
+#define __RTL92CU_RF_H__
+
+#define RF6052_MAX_TX_PWR              0x3F
+#define RF6052_MAX_REG                 0x3F
+#define RF6052_MAX_PATH                        2
+
+extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+                                           u8 bandwidth);
+extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                             u8 *ppowerlevel);
+extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                              u8 *ppowerlevel, u8 channel);
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
new file mode 100644 (file)
index 0000000..71244a3
--- /dev/null
@@ -0,0 +1,336 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../efuse.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "rf.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "hw.h"
+#include <linux/vmalloc.h>
+
+MODULE_AUTHOR("Georgia         <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang       <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger    <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+
+static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_flag = 0;
+       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.thermalvalue = 0;
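+       /* 16 KB (0x4000) buffer for the rtlwifi/rtl8192cufw.bin firmware image. */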
+       rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
+       if (!rtlpriv->rtlhal.pfirmware) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't alloc buffer for fw.\n"));
+               return 1;
+       }
+       return 0;
+}
+
+static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->rtlhal.pfirmware) {
+               vfree(rtlpriv->rtlhal.pfirmware);
+               rtlpriv->rtlhal.pfirmware = NULL;
+       }
+}
+
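+/* Chip-specific callbacks handed to the common rtlwifi core via the
+ * rtl92cu_hal_cfg structure below.
+ */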
+static struct rtl_hal_ops rtl8192cu_hal_ops = {
+       .init_sw_vars = rtl92cu_init_sw_vars,
+       .deinit_sw_vars = rtl92cu_deinit_sw_vars,
+       .read_chip_version = rtl92c_read_chip_version,
+       .read_eeprom_info = rtl92cu_read_eeprom_info,
+       .enable_interrupt = rtl92c_enable_interrupt,
+       .disable_interrupt = rtl92c_disable_interrupt,
+       .hw_init = rtl92cu_hw_init,
+       .hw_disable = rtl92cu_card_disable,
+       .set_network_type = rtl92cu_set_network_type,
+       .set_chk_bssid = rtl92cu_set_check_bssid,
+       .set_qos = rtl92c_set_qos,
+       .set_bcn_reg = rtl92cu_set_beacon_related_registers,
+       .set_bcn_intv = rtl92cu_set_beacon_interval,
+       .update_interrupt_mask = rtl92cu_update_interrupt_mask,
+       .get_hw_reg = rtl92cu_get_hw_reg,
+       .set_hw_reg = rtl92cu_set_hw_reg,
+       .update_rate_table = rtl92cu_update_hal_rate_table,
+       .update_rate_mask = rtl92cu_update_hal_rate_mask,
+       .fill_tx_desc = rtl92cu_tx_fill_desc,
+       .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
+       .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
+       .cmd_send_packet = rtl92cu_cmd_send_packet,
+       .query_rx_desc = rtl92cu_rx_query_desc,
+       .set_channel_access = rtl92cu_update_channel_access_setting,
+       .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
+       .set_bw_mode = rtl92c_phy_set_bw_mode,
+       .switch_channel = rtl92c_phy_sw_chnl,
+       .dm_watchdog = rtl92c_dm_watchdog,
+       .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+       .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
+       .led_control = rtl92cu_led_control,
+       .enable_hw_sec = rtl92cu_enable_hw_security_config,
+       .set_key = rtl92c_set_key,
+       .init_sw_leds = rtl92cu_init_sw_leds,
+       .deinit_sw_leds = rtl92cu_deinit_sw_leds,
+       .get_bbreg = rtl92c_phy_query_bb_reg,
+       .set_bbreg = rtl92c_phy_set_bb_reg,
+       .get_rfreg = rtl92cu_phy_query_rf_reg,
+       .set_rfreg = rtl92cu_phy_set_rf_reg,
+       .phy_rf6052_config = rtl92cu_phy_rf6052_config,
+       .phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
+       .phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
+       .config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
+       .config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
+       .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
+       .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
+       .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
+};
+
+static struct rtl_mod_params rtl92cu_mod_params = {
+       .sw_crypto = 0,
+};
+
+static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
+       /* rx */
+       .in_ep_num = RTL92C_USB_BULK_IN_NUM,
+       .rx_urb_num = RTL92C_NUM_RX_URBS,
+       .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
+       .usb_rx_hdl = rtl8192cu_rx_hdl,
+       .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
+       /* tx */
+       .usb_tx_cleanup = rtl8192c_tx_cleanup,
+       .usb_tx_post_hdl = rtl8192c_tx_post_hdl,
+       .usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
+       /* endpoint mapping */
+       .usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
+       .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
+};
+
+static struct rtl_hal_cfg rtl92cu_hal_cfg = {
+       .name = "rtl92c_usb",
+       .fw_name = "rtlwifi/rtl8192cufw.bin",
+       .ops = &rtl8192cu_hal_ops,
+       .mod_params = &rtl92cu_mod_params,
+       .usb_interface_cfg = &rtl92cu_interface_cfg,
+
+       .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+       .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+       .maps[SYS_CLK] = REG_SYS_CLKR,
+       .maps[MAC_RCR_AM] = AM,
+       .maps[MAC_RCR_AB] = AB,
+       .maps[MAC_RCR_ACRC32] = ACRC32,
+       .maps[MAC_RCR_ACF] = ACF,
+       .maps[MAC_RCR_AAP] = AAP,
+
+       .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+       .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+       .maps[EFUSE_CLK] = 0,
+       .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+       .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+       .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+       .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+       .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
+       .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+       .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+       .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+       .maps[RWCAM] = REG_CAMCMD,
+       .maps[WCAMI] = REG_CAMWRITE,
+       .maps[RCAMO] = REG_CAMREAD,
+       .maps[CAMDBG] = REG_CAMDBG,
+       .maps[SECR] = REG_SECCFG,
+       .maps[SEC_CAM_NONE] = CAM_NONE,
+       .maps[SEC_CAM_WEP40] = CAM_WEP40,
+       .maps[SEC_CAM_TKIP] = CAM_TKIP,
+       .maps[SEC_CAM_AES] = CAM_AES,
+       .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+       .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+       .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+       .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+       .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+       .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+       .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+       .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+       .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+       .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+       .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+       .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+       .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+       .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+       .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+       .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+       .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+       .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+       .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+       .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
+       .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+       .maps[RTL_IMR_RDU] = IMR_RDU,
+       .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+       .maps[RTL_IMR_BDOK] = IMR_BDOK,
+       .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+       .maps[RTL_IMR_TBDER] = IMR_TBDER,
+       .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+       .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+       .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+       .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+       .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+       .maps[RTL_IMR_VODOK] = IMR_VODOK,
+       .maps[RTL_IMR_ROK] = IMR_ROK,
+       .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+       .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+       .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+#define USB_VENDER_ID_REALTEK          0x0bda
+
+/* 2010-10-19 DID_USB_V3.4 */
+static struct usb_device_id rtl8192c_usb_ids[] = {
+
+       /*=== Realtek demoboard ===*/
+       /* Default ID */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+
+       /****** 8188CU ********/
+       /* 8188CE-VAU USB miniCard */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+       /* 8188cu 1*1 dongle */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+       /* 8188cu 1*1 dongle, (b/g mode only) */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+       /* 8188cu Slim Solo */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+       /* 8188cu Slim Combo */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+       /* 8188RU High-power USB Dongle */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+       /* 8188CE-VAU USB miniCard (b/g mode only) */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+       /* 8188 Combo for BC4 */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+       /****** 8192CU ********/
+       /* 8191cu 1*2 */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+       /* 8192cu 2*2 */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+       /* 8192CE-VAU USB miniCard */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+
+       /*=== Customer ID ===*/
+       /****** 8188CU ********/
+       {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+       {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+       {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+       {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+       {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*No Brand - Etop*/
+       /* HP - Lite-On, 8188CUS Slim Combo */
+       {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+       {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+       {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+       {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
+       {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
+       {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
+       {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+       /* Russian customer - Azwave (8188CE-VAU, b/g mode only) */
+       {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)},
+
+       /****** 8192CU ********/
+       {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
+       {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
+       {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+       {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
+       {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+       {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+       {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+       {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+       {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+       {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+
+static struct usb_driver rtl8192cu_driver = {
+       .name = "rtl8192cu",
+       .probe = rtl_usb_probe,
+       .disconnect = rtl_usb_disconnect,
+       .id_table = rtl8192c_usb_ids,
+
+#ifdef CONFIG_PM
+       /* .suspend = rtl_usb_suspend, */
+       /* .resume = rtl_usb_resume, */
+       /* .reset_resume = rtl8192c_resume, */
+#endif /* CONFIG_PM */
+#ifdef CONFIG_AUTOSUSPEND
+       .supports_autosuspend = 1,
+#endif
+};
+
+static int __init rtl8192cu_init(void)
+{
+       return usb_register(&rtl8192cu_driver);
+}
+
+static void __exit rtl8192cu_exit(void)
+{
+       usb_deregister(&rtl8192cu_driver);
+}
+
+module_init(rtl8192cu_init);
+module_exit(rtl8192cu_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
new file mode 100644 (file)
index 0000000..43b1177
--- /dev/null
@@ -0,0 +1,53 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_SW_H__
+#define __RTL92CU_SW_H__
+
+#define EFUSE_MAX_SECTION      16
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *powerlevel);
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                           u8 configtype);
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                   u8 configtype);
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath,
+                          u32 regaddr, u32 bitmask, u32 data);
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
new file mode 100644 (file)
index 0000000..d57ef5e
--- /dev/null
@@ -0,0 +1,1888 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+
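+/* The PHY_REG, radio and MAC tables below are flattened { register address,
+ * value } pairs consumed in order by the *_config_*_with_headerfile loaders.
+ */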
+u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
+       0x024, 0x0011800f,
+       0x028, 0x00ffdb83,
+       0x800, 0x80040002,
+       0x804, 0x00000003,
+       0x808, 0x0000fc00,
+       0x80c, 0x0000000a,
+       0x810, 0x10005388,
+       0x814, 0x020c3d10,
+       0x818, 0x02200385,
+       0x81c, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390004,
+       0x828, 0x01000100,
+       0x82c, 0x00390004,
+       0x830, 0x27272727,
+       0x834, 0x27272727,
+       0x838, 0x27272727,
+       0x83c, 0x27272727,
+       0x840, 0x00010000,
+       0x844, 0x00010000,
+       0x848, 0x27272727,
+       0x84c, 0x27272727,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569a569a,
+       0x85c, 0x0c1b25a4,
+       0x860, 0x66e60230,
+       0x864, 0x061f0130,
+       0x868, 0x27272727,
+       0x86c, 0x2b2b2b27,
+       0x870, 0x07000700,
+       0x874, 0x22184000,
+       0x878, 0x08080808,
+       0x87c, 0x00000000,
+       0x880, 0xc0083070,
+       0x884, 0x000004d5,
+       0x888, 0x00000000,
+       0x88c, 0xcc0000c0,
+       0x890, 0x00000800,
+       0x894, 0xfffffffe,
+       0x898, 0x40302010,
+       0x89c, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90c, 0x81121313,
+       0xa00, 0x00d047c8,
+       0xa04, 0x80ff000c,
+       0xa08, 0x8c838300,
+       0xa0c, 0x2e68120f,
+       0xa10, 0x9500bb78,
+       0xa14, 0x11144028,
+       0xa18, 0x00881117,
+       0xa1c, 0x89140f00,
+       0xa20, 0x1a1b0000,
+       0xa24, 0x090e1317,
+       0xa28, 0x00000204,
+       0xa2c, 0x00d30000,
+       0xa70, 0x101fbf00,
+       0xa74, 0x00000007,
+       0xc00, 0x48071d40,
+       0xc04, 0x03a05633,
+       0xc08, 0x000000e4,
+       0xc0c, 0x6c6c6c6c,
+       0xc10, 0x08800000,
+       0xc14, 0x40000100,
+       0xc18, 0x08800000,
+       0xc1c, 0x40000100,
+       0xc20, 0x00000000,
+       0xc24, 0x00000000,
+       0xc28, 0x00000000,
+       0xc2c, 0x00000000,
+       0xc30, 0x69e9ac44,
+       0xc34, 0x469652cf,
+       0xc38, 0x49795994,
+       0xc3c, 0x0a97971c,
+       0xc40, 0x1f7c403f,
+       0xc44, 0x000100b7,
+       0xc48, 0xec020107,
+       0xc4c, 0x007f037f,
+       0xc50, 0x6954341e,
+       0xc54, 0x43bc0094,
+       0xc58, 0x6954341e,
+       0xc5c, 0x433c0094,
+       0xc60, 0x00000000,
+       0xc64, 0x5116848b,
+       0xc68, 0x47c00bff,
+       0xc6c, 0x00000036,
+       0xc70, 0x2c7f000d,
+       0xc74, 0x0186115b,
+       0xc78, 0x0000001f,
+       0xc7c, 0x00b99612,
+       0xc80, 0x40000100,
+       0xc84, 0x20f60000,
+       0xc88, 0x40000100,
+       0xc8c, 0x20200000,
+       0xc90, 0x00121820,
+       0xc94, 0x00000000,
+       0xc98, 0x00121820,
+       0xc9c, 0x00007f7f,
+       0xca0, 0x00000000,
+       0xca4, 0x00000080,
+       0xca8, 0x00000000,
+       0xcac, 0x00000000,
+       0xcb0, 0x00000000,
+       0xcb4, 0x00000000,
+       0xcb8, 0x00000000,
+       0xcbc, 0x28000000,
+       0xcc0, 0x00000000,
+       0xcc4, 0x00000000,
+       0xcc8, 0x00000000,
+       0xccc, 0x00000000,
+       0xcd0, 0x00000000,
+       0xcd4, 0x00000000,
+       0xcd8, 0x64b22427,
+       0xcdc, 0x00766932,
+       0xce0, 0x00222222,
+       0xce4, 0x00000000,
+       0xce8, 0x37644302,
+       0xcec, 0x2f97d40c,
+       0xd00, 0x00080740,
+       0xd04, 0x00020403,
+       0xd08, 0x0000907f,
+       0xd0c, 0x20010201,
+       0xd10, 0xa0633333,
+       0xd14, 0x3333bc43,
+       0xd18, 0x7a8f5b6b,
+       0xd2c, 0xcc979975,
+       0xd30, 0x00000000,
+       0xd34, 0x80608000,
+       0xd38, 0x00000000,
+       0xd3c, 0x00027293,
+       0xd40, 0x00000000,
+       0xd44, 0x00000000,
+       0xd48, 0x00000000,
+       0xd4c, 0x00000000,
+       0xd50, 0x6437140a,
+       0xd54, 0x00000000,
+       0xd58, 0x00000000,
+       0xd5c, 0x30032064,
+       0xd60, 0x4653de68,
+       0xd64, 0x04518a3c,
+       0xd68, 0x00002101,
+       0xd6c, 0x2a201c16,
+       0xd70, 0x1812362e,
+       0xd74, 0x322c2220,
+       0xd78, 0x000e3c24,
+       0xe00, 0x2a2a2a2a,
+       0xe04, 0x2a2a2a2a,
+       0xe08, 0x03902a2a,
+       0xe10, 0x2a2a2a2a,
+       0xe14, 0x2a2a2a2a,
+       0xe18, 0x2a2a2a2a,
+       0xe1c, 0x2a2a2a2a,
+       0xe28, 0x00000000,
+       0xe30, 0x1000dc1f,
+       0xe34, 0x10008c1f,
+       0xe38, 0x02140102,
+       0xe3c, 0x681604c2,
+       0xe40, 0x01007c00,
+       0xe44, 0x01004800,
+       0xe48, 0xfb000000,
+       0xe4c, 0x000028d1,
+       0xe50, 0x1000dc1f,
+       0xe54, 0x10008c1f,
+       0xe58, 0x02140102,
+       0xe5c, 0x28160d05,
+       0xe60, 0x00000010,
+       0xe68, 0x001b25a4,
+       0xe6c, 0x63db25a4,
+       0xe70, 0x63db25a4,
+       0xe74, 0x0c1b25a4,
+       0xe78, 0x0c1b25a4,
+       0xe7c, 0x0c1b25a4,
+       0xe80, 0x0c1b25a4,
+       0xe84, 0x63db25a4,
+       0xe88, 0x0c1b25a4,
+       0xe8c, 0x63db25a4,
+       0xed0, 0x63db25a4,
+       0xed4, 0x63db25a4,
+       0xed8, 0x63db25a4,
+       0xedc, 0x001b25a4,
+       0xee0, 0x001b25a4,
+       0xeec, 0x6fdb25a4,
+       0xf14, 0x00000003,
+       0xf4c, 0x00000000,
+       0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
+       0x024, 0x0011800f,
+       0x028, 0x00ffdb83,
+       0x800, 0x80040000,
+       0x804, 0x00000001,
+       0x808, 0x0000fc00,
+       0x80c, 0x0000000a,
+       0x810, 0x10005388,
+       0x814, 0x020c3d10,
+       0x818, 0x02200385,
+       0x81c, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390004,
+       0x828, 0x00000000,
+       0x82c, 0x00000000,
+       0x830, 0x00000000,
+       0x834, 0x00000000,
+       0x838, 0x00000000,
+       0x83c, 0x00000000,
+       0x840, 0x00010000,
+       0x844, 0x00000000,
+       0x848, 0x00000000,
+       0x84c, 0x00000000,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569a569a,
+       0x85c, 0x001b25a4,
+       0x860, 0x66e60230,
+       0x864, 0x061f0130,
+       0x868, 0x00000000,
+       0x86c, 0x32323200,
+       0x870, 0x07000700,
+       0x874, 0x22004000,
+       0x878, 0x00000808,
+       0x87c, 0x00000000,
+       0x880, 0xc0083070,
+       0x884, 0x000004d5,
+       0x888, 0x00000000,
+       0x88c, 0xccc000c0,
+       0x890, 0x00000800,
+       0x894, 0xfffffffe,
+       0x898, 0x40302010,
+       0x89c, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90c, 0x81121111,
+       0xa00, 0x00d047c8,
+       0xa04, 0x80ff000c,
+       0xa08, 0x8c838300,
+       0xa0c, 0x2e68120f,
+       0xa10, 0x9500bb78,
+       0xa14, 0x11144028,
+       0xa18, 0x00881117,
+       0xa1c, 0x89140f00,
+       0xa20, 0x1a1b0000,
+       0xa24, 0x090e1317,
+       0xa28, 0x00000204,
+       0xa2c, 0x00d30000,
+       0xa70, 0x101fbf00,
+       0xa74, 0x00000007,
+       0xc00, 0x48071d40,
+       0xc04, 0x03a05611,
+       0xc08, 0x000000e4,
+       0xc0c, 0x6c6c6c6c,
+       0xc10, 0x08800000,
+       0xc14, 0x40000100,
+       0xc18, 0x08800000,
+       0xc1c, 0x40000100,
+       0xc20, 0x00000000,
+       0xc24, 0x00000000,
+       0xc28, 0x00000000,
+       0xc2c, 0x00000000,
+       0xc30, 0x69e9ac44,
+       0xc34, 0x469652cf,
+       0xc38, 0x49795994,
+       0xc3c, 0x0a97971c,
+       0xc40, 0x1f7c403f,
+       0xc44, 0x000100b7,
+       0xc48, 0xec020107,
+       0xc4c, 0x007f037f,
+       0xc50, 0x6954341e,
+       0xc54, 0x43bc0094,
+       0xc58, 0x6954341e,
+       0xc5c, 0x433c0094,
+       0xc60, 0x00000000,
+       0xc64, 0x5116848b,
+       0xc68, 0x47c00bff,
+       0xc6c, 0x00000036,
+       0xc70, 0x2c7f000d,
+       0xc74, 0x018610db,
+       0xc78, 0x0000001f,
+       0xc7c, 0x00b91612,
+       0xc80, 0x40000100,
+       0xc84, 0x20f60000,
+       0xc88, 0x40000100,
+       0xc8c, 0x20200000,
+       0xc90, 0x00121820,
+       0xc94, 0x00000000,
+       0xc98, 0x00121820,
+       0xc9c, 0x00007f7f,
+       0xca0, 0x00000000,
+       0xca4, 0x00000080,
+       0xca8, 0x00000000,
+       0xcac, 0x00000000,
+       0xcb0, 0x00000000,
+       0xcb4, 0x00000000,
+       0xcb8, 0x00000000,
+       0xcbc, 0x28000000,
+       0xcc0, 0x00000000,
+       0xcc4, 0x00000000,
+       0xcc8, 0x00000000,
+       0xccc, 0x00000000,
+       0xcd0, 0x00000000,
+       0xcd4, 0x00000000,
+       0xcd8, 0x64b22427,
+       0xcdc, 0x00766932,
+       0xce0, 0x00222222,
+       0xce4, 0x00000000,
+       0xce8, 0x37644302,
+       0xcec, 0x2f97d40c,
+       0xd00, 0x00080740,
+       0xd04, 0x00020401,
+       0xd08, 0x0000907f,
+       0xd0c, 0x20010201,
+       0xd10, 0xa0633333,
+       0xd14, 0x3333bc43,
+       0xd18, 0x7a8f5b6b,
+       0xd2c, 0xcc979975,
+       0xd30, 0x00000000,
+       0xd34, 0x80608000,
+       0xd38, 0x00000000,
+       0xd3c, 0x00027293,
+       0xd40, 0x00000000,
+       0xd44, 0x00000000,
+       0xd48, 0x00000000,
+       0xd4c, 0x00000000,
+       0xd50, 0x6437140a,
+       0xd54, 0x00000000,
+       0xd58, 0x00000000,
+       0xd5c, 0x30032064,
+       0xd60, 0x4653de68,
+       0xd64, 0x04518a3c,
+       0xd68, 0x00002101,
+       0xd6c, 0x2a201c16,
+       0xd70, 0x1812362e,
+       0xd74, 0x322c2220,
+       0xd78, 0x000e3c24,
+       0xe00, 0x2a2a2a2a,
+       0xe04, 0x2a2a2a2a,
+       0xe08, 0x03902a2a,
+       0xe10, 0x2a2a2a2a,
+       0xe14, 0x2a2a2a2a,
+       0xe18, 0x2a2a2a2a,
+       0xe1c, 0x2a2a2a2a,
+       0xe28, 0x00000000,
+       0xe30, 0x1000dc1f,
+       0xe34, 0x10008c1f,
+       0xe38, 0x02140102,
+       0xe3c, 0x681604c2,
+       0xe40, 0x01007c00,
+       0xe44, 0x01004800,
+       0xe48, 0xfb000000,
+       0xe4c, 0x000028d1,
+       0xe50, 0x1000dc1f,
+       0xe54, 0x10008c1f,
+       0xe58, 0x02140102,
+       0xe5c, 0x28160d05,
+       0xe60, 0x00000008,
+       0xe68, 0x001b25a4,
+       0xe6c, 0x631b25a0,
+       0xe70, 0x631b25a0,
+       0xe74, 0x081b25a0,
+       0xe78, 0x081b25a0,
+       0xe7c, 0x081b25a0,
+       0xe80, 0x081b25a0,
+       0xe84, 0x631b25a0,
+       0xe88, 0x081b25a0,
+       0xe8c, 0x631b25a0,
+       0xed0, 0x631b25a0,
+       0xed4, 0x631b25a0,
+       0xed8, 0x631b25a0,
+       0xedc, 0x001b25a0,
+       0xee0, 0x001b25a0,
+       0xeec, 0x6b1b25a0,
+       0xf14, 0x00000003,
+       0xf4c, 0x00000000,
+       0xf00, 0x00000300,
+};
+
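+/* The "_PG" (power-by-rate) table below is flattened as { register, bitmask,
+ * value } triples rather than pairs.
+ */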
+u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = {
+       0xe00, 0xffffffff, 0x07090c0c,
+       0xe04, 0xffffffff, 0x01020405,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x0b0c0c0e,
+       0xe14, 0xffffffff, 0x01030506,
+       0xe18, 0xffffffff, 0x0b0c0d0e,
+       0xe1c, 0xffffffff, 0x01030509,
+       0x830, 0xffffffff, 0x07090c0c,
+       0x834, 0xffffffff, 0x01020405,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x0b0c0d0e,
+       0x848, 0xffffffff, 0x01030509,
+       0x84c, 0xffffffff, 0x0b0c0d0e,
+       0x868, 0xffffffff, 0x01030509,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x04040404,
+       0xe04, 0xffffffff, 0x00020204,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x06060606,
+       0xe14, 0xffffffff, 0x00020406,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x04040404,
+       0x834, 0xffffffff, 0x00020204,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x06060606,
+       0x848, 0xffffffff, 0x00020406,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x04040404,
+       0xe04, 0xffffffff, 0x00020204,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x04040404,
+       0x834, 0xffffffff, 0x00020204,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb1,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e52c,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x019, 0x00000000,
+       0x01a, 0x00010255,
+       0x01b, 0x00060a00,
+       0x01c, 0x000fc378,
+       0x01d, 0x000a1250,
+       0x01e, 0x0004445f,
+       0x01f, 0x00080001,
+       0x020, 0x0000b614,
+       0x021, 0x0006c000,
+       0x022, 0x00000000,
+       0x023, 0x00001558,
+       0x024, 0x00000060,
+       0x025, 0x00000483,
+       0x026, 0x0004f000,
+       0x027, 0x000ec7d9,
+       0x028, 0x000577c0,
+       0x029, 0x00004783,
+       0x02a, 0x00000001,
+       0x02b, 0x00021334,
+       0x02a, 0x00000000,
+       0x02b, 0x00000054,
+       0x02a, 0x00000001,
+       0x02b, 0x00000808,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000c,
+       0x02a, 0x00000002,
+       0x02b, 0x00000808,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000003,
+       0x02b, 0x00000808,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000004,
+       0x02b, 0x00000808,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000005,
+       0x02b, 0x00000808,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000006,
+       0x02b, 0x00000709,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000007,
+       0x02b, 0x00000709,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000008,
+       0x02b, 0x0000060a,
+       0x02b, 0x0004b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000009,
+       0x02b, 0x0000060a,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000a,
+       0x02b, 0x0000060a,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000b,
+       0x02b, 0x0000060a,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000c,
+       0x02b, 0x0000060a,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000d,
+       0x02b, 0x0000060a,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000e,
+       0x02b, 0x0000050b,
+       0x02b, 0x00066666,
+       0x02c, 0x0000001a,
+       0x02a, 0x000e0000,
+       0x010, 0x0004000f,
+       0x011, 0x000e31fc,
+       0x010, 0x0006000f,
+       0x011, 0x000ff9f8,
+       0x010, 0x0002000f,
+       0x011, 0x000203f9,
+       0x010, 0x0003000f,
+       0x011, 0x000ff500,
+       0x010, 0x00000000,
+       0x011, 0x00000000,
+       0x010, 0x0008000f,
+       0x011, 0x0003f100,
+       0x010, 0x0009000f,
+       0x011, 0x00023100,
+       0x012, 0x00032000,
+       0x012, 0x00071000,
+       0x012, 0x000b0000,
+       0x012, 0x000fc000,
+       0x013, 0x000287af,
+       0x013, 0x000244b7,
+       0x013, 0x000204ab,
+       0x013, 0x0001c49f,
+       0x013, 0x00018493,
+       0x013, 0x00014297,
+       0x013, 0x00010295,
+       0x013, 0x0000c298,
+       0x013, 0x0000819c,
+       0x013, 0x000040a8,
+       0x013, 0x0000001c,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f424,
+       0x015, 0x0004f424,
+       0x015, 0x0008f424,
+       0x015, 0x000cf424,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+       0x000, 0x00010159,
+       0x018, 0x0000f401,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01f, 0x00080003,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01e, 0x00044457,
+       0x01f, 0x00080000,
+       0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb1,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e52c,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x012, 0x00032000,
+       0x012, 0x00071000,
+       0x012, 0x000b0000,
+       0x012, 0x000fc000,
+       0x013, 0x000287af,
+       0x013, 0x000244b7,
+       0x013, 0x000204ab,
+       0x013, 0x0001c49f,
+       0x013, 0x00018493,
+       0x013, 0x00014297,
+       0x013, 0x00010295,
+       0x013, 0x0000c298,
+       0x013, 0x0000819c,
+       0x013, 0x000040a8,
+       0x013, 0x0000001c,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f424,
+       0x015, 0x0004f424,
+       0x015, 0x0008f424,
+       0x015, 0x000cf424,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+};
+
+u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb1,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e52c,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x019, 0x00000000,
+       0x01a, 0x00010255,
+       0x01b, 0x00060a00,
+       0x01c, 0x000fc378,
+       0x01d, 0x000a1250,
+       0x01e, 0x0004445f,
+       0x01f, 0x00080001,
+       0x020, 0x0000b614,
+       0x021, 0x0006c000,
+       0x022, 0x00000000,
+       0x023, 0x00001558,
+       0x024, 0x00000060,
+       0x025, 0x00000483,
+       0x026, 0x0004f000,
+       0x027, 0x000ec7d9,
+       0x028, 0x000577c0,
+       0x029, 0x00004783,
+       0x02a, 0x00000001,
+       0x02b, 0x00021334,
+       0x02a, 0x00000000,
+       0x02b, 0x00000054,
+       0x02a, 0x00000001,
+       0x02b, 0x00000808,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000c,
+       0x02a, 0x00000002,
+       0x02b, 0x00000808,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000003,
+       0x02b, 0x00000808,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000004,
+       0x02b, 0x00000808,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000005,
+       0x02b, 0x00000808,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000006,
+       0x02b, 0x00000709,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000007,
+       0x02b, 0x00000709,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000008,
+       0x02b, 0x0000060a,
+       0x02b, 0x0004b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000009,
+       0x02b, 0x0000060a,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000a,
+       0x02b, 0x0000060a,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000b,
+       0x02b, 0x0000060a,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000c,
+       0x02b, 0x0000060a,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000d,
+       0x02b, 0x0000060a,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000e,
+       0x02b, 0x0000050b,
+       0x02b, 0x00066666,
+       0x02c, 0x0000001a,
+       0x02a, 0x000e0000,
+       0x010, 0x0004000f,
+       0x011, 0x000e31fc,
+       0x010, 0x0006000f,
+       0x011, 0x000ff9f8,
+       0x010, 0x0002000f,
+       0x011, 0x000203f9,
+       0x010, 0x0003000f,
+       0x011, 0x000ff500,
+       0x010, 0x00000000,
+       0x011, 0x00000000,
+       0x010, 0x0008000f,
+       0x011, 0x0003f100,
+       0x010, 0x0009000f,
+       0x011, 0x00023100,
+       0x012, 0x00032000,
+       0x012, 0x00071000,
+       0x012, 0x000b0000,
+       0x012, 0x000fc000,
+       0x013, 0x000287b3,
+       0x013, 0x000244b7,
+       0x013, 0x000204ab,
+       0x013, 0x0001c49f,
+       0x013, 0x00018493,
+       0x013, 0x0001429b,
+       0x013, 0x00010299,
+       0x013, 0x0000c29c,
+       0x013, 0x000081a0,
+       0x013, 0x000040ac,
+       0x013, 0x00000020,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f405,
+       0x015, 0x0004f405,
+       0x015, 0x0008f405,
+       0x015, 0x000cf405,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+       0x000, 0x00010159,
+       0x018, 0x0000f401,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01f, 0x00080003,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01e, 0x00044457,
+       0x01f, 0x00080000,
+       0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = {
+       0x0,
+};
+
+u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
+       0x420, 0x00000080,
+       0x423, 0x00000000,
+       0x430, 0x00000000,
+       0x431, 0x00000000,
+       0x432, 0x00000000,
+       0x433, 0x00000001,
+       0x434, 0x00000004,
+       0x435, 0x00000005,
+       0x436, 0x00000006,
+       0x437, 0x00000007,
+       0x438, 0x00000000,
+       0x439, 0x00000000,
+       0x43a, 0x00000000,
+       0x43b, 0x00000001,
+       0x43c, 0x00000004,
+       0x43d, 0x00000005,
+       0x43e, 0x00000006,
+       0x43f, 0x00000007,
+       0x440, 0x0000005d,
+       0x441, 0x00000001,
+       0x442, 0x00000000,
+       0x444, 0x00000015,
+       0x445, 0x000000f0,
+       0x446, 0x0000000f,
+       0x447, 0x00000000,
+       0x458, 0x00000041,
+       0x459, 0x000000a8,
+       0x45a, 0x00000072,
+       0x45b, 0x000000b9,
+       0x460, 0x00000066,
+       0x461, 0x00000066,
+       0x462, 0x00000008,
+       0x463, 0x00000003,
+       0x4c8, 0x000000ff,
+       0x4c9, 0x00000008,
+       0x4cc, 0x000000ff,
+       0x4cd, 0x000000ff,
+       0x4ce, 0x00000001,
+       0x500, 0x00000026,
+       0x501, 0x000000a2,
+       0x502, 0x0000002f,
+       0x503, 0x00000000,
+       0x504, 0x00000028,
+       0x505, 0x000000a3,
+       0x506, 0x0000005e,
+       0x507, 0x00000000,
+       0x508, 0x0000002b,
+       0x509, 0x000000a4,
+       0x50a, 0x0000005e,
+       0x50b, 0x00000000,
+       0x50c, 0x0000004f,
+       0x50d, 0x000000a4,
+       0x50e, 0x00000000,
+       0x50f, 0x00000000,
+       0x512, 0x0000001c,
+       0x514, 0x0000000a,
+       0x515, 0x00000010,
+       0x516, 0x0000000a,
+       0x517, 0x00000010,
+       0x51a, 0x00000016,
+       0x524, 0x0000000f,
+       0x525, 0x0000004f,
+       0x546, 0x00000040,
+       0x547, 0x00000000,
+       0x550, 0x00000010,
+       0x551, 0x00000010,
+       0x559, 0x00000002,
+       0x55a, 0x00000002,
+       0x55d, 0x000000ff,
+       0x605, 0x00000030,
+       0x608, 0x0000000e,
+       0x609, 0x0000002a,
+       0x652, 0x00000020,
+       0x63c, 0x0000000a,
+       0x63d, 0x0000000e,
+       0x63e, 0x0000000a,
+       0x63f, 0x0000000e,
+       0x66e, 0x00000005,
+       0x700, 0x00000021,
+       0x701, 0x00000043,
+       0x702, 0x00000065,
+       0x703, 0x00000087,
+       0x708, 0x00000021,
+       0x709, 0x00000043,
+       0x70a, 0x00000065,
+       0x70b, 0x00000087,
+};
+
+u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = {
+       0xc78, 0x7b000001,
+       0xc78, 0x7b010001,
+       0xc78, 0x7b020001,
+       0xc78, 0x7b030001,
+       0xc78, 0x7b040001,
+       0xc78, 0x7b050001,
+       0xc78, 0x7a060001,
+       0xc78, 0x79070001,
+       0xc78, 0x78080001,
+       0xc78, 0x77090001,
+       0xc78, 0x760a0001,
+       0xc78, 0x750b0001,
+       0xc78, 0x740c0001,
+       0xc78, 0x730d0001,
+       0xc78, 0x720e0001,
+       0xc78, 0x710f0001,
+       0xc78, 0x70100001,
+       0xc78, 0x6f110001,
+       0xc78, 0x6e120001,
+       0xc78, 0x6d130001,
+       0xc78, 0x6c140001,
+       0xc78, 0x6b150001,
+       0xc78, 0x6a160001,
+       0xc78, 0x69170001,
+       0xc78, 0x68180001,
+       0xc78, 0x67190001,
+       0xc78, 0x661a0001,
+       0xc78, 0x651b0001,
+       0xc78, 0x641c0001,
+       0xc78, 0x631d0001,
+       0xc78, 0x621e0001,
+       0xc78, 0x611f0001,
+       0xc78, 0x60200001,
+       0xc78, 0x49210001,
+       0xc78, 0x48220001,
+       0xc78, 0x47230001,
+       0xc78, 0x46240001,
+       0xc78, 0x45250001,
+       0xc78, 0x44260001,
+       0xc78, 0x43270001,
+       0xc78, 0x42280001,
+       0xc78, 0x41290001,
+       0xc78, 0x402a0001,
+       0xc78, 0x262b0001,
+       0xc78, 0x252c0001,
+       0xc78, 0x242d0001,
+       0xc78, 0x232e0001,
+       0xc78, 0x222f0001,
+       0xc78, 0x21300001,
+       0xc78, 0x20310001,
+       0xc78, 0x06320001,
+       0xc78, 0x05330001,
+       0xc78, 0x04340001,
+       0xc78, 0x03350001,
+       0xc78, 0x02360001,
+       0xc78, 0x01370001,
+       0xc78, 0x00380001,
+       0xc78, 0x00390001,
+       0xc78, 0x003a0001,
+       0xc78, 0x003b0001,
+       0xc78, 0x003c0001,
+       0xc78, 0x003d0001,
+       0xc78, 0x003e0001,
+       0xc78, 0x003f0001,
+       0xc78, 0x7b400001,
+       0xc78, 0x7b410001,
+       0xc78, 0x7b420001,
+       0xc78, 0x7b430001,
+       0xc78, 0x7b440001,
+       0xc78, 0x7b450001,
+       0xc78, 0x7a460001,
+       0xc78, 0x79470001,
+       0xc78, 0x78480001,
+       0xc78, 0x77490001,
+       0xc78, 0x764a0001,
+       0xc78, 0x754b0001,
+       0xc78, 0x744c0001,
+       0xc78, 0x734d0001,
+       0xc78, 0x724e0001,
+       0xc78, 0x714f0001,
+       0xc78, 0x70500001,
+       0xc78, 0x6f510001,
+       0xc78, 0x6e520001,
+       0xc78, 0x6d530001,
+       0xc78, 0x6c540001,
+       0xc78, 0x6b550001,
+       0xc78, 0x6a560001,
+       0xc78, 0x69570001,
+       0xc78, 0x68580001,
+       0xc78, 0x67590001,
+       0xc78, 0x665a0001,
+       0xc78, 0x655b0001,
+       0xc78, 0x645c0001,
+       0xc78, 0x635d0001,
+       0xc78, 0x625e0001,
+       0xc78, 0x615f0001,
+       0xc78, 0x60600001,
+       0xc78, 0x49610001,
+       0xc78, 0x48620001,
+       0xc78, 0x47630001,
+       0xc78, 0x46640001,
+       0xc78, 0x45650001,
+       0xc78, 0x44660001,
+       0xc78, 0x43670001,
+       0xc78, 0x42680001,
+       0xc78, 0x41690001,
+       0xc78, 0x406a0001,
+       0xc78, 0x266b0001,
+       0xc78, 0x256c0001,
+       0xc78, 0x246d0001,
+       0xc78, 0x236e0001,
+       0xc78, 0x226f0001,
+       0xc78, 0x21700001,
+       0xc78, 0x20710001,
+       0xc78, 0x06720001,
+       0xc78, 0x05730001,
+       0xc78, 0x04740001,
+       0xc78, 0x03750001,
+       0xc78, 0x02760001,
+       0xc78, 0x01770001,
+       0xc78, 0x00780001,
+       0xc78, 0x00790001,
+       0xc78, 0x007a0001,
+       0xc78, 0x007b0001,
+       0xc78, 0x007c0001,
+       0xc78, 0x007d0001,
+       0xc78, 0x007e0001,
+       0xc78, 0x007f0001,
+       0xc78, 0x3800001e,
+       0xc78, 0x3801001e,
+       0xc78, 0x3802001e,
+       0xc78, 0x3803001e,
+       0xc78, 0x3804001e,
+       0xc78, 0x3805001e,
+       0xc78, 0x3806001e,
+       0xc78, 0x3807001e,
+       0xc78, 0x3808001e,
+       0xc78, 0x3c09001e,
+       0xc78, 0x3e0a001e,
+       0xc78, 0x400b001e,
+       0xc78, 0x440c001e,
+       0xc78, 0x480d001e,
+       0xc78, 0x4c0e001e,
+       0xc78, 0x500f001e,
+       0xc78, 0x5210001e,
+       0xc78, 0x5611001e,
+       0xc78, 0x5a12001e,
+       0xc78, 0x5e13001e,
+       0xc78, 0x6014001e,
+       0xc78, 0x6015001e,
+       0xc78, 0x6016001e,
+       0xc78, 0x6217001e,
+       0xc78, 0x6218001e,
+       0xc78, 0x6219001e,
+       0xc78, 0x621a001e,
+       0xc78, 0x621b001e,
+       0xc78, 0x621c001e,
+       0xc78, 0x621d001e,
+       0xc78, 0x621e001e,
+       0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
+       0xc78, 0x7b000001,
+       0xc78, 0x7b010001,
+       0xc78, 0x7b020001,
+       0xc78, 0x7b030001,
+       0xc78, 0x7b040001,
+       0xc78, 0x7b050001,
+       0xc78, 0x7a060001,
+       0xc78, 0x79070001,
+       0xc78, 0x78080001,
+       0xc78, 0x77090001,
+       0xc78, 0x760a0001,
+       0xc78, 0x750b0001,
+       0xc78, 0x740c0001,
+       0xc78, 0x730d0001,
+       0xc78, 0x720e0001,
+       0xc78, 0x710f0001,
+       0xc78, 0x70100001,
+       0xc78, 0x6f110001,
+       0xc78, 0x6e120001,
+       0xc78, 0x6d130001,
+       0xc78, 0x6c140001,
+       0xc78, 0x6b150001,
+       0xc78, 0x6a160001,
+       0xc78, 0x69170001,
+       0xc78, 0x68180001,
+       0xc78, 0x67190001,
+       0xc78, 0x661a0001,
+       0xc78, 0x651b0001,
+       0xc78, 0x641c0001,
+       0xc78, 0x631d0001,
+       0xc78, 0x621e0001,
+       0xc78, 0x611f0001,
+       0xc78, 0x60200001,
+       0xc78, 0x49210001,
+       0xc78, 0x48220001,
+       0xc78, 0x47230001,
+       0xc78, 0x46240001,
+       0xc78, 0x45250001,
+       0xc78, 0x44260001,
+       0xc78, 0x43270001,
+       0xc78, 0x42280001,
+       0xc78, 0x41290001,
+       0xc78, 0x402a0001,
+       0xc78, 0x262b0001,
+       0xc78, 0x252c0001,
+       0xc78, 0x242d0001,
+       0xc78, 0x232e0001,
+       0xc78, 0x222f0001,
+       0xc78, 0x21300001,
+       0xc78, 0x20310001,
+       0xc78, 0x06320001,
+       0xc78, 0x05330001,
+       0xc78, 0x04340001,
+       0xc78, 0x03350001,
+       0xc78, 0x02360001,
+       0xc78, 0x01370001,
+       0xc78, 0x00380001,
+       0xc78, 0x00390001,
+       0xc78, 0x003a0001,
+       0xc78, 0x003b0001,
+       0xc78, 0x003c0001,
+       0xc78, 0x003d0001,
+       0xc78, 0x003e0001,
+       0xc78, 0x003f0001,
+       0xc78, 0x7b400001,
+       0xc78, 0x7b410001,
+       0xc78, 0x7b420001,
+       0xc78, 0x7b430001,
+       0xc78, 0x7b440001,
+       0xc78, 0x7b450001,
+       0xc78, 0x7a460001,
+       0xc78, 0x79470001,
+       0xc78, 0x78480001,
+       0xc78, 0x77490001,
+       0xc78, 0x764a0001,
+       0xc78, 0x754b0001,
+       0xc78, 0x744c0001,
+       0xc78, 0x734d0001,
+       0xc78, 0x724e0001,
+       0xc78, 0x714f0001,
+       0xc78, 0x70500001,
+       0xc78, 0x6f510001,
+       0xc78, 0x6e520001,
+       0xc78, 0x6d530001,
+       0xc78, 0x6c540001,
+       0xc78, 0x6b550001,
+       0xc78, 0x6a560001,
+       0xc78, 0x69570001,
+       0xc78, 0x68580001,
+       0xc78, 0x67590001,
+       0xc78, 0x665a0001,
+       0xc78, 0x655b0001,
+       0xc78, 0x645c0001,
+       0xc78, 0x635d0001,
+       0xc78, 0x625e0001,
+       0xc78, 0x615f0001,
+       0xc78, 0x60600001,
+       0xc78, 0x49610001,
+       0xc78, 0x48620001,
+       0xc78, 0x47630001,
+       0xc78, 0x46640001,
+       0xc78, 0x45650001,
+       0xc78, 0x44660001,
+       0xc78, 0x43670001,
+       0xc78, 0x42680001,
+       0xc78, 0x41690001,
+       0xc78, 0x406a0001,
+       0xc78, 0x266b0001,
+       0xc78, 0x256c0001,
+       0xc78, 0x246d0001,
+       0xc78, 0x236e0001,
+       0xc78, 0x226f0001,
+       0xc78, 0x21700001,
+       0xc78, 0x20710001,
+       0xc78, 0x06720001,
+       0xc78, 0x05730001,
+       0xc78, 0x04740001,
+       0xc78, 0x03750001,
+       0xc78, 0x02760001,
+       0xc78, 0x01770001,
+       0xc78, 0x00780001,
+       0xc78, 0x00790001,
+       0xc78, 0x007a0001,
+       0xc78, 0x007b0001,
+       0xc78, 0x007c0001,
+       0xc78, 0x007d0001,
+       0xc78, 0x007e0001,
+       0xc78, 0x007f0001,
+       0xc78, 0x3800001e,
+       0xc78, 0x3801001e,
+       0xc78, 0x3802001e,
+       0xc78, 0x3803001e,
+       0xc78, 0x3804001e,
+       0xc78, 0x3805001e,
+       0xc78, 0x3806001e,
+       0xc78, 0x3807001e,
+       0xc78, 0x3808001e,
+       0xc78, 0x3c09001e,
+       0xc78, 0x3e0a001e,
+       0xc78, 0x400b001e,
+       0xc78, 0x440c001e,
+       0xc78, 0x480d001e,
+       0xc78, 0x4c0e001e,
+       0xc78, 0x500f001e,
+       0xc78, 0x5210001e,
+       0xc78, 0x5611001e,
+       0xc78, 0x5a12001e,
+       0xc78, 0x5e13001e,
+       0xc78, 0x6014001e,
+       0xc78, 0x6015001e,
+       0xc78, 0x6016001e,
+       0xc78, 0x6217001e,
+       0xc78, 0x6218001e,
+       0xc78, 0x6219001e,
+       0xc78, 0x621a001e,
+       0xc78, 0x621b001e,
+       0xc78, 0x621c001e,
+       0xc78, 0x621d001e,
+       0xc78, 0x621e001e,
+       0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
+       0x024, 0x0011800f,
+       0x028, 0x00ffdb83,
+       0x040, 0x000c0004,
+       0x800, 0x80040000,
+       0x804, 0x00000001,
+       0x808, 0x0000fc00,
+       0x80c, 0x0000000a,
+       0x810, 0x10005388,
+       0x814, 0x020c3d10,
+       0x818, 0x02200385,
+       0x81c, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390204,
+       0x828, 0x00000000,
+       0x82c, 0x00000000,
+       0x830, 0x00000000,
+       0x834, 0x00000000,
+       0x838, 0x00000000,
+       0x83c, 0x00000000,
+       0x840, 0x00010000,
+       0x844, 0x00000000,
+       0x848, 0x00000000,
+       0x84c, 0x00000000,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569a569a,
+       0x85c, 0x001b25a4,
+       0x860, 0x66e60230,
+       0x864, 0x061f0130,
+       0x868, 0x00000000,
+       0x86c, 0x20202000,
+       0x870, 0x03000300,
+       0x874, 0x22004000,
+       0x878, 0x00000808,
+       0x87c, 0x00ffc3f1,
+       0x880, 0xc0083070,
+       0x884, 0x000004d5,
+       0x888, 0x00000000,
+       0x88c, 0xccc000c0,
+       0x890, 0x00000800,
+       0x894, 0xfffffffe,
+       0x898, 0x40302010,
+       0x89c, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90c, 0x81121111,
+       0xa00, 0x00d047c8,
+       0xa04, 0x80ff000c,
+       0xa08, 0x8c838300,
+       0xa0c, 0x2e68120f,
+       0xa10, 0x9500bb78,
+       0xa14, 0x11144028,
+       0xa18, 0x00881117,
+       0xa1c, 0x89140f00,
+       0xa20, 0x15160000,
+       0xa24, 0x070b0f12,
+       0xa28, 0x00000104,
+       0xa2c, 0x00d30000,
+       0xa70, 0x101fbf00,
+       0xa74, 0x00000007,
+       0xc00, 0x48071d40,
+       0xc04, 0x03a05611,
+       0xc08, 0x000000e4,
+       0xc0c, 0x6c6c6c6c,
+       0xc10, 0x08800000,
+       0xc14, 0x40000100,
+       0xc18, 0x08800000,
+       0xc1c, 0x40000100,
+       0xc20, 0x00000000,
+       0xc24, 0x00000000,
+       0xc28, 0x00000000,
+       0xc2c, 0x00000000,
+       0xc30, 0x69e9ac44,
+       0xc34, 0x469652cf,
+       0xc38, 0x49795994,
+       0xc3c, 0x0a97971c,
+       0xc40, 0x1f7c403f,
+       0xc44, 0x000100b7,
+       0xc48, 0xec020107,
+       0xc4c, 0x007f037f,
+       0xc50, 0x6954342e,
+       0xc54, 0x43bc0094,
+       0xc58, 0x6954342f,
+       0xc5c, 0x433c0094,
+       0xc60, 0x00000000,
+       0xc64, 0x5116848b,
+       0xc68, 0x47c00bff,
+       0xc6c, 0x00000036,
+       0xc70, 0x2c46000d,
+       0xc74, 0x018610db,
+       0xc78, 0x0000001f,
+       0xc7c, 0x00b91612,
+       0xc80, 0x24000090,
+       0xc84, 0x20f60000,
+       0xc88, 0x24000090,
+       0xc8c, 0x20200000,
+       0xc90, 0x00121820,
+       0xc94, 0x00000000,
+       0xc98, 0x00121820,
+       0xc9c, 0x00007f7f,
+       0xca0, 0x00000000,
+       0xca4, 0x00000080,
+       0xca8, 0x00000000,
+       0xcac, 0x00000000,
+       0xcb0, 0x00000000,
+       0xcb4, 0x00000000,
+       0xcb8, 0x00000000,
+       0xcbc, 0x28000000,
+       0xcc0, 0x00000000,
+       0xcc4, 0x00000000,
+       0xcc8, 0x00000000,
+       0xccc, 0x00000000,
+       0xcd0, 0x00000000,
+       0xcd4, 0x00000000,
+       0xcd8, 0x64b22427,
+       0xcdc, 0x00766932,
+       0xce0, 0x00222222,
+       0xce4, 0x00000000,
+       0xce8, 0x37644302,
+       0xcec, 0x2f97d40c,
+       0xd00, 0x00080740,
+       0xd04, 0x00020401,
+       0xd08, 0x0000907f,
+       0xd0c, 0x20010201,
+       0xd10, 0xa0633333,
+       0xd14, 0x3333bc43,
+       0xd18, 0x7a8f5b6b,
+       0xd2c, 0xcc979975,
+       0xd30, 0x00000000,
+       0xd34, 0x80608000,
+       0xd38, 0x00000000,
+       0xd3c, 0x00027293,
+       0xd40, 0x00000000,
+       0xd44, 0x00000000,
+       0xd48, 0x00000000,
+       0xd4c, 0x00000000,
+       0xd50, 0x6437140a,
+       0xd54, 0x00000000,
+       0xd58, 0x00000000,
+       0xd5c, 0x30032064,
+       0xd60, 0x4653de68,
+       0xd64, 0x04518a3c,
+       0xd68, 0x00002101,
+       0xd6c, 0x2a201c16,
+       0xd70, 0x1812362e,
+       0xd74, 0x322c2220,
+       0xd78, 0x000e3c24,
+       0xe00, 0x24242424,
+       0xe04, 0x24242424,
+       0xe08, 0x03902024,
+       0xe10, 0x24242424,
+       0xe14, 0x24242424,
+       0xe18, 0x24242424,
+       0xe1c, 0x24242424,
+       0xe28, 0x00000000,
+       0xe30, 0x1000dc1f,
+       0xe34, 0x10008c1f,
+       0xe38, 0x02140102,
+       0xe3c, 0x681604c2,
+       0xe40, 0x01007c00,
+       0xe44, 0x01004800,
+       0xe48, 0xfb000000,
+       0xe4c, 0x000028d1,
+       0xe50, 0x1000dc1f,
+       0xe54, 0x10008c1f,
+       0xe58, 0x02140102,
+       0xe5c, 0x28160d05,
+       0xe60, 0x00000008,
+       0xe68, 0x001b25a4,
+       0xe6c, 0x631b25a0,
+       0xe70, 0x631b25a0,
+       0xe74, 0x081b25a0,
+       0xe78, 0x081b25a0,
+       0xe7c, 0x081b25a0,
+       0xe80, 0x081b25a0,
+       0xe84, 0x631b25a0,
+       0xe88, 0x081b25a0,
+       0xe8c, 0x631b25a0,
+       0xed0, 0x631b25a0,
+       0xed4, 0x631b25a0,
+       0xed8, 0x631b25a0,
+       0xedc, 0x001b25a0,
+       0xee0, 0x001b25a0,
+       0xeec, 0x6b1b25a0,
+       0xee8, 0x31555448,
+       0xf14, 0x00000003,
+       0xf4c, 0x00000000,
+       0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
+       0xe00, 0xffffffff, 0x06080808,
+       0xe04, 0xffffffff, 0x00040406,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x04060608,
+       0xe14, 0xffffffff, 0x00020204,
+       0xe18, 0xffffffff, 0x04060608,
+       0xe1c, 0xffffffff, 0x00020204,
+       0x830, 0xffffffff, 0x06080808,
+       0x834, 0xffffffff, 0x00040406,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x04060608,
+       0x848, 0xffffffff, 0x00020204,
+       0x84c, 0xffffffff, 0x04060608,
+       0x868, 0xffffffff, 0x00020204,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb0,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e529,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x019, 0x00000000,
+       0x01a, 0x00000255,
+       0x01b, 0x00060a00,
+       0x01c, 0x000fc378,
+       0x01d, 0x000a1250,
+       0x01e, 0x0004445f,
+       0x01f, 0x00080001,
+       0x020, 0x0000b614,
+       0x021, 0x0006c000,
+       0x022, 0x0000083c,
+       0x023, 0x00001558,
+       0x024, 0x00000060,
+       0x025, 0x00000483,
+       0x026, 0x0004f000,
+       0x027, 0x000ec7d9,
+       0x028, 0x000977c0,
+       0x029, 0x00004783,
+       0x02a, 0x00000001,
+       0x02b, 0x00021334,
+       0x02a, 0x00000000,
+       0x02b, 0x00000054,
+       0x02a, 0x00000001,
+       0x02b, 0x00000808,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000c,
+       0x02a, 0x00000002,
+       0x02b, 0x00000808,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000003,
+       0x02b, 0x00000808,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000004,
+       0x02b, 0x00000808,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000005,
+       0x02b, 0x00000808,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000006,
+       0x02b, 0x00000709,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000007,
+       0x02b, 0x00000709,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000008,
+       0x02b, 0x0000060a,
+       0x02b, 0x0004b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000009,
+       0x02b, 0x0000060a,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000a,
+       0x02b, 0x0000060a,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000b,
+       0x02b, 0x0000060a,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000c,
+       0x02b, 0x0000060a,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000d,
+       0x02b, 0x0000060a,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000e,
+       0x02b, 0x0000050b,
+       0x02b, 0x00066666,
+       0x02c, 0x0000001a,
+       0x02a, 0x000e0000,
+       0x010, 0x0004000f,
+       0x011, 0x000e31fc,
+       0x010, 0x0006000f,
+       0x011, 0x000ff9f8,
+       0x010, 0x0002000f,
+       0x011, 0x000203f9,
+       0x010, 0x0003000f,
+       0x011, 0x000ff500,
+       0x010, 0x00000000,
+       0x011, 0x00000000,
+       0x010, 0x0008000f,
+       0x011, 0x0003f100,
+       0x010, 0x0009000f,
+       0x011, 0x00023100,
+       0x012, 0x000d8000,
+       0x012, 0x00090000,
+       0x012, 0x00051000,
+       0x012, 0x00012000,
+       0x013, 0x00028fb4,
+       0x013, 0x00024fa8,
+       0x013, 0x000207a4,
+       0x013, 0x0001c798,
+       0x013, 0x000183a4,
+       0x013, 0x00014398,
+       0x013, 0x000101a4,
+       0x013, 0x0000c198,
+       0x013, 0x000080a4,
+       0x013, 0x00004098,
+       0x013, 0x00000000,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f405,
+       0x015, 0x0004f405,
+       0x015, 0x0008f405,
+       0x015, 0x000cf405,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+       0x000, 0x00010159,
+       0x018, 0x0000f401,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01f, 0x00080003,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01e, 0x00044457,
+       0x01f, 0x00080000,
+       0x000, 0x00030159,
+};
+
+u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
+       0xc78, 0x7b000001,
+       0xc78, 0x7b010001,
+       0xc78, 0x7b020001,
+       0xc78, 0x7b030001,
+       0xc78, 0x7b040001,
+       0xc78, 0x7b050001,
+       0xc78, 0x7b060001,
+       0xc78, 0x7b070001,
+       0xc78, 0x7b080001,
+       0xc78, 0x7a090001,
+       0xc78, 0x790a0001,
+       0xc78, 0x780b0001,
+       0xc78, 0x770c0001,
+       0xc78, 0x760d0001,
+       0xc78, 0x750e0001,
+       0xc78, 0x740f0001,
+       0xc78, 0x73100001,
+       0xc78, 0x72110001,
+       0xc78, 0x71120001,
+       0xc78, 0x70130001,
+       0xc78, 0x6f140001,
+       0xc78, 0x6e150001,
+       0xc78, 0x6d160001,
+       0xc78, 0x6c170001,
+       0xc78, 0x6b180001,
+       0xc78, 0x6a190001,
+       0xc78, 0x691a0001,
+       0xc78, 0x681b0001,
+       0xc78, 0x671c0001,
+       0xc78, 0x661d0001,
+       0xc78, 0x651e0001,
+       0xc78, 0x641f0001,
+       0xc78, 0x63200001,
+       0xc78, 0x62210001,
+       0xc78, 0x61220001,
+       0xc78, 0x60230001,
+       0xc78, 0x46240001,
+       0xc78, 0x45250001,
+       0xc78, 0x44260001,
+       0xc78, 0x43270001,
+       0xc78, 0x42280001,
+       0xc78, 0x41290001,
+       0xc78, 0x402a0001,
+       0xc78, 0x262b0001,
+       0xc78, 0x252c0001,
+       0xc78, 0x242d0001,
+       0xc78, 0x232e0001,
+       0xc78, 0x222f0001,
+       0xc78, 0x21300001,
+       0xc78, 0x20310001,
+       0xc78, 0x06320001,
+       0xc78, 0x05330001,
+       0xc78, 0x04340001,
+       0xc78, 0x03350001,
+       0xc78, 0x02360001,
+       0xc78, 0x01370001,
+       0xc78, 0x00380001,
+       0xc78, 0x00390001,
+       0xc78, 0x003a0001,
+       0xc78, 0x003b0001,
+       0xc78, 0x003c0001,
+       0xc78, 0x003d0001,
+       0xc78, 0x003e0001,
+       0xc78, 0x003f0001,
+       0xc78, 0x7b400001,
+       0xc78, 0x7b410001,
+       0xc78, 0x7b420001,
+       0xc78, 0x7b430001,
+       0xc78, 0x7b440001,
+       0xc78, 0x7b450001,
+       0xc78, 0x7b460001,
+       0xc78, 0x7b470001,
+       0xc78, 0x7b480001,
+       0xc78, 0x7a490001,
+       0xc78, 0x794a0001,
+       0xc78, 0x784b0001,
+       0xc78, 0x774c0001,
+       0xc78, 0x764d0001,
+       0xc78, 0x754e0001,
+       0xc78, 0x744f0001,
+       0xc78, 0x73500001,
+       0xc78, 0x72510001,
+       0xc78, 0x71520001,
+       0xc78, 0x70530001,
+       0xc78, 0x6f540001,
+       0xc78, 0x6e550001,
+       0xc78, 0x6d560001,
+       0xc78, 0x6c570001,
+       0xc78, 0x6b580001,
+       0xc78, 0x6a590001,
+       0xc78, 0x695a0001,
+       0xc78, 0x685b0001,
+       0xc78, 0x675c0001,
+       0xc78, 0x665d0001,
+       0xc78, 0x655e0001,
+       0xc78, 0x645f0001,
+       0xc78, 0x63600001,
+       0xc78, 0x62610001,
+       0xc78, 0x61620001,
+       0xc78, 0x60630001,
+       0xc78, 0x46640001,
+       0xc78, 0x45650001,
+       0xc78, 0x44660001,
+       0xc78, 0x43670001,
+       0xc78, 0x42680001,
+       0xc78, 0x41690001,
+       0xc78, 0x406a0001,
+       0xc78, 0x266b0001,
+       0xc78, 0x256c0001,
+       0xc78, 0x246d0001,
+       0xc78, 0x236e0001,
+       0xc78, 0x226f0001,
+       0xc78, 0x21700001,
+       0xc78, 0x20710001,
+       0xc78, 0x06720001,
+       0xc78, 0x05730001,
+       0xc78, 0x04740001,
+       0xc78, 0x03750001,
+       0xc78, 0x02760001,
+       0xc78, 0x01770001,
+       0xc78, 0x00780001,
+       0xc78, 0x00790001,
+       0xc78, 0x007a0001,
+       0xc78, 0x007b0001,
+       0xc78, 0x007c0001,
+       0xc78, 0x007d0001,
+       0xc78, 0x007e0001,
+       0xc78, 0x007f0001,
+       0xc78, 0x3800001e,
+       0xc78, 0x3801001e,
+       0xc78, 0x3802001e,
+       0xc78, 0x3803001e,
+       0xc78, 0x3804001e,
+       0xc78, 0x3805001e,
+       0xc78, 0x3806001e,
+       0xc78, 0x3807001e,
+       0xc78, 0x3808001e,
+       0xc78, 0x3c09001e,
+       0xc78, 0x3e0a001e,
+       0xc78, 0x400b001e,
+       0xc78, 0x440c001e,
+       0xc78, 0x480d001e,
+       0xc78, 0x4c0e001e,
+       0xc78, 0x500f001e,
+       0xc78, 0x5210001e,
+       0xc78, 0x5611001e,
+       0xc78, 0x5a12001e,
+       0xc78, 0x5e13001e,
+       0xc78, 0x6014001e,
+       0xc78, 0x6015001e,
+       0xc78, 0x6016001e,
+       0xc78, 0x6217001e,
+       0xc78, 0x6218001e,
+       0xc78, 0x6219001e,
+       0xc78, 0x621a001e,
+       0xc78, 0x621b001e,
+       0xc78, 0x621c001e,
+       0xc78, 0x621d001e,
+       0xc78, 0x621e001e,
+       0xc78, 0x621f001e,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
new file mode 100644 (file)
index 0000000..c3d5cd8
--- /dev/null
@@ -0,0 +1,71 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TABLE__H_
+#define __RTL92CU_TABLE__H_
+
+#include <linux/types.h>
+
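+/*
+ * Layout note: the PHY, MAC, radio and AGC tables are flat u32 arrays of
+ * consecutive (register address, value) pairs, while the *_PG tables use
+ * (address, bitmask, value) triples; the length constants below therefore
+ * count u32 words, not table entries.
+ */
+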
+#define RTL8192CUPHY_REG_2TARRAY_LENGTH                374
+extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH];
+#define RTL8192CUPHY_REG_1TARRAY_LENGTH                374
+extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH];
+
+#define RTL8192CUPHY_REG_ARRAY_PGLENGTH                336
+extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH];
+
+#define RTL8192CURADIOA_2TARRAYLENGTH  282
+extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH];
+#define RTL8192CURADIOB_2TARRAYLENGTH  78
+extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH];
+#define RTL8192CURADIOA_1TARRAYLENGTH  282
+extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH];
+#define RTL8192CURADIOB_1TARRAYLENGTH  1
+extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH];
+
+#define RTL8192CUMAC_2T_ARRAYLENGTH            172
+extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH];
+
+#define RTL8192CUAGCTAB_2TARRAYLENGTH  320
+extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
+#define RTL8192CUAGCTAB_1TARRAYLENGTH  320
+extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];
+
+#define RTL8192CUPHY_REG_1T_HPArrayLength 378
+extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];
+
+#define RTL8192CUPHY_REG_Array_PG_HPLength 336
+extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];
+
+#define RTL8192CURadioA_1T_HPArrayLength 282
+extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
+#define RTL8192CUAGCTAB_1T_HPArrayLength 320
+extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
new file mode 100644 (file)
index 0000000..d0b0d43
--- /dev/null
@@ -0,0 +1,687 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
+{
+       u8 ep_cfg, txqsele;
+       u8 ep_nums = 0;
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+       rtlusb->out_queue_sel = 0;
+       ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
+       ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
+       switch (ep_cfg) {
+       case 0:         /* 2 bulk OUT, 1 bulk IN */
+       case 3:
+               rtlusb->out_queue_sel  = TX_SELE_HQ | TX_SELE_LQ;
+               ep_nums = 2;
+               break;
+       case 1: /* 1 bulk IN/OUT => map all endpoints to Low queue */
+       case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoints to High queue */
+               txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
+               if (txqsele & 0x0F) /* map all endpoints to High queue */
+                       rtlusb->out_queue_sel = TX_SELE_HQ;
+               else if (txqsele & 0xF0) /* map all endpoints to Low queue */
+                       rtlusb->out_queue_sel = TX_SELE_LQ;
+               ep_nums = 1;
+               break;
+       default:
+               break;
+       }
+       return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
+static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
+{
+       u8 ep_cfg;
+       u8 ep_nums = 0;
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+       rtlusb->out_queue_sel = 0;
+       /* Normal and High queue */
+       ep_cfg =  rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
+       if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+               rtlusb->out_queue_sel |= TX_SELE_HQ;
+               ep_nums++;
+       }
+       if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
+               rtlusb->out_queue_sel |= TX_SELE_NQ;
+               ep_nums++;
+       }
+       /* Low queue */
+       ep_cfg =  rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
+       if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+               rtlusb->out_queue_sel |= TX_SELE_LQ;
+               ep_nums++;
+       }
+       return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
+static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
+                            bool  bwificfg, struct rtl_ep_map *ep_map)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (bwificfg) { /* for WMM */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB Chip-B & WMM Setting.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       } else { /* typical setting */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB typical Setting.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_VO]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       }
+}
+
+static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool  bwificfg,
+                              struct rtl_ep_map *ep_map)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       if (bwificfg) { /* for WMM */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB 3EP Setting for WMM.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 5;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VO]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       } else { /* typical setting */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB 3EP Setting for typical.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 5;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 5;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VO]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       }
+}
+
+static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
+{
+       ep_map->ep_mapping[RTL_TXQ_BE]  = 2;
+       ep_map->ep_mapping[RTL_TXQ_BK]  = 2;
+       ep_map->ep_mapping[RTL_TXQ_VI]  = 2;
+       ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+       ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+       ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+       ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+}
+static int _out_ep_mapping(struct ieee80211_hw *hw)
+{
+       int err = 0;
+       bool bIsChipN, bwificfg = false;
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+       struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
+
+       bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
+       switch (rtlusb->out_ep_nums) {
+       case 2:
+               _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
+               break;
+       case 3:
+               /* Test chip doesn't support three out EPs. */
+               if (!bIsChipN) {
+                       err  =  -EINVAL;
+                       goto err_out;
+               }
+               _ThreeOutEpMapping(hw, bIsChipN, ep_map);
+               break;
+       case 1:
+               _OneOutEpMapping(hw, ep_map);
+               break;
+       default:
+               err  =  -EINVAL;
+               break;
+       }
+err_out:
+       return err;
+
+}
+/* endpoint mapping */
+int  rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       int error = 0;
+       if (likely(IS_NORMAL_CHIP(rtlhal->version)))
+               error = _ConfigVerNOutEP(hw);
+       else
+               error = _ConfigVerTOutEP(hw);
+       if (error)
+               goto err_out;
+       error = _out_ep_mapping(hw);
+       if (error)
+               goto err_out;
+err_out:
+       return error;
+}
+
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
+{
+       u16 hw_queue_index;
+
+       if (unlikely(ieee80211_is_beacon(fc))) {
+               hw_queue_index = RTL_TXQ_BCN;
+               goto out;
+       }
+       if (ieee80211_is_mgmt(fc)) {
+               hw_queue_index = RTL_TXQ_MGT;
+               goto out;
+       }
+       switch (mac80211_queue_index) {
+       case 0:
+               hw_queue_index = RTL_TXQ_VO;
+               break;
+       case 1:
+               hw_queue_index = RTL_TXQ_VI;
+               break;
+       case 2:
+               hw_queue_index = RTL_TXQ_BE;
+               break;
+       case 3:
+               hw_queue_index = RTL_TXQ_BK;
+               break;
+       default:
+               hw_queue_index = RTL_TXQ_BE;
+               RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
+                         mac80211_queue_index));
+               break;
+       }
+out:
+       return hw_queue_index;
+}
+
+static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
+                                        __le16 fc, u16 mac80211_queue_index)
+{
+       enum rtl_desc_qsel qsel;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (unlikely(ieee80211_is_beacon(fc))) {
+               qsel = QSLT_BEACON;
+               goto out;
+       }
+       if (ieee80211_is_mgmt(fc)) {
+               qsel = QSLT_MGNT;
+               goto out;
+       }
+       switch (mac80211_queue_index) {
+       case 0: /* VO */
+               qsel = QSLT_VO;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("VO queue, set qsel = 0x%x\n", QSLT_VO));
+               break;
+       case 1: /* VI */
+               qsel = QSLT_VI;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("VI queue, set qsel = 0x%x\n", QSLT_VI));
+               break;
+       case 3: /* BK */
+               qsel = QSLT_BK;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("BK queue, set qsel = 0x%x\n", QSLT_BK));
+               break;
+       case 2: /* BE */
+       default:
+               qsel = QSLT_BE;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("BE queue, set qsel = 0x%x\n", QSLT_BE));
+               break;
+       }
+out:
+       return qsel;
+}
+
+/* =============================================================== */
+
+/*----------------------------------------------------------------------
+ *
+ *     Rx handler
+ *
+ *---------------------------------------------------------------------- */
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+                          struct rtl_stats *stats,
+                          struct ieee80211_rx_status *rx_status,
+                          u8 *p_desc, struct sk_buff *skb)
+{
+       struct rx_fwinfo_92c *p_drvinfo;
+       struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+       u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+       stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+       stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
+                                RX_DRV_INFO_SIZE_UNIT;
+       stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+       stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+       stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+       stats->hwerror = (stats->crc | stats->icv);
+       stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+       stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
+       stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+       stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+       stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+                                  && (GET_RX_DESC_FAGGR(pdesc) == 1));
+       stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+       stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+       rx_status->freq = hw->conf.channel->center_freq;
+       rx_status->band = hw->conf.channel->band;
+       if (GET_RX_DESC_CRC32(pdesc))
+               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       if (!GET_RX_DESC_SWDEC(pdesc))
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+       if (GET_RX_DESC_BW(pdesc))
+               rx_status->flag |= RX_FLAG_40MHZ;
+       if (GET_RX_DESC_RX_HT(pdesc))
+               rx_status->flag |= RX_FLAG_HT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+       if (stats->decrypted)
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+       rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+                                               (bool)GET_RX_DESC_RX_HT(pdesc),
+                                               (u8)GET_RX_DESC_RX_MCS(pdesc),
+                                               (bool)GET_RX_DESC_PAGGR(pdesc));
+       rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+       if (phystatus) {
+               p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+               rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+                                                p_drvinfo);
+       }
+       /*rx_status->qual = stats->signal; */
+       rx_status->signal = stats->rssi + 10;
+       /*rx_status->noise = -stats->noise; */
+       return true;
+}
+
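+/* Length unit (in bytes) of the rx driver-info area; same 8-byte granularity
+ * as RX_DRV_INFO_SIZE_UNIT used by rtl92cu_rx_query_desc() above. */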
+#define RTL_RX_DRV_INFO_UNIT           8
+
+static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct ieee80211_rx_status *rx_status =
+                (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
+       u32 skb_len, pkt_len, drvinfo_len;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *rxdesc;
+       struct rtl_stats stats = {
+               .signal = 0,
+               .noise = -98,
+               .rate = 0,
+       };
+       struct rx_fwinfo_92c *p_drvinfo;
+       bool bv;
+       __le16 fc;
+       struct ieee80211_hdr *hdr;
+
+       memset(rx_status, 0, sizeof(*rx_status));
+       rxdesc  = skb->data;
+       skb_len = skb->len;
+       drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
+       pkt_len         = GET_RX_DESC_PKT_LEN(rxdesc);
+       /* TODO: Error recovery. drop this skb or something. */
+       WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
+       stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
+       stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
+                               RX_DRV_INFO_SIZE_UNIT;
+       stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
+       stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
+       stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
+       stats.hwerror = (stats.crc | stats.icv);
+       stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
+       stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
+       stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
+       stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
+                                  && (GET_RX_DESC_FAGGR(rxdesc) == 1));
+       stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
+       stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+       /* TODO: is center_freq changed when doing scan? */
+       /* TODO: Shall we add protection or just skip those two step? */
+       rx_status->freq = hw->conf.channel->center_freq;
+       rx_status->band = hw->conf.channel->band;
+       if (GET_RX_DESC_CRC32(rxdesc))
+               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       if (!GET_RX_DESC_SWDEC(rxdesc))
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+       if (GET_RX_DESC_BW(rxdesc))
+               rx_status->flag |= RX_FLAG_40MHZ;
+       if (GET_RX_DESC_RX_HT(rxdesc))
+               rx_status->flag |= RX_FLAG_HT;
+       /* Data rate */
+       rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+                                               (bool)GET_RX_DESC_RX_HT(rxdesc),
+                                               (u8)GET_RX_DESC_RX_MCS(rxdesc),
+                                               (bool)GET_RX_DESC_PAGGR(rxdesc)
+                                               );
+       /*  There is a phy status after this rx descriptor. */
+       if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
+               p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
+               rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
+                                (struct rx_desc_92c *)rxdesc, p_drvinfo);
+       }
+       skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
+       hdr = (struct ieee80211_hdr *)(skb->data);
+       fc = hdr->frame_control;
+       bv = ieee80211_is_probe_resp(fc);
+       if (bv)
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("Got probe response frame.\n"));
+       if (ieee80211_is_beacon(fc))
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("Got beacon frame.\n"));
+       if (ieee80211_is_data(fc))
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                ("Frame: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
+                "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+                (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
+                (u32)hdr->addr1[5]));
+       memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
+       ieee80211_rx_irqsafe(hw, skb);
+}
+
+void  rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb)
+{
+       _rtl_rx_process(hw, skb);
+}
+
+void rtl8192c_rx_segregate_hdl(
+       struct ieee80211_hw *hw,
+       struct sk_buff *skb,
+       struct sk_buff_head *skb_list)
+{
+}
+
+/*----------------------------------------------------------------------
+ *
+ *     Tx handler
+ *
+ *---------------------------------------------------------------------- */
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff  *skb)
+{
+}
+
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+                        struct sk_buff *skb)
+{
+       return 0;
+}
+
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
+                                          struct sk_buff_head *list)
+{
+       return skb_dequeue(list);
+}
+
+/*======================================== trx ===============================*/
+
+static void _rtl_fill_usb_tx_desc(u8 *txdesc)
+{
+       SET_TX_DESC_OWN(txdesc, 1);
+       SET_TX_DESC_LAST_SEG(txdesc, 1);
+       SET_TX_DESC_FIRST_SEG(txdesc, 1);
+}
+/**
+ *     For HW recovery information
+ */
+static void _rtl_tx_desc_checksum(u8 *txdesc)
+{
+       u16 *ptr = (u16 *)txdesc;
+       u16     checksum = 0;
+       u32 index;
+
+       /* Clear first */
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
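+       /* XOR the first sixteen 16-bit words (32 bytes) of the descriptor;
+        * the checksum field itself was just cleared, so it does not affect
+        * the result. */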
+       for (index = 0; index < 16; index++)
+               checksum = checksum ^ (*(ptr + index));
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+}
+
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+                         struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         unsigned int queue_index)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool defaultadapter = true;
+       struct ieee80211_sta *sta;
+       struct rtl_tcb_desc tcb_desc;
+       u8 *qc = ieee80211_get_qos_ctl(hdr);
+       u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+       u16 seq_number;
+       __le16 fc = hdr->frame_control;
+       u8 rate_flag = info->control.rates[0].flags;
+       u16 pktlen = skb->len;
+       enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
+                                               skb_get_queue_mapping(skb));
+       u8 *txdesc;
+
+       seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+       rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
+       txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+       memset(txdesc, 0, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
+       SET_TX_DESC_LINIP(txdesc, 0);
+       SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
+       SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate);
+       if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
+               SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
+       if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
+                   info->flags & IEEE80211_TX_CTL_AMPDU) {
+               SET_TX_DESC_AGG_ENABLE(txdesc, 1);
+               SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
+       } else {
+               SET_TX_DESC_AGG_BREAK(txdesc, 1);
+       }
+       SET_TX_DESC_SEQ(txdesc, seq_number);
+       SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable &&
+                              !tcb_desc.cts_enable) ? 1 : 0));
+       SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable ||
+                                 tcb_desc.cts_enable) ? 1 : 0));
+       SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+       SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
+       SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate);
+       SET_TX_DESC_RTS_BW(txdesc, 0);
+       SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc);
+       SET_TX_DESC_RTS_SHORT(txdesc,
+                             ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
+                              (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+                              : (tcb_desc.rts_use_shortgi ? 1 : 0)));
+       if (mac->bw_40) {
+               if (tcb_desc.packet_bw) {
+                       SET_TX_DESC_DATA_BW(txdesc, 1);
+                       SET_TX_DESC_DATA_SC(txdesc, 3);
+               } else {
+                       SET_TX_DESC_DATA_BW(txdesc, 0);
+                       if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
+                               SET_TX_DESC_DATA_SC(txdesc,
+                                                   mac->cur_40_prime_sc);
+               }
+       } else {
+               SET_TX_DESC_DATA_BW(txdesc, 0);
+               SET_TX_DESC_DATA_SC(txdesc, 0);
+       }
+       rcu_read_lock();
+       sta = ieee80211_find_sta(mac->vif, mac->bssid);
+       if (sta) {
+               u8 ampdu_density = sta->ht_cap.ampdu_density;
+               SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
+       }
+       rcu_read_unlock();
+       if (info->control.hw_key) {
+               struct ieee80211_key_conf *keyconf = info->control.hw_key;
+               switch (keyconf->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+               case WLAN_CIPHER_SUITE_TKIP:
+                       SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
+                       break;
+               default:
+                       SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
+                       break;
+               }
+       }
+       SET_TX_DESC_PKT_ID(txdesc, 0);
+       SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
+       SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
+       SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
+       SET_TX_DESC_DISABLE_FB(txdesc, 0);
+       SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0);
+       if (ieee80211_is_data_qos(fc)) {
+               if (mac->rdg_en) {
+                       RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                                ("Enable RDG function.\n"));
+                       SET_TX_DESC_RDG_ENABLE(txdesc, 1);
+                       SET_TX_DESC_HTC(txdesc, 1);
+               }
+       }
+       if (rtlpriv->dm.useramask) {
+               SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index);
+               SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id);
+       } else {
+               SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index);
+               SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index);
+       }
+       if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+             ppsc->fwctrl_lps) {
+               SET_TX_DESC_HWSEQ_EN(txdesc, 1);
+               SET_TX_DESC_PKT_ID(txdesc, 8);
+               if (!defaultadapter)
+                       SET_TX_DESC_QOS(txdesc, 1);
+       }
+       if (ieee80211_has_morefrags(fc))
+               SET_TX_DESC_MORE_FRAG(txdesc, 1);
+       if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+           is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+               SET_TX_DESC_BMC(txdesc, 1);
+       _rtl_fill_usb_tx_desc(txdesc);
+       _rtl_tx_desc_checksum(txdesc);
+       RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
+}
+
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
+                             u32 buffer_len, bool bIsPsPoll)
+{
+       /* Clear all status */
+       memset(pDesc, 0, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
+       SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
+       SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
+       SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
+       SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+       /* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed
+        * to an erroneous value by the HW. */
+       if (bIsPsPoll) {
+               SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
+       } else {
+               SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
+               SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */
+       }
+       SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
+       SET_TX_DESC_OWN(pDesc, 1);
+       SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
+       _rtl_tx_desc_checksum(pDesc);
+}
+
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+                            u8 *pdesc, bool firstseg,
+                            bool lastseg, struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 fw_queue = QSLT_BEACON;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       __le16 fc = hdr->frame_control;
+
+       memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
+       if (firstseg)
+               SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+       SET_TX_DESC_SEQ(pdesc, 0);
+       SET_TX_DESC_LINIP(pdesc, 0);
+       SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+       SET_TX_DESC_FIRST_SEG(pdesc, 1);
+       SET_TX_DESC_LAST_SEG(pdesc, 1);
+       SET_TX_DESC_RATE_ID(pdesc, 7);
+       SET_TX_DESC_MACID(pdesc, 0);
+       SET_TX_DESC_OWN(pdesc, 1);
+       SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+       SET_TX_DESC_FIRST_SEG(pdesc, 1);
+       SET_TX_DESC_LAST_SEG(pdesc, 1);
+       SET_TX_DESC_OFFSET(pdesc, 0x20);
+       SET_TX_DESC_USE_RATE(pdesc, 1);
+       if (!ieee80211_is_data_qos(fc)) {
+               SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+               SET_TX_DESC_PKT_ID(pdesc, 8);
+       }
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
+                     pdesc, RTL_TX_DESC_SIZE);
+}
+
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
new file mode 100644 (file)
index 0000000..b396d46
--- /dev/null
@@ -0,0 +1,430 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TRX_H__
+#define __RTL92CU_TRX_H__
+
+#define RTL92C_USB_BULK_IN_NUM                 1
+#define RTL92C_NUM_RX_URBS                     8
+#define RTL92C_NUM_TX_URBS                     32
+
+#define RTL92C_SIZE_MAX_RX_BUFFER              15360   /* 8192 */
+#define RX_DRV_INFO_SIZE_UNIT                  8
+
+enum usb_rx_agg_mode {
+       USB_RX_AGG_DISABLE,
+       USB_RX_AGG_DMA,
+       USB_RX_AGG_USB,
+       USB_RX_AGG_DMA_USB
+};
+
+#define TX_SELE_HQ                             BIT(0)  /* High Queue */
+#define TX_SELE_LQ                             BIT(1)  /* Low Queue */
+#define TX_SELE_NQ                             BIT(2)  /* Normal Queue */
+
+#define RTL_USB_TX_AGG_NUM_DESC                        5
+
+#define RTL_USB_RX_AGG_PAGE_NUM                        4
+#define RTL_USB_RX_AGG_PAGE_TIMEOUT            3
+
+#define RTL_USB_RX_AGG_BLOCK_NUM               5
+#define RTL_USB_RX_AGG_BLOCK_TIMEOUT           3
+
+/*======================== rx status =========================================*/
+
+struct rx_drv_info_92c {
+       /*
+        * Driver info contains the PHY status and other variable-size
+        * info; the PHY status content is laid out as below.
+        */
+
+       /* DWORD 0 */
+       u8 gain_trsw[4];
+
+       /* DWORD 1 */
+       u8 pwdb_all;
+       u8 cfosho[4];
+
+       /* DWORD 2 */
+       u8 cfotail[4];
+
+       /* DWORD 3 */
+       s8 rxevm[2];
+       s8 rxsnr[4];
+
+       /* DWORD 4 */
+       u8 pdsnr[2];
+
+       /* DWORD 5 */
+       u8 csi_current[2];
+       u8 csi_target[2];
+
+       /* DWORD 6 */
+       u8 sigevm;
+       u8 max_ex_pwr;
+       u8 ex_intf_flag:1;
+       u8 sgi_en:1;
+       u8 rxsc:2;
+       u8 reserve:4;
+} __packed;
+
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits)            \
+       ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+       BIT_LEN_MASK_32(__bits))
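+
+/*
+ * Worked example (illustrative values only): if the first descriptor dword
+ * is 0x04000064 in host order, then
+ *   SHIFT_AND_MASK_LE(desc, 0, 14)  -> 0x64 (packet length 100)
+ *   SHIFT_AND_MASK_LE(desc, 14, 1)  -> 0    (no CRC32 error)
+ *   SHIFT_AND_MASK_LE(desc, 26, 1)  -> 1    (PHY status present)
+ */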
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)     \
+       (*(__le32 *)(__pdesc) =                                 \
+       (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) &     \
+       (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) |                \
+       (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
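+
+/*
+ * Worked example (illustrative values only): starting from an all-zero
+ * dword,
+ *   SET_BITS_OFFSET_LE(desc, 16, 8, 0x20)
+ * leaves the word as cpu_to_le32(0x00200000), i.e. only bits 16..23 carry
+ * the value 0x20; this is how SET_TX_DESC_OFFSET() below records the
+ * 32-byte descriptor offset.
+ */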
+
+/* macros to read various fields in RX descriptor */
+
+/* DWORD 0 */
+#define GET_RX_DESC_PKT_LEN(__rxdesc)          \
+       SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
+#define GET_RX_DESC_CRC32(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
+#define GET_RX_DESC_ICV(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
+#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc)     \
+       SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
+#define GET_RX_DESC_QOS(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
+#define GET_RX_DESC_PHY_STATUS(__rxdesc)       \
+       SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
+#define GET_RX_DESC_LAST_SEG(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
+#define GET_RX_DESC_FIRST_SEG(__rxdesc)                \
+       SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
+#define GET_RX_DESC_EOR(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
+#define GET_RX_DESC_OWN(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
+
+/* DWORD 1 */
+#define GET_RX_DESC_MACID(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
+#define GET_RX_DESC_PAGGR(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__rxdesc)           \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__rxdesc)           \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
+#define GET_RX_DESC_MORE_DATA(__rxdesc)                \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
+#define GET_RX_DESC_MORE_FRAG(__rxdesc)                \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__rxdesc)             \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__rxdesc)               \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__rxdesc)               \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
+
+/* DWORD 2 */
+#define GET_RX_DESC_SEQ(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__rxdesc)             \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
+#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc)   \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
+#define GET_RX_DESC_NEXT_IND(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
+
+/* DWORD 3 */
+#define GET_RX_DESC_RX_MCS(__rxdesc)           \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
+#define GET_RX_DESC_RX_HT(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
+#define GET_RX_DESC_AMSDU(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
+#define GET_RX_DESC_SPLCP(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__rxdesc)               \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
+#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc)      \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
+#define GET_RX_DESC_IP_CHK_RPT(__rxdesc)       \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
+#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc)    \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
+#define GET_RX_DESC_HWPC_ERR(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
+
+/* DWORD 4 */
+#define GET_RX_DESC_IV1(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
+
+/* DWORD 5 */
+#define GET_RX_DESC_TSFL(__rxdesc)             \
+       SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
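
As a hedged sketch of how these getters are consumed, the dword-0 parsing done by the RX query path (rtl92cu_rx_query_desc in trx.c) looks roughly like the code below; the helper name is hypothetical and field-scaling details are omitted.

/* Illustrative sketch: copy a few dword-0 fields into struct rtl_stats,
 * in the style of the RX query routine (scaling of drvinfo size omitted).
 */
static void example_parse_rx_dword0(u8 *rxdesc, struct rtl_stats *stats)
{
        stats->crc = GET_RX_DESC_CRC32(rxdesc);         /* CRC error flag */
        stats->rx_drvinfo_size = GET_RX_DESC_DRVINFO_SIZE(rxdesc);
        stats->rx_bufshift = GET_RX_DESC_SHIFT(rxdesc);
}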
+
+/*======================= tx desc ============================================*/
+
+/* macros to set various fields in TX descriptor */
+
+/* Dword 0 */
+#define SET_TX_DESC_PKT_SIZE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
+#define SET_TX_DESC_OFFSET(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
+#define SET_TX_DESC_BMC(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
+#define SET_TX_DESC_HTC(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
+#define SET_TX_DESC_LAST_SEG(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
+#define SET_TX_DESC_FIRST_SEG(__txdesc, __value)       \
+        SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
+#define SET_TX_DESC_LINIP(__txdesc, __value)           \
+       SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
+#define SET_TX_DESC_NO_ACM(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
+#define SET_TX_DESC_GF(__txdesc, __value)              \
+       SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
+#define SET_TX_DESC_OWN(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
+
+
+/* Dword 1 */
+#define SET_TX_DESC_MACID(__txdesc, __value)           \
+       SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
+#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
+#define SET_TX_DESC_AGG_BREAK(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
+#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
+#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
+#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
+#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value)    \
+       SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
+#define SET_TX_DESC_PIFS(__txdesc, __value)            \
+       SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
+#define SET_TX_DESC_RATE_ID(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
+#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
+#define SET_TX_DESC_SEC_TYPE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
+#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
+
+/* Dword 2 */
+#define SET_TX_DESC_RTS_RC(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
+#define SET_TX_DESC_DATA_RC(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
+#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
+#define SET_TX_DESC_MORE_FRAG(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
+#define SET_TX_DESC_RAW(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
+#define SET_TX_DESC_CCX(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
+#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
+#define SET_TX_DESC_ANTSEL_A(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
+#define SET_TX_DESC_ANTSEL_B(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
+#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
+#define SET_TX_DESC_TX_ANTL(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
+#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
+
+/* Dword 3 */
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value)  \
+       SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
+#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
+#define SET_TX_DESC_SEQ(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
+#define SET_TX_DESC_PKT_ID(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
+
+/* Dword 4 */
+#define SET_TX_DESC_RTS_RATE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
+#define SET_TX_DESC_AP_DCFE(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
+#define SET_TX_DESC_QOS(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
+#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
+#define SET_TX_DESC_USE_RATE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
+#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value)  \
+       SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
+#define SET_TX_DESC_DISABLE_FB(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
+#define SET_TX_DESC_CTS2SELF(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
+#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
+#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
+#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
+#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
+#define SET_TX_DESC_DATA_SC(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
+#define SET_TX_DESC_DATA_STBC(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
+#define SET_TX_DESC_DATA_SHORT(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
+#define SET_TX_DESC_DATA_BW(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
+#define SET_TX_DESC_RTS_SHORT(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
+#define SET_TX_DESC_RTS_BW(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
+#define SET_TX_DESC_RTS_SC(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
+#define SET_TX_DESC_RTS_STBC(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
+
+/* Dword 5 */
+#define SET_TX_DESC_TX_RATE(__pdesc, __val)            \
+       SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)       \
+       SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val)            \
+       SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value)        \
+       SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
+#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
+
+/* Dword 6 */
+#define SET_TX_DESC_TXAGC_A(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
+#define SET_TX_DESC_TXAGC_B(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
+#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
+#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
+#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
+
+/* Dword 7 */
+#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
+#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value)  \
+       SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
+
+
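As an illustration of the setter macros above, a minimal descriptor fill might look like the sketch below; this is not the driver's actual routine (rtl92cu_tx_fill_desc() does the real work) and the example_* name is hypothetical.

/* Minimal sketch (hypothetical helper): fill a bare-bones TX descriptor
 * using the setters above.  Offsets mirror the macro definitions.
 */
static void example_fill_txdesc(u8 *txdesc, u16 pkt_size, u8 queue, u8 rate)
{
        memset(txdesc, 0, 32);                    /* the eight dwords shown above */
        SET_TX_DESC_PKT_SIZE(txdesc, pkt_size);   /* dword 0, bits 0..15 */
        SET_TX_DESC_FIRST_SEG(txdesc, 1);         /* single-segment frame */
        SET_TX_DESC_LAST_SEG(txdesc, 1);
        SET_TX_DESC_QUEUE_SEL(txdesc, queue);     /* dword 1, bits 8..12 */
        SET_TX_DESC_TX_RATE(txdesc, rate);        /* dword 5, bits 0..5 */
        SET_TX_DESC_OWN(txdesc, 1);               /* hand the descriptor to hardware */
}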
+int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+                          struct rtl_stats *stats,
+                          struct ieee80211_rx_status *rx_status,
+                          u8 *p_desc, struct sk_buff *skb);
+void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *,
+                              struct sk_buff_head *);
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+                        struct sk_buff *skb);
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
+                                          struct sk_buff_head *);
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+                         struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         unsigned int queue_index);
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pDesc,
+                             u32 buffer_len, bool bIsPsPoll);
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+                            u8 *pdesc, bool b_firstseg,
+                            bool b_lastseg, struct sk_buff *skb);
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
new file mode 100644 (file)
index 0000000..a4b2613
--- /dev/null
@@ -0,0 +1,1035 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+#include <linux/usb.h>
+#include "core.h"
+#include "wifi.h"
+#include "usb.h"
+#include "base.h"
+#include "ps.h"
+
+#define        REALTEK_USB_VENQT_READ                  0xC0
+#define        REALTEK_USB_VENQT_WRITE                 0x40
+#define REALTEK_USB_VENQT_CMD_REQ              0x05
+#define        REALTEK_USB_VENQT_CMD_IDX               0x00
+
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE         254
+
+static void usbctrl_async_callback(struct urb *urb)
+{
+       if (urb)
+               kfree(urb->context);
+}
+
+static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+                                         u16 value, u16 index, void *pdata,
+                                         u16 len)
+{
+       int rc;
+       unsigned int pipe;
+       u8 reqtype;
+       struct usb_ctrlrequest *dr;
+       struct urb *urb;
+       struct rtl819x_async_write_data {
+               u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+               struct usb_ctrlrequest dr;
+       } *buf;
+
+       pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+       reqtype =  REALTEK_USB_VENQT_WRITE;
+
+       buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+       if (!buf)
+               return -ENOMEM;
+
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+
+       dr = &buf->dr;
+
+       dr->bRequestType = reqtype;
+       dr->bRequest = request;
+       dr->wValue = cpu_to_le16(value);
+       dr->wIndex = cpu_to_le16(index);
+       dr->wLength = cpu_to_le16(len);
+       memcpy(buf, pdata, len);
+       usb_fill_control_urb(urb, udev, pipe,
+                            (unsigned char *)dr, buf, len,
+                            usbctrl_async_callback, buf);
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+       if (rc < 0)
+               kfree(buf);
+       usb_free_urb(urb);
+       return rc;
+}
+
+static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+                                       u16 value, u16 index, void *pdata,
+                                       u16 len)
+{
+       unsigned int pipe;
+       int status;
+       u8 reqtype;
+
+       pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+       reqtype =  REALTEK_USB_VENQT_READ;
+
+       status = usb_control_msg(udev, pipe, request, reqtype, value, index,
+                                pdata, len, 0); /* timeout 0: wait indefinitely */
+
+       if (status < 0)
+               printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
+                      "status:0x%x value=0x%x\n", value, status,
+                      *(u32 *)pdata);
+       return status;
+}
+
+static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+{
+       u8 request;
+       u16 wvalue;
+       u16 index;
+       u32 *data;
+       u32 ret;
+
+       data = kmalloc(sizeof(u32), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+       wvalue = (u16)addr;
+       _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+       ret = *data;
+       kfree(data);
+       return ret;
+}
+
+static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+}
+
+static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+}
+
+static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return _usb_read_sync(to_usb_device(dev), addr, 4);
+}
+
+static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
+                            u16 len)
+{
+       u8 request;
+       u16 wvalue;
+       u16 index;
+       u32 data;
+
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+       wvalue = (u16)(addr&0x0000ffff);
+       data = val;
+       _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
+                                      len);
+}
+
+static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       _usb_write_async(to_usb_device(dev), addr, val, 1);
+}
+
+static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       _usb_write_async(to_usb_device(dev), addr, val, 2);
+}
+
+static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       _usb_write_async(to_usb_device(dev), addr, val, 4);
+}
+
+static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
+                                 u16 len, u8 *pdata)
+{
+       int status;
+       u8 request;
+       u16 wvalue;
+       u16 index;
+
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+       wvalue = (u16)addr;
+       if (read)
+               status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
+                                                     index, pdata, len);
+       else
+               status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
+                                                       index, pdata, len);
+       return status;
+}
+
+static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                          u8 *pdata)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
+                                      pdata);
+}
+
+static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                            u8 *pdata)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
+                                     pdata);
+}
+
+static void _rtl_usb_io_handler_init(struct device *dev,
+                                    struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->io.dev = dev;
+       mutex_init(&rtlpriv->io.bb_mutex);
+       rtlpriv->io.write8_async        = _usb_write8_async;
+       rtlpriv->io.write16_async       = _usb_write16_async;
+       rtlpriv->io.write32_async       = _usb_write32_async;
+       rtlpriv->io.writeN_async        = _usb_writeN_async;
+       rtlpriv->io.read8_sync          = _usb_read8_sync;
+       rtlpriv->io.read16_sync         = _usb_read16_sync;
+       rtlpriv->io.read32_sync         = _usb_read32_sync;
+       rtlpriv->io.readN_sync          = _usb_readN_sync;
+}
+
+static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       mutex_destroy(&rtlpriv->io.bb_mutex);
+}
+
+/*
+ * Default aggregation handler: do nothing, just return the oldest skb.
+ */
+static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw,
+                                                 struct sk_buff_head *list)
+{
+       return skb_dequeue(list);
+}
+
+#define IS_HIGH_SPEED_USB(udev) \
+               ((USB_SPEED_HIGH == (udev)->speed) ? true : false)
+
+static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
+{
+       u32 i;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev)
+                                                   ? USB_HIGH_SPEED_BULK_SIZE
+                                                   : USB_FULL_SPEED_BULK_SIZE;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
+                rtlusb->max_bulk_out_size));
+
+       for (i = 0; i < __RTL_TXQ_NUM; i++) {
+               u32 ep_num = rtlusb->ep_map.ep_mapping[i];
+               if (!ep_num) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                                ("Invalid endpoint map setting!\n"));
+                       return -EINVAL;
+               }
+       }
+
+       rtlusb->usb_tx_post_hdl =
+                rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl;
+       rtlusb->usb_tx_cleanup  =
+                rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup;
+       rtlusb->usb_tx_aggregate_hdl =
+                (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl)
+                ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl
+                : &_none_usb_tx_aggregate_hdl;
+
+       init_usb_anchor(&rtlusb->tx_submitted);
+       for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+               skb_queue_head_init(&rtlusb->tx_skb_queue[i]);
+               init_usb_anchor(&rtlusb->tx_pending[i]);
+       }
+       return 0;
+}
+
+static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+       rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
+       rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
+       rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
+       rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
+       rtlusb->usb_rx_segregate_hdl =
+               rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
+
+       printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
+               rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
+       init_usb_anchor(&rtlusb->rx_submitted);
+       return 0;
+}
+
+static int _rtl_usb_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+       int err;
+       u8 epidx;
+       struct usb_interface    *usb_intf = rtlusb->intf;
+       u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints;
+
+       rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
+       for (epidx = 0; epidx < epnums; epidx++) {
+               struct usb_endpoint_descriptor *pep_desc;
+               pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
+
+               if (usb_endpoint_dir_in(pep_desc))
+                       rtlusb->in_ep_nums++;
+               else if (usb_endpoint_dir_out(pep_desc))
+                       rtlusb->out_ep_nums++;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
+                        pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+                        pep_desc->bInterval));
+       }
+       if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
+               return -EINVAL;
+
+       /* usb endpoint mapping */
+       err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
+       rtlusb->usb_mq_to_hwq =  rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
+       _rtl_usb_init_tx(hw);
+       _rtl_usb_init_rx(hw);
+       return err;
+}
+
+static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtlhal->hw = hw;
+       ppsc->inactiveps = false;
+       ppsc->leisure_ps = false;
+       ppsc->fwctrl_lps = false;
+       ppsc->reg_fwctrl_lps = 3;
+       ppsc->reg_max_lps_awakeintvl = 5;
+       ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
+
+        /* IBSS */
+       mac->beacon_interval = 100;
+
+        /* AMPDU */
+       mac->min_space_cfg = 0;
+       mac->max_mss_density = 0;
+
+       /* set sane AMPDU defaults */
+       mac->current_ampdu_density = 7;
+       mac->current_ampdu_factor = 3;
+
+       /* QOS */
+       rtlusb->acm_method = eAcmWay2_SW;
+
+       /* IRQ */
+       /* HIMR - turn all on */
+       rtlusb->irq_mask[0] = 0xFFFFFFFF;
+       /* HIMR_EX - turn all on */
+       rtlusb->irq_mask[1] = 0xFFFFFFFF;
+       rtlusb->disableHWSM =  true;
+       return 0;
+}
+
+#define __RADIO_TAP_SIZE_RSV   32
+
+static void _rtl_rx_completed(struct urb *urb);
+
+static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
+                                       struct rtl_usb *rtlusb,
+                                       struct urb *urb,
+                                       gfp_t gfp_mask)
+{
+       struct sk_buff *skb;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
+                              gfp_mask);
+       if (!skb) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Failed to __dev_alloc_skb!!\n"))
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* reserve some space for mac80211's radiotap */
+       skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
+       usb_fill_bulk_urb(urb, rtlusb->udev,
+                         usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
+                         skb->data, min(skb_tailroom(skb),
+                         (int)rtlusb->rx_max_size),
+                         _rtl_rx_completed, skb);
+
+       _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
+       return skb;
+}
+
+#undef __RADIO_TAP_SIZE_RSV
+
+static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
+                                   struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *rxdesc = skb->data;
+       struct ieee80211_hdr *hdr;
+       bool unicast = false;
+       __le16 fc;
+       struct ieee80211_rx_status rx_status = {0};
+       struct rtl_stats stats = {
+               .signal = 0,
+               .noise = -98,
+               .rate = 0,
+       };
+
+       skb_pull(skb, RTL_RX_DESC_SIZE);
+       rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+       skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+       hdr = (struct ieee80211_hdr *)(skb->data);
+       fc = hdr->frame_control;
+       if (!stats.crc) {
+               memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+               if (is_broadcast_ether_addr(hdr->addr1)) {
+                       /*TODO*/;
+               } else if (is_multicast_ether_addr(hdr->addr1)) {
+                       /*TODO*/
+               } else {
+                       unicast = true;
+                       rtlpriv->stats.rxbytesunicast +=  skb->len;
+               }
+
+               rtl_is_special_data(hw, skb, false);
+
+               if (ieee80211_is_data(fc)) {
+                       rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+                       if (unicast)
+                               rtlpriv->link_info.num_rx_inperiod++;
+               }
+       }
+}
+
+static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+                                     struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *rxdesc = skb->data;
+       struct ieee80211_hdr *hdr;
+       bool unicast = false;
+       __le16 fc;
+       struct ieee80211_rx_status rx_status = {0};
+       struct rtl_stats stats = {
+               .signal = 0,
+               .noise = -98,
+               .rate = 0,
+       };
+
+       skb_pull(skb, RTL_RX_DESC_SIZE);
+       rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+       skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+       hdr = (struct ieee80211_hdr *)(skb->data);
+       fc = hdr->frame_control;
+       if (!stats.crc) {
+               memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+               if (is_broadcast_ether_addr(hdr->addr1)) {
+                       /*TODO*/;
+               } else if (is_multicast_ether_addr(hdr->addr1)) {
+                       /*TODO*/
+               } else {
+                       unicast = true;
+                       rtlpriv->stats.rxbytesunicast +=  skb->len;
+               }
+
+               rtl_is_special_data(hw, skb, false);
+
+               if (ieee80211_is_data(fc)) {
+                       rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+                       if (unicast)
+                               rtlpriv->link_info.num_rx_inperiod++;
+               }
+               if (likely(rtl_action_proc(hw, skb, false))) {
+                       struct sk_buff *uskb = NULL;
+                       u8 *pdata;
+
+                       uskb = dev_alloc_skb(skb->len + 128);
+                       memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+                              sizeof(rx_status));
+                       pdata = (u8 *)skb_put(uskb, skb->len);
+                       memcpy(pdata, skb->data, skb->len);
+                       dev_kfree_skb_any(skb);
+                       ieee80211_rx_irqsafe(hw, uskb);
+               } else {
+                       dev_kfree_skb_any(skb);
+               }
+       }
+}
+
+static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct sk_buff *_skb;
+       struct sk_buff_head rx_queue;
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       skb_queue_head_init(&rx_queue);
+       if (rtlusb->usb_rx_segregate_hdl)
+               rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
+       WARN_ON(skb_queue_empty(&rx_queue));
+       while (!skb_queue_empty(&rx_queue)) {
+               _skb = skb_dequeue(&rx_queue);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
+       }
+}
+
+static void _rtl_rx_completed(struct urb *_urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)_urb->context;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+       struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int err = 0;
+
+       if (unlikely(IS_USB_STOP(rtlusb)))
+               goto free;
+
+       if (likely(0 == _urb->status)) {
+               /* TODO: moving this processing to a work queue might improve
+                * CPU utilization.  A fresh skb is allocated below to resubmit
+                * the URB, while the original skb is processed and handed up.
+                */
+               skb_put(skb, _urb->actual_length);
+
+               if (likely(!rtlusb->usb_rx_segregate_hdl)) {
+                       struct sk_buff *_skb;
+                       _rtl_usb_rx_process_noagg(hw, skb);
+                       _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
+                       if (IS_ERR(_skb)) {
+                               err = PTR_ERR(_skb);
+                               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                                       ("Can't allocate skb for bulk IN!\n"));
+                               return;
+                       }
+                       skb = _skb;
+               } else {
+                       /* TO DO */
+                       _rtl_rx_pre_process(hw, skb);
+                       printk(KERN_ERR "rtlwifi: rx agg not supported\n");
+               }
+               goto resubmit;
+       }
+
+       switch (_urb->status) {
+       /* disconnect */
+       case -ENOENT:
+       case -ECONNRESET:
+       case -ENODEV:
+       case -ESHUTDOWN:
+               goto free;
+       default:
+               break;
+       }
+
+resubmit:
+       skb_reset_tail_pointer(skb);
+       skb_trim(skb, 0);
+
+       usb_anchor_urb(_urb, &rtlusb->rx_submitted);
+       err = usb_submit_urb(_urb, GFP_ATOMIC);
+       if (unlikely(err)) {
+               usb_unanchor_urb(_urb);
+               goto free;
+       }
+       return;
+
+free:
+       dev_kfree_skb_irq(skb);
+}
+
+static int _rtl_usb_receive(struct ieee80211_hw *hw)
+{
+       struct urb *urb;
+       struct sk_buff *skb;
+       int err;
+       int i;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       WARN_ON(0 == rtlusb->rx_urb_num);
+       /* 1600 == 1514 + max WLAN header + rtk info */
+       WARN_ON(rtlusb->rx_max_size < 1600);
+
+       for (i = 0; i < rtlusb->rx_urb_num; i++) {
+               err = -ENOMEM;
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                                ("Failed to alloc URB!!\n"))
+                       goto err_out;
+               }
+
+               skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
+               if (IS_ERR(skb)) {
+                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                                ("Failed to prep_rx_urb!!\n"))
+                       err = PTR_ERR(skb);
+                       goto err_out;
+               }
+
+               usb_anchor_urb(urb, &rtlusb->rx_submitted);
+               err = usb_submit_urb(urb, GFP_KERNEL);
+               if (err)
+                       goto err_out;
+               usb_free_urb(urb);
+       }
+       return 0;
+
+err_out:
+       usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+       return err;
+}
+
+static int rtl_usb_start(struct ieee80211_hw *hw)
+{
+       int err;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       err = rtlpriv->cfg->ops->hw_init(hw);
+       rtl_init_rx_config(hw);
+
+       /* Enable software */
+       SET_USB_START(rtlusb);
+       /* Should be called after adapter start and interrupt enable. */
+       set_hal_start(rtlhal);
+
+       /* Start bulk IN */
+       _rtl_usb_receive(hw);
+
+       return err;
+}
+
+/*=======================  tx =========================================*/
+static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+{
+       u32 i;
+       struct sk_buff *_skb;
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct ieee80211_tx_info *txinfo;
+
+       SET_USB_STOP(rtlusb);
+
+       /* clean up rx stuff. */
+       usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+       /* clean up tx stuff */
+       for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+               while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
+                       rtlusb->usb_tx_cleanup(hw, _skb);
+                       txinfo = IEEE80211_SKB_CB(_skb);
+                       ieee80211_tx_info_clear_status(txinfo);
+                       txinfo->flags |= IEEE80211_TX_STAT_ACK;
+                       ieee80211_tx_status_irqsafe(hw, _skb);
+               }
+               usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
+       }
+       usb_kill_anchored_urbs(&rtlusb->tx_submitted);
+}
+
+/*
+ * More members may be added to struct rtl_usb later; their deinitialization
+ * belongs here.
+ */
+static void rtl_usb_deinit(struct ieee80211_hw *hw)
+{
+       rtl_usb_cleanup(hw);
+}
+
+static void rtl_usb_stop(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       /* Should be called after adapter start and interrupt enable. */
+       set_hal_stop(rtlhal);
+       /* Disable software */
+       SET_USB_STOP(rtlusb);
+       rtl_usb_deinit(hw);
+       rtlpriv->cfg->ops->hw_disable(hw);
+}
+
+static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
+{
+       int err;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       usb_anchor_urb(_urb, &rtlusb->tx_submitted);
+       err = usb_submit_urb(_urb, GFP_ATOMIC);
+       if (err < 0) {
+               struct sk_buff *skb;
+
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Failed to submit urb.\n"));
+               usb_unanchor_urb(_urb);
+               skb = (struct sk_buff *)_urb->context;
+               kfree_skb(skb);
+       }
+       usb_free_urb(_urb);
+}
+
+static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
+                       struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct ieee80211_tx_info *txinfo;
+
+       rtlusb->usb_tx_post_hdl(hw, urb, skb);
+       skb_pull(skb, RTL_TX_HEADER_SIZE);
+       txinfo = IEEE80211_SKB_CB(skb);
+       ieee80211_tx_info_clear_status(txinfo);
+       txinfo->flags |= IEEE80211_TX_STAT_ACK;
+
+       if (urb->status) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Urb has error status 0x%X\n", urb->status));
+               goto out;
+       }
+       /*  TODO:       statistics */
+out:
+       ieee80211_tx_status_irqsafe(hw, skb);
+       return urb->status;
+}
+
+static void _rtl_tx_complete(struct urb *urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)urb->context;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+       struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+       int err;
+
+       if (unlikely(IS_USB_STOP(rtlusb)))
+               return;
+       err = _usb_tx_post(hw, urb, skb);
+       if (err) {
+               /* Ignore the error and keep issuing other URBs */
+               return;
+       }
+}
+
+static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
+                               struct sk_buff *skb, u32 ep_num)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct urb *_urb;
+
+       WARN_ON(NULL == skb);
+       _urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!_urb) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Can't allocate URB for bulk out!\n"));
+               kfree_skb(skb);
+               return NULL;
+       }
+       _rtl_install_trx_info(rtlusb, skb, ep_num);
+       usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
+                         ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
+       _urb->transfer_flags |= URB_ZERO_PACKET;
+       return _urb;
+}
+
+static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      enum rtl_txq qnum)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       u32 ep_num;
+       struct urb *_urb = NULL;
+       struct sk_buff *_skb = NULL;
+       struct sk_buff_head *skb_list;
+       struct usb_anchor *urb_list;
+
+       WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
+       if (unlikely(IS_USB_STOP(rtlusb))) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("USB device is stopping...\n"));
+               kfree_skb(skb);
+               return;
+       }
+       ep_num = rtlusb->ep_map.ep_mapping[qnum];
+       skb_list = &rtlusb->tx_skb_queue[ep_num];
+       _skb = skb;
+       _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
+       if (unlikely(!_urb)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't allocate urb. Drop skb!\n"));
+               return;
+       }
+       urb_list = &rtlusb->tx_pending[ep_num];
+       _rtl_submit_tx_urb(hw, _urb);
+}
+
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
+                           u16 hw_queue)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct rtl_tx_desc *pdesc = NULL;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       __le16 fc = hdr->frame_control;
+       u8 *pda_addr = hdr->addr1;
+       /* ssn */
+       u8 *qc = NULL;
+       u8 tid = 0;
+       u16 seq_number = 0;
+
+       if (ieee80211_is_mgmt(fc))
+               rtl_tx_mgmt_proc(hw, skb);
+       rtl_action_proc(hw, skb, true);
+       if (is_multicast_ether_addr(pda_addr))
+               rtlpriv->stats.txbytesmulticast += skb->len;
+       else if (is_broadcast_ether_addr(pda_addr))
+               rtlpriv->stats.txbytesbroadcast += skb->len;
+       else
+               rtlpriv->stats.txbytesunicast += skb->len;
+       if (ieee80211_is_data_qos(fc)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+                            IEEE80211_SCTL_SEQ) >> 4;
+               seq_number += 1;
+               seq_number <<= 4;
+       }
+       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+                                       hw_queue);
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               if (qc)
+                       mac->tids[tid].seq_number = seq_number;
+       }
+       if (ieee80211_is_data(fc))
+               rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+}
+
+static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       __le16 fc = hdr->frame_control;
+       u16 hw_queue;
+
+       if (unlikely(is_hal_stop(rtlhal)))
+               goto err_free;
+       hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
+       _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+       _rtl_usb_transmit(hw, skb, hw_queue);
+       return NETDEV_TX_OK;
+
+err_free:
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct sk_buff *skb)
+{
+       return false;
+}
+
+static struct rtl_intf_ops rtl_usb_ops = {
+       .adapter_start = rtl_usb_start,
+       .adapter_stop = rtl_usb_stop,
+       .adapter_tx = rtl_usb_tx,
+       .waitq_insert = rtl_usb_tx_chk_waitq_insert,
+};
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+                       const struct usb_device_id *id)
+{
+       int err;
+       struct ieee80211_hw *hw = NULL;
+       struct rtl_priv *rtlpriv = NULL;
+       struct usb_device       *udev;
+       struct rtl_usb_priv *usb_priv;
+
+       hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
+                               sizeof(struct rtl_usb_priv), &rtl_ops);
+       if (!hw) {
+               RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
+               return -ENOMEM;
+       }
+       rtlpriv = hw->priv;
+       SET_IEEE80211_DEV(hw, &intf->dev);
+       udev = interface_to_usbdev(intf);
+       usb_get_dev(udev);
+       usb_priv = rtl_usbpriv(hw);
+       memset(usb_priv, 0, sizeof(*usb_priv));
+       usb_priv->dev.intf = intf;
+       usb_priv->dev.udev = udev;
+       usb_set_intfdata(intf, hw);
+       /* init cfg & intf_ops */
+       rtlpriv->rtlhal.interface = INTF_USB;
+       rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+       rtlpriv->intf_ops = &rtl_usb_ops;
+       rtl_dbgp_flag_init(hw);
+       /* Init IO handler */
+       _rtl_usb_io_handler_init(&udev->dev, hw);
+       rtlpriv->cfg->ops->read_chip_version(hw);
+       /*like read eeprom and so on */
+       rtlpriv->cfg->ops->read_eeprom_info(hw);
+       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't init_sw_vars.\n"));
+               goto error_out;
+       }
+       rtlpriv->cfg->ops->init_sw_leds(hw);
+       err = _rtl_usb_init(hw);
+       err = _rtl_usb_init_sw(hw);
+       /* Init mac80211 sw */
+       err = rtl_init_core(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't allocate sw for mac80211.\n"));
+               goto error_out;
+       }
+
+       /*init rfkill */
+       /* rtl_init_rfkill(hw); */
+
+       err = ieee80211_register_hw(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                        ("Can't register mac80211 hw.\n"));
+               goto error_out;
+       } else {
+               rtlpriv->mac80211.mac80211_registered = 1;
+       }
+       set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+       return 0;
+error_out:
+       rtl_deinit_core(hw);
+       _rtl_usb_io_handler_release(hw);
+       ieee80211_free_hw(hw);
+       usb_put_dev(udev);
+       return -ENODEV;
+}
+EXPORT_SYMBOL(rtl_usb_probe);
+
+void rtl_usb_disconnect(struct usb_interface *intf)
+{
+       struct ieee80211_hw *hw = usb_get_intfdata(intf);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       if (unlikely(!rtlpriv))
+               return;
+       /*ieee80211_unregister_hw will call ops_stop */
+       if (rtlmac->mac80211_registered == 1) {
+               ieee80211_unregister_hw(hw);
+               rtlmac->mac80211_registered = 0;
+       } else {
+               rtl_deinit_deferred_work(hw);
+               rtlpriv->intf_ops->adapter_stop(hw);
+       }
+       /*deinit rfkill */
+       /* rtl_deinit_rfkill(hw); */
+       rtl_usb_deinit(hw);
+       rtl_deinit_core(hw);
+       rtlpriv->cfg->ops->deinit_sw_leds(hw);
+       rtlpriv->cfg->ops->deinit_sw_vars(hw);
+       _rtl_usb_io_handler_release(hw);
+       usb_put_dev(rtlusb->udev);
+       usb_set_intfdata(intf, NULL);
+       ieee80211_free_hw(hw);
+}
+EXPORT_SYMBOL(rtl_usb_disconnect);
+
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+       return 0;
+}
+EXPORT_SYMBOL(rtl_usb_suspend);
+
+int rtl_usb_resume(struct usb_interface *pusb_intf)
+{
+       return 0;
+}
+EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
new file mode 100644 (file)
index 0000000..abadfe9
--- /dev/null
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_USB_H__
+#define __RTL_USB_H__
+
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#define RTL_USB_DEVICE(vend, prod, cfg) \
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
+       .idVendor = (vend), \
+       .idProduct = (prod), \
+       .driver_info = (kernel_ulong_t)&(cfg)
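
For illustration, a per-chip driver's USB ID table entry would use this macro as sketched below; the vendor/product IDs and the example_hal_cfg symbol are placeholders, not taken from this patch (rtl_usb_probe() reads driver_info back as the struct rtl_hal_cfg pointer).

/* Illustrative sketch: USB device ID table entry built with RTL_USB_DEVICE.
 * 0x0bda/0x8192 and example_hal_cfg are placeholders.
 */
static struct usb_device_id example_usb_ids[] = {
        { RTL_USB_DEVICE(0x0bda, 0x8192, example_hal_cfg) },
        {}                      /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_usb_ids);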
+
+#define USB_HIGH_SPEED_BULK_SIZE       512
+#define USB_FULL_SPEED_BULK_SIZE       64
+
+
+#define RTL_USB_MAX_TXQ_NUM            4               /* max tx queue */
+#define RTL_USB_MAX_EP_NUM             6               /* max ep number */
+#define RTL_USB_MAX_TX_URBS_NUM                8
+
+enum rtl_txq {
+       /* These definitions shall be consistent with the values
+        * returned by skb_get_queue_mapping()
+        *------------------------------------*/
+       RTL_TXQ_BK,
+       RTL_TXQ_BE,
+       RTL_TXQ_VI,
+       RTL_TXQ_VO,
+       /*------------------------------------*/
+       RTL_TXQ_BCN,
+       RTL_TXQ_MGT,
+       RTL_TXQ_HI,
+
+       /* Must be last */
+       __RTL_TXQ_NUM,
+};
+
+struct rtl_ep_map {
+       u32 ep_mapping[__RTL_TXQ_NUM];
+};
+
+struct _trx_info {
+       struct rtl_usb *rtlusb;
+       u32 ep_num;
+};
+
+static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
+                                        struct sk_buff *skb,
+                                        u32 ep_num)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       info->rate_driver_data[0] = rtlusb;
+       info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
+}
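
The completion handlers in usb.c recover the pointer stashed here from the same rate_driver_data slot; a minimal sketch of that retrieval follows (the helper name is hypothetical).

/* Minimal sketch (hypothetical helper): recover the rtl_usb pointer stored
 * by _rtl_install_trx_info(), as _rtl_rx_completed()/_rtl_tx_complete() do.
 */
static inline struct rtl_usb *example_trx_info_to_rtlusb(struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        return (struct rtl_usb *)info->rate_driver_data[0];
}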
+
+
+/*  Add suspend/resume later */
+enum rtl_usb_state {
+       USB_STATE_STOP  = 0,
+       USB_STATE_START = 1,
+};
+
+#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state)
+#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state)
+#define SET_USB_STOP(rtlusb_ptr) \
+       do {                                                    \
+               (rtlusb_ptr)->state = USB_STATE_STOP;           \
+       } while (0)
+
+#define SET_USB_START(rtlusb_ptr)                              \
+       do { \
+               (rtlusb_ptr)->state = USB_STATE_START;          \
+       } while (0)
+
+struct rtl_usb {
+       struct usb_device *udev;
+       struct usb_interface *intf;
+       enum rtl_usb_state state;
+
+       /* Bcn control register setting */
+       u32 reg_bcn_ctrl_val;
+       /* for 88/92cu card disable */
+       u8      disableHWSM;
+       /*QOS & EDCA */
+       enum acm_method acm_method;
+       /* irq  . HIMR,HIMR_EX */
+       u32 irq_mask[2];
+       bool irq_enabled;
+
+       u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+
+       /* Tx */
+       u8 out_ep_nums;
+       u8 out_queue_sel;
+       struct rtl_ep_map ep_map;
+
+       u32 max_bulk_out_size;
+       u32 tx_submitted_urbs;
+       struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM];
+
+       struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM];
+       struct usb_anchor tx_submitted;
+
+       struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+                                               struct sk_buff_head *);
+       int (*usb_tx_post_hdl)(struct ieee80211_hw *,
+                              struct urb *, struct sk_buff *);
+       void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+
+       /* Rx */
+       u8 in_ep_nums;
+       u32 in_ep;              /* Bulk IN endpoint number */
+       u32 rx_max_size;        /* Bulk IN max buffer size */
+       u32 rx_urb_num;         /* How many Bulk INs are submitted to host. */
+       struct usb_anchor       rx_submitted;
+       void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+                                    struct sk_buff_head *);
+       void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+};
+
+struct rtl_usb_priv {
+       struct rtl_usb dev;
+       struct rtl_led_ctl ledctl;
+};
+
+#define rtl_usbpriv(hw)         (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
+#define rtl_usbdev(usbpriv)    (&((usbpriv)->dev))
+
+
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+                           const struct usb_device_id *id);
+void rtl_usb_disconnect(struct usb_interface *intf);
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
+int rtl_usb_resume(struct usb_interface *pusb_intf);
+
+#endif
index d44d796..01226f8 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/firmware.h>
 #include <linux/version.h>
 #include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/usb.h>
 #include <net/mac80211.h>
 #include "debug.h"
 
 #define MAC80211_3ADDR_LEN                     24
 #define MAC80211_4ADDR_LEN                     30
 
+#define CHANNEL_MAX_NUMBER     (14 + 24 + 21)  /* 14 is the max channel no */
+#define CHANNEL_GROUP_MAX      (3 + 9) /*  ch1~3, 4~9, 10~14 = three groups */
+#define MAX_PG_GROUP                   13
+#define        CHANNEL_GROUP_MAX_2G            3
+#define        CHANNEL_GROUP_IDX_5GL           3
+#define        CHANNEL_GROUP_IDX_5GM           6
+#define        CHANNEL_GROUP_IDX_5GH           9
+#define        CHANNEL_GROUP_MAX_5G            9
+#define CHANNEL_MAX_NUMBER_2G          14
+#define AVG_THERMAL_NUM                        8
+
+/* for early mode */
+#define EM_HDR_LEN                     8
 enum intf_type {
        INTF_PCI = 0,
        INTF_USB = 1,
@@ -113,11 +128,38 @@ enum hardware_type {
        HARDWARE_TYPE_RTL8192CU,
        HARDWARE_TYPE_RTL8192DE,
        HARDWARE_TYPE_RTL8192DU,
+       HARDWARE_TYPE_RTL8723E,
+       HARDWARE_TYPE_RTL8723U,
 
-       /*keep it last*/
+       /* keep it last */
        HARDWARE_TYPE_NUM
 };
 
+#define IS_HARDWARE_TYPE_8192SU(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU)
+#define IS_HARDWARE_TYPE_8192SE(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+#define IS_HARDWARE_TYPE_8192CE(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
+#define IS_HARDWARE_TYPE_8192CU(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
+#define IS_HARDWARE_TYPE_8192DE(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+#define IS_HARDWARE_TYPE_8192DU(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU)
+#define IS_HARDWARE_TYPE_8723E(rtlhal)                 \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E)
+#define IS_HARDWARE_TYPE_8723U(rtlhal)                 \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
+#define        IS_HARDWARE_TYPE_8192S(rtlhal)                  \
+(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal))
+#define        IS_HARDWARE_TYPE_8192C(rtlhal)                  \
+(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal))
+#define        IS_HARDWARE_TYPE_8192D(rtlhal)                  \
+(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
+#define        IS_HARDWARE_TYPE_8723(rtlhal)                   \
+(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
+
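
(A hedged illustration, not in the patch, of how the new hardware-type helpers are consumed -- branching on the chip family stored in rtl_hal.hw_type; the rtl_hal()/rtl_priv() accessors are assumed to exist elsewhere in the driver.)

/* Sketch only; accessor names assumed from the existing driver. */
static void example_pick_family_path(struct ieee80211_hw *hw)
{
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

        if (IS_HARDWARE_TYPE_8192C(rtlhal)) {
                /* shared 8192CE/8192CU path */
        } else if (IS_HARDWARE_TYPE_8723(rtlhal)) {
                /* shared 8723E/8723U path */
        }
}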
 enum scan_operation_backup_opt {
        SCAN_OPT_BACKUP = 0,
        SCAN_OPT_RESTORE,
@@ -315,6 +357,7 @@ enum rf_type {
        RF_1T1R = 0,
        RF_1T2R = 1,
        RF_2T2R = 2,
+       RF_2T2R_GREEN = 3,
 };
 
 enum ht_channel_width {
@@ -359,6 +402,8 @@ enum rtl_var_map {
        EFUSE_LOADER_CLK_EN,
        EFUSE_ANA8M,
        EFUSE_HWSET_MAX_SIZE,
+       EFUSE_MAX_SECTION_MAP,
+       EFUSE_REAL_CONTENT_SIZE,
 
        /*CAM map */
        RWCAM,
@@ -397,6 +442,7 @@ enum rtl_var_map {
        RTL_IMR_ATIMEND,        /*For 92C,ATIM Window End Interrupt */
        RTL_IMR_BDOK,           /*Beacon Queue DMA OK Interrupt */
        RTL_IMR_HIGHDOK,        /*High Queue DMA OK Interrupt */
+       RTL_IMR_COMDOK,         /*Command Queue DMA OK Interrupt*/
        RTL_IMR_TBDOK,          /*Transmit Beacon OK interrupt */
        RTL_IMR_MGNTDOK,        /*Management Queue DMA OK Interrupt */
        RTL_IMR_TBDER,          /*For 92C,Transmit Beacon Error Interrupt */
@@ -405,7 +451,8 @@ enum rtl_var_map {
        RTL_IMR_VIDOK,          /*AC_VI DMA OK Interrupt */
        RTL_IMR_VODOK,          /*AC_VO DMA Interrupt */
        RTL_IMR_ROK,            /*Receive DMA OK Interrupt */
-       RTL_IBSS_INT_MASKS,     /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/
+       RTL_IBSS_INT_MASKS,     /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+                                * RTL_IMR_TBDER) */
 
        /*CCK Rates, TxHT = 0 */
        RTL_RC_CCK_RATE1M,
@@ -481,6 +528,19 @@ enum acm_method {
        eAcmWay2_SW = 2,
 };
 
+enum macphy_mode {
+       SINGLEMAC_SINGLEPHY = 0,
+       DUALMAC_DUALPHY,
+       DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+       BAND_ON_2_4G = 0,
+       BAND_ON_5G,
+       BAND_ON_BOTH,
+       BANDMAX
+};
+
 /*aci/aifsn Field.
 Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
 union aci_aifsn {
@@ -505,6 +565,17 @@ enum wireless_mode {
        WIRELESS_MODE_N_5G = 0x20
 };
 
+#define IS_WIRELESS_MODE_A(wirelessmode)       \
+       (wirelessmode == WIRELESS_MODE_A)
+#define IS_WIRELESS_MODE_B(wirelessmode)       \
+       (wirelessmode == WIRELESS_MODE_B)
+#define IS_WIRELESS_MODE_G(wirelessmode)       \
+       (wirelessmode == WIRELESS_MODE_G)
+#define IS_WIRELESS_MODE_N_24G(wirelessmode)   \
+       (wirelessmode == WIRELESS_MODE_N_24G)
+#define IS_WIRELESS_MODE_N_5G(wirelessmode)    \
+       (wirelessmode == WIRELESS_MODE_N_5G)
+
 enum ratr_table_mode {
        RATR_INX_WIRELESS_NGB = 0,
        RATR_INX_WIRELESS_NG = 1,
@@ -574,11 +645,11 @@ struct rtl_probe_rsp {
 struct rtl_led {
        void *hw;
        enum rtl_led_pin ledpin;
-       bool b_ledon;
+       bool ledon;
 };
 
 struct rtl_led_ctl {
-       bool bled_opendrain;
+       bool led_opendrain;
        struct rtl_led sw_led0;
        struct rtl_led sw_led1;
 };
@@ -603,6 +674,8 @@ struct false_alarm_statistics {
        u32 cnt_rate_illegal;
        u32 cnt_crc8_fail;
        u32 cnt_mcs_fail;
+       u32 cnt_fast_fsync_fail;
+       u32 cnt_sb_search_fail;
        u32 cnt_ofdm_fail;
        u32 cnt_cck_fail;
        u32 cnt_all;
@@ -690,6 +763,32 @@ struct rtl_rfkill {
        bool rfkill_state;      /*0 is off, 1 is on */
 };
 
+#define IQK_MATRIX_REG_NUM     8
+#define IQK_MATRIX_SETTINGS_NUM        (1 + 24 + 21)
+struct iqk_matrix_regs {
+       bool b_iqk_done;
+       long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct phy_parameters {
+       u16 length;
+       u32 *pdata;
+};
+
+enum hw_param_tab_index {
+       PHY_REG_2T,
+       PHY_REG_1T,
+       PHY_REG_PG,
+       RADIOA_2T,
+       RADIOB_2T,
+       RADIOA_1T,
+       RADIOB_1T,
+       MAC_REG,
+       AGCTAB_2T,
+       AGCTAB_1T,
+       MAX_TAB
+};
+
 struct rtl_phy {
        struct bb_reg_def phyreg_def[4];        /*Radio A/B/C/D */
        struct init_gain initgain_backup;
@@ -705,8 +804,9 @@ struct rtl_phy {
        u8 current_channel;
        u8 h2c_box_num;
        u8 set_io_inprogress;
+       u8 lck_inprogress;
 
-       /*record for power tracking*/
+       /* record for power tracking */
        s32 reg_e94;
        s32 reg_e9c;
        s32 reg_ea4;
@@ -723,26 +823,32 @@ struct rtl_phy {
        u32 iqk_mac_backup[IQK_MAC_REG_NUM];
        u32 iqk_bb_backup[10];
 
-       bool b_rfpi_enable;
+       /* Dual mac */
+       bool need_iqk;
+       struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+       bool rfpi_enable;
 
        u8 pwrgroup_cnt;
-       u8 bcck_high_power;
-       /* 3 groups of pwr diff by rates*/
-       u32 mcs_txpwrlevel_origoffset[4][16];
+       u8 cck_high_power;
+       /* MAX_PG_GROUP groups of pwr diff by rates */
+       u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
        u8 default_initialgain[4];
 
-       /*the current Tx power level*/
+       /* the current Tx power level */
        u8 cur_cck_txpwridx;
        u8 cur_ofdm24g_txpwridx;
 
        u32 rfreg_chnlval[2];
-       bool b_apk_done;
+       bool apk_done;
+       u32 reg_rf3c[2];        /* pathA / pathB  */
 
-       /*fsync*/
        u8 framesync;
        u32 framesync_c34;
 
        u8 num_total_rfpath;
+       struct phy_parameters hwparam_tables[MAX_TAB];
+       u16 rf_pathmap;
 };
 
 #define MAX_TID_COUNT                          9
@@ -768,6 +874,7 @@ struct rtl_tid_data {
 struct rtl_priv;
 struct rtl_io {
        struct device *dev;
+       struct mutex bb_mutex;
 
        /*PCI MEM map */
        unsigned long pci_mem_end;      /*shared mem end        */
@@ -779,11 +886,14 @@ struct rtl_io {
        void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
        void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
        void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
-
-        u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
-        u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
-        u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
-
+       int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                            u8 *pdata);
+
+       u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
+       u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
+       u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
+       int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                           u8 *pdata);
 };
 
 struct rtl_mac {
@@ -815,16 +925,24 @@ struct rtl_mac {
        bool act_scanning;
        u8 cnt_after_linked;
 
-        /*RDG*/ bool rdg_en;
+       /* early mode */
+       /* skb wait queue */
+       struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+       u8 earlymode_threshold;
+
+       /*RDG*/
+       bool rdg_en;
 
-        /*AP*/ u8 bssid[6];
-       u8 mcs[16];     /*16 bytes mcs for HT rates.*/
-       u32 basic_rates; /*b/g rates*/
+       /*AP*/
+       u8 bssid[6];
+       u32 vendor;
+       u8 mcs[16];     /* 16 bytes mcs for HT rates. */
+       u32 basic_rates; /* b/g rates */
        u8 ht_enable;
        u8 sgi_40;
        u8 sgi_20;
        u8 bw_40;
-       u8 mode;                /*wireless mode*/
+       u8 mode;                /* wireless mode */
        u8 slot_time;
        u8 short_preamble;
        u8 use_cts_protect;
@@ -835,9 +953,11 @@ struct rtl_mac {
        u8 retry_long;
        u16 assoc_id;
 
-        /*IBSS*/ int beacon_interval;
+       /*IBSS*/
+       int beacon_interval;
 
-        /*AMPDU*/ u8 min_space_cfg;    /*For Min spacing configurations */
+       /*AMPDU*/
+       u8 min_space_cfg;       /*For Min spacing configurations */
        u8 max_mss_density;
        u8 current_ampdu_factor;
        u8 current_ampdu_density;
@@ -852,17 +972,54 @@ struct rtl_hal {
 
        enum intf_type interface;
        u16 hw_type;            /*92c or 92d or 92s and so on */
+       u8 ic_class;
        u8 oem_id;
-       u8 version;             /*version of chip */
+       u32 version;            /*version of chip */
        u8 state;               /*stop 0, start 1 */
 
        /*firmware */
+       u32 fwsize;
        u8 *pfirmware;
-       bool b_h2c_setinprogress;
+       u16 fw_version;
+       u16 fw_subversion;
+       bool h2c_setinprogress;
        u8 last_hmeboxnum;
-       bool bfw_ready;
+       bool fw_ready;
        /*Reserve page start offset except beacon in TxQ. */
        u8 fw_rsvdpage_startoffset;
+       u8 h2c_txcmd_seq;
+
+       /* FW Cmd IO related */
+       u16 fwcmd_iomap;
+       u32 fwcmd_ioparam;
+       bool set_fwcmd_inprogress;
+       u8 current_fwcmd_io;
+
+       /**/
+       bool driver_going2unload;
+
+       /*AMPDU init min space*/
+       u8 minspace_cfg;        /*For Min spacing configurations */
+
+       /* Dual mac */
+       enum macphy_mode macphymode;
+       enum band_type current_bandtype;        /* 0:2.4G, 1:5G */
+       enum band_type current_bandtypebackup;
+       enum band_type bandset;
+       /* dual MAC 0--Mac0 1--Mac1 */
+       u32 interfaceindex;
+       /* just for DualMac S3S4 */
+       u8 macphyctl_reg;
+       bool earlymode_enable;
+       /* Dual mac*/
+       bool during_mac0init_radiob;
+       bool during_mac1init_radioa;
+       bool reloadtxpowerindex;
+       /* True if IMR or IQK have been done
+        * for 2.4G during the scan */
+       bool load_imrandiqk_setting_for2g;
+
+       bool disable_amsdu_8k;
 };
 
 struct rtl_security {
@@ -887,48 +1044,61 @@ struct rtl_security {
 };
 
 struct rtl_dm {
-       /*PHY status for DM */
+       /*PHY status for Dynamic Management */
        long entry_min_undecoratedsmoothed_pwdb;
        long undecorated_smoothed_pwdb; /*out dm */
        long entry_max_undecoratedsmoothed_pwdb;
-       bool b_dm_initialgain_enable;
-       bool bdynamic_txpower_enable;
-       bool bcurrent_turbo_edca;
-       bool bis_any_nonbepkts; /*out dm */
-       bool bis_cur_rdlstate;
-       bool btxpower_trackingInit;
-       bool b_disable_framebursting;
-       bool b_cck_inch14;
-       bool btxpower_tracking;
-       bool b_useramask;
-       bool brfpath_rxenable[4];
-
+       bool dm_initialgain_enable;
+       bool dynamic_txpower_enable;
+       bool current_turbo_edca;
+       bool is_any_nonbepkts;  /*out dm */
+       bool is_cur_rdlstate;
+       bool txpower_trackingInit;
+       bool disable_framebursting;
+       bool cck_inch14;
+       bool txpower_tracking;
+       bool useramask;
+       bool rfpath_rxenable[4];
+       bool inform_fw_driverctrldm;
+       bool current_mrc_switch;
+       u8 txpowercount;
+
+       u8 thermalvalue_rxgain;
        u8 thermalvalue_iqk;
        u8 thermalvalue_lck;
        u8 thermalvalue;
        u8 last_dtp_lvl;
+       u8 thermalvalue_avg[AVG_THERMAL_NUM];
+       u8 thermalvalue_avg_index;
+       bool done_txpower;
        u8 dynamic_txhighpower_lvl;     /*Tx high power level */
-       u8 dm_flag;     /*Indicate if each dynamic mechanism's status. */
+       u8 dm_flag;             /*Indicate each dynamic mechanism's status. */
        u8 dm_type;
        u8 txpower_track_control;
-
+       bool interrupt_migration;
+       bool disable_tx_int;
        char ofdm_index[2];
        char cck_index;
+       u8 power_index_backup[6];
 };
 
-#define        EFUSE_MAX_LOGICAL_SIZE                   128
+#define        EFUSE_MAX_LOGICAL_SIZE                  256
 
 struct rtl_efuse {
-       bool bautoLoad_ok;
+       bool autoLoad_ok;
        bool bootfromefuse;
        u16 max_physical_size;
-       u8 contents[EFUSE_MAX_LOGICAL_SIZE];
 
        u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
        u16 efuse_usedbytes;
        u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+       bool efuse_re_pg_sec1flag;
+       u8 efuse_re_pg_data[8];
+#endif
 
        u8 autoload_failflag;
+       u8 autoload_status;
 
        short epromtype;
        u16 eeprom_vid;
@@ -938,69 +1108,90 @@ struct rtl_efuse {
        u8 eeprom_oemid;
        u16 eeprom_channelplan;
        u8 eeprom_version;
+       u8 board_type;
+       u8 external_pa;
 
        u8 dev_addr[6];
 
-       bool b_txpwr_fromeprom;
+       bool txpwr_fromeprom;
+       u8 eeprom_crystalcap;
        u8 eeprom_tssi[2];
-       u8 eeprom_pwrlimit_ht20[3];
-       u8 eeprom_pwrlimit_ht40[3];
-       u8 eeprom_chnlarea_txpwr_cck[2][3];
-       u8 eeprom_chnlarea_txpwr_ht40_1s[2][3];
-       u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3];
-       u8 txpwrlevel_cck[2][14];
-       u8 txpwrlevel_ht40_1s[2][14];   /*For HT 40MHZ pwr */
-       u8 txpwrlevel_ht40_2s[2][14];   /*For HT 40MHZ pwr */
+       u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+       u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+       u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+       u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+       u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+       u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+       u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
+       u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER];   /*For HT 40MHZ pwr */
+       u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER];   /*For HT 40MHZ pwr */
+
+       u8 internal_pa_5g[2];   /* pathA / pathB */
+       u8 eeprom_c9;
+       u8 eeprom_cc;
 
        /*For power group */
-       u8 pwrgroup_ht20[2][14];
-       u8 pwrgroup_ht40[2][14];
-
-       char txpwr_ht20diff[2][14];     /*HT 20<->40 Pwr diff */
-       u8 txpwr_legacyhtdiff[2][14];   /*For HT<->legacy pwr diff */
+       u8 eeprom_pwrgroup[2][3];
+       u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+       u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+       char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
+       /*For HT<->legacy pwr diff*/
+       u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+       u8 txpwr_safetyflag;                    /* Band edge enable flag */
+       u16 eeprom_txpowerdiff;
+       u8 legacy_httxpowerdiff;        /* Legacy to HT rate power diff */
+       u8 antenna_txpwdiff[3];
 
        u8 eeprom_regulatory;
        u8 eeprom_thermalmeter;
-       /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
-       u8 thermalmeter[2];
+       u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
+       u16 tssi_13dbm;
+       u8 crystalcap;          /* CrystalCap. */
+       u8 delta_iqk;
+       u8 delta_lck;
 
        u8 legacy_ht_txpowerdiff;       /*Legacy to HT rate power diff */
-       bool b_apk_thermalmeterignore;
+       bool apk_thermalmeterignore;
+
+       bool b1x1_recvcombine;
+       bool b1ss_support;
+
+       /*channel plan */
+       u8 channel_plan;
 };
 
 struct rtl_ps_ctl {
+       bool pwrdomain_protect;
        bool set_rfpowerstate_inprogress;
-       bool b_in_powersavemode;
+       bool in_powersavemode;
        bool rfchange_inprogress;
-       bool b_swrf_processing;
-       bool b_hwradiooff;
-
-       u32 last_sleep_jiffies;
-       u32 last_awake_jiffies;
-       u32 last_delaylps_stamp_jiffies;
+       bool swrf_processing;
+       bool hwradiooff;
 
        /*
         * just for PCIE ASPM
         * If it supports ASPM, Offset[560h] = 0x40,
         * otherwise Offset[560h] = 0x00.
         * */
-       bool b_support_aspm;
-       bool b_support_backdoor;
+       bool support_aspm;
+       bool support_backdoor;
 
        /*for LPS */
        enum rt_psmode dot11_psmode;    /*Power save mode configured. */
-       bool b_leisure_ps;
-       bool b_fwctrl_lps;
+       bool swctrl_lps;
+       bool leisure_ps;
+       bool fwctrl_lps;
        u8 fwctrl_psmode;
        /*For Fw control LPS mode */
-       u8 b_reg_fwctrl_lps;
+       u8 reg_fwctrl_lps;
        /*Record Fw PS mode status. */
-       bool b_fw_current_inpsmode;
+       bool fw_current_inpsmode;
        u8 reg_max_lps_awakeintvl;
        bool report_linked;
 
        /*for IPS */
-       bool b_inactiveps;
+       bool inactiveps;
 
        u32 rfoff_reason;
 
@@ -1011,8 +1202,26 @@ struct rtl_ps_ctl {
        /*just for PCIE ASPM */
        u8 const_amdpci_aspm;
 
+       bool pwrdown_mode;
+
        enum rf_pwrstate inactive_pwrstate;
        enum rf_pwrstate rfpwr_state;   /*cur power state */
+
+       /* for SW LPS*/
+       bool sw_ps_enabled;
+       bool state;
+       bool state_inap;
+       bool multi_buffered;
+       u16 nullfunc_seq;
+       unsigned int dtim_counter;
+       unsigned int sleep_ms;
+       unsigned long last_sleep_jiffies;
+       unsigned long last_awake_jiffies;
+       unsigned long last_delaylps_stamp_jiffies;
+       unsigned long last_dtim;
+       unsigned long last_beacon;
+       unsigned long last_action;
+       unsigned long last_slept;
 };
 
 struct rtl_stats {
@@ -1038,10 +1247,10 @@ struct rtl_stats {
        s32 recvsignalpower;
        s8 rxpower;             /*in dBm Translate from PWdB */
        u8 signalstrength;      /*in 0-100 index. */
-       u16 b_hwerror:1;
-       u16 b_crc:1;
-       u16 b_icv:1;
-       u16 b_shortpreamble:1;
+       u16 hwerror:1;
+       u16 crc:1;
+       u16 icv:1;
+       u16 shortpreamble:1;
        u16 antenna:1;
        u16 decrypted:1;
        u16 wakeup:1;
@@ -1050,15 +1259,16 @@ struct rtl_stats {
 
        u8 rx_drvinfo_size;
        u8 rx_bufshift;
-       bool b_isampdu;
+       bool isampdu;
+       bool isfirst_ampdu;
        bool rx_is40Mhzpacket;
        u32 rx_pwdb_all;
        u8 rx_mimo_signalstrength[4];   /*in 0~100 index */
        s8 rx_mimo_signalquality[2];
-       bool b_packet_matchbssid;
-       bool b_is_cck;
-       bool b_packet_toself;
-       bool b_packet_beacon;   /*for rssi */
+       bool packet_matchbssid;
+       bool is_cck;
+       bool packet_toself;
+       bool packet_beacon;     /*for rssi */
        char cck_adc_pwdb[4];   /*for rx path selection */
 };
 
@@ -1069,23 +1279,23 @@ struct rt_link_detect {
        u32 num_tx_inperiod;
        u32 num_rx_inperiod;
 
-       bool b_busytraffic;
-       bool b_higher_busytraffic;
-       bool b_higher_busyrxtraffic;
+       bool busytraffic;
+       bool higher_busytraffic;
+       bool higher_busyrxtraffic;
 };
 
 struct rtl_tcb_desc {
-       u8 b_packet_bw:1;
-       u8 b_multicast:1;
-       u8 b_broadcast:1;
-
-       u8 b_rts_stbc:1;
-       u8 b_rts_enable:1;
-       u8 b_cts_enable:1;
-       u8 b_rts_use_shortpreamble:1;
-       u8 b_rts_use_shortgi:1;
+       u8 packet_bw:1;
+       u8 multicast:1;
+       u8 broadcast:1;
+
+       u8 rts_stbc:1;
+       u8 rts_enable:1;
+       u8 cts_enable:1;
+       u8 rts_use_shortpreamble:1;
+       u8 rts_use_shortgi:1;
        u8 rts_sc:1;
-       u8 b_rts_bw:1;
+       u8 rts_bw:1;
        u8 rts_rate;
 
        u8 use_shortgi:1;
@@ -1096,20 +1306,34 @@ struct rtl_tcb_desc {
        u8 ratr_index;
        u8 mac_id;
        u8 hw_rate;
+
+       u8 last_inipkt:1;
+       u8 cmd_or_init:1;
+       u8 queue_index;
+
+       /* early mode */
+       u8 empkt_num;
+       /* array size is the max number of early-mode packets the HW supports */
+       u32 empkt_len[5];
 };
 
 struct rtl_hal_ops {
        int (*init_sw_vars) (struct ieee80211_hw *hw);
        void (*deinit_sw_vars) (struct ieee80211_hw *hw);
+       void (*read_chip_version)(struct ieee80211_hw *hw);
        void (*read_eeprom_info) (struct ieee80211_hw *hw);
        void (*interrupt_recognized) (struct ieee80211_hw *hw,
                                      u32 *p_inta, u32 *p_intb);
        int (*hw_init) (struct ieee80211_hw *hw);
        void (*hw_disable) (struct ieee80211_hw *hw);
+       void (*hw_suspend) (struct ieee80211_hw *hw);
+       void (*hw_resume) (struct ieee80211_hw *hw);
        void (*enable_interrupt) (struct ieee80211_hw *hw);
        void (*disable_interrupt) (struct ieee80211_hw *hw);
        int (*set_network_type) (struct ieee80211_hw *hw,
                                 enum nl80211_iftype type);
+       void (*set_chk_bssid)(struct ieee80211_hw *hw,
+                               bool check_bssid);
        void (*set_bw_mode) (struct ieee80211_hw *hw,
                             enum nl80211_channel_type ch_type);
         u8(*switch_channel) (struct ieee80211_hw *hw);
@@ -1126,23 +1350,26 @@ struct rtl_hal_ops {
                              struct ieee80211_hdr *hdr, u8 *pdesc_tx,
                              struct ieee80211_tx_info *info,
                              struct sk_buff *skb, unsigned int queue_index);
+       void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc,
+                                 u32 buffer_len, bool bIsPsPoll);
        void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
-                                bool b_firstseg, bool b_lastseg,
+                                bool firstseg, bool lastseg,
                                 struct sk_buff *skb);
-        bool(*query_rx_desc) (struct ieee80211_hw *hw,
+       bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       bool (*query_rx_desc) (struct ieee80211_hw *hw,
                               struct rtl_stats *stats,
                               struct ieee80211_rx_status *rx_status,
                               u8 *pdesc, struct sk_buff *skb);
        void (*set_channel_access) (struct ieee80211_hw *hw);
-        bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
+       bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
        void (*dm_watchdog) (struct ieee80211_hw *hw);
        void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
-        bool(*set_rf_power_state) (struct ieee80211_hw *hw,
+       bool (*set_rf_power_state) (struct ieee80211_hw *hw,
                                    enum rf_pwrstate rfpwr_state);
        void (*led_control) (struct ieee80211_hw *hw,
                             enum led_ctl_mode ledaction);
        void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
-        u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+       u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
        void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
        void (*enable_hw_sec) (struct ieee80211_hw *hw);
        void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1150,22 +1377,36 @@ struct rtl_hal_ops {
                         bool is_wepkey, bool clear_all);
        void (*init_sw_leds) (struct ieee80211_hw *hw);
        void (*deinit_sw_leds) (struct ieee80211_hw *hw);
-        u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+       u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
        void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
                           u32 data);
-        u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
+       u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                          u32 regaddr, u32 bitmask);
        void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data);
+       bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
+       void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
+                                           u8 *powerlevel);
+       void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
+                                            u8 *ppowerlevel, u8 channel);
+       bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
+                                          u8 configtype);
+       bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
+                                            u8 configtype);
+       void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
+       void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
+       void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
 };
 
 struct rtl_intf_ops {
        /*com */
+       void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
        int (*adapter_start) (struct ieee80211_hw *hw);
        void (*adapter_stop) (struct ieee80211_hw *hw);
 
        int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
        int (*reset_trx_ring) (struct ieee80211_hw *hw);
+       bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
 
        /*pci */
        void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1179,11 +1420,36 @@ struct rtl_mod_params {
        int sw_crypto;
 };
 
+struct rtl_hal_usbint_cfg {
+       /* data - rx */
+       u32 in_ep_num;
+       u32 rx_urb_num;
+       u32 rx_max_size;
+
+       /* op - rx */
+       void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+       void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+                                    struct sk_buff_head *);
+
+       /* tx */
+       void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+       int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *,
+                              struct sk_buff *);
+       struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+                                               struct sk_buff_head *);
+
+       /* endpoint mapping */
+       int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
+       u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+};
+
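
(Minimal sketch, not from the patch; every name and value below is hypothetical -- only the field names come from the struct above. It shows how a chip-specific module might populate the new USB hooks before plugging them into rtl_hal_cfg.)

/* Hypothetical per-chip RX handler -- illustrative only. */
static void example_usb_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        /* chip-specific RX parsing would go here */
}

static struct rtl_hal_usbint_cfg example_usbint_cfg = {
        .in_ep_num      = 1,            /* hypothetical */
        .rx_urb_num     = 8,            /* hypothetical */
        .rx_max_size    = 15360,        /* hypothetical */
        .usb_rx_hdl     = example_usb_rx_hdl,
};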
 struct rtl_hal_cfg {
+       u8 bar_id;
        char *name;
        char *fw_name;
        struct rtl_hal_ops *ops;
        struct rtl_mod_params *mod_params;
+       struct rtl_hal_usbint_cfg *usb_interface_cfg;
 
        /*this map is used for some registers or vars
           defined in HAL but used in MAIN */
@@ -1202,6 +1468,11 @@ struct rtl_locks {
        spinlock_t rf_ps_lock;
        spinlock_t rf_lock;
        spinlock_t lps_lock;
+       spinlock_t waitq_lock;
+       spinlock_t tx_urb_lock;
+
+       /*Dual mac*/
+       spinlock_t cck_and_rw_pagea_lock;
 };
 
 struct rtl_works {
@@ -1218,12 +1489,20 @@ struct rtl_works {
        struct workqueue_struct *rtl_wq;
        struct delayed_work watchdog_wq;
        struct delayed_work ips_nic_off_wq;
+
+       /* For SW LPS */
+       struct delayed_work ps_work;
+       struct delayed_work ps_rfon_wq;
 };
 
 struct rtl_debug {
        u32 dbgp_type[DBGP_TYPE_MAX];
        u32 global_debuglevel;
        u64 global_debugcomponents;
+
+       /* add for proc debug */
+       struct proc_dir_entry *proc_dir;
+       char proc_name[20];
 };
 
 struct rtl_priv {
@@ -1274,6 +1553,91 @@ struct rtl_priv {
 #define rtl_efuse(rtlpriv)     (&((rtlpriv)->efuse))
 #define rtl_psc(rtlpriv)       (&((rtlpriv)->psc))
 
+
+/***************************************
+    Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+       ANT_X2 = 0,
+       ANT_X1 = 1,
+};
+
+enum bt_co_type {
+       BT_2WIRE = 0,
+       BT_ISSC_3WIRE = 1,
+       BT_ACCEL = 2,
+       BT_CSR_BC4 = 3,
+       BT_CSR_BC8 = 4,
+       BT_RTL8756 = 5,
+};
+
+enum bt_cur_state {
+       BT_OFF = 0,
+       BT_ON = 1,
+};
+
+enum bt_service_type {
+       BT_SCO = 0,
+       BT_A2DP = 1,
+       BT_HID = 2,
+       BT_HID_IDLE = 3,
+       BT_SCAN = 4,
+       BT_IDLE = 5,
+       BT_OTHER_ACTION = 6,
+       BT_BUSY = 7,
+       BT_OTHERBUSY = 8,
+       BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+       BT_RADIO_SHARED = 0,
+       BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+       /* EEPROM BT info. */
+       u8 eeprom_bt_coexist;
+       u8 eeprom_bt_type;
+       u8 eeprom_bt_ant_num;
+       u8 eeprom_bt_ant_isolation;
+       u8 eeprom_bt_radio_shared;
+
+       u8 bt_coexistence;
+       u8 bt_ant_num;
+       u8 bt_coexist_type;
+       u8 bt_state;
+       u8 bt_cur_state;        /* 0:off, 1:on (see enum bt_cur_state) */
+       u8 bt_ant_isolation;    /* 0:good, 1:bad */
+       u8 bt_pape_ctrl;        /* 0:SW, 1:SW/HW dynamic */
+       u8 bt_service;
+       u8 bt_radio_shared_type;
+       u8 bt_rfreg_origin_1e;
+       u8 bt_rfreg_origin_1f;
+       u8 bt_rssi_state;
+       u32 ratio_tx;
+       u32 ratio_pri;
+       u32 bt_edca_ul;
+       u32 bt_edca_dl;
+
+       bool b_init_set;
+       bool b_bt_busy_traffic;
+       bool b_bt_traffic_mode_set;
+       bool b_bt_non_traffic_mode_set;
+
+       bool b_fw_coexist_all_off;
+       bool b_sw_coexist_all_off;
+       u32 current_state;
+       u32 previous_state;
+       u8 bt_pre_rssi_state;
+
+       u8 b_reg_bt_iso;
+       u8 b_reg_bt_sco;
+
+};
+
+
 /****************************************
        mem access macro define start
        Call endian free function when
@@ -1281,7 +1645,7 @@ struct rtl_priv {
        2. Before write integer to IO.
        3. After read integer from IO.
 ****************************************/
-/* Convert little data endian to host */
+/* Convert little-endian data to host ordering */
 #define EF1BYTE(_val)          \
        ((u8)(_val))
 #define EF2BYTE(_val)          \
@@ -1289,27 +1653,21 @@ struct rtl_priv {
 #define EF4BYTE(_val)          \
        (le32_to_cpu(_val))
 
-/* Read data from memory */
-#define READEF1BYTE(_ptr)      \
-       EF1BYTE(*((u8 *)(_ptr)))
+/* Read le16 data from memory and convert to host ordering */
 #define READEF2BYTE(_ptr)      \
        EF2BYTE(*((u16 *)(_ptr)))
-#define READEF4BYTE(_ptr)      \
-       EF4BYTE(*((u32 *)(_ptr)))
 
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val)       \
-       (*((u8 *)(_ptr))) = EF1BYTE(_val)
+/* Write le16 data to memory in host ordering */
 #define WRITEEF2BYTE(_ptr, _val)       \
        (*((u16 *)(_ptr))) = EF2BYTE(_val)
-#define WRITEEF4BYTE(_ptr, _val)       \
-       (*((u32 *)(_ptr))) = EF4BYTE(_val)
-
-/*Example:
-BIT_LEN_MASK_32(0) => 0x00000000
-BIT_LEN_MASK_32(1) => 0x00000001
-BIT_LEN_MASK_32(2) => 0x00000003
-BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
 #define BIT_LEN_MASK_32(__bitlen)       \
        (0xFFFFFFFF >> (32 - (__bitlen)))
 #define BIT_LEN_MASK_16(__bitlen)       \
@@ -1317,9 +1675,11 @@ BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
 #define BIT_LEN_MASK_8(__bitlen) \
        (0xFF >> (8 - (__bitlen)))
 
-/*Example:
-BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
-BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
 #define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
        (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
 #define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
@@ -1328,8 +1688,9 @@ BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
        (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
 
 /*Description:
-Return 4-byte value in host byte ordering from
-4-byte pointer in little-endian system.*/
+ * Return 4-byte value in host byte ordering from
+ * 4-byte pointer in little-endian system.
+ */
 #define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
        (EF4BYTE(*((u32 *)(__pstart))))
 #define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
@@ -1337,28 +1698,10 @@ Return 4-byte value in host byte ordering from
 #define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
        (EF1BYTE(*((u8 *)(__pstart))))
 
-/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset))  & \
-               BIT_LEN_MASK_32(__bitlen) \
-       )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
-               BIT_LEN_MASK_16(__bitlen) \
-       )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
-               BIT_LEN_MASK_8(__bitlen) \
-       )
-
-/*Description:
-Mask subfield (continuous bits in little-endian) of 4-byte value
-and return the result in 4-byte value in host byte ordering.*/
+/* Description:
+ * Mask subfield (continuous bits in little-endian) of 4-byte value
+ * and return the result in 4-byte value in host byte ordering.
+ */
 #define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
        ( \
                LE_P4BYTE_TO_HOST_4BYTE(__pstart)  & \
@@ -1375,20 +1718,9 @@ and return the result in 4-byte value in host byte ordering.*/
                (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
        )
 
-/*Description:
-Set subfield of little-endian 4-byte value to specified value. */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u32 *)(__pstart)) = EF4BYTE \
-       ( \
-               LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
-               ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
-       );
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u16 *)(__pstart)) = EF2BYTE \
-       ( \
-               LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
-               ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
-       );
+/* Description:
+ * Set subfield of little-endian 4-byte value to specified value.
+ */
 #define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
        *((u8 *)(__pstart)) = EF1BYTE \
        ( \
@@ -1400,13 +1732,14 @@ Set subfield of little-endian 4-byte value to specified value.  */
        mem access macro define end
 ****************************************/
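
(Worked example, illustrative only: the little-endian subfield helpers kept above in action, clearing and rewriting a 2-bit field at bit offset 4 of a byte.)

/*
 * Sketch only: with *p == 0xff on entry, SET_BITS_TO_LE_1BYTE(p, 4, 2, 0x1)
 * leaves *p == 0xdf -- bits 5:4 are cleared and rewritten as 01b, and all
 * other bits are preserved.
 */
static inline void example_set_2bit_field(u8 *p)
{
        SET_BITS_TO_LE_1BYTE(p, 4, 2, 0x1);
}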
 
-#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
+#define byte(x, n) (((x) >> (8 * (n))) & 0xff)
+
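(Quick illustration, not part of the patch: the new byte() helper extracts the n-th least-significant byte of a value.)

/* Sketch only: byte(0x11223344, 2) == 0x22. */
static inline u8 example_third_byte(u32 val)
{
        return byte(val, 2);
}
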
 #define RTL_WATCH_DOG_TIME     2000
 #define MSECS(t)               msecs_to_jiffies(t)
-#define WLAN_FC_GET_VERS(fc)   ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc)   ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc)  ((fc) & IEEE80211_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc)  ((fc) & IEEE80211_FCTL_MOREDATA)
+#define WLAN_FC_GET_VERS(fc)   (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc)   (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc)  (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc)  (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
 #define SEQ_TO_SN(seq)         (((seq) & IEEE80211_SCTL_SEQ) >> 4)
 #define SN_TO_SEQ(ssn)         (((ssn) << 4) & IEEE80211_SCTL_SEQ)
 #define MAX_SN                 ((IEEE80211_SCTL_SEQ) >> 4)
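
(Sketch, not from the patch: with the le16-aware helpers above, the __le16 frame-control field can be passed straight from the 802.11 header.)

/* Illustrative only: classify a frame with the helpers above. */
static inline bool example_is_data_with_moredata(struct ieee80211_hdr *hdr)
{
        return WLAN_FC_GET_TYPE(hdr->frame_control) == IEEE80211_FTYPE_DATA &&
               WLAN_FC_MORE_DATA(hdr->frame_control);
}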
@@ -1420,6 +1753,8 @@ Set subfield of little-endian 4-byte value to specified value.    */
 #define        RT_RF_OFF_LEVL_FW_32K           BIT(5)  /*FW in 32k */
 /*Always enable ASPM and Clock Req in initialization.*/
 #define        RT_RF_PS_LEVEL_ALWAYS_ASPM      BIT(6)
+/* Set the ASPM PS level regardless of RFOFF or SLEEP */
+#define        RT_PS_LEVEL_ASPM                BIT(7)
 /*When LPS is on, disable 2R if no packet is received or transmitted.*/
 #define        RT_RF_LPS_DISALBE_2R            BIT(30)
 #define        RT_RF_LPS_LEVEL_ASPM            BIT(31) /*LPS with ASPM */
@@ -1433,15 +1768,6 @@ Set subfield of little-endian 4-byte value to specified value.   */
 #define container_of_dwork_rtl(x, y, z) \
        container_of(container_of(x, struct delayed_work, work), y, z)
 
-#define FILL_OCTET_STRING(_os, _octet, _len)   \
-               (_os).octet = (u8 *)(_octet);           \
-               (_os).length = (_len);
-
-#define CP_MACADDR(des, src)   \
-       ((des)[0] = (src)[0], (des)[1] = (src)[1],\
-       (des)[2] = (src)[2], (des)[3] = (src)[3],\
-       (des)[4] = (src)[4], (des)[5] = (src)[5])
-
 static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
 {
        return rtlpriv->io.read8_sync(rtlpriv, addr);
index 64a0214..ef8370e 100644 (file)
@@ -776,6 +776,31 @@ out:
        return ret;
 }
 
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+                       u8 depth, enum wl1251_acx_low_rssi_type type)
+{
+       struct acx_low_rssi *rssi;
+       int ret;
+
+       wl1251_debug(DEBUG_ACX, "acx low rssi");
+
+       rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
+       if (!rssi)
+               return -ENOMEM;
+
+       rssi->threshold = threshold;
+       rssi->weight = weight;
+       rssi->depth = depth;
+       rssi->type = type;
+
+       ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
+       if (ret < 0)
+               wl1251_warning("failed to set low rssi threshold: %d", ret);
+
+       kfree(rssi);
+       return ret;
+}
+
 int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
 {
        struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
        return ret;
 }
 
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+                         u8 max_consecutive)
+{
+       struct wl1251_acx_bet_enable *acx;
+       int ret;
+
+       wl1251_debug(DEBUG_ACX, "acx bet enable");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->enable = mode;
+       acx->max_consecutive = max_consecutive;
+
+       ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1251_warning("wl1251 acx bet enable failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifs, u16 txop)
 {
index efcc3aa..c2ba100 100644 (file)
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
        u8 pad[2];
 } __packed;
 
+enum wl1251_acx_low_rssi_type {
+       /*
+        * The event is a "Level" indication which keeps triggering
+        * as long as the average RSSI is below the threshold.
+        */
+       WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
+
+       /*
+        * The event is an "Edge" indication which triggers
+        * only when the RSSI threshold is crossed from above.
+        */
+       WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
+};
+
+struct acx_low_rssi {
+       struct acx_header header;
+
+       /*
+        * The threshold (in dBm) below which (or, after a low RSSI
+        * indication, above which) the firmware generates an interrupt
+        * to the host. This parameter is signed.
+        */
+       s8 threshold;
+
+       /*
+        * The weight of the current RSSI sample, before adding the new
+        * sample, that is used to calculate the average RSSI.
+        */
+       u8 weight;
+
+       /*
+        * The number of Beacons/Probe response frames that will be
+        * received before issuing the Low or Regained RSSI event.
+        */
+       u8 depth;
+
+       /*
+        * Configures how the Low RSSI Event is triggered. Refer to
+        * enum wl1251_acx_low_rssi_type for more.
+        */
+       u8 type;
+} __packed;
+
 struct acx_beacon_filter_option {
        struct acx_header header;
 
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
        u8  padding;
 } __packed;
 
+enum wl1251_acx_bet_mode {
+       WL1251_ACX_BET_DISABLE = 0,
+       WL1251_ACX_BET_ENABLE = 1,
+};
+
+struct wl1251_acx_bet_enable {
+       struct acx_header header;
+
+       /*
+        * Specifies if beacon early termination procedure is enabled or
+        * disabled, see enum wl1251_acx_bet_mode.
+        */
+       u8 enable;
+
+       /*
+        * Specifies the maximum number of consecutive beacons that may be
+        * early terminated. After this number is reached at least one full
+        * beacon must be correctly received in FW before beacon ET
+        * resumes. Range 0 - 255.
+        */
+       u8 max_consecutive;
+
+       u8 padding[2];
+} __packed;
+
 struct wl1251_acx_ac_cfg {
        struct acx_header header;
 
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
 int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
 int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
 int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+                       u8 depth, enum wl1251_acx_low_rssi_type type);
 int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
 int wl1251_acx_cts_protect(struct wl1251 *wl,
                            enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
 int wl1251_acx_rate_policies(struct wl1251 *wl);
 int wl1251_acx_mem_cfg(struct wl1251 *wl);
 int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+                         u8 max_consecutive);
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifs, u16 txop);
 int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
index 712372e..dfc4579 100644 (file)
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                }
        }
 
+       if (wl->vif && wl->rssi_thold) {
+               if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
+                       wl1251_debug(DEBUG_EVENT,
+                                    "ROAMING_TRIGGER_LOW_RSSI_EVENT");
+                       ieee80211_cqm_rssi_notify(wl->vif,
+                               NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+                               GFP_KERNEL);
+               }
+
+               if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
+                       wl1251_debug(DEBUG_EVENT,
+                                    "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
+                       ieee80211_cqm_rssi_notify(wl->vif,
+                               NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+                               GFP_KERNEL);
+               }
+       }
+
        return 0;
 }
 
index 012e1a4..12c9e63 100644 (file)
@@ -375,7 +375,7 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1251 *wl = hw->priv;
        unsigned long flags;
@@ -401,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                wl->tx_queue_stopped = true;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
-
-       return NETDEV_TX_OK;
 }
 
 static int wl1251_op_start(struct ieee80211_hw *hw)
@@ -502,6 +500,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
        wl->psm = 0;
        wl->tx_queue_stopped = false;
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+       wl->rssi_thold = 0;
        wl->channel = WL1251_DEFAULT_CHANNEL;
 
        wl1251_debugfs_reset(wl);
@@ -959,6 +958,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
+       if (changed & BSS_CHANGED_CQM) {
+               ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
+                                         WL1251_DEFAULT_LOW_RSSI_WEIGHT,
+                                         WL1251_DEFAULT_LOW_RSSI_DEPTH,
+                                         WL1251_ACX_LOW_RSSI_TYPE_EDGE);
+               if (ret < 0)
+                       goto out;
+               wl->rssi_thold = bss_conf->cqm_rssi_thold;
+       }
+
        if (changed & BSS_CHANGED_BSSID) {
                memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
@@ -1039,6 +1048,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_BEACON) {
                beacon = ieee80211_beacon_get(hw, vif);
+               if (!beacon)
+                       goto out_sleep;
+
                ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
                                              beacon->len);
 
@@ -1310,9 +1322,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
        wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_BEACON_FILTER |
-               IEEE80211_HW_SUPPORTS_UAPSD;
+               IEEE80211_HW_SUPPORTS_UAPSD |
+               IEEE80211_HW_SUPPORTS_CQM_RSSI;
 
-       wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+       wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                        BIT(NL80211_IFTYPE_ADHOC);
        wl->hw->wiphy->max_scan_ssids = 1;
        wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
 
@@ -1374,6 +1388,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
        wl->psm_requested = false;
        wl->tx_queue_stopped = false;
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+       wl->rssi_thold = 0;
        wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
        wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
        wl->vif = NULL;
index 5ed47c8..9cc5147 100644 (file)
@@ -58,7 +58,6 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
        unsigned long delay;
 
        if (wl->psm) {
-               cancel_delayed_work(&wl->elp_work);
                delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
                ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
        }
@@ -69,6 +68,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
        unsigned long timeout, start;
        u32 elp_reg;
 
+       if (delayed_work_pending(&wl->elp_work))
+               cancel_delayed_work(&wl->elp_work);
+
        if (!wl->elp)
                return 0;
 
@@ -102,38 +104,6 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
        return 0;
 }
 
-static int wl1251_ps_set_elp(struct wl1251 *wl, bool enable)
-{
-       int ret;
-
-       if (enable) {
-               wl1251_debug(DEBUG_PSM, "sleep auth psm/elp");
-
-               ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
-               if (ret < 0)
-                       return ret;
-
-               wl1251_ps_elp_sleep(wl);
-       } else {
-               wl1251_debug(DEBUG_PSM, "sleep auth cam");
-
-               /*
-                * When the target is in ELP, we can only
-                * access the ELP control register. Thus,
-                * we have to wake the target up before
-                * changing the power authorization.
-                */
-
-               wl1251_ps_elp_wakeup(wl);
-
-               ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
 int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
 {
        int ret;
@@ -153,11 +123,16 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
                if (ret < 0)
                        return ret;
 
+               ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
+                                           WL1251_DEFAULT_BET_CONSECUTIVE);
+               if (ret < 0)
+                       return ret;
+
                ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
                if (ret < 0)
                        return ret;
 
-               ret = wl1251_ps_set_elp(wl, true);
+               ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
                if (ret < 0)
                        return ret;
 
@@ -166,7 +141,14 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
        case STATION_ACTIVE_MODE:
        default:
                wl1251_debug(DEBUG_PSM, "leaving psm");
-               ret = wl1251_ps_set_elp(wl, false);
+
+               ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
+               if (ret < 0)
+                       return ret;
+
+               /* disable BET */
+               ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
+                                           WL1251_DEFAULT_BET_CONSECUTIVE);
                if (ret < 0)
                        return ret;
 
index efa5360..c1b3b3f 100644 (file)
@@ -78,9 +78,10 @@ static void wl1251_rx_status(struct wl1251 *wl,
         */
        wl->noise = desc->rssi - desc->snr / 2;
 
-       status->freq = ieee80211_channel_to_frequency(desc->channel);
+       status->freq = ieee80211_channel_to_frequency(desc->channel,
+                                                     status->band);
 
-       status->flag |= RX_FLAG_TSFT;
+       status->flag |= RX_FLAG_MACTIME_MPDU;
 
        if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
                status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
        if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
                status->flag |= RX_FLAG_FAILED_FCS_CRC;
 
+       switch (desc->rate) {
+               /* skip 1 and 12 Mbps because they have the same value 0x0a */
+       case RATE_2MBPS:
+               status->rate_idx = 1;
+               break;
+       case RATE_5_5MBPS:
+               status->rate_idx = 2;
+               break;
+       case RATE_11MBPS:
+               status->rate_idx = 3;
+               break;
+       case RATE_6MBPS:
+               status->rate_idx = 4;
+               break;
+       case RATE_9MBPS:
+               status->rate_idx = 5;
+               break;
+       case RATE_18MBPS:
+               status->rate_idx = 7;
+               break;
+       case RATE_24MBPS:
+               status->rate_idx = 8;
+               break;
+       case RATE_36MBPS:
+               status->rate_idx = 9;
+               break;
+       case RATE_48MBPS:
+               status->rate_idx = 10;
+               break;
+       case RATE_54MBPS:
+               status->rate_idx = 11;
+               break;
+       }
+
+       /* for 1 and 12 Mbps we have to check the modulation */
+       if (desc->rate == RATE_1MBPS) {
+               if (!(desc->mod_pre & OFDM_RATE_BIT))
+                       /* CCK -> RATE_1MBPS */
+                       status->rate_idx = 0;
+               else
+                       /* OFDM -> RATE_12MBPS */
+                       status->rate_idx = 6;
+       }
 
-       /* FIXME: set status->rate_idx */
+       if (desc->mod_pre & SHORT_PREAMBLE_BIT)
+               status->flag |= RX_FLAG_SHORTPRE;
 }
 
 static void wl1251_rx_body(struct wl1251 *wl,
index 554b4f9..28121c5 100644 (file)
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
                wl1251_debug(DEBUG_TX, "skb offset %d", offset);
 
                /* check whether the current skb can be used */
-               if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
-                       unsigned char *src = skb->data;
+               if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
+                       struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
+                                                                GFP_KERNEL);
+
+                       if (unlikely(newskb == NULL)) {
+                               wl1251_error("Can't allocate skb!");
+                               return -EINVAL;
+                       }
 
-                       /* align the buffer on a 4-byte boundary */
+                       tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
+
+                       dev_kfree_skb_any(skb);
+                       wl->tx_frames[tx_hdr->id] = skb = newskb;
+
+                       offset = (4 - (long)skb->data) & 0x03;
+                       wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
+               }
+
+               /* align the buffer on a 4-byte boundary */
+               if (offset) {
+                       unsigned char *src = skb->data;
                        skb_reserve(skb, offset);
                        memmove(skb->data, src, skb->len);
                        tx_hdr = (struct tx_double_buffer_desc *) skb->data;
-               } else {
-                       wl1251_info("No handler, fixme!");
-                       return -EINVAL;
                }
        }
 
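(Hedged aside on the realignment above, not part of the patch: (4 - (long)skb->data) & 0x03 is the number of padding bytes needed to reach the next 32-bit boundary, and 0 when the buffer is already aligned.)

/* Illustrative helper only. */
static inline unsigned long example_align4_offset(const void *buf)
{
        /* e.g. an address ending in ...2 needs 2 bytes; aligned -> 0 */
        return (4 - (unsigned long)buf) & 0x03;
}
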
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
 {
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
-       int hdrlen, ret;
+       int hdrlen;
        u8 *frame;
 
        skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
        ieee80211_tx_status(wl->hw, skb);
 
        wl->tx_frames[result->id] = NULL;
-
-       if (wl->tx_queue_stopped) {
-               wl1251_debug(DEBUG_TX, "cb: queue was stopped");
-
-               skb = skb_dequeue(&wl->tx_queue);
-
-               /* The skb can be NULL because tx_work might have been
-                  scheduled before the queue was stopped making the
-                  queue empty */
-
-               if (skb) {
-                       ret = wl1251_tx_frame(wl, skb);
-                       if (ret == -EBUSY) {
-                               /* firmware buffer is still full */
-                               wl1251_debug(DEBUG_TX, "cb: fw buffer "
-                                            "still full");
-                               skb_queue_head(&wl->tx_queue, skb);
-                               return;
-                       } else if (ret < 0) {
-                               dev_kfree_skb(skb);
-                               return;
-                       }
-               }
-
-               wl1251_debug(DEBUG_TX, "cb: waking queues");
-               ieee80211_wake_queues(wl->hw);
-               wl->tx_queue_stopped = false;
-       }
 }
 
 /* Called upon reception of a TX complete interrupt */
 void wl1251_tx_complete(struct wl1251 *wl)
 {
-       int i, result_index, num_complete = 0;
+       int i, result_index, num_complete = 0, queue_len;
        struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
        unsigned long flags;
 
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
                }
        }
 
-       if (wl->tx_queue_stopped
-           &&
-           skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
+       queue_len = skb_queue_len(&wl->tx_queue);
 
-               /* firmware buffer has space, restart queues */
+       if ((num_complete > 0) && (queue_len > 0)) {
+               /* firmware buffer has space, reschedule tx_work */
+               wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       }
+
+       if (wl->tx_queue_stopped &&
+           queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+               /* tx_queue has space, restart queues */
                wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
                spin_lock_irqsave(&wl->wl_lock, flags);
                ieee80211_wake_queues(wl->hw);
                wl->tx_queue_stopped = false;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
-
        }
 
        /* Every completed frame needs to be acknowledged */
index c0ce2c8..bb23cd5 100644 (file)
@@ -370,6 +370,8 @@ struct wl1251 {
        /* in dBm */
        int power_level;
 
+       int rssi_thold;
+
        struct wl1251_stats stats;
        struct wl1251_debugfs debugfs;
 
@@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
 
 #define WL1251_DEFAULT_CHANNEL 0
 
+#define WL1251_DEFAULT_BET_CONSECUTIVE 10
+
 #define CHIP_ID_1251_PG10                 (0x7010101)
 #define CHIP_ID_1251_PG11                 (0x7020101)
 #define CHIP_ID_1251_PG12                 (0x7030101)
@@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
 #define WL1251_PART_WORK_REG_START     REGISTERS_BASE
 #define WL1251_PART_WORK_REG_SIZE      REGISTERS_WORK_SIZE
 
+#define WL1251_DEFAULT_LOW_RSSI_WEIGHT          10
+#define WL1251_DEFAULT_LOW_RSSI_DEPTH           10
+
 #endif
index 1846280..1417b14 100644 (file)
@@ -54,7 +54,6 @@
 
 /* This really should be 8, but not for our firmware */
 #define MAX_SUPPORTED_RATES 32
-#define COUNTRY_STRING_LEN 3
 #define MAX_COUNTRY_TRIPLETS 32
 
 /* Headers */
@@ -98,7 +97,7 @@ struct country_triplet {
 
 struct wl12xx_ie_country {
        struct wl12xx_ie_header header;
-       u8 country_string[COUNTRY_STRING_LEN];
+       u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
        struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
 } __packed;
 
index 0e65bce..692ebff 100644 (file)
@@ -54,7 +54,7 @@ config WL12XX_SDIO
 
 config WL12XX_SDIO_TEST
        tristate "TI wl12xx SDIO testing support"
-       depends on WL12XX && MMC
+       depends on WL12XX && MMC && WL12XX_SDIO
        default n
        ---help---
          This module adds support for the SDIO bus testing with the
index cc4068d..a3db755 100644 (file)
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
        return 0;
 }
 
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
 {
-       struct acx_rate_policy *acx;
-       struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+       struct acx_sta_rate_policy *acx;
+       struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
        int idx = 0;
        int ret = 0;
 
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
 
        acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
 
+       wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
+               acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
+               acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
+
        ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
        if (ret < 0) {
                wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
        return ret;
 }
 
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+                     u8 idx)
+{
+       struct acx_ap_rate_policy *acx;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_ACX, "acx ap rate policy");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
+       acx->rate_policy.short_retry_limit = c->short_retry_limit;
+       acx->rate_policy.long_retry_limit = c->long_retry_limit;
+       acx->rate_policy.aflags = c->aflags;
+
+       acx->rate_policy_idx = cpu_to_le32(idx);
+
+       ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("Setting of ap rate policy failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
 int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifsn, u16 txop)
 {
@@ -915,9 +951,9 @@ out:
        return ret;
 }
 
-int wl1271_acx_mem_cfg(struct wl1271 *wl)
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
 {
-       struct wl1271_acx_config_memory *mem_conf;
+       struct wl1271_acx_ap_config_memory *mem_conf;
        int ret;
 
        wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -929,10 +965,10 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
        }
 
        /* memory config */
-       mem_conf->num_stations = DEFAULT_NUM_STATIONS;
-       mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
-       mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
-       mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
+       mem_conf->num_stations = wl->conf.mem.num_stations;
+       mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+       mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+       mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
        mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
 
        ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +983,45 @@ out:
        return ret;
 }
 
-int wl1271_acx_init_mem_config(struct wl1271 *wl)
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
 {
+       struct wl1271_acx_sta_config_memory *mem_conf;
        int ret;
 
-       ret = wl1271_acx_mem_cfg(wl);
-       if (ret < 0)
-               return ret;
+       wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
+
+       mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
+       if (!mem_conf) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* memory config */
+       mem_conf->num_stations = wl->conf.mem.num_stations;
+       mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+       mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+       mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
+       mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
+       mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
+       mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
+       mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
+       mem_conf->tx_min = wl->conf.mem.tx_min;
+
+       ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
+                                  sizeof(*mem_conf));
+       if (ret < 0) {
+               wl1271_warning("wl1271 mem config failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(mem_conf);
+       return ret;
+}
+
+int wl1271_acx_init_mem_config(struct wl1271 *wl)
+{
+       int ret;
 
        wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
                                     GFP_KERNEL);
@@ -1233,6 +1301,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
        struct wl1271_acx_ht_capabilities *acx;
        u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        int ret = 0;
+       u32 ht_capabilites = 0;
 
        wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
 
@@ -1244,27 +1313,26 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
 
        /* Allow HT Operation ? */
        if (allow_ht_operation) {
-               acx->ht_capabilites =
+               ht_capabilites =
                        WL1271_ACX_FW_CAP_HT_OPERATION;
                if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
-                       acx->ht_capabilites |=
+                       ht_capabilites |=
                                WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
                if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
-                       acx->ht_capabilites |=
+                       ht_capabilites |=
                                WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
                if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
-                       acx->ht_capabilites |=
+                       ht_capabilites |=
                                WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
 
                /* get data from A-MPDU parameters field */
                acx->ampdu_max_length = ht_cap->ampdu_factor;
                acx->ampdu_min_spacing = ht_cap->ampdu_density;
-
-               memcpy(acx->mac_address, mac_address, ETH_ALEN);
-       } else { /* HT operations are not allowed */
-               acx->ht_capabilites = 0;
        }
 
+       memcpy(acx->mac_address, mac_address, ETH_ALEN);
+       acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+
        ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
        if (ret < 0) {
                wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1293,7 +1361,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
        acx->ht_protection =
                (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
        acx->rifs_mode = 0;
-       acx->gf_protection = 0;
+       acx->gf_protection =
+               !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
        acx->ht_tx_burst_limit = 0;
        acx->dual_cts_protection = 0;
 
@@ -1309,6 +1378,91 @@ out:
        return ret;
 }
 
+/* Configure BA session initiator/receiver parameters setting in the FW. */
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+                              enum ieee80211_back_parties direction,
+                              u8 tid_index, u8 policy)
+{
+       struct wl1271_acx_ba_session_policy *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx ba session setting");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* ANY role */
+       acx->role_id = 0xff;
+       acx->tid = tid_index;
+       acx->enable = policy;
+       acx->ba_direction = direction;
+
+       switch (direction) {
+       case WLAN_BACK_INITIATOR:
+               acx->win_size = wl->conf.ht.tx_ba_win_size;
+               acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
+               break;
+       case WLAN_BACK_RECIPIENT:
+               acx->win_size = RX_BA_WIN_SIZE;
+               acx->inactivity_timeout = 0;
+               break;
+       default:
+               wl1271_error("Incorrect acx command id=%x\n", direction);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = wl1271_cmd_configure(wl,
+                                  ACX_BA_SESSION_POLICY_CFG,
+                                  acx,
+                                  sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx ba session setting failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
+/* setup BA session receiver setting in the FW. */
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+                                       bool enable)
+{
+       struct wl1271_acx_ba_receiver_setup *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* Single link for now */
+       acx->link_id = 1;
+       acx->tid = tid_index;
+       acx->enable = enable;
+       acx->win_size = 0;
+       acx->ssn = ssn;
+
+       ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
+                                  sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx ba receiver session failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
 {
        struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1488,82 @@ out:
        kfree(tsf_info);
        return ret;
 }
+
+int wl1271_acx_max_tx_retry(struct wl1271 *wl)
+{
+       struct wl1271_acx_max_tx_retry *acx = NULL;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx max tx retry");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx)
+               return -ENOMEM;
+
+       acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
+
+       ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx max tx retry failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
+int wl1271_acx_config_ps(struct wl1271 *wl)
+{
+       struct wl1271_acx_config_ps *config_ps;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx config ps");
+
+       config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
+       if (!config_ps) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
+       config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
+       config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+
+       ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
+                                  sizeof(*config_ps));
+
+       if (ret < 0) {
+               wl1271_warning("acx config ps failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(config_ps);
+       return ret;
+}
+
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+{
+       struct wl1271_acx_inconnection_sta *acx = NULL;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx)
+               return -ENOMEM;
+
+       memcpy(acx->addr, addr, ETH_ALEN);
+
+       ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
+                                  acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx set inconnaction sta failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
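The two BA-session helpers added above are typically driven from mac80211's ampdu_action callback; a minimal sketch of how the RX side might be wired up (hypothetical helper, for illustration only -- the real main.c wiring is outside this hunk):

	static int example_ampdu_rx(struct wl1271 *wl,
				    enum ieee80211_ampdu_mlme_action action,
				    u8 tid, u16 ssn)
	{
		/* forward RX BlockAck start/stop to the new ACX helper */
		switch (action) {
		case IEEE80211_AMPDU_RX_START:
			return wl1271_acx_set_ba_receiver_session(wl, tid, ssn, true);
		case IEEE80211_AMPDU_RX_STOP:
			return wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
		default:
			return -EINVAL;
		}
	}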
index 7bd8e4d..dd19b01 100644 (file)
@@ -133,7 +133,6 @@ enum {
 
 #define DEFAULT_UCAST_PRIORITY          0
 #define DEFAULT_RX_Q_PRIORITY           0
-#define DEFAULT_NUM_STATIONS            1
 #define DEFAULT_RXQ_PRIORITY            0 /* low 0 .. 15 high  */
 #define DEFAULT_RXQ_TYPE                0x07    /* All frames, Data/Ctrl/Mgmt */
 #define TRACE_BUFFER_MAX_SIZE           256
@@ -747,13 +746,23 @@ struct acx_rate_class {
 #define ACX_TX_BASIC_RATE      0
 #define ACX_TX_AP_FULL_RATE    1
 #define ACX_TX_RATE_POLICY_CNT 2
-struct acx_rate_policy {
+struct acx_sta_rate_policy {
        struct acx_header header;
 
        __le32 rate_class_cnt;
        struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
 } __packed;
 
+
+#define ACX_TX_AP_MODE_MGMT_RATE 4
+#define ACX_TX_AP_MODE_BCST_RATE 5
+struct acx_ap_rate_policy {
+       struct acx_header header;
+
+       __le32 rate_policy_idx;
+       struct acx_rate_class rate_policy;
+} __packed;
+
 struct acx_ac_cfg {
        struct acx_header header;
        u8 ac;
@@ -787,12 +796,9 @@ struct acx_tx_config_options {
        __le16 tx_compl_threshold;   /* number of packets */
 } __packed;
 
-#define ACX_RX_MEM_BLOCKS     70
-#define ACX_TX_MIN_MEM_BLOCKS 40
 #define ACX_TX_DESCRIPTORS    32
-#define ACX_NUM_SSID_PROFILES 1
 
-struct wl1271_acx_config_memory {
+struct wl1271_acx_ap_config_memory {
        struct acx_header header;
 
        u8 rx_mem_block_num;
@@ -802,6 +808,20 @@ struct wl1271_acx_config_memory {
        __le32 total_tx_descriptors;
 } __packed;
 
+struct wl1271_acx_sta_config_memory {
+       struct acx_header header;
+
+       u8 rx_mem_block_num;
+       u8 tx_min_mem_block_num;
+       u8 num_stations;
+       u8 num_ssid_profiles;
+       __le32 total_tx_descriptors;
+       u8 dyn_mem_enable;
+       u8 tx_free_req;
+       u8 rx_free_req;
+       u8 tx_min;
+} __packed;
+
 struct wl1271_acx_mem_map {
        struct acx_header header;
 
@@ -1051,6 +1071,59 @@ struct wl1271_acx_ht_information {
        u8 padding[3];
 } __packed;
 
+#define RX_BA_WIN_SIZE 8
+
+struct wl1271_acx_ba_session_policy {
+       struct acx_header header;
+       /*
+        * Specifies role Id, Range 0-7, 0xFF means ANY role.
+        * Future use. For now this field is irrelevant
+        */
+       u8 role_id;
+       /*
+        * Specifies Link Id, Range 0-31, 0xFF means ANY  Link Id.
+        * Not applicable if Role Id is set to ANY.
+        */
+       u8 link_id;
+
+       u8 tid;
+
+       u8 enable;
+
+       /* Window size in number of packets */
+       u16 win_size;
+
+       /*
+        * As initiator: inactivity timeout in time units (TU) of 1024us.
+        * As receiver: reserved
+        */
+       u16 inactivity_timeout;
+
+       /* Initiator = 1/Receiver = 0 */
+       u8 ba_direction;
+
+       u8 padding[3];
+} __packed;
+
+struct wl1271_acx_ba_receiver_setup {
+       struct acx_header header;
+
+       /* Specifies Link Id, Range 0-31, 0xFF means ANY  Link Id */
+       u8 link_id;
+
+       u8 tid;
+
+       u8 enable;
+
+       u8 padding[1];
+
+       /* Window size in number of packets */
+       u16 win_size;
+
+       /* BA session starting sequence number.  RANGE 0-FFF */
+       u16 ssn;
+} __packed;
+
 struct wl1271_acx_fw_tsf_information {
        struct acx_header header;
 
@@ -1062,6 +1135,33 @@ struct wl1271_acx_fw_tsf_information {
        u8 padding[3];
 } __packed;
 
+struct wl1271_acx_max_tx_retry {
+       struct acx_header header;
+
+       /*
+        * the number of frames transmission failures before
+        * issuing the aging event.
+        */
+       __le16 max_tx_retry;
+       u8 padding_1[2];
+} __packed;
+
+struct wl1271_acx_config_ps {
+       struct acx_header header;
+
+       u8 exit_retries;
+       u8 enter_retries;
+       u8 padding[2];
+       __le32 null_data_rate;
+} __packed;
+
+struct wl1271_acx_inconnection_sta {
+       struct acx_header header;
+
+       u8 addr[ETH_ALEN];
+       u8 padding1[2];
+} __packed;
+
 enum {
        ACX_WAKE_UP_CONDITIONS      = 0x0002,
        ACX_MEM_CFG                 = 0x0003,
@@ -1113,22 +1213,24 @@ enum {
        ACX_RSSI_SNR_WEIGHTS        = 0x0052,
        ACX_KEEP_ALIVE_MODE         = 0x0053,
        ACX_SET_KEEP_ALIVE_CONFIG   = 0x0054,
-       ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
-       ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
+       ACX_BA_SESSION_POLICY_CFG   = 0x0055,
+       ACX_BA_SESSION_RX_SETUP     = 0x0056,
        ACX_PEER_HT_CAP             = 0x0057,
        ACX_HT_BSS_OPERATION        = 0x0058,
        ACX_COEX_ACTIVITY           = 0x0059,
        ACX_SET_DCO_ITRIM_PARAMS    = 0x0061,
+       ACX_GEN_FW_CMD              = 0x0070,
+       ACX_HOST_IF_CFG_BITMAP      = 0x0071,
+       ACX_MAX_TX_FAILURE          = 0x0072,
+       ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
        DOT11_RX_MSDU_LIFE_TIME     = 0x1004,
        DOT11_CUR_TX_PWR            = 0x100D,
        DOT11_RX_DOT11_MODE         = 0x1012,
        DOT11_RTS_THRESHOLD         = 0x1013,
        DOT11_GROUP_ADDRESS_TBL     = 0x1014,
        ACX_PM_CONFIG               = 0x1016,
-
-       MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
-
-       MAX_IE = 0xFFFF
+       ACX_CONFIG_PS               = 0x1017,
+       ACX_CONFIG_HANGOVER         = 0x1018,
 };
 
 
@@ -1160,7 +1262,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
 int wl1271_acx_cts_protect(struct wl1271 *wl,
                           enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+                     u8 idx);
 int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifsn, u16 txop);
 int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1168,7 +1272,8 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
                       u32 apsd_conf0, u32 apsd_conf1);
 int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
 int wl1271_acx_tx_config_options(struct wl1271 *wl);
-int wl1271_acx_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
 int wl1271_acx_init_mem_config(struct wl1271 *wl);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 int wl1271_acx_smart_reflex(struct wl1271 *wl);
@@ -1185,6 +1290,14 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
                                    bool allow_ht_operation);
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
                                   u16 ht_operation_mode);
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+                             enum ieee80211_back_parties direction,
+                             u8 tid_index, u8 policy);
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+                                      bool enable);
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl1271_acx_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 
 #endif /* __WL1271_ACX_H__ */
index 4df04f8..6934dff 100644 (file)
@@ -28,6 +28,7 @@
 #include "boot.h"
 #include "io.h"
 #include "event.h"
+#include "rx.h"
 
 static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
        [PART_DOWN] = {
@@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
        wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
 }
 
+static void wl1271_parse_fw_ver(struct wl1271 *wl)
+{
+       int ret;
+
+       ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+                    &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+                    &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+                    &wl->chip.fw_ver[4]);
+
+       if (ret != 5) {
+               wl1271_warning("fw version incorrect value");
+               memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+               return;
+       }
+}
+
 static void wl1271_boot_fw_version(struct wl1271 *wl)
 {
        struct wl1271_static_data static_data;
@@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
        wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
                    false);
 
-       strncpy(wl->chip.fw_ver, static_data.fw_version,
-               sizeof(wl->chip.fw_ver));
+       strncpy(wl->chip.fw_ver_str, static_data.fw_version,
+               sizeof(wl->chip.fw_ver_str));
 
        /* make sure the string is NULL-terminated */
-       wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
+       wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+       wl1271_parse_fw_ver(wl);
 }
 
 static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
         */
        if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
            wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
-               if (wl->nvs->general_params.dual_mode_select)
+               /* for now 11a is unsupported in AP mode */
+               if (wl->bss_type != BSS_TYPE_AP_BSS &&
+                   wl->nvs->general_params.dual_mode_select)
                        wl->enable_11a = true;
        }
 
@@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
                PSPOLL_DELIVERY_FAILURE_EVENT_ID |
                SOFT_GEMINI_SENSE_EVENT_ID;
 
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
+
        ret = wl1271_event_unmask(wl);
        if (ret < 0) {
                wl1271_error("EVENT mask setting failed");
@@ -464,6 +488,9 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
        fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
 
        wl->hw_pg_ver = (s8)fuse;
+
+       if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
+               wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
 }
 
 /* uploads NVS and firmware */
@@ -595,8 +622,7 @@ int wl1271_boot(struct wl1271 *wl)
        wl1271_boot_enable_interrupts(wl);
 
        /* set the wl1271 default filters */
-       wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-       wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+       wl1271_set_default_filters(wl);
 
        wl1271_event_mbox_config(wl);
 
index d67dcff..17229b8 100644 (file)
@@ -59,6 +59,11 @@ struct wl1271_static_data {
 #define PG_VER_MASK          0x3c
 #define PG_VER_OFFSET        2
 
+#define PG_MAJOR_VER_MASK    0x3
+#define PG_MAJOR_VER_OFFSET  0x0
+#define PG_MINOR_VER_MASK    0xc
+#define PG_MINOR_VER_OFFSET  0x2
+
 #define CMD_MBOX_ADDRESS     0x407B4
 
 #define POLARITY_LOW         BIT(1)
index 0106628..f0aa7ab 100644 (file)
@@ -36,6 +36,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "tx.h"
 
 #define WL1271_CMD_FAST_POLL_COUNT       50
 
@@ -62,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
        cmd->status = 0;
 
        WARN_ON(len % 4 != 0);
+       WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
 
        wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
 
@@ -221,7 +223,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
  * Poll the mailbox event field until any of the bits in the mask is set or a
  * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
  */
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
 {
        u32 events_vector, event;
        unsigned long timeout;
@@ -230,7 +232,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
 
        do {
                if (time_after(jiffies, timeout)) {
-                       ieee80211_queue_work(wl->hw, &wl->recovery_work);
+                       wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
+                                    (int)mask);
                        return -ETIMEDOUT;
                }
 
@@ -248,6 +251,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
        return 0;
 }
 
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+       int ret;
+
+       ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
+       if (ret != 0) {
+               ieee80211_queue_work(wl->hw, &wl->recovery_work);
+               return ret;
+       }
+
+       return 0;
+}
+
 int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
 {
        struct wl1271_cmd_join *join;
@@ -271,6 +287,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
        join->rx_filter_options = cpu_to_le32(wl->rx_filter);
        join->bss_type = bss_type;
        join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+       join->supported_rate_set = cpu_to_le32(wl->rate_set);
 
        if (wl->band == IEEE80211_BAND_5GHZ)
                join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +305,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
        wl->tx_security_last_seq = 0;
        wl->tx_security_seq = 0;
 
+       wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
+               join->basic_rate_set, join->supported_rate_set);
+
        ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
        if (ret < 0) {
                wl1271_error("failed to initiate cmd join");
@@ -439,7 +459,7 @@ out:
        return ret;
 }
 
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
 {
        struct wl1271_cmd_ps_params *ps_params = NULL;
        int ret = 0;
@@ -453,10 +473,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
        }
 
        ps_params->ps_mode = ps_mode;
-       ps_params->send_null_data = send;
-       ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
-       ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
-       ps_params->null_data_rate = cpu_to_le32(rates);
 
        ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
                              sizeof(*ps_params), 0);
@@ -490,8 +506,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
        cmd->len = cpu_to_le16(buf_len);
        cmd->template_type = template_id;
        cmd->enabled_rates = cpu_to_le32(rates);
-       cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
-       cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+       cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
+       cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
        cmd->index = index;
 
        if (buf)
@@ -659,15 +675,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
 
        /* llc layer */
        memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
-       tmpl.llc_type = htons(ETH_P_ARP);
+       tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
 
        /* arp header */
        arp_hdr = &tmpl.arp_hdr;
-       arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
-       arp_hdr->ar_pro = htons(ETH_P_IP);
+       arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
+       arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
        arp_hdr->ar_hln = ETH_ALEN;
        arp_hdr->ar_pln = 4;
-       arp_hdr->ar_op = htons(ARPOP_REPLY);
+       arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
 
        /* arp payload */
        memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +718,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
                                       wl->basic_rate);
 }
 
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
 {
-       struct wl1271_cmd_set_keys *cmd;
+       struct wl1271_cmd_set_sta_keys *cmd;
        int ret = 0;
 
        wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +747,42 @@ out:
        return ret;
 }
 
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
+{
+       struct wl1271_cmd_set_ap_keys *cmd;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->hlid = WL1271_AP_BROADCAST_HLID;
+       cmd->key_id = id;
+       cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+       cmd->key_action = cpu_to_le16(KEY_SET_ID);
+       cmd->key_type = KEY_WEP;
+
+       ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(cmd);
+
+       return ret;
+}
+
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                       u8 key_size, const u8 *key, const u8 *addr,
                       u32 tx_seq_32, u16 tx_seq_16)
 {
-       struct wl1271_cmd_set_keys *cmd;
+       struct wl1271_cmd_set_sta_keys *cmd;
        int ret = 0;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +835,67 @@ out:
        return ret;
 }
 
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                       u16 tx_seq_16)
+{
+       struct wl1271_cmd_set_ap_keys *cmd;
+       int ret = 0;
+       u8 lid_type;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       if (hlid == WL1271_AP_BROADCAST_HLID) {
+               if (key_type == KEY_WEP)
+                       lid_type = WEP_DEFAULT_LID_TYPE;
+               else
+                       lid_type = BROADCAST_LID_TYPE;
+       } else {
+               lid_type = UNICAST_LID_TYPE;
+       }
+
+       wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
+                    " hlid: %d", (int)action, (int)id, (int)lid_type,
+                    (int)key_type, (int)hlid);
+
+       cmd->lid_key_type = lid_type;
+       cmd->hlid = hlid;
+       cmd->key_action = cpu_to_le16(action);
+       cmd->key_size = key_size;
+       cmd->key_type = key_type;
+       cmd->key_id = id;
+       cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+       cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
+       if (key_type == KEY_TKIP) {
+               /*
+                * We get the key in the following form:
+                * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+                * but the target is expecting:
+                * TKIP - RX MIC - TX MIC
+                */
+               memcpy(cmd->key, key, 16);
+               memcpy(cmd->key + 16, key + 24, 8);
+               memcpy(cmd->key + 24, key + 16, 8);
+       } else {
+               memcpy(cmd->key, key, key_size);
+       }
+
+       wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
+
+       ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_warning("could not set ap keys");
+               goto out;
+       }
+
+out:
+       kfree(cmd);
+       return ret;
+}
+
 int wl1271_cmd_disconnect(struct wl1271 *wl)
 {
        struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +958,180 @@ out_free:
 out:
        return ret;
 }
+
+int wl1271_cmd_start_bss(struct wl1271 *wl)
+{
+       struct wl1271_cmd_bss_start *cmd;
+       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd start bss");
+
+       /*
+        * FIXME: We currently do not support hidden SSID. The real SSID
+        * should be fetched from mac80211 first.
+        */
+       if (wl->ssid_len == 0) {
+               wl1271_warning("Hidden SSID currently not supported for AP");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
+
+       cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
+       cmd->bss_index = WL1271_AP_BSS_INDEX;
+       cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
+       cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
+       cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+       cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->dtim_interval = bss_conf->dtim_period;
+       cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+       cmd->channel = wl->channel;
+       cmd->ssid_len = wl->ssid_len;
+       cmd->ssid_type = SSID_TYPE_PUBLIC;
+       memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
+
+       switch (wl->band) {
+       case IEEE80211_BAND_2GHZ:
+               cmd->band = RADIO_BAND_2_4GHZ;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               cmd->band = RADIO_BAND_5GHZ;
+               break;
+       default:
+               wl1271_warning("bss start - unknown band: %d", (int)wl->band);
+               cmd->band = RADIO_BAND_2_4GHZ;
+               break;
+       }
+
+       ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd start bss");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
+
+int wl1271_cmd_stop_bss(struct wl1271 *wl)
+{
+       struct wl1271_cmd_bss_start *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd stop bss");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->bss_index = WL1271_AP_BSS_INDEX;
+
+       ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd stop bss");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
+
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+{
+       struct wl1271_cmd_add_sta *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* currently we don't support UAPSD */
+       cmd->sp_len = 0;
+
+       memcpy(cmd->addr, sta->addr, ETH_ALEN);
+       cmd->bss_index = WL1271_AP_BSS_INDEX;
+       cmd->aid = sta->aid;
+       cmd->hlid = hlid;
+
+       /*
+        * FIXME: Does STA support QOS? We need to propagate this info from
+        * hostapd. Currently not that important since this is only used for
+        * sending the correct flavor of null-data packet in response to a
+        * trigger.
+        */
+       cmd->wmm = 0;
+
+       cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
+                                               sta->supp_rates[wl->band]));
+
+       wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+
+       ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd add sta");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
+
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+{
+       struct wl1271_cmd_remove_sta *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->hlid = hlid;
+       /* We never send a deauth, mac80211 is in charge of this */
+       cmd->reason_opcode = 0;
+       cmd->send_deauth_flag = 0;
+
+       ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd remove sta");
+               goto out_free;
+       }
+
+       /*
+        * We are ok with a timeout here. The event is sometimes not sent
+        * due to a firmware bug.
+        */
+       wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
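As a rough illustration, the new AP-mode commands above might be sequenced as follows when bringing a BSS up with a first station and tearing it down again (hypothetical helpers that ignore multi-station bookkeeping; the actual callers live in main.c, outside this hunk):

	static int example_ap_sta_add(struct wl1271 *wl,
				      struct ieee80211_sta *sta, u8 hlid)
	{
		int ret;

		/* the BSS must be running before stations can be linked to it */
		ret = wl1271_cmd_start_bss(wl);
		if (ret < 0)
			return ret;

		ret = wl1271_cmd_add_sta(wl, sta, hlid);
		if (ret < 0)
			wl1271_cmd_stop_bss(wl);

		return ret;
	}

	static void example_ap_sta_remove(struct wl1271 *wl, u8 hlid)
	{
		/* drop the link first, then take the BSS down */
		wl1271_cmd_remove_sta(wl, hlid);
		wl1271_cmd_stop_bss(wl);
	}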
index 2a1d9db..54c12e7 100644 (file)
@@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
                           size_t len);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
 int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
 int wl1271_build_qos_null_data(struct wl1271 *wl);
 int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-                      u8 key_size, const u8 *key, const u8 *addr,
-                      u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                          u8 key_size, const u8 *key, const u8 *addr,
+                          u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                         u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                         u16 tx_seq_16);
 int wl1271_cmd_disconnect(struct wl1271 *wl);
 int wl1271_cmd_set_sta_state(struct wl1271 *wl);
+int wl1271_cmd_start_bss(struct wl1271 *wl);
+int wl1271_cmd_stop_bss(struct wl1271 *wl);
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
 
 enum wl1271_commands {
        CMD_INTERROGATE     = 1,    /*use this to read information elements*/
@@ -98,6 +106,12 @@ enum wl1271_commands {
        CMD_STOP_PERIODIC_SCAN       = 51,
        CMD_SET_STA_STATE            = 52,
 
+       /* AP mode commands */
+       CMD_BSS_START                = 60,
+       CMD_BSS_STOP                 = 61,
+       CMD_ADD_STA                  = 62,
+       CMD_REMOVE_STA               = 63,
+
        NUM_COMMANDS,
        MAX_COMMAND_ID = 0xFFFF,
 };
@@ -126,6 +140,14 @@ enum cmd_templ {
                                  * For CTS-to-self (FastCTS) mechanism
                                  * for BT/WLAN coexistence (SoftGemini). */
        CMD_TEMPL_ARP_RSP,
+       CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+
+       /* AP-mode specific */
+       CMD_TEMPL_AP_BEACON = 13,
+       CMD_TEMPL_AP_PROBE_RESPONSE,
+       CMD_TEMPL_AP_ARP_RSP,
+       CMD_TEMPL_DEAUTH_AP,
+
        CMD_TEMPL_MAX = 0xff
 };
 
@@ -195,6 +217,7 @@ struct wl1271_cmd_join {
         * ACK or CTS frames).
         */
        __le32 basic_rate_set;
+       __le32 supported_rate_set;
        u8 dtim_interval;
        /*
         * bits 0-2: This bitwise field specifies the type
@@ -257,20 +280,11 @@ struct wl1271_cmd_ps_params {
        struct wl1271_cmd_header header;
 
        u8 ps_mode; /* STATION_* */
-       u8 send_null_data; /* Do we have to send NULL data packet ? */
-       u8 retries; /* Number of retires for the initial NULL data packet */
-
-        /*
-         * TUs during which the target stays awake after switching
-         * to power save mode.
-         */
-       u8 hang_over_period;
-       __le32 null_data_rate;
+       u8 padding[3];
 } __packed;
 
 /* HW encryption keys */
 #define NUM_ACCESS_CATEGORIES_COPY 4
-#define MAX_KEY_SIZE 32
 
 enum wl1271_cmd_key_action {
        KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +303,7 @@ enum wl1271_cmd_key_type {
 
 /* FIXME: Add description for key-types */
 
-struct wl1271_cmd_set_keys {
+struct wl1271_cmd_set_sta_keys {
        struct wl1271_cmd_header header;
 
        /* Ignored for default WEP key */
@@ -318,6 +332,57 @@ struct wl1271_cmd_set_keys {
        __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
 } __packed;
 
+enum wl1271_cmd_lid_key_type {
+       UNICAST_LID_TYPE     = 0,
+       BROADCAST_LID_TYPE   = 1,
+       WEP_DEFAULT_LID_TYPE = 2
+};
+
+struct wl1271_cmd_set_ap_keys {
+       struct wl1271_cmd_header header;
+
+       /*
+        * Indicates whether the HLID is a unicast key set
+        * or broadcast key set. A special value 0xFF is
+        * used to indicate that the HLID is on WEP-default
+        * (multi-hlids), of type wl1271_cmd_lid_key_type.
+        */
+       u8 hlid;
+
+       /*
+        * In WEP-default network (hlid == 0xFF) used to
+        * indicate which network STA/IBSS/AP role should be
+        * changed
+        */
+       u8 lid_key_type;
+
+       /*
+        * Key ID - For TKIP and AES key types, this field
+        * indicates the value that should be inserted into
+        * the KeyID field of frames transmitted using this
+        * key entry. For broadcast keys the index is used as a
+        * marker for TX/RX key.
+        * For WEP default network (HLID=0xFF), this field
+        * indicates the ID of the key to add or remove.
+        */
+       u8 key_id;
+       u8 reserved_1;
+
+       /* key_action_e */
+       __le16 key_action;
+
+       /* key size in bytes */
+       u8 key_size;
+
+       /* key_type_e */
+       u8 key_type;
+
+       /* This field holds the security key data to add to the STA table */
+       u8 key[MAX_KEY_SIZE];
+       __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+       __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __packed;
+
 struct wl1271_cmd_test_header {
        u8 id;
        u8 padding[3];
@@ -412,4 +477,68 @@ struct wl1271_cmd_set_sta_state {
        u8 padding[3];
 } __packed;
 
+enum wl1271_ssid_type {
+       SSID_TYPE_PUBLIC = 0,
+       SSID_TYPE_HIDDEN = 1
+};
+
+struct wl1271_cmd_bss_start {
+       struct wl1271_cmd_header header;
+
+       /* wl1271_ssid_type */
+       u8 ssid_type;
+       u8 ssid_len;
+       u8 ssid[IW_ESSID_MAX_SIZE];
+       u8 padding_1[2];
+
+       /* Basic rate set */
+       __le32 basic_rate_set;
+       /* Aging period in seconds */
+       __le16 aging_period;
+
+       /*
+        * This field specifies the time between target beacon
+        * transmission times (TBTTs), in time units (TUs).
+        * Valid values are 1 to 1024.
+        */
+       __le16 beacon_interval;
+       u8 bssid[ETH_ALEN];
+       u8 bss_index;
+       /* Radio band */
+       u8 band;
+       u8 channel;
+       /* The host link id for the AP's global queue */
+       u8 global_hlid;
+       /* The host link id for the AP's broadcast queue */
+       u8 broadcast_hlid;
+       /* DTIM count */
+       u8 dtim_interval;
+       /* Beacon expiry time in ms */
+       u8 beacon_expiry;
+       u8 padding_2[3];
+} __packed;
+
+struct wl1271_cmd_add_sta {
+       struct wl1271_cmd_header header;
+
+       u8 addr[ETH_ALEN];
+       u8 hlid;
+       u8 aid;
+       u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
+       __le32 supported_rates;
+       u8 bss_index;
+       u8 sp_len;
+       u8 wmm;
+       u8 padding1;
+} __packed;
+
+struct wl1271_cmd_remove_sta {
+       struct wl1271_cmd_header header;
+
+       u8 hlid;
+       u8 reason_opcode;
+       u8 send_deauth_flag;
+       u8 padding1;
+} __packed;
+
 #endif /* __WL1271_CMD_H__ */
index a16b361..856a8a2 100644 (file)
@@ -496,6 +496,26 @@ struct conf_rx_settings {
                                        CONF_HW_BIT_RATE_2MBPS)
 #define CONF_TX_RATE_RETRY_LIMIT       10
 
+/*
+ * Rates supported for data packets when operating as AP. Note the absence
+ * of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
+ * one. The rate dropped is not mandatory under any operating mode.
+ */
+#define CONF_TX_AP_ENABLED_RATES       (CONF_HW_BIT_RATE_1MBPS | \
+       CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS |      \
+       CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS |        \
+       CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS |      \
+       CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS |      \
+       CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
+       CONF_HW_BIT_RATE_54MBPS)
+
+/*
+ * Default rates for management traffic when operating in AP mode. This
+ * should be configured according to the basic rate set of the AP
+ */
+#define CONF_TX_AP_DEFAULT_MGMT_RATES  (CONF_HW_BIT_RATE_1MBPS | \
+       CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+
 struct conf_tx_rate_class {
 
        /*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
 
        /*
         * Configuration for rate classes for TX (currently only one
-        * rate class supported.)
+        * rate class supported). Used in non-AP mode.
         */
-       struct conf_tx_rate_class rc_conf;
+       struct conf_tx_rate_class sta_rc_conf;
 
        /*
         * Configuration for access categories for TX rate control.
@@ -646,6 +666,28 @@ struct conf_tx_settings {
        u8 ac_conf_count;
        struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
 
+       /*
+        * Configuration for rate classes in AP-mode. These rate classes
+        * are for the AC TX queues
+        */
+       struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
+
+       /*
+        * Management TX rate class for AP-mode.
+        */
+       struct conf_tx_rate_class ap_mgmt_conf;
+
+       /*
+        * Broadcast TX rate class for AP-mode.
+        */
+       struct conf_tx_rate_class ap_bcst_conf;
+
+       /*
+        * AP-mode - allow this number of TX retries to a station before an
+        * event is triggered from FW.
+        */
+       u16 ap_max_tx_retries;
+
        /*
         * Configuration for TID parameters.
         */
@@ -687,6 +729,12 @@ struct conf_tx_settings {
         * Range: CONF_HW_BIT_RATE_* bit mask
         */
        u32 basic_rate_5;
+
+       /*
+        * TX retry limits for templates
+        */
+       u8 tmpl_short_retry_limit;
+       u8 tmpl_long_retry_limit;
 };
 
 enum {
@@ -911,6 +959,14 @@ struct conf_conn_settings {
         */
        u8 psm_entry_retries;
 
+       /*
+        * Specifies the maximum number of times to try PSM exit if it fails
+        * (if sending the appropriate null-func message fails).
+        *
+        * Range 0 - 255
+        */
+       u8 psm_exit_retries;
+
        /*
         * Specifies the maximum number of times to try transmit the PSM entry
         * null-func frame for each PSM entry attempt
@@ -1036,30 +1092,30 @@ struct conf_scan_settings {
        /*
         * The minimum time to wait on each channel for active scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 min_dwell_time_active;
+       u32 min_dwell_time_active;
 
        /*
         * The maximum time to wait on each channel for active scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 max_dwell_time_active;
+       u32 max_dwell_time_active;
 
        /*
-        * The maximum time to wait on each channel for passive scans
+        * The minimum time to wait on each channel for passive scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 min_dwell_time_passive;
+       u32 min_dwell_time_passive;
 
        /*
         * The maximum time to wait on each channel for passive scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 max_dwell_time_passive;
+       u32 max_dwell_time_passive;
 
        /*
         * Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1146,51 @@ struct conf_rf_settings {
        u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
 };
 
+struct conf_ht_setting {
+       u16 tx_ba_win_size;
+       u16 inactivity_timeout;
+};
+
+struct conf_memory_settings {
+       /* Number of stations supported in IBSS mode */
+       u8 num_stations;
+
+       /* Number of ssid profiles used in IBSS mode */
+       u8 ssid_profiles;
+
+       /* Number of memory buffers allocated to rx pool */
+       u8 rx_block_num;
+
+       /* Minimum number of blocks allocated to tx pool */
+       u8 tx_min_block_num;
+
+       /* Disable/Enable dynamic memory */
+       u8 dynamic_memory;
+
+       /*
+        * Minimum required free tx memory blocks in order to assure optimum
+        * performance
+        *
+        * Range: 0-120
+        */
+       u8 min_req_tx_blocks;
+
+       /*
+        * Minimum required free rx memory blocks in order to assure optimum
+        * performance
+        *
+        * Range: 0-120
+        */
+       u8 min_req_rx_blocks;
+
+       /*
+        * Minimum number of mem blocks (free+used) guaranteed for TX
+        *
+        * Range: 0-120
+        */
+       u8 tx_min;
+};
+
 struct conf_drv_settings {
        struct conf_sg_settings sg;
        struct conf_rx_settings rx;
@@ -1100,6 +1201,8 @@ struct conf_drv_settings {
        struct conf_roam_trigger_settings roam_trigger;
        struct conf_scan_settings scan;
        struct conf_rf_settings rf;
+       struct conf_ht_setting ht;
+       struct conf_memory_settings mem;
 };
 
 #endif
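A sketch of how the new conf_memory_settings block might be filled in the driver's default configuration; the first four values mirror the constants this patch removes from acx.h, the remaining ones are placeholders (the real defaults live in main.c, outside this hunk):

	static void example_fill_mem_conf(struct conf_memory_settings *mem)
	{
		mem->num_stations      = 1;   /* was DEFAULT_NUM_STATIONS */
		mem->ssid_profiles     = 1;   /* was ACX_NUM_SSID_PROFILES */
		mem->rx_block_num      = 70;  /* was ACX_RX_MEM_BLOCKS */
		mem->tx_min_block_num  = 40;  /* was ACX_TX_MIN_MEM_BLOCKS */
		mem->dynamic_memory    = 0;   /* placeholder */
		mem->min_req_tx_blocks = 100; /* placeholder, range 0-120 */
		mem->min_req_rx_blocks = 22;  /* placeholder, range 0-120 */
		mem->tx_min            = 27;  /* placeholder, range 0-120 */
	}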
index ec60777..8e75b09 100644 (file)
@@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
 
        mutex_lock(&wl->mutex);
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
        unsigned long value;
        int ret;
 
-       mutex_lock(&wl->mutex);
-
        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len)) {
-               ret = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
        buf[len] = '\0';
 
        ret = strict_strtoul(buf, 0, &value);
        if (ret < 0) {
                wl1271_warning("illegal value in gpio_power");
-               goto out;
+               return -EINVAL;
        }
 
+       mutex_lock(&wl->mutex);
+
        if (value)
                wl1271_power_on(wl);
        else
                wl1271_power_off(wl);
 
-out:
        mutex_unlock(&wl->mutex);
        return count;
 }
@@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = {
        .llseek = default_llseek,
 };
 
-static int wl1271_debugfs_add_files(struct wl1271 *wl)
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+                                    struct dentry *rootdir)
 {
        int ret = 0;
        struct dentry *entry, *stats;
 
-       stats = debugfs_create_dir("fw-statistics", wl->rootdir);
+       stats = debugfs_create_dir("fw-statistics", rootdir);
        if (!stats || IS_ERR(stats)) {
                entry = stats;
                goto err;
@@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
        DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
        DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
 
-       DEBUGFS_ADD(tx_queue_len, wl->rootdir);
-       DEBUGFS_ADD(retry_count, wl->rootdir);
-       DEBUGFS_ADD(excessive_retries, wl->rootdir);
-
-       DEBUGFS_ADD(gpio_power, wl->rootdir);
+       DEBUGFS_ADD(tx_queue_len, rootdir);
+       DEBUGFS_ADD(retry_count, rootdir);
+       DEBUGFS_ADD(excessive_retries, rootdir);
 
-       entry = debugfs_create_x32("debug_level", 0600, wl->rootdir,
-                                  &wl12xx_debug_level);
-       if (!entry || IS_ERR(entry))
-               goto err;
+       DEBUGFS_ADD(gpio_power, rootdir);
 
        return 0;
 
@@ -419,7 +413,7 @@ err:
 
 void wl1271_debugfs_reset(struct wl1271 *wl)
 {
-       if (!wl->rootdir)
+       if (!wl->stats.fw_stats)
                return;
 
        memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
 int wl1271_debugfs_init(struct wl1271 *wl)
 {
        int ret;
+       struct dentry *rootdir;
 
-       wl->rootdir = debugfs_create_dir(KBUILD_MODNAME,
-                                        wl->hw->wiphy->debugfsdir);
+       rootdir = debugfs_create_dir(KBUILD_MODNAME,
+                                    wl->hw->wiphy->debugfsdir);
 
-       if (IS_ERR(wl->rootdir)) {
-               ret = PTR_ERR(wl->rootdir);
-               wl->rootdir = NULL;
+       if (IS_ERR(rootdir)) {
+               ret = PTR_ERR(rootdir);
                goto err;
        }
 
@@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
 
        wl->stats.fw_stats_update = jiffies;
 
-       ret = wl1271_debugfs_add_files(wl);
+       ret = wl1271_debugfs_add_files(wl, rootdir);
 
        if (ret < 0)
                goto err_file;
@@ -462,8 +456,7 @@ err_file:
        wl->stats.fw_stats = NULL;
 
 err_fw:
-       debugfs_remove_recursive(wl->rootdir);
-       wl->rootdir = NULL;
+       debugfs_remove_recursive(rootdir);
 
 err:
        return ret;
@@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
 {
        kfree(wl->stats.fw_stats);
        wl->stats.fw_stats = NULL;
-
-       debugfs_remove_recursive(wl->rootdir);
-       wl->rootdir = NULL;
-
 }
index f9146f5..1b170c5 100644 (file)
@@ -135,20 +135,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
                /* go to extremely low power mode */
                wl1271_ps_elp_sleep(wl);
                break;
-       case EVENT_EXIT_POWER_SAVE_FAIL:
-               wl1271_debug(DEBUG_PSM, "PSM exit failed");
-
-               if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
-                       wl->psm_entry_retry = 0;
-                       break;
-               }
-
-               /* make sure the firmware goes to active mode - the frame to
-                  be sent next will indicate to the AP, that we are active. */
-               ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                        wl->basic_rate, false);
-               break;
-       case EVENT_EXIT_POWER_SAVE_SUCCESS:
        default:
                break;
        }
@@ -186,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
        int ret;
        u32 vector;
        bool beacon_loss = false;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
        wl1271_event_mbox_dump(mbox);
 
@@ -218,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
         * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
         *
         */
-       if (vector & BSS_LOSE_EVENT_ID) {
+       if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
                wl1271_info("Beacon loss detected.");
 
                /* indicate to the stack, that beacons have been lost */
                beacon_loss = true;
        }
 
-       if (vector & PS_REPORT_EVENT_ID) {
+       if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
                wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
                ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
                if (ret < 0)
                        return ret;
        }
 
-       if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+       if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
                wl1271_event_pspoll_delivery_fail(wl);
 
        if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
index 6cce014..0e80886 100644 (file)
@@ -59,6 +59,7 @@ enum {
        BSS_LOSE_EVENT_ID                        = BIT(18),
        REGAINED_BSS_EVENT_ID                    = BIT(19),
        ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID    = BIT(20),
+       STA_REMOVE_COMPLETE_EVENT_ID             = BIT(21), /* AP */
        SOFT_GEMINI_SENSE_EVENT_ID               = BIT(22),
        SOFT_GEMINI_PREDICTION_EVENT_ID          = BIT(23),
        SOFT_GEMINI_AVALANCHE_EVENT_ID           = BIT(24),
@@ -74,8 +75,6 @@ enum {
 enum {
        EVENT_ENTER_POWER_SAVE_FAIL = 0,
        EVENT_ENTER_POWER_SAVE_SUCCESS,
-       EVENT_EXIT_POWER_SAVE_FAIL,
-       EVENT_EXIT_POWER_SAVE_SUCCESS,
 };
 
 struct event_debug_report {
@@ -115,7 +114,12 @@ struct event_mailbox {
        u8 scheduled_scan_status;
        u8 ps_status;
 
-       u8 reserved_5[29];
+       /* AP FW only */
+       u8 hlid_removed;
+       __le16 sta_aging_status;
+       __le16 sta_tx_retry_exceeded;
+
+       u8 reserved_5[24];
 } __packed;
 
 int wl1271_event_unmask(struct wl1271 *wl);
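
Note how the three new AP-only fields (1 + 2 + 2 bytes) are carved out of the old reserved_5[29] area, leaving reserved_5[24], so the packed mailbox keeps exactly the size the firmware writes. A standalone C11 sketch (hypothetical struct names, not driver code) of checking such a layout invariant at compile time:

#include <stdint.h>

struct tail_old {
	uint8_t reserved_5[29];
} __attribute__((packed));

struct tail_new {
	uint8_t  hlid_removed;
	uint16_t sta_aging_status;
	uint16_t sta_tx_retry_exceeded;
	uint8_t  reserved_5[24];
} __attribute__((packed));

/* fails the build if the new fields change the mailbox layout */
_Static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
	       "AP event fields must fit exactly into the old reserved area");

int main(void)
{
	return 0;
}
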
index 785a530..6072fe4 100644 (file)
 #include "acx.h"
 #include "cmd.h"
 #include "reg.h"
+#include "tx.h"
 
-static int wl1271_init_hwenc_config(struct wl1271 *wl)
-{
-       int ret;
-
-       ret = wl1271_acx_feature_cfg(wl);
-       if (ret < 0) {
-               wl1271_warning("couldn't set feature config");
-               return ret;
-       }
-
-       ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
-       if (ret < 0) {
-               wl1271_warning("couldn't set default key");
-               return ret;
-       }
-
-       return 0;
-}
-
-int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_sta_init_templates_config(struct wl1271 *wl)
 {
        int ret, i;
 
@@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
        return 0;
 }
 
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+{
+       struct wl12xx_disconn_template *tmpl;
+       int ret;
+
+       tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+       if (!tmpl) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                            IEEE80211_STYPE_DEAUTH);
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+                                     tmpl, sizeof(*tmpl), 0,
+                                     wl1271_tx_min_rate_get(wl));
+
+out:
+       kfree(tmpl);
+       return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl)
+{
+       struct ieee80211_hdr_3addr *nullfunc;
+       int ret;
+
+       nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+       if (!nullfunc) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                             IEEE80211_STYPE_NULLFUNC |
+                                             IEEE80211_FCTL_FROMDS);
+
+       /* nullfunc->addr1 is filled by FW */
+
+       memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
+       memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+                                     sizeof(*nullfunc), 0,
+                                     wl1271_tx_min_rate_get(wl));
+
+out:
+       kfree(nullfunc);
+       return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+{
+       struct ieee80211_qos_hdr *qosnull;
+       int ret;
+
+       qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+       if (!qosnull) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                            IEEE80211_STYPE_QOS_NULLFUNC |
+                                            IEEE80211_FCTL_FROMDS);
+
+       /* qosnull->addr1 is filled by FW */
+
+       memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
+       memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+                                     sizeof(*qosnull), 0,
+                                     wl1271_tx_min_rate_get(wl));
+
+out:
+       kfree(qosnull);
+       return ret;
+}
+
+static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+{
+       int ret;
+
+       /*
+        * Put very large empty placeholders for all templates. These
+        * reserve memory for later.
+        */
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+                                     sizeof
+                                     (struct wl12xx_probe_resp_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+                                     sizeof
+                                     (struct wl12xx_beacon_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+                                     sizeof
+                                     (struct wl12xx_disconn_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+                                     sizeof(struct wl12xx_null_data_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+                                     sizeof
+                                     (struct wl12xx_qos_null_data_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
 {
        int ret;
@@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
-       if (ret < 0)
-               return ret;
-
        ret = wl1271_acx_service_period_timeout(wl);
        if (ret < 0)
                return ret;
@@ -213,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
        return 0;
 }
 
+static int wl1271_sta_hw_init(struct wl1271 *wl)
+{
+       int ret;
+
+       ret = wl1271_cmd_ext_radio_parms(wl);
+       if (ret < 0)
+               return ret;
+
+       /* PS config */
+       ret = wl1271_acx_config_ps(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_sta_init_templates_config(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Initialize connection monitoring thresholds */
+       ret = wl1271_acx_conn_monit_params(wl, false);
+       if (ret < 0)
+               return ret;
+
+       /* Beacon filtering */
+       ret = wl1271_init_beacon_filter(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Bluetooth WLAN coexistence */
+       ret = wl1271_init_pta(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Beacons and broadcast settings */
+       ret = wl1271_init_beacon_broadcast(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Configure for ELP power saving */
+       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+       if (ret < 0)
+               return ret;
+
+       /* Configure rssi/snr averaging weights */
+       ret = wl1271_acx_rssi_snr_avg_weights(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_sta_rate_policies(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_sta_mem_cfg(wl);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+{
+       int ret, i;
+
+       ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
+       if (ret < 0) {
+               wl1271_warning("couldn't set default key");
+               return ret;
+       }
+
+       /* disable all keep-alive templates */
+       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+               ret = wl1271_acx_keep_alive_config(wl, i,
+                                                  ACX_KEEP_ALIVE_TPL_INVALID);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* disable the keep-alive feature */
+       ret = wl1271_acx_keep_alive_mode(wl, false);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int wl1271_ap_hw_init(struct wl1271 *wl)
+{
+       int ret, i;
+
+       ret = wl1271_ap_init_templates_config(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Configure for power always on */
+       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+       if (ret < 0)
+               return ret;
+
+       /* Configure initial TX rate classes */
+       for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+               ret = wl1271_acx_ap_rate_policy(wl,
+                               &wl->conf.tx.ap_rc_conf[i], i);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = wl1271_acx_ap_rate_policy(wl,
+                                       &wl->conf.tx.ap_mgmt_conf,
+                                       ACX_TX_AP_MODE_MGMT_RATE);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_ap_rate_policy(wl,
+                                       &wl->conf.tx.ap_bcst_conf,
+                                       ACX_TX_AP_MODE_BCST_RATE);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_max_tx_retry(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_ap_mem_cfg(wl);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+{
+       int ret;
+
+       ret = wl1271_ap_init_deauth_template(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_ap_init_null_template(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_ap_init_qos_null_template(wl);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void wl1271_check_ba_support(struct wl1271 *wl)
+{
+       /* BA is supported only on FW versions x.x.x.50-60.x */
+       if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
+           (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
+               wl->ba_support = true;
+               return;
+       }
+
+       wl->ba_support = false;
+}
+
+static int wl1271_set_ba_policies(struct wl1271 *wl)
+{
+       u8 tid_index;
+       int ret = 0;
+
+       /* Reset the BA RX indicators */
+       wl->ba_rx_bitmap = 0;
+
+       /* check whether the FW supports BA */
+       wl1271_check_ba_support(wl);
+
+       if (wl->ba_support)
+               /* 802.11n initiator BA session setting */
+               for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
+                    ++tid_index) {
+                       ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
+                                                       tid_index, true);
+                       if (ret < 0)
+                               break;
+               }
+
+       return ret;
+}
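
wl1271_set_ba_policies() above opens an initiator block-ack session per TID, but only after wl1271_check_ba_support() has confirmed that the firmware sub-version lies inside the supported window. A small, self-contained sketch of that version-gated loop (the constants and TID count are illustrative, not the driver's values):

#include <stdbool.h>
#include <stdio.h>

#define BA_FW_SUBVER_START 50	/* assumed window, cf. *_COST_VER2_* above */
#define BA_FW_SUBVER_END   60
#define MAX_TID_COUNT       8

static bool ba_supported(unsigned int fw_subver)
{
	return fw_subver >= BA_FW_SUBVER_START && fw_subver < BA_FW_SUBVER_END;
}

int main(void)
{
	unsigned int fw_subver = 55;	/* the fourth firmware version field */
	unsigned int tid;

	if (!ba_supported(fw_subver))
		return 0;

	for (tid = 0; tid < MAX_TID_COUNT; tid++)
		printf("enable initiator BA session for TID %u\n", tid);

	return 0;
}
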
+
 int wl1271_hw_init(struct wl1271 *wl)
 {
        struct conf_tx_ac_category *conf_ac;
        struct conf_tx_tid *conf_tid;
        int ret, i;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
        ret = wl1271_cmd_general_parms(wl);
        if (ret < 0)
@@ -227,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_cmd_ext_radio_parms(wl);
-       if (ret < 0)
-               return ret;
+       /* Mode specific init */
+       if (is_ap)
+               ret = wl1271_ap_hw_init(wl);
+       else
+               ret = wl1271_sta_hw_init(wl);
 
-       /* Template settings */
-       ret = wl1271_init_templates_config(wl);
        if (ret < 0)
                return ret;
 
@@ -259,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Initialize connection monitoring thresholds */
-       ret = wl1271_acx_conn_monit_params(wl, false);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Beacon filtering */
-       ret = wl1271_init_beacon_filter(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Configure TX patch complete interrupt behavior */
        ret = wl1271_acx_tx_config_options(wl);
        if (ret < 0)
@@ -279,21 +561,11 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Bluetooth WLAN coexistence */
-       ret = wl1271_init_pta(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Energy detection */
        ret = wl1271_init_energy_detection(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Beacons and boradcast settings */
-       ret = wl1271_init_beacon_broadcast(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Default fragmentation threshold */
        ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
        if (ret < 0)
@@ -321,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl)
                        goto out_free_memmap;
        }
 
-       /* Configure TX rate classes */
-       ret = wl1271_acx_rate_policies(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Enable data path */
        ret = wl1271_cmd_data_path(wl, 1);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure for ELP power saving */
-       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Configure HW encryption */
-       ret = wl1271_init_hwenc_config(wl);
+       ret = wl1271_acx_feature_cfg(wl);
        if (ret < 0)
                goto out_free_memmap;
 
@@ -346,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* disable all keep-alive templates */
-       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-               ret = wl1271_acx_keep_alive_config(wl, i,
-                                                  ACX_KEEP_ALIVE_TPL_INVALID);
-               if (ret < 0)
-                       goto out_free_memmap;
-       }
+       /* Mode specific init - post mem init */
+       if (is_ap)
+               ret = wl1271_ap_hw_init_post_mem(wl);
+       else
+               ret = wl1271_sta_hw_init_post_mem(wl);
 
-       /* disable the keep-alive feature */
-       ret = wl1271_acx_keep_alive_mode(wl, false);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure rssi/snr averaging weights */
-       ret = wl1271_acx_rssi_snr_avg_weights(wl);
+       /* Configure initiator BA sessions policies */
+       ret = wl1271_set_ba_policies(wl);
        if (ret < 0)
                goto out_free_memmap;
 
index 7762421..3a8bd3f 100644 (file)
@@ -27,7 +27,7 @@
 #include "wl12xx.h"
 
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_sta_init_templates_config(struct wl1271 *wl);
 int wl1271_init_phy_config(struct wl1271 *wl);
 int wl1271_init_pta(struct wl1271 *wl);
 int wl1271_init_energy_detection(struct wl1271 *wl);
index 844b32b..c1aac82 100644 (file)
@@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
 int wl1271_init_ieee80211(struct wl1271 *wl);
 struct ieee80211_hw *wl1271_alloc_hw(void);
 int wl1271_free_hw(struct wl1271 *wl);
+irqreturn_t wl1271_irq(int irq, void *data);
 
 #endif
index 062247e..8b3c8d1 100644 (file)
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
        },
        .tx = {
                .tx_energy_detection         = 0,
-               .rc_conf                     = {
+               .sta_rc_conf                 = {
                        .enabled_rates       = 0,
                        .short_retry_limit   = 10,
                        .long_retry_limit    = 10,
-                       .aflags              = 0
+                       .aflags              = 0,
                },
                .ac_conf_count               = 4,
                .ac_conf                     = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
                                .tx_op_limit = 1504,
                        },
                },
+               .ap_rc_conf                  = {
+                       [0] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+                       [1] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+                       [2] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+                       [3] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+               },
+               .ap_mgmt_conf = {
+                       .enabled_rates       = CONF_TX_AP_DEFAULT_MGMT_RATES,
+                       .short_retry_limit   = 10,
+                       .long_retry_limit    = 10,
+                       .aflags              = 0,
+               },
+               .ap_bcst_conf = {
+                       .enabled_rates       = CONF_HW_BIT_RATE_1MBPS,
+                       .short_retry_limit   = 10,
+                       .long_retry_limit    = 10,
+                       .aflags              = 0,
+               },
+               .ap_max_tx_retries = 100,
                .tid_conf_count = 4,
                .tid_conf = {
                        [CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
                .tx_compl_threshold          = 4,
                .basic_rate                  = CONF_HW_BIT_RATE_1MBPS,
                .basic_rate_5                = CONF_HW_BIT_RATE_6MBPS,
+               .tmpl_short_retry_limit      = 10,
+               .tmpl_long_retry_limit       = 10,
        },
        .conn = {
                .wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
@@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = {
                .bet_enable                  = CONF_BET_MODE_ENABLE,
                .bet_max_consecutive         = 10,
                .psm_entry_retries           = 5,
+               .psm_exit_retries            = 255,
                .psm_entry_nullfunc_retries  = 3,
                .psm_entry_hangover_period   = 1,
                .keep_alive_interval         = 55000,
@@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = {
                .avg_weight_rssi_beacon       = 20,
                .avg_weight_rssi_data         = 10,
                .avg_weight_snr_beacon        = 20,
-               .avg_weight_snr_data          = 10
+               .avg_weight_snr_data          = 10,
        },
        .scan = {
                .min_dwell_time_active        = 7500,
                .max_dwell_time_active        = 30000,
-               .min_dwell_time_passive       = 30000,
-               .max_dwell_time_passive       = 60000,
+               .min_dwell_time_passive       = 100000,
+               .max_dwell_time_passive       = 100000,
                .num_probe_reqs               = 2,
        },
        .rf = {
@@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                },
        },
+       .ht = {
+               .tx_ba_win_size = 64,
+               .inactivity_timeout = 10000,
+       },
+       .mem = {
+               .num_stations                 = 1,
+               .ssid_profiles                = 1,
+               .rx_block_num                 = 70,
+               .tx_min_block_num             = 40,
+               .dynamic_memory               = 0,
+               .min_req_tx_blocks            = 100,
+               .min_req_rx_blocks            = 22,
+               .tx_min                       = 27,
+       }
 };
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl);
+static void wl1271_free_ap_keys(struct wl1271 *wl);
 
 
 static void wl1271_device_release(struct device *dev)
@@ -317,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
        if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -393,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_init_templates_config(wl);
+       ret = wl1271_sta_init_templates_config(wl);
        if (ret < 0)
                return ret;
 
@@ -425,6 +482,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
+       ret = wl1271_acx_sta_mem_cfg(wl);
+       if (ret < 0)
+               goto out_free_memmap;
+
        /* Default fragmentation threshold */
        ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
        if (ret < 0)
@@ -476,14 +537,71 @@ static int wl1271_plt_init(struct wl1271 *wl)
        return ret;
 }
 
+static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+{
+       bool fw_ps;
+
+       /* only regulate station links */
+       if (hlid < WL1271_AP_STA_HLID_START)
+               return;
+
+       fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+
+       /*
+        * Wake up from high-level PS if the STA is asleep with too few
+        * blocks left in the FW, or if the STA is awake.
+        */
+       if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_end(wl, hlid);
+
+       /* Start high-level PS if the STA is asleep with enough blocks in FW */
+       else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_start(wl, hlid, true);
+}
+
+static void wl1271_irq_update_links_status(struct wl1271 *wl,
+                                      struct wl1271_fw_ap_status *status)
+{
+       u32 cur_fw_ps_map;
+       u8 hlid;
+
+       cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+       if (wl->ap_fw_ps_map != cur_fw_ps_map) {
+               wl1271_debug(DEBUG_PSM,
+                            "link ps prev 0x%x cur 0x%x changed 0x%x",
+                            wl->ap_fw_ps_map, cur_fw_ps_map,
+                            wl->ap_fw_ps_map ^ cur_fw_ps_map);
+
+               wl->ap_fw_ps_map = cur_fw_ps_map;
+       }
+
+       for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
+               u8 cnt = status->tx_lnk_free_blks[hlid] -
+                       wl->links[hlid].prev_freed_blks;
+
+               wl->links[hlid].prev_freed_blks =
+                       status->tx_lnk_free_blks[hlid];
+               wl->links[hlid].allocated_blks -= cnt;
+
+               wl1271_irq_ps_regulate_link(wl, hlid,
+                                           wl->links[hlid].allocated_blks);
+       }
+}
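
The two helpers above keep a per-link transmit-block balance: the firmware reports a cumulative free-block counter per link, the driver subtracts the delta from allocated_blks, and the result drives host-side powersave for that link. A userspace model of that accounting, including the unsigned wrap-around of the 8-bit counter (the threshold and names are placeholders, not the driver's values):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PS_STA_MAX_BLOCKS 2	/* assumed threshold */

struct link {
	uint8_t prev_freed_blks;
	int allocated_blks;
	bool in_ps;		/* host-side PS state for this link */
};

static void regulate_link(struct link *lnk, uint8_t fw_freed, bool fw_ps)
{
	/* the subtraction wraps safely in 8 bits */
	uint8_t delta = (uint8_t)(fw_freed - lnk->prev_freed_blks);

	lnk->prev_freed_blks = fw_freed;
	lnk->allocated_blks -= delta;

	if (!fw_ps || lnk->allocated_blks < PS_STA_MAX_BLOCKS)
		lnk->in_ps = false;	/* end host PS for this link */
	else
		lnk->in_ps = true;	/* asleep with enough queued blocks */
}

int main(void)
{
	struct link lnk = { .prev_freed_blks = 250, .allocated_blks = 10 };

	regulate_link(&lnk, 2, true);	/* counter wrapped: 8 blocks freed */
	printf("allocated=%d in_ps=%d\n", lnk.allocated_blks, lnk.in_ps);
	return 0;
}
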
+
 static void wl1271_fw_status(struct wl1271 *wl,
-                            struct wl1271_fw_status *status)
+                            struct wl1271_fw_full_status *full_status)
 {
+       struct wl1271_fw_common_status *status = &full_status->common;
        struct timespec ts;
        u32 total = 0;
        int i;
 
-       wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+                               sizeof(struct wl1271_fw_ap_status), false);
+       else
+               wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+                               sizeof(struct wl1271_fw_sta_status), false);
 
        wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
                     "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -507,22 +625,54 @@ static void wl1271_fw_status(struct wl1271 *wl,
        if (total)
                clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
+       /* for AP update num of allocated TX blocks per link and ps status */
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl1271_irq_update_links_status(wl, &full_status->ap);
+
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
        wl->time_offset = (timespec_to_ns(&ts) >> 10) -
                (s64)le32_to_cpu(status->fw_localtime);
 }
 
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+       struct sk_buff *skb;
+
+       /* Pass all received frames to the network stack */
+       while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+               ieee80211_rx_ni(wl->hw, skb);
+
+       /* Return sent skbs to the network stack */
+       while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+               ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+       struct wl1271 *wl =
+               container_of(work, struct wl1271, netstack_work);
+
+       do {
+               wl1271_flush_deferred_work(wl);
+       } while (skb_queue_len(&wl->deferred_rx_queue));
+}
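
wl1271_flush_deferred_work() and wl1271_netstack_work() above decouple chip servicing from the network stack: frames are parked on deferred queues and handed to mac80211 from a work item, or flushed inline when the queues grow too long, as done further down. A hedged kernel-style sketch of that defer-to-work shape, assuming a context struct with the same two members; locking and the TX status queue are omitted:

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/mac80211.h>

struct demo_ctx {
	struct ieee80211_hw *hw;
	struct sk_buff_head deferred_rx_queue;
	struct work_struct netstack_work;
};

static void demo_netstack_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx,
					    netstack_work);
	struct sk_buff *skb;

	/* drain until empty; more frames may be queued while we run */
	while ((skb = skb_dequeue(&ctx->deferred_rx_queue)))
		ieee80211_rx_ni(ctx->hw, skb);
}

static void demo_init(struct demo_ctx *ctx)
{
	skb_queue_head_init(&ctx->deferred_rx_queue);
	INIT_WORK(&ctx->netstack_work, demo_netstack_work);
}
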
+
+#define WL1271_IRQ_MAX_LOOPS 256
 
-static void wl1271_irq_work(struct work_struct *work)
+irqreturn_t wl1271_irq(int irq, void *cookie)
 {
        int ret;
        u32 intr;
        int loopcount = WL1271_IRQ_MAX_LOOPS;
+       struct wl1271 *wl = (struct wl1271 *)cookie;
+       bool done = false;
+       unsigned int defer_count;
        unsigned long flags;
-       struct wl1271 *wl =
-               container_of(work, struct wl1271, irq_work);
+
+       /* TX might be handled here, avoid redundant work */
+       set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+       cancel_work_sync(&wl->tx_work);
 
        mutex_lock(&wl->mutex);
 
@@ -531,26 +681,27 @@ static void wl1271_irq_work(struct work_struct *work)
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, true);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
-               clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-               loopcount--;
+       while (!done && loopcount--) {
+               /*
+                * In order to avoid a race with the hardirq, clear the flag
+                * before acknowledging the chip. Since the mutex is held,
+                * wl1271_ps_elp_wakeup cannot be called concurrently.
+                */
+               clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+               smp_mb__after_clear_bit();
 
                wl1271_fw_status(wl, wl->fw_status);
-               intr = le32_to_cpu(wl->fw_status->intr);
+               intr = le32_to_cpu(wl->fw_status->common.intr);
+               intr &= WL1271_INTR_MASK;
                if (!intr) {
-                       wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
-                       spin_lock_irqsave(&wl->wl_lock, flags);
+                       done = true;
                        continue;
                }
 
-               intr &= WL1271_INTR_MASK;
-
                if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
                        wl1271_error("watchdog interrupt received! "
                                     "starting recovery.");
@@ -560,25 +711,35 @@ static void wl1271_irq_work(struct work_struct *work)
                        goto out;
                }
 
-               if (intr & WL1271_ACX_INTR_DATA) {
+               if (likely(intr & WL1271_ACX_INTR_DATA)) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
-                       /* check for tx results */
-                       if (wl->fw_status->tx_results_counter !=
-                           (wl->tx_results_count & 0xff))
-                               wl1271_tx_complete(wl);
+                       wl1271_rx(wl, &wl->fw_status->common);
 
                        /* Check if any tx blocks were freed */
+                       spin_lock_irqsave(&wl->wl_lock, flags);
                        if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
                            wl->tx_queue_count) {
+                               spin_unlock_irqrestore(&wl->wl_lock, flags);
                                /*
                                 * In order to avoid starvation of the TX path,
                                 * call the work function directly.
                                 */
                                wl1271_tx_work_locked(wl);
+                       } else {
+                               spin_unlock_irqrestore(&wl->wl_lock, flags);
                        }
 
-                       wl1271_rx(wl, wl->fw_status);
+                       /* check for tx results */
+                       if (wl->fw_status->common.tx_results_counter !=
+                           (wl->tx_results_count & 0xff))
+                               wl1271_tx_complete(wl);
+
+                       /* Make sure the deferred queues don't get too long */
+                       defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+                                     skb_queue_len(&wl->deferred_rx_queue);
+                       if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+                               wl1271_flush_deferred_work(wl);
                }
 
                if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -597,28 +758,48 @@ static void wl1271_irq_work(struct work_struct *work)
 
                if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
-               spin_lock_irqsave(&wl->wl_lock, flags);
        }
 
-       if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
-               ieee80211_queue_work(wl->hw, &wl->irq_work);
-       else
-               clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
        wl1271_ps_elp_sleep(wl);
 
 out:
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       /* In case TX was not handled here, queue TX work */
+       clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+           wl->tx_queue_count)
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
        mutex_unlock(&wl->mutex);
+
+       return IRQ_HANDLED;
 }
+EXPORT_SYMBOL_GPL(wl1271_irq);
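
wl1271_irq() is now a proper interrupt handler: it returns IRQ_HANDLED, may sleep (mutex, chip I/O) and is exported so the bus glue can register it. A hedged sketch of how such a handler could be hooked up as a threaded IRQ; the trigger flags, irq number and header name are assumptions, and the real registration lives in the SDIO/SPI front-ends, which are not part of this hunk:

#include <linux/interrupt.h>
#include "wl12xx.h"	/* assumed header declaring struct wl1271 and wl1271_irq() */

static int demo_register_irq(struct wl1271 *wl, int irq)
{
	/*
	 * No primary handler (NULL): the core just wakes the IRQ thread,
	 * and wl1271_irq() runs in sleepable context.  IRQF_ONESHOT keeps
	 * the line masked until the thread function returns.
	 */
	return request_threaded_irq(irq, NULL, wl1271_irq,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "wl1271", wl);
}
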
 
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
        const struct firmware *fw;
+       const char *fw_name;
        int ret;
 
-       ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+       switch (wl->bss_type) {
+       case BSS_TYPE_AP_BSS:
+               fw_name = WL1271_AP_FW_NAME;
+               break;
+       case BSS_TYPE_IBSS:
+       case BSS_TYPE_STA_BSS:
+               fw_name = WL1271_FW_NAME;
+               break;
+       default:
+               wl1271_error("no compatible firmware for bss_type %d",
+                            wl->bss_type);
+               return -EINVAL;
+       }
+
+       wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
+
+       ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
 
        if (ret < 0) {
                wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +813,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
                goto out;
        }
 
+       vfree(wl->fw);
        wl->fw_len = fw->size;
        wl->fw = vmalloc(wl->fw_len);
 
@@ -642,7 +824,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
        }
 
        memcpy(wl->fw, fw->data, wl->fw_len);
-
+       wl->fw_bss_type = wl->bss_type;
        ret = 0;
 
 out:
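
With separate firmware images per role, the fetch path above picks the file name from wl->bss_type and records it in wl->fw_bss_type, so wl1271_chip_wakeup() (next hunk) can force a re-fetch when the interface type changes. An illustrative helper, not from the patch, with made-up names:

#include <stddef.h>

enum demo_bss_type { DEMO_BSS_STA, DEMO_BSS_IBSS, DEMO_BSS_AP };

static const char *demo_fw_name(enum demo_bss_type type)
{
	switch (type) {
	case DEMO_BSS_AP:
		return "demo-fw-ap.bin";	/* assumed file name */
	case DEMO_BSS_STA:
	case DEMO_BSS_IBSS:
		return "demo-fw.bin";		/* assumed file name */
	}
	return NULL;
}

static int demo_need_refetch(const void *cached_image,
			     enum demo_bss_type cached_type,
			     enum demo_bss_type wanted_type)
{
	/* mirrors: wl->fw == NULL || wl->fw_bss_type != wl->bss_type */
	return cached_image == NULL || cached_type != wanted_type;
}
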
@@ -778,7 +960,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                goto out;
        }
 
-       if (wl->fw == NULL) {
+       /* Make sure the firmware type matches the BSS type */
+       if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
                ret = wl1271_fetch_firmware(wl);
                if (ret < 0)
                        goto out;
@@ -811,6 +994,8 @@ int wl1271_plt_start(struct wl1271 *wl)
                goto out;
        }
 
+       wl->bss_type = BSS_TYPE_STA_BSS;
+
        while (retries) {
                retries--;
                ret = wl1271_chip_wakeup(wl);
@@ -827,11 +1012,10 @@ int wl1271_plt_start(struct wl1271 *wl)
 
                wl->state = WL1271_STATE_PLT;
                wl1271_notice("firmware booted in PLT mode (%s)",
-                             wl->chip.fw_ver);
+                             wl->chip.fw_ver_str);
                goto out;
 
 irq_disable:
-               wl1271_disable_interrupts(wl);
                mutex_unlock(&wl->mutex);
                /* Unlocking the mutex in the middle of handling is
                   inherently unsafe. In this case we deem it safe to do,
@@ -840,7 +1024,9 @@ irq_disable:
                   work function will not do anything.) Also, any other
                   possible concurrent operations will fail due to the
                   current state, hence the wl1271 struct should be safe. */
-               cancel_work_sync(&wl->irq_work);
+               wl1271_disable_interrupts(wl);
+               wl1271_flush_deferred_work(wl);
+               cancel_work_sync(&wl->netstack_work);
                mutex_lock(&wl->mutex);
 power_off:
                wl1271_power_off(wl);
@@ -854,12 +1040,10 @@ out:
        return ret;
 }
 
-int wl1271_plt_stop(struct wl1271 *wl)
+int __wl1271_plt_stop(struct wl1271 *wl)
 {
        int ret = 0;
 
-       mutex_lock(&wl->mutex);
-
        wl1271_notice("power down");
 
        if (wl->state != WL1271_STATE_PLT) {
@@ -869,70 +1053,46 @@ int wl1271_plt_stop(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_disable_interrupts(wl);
        wl1271_power_off(wl);
 
        wl->state = WL1271_STATE_OFF;
        wl->rx_counter = 0;
 
-out:
        mutex_unlock(&wl->mutex);
-
-       cancel_work_sync(&wl->irq_work);
+       wl1271_disable_interrupts(wl);
+       wl1271_flush_deferred_work(wl);
+       cancel_work_sync(&wl->netstack_work);
        cancel_work_sync(&wl->recovery_work);
+       mutex_lock(&wl->mutex);
+out:
+       return ret;
+}
+
+int wl1271_plt_stop(struct wl1271 *wl)
+{
+       int ret;
 
+       mutex_lock(&wl->mutex);
+       ret = __wl1271_plt_stop(wl);
+       mutex_unlock(&wl->mutex);
        return ret;
 }
 
-static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = txinfo->control.sta;
        unsigned long flags;
        int q;
+       u8 hlid = 0;
 
-       /*
-        * peek into the rates configured in the STA entry.
-        * The rates set after connection stage, The first block only BG sets:
-        * the compare is for bit 0-16 of sta_rate_set. The second block add
-        * HT rates in case of HT supported.
-        */
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       if (sta &&
-           (sta->supp_rates[conf->channel->band] !=
-           (wl->sta_rate_set & HW_BG_RATES_MASK))) {
-               wl->sta_rate_set = sta->supp_rates[conf->channel->band];
-               set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
-       }
-
-#ifdef CONFIG_WL12XX_HT
-       if (sta &&
-           sta->ht_cap.ht_supported &&
-           ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
-             sta->ht_cap.mcs.rx_mask[0])) {
-               /* Clean MCS bits before setting them */
-               wl->sta_rate_set &= HW_BG_RATES_MASK;
-               wl->sta_rate_set |=
-                       (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
-               set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
-       }
-#endif
-       wl->tx_queue_count++;
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-       /* queue the packet */
        q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-       skb_queue_tail(&wl->tx_queue[q], skb);
 
-       /*
-        * The chip specific setup must run before the first TX packet -
-        * before that, the tx_work will not be initialized!
-        */
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               hlid = wl1271_tx_get_hlid(skb);
 
-       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       spin_lock_irqsave(&wl->wl_lock, flags);
+
+       wl->tx_queue_count++;
 
        /*
         * The workqueue is slow to process the tx_queue and we need to stop
@@ -940,14 +1100,28 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         */
        if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
                wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
-
-               spin_lock_irqsave(&wl->wl_lock, flags);
                ieee80211_stop_queues(wl->hw);
                set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
 
-       return NETDEV_TX_OK;
+       /* queue the packet */
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+               skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+       } else {
+               skb_queue_tail(&wl->tx_queue[q], skb);
+       }
+
+       /*
+        * The chip specific setup must run before the first TX packet -
+        * before that, the tx_work will not be initialized!
+        */
+
+       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+           !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
+
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
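
The reworked op_tx above keeps only the bookkeeping under the spinlock: it bumps tx_queue_count, applies high-watermark backpressure by stopping the mac80211 queues, queues the skb (per-link for AP, single queue for STA) and kicks tx_work unless the IRQ path is already handling TX. A minimal model of the watermark part (the threshold is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TX_QUEUE_HIGH_WATERMARK 256

struct demo_dev {
	int tx_queue_count;
	bool queues_stopped;
};

static void demo_enqueue(struct demo_dev *dev)
{
	dev->tx_queue_count++;

	if (!dev->queues_stopped &&
	    dev->tx_queue_count >= TX_QUEUE_HIGH_WATERMARK) {
		dev->queues_stopped = true;	/* ieee80211_stop_queues() */
		printf("stopping queues at %d packets\n", dev->tx_queue_count);
	}
}

int main(void)
{
	struct demo_dev dev = { 0 };
	int i;

	for (i = 0; i < 300; i++)
		demo_enqueue(&dev);
	return 0;
}
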
 
 static struct notifier_block wl1271_dev_notifier = {
@@ -967,6 +1141,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
         *
         * The MAC address is first known when the corresponding interface
         * is added. That is where we will initialize the hardware.
+        *
+        * In addition, we currently have different firmwares for AP and managed
+        * operation. We will know which to boot according to interface type.
         */
 
        return 0;
@@ -1006,6 +1183,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                wl->bss_type = BSS_TYPE_IBSS;
                wl->set_bss_type = BSS_TYPE_STA_BSS;
                break;
+       case NL80211_IFTYPE_AP:
+               wl->bss_type = BSS_TYPE_AP_BSS;
+               break;
        default:
                ret = -EOPNOTSUPP;
                goto out;
@@ -1038,7 +1218,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                break;
 
 irq_disable:
-               wl1271_disable_interrupts(wl);
                mutex_unlock(&wl->mutex);
                /* Unlocking the mutex in the middle of handling is
                   inherently unsafe. In this case we deem it safe to do,
@@ -1047,7 +1226,9 @@ irq_disable:
                   work function will not do anything.) Also, any other
                   possible concurrent operations will fail due to the
                   current state, hence the wl1271 struct should be safe. */
-               cancel_work_sync(&wl->irq_work);
+               wl1271_disable_interrupts(wl);
+               wl1271_flush_deferred_work(wl);
+               cancel_work_sync(&wl->netstack_work);
                mutex_lock(&wl->mutex);
 power_off:
                wl1271_power_off(wl);
@@ -1061,11 +1242,11 @@ power_off:
 
        wl->vif = vif;
        wl->state = WL1271_STATE_ON;
-       wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+       wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
 
        /* update hw/fw version info in wiphy struct */
        wiphy->hw_version = wl->chip.id;
-       strncpy(wiphy->fw_version, wl->chip.fw_ver,
+       strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
                sizeof(wiphy->fw_version));
 
        /*
@@ -1113,12 +1294,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
 
        wl->state = WL1271_STATE_OFF;
 
-       wl1271_disable_interrupts(wl);
-
        mutex_unlock(&wl->mutex);
 
+       wl1271_disable_interrupts(wl);
+       wl1271_flush_deferred_work(wl);
        cancel_delayed_work_sync(&wl->scan_complete_work);
-       cancel_work_sync(&wl->irq_work);
+       cancel_work_sync(&wl->netstack_work);
        cancel_work_sync(&wl->tx_work);
        cancel_delayed_work_sync(&wl->pspoll_work);
        cancel_delayed_work_sync(&wl->elp_work);
@@ -1147,10 +1328,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
        wl->time_offset = 0;
        wl->session_counter = 0;
        wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->sta_rate_set = 0;
        wl->flags = 0;
        wl->vif = NULL;
        wl->filters = 0;
+       wl1271_free_ap_keys(wl);
+       memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
+       wl->ap_fw_ps_map = 0;
+       wl->ap_ps_map = 0;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wl->tx_blocks_freed[i] = 0;
@@ -1186,8 +1370,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
 
 static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
 {
-       wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-       wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+       wl1271_set_default_filters(wl);
 
        /* combine requested filters with current filter config */
        filters = wl->filters | filters;
@@ -1322,25 +1505,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
                wl->basic_rate_set = wl->conf.tx.basic_rate_5;
 }
 
-static u32 wl1271_min_rate_get(struct wl1271 *wl)
-{
-       int i;
-       u32 rate = 0;
-
-       if (!wl->basic_rate_set) {
-               WARN_ON(1);
-               wl->basic_rate_set = wl->conf.tx.basic_rate;
-       }
-
-       for (i = 0; !rate; i++) {
-               if ((wl->basic_rate_set >> i) & 0x1)
-                       rate = 1 << i;
-       }
-
-       return rate;
-}
-
-static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
 {
        int ret;
 
@@ -1350,9 +1515,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
                        if (ret < 0)
                                goto out;
                }
-               wl->rate_set = wl1271_min_rate_get(wl);
-               wl->sta_rate_set = 0;
-               ret = wl1271_acx_rate_policies(wl);
+               wl->rate_set = wl1271_tx_min_rate_get(wl);
+               ret = wl1271_acx_sta_rate_policies(wl);
                if (ret < 0)
                        goto out;
                ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1545,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
        struct wl1271 *wl = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
        int channel, ret = 0;
+       bool is_ap;
 
        channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
+       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+                    " changed 0x%x",
                     channel,
                     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
                     conf->power_level,
-                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+                        changed);
 
        /*
         * mac80211 will go to idle nearly immediately after transmitting some
@@ -1406,7 +1573,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1417,31 +1586,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                wl->band = conf->channel->band;
                wl->channel = channel;
 
-               /*
-                * FIXME: the mac80211 should really provide a fixed rate
-                * to use here. for now, just use the smallest possible rate
-                * for the band as a fixed rate for association frames and
-                * other control messages.
-                */
-               if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-                       wl1271_set_band_rate(wl);
-
-               wl->basic_rate = wl1271_min_rate_get(wl);
-               ret = wl1271_acx_rate_policies(wl);
-               if (ret < 0)
-                       wl1271_warning("rate policy for update channel "
-                                      "failed %d", ret);
+               if (!is_ap) {
+                       /*
+                        * FIXME: the mac80211 should really provide a fixed
+                        * rate to use here. for now, just use the smallest
+                        * possible rate for the band as a fixed rate for
+                        * association frames and other control messages.
+                        */
+                       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+                               wl1271_set_band_rate(wl);
 
-               if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
-                       ret = wl1271_join(wl, false);
+                       wl->basic_rate = wl1271_tx_min_rate_get(wl);
+                       ret = wl1271_acx_sta_rate_policies(wl);
                        if (ret < 0)
-                               wl1271_warning("cmd join to update channel "
+                               wl1271_warning("rate policy for channel "
                                               "failed %d", ret);
+
+                       if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+                               ret = wl1271_join(wl, false);
+                               if (ret < 0)
+                                       wl1271_warning("cmd join on channel "
+                                                      "failed %d", ret);
+                       }
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_IDLE) {
-               ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+       if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
+               ret = wl1271_sta_handle_idle(wl,
+                                       conf->flags & IEEE80211_CONF_IDLE);
                if (ret < 0)
                        wl1271_warning("idle mode change failed %d", ret);
        }
@@ -1548,7 +1720,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        struct wl1271 *wl = hw->priv;
        int ret;
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
+       wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
+                    " total %x", changed, *total);
 
        mutex_lock(&wl->mutex);
 
@@ -1558,19 +1731,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-
-       if (*total & FIF_ALLMULTI)
-               ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
-       else if (fp)
-               ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
-                                                  fp->mc_list,
-                                                  fp->mc_list_length);
-       if (ret < 0)
-               goto out_sleep;
+       if (wl->bss_type != BSS_TYPE_AP_BSS) {
+               if (*total & FIF_ALLMULTI)
+                       ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+               else if (fp)
+                       ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+                                                          fp->mc_list,
+                                                          fp->mc_list_length);
+               if (ret < 0)
+                       goto out_sleep;
+       }
 
        /* determine, whether supported filter values have changed */
        if (changed == 0)
@@ -1593,38 +1767,192 @@ out:
        kfree(fp);
 }
 
+static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
+                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                       u16 tx_seq_16)
+{
+       struct wl1271_ap_key *ap_key;
+       int i;
+
+       wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
+
+       if (key_size > MAX_KEY_SIZE)
+               return -EINVAL;
+
+       /*
+        * Find next free entry in ap_keys. Also check we are not replacing
+        * an existing key.
+        */
+       for (i = 0; i < MAX_NUM_KEYS; i++) {
+               if (wl->recorded_ap_keys[i] == NULL)
+                       break;
+
+               if (wl->recorded_ap_keys[i]->id == id) {
+                       wl1271_warning("trying to record key replacement");
+                       return -EINVAL;
+               }
+       }
+
+       if (i == MAX_NUM_KEYS)
+               return -EBUSY;
+
+       ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
+       if (!ap_key)
+               return -ENOMEM;
+
+       ap_key->id = id;
+       ap_key->key_type = key_type;
+       ap_key->key_size = key_size;
+       memcpy(ap_key->key, key, key_size);
+       ap_key->hlid = hlid;
+       ap_key->tx_seq_32 = tx_seq_32;
+       ap_key->tx_seq_16 = tx_seq_16;
+
+       wl->recorded_ap_keys[i] = ap_key;
+       return 0;
+}
+
+static void wl1271_free_ap_keys(struct wl1271 *wl)
+{
+       int i;
+
+       for (i = 0; i < MAX_NUM_KEYS; i++) {
+               kfree(wl->recorded_ap_keys[i]);
+               wl->recorded_ap_keys[i] = NULL;
+       }
+}
+
+static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+{
+       int i, ret = 0;
+       struct wl1271_ap_key *key;
+       bool wep_key_added = false;
+
+       for (i = 0; i < MAX_NUM_KEYS; i++) {
+               if (wl->recorded_ap_keys[i] == NULL)
+                       break;
+
+               key = wl->recorded_ap_keys[i];
+               ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+                                           key->id, key->key_type,
+                                           key->key_size, key->key,
+                                           key->hlid, key->tx_seq_32,
+                                           key->tx_seq_16);
+               if (ret < 0)
+                       goto out;
+
+               if (key->key_type == KEY_WEP)
+                       wep_key_added = true;
+       }
+
+       if (wep_key_added) {
+               ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+               if (ret < 0)
+                       goto out;
+       }
+
+out:
+       wl1271_free_ap_keys(wl);
+       return ret;
+}
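
wl1271_record_ap_key() and wl1271_ap_init_hwenc() implement a record-then-replay scheme: keys configured by mac80211 before the AP is started cannot be programmed into the firmware yet, so they are stored, pushed in one pass once the AP is up, and then freed. A self-contained sketch of that scheme with placeholder sizes and without the WEP default-key handling:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define DEMO_MAX_KEYS	14
#define DEMO_KEY_SIZE	32

struct demo_key {
	unsigned char id;
	unsigned char key[DEMO_KEY_SIZE];
	size_t len;
};

static struct demo_key *recorded[DEMO_MAX_KEYS];

static int record_key(unsigned char id, const unsigned char *key, size_t len)
{
	int i;

	if (len > DEMO_KEY_SIZE)
		return -1;

	/* find the first free slot, refusing replacements of recorded ids */
	for (i = 0; i < DEMO_MAX_KEYS; i++) {
		if (!recorded[i])
			break;
		if (recorded[i]->id == id)
			return -1;
	}
	if (i == DEMO_MAX_KEYS)
		return -1;	/* table full */

	recorded[i] = malloc(sizeof(*recorded[i]));
	if (!recorded[i])
		return -1;
	recorded[i]->id = id;
	recorded[i]->len = len;
	memcpy(recorded[i]->key, key, len);
	return 0;
}

static void replay_and_free_keys(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_KEYS && recorded[i]; i++) {
		printf("apply key id %u (%zu bytes)\n",
		       (unsigned)recorded[i]->id, recorded[i]->len);
		free(recorded[i]);
		recorded[i] = NULL;
	}
}

int main(void)
{
	record_key(0, (const unsigned char *)"k0", 2);
	record_key(1, (const unsigned char *)"k1", 2);
	replay_and_free_keys();
	return 0;
}
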
+
+static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                      u8 key_size, const u8 *key, u32 tx_seq_32,
+                      u16 tx_seq_16, struct ieee80211_sta *sta)
+{
+       int ret;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+       if (is_ap) {
+               struct wl1271_station *wl_sta;
+               u8 hlid;
+
+               if (sta) {
+                       wl_sta = (struct wl1271_station *)sta->drv_priv;
+                       hlid = wl_sta->hlid;
+               } else {
+                       hlid = WL1271_AP_BROADCAST_HLID;
+               }
+
+               if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+                       /*
+                        * We do not support removing keys after AP shutdown.
+                        * Pretend we do to make mac80211 happy.
+                        */
+                       if (action != KEY_ADD_OR_REPLACE)
+                               return 0;
+
+                       ret = wl1271_record_ap_key(wl, id,
+                                            key_type, key_size,
+                                            key, hlid, tx_seq_32,
+                                            tx_seq_16);
+               } else {
+                       ret = wl1271_cmd_set_ap_key(wl, action,
+                                            id, key_type, key_size,
+                                            key, hlid, tx_seq_32,
+                                            tx_seq_16);
+               }
+
+               if (ret < 0)
+                       return ret;
+       } else {
+               const u8 *addr;
+               static const u8 bcast_addr[ETH_ALEN] = {
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+               };
+
+               addr = sta ? sta->addr : bcast_addr;
+
+               if (is_zero_ether_addr(addr)) {
+                       /* We don't support TX-only encryption */
+                       return -EOPNOTSUPP;
+               }
+
+               /* The wl1271 does not allow removing unicast keys - they
+                  will be cleared automatically on the next CMD_JOIN. Ignore
+                  the request silently, as we don't want mac80211 to emit
+                  an error message. */
+               if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
+                       return 0;
+
+               ret = wl1271_cmd_set_sta_key(wl, action,
+                                            id, key_type, key_size,
+                                            key, addr, tx_seq_32,
+                                            tx_seq_16);
+               if (ret < 0)
+                       return ret;
+
+               /* the default WEP key needs to be configured at least once */
+               if (key_type == KEY_WEP) {
+                       ret = wl1271_cmd_set_sta_default_wep_key(wl,
+                                                       wl->default_key);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             struct ieee80211_key_conf *key_conf)
 {
        struct wl1271 *wl = hw->priv;
-       const u8 *addr;
        int ret;
        u32 tx_seq_32 = 0;
        u16 tx_seq_16 = 0;
        u8 key_type;
 
-       static const u8 bcast_addr[ETH_ALEN] =
-               { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
        wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
 
-       addr = sta ? sta->addr : bcast_addr;
-
-       wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
-       wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+       wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
        wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
                     key_conf->cipher, key_conf->keyidx,
                     key_conf->keylen, key_conf->flags);
        wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
 
-       if (is_zero_ether_addr(addr)) {
-               /* We dont support TX only encryption */
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
        mutex_lock(&wl->mutex);
 
        if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1632,7 +1960,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                goto out_unlock;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_unlock;
 
@@ -1671,36 +1999,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        switch (cmd) {
        case SET_KEY:
-               ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
-                                        key_conf->keyidx, key_type,
-                                        key_conf->keylen, key_conf->key,
-                                        addr, tx_seq_32, tx_seq_16);
+               ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+                                key_conf->keyidx, key_type,
+                                key_conf->keylen, key_conf->key,
+                                tx_seq_32, tx_seq_16, sta);
                if (ret < 0) {
                        wl1271_error("Could not add or replace key");
                        goto out_sleep;
                }
-
-               /* the default WEP key needs to be configured at least once */
-               if (key_type == KEY_WEP) {
-                       ret = wl1271_cmd_set_default_wep_key(wl,
-                                                            wl->default_key);
-                       if (ret < 0)
-                               goto out_sleep;
-               }
                break;
 
        case DISABLE_KEY:
-               /* The wl1271 does not allow to remove unicast keys - they
-                  will be cleared automatically on next CMD_JOIN. Ignore the
-                  request silently, as we dont want the mac80211 to emit
-                  an error message. */
-               if (!is_broadcast_ether_addr(addr))
-                       break;
-
-               ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
-                                        key_conf->keyidx, key_type,
-                                        key_conf->keylen, key_conf->key,
-                                        addr, 0, 0);
+               ret = wl1271_set_key(wl, KEY_REMOVE,
+                                    key_conf->keyidx, key_type,
+                                    key_conf->keylen, key_conf->key,
+                                    0, 0, sta);
                if (ret < 0) {
                        wl1271_error("Could not remove key");
                        goto out_sleep;
@@ -1719,7 +2032,6 @@ out_sleep:
 out_unlock:
        mutex_unlock(&wl->mutex);
 
-out:
        return ret;
 }
 
@@ -1751,7 +2063,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1777,7 +2089,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1805,7 +2117,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
                goto out;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -1821,7 +2133,7 @@ out:
        return ret;
 }
 
-static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
                            int offset)
 {
        u8 *ptr = skb->data + offset;
@@ -1831,89 +2143,213 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
                if (ptr[0] == WLAN_EID_SSID) {
                        wl->ssid_len = ptr[1];
                        memcpy(wl->ssid, ptr+2, wl->ssid_len);
-                       return;
+                       return 0;
                }
                ptr += (ptr[1] + 2);
        }
+
        wl1271_error("No SSID in IEs!\n");
+       return -ENOENT;
 }
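
wl1271_ssid_set() above walks the beacon's information elements, which are packed back to back as a 1-byte element ID, a 1-byte length and then the payload. A standalone sketch of the same walk with explicit bounds checks (illustrative only, not the driver's code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EID_SSID 0 /* WLAN_EID_SSID in the kernel headers */

/* Returns the SSID length, or -1 if no usable SSID element is found. */
static int find_ssid(const uint8_t *ies, size_t len, uint8_t *ssid, size_t max)
{
        size_t pos = 0;

        while (pos + 2 <= len) {
                uint8_t id = ies[pos];
                uint8_t elen = ies[pos + 1];

                if (pos + 2 + elen > len)
                        return -1;              /* truncated element */
                if (id == EID_SSID && elen <= max) {
                        memcpy(ssid, &ies[pos + 2], elen);
                        return (int)elen;
                }
                pos += 2 + elen;
        }
        return -1;
}

int main(void)
{
        /* SSID "wl12xx" followed by a dummy supported-rates element */
        const uint8_t ies[] = { 0, 6, 'w', 'l', '1', '2', 'x', 'x', 1, 1, 0x82 };
        uint8_t ssid[32];

        return find_ssid(ies, sizeof(ies), ssid, sizeof(ssid)) == 6 ? 0 : 1;
}
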
 
-static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif,
+static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
                                       struct ieee80211_bss_conf *bss_conf,
                                       u32 changed)
 {
-       enum wl1271_cmd_ps_mode mode;
-       struct wl1271 *wl = hw->priv;
-       struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
-       bool do_join = false;
-       bool set_assoc = false;
-       int ret;
+       int ret = 0;
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               if (bss_conf->use_short_slot)
+                       ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+               else
+                       ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+               if (ret < 0) {
+                       wl1271_warning("Set slot time failed %d", ret);
+                       goto out;
+               }
+       }
 
-       mutex_lock(&wl->mutex);
+       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+               if (bss_conf->use_short_preamble)
+                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+               else
+                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+       }
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
-               goto out;
+       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+               if (bss_conf->use_cts_prot)
+                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+               else
+                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+               if (ret < 0) {
+                       wl1271_warning("Set ctsprotect failed %d", ret);
+                       goto out;
+               }
+       }
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
-       if (ret < 0)
-               goto out;
+out:
+       return ret;
+}
+
+static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
+                                         struct ieee80211_vif *vif,
+                                         struct ieee80211_bss_conf *bss_conf,
+                                         u32 changed)
+{
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       int ret = 0;
 
-       if ((changed & BSS_CHANGED_BEACON_INT) &&
-           (wl->bss_type == BSS_TYPE_IBSS)) {
-               wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+       if ((changed & BSS_CHANGED_BEACON_INT)) {
+               wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
                        bss_conf->beacon_int);
 
                wl->beacon_int = bss_conf->beacon_int;
-               do_join = true;
        }
 
-       if ((changed & BSS_CHANGED_BEACON) &&
-           (wl->bss_type == BSS_TYPE_IBSS)) {
-               struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+       if ((changed & BSS_CHANGED_BEACON)) {
+               struct ieee80211_hdr *hdr;
+               int ieoffset = offsetof(struct ieee80211_mgmt,
+                                       u.beacon.variable);
+               struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+               u16 tmpl_id;
+
+               if (!beacon)
+                       goto out;
+
+               wl1271_debug(DEBUG_MASTER, "beacon updated");
+
+               ret = wl1271_ssid_set(wl, beacon, ieoffset);
+               if (ret < 0) {
+                       dev_kfree_skb(beacon);
+                       goto out;
+               }
+               tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+                                 CMD_TEMPL_BEACON;
+               ret = wl1271_cmd_template_set(wl, tmpl_id,
+                                             beacon->data,
+                                             beacon->len, 0,
+                                             wl1271_tx_min_rate_get(wl));
+               if (ret < 0) {
+                       dev_kfree_skb(beacon);
+                       goto out;
+               }
 
-               wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+               hdr = (struct ieee80211_hdr *) beacon->data;
+               hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                                IEEE80211_STYPE_PROBE_RESP);
+
+               tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
+                                 CMD_TEMPL_PROBE_RESPONSE;
+               ret = wl1271_cmd_template_set(wl,
+                                             tmpl_id,
+                                             beacon->data,
+                                             beacon->len, 0,
+                                             wl1271_tx_min_rate_get(wl));
+               dev_kfree_skb(beacon);
+               if (ret < 0)
+                       goto out;
+       }
 
-               if (beacon) {
-                       struct ieee80211_hdr *hdr;
-                       int ieoffset = offsetof(struct ieee80211_mgmt,
-                                               u.beacon.variable);
+out:
+       return ret;
+}
 
-                       wl1271_ssid_set(wl, beacon, ieoffset);
+/* AP mode changes */
+static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_bss_conf *bss_conf,
+                                      u32 changed)
+{
+       int ret = 0;
 
-                       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
-                                                     beacon->data,
-                                                     beacon->len, 0,
-                                                     wl1271_min_rate_get(wl));
+       if ((changed & BSS_CHANGED_BASIC_RATES)) {
+               u32 rates = bss_conf->basic_rates;
+               struct conf_tx_rate_class mgmt_rc;
+
+               wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
+               wl->basic_rate = wl1271_tx_min_rate_get(wl);
+               wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
+                            wl->basic_rate_set);
+
+               /* update the AP management rate policy with the new rates */
+               mgmt_rc.enabled_rates = wl->basic_rate_set;
+               mgmt_rc.long_retry_limit = 10;
+               mgmt_rc.short_retry_limit = 10;
+               mgmt_rc.aflags = 0;
+               ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
+                                               ACX_TX_AP_MODE_MGMT_RATE);
+               if (ret < 0) {
+                       wl1271_error("AP mgmt policy change failed %d", ret);
+                       goto out;
+               }
+       }
 
-                       if (ret < 0) {
-                               dev_kfree_skb(beacon);
-                               goto out_sleep;
-                       }
+       ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
+       if (ret < 0)
+               goto out;
 
-                       hdr = (struct ieee80211_hdr *) beacon->data;
-                       hdr->frame_control = cpu_to_le16(
-                               IEEE80211_FTYPE_MGMT |
-                               IEEE80211_STYPE_PROBE_RESP);
+       if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+               if (bss_conf->enable_beacon) {
+                       if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+                               ret = wl1271_cmd_start_bss(wl);
+                               if (ret < 0)
+                                       goto out;
 
-                       ret = wl1271_cmd_template_set(wl,
-                                                     CMD_TEMPL_PROBE_RESPONSE,
-                                                     beacon->data,
-                                                     beacon->len, 0,
-                                                     wl1271_min_rate_get(wl));
-                       dev_kfree_skb(beacon);
-                       if (ret < 0)
-                               goto out_sleep;
+                               set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               wl1271_debug(DEBUG_AP, "started AP");
 
-                       /* Need to update the SSID (for filtering etc) */
-                       do_join = true;
+                               ret = wl1271_ap_init_hwenc(wl);
+                               if (ret < 0)
+                                       goto out;
+                       }
+               } else {
+                       if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+                               ret = wl1271_cmd_stop_bss(wl);
+                               if (ret < 0)
+                                       goto out;
+
+                               clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               wl1271_debug(DEBUG_AP, "stopped AP");
+                       }
                }
        }
 
-       if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-           (wl->bss_type == BSS_TYPE_IBSS)) {
+       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       if (ret < 0)
+               goto out;
+out:
+       return;
+}
+
+/* STA/IBSS mode changes */
+static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_bss_conf *bss_conf,
+                                       u32 changed)
+{
+       bool do_join = false, set_assoc = false;
+       bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+       u32 sta_rate_set = 0;
+       int ret;
+       struct ieee80211_sta *sta;
+       bool sta_exists = false;
+       struct ieee80211_sta_ht_cap sta_ht_cap;
+
+       if (is_ibss) {
+               ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
+                                                    changed);
+               if (ret < 0)
+                       goto out;
+       }
+
+       if ((changed & BSS_CHANGED_BEACON_INT)  && is_ibss)
+               do_join = true;
+
+       /* Need to update the SSID (for filtering etc) */
+       if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+               do_join = true;
+
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
                wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
                             bss_conf->enable_beacon ? "enabled" : "disabled");
 
@@ -1924,7 +2360,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                do_join = true;
        }
 
-       if (changed & BSS_CHANGED_CQM) {
+       if ((changed & BSS_CHANGED_CQM)) {
                bool enable = false;
                if (bss_conf->cqm_rssi_thold)
                        enable = true;
@@ -1942,24 +2378,70 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
             * and enable the BSSID filter
             */
            memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
-                       memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+               memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
+               if (!is_zero_ether_addr(wl->bssid)) {
                        ret = wl1271_cmd_build_null_data(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        ret = wl1271_build_qos_null_data(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* filter out all packets not from this BSSID */
                        wl1271_configure_filters(wl, 0);
 
                        /* Need to update the BSSID (for filtering etc) */
                        do_join = true;
+               }
        }
 
-       if (changed & BSS_CHANGED_ASSOC) {
+       rcu_read_lock();
+       sta = ieee80211_find_sta(vif, bss_conf->bssid);
+       if (sta)  {
+               /* save the supp_rates of the AP */
+               sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
+               if (sta->ht_cap.ht_supported)
+                       sta_rate_set |=
+                           (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+               sta_ht_cap = sta->ht_cap;
+               sta_exists = true;
+       }
+       rcu_read_unlock();
+
+       if (sta_exists) {
+               /* handle new association with HT and HT information change */
+               if ((changed & BSS_CHANGED_HT) &&
+                   (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+                       ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+                                                            true);
+                       if (ret < 0) {
+                               wl1271_warning("Set ht cap true failed %d",
+                                              ret);
+                               goto out;
+                       }
+                       ret = wl1271_acx_set_ht_information(wl,
+                                               bss_conf->ht_operation_mode);
+                       if (ret < 0) {
+                               wl1271_warning("Set ht information failed %d",
+                                              ret);
+                               goto out;
+                       }
+               }
+               /* handle new association without HT and disassociation */
+               else if (changed & BSS_CHANGED_ASSOC) {
+                       ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+                                                            false);
+                       if (ret < 0) {
+                               wl1271_warning("Set ht cap false failed %d",
+                                              ret);
+                               goto out;
+                       }
+               }
+       }
+
+       if ((changed & BSS_CHANGED_ASSOC)) {
                if (bss_conf->assoc) {
                        u32 rates;
                        int ieoffset;
@@ -1975,10 +2457,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        rates = bss_conf->basic_rates;
                        wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
                                                                         rates);
-                       wl->basic_rate = wl1271_min_rate_get(wl);
-                       ret = wl1271_acx_rate_policies(wl);
+                       wl->basic_rate = wl1271_tx_min_rate_get(wl);
+                       if (sta_rate_set)
+                               wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+                                                               sta_rate_set);
+                       ret = wl1271_acx_sta_rate_policies(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /*
                         * with wl1271, we don't need to update the
@@ -1988,7 +2473,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                         */
                        ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /*
                         * Get a template for hardware connection maintenance
@@ -2002,17 +2487,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        /* enable the connection monitoring feature */
                        ret = wl1271_acx_conn_monit_params(wl, true);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* If we want to go in PSM but we're not there yet */
                        if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
                            !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+                               enum wl1271_cmd_ps_mode mode;
+
                                mode = STATION_POWER_SAVE_MODE;
                                ret = wl1271_ps_set_mode(wl, mode,
                                                         wl->basic_rate,
                                                         true);
                                if (ret < 0)
-                                       goto out_sleep;
+                                       goto out;
                        }
                } else {
                        /* use defaults when not associated */
@@ -2029,10 +2516,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 
                        /* revert back to minimum rates for the current band */
                        wl1271_set_band_rate(wl);
-                       wl->basic_rate = wl1271_min_rate_get(wl);
-                       ret = wl1271_acx_rate_policies(wl);
+                       wl->basic_rate = wl1271_tx_min_rate_get(wl);
+                       ret = wl1271_acx_sta_rate_policies(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* disable connection monitor features */
                        ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2527,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        /* Disable the keep-alive feature */
                        ret = wl1271_acx_keep_alive_mode(wl, false);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* restore the bssid filter and go to dummy bssid */
                        wl1271_unjoin(wl);
                        wl1271_dummy_join(wl);
                }
-
        }
 
-       if (changed & BSS_CHANGED_ERP_SLOT) {
-               if (bss_conf->use_short_slot)
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
-               else
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
-               if (ret < 0) {
-                       wl1271_warning("Set slot time failed %d", ret);
-                       goto out_sleep;
-               }
-       }
-
-       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-               if (bss_conf->use_short_preamble)
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
-               else
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
-       }
-
-       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-               if (bss_conf->use_cts_prot)
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
-               else
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
-               if (ret < 0) {
-                       wl1271_warning("Set ctsprotect failed %d", ret);
-                       goto out_sleep;
-               }
-       }
-
-       /*
-        * Takes care of: New association with HT enable,
-        *                HT information change in beacon.
-        */
-       if (sta &&
-           (changed & BSS_CHANGED_HT) &&
-           (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-               ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
-               if (ret < 0) {
-                       wl1271_warning("Set ht cap true failed %d", ret);
-                       goto out_sleep;
-               }
-                       ret = wl1271_acx_set_ht_information(wl,
-                               bss_conf->ht_operation_mode);
-               if (ret < 0) {
-                       wl1271_warning("Set ht information failed %d", ret);
-                       goto out_sleep;
-               }
-       }
-       /*
-        * Takes care of: New association without HT,
-        *                Disassociation.
-        */
-       else if (sta && (changed & BSS_CHANGED_ASSOC)) {
-               ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
-               if (ret < 0) {
-                       wl1271_warning("Set ht cap false failed %d", ret);
-                       goto out_sleep;
-               }
-       }
+       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       if (ret < 0)
+               goto out;
 
        if (changed & BSS_CHANGED_ARP_FILTER) {
                __be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,29 +2554,57 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        ret = wl1271_cmd_build_arp_rsp(wl, addr);
                        if (ret < 0) {
                                wl1271_warning("build arp rsp failed: %d", ret);
-                               goto out_sleep;
+                               goto out;
                        }
 
                        ret = wl1271_acx_arp_ip_filter(wl,
-                               (ACX_ARP_FILTER_ARP_FILTERING |
-                                ACX_ARP_FILTER_AUTO_ARP),
+                               ACX_ARP_FILTER_ARP_FILTERING,
                                addr);
                } else
                        ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
 
                if (ret < 0)
-                       goto out_sleep;
+                       goto out;
        }
 
        if (do_join) {
                ret = wl1271_join(wl, set_assoc);
                if (ret < 0) {
                        wl1271_warning("cmd join failed %d", ret);
-                       goto out_sleep;
+                       goto out;
                }
        }
 
-out_sleep:
+out:
+       return;
+}
+
+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_bss_conf *bss_conf,
+                                      u32 changed)
+{
+       struct wl1271 *wl = hw->priv;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       int ret;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
+                    (int)changed);
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       if (is_ap)
+               wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
+       else
+               wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
+
        wl1271_ps_elp_sleep(wl);
 
 out:
@@ -2158,42 +2616,66 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
 {
        struct wl1271 *wl = hw->priv;
        u8 ps_scheme;
-       int ret;
+       int ret = 0;
 
        mutex_lock(&wl->mutex);
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               ret = -EAGAIN;
-               goto out;
-       }
-
-       ret = wl1271_ps_elp_wakeup(wl, false);
-       if (ret < 0)
-               goto out;
-
-       /* the txop is confed in units of 32us by the mac80211, we need us */
-       ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
-                               params->cw_min, params->cw_max,
-                               params->aifs, params->txop << 5);
-       if (ret < 0)
-               goto out_sleep;
-
        if (params->uapsd)
                ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
        else
                ps_scheme = CONF_PS_SCHEME_LEGACY;
 
-       ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
-                                CONF_CHANNEL_TYPE_EDCF,
-                                wl1271_tx_get_queue(queue),
-                                ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
-       if (ret < 0)
-               goto out_sleep;
+       if (wl->state == WL1271_STATE_OFF) {
+               /*
+                * If the state is off, the parameters are recorded now and
+                * applied later, during init. This happens in AP mode.
+                */
+               struct conf_tx_ac_category *conf_ac =
+                       &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
+               struct conf_tx_tid *conf_tid =
+                       &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
+
+               conf_ac->ac = wl1271_tx_get_queue(queue);
+               conf_ac->cw_min = (u8)params->cw_min;
+               conf_ac->cw_max = params->cw_max;
+               conf_ac->aifsn = params->aifs;
+               conf_ac->tx_op_limit = params->txop << 5;
+
+               conf_tid->queue_id = wl1271_tx_get_queue(queue);
+               conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
+               conf_tid->tsid = wl1271_tx_get_queue(queue);
+               conf_tid->ps_scheme = ps_scheme;
+               conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
+               conf_tid->apsd_conf[0] = 0;
+               conf_tid->apsd_conf[1] = 0;
+       } else {
+               ret = wl1271_ps_elp_wakeup(wl);
+               if (ret < 0)
+                       goto out;
+
+               /*
+                * the txop is given in units of 32us by mac80211,
+                * but we need it in microseconds
+                */
+               ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+                                       params->cw_min, params->cw_max,
+                                       params->aifs, params->txop << 5);
+               if (ret < 0)
+                       goto out_sleep;
+
+               ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+                                        CONF_CHANNEL_TYPE_EDCF,
+                                        wl1271_tx_get_queue(queue),
+                                        ps_scheme, CONF_ACK_POLICY_LEGACY,
+                                        0, 0);
+               if (ret < 0)
+                       goto out_sleep;
 
 out_sleep:
-       wl1271_ps_elp_sleep(wl);
+               wl1271_ps_elp_sleep(wl);
+       }
 
 out:
        mutex_unlock(&wl->mutex);
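
On the unit conversion in the comment above: mac80211 hands the driver txop in units of 32 microseconds, so shifting left by 5 yields plain microseconds. A tiny standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t txop_32us = 94;                     /* value as passed in by mac80211 */
        uint32_t txop_us = (uint32_t)txop_32us << 5; /* same duration in microseconds */

        assert(txop_us == 94 * 32);                  /* 3008 us */
        return 0;
}
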
@@ -2215,7 +2697,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2247,6 +2729,184 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
        return 0;
 }
 
+static int wl1271_allocate_sta(struct wl1271 *wl,
+                            struct ieee80211_sta *sta,
+                            u8 *hlid)
+{
+       struct wl1271_station *wl_sta;
+       int id;
+
+       id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
+       if (id >= AP_MAX_STATIONS) {
+               wl1271_warning("could not allocate HLID - too many stations");
+               return -EBUSY;
+       }
+
+       wl_sta = (struct wl1271_station *)sta->drv_priv;
+       __set_bit(id, wl->ap_hlid_map);
+       wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
+       *hlid = wl_sta->hlid;
+       memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
+       return 0;
+}
+
+static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+{
+       int id = hlid - WL1271_AP_STA_HLID_START;
+
+       if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+               return;
+
+       __clear_bit(id, wl->ap_hlid_map);
+       memset(wl->links[hlid].addr, 0, ETH_ALEN);
+       wl1271_tx_reset_link_queues(wl, hlid);
+       __clear_bit(hlid, &wl->ap_ps_map);
+       __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+}
+
+static int wl1271_op_sta_add(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta)
+{
+       struct wl1271 *wl = hw->priv;
+       int ret = 0;
+       u8 hlid;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS)
+               goto out;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
+
+       ret = wl1271_allocate_sta(wl, sta, &hlid);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_free_sta;
+
+       ret = wl1271_cmd_add_sta(wl, sta, hlid);
+       if (ret < 0)
+               goto out_sleep;
+
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+
+out_free_sta:
+       if (ret < 0)
+               wl1271_free_sta(wl, hlid);
+
+out:
+       mutex_unlock(&wl->mutex);
+       return ret;
+}
+
+static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl1271_station *wl_sta;
+       int ret = 0, id;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS)
+               goto out;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+       wl_sta = (struct wl1271_station *)sta->drv_priv;
+       id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
+       if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+       if (ret < 0)
+               goto out_sleep;
+
+       wl1271_free_sta(wl, wl_sta->hlid);
+
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+       return ret;
+}
+
+int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                          enum ieee80211_ampdu_mlme_action action,
+                          struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                          u8 buf_size)
+{
+       struct wl1271 *wl = hw->priv;
+       int ret;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               if (wl->ba_support) {
+                       ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
+                                                                true);
+                       if (!ret)
+                               wl->ba_rx_bitmap |= BIT(tid);
+               } else {
+                       ret = -ENOTSUPP;
+               }
+               break;
+
+       case IEEE80211_AMPDU_RX_STOP:
+               ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
+               if (!ret)
+                       wl->ba_rx_bitmap &= ~BIT(tid);
+               break;
+
+       /*
+        * The BA initiator session is managed by the FW independently,
+        * so all TX AMPDU actions deliberately share the -EINVAL path below.
+        */
+       case IEEE80211_AMPDU_TX_START:
+       case IEEE80211_AMPDU_TX_STOP:
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ret = -EINVAL;
+               break;
+
+       default:
+               wl1271_error("Incorrect ampdu action id=%x\n", action);
+               ret = -EINVAL;
+       }
+
+       wl1271_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+
+       return ret;
+}
+
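
The RX block-ack bookkeeping above keeps one bit per TID in wl->ba_rx_bitmap: the bit is set when IEEE80211_AMPDU_RX_START succeeds and cleared again on IEEE80211_AMPDU_RX_STOP. A trivial standalone illustration of that per-TID bitmap pattern (not driver code):

#include <stdio.h>

#define BIT(n) (1UL << (n))

static unsigned long ba_rx_bitmap; /* one bit per TID with an active RX BA session */

int main(void)
{
        int tid = 5;

        ba_rx_bitmap |= BIT(tid);  /* RX_START accepted by the firmware */
        printf("tid %d active: %d\n", tid, !!(ba_rx_bitmap & BIT(tid)));

        ba_rx_bitmap &= ~BIT(tid); /* RX_STOP completed */
        printf("tid %d active: %d\n", tid, !!(ba_rx_bitmap & BIT(tid)));
        return 0;
}
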
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_rate wl1271_rates[] = {
        { .bitrate = 10,
@@ -2305,6 +2965,7 @@ static struct ieee80211_channel wl1271_channels[] = {
        { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
        { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
        { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
+       { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
 };
 
 /* mapping to indexes for wl1271_rates */
@@ -2493,6 +3154,9 @@ static const struct ieee80211_ops wl1271_ops = {
        .conf_tx = wl1271_op_conf_tx,
        .get_tsf = wl1271_op_get_tsf,
        .get_survey = wl1271_op_get_survey,
+       .sta_add = wl1271_op_sta_add,
+       .sta_remove = wl1271_op_sta_remove,
+       .ampdu_action = wl1271_op_ampdu_action,
        CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
 };
 
@@ -2562,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
        if (wl->state == WL1271_STATE_OFF)
                goto out;
 
-       ret = wl1271_ps_elp_wakeup(wl, false);
+       ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
@@ -2607,6 +3271,18 @@ int wl1271_register_hw(struct wl1271 *wl)
        if (wl->mac80211_registered)
                return 0;
 
+       ret = wl1271_fetch_nvs(wl);
+       if (ret == 0) {
+               u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
+               wl->mac_addr[0] = nvs_ptr[11];
+               wl->mac_addr[1] = nvs_ptr[10];
+               wl->mac_addr[2] = nvs_ptr[6];
+               wl->mac_addr[3] = nvs_ptr[5];
+               wl->mac_addr[4] = nvs_ptr[4];
+               wl->mac_addr[5] = nvs_ptr[3];
+       }
+
        SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
 
        ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3305,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
 
 void wl1271_unregister_hw(struct wl1271 *wl)
 {
+       if (wl->state == WL1271_STATE_PLT)
+               __wl1271_plt_stop(wl);
+
        unregister_netdevice_notifier(&wl1271_dev_notifier);
        ieee80211_unregister_hw(wl->hw);
        wl->mac80211_registered = false;
@@ -2661,13 +3340,15 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
                IEEE80211_HW_SUPPORTS_UAPSD |
                IEEE80211_HW_HAS_RATE_CONTROL |
                IEEE80211_HW_CONNECTION_MONITOR |
-               IEEE80211_HW_SUPPORTS_CQM_RSSI;
+               IEEE80211_HW_SUPPORTS_CQM_RSSI |
+               IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+               IEEE80211_HW_AP_LINK_PS;
 
        wl->hw->wiphy->cipher_suites = cipher_suites;
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC);
+               BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
        wl->hw->wiphy->max_scan_ssids = 1;
        /*
         * Maximum length of elements in scanning probe request templates
@@ -2676,8 +3357,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
         */
        wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
                        sizeof(struct ieee80211_header);
-       wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
-       wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
+       /*
+        * We keep local copies of the band structs because we need to
+        * modify them on a per-device basis.
+        */
+       memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+              sizeof(wl1271_band_2ghz));
+       memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+              sizeof(wl1271_band_5ghz));
+
+       wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               &wl->bands[IEEE80211_BAND_2GHZ];
+       wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+               &wl->bands[IEEE80211_BAND_5GHZ];
 
        wl->hw->queues = 4;
        wl->hw->max_rates = 1;
@@ -2686,6 +3379,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
 
        SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
 
+       wl->hw->sta_data_size = sizeof(struct wl1271_station);
+
+       wl->hw->max_rx_aggregation_subframes = 8;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2697,7 +3394,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        struct ieee80211_hw *hw;
        struct platform_device *plat_dev = NULL;
        struct wl1271 *wl;
-       int i, ret;
+       int i, j, ret;
        unsigned int order;
 
        hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -2725,9 +3422,16 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        for (i = 0; i < NUM_TX_QUEUES; i++)
                skb_queue_head_init(&wl->tx_queue[i]);
 
+       for (i = 0; i < NUM_TX_QUEUES; i++)
+               for (j = 0; j < AP_MAX_LINKS; j++)
+                       skb_queue_head_init(&wl->links[j].tx_queue[i]);
+
+       skb_queue_head_init(&wl->deferred_rx_queue);
+       skb_queue_head_init(&wl->deferred_tx_queue);
+
        INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
        INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
-       INIT_WORK(&wl->irq_work, wl1271_irq_work);
+       INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
        INIT_WORK(&wl->tx_work, wl1271_tx_work);
        INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
        INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@@ -2735,19 +3439,25 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
        wl->default_key = 0;
        wl->rx_counter = 0;
-       wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-       wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+       wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+       wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
        wl->psm_entry_retry = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
        wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
        wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
        wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->sta_rate_set = 0;
        wl->band = IEEE80211_BAND_2GHZ;
        wl->vif = NULL;
        wl->flags = 0;
        wl->sg_enabled = true;
        wl->hw_pg_ver = -1;
+       wl->bss_type = MAX_BSS_TYPE;
+       wl->set_bss_type = MAX_BSS_TYPE;
+       wl->fw_bss_type = MAX_BSS_TYPE;
+       wl->last_tx_hlid = 0;
+       wl->ap_ps_map = 0;
+       wl->ap_fw_ps_map = 0;
+       wl->quirks = 0;
 
        memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
        for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2837,11 +3547,11 @@ int wl1271_free_hw(struct wl1271 *wl)
 }
 EXPORT_SYMBOL_GPL(wl1271_free_hw);
 
-u32 wl12xx_debug_level;
+u32 wl12xx_debug_level = DEBUG_NONE;
 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
-module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE);
+module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
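
The debug_level change above is more than cosmetic: the fourth argument of module_param_named() is the sysfs permission mask, not a default value, so the old code passed DEBUG_NONE (presumably 0) as permissions, which hides the parameter from sysfs. The fix moves the default into the variable's initializer and makes the parameter a root-readable/writable sysfs file, so the level can be changed at runtime via /sys/module/<module>/parameters/debug_level or set at load time with debug_level=. A minimal illustration in an unrelated, hypothetical module:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* the default belongs in the initializer, not in the macro's last argument */
static unsigned int example_debug_level;

/* exposes /sys/module/<modname>/parameters/debug_level, rw for root only */
module_param_named(debug_level, example_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "example debugging level");

MODULE_LICENSE("GPL");
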
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
index 60a3738..971f13e 100644 (file)
@@ -24,6 +24,7 @@
 #include "reg.h"
 #include "ps.h"
 #include "io.h"
+#include "tx.h"
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
@@ -68,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
        }
 }
 
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
+int wl1271_ps_elp_wakeup(struct wl1271 *wl)
 {
        DECLARE_COMPLETION_ONSTACK(compl);
        unsigned long flags;
@@ -86,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
         * the completion variable in one entity.
         */
        spin_lock_irqsave(&wl->wl_lock, flags);
-       if (work_pending(&wl->irq_work) || chip_awake)
+       if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
                pending = true;
        else
                wl->elp_compl = &compl;
@@ -139,8 +140,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
                        return ret;
                }
 
-               ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE,
-                                        rates, send);
+               ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
                if (ret < 0)
                        return ret;
 
@@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
        case STATION_ACTIVE_MODE:
        default:
                wl1271_debug(DEBUG_PSM, "leaving psm");
-               ret = wl1271_ps_elp_wakeup(wl, false);
+               ret = wl1271_ps_elp_wakeup(wl);
                if (ret < 0)
                        return ret;
 
@@ -163,8 +163,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
                if (ret < 0)
                        return ret;
 
-               ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE,
-                                        rates, send);
+               ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
                if (ret < 0)
                        return ret;
 
@@ -175,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
        return ret;
 }
 
+static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
+{
+       int i, filtered = 0;
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       unsigned long flags;
+
+       /* filter all frames currently in the low-level queues for this hlid */
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+                       info = IEEE80211_SKB_CB(skb);
+                       info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+                       info->status.rates[0].idx = -1;
+                       ieee80211_tx_status(wl->hw, skb);
+                       filtered++;
+               }
+       }
+
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       wl->tx_queue_count -= filtered;
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       wl1271_handle_tx_low_watermark(wl);
+}
+
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+{
+       struct ieee80211_sta *sta;
+
+       if (test_bit(hlid, &wl->ap_ps_map))
+               return;
+
+       wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
+                    "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
+                    clean_queues);
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       if (!sta) {
+               wl1271_error("could not find sta %pM for starting ps",
+                            wl->links[hlid].addr);
+               rcu_read_unlock();
+               return;
+       }
 
+       ieee80211_sta_ps_transition_ni(sta, true);
+       rcu_read_unlock();
+
+       /* do we want to filter all frames from this link's queues? */
+       if (clean_queues)
+               wl1271_ps_filter_frames(wl, hlid);
+
+       __set_bit(hlid, &wl->ap_ps_map);
+}
+
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+{
+       struct ieee80211_sta *sta;
+
+       if (!test_bit(hlid, &wl->ap_ps_map))
+               return;
+
+       wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
+
+       __clear_bit(hlid, &wl->ap_ps_map);
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       if (!sta) {
+               wl1271_error("could not find sta %pM for ending ps",
+                            wl->links[hlid].addr);
+               goto end;
+       }
+
+       ieee80211_sta_ps_transition_ni(sta, false);
+end:
+       rcu_read_unlock();
+}
index 8415060..c41bd0a 100644 (file)
@@ -30,7 +30,9 @@
 int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
                       u32 rates, bool send);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
+int wl1271_ps_elp_wakeup(struct wl1271 *wl);
 void wl1271_elp_work(struct work_struct *work);
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
 
 #endif /* __WL1271_PS_H__ */
index 682304c..919b59f 100644 (file)
 #include "rx.h"
 #include "io.h"
 
-static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
+static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
                                  u32 drv_rx_counter)
 {
        return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
                RX_MEM_BLOCK_MASK;
 }
 
-static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
+static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
                                 u32 drv_rx_counter)
 {
        return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
         */
        wl->noise = desc->rssi - (desc->snr >> 1);
 
-       status->freq = ieee80211_channel_to_frequency(desc->channel);
+       status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
 
        if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
                status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -92,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
 {
        struct wl1271_rx_descriptor *desc;
        struct sk_buff *skb;
-       u16 *fc;
+       struct ieee80211_hdr *hdr;
        u8 *buf;
        u8 beacon = 0;
 
@@ -118,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
        /* now we pull the descriptor out of the buffer */
        skb_pull(skb, sizeof(*desc));
 
-       fc = (u16 *)skb->data;
-       if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
+       hdr = (struct ieee80211_hdr *)skb->data;
+       if (ieee80211_is_beacon(hdr->frame_control))
                beacon = 1;
 
        wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
@@ -129,12 +129,13 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
 
        skb_trim(skb, skb->len - desc->pad_len);
 
-       ieee80211_rx_ni(wl->hw, skb);
+       skb_queue_tail(&wl->deferred_rx_queue, skb);
+       ieee80211_queue_work(wl->hw, &wl->netstack_work);
 
        return 0;
 }
 
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
 {
        struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
        u32 buf_size;
@@ -198,6 +199,22 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
                        pkt_offset += pkt_length;
                }
        }
-       wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
-                       cpu_to_le32(wl->rx_counter));
+
+       /*
+        * Write the driver's packet counter to the FW. This is only required
+        * for older hardware revisions.
+        */
+       if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+               wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+               wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+       } else {
+               wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+               wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+       }
 }
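
The RX path above no longer hands frames to mac80211 directly from the interrupt path: completed frames are queued on wl->deferred_rx_queue and wl->netstack_work is scheduled instead. The work handler itself (wl1271_netstack_work) is not part of these hunks; a sketch of what such a handler might look like, assuming it simply drains the deferred RX queue (the deferred TX-status half is omitted, and this is not the driver's actual implementation):

/* assumes the driver's own headers for struct wl1271 and its fields */
static void demo_netstack_work(struct work_struct *work)
{
        struct wl1271 *wl = container_of(work, struct wl1271, netstack_work);
        struct sk_buff *skb;

        /* pass received frames to mac80211 outside the hard IRQ path */
        while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
                ieee80211_rx_ni(wl->hw, skb);
}
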
index 3abb26f..75fabf8 100644 (file)
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
-#define WL1271_RX_ALIGN_TO 4
-#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
-                            ~(WL1271_RX_ALIGN_TO - 1))
-
 #define SHORT_PREAMBLE_BIT   BIT(0)
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
@@ -86,8 +82,9 @@
 /*
  * RX Descriptor status
  *
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
  */
 #define WL1271_RX_DESC_STATUS_MASK      0x07
 
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
        u8  snr;
        __le32 timestamp;
        u8  packet_class;
-       u8  process_id;
+       union {
+               u8  process_id; /* STA FW */
+               u8  hlid; /* AP FW */
+       } __packed;
        u8  pad_len;
        u8  reserved;
 } __packed;
 
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
 
 #endif
index 6f897b9..420653a 100644 (file)
@@ -27,6 +27,7 @@
 #include "cmd.h"
 #include "scan.h"
 #include "acx.h"
+#include "ps.h"
 
 void wl1271_scan_complete_work(struct work_struct *work)
 {
@@ -40,10 +41,11 @@ void wl1271_scan_complete_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
-               mutex_unlock(&wl->mutex);
-               return;
-       }
+       if (wl->state == WL1271_STATE_OFF)
+               goto out;
+
+       if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
+               goto out;
 
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        kfree(wl->scan.scanned_ch);
@@ -52,13 +54,19 @@ void wl1271_scan_complete_work(struct work_struct *work)
        ieee80211_scan_completed(wl->hw, false);
 
        /* restore hardware connection monitoring template */
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-               wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+               if (wl1271_ps_elp_wakeup(wl) == 0) {
+                       wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+                       wl1271_ps_elp_sleep(wl);
+               }
+       }
 
        if (wl->scan.failed) {
                wl1271_info("Scan completed due to error.");
                ieee80211_queue_work(wl->hw, &wl->recovery_work);
        }
+
+out:
        mutex_unlock(&wl->mutex);
 
 }
index 93cbb8d..5b9dbea 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 #include <linux/gpio.h>
 #include <linux/wl12xx.h>
 #include <linux/pm_runtime.h>
@@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
        return &(wl_to_func(wl)->dev);
 }
 
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
 {
        struct wl1271 *wl = cookie;
        unsigned long flags;
@@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
 
        /* complete the ELP completion */
        spin_lock_irqsave(&wl->wl_lock, flags);
+       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
        if (wl->elp_compl) {
                complete(wl->elp_compl);
                wl->elp_compl = NULL;
        }
-
-       if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
-               ieee80211_queue_work(wl->hw, &wl->irq_work);
-       set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-       return IRQ_HANDLED;
+       return IRQ_WAKE_THREAD;
 }
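
The interrupt handling is reworked here: the hard handler above only completes a pending ELP wakeup, sets WL1271_FLAG_IRQ_RUNNING and returns IRQ_WAKE_THREAD, while the actual interrupt processing moves into a threaded handler (wl1271_irq) registered later in this patch with request_threaded_irq(). A generic sketch of that hard/threaded split with placeholder names, not the driver's actual handler bodies:

#include <linux/interrupt.h>

/* hard IRQ context: keep it minimal and non-blocking */
static irqreturn_t demo_hardirq(int irq, void *cookie)
{
        /* note the event, wake any waiter, then defer the real work */
        return IRQ_WAKE_THREAD;
}

/* runs in a kernel thread: may sleep, do bus I/O, take mutexes */
static irqreturn_t demo_thread_fn(int irq, void *cookie)
{
        /* read and handle the interrupt cause here */
        return IRQ_HANDLED;
}

static int demo_request_irq(unsigned int irq, void *cookie)
{
        return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
                                    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                    "demo", cookie);
}
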
 
 static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
@@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
        int ret;
        struct sdio_func *func = wl_to_func(wl);
 
-       sdio_claim_host(func);
-
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
                ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
                wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
                wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
        }
 
-       sdio_release_host(func);
-
        if (ret)
                wl1271_error("sdio read failed (%d)", ret);
 }
@@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
        int ret;
        struct sdio_func *func = wl_to_func(wl);
 
-       sdio_claim_host(func);
-
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
                sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
                wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
                        ret = sdio_memcpy_toio(func, addr, buf, len);
        }
 
-       sdio_release_host(func);
-
        if (ret)
                wl1271_error("sdio write failed (%d)", ret);
 }
@@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
        struct sdio_func *func = wl_to_func(wl);
        int ret;
 
-       /* Power up the card */
+       /* Make sure the card will not be powered off by runtime PM */
        ret = pm_runtime_get_sync(&func->dev);
        if (ret < 0)
                goto out;
 
+       /* Runtime PM might be disabled, so power up the card manually */
+       ret = mmc_power_restore_host(func->card->host);
+       if (ret < 0)
+               goto out;
+
        sdio_claim_host(func);
        sdio_enable_func(func);
-       sdio_release_host(func);
 
 out:
        return ret;
@@ -179,12 +173,17 @@ out:
 static int wl1271_sdio_power_off(struct wl1271 *wl)
 {
        struct sdio_func *func = wl_to_func(wl);
+       int ret;
 
-       sdio_claim_host(func);
        sdio_disable_func(func);
        sdio_release_host(func);
 
-       /* Power down the card */
+       /* Runtime PM might be disabled, so power off the card manually */
+       ret = mmc_power_save_host(func->card->host);
+       if (ret < 0)
+               return ret;
+
+       /* Let runtime PM know the card is powered off */
        return pm_runtime_put_sync(&func->dev);
 }
 
@@ -241,14 +240,14 @@ static int __devinit wl1271_probe(struct sdio_func *func,
        wl->irq = wlan_data->irq;
        wl->ref_clock = wlan_data->board_ref_clock;
 
-       ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+       ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+                                  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                  DRIVER_NAME, wl);
        if (ret < 0) {
                wl1271_error("request_irq() failed: %d", ret);
                goto out_free;
        }
 
-       set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
        disable_irq(wl->irq);
 
        ret = wl1271_init_ieee80211(wl);
@@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
  out_irq:
        free_irq(wl->irq, wl);
 
-
  out_free:
        wl1271_free_hw(wl);
 
@@ -345,3 +343,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
index 4671491..18cf017 100644 (file)
@@ -110,9 +110,9 @@ static void wl1271_spi_reset(struct wl1271 *wl)
        spi_message_add_tail(&t, &m);
 
        spi_sync(wl_to_spi(wl), &m);
-       kfree(cmd);
 
        wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+       kfree(cmd);
 }
 
 static void wl1271_spi_init(struct wl1271 *wl)
@@ -320,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
        spi_sync(wl_to_spi(wl), &m);
 }
 
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
 {
-       struct wl1271 *wl;
+       struct wl1271 *wl = cookie;
        unsigned long flags;
 
        wl1271_debug(DEBUG_IRQ, "IRQ");
 
-       wl = cookie;
-
        /* complete the ELP completion */
        spin_lock_irqsave(&wl->wl_lock, flags);
+       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
        if (wl->elp_compl) {
                complete(wl->elp_compl);
                wl->elp_compl = NULL;
        }
-
-       if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
-               ieee80211_queue_work(wl->hw, &wl->irq_work);
-       set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-       return IRQ_HANDLED;
+       return IRQ_WAKE_THREAD;
 }
 
 static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
@@ -413,14 +408,14 @@ static int __devinit wl1271_probe(struct spi_device *spi)
                goto out_free;
        }
 
-       ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+       ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+                                  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                  DRIVER_NAME, wl);
        if (ret < 0) {
                wl1271_error("request_irq() failed: %d", ret);
                goto out_free;
        }
 
-       set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
        disable_irq(wl->irq);
 
        ret = wl1271_init_ieee80211(wl);
@@ -495,4 +490,5 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
 MODULE_ALIAS("spi:wl1271");
index b44c75c..5e9ef7d 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/etherdevice.h>
 
 #include "wl12xx.h"
 #include "io.h"
 #include "ps.h"
 #include "tx.h"
 
+static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+{
+       int ret;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+       if (is_ap)
+               ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+       else
+               ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+
+       if (ret < 0)
+               return ret;
+
+       wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
+       return 0;
+}
+
 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
 {
        int id;
@@ -52,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
        }
 }
 
+static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+                                                struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+
+       /*
+        * Add the station to the known list before transmitting the
+        * authentication response. This way it won't get de-authed by the
+        * FW when transmitting too soon.
+        */
+       hdr = (struct ieee80211_hdr *)(skb->data +
+                                      sizeof(struct wl1271_tx_hw_descr));
+       if (ieee80211_is_auth(hdr->frame_control))
+               wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+}
+
+static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+{
+       bool fw_ps;
+       u8 tx_blks;
+
+       /* only regulate station links */
+       if (hlid < WL1271_AP_STA_HLID_START)
+               return;
+
+       fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+       tx_blks = wl->links[hlid].allocated_blks;
+
+       /*
+        * If the link is in FW PS and enough of its data is already queued
+        * in the FW, we can put the link into high-level PS and clean out
+        * its TX queues.
+        */
+       if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_start(wl, hlid, true);
+}
+
+u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
+
+       if (control->control.sta) {
+               struct wl1271_station *wl_sta;
+
+               wl_sta = (struct wl1271_station *)
+                               control->control.sta->drv_priv;
+               return wl_sta->hlid;
+       } else {
+               struct ieee80211_hdr *hdr;
+
+               hdr = (struct ieee80211_hdr *)skb->data;
+               if (ieee80211_is_mgmt(hdr->frame_control))
+                       return WL1271_AP_GLOBAL_HLID;
+               else
+                       return WL1271_AP_BROADCAST_HLID;
+       }
+}
+
 static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
-                               u32 buf_offset)
+                               u32 buf_offset, u8 hlid)
 {
        struct wl1271_tx_hw_descr *desc;
        u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -82,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 
                wl->tx_blocks_available -= total_blocks;
 
+               if (wl->bss_type == BSS_TYPE_AP_BSS)
+                       wl->links[hlid].allocated_blks += total_blocks;
+
                ret = 0;
 
                wl1271_debug(DEBUG_TX,
@@ -95,11 +173,12 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 }
 
 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
-                             u32 extra, struct ieee80211_tx_info *control)
+                             u32 extra, struct ieee80211_tx_info *control,
+                             u8 hlid)
 {
        struct timespec ts;
        struct wl1271_tx_hw_descr *desc;
-       int pad, ac;
+       int pad, ac, rate_idx;
        s64 hosttime;
        u16 tx_attr;
 
@@ -117,7 +196,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        getnstimeofday(&ts);
        hosttime = (timespec_to_ns(&ts) >> 10);
        desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
-       desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS)
+               desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+       else
+               desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
 
        /* configure the tx attributes */
        tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
@@ -125,25 +208,49 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        /* queue (we use the same identifiers for TIDs and ACs) */
        ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
        desc->tid = ac;
-       desc->aid = TX_HW_DEFAULT_AID;
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS) {
+               desc->aid = hlid;
+
+               /* if the packets are destined for AP (have a STA entry)
+                  send them with AP rate policies, otherwise use default
+                  basic rates */
+               if (control->control.sta)
+                       rate_idx = ACX_TX_AP_FULL_RATE;
+               else
+                       rate_idx = ACX_TX_BASIC_RATE;
+       } else {
+               desc->hlid = hlid;
+               switch (hlid) {
+               case WL1271_AP_GLOBAL_HLID:
+                       rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
+                       break;
+               case WL1271_AP_BROADCAST_HLID:
+                       rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+                       break;
+               default:
+                       rate_idx = ac;
+                       break;
+               }
+       }
+
+       tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
        desc->reserved = 0;
 
        /* align the length (and store in terms of words) */
-       pad = WL1271_TX_ALIGN(skb->len);
+       pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
        desc->length = cpu_to_le16(pad >> 2);
 
        /* calculate number of padding bytes */
        pad = pad - skb->len;
        tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
 
-       /* if the packets are destined for AP (have a STA entry) send them
-          with AP rate policies, otherwise use default basic rates */
-       if (control->control.sta)
-               tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
-
        desc->tx_attr = cpu_to_le16(tx_attr);
 
-       wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
+       wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
+               "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
+               le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
+               le16_to_cpu(desc->life_time), desc->total_mem_blocks);
 }
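
A small worked example of the alignment above, with a hypothetical frame length (WL1271_TX_ALIGN_TO is 4, see tx.h below):

    unsigned int len = 61;                              /* skb->len */
    unsigned int pad = ALIGN(len, WL1271_TX_ALIGN_TO);  /* 64 */
    u16 length_words = pad >> 2;                        /* 16, stored in desc->length */
    pad -= len;                                         /* 3 padding bytes, recorded in tx_attr */
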
 
 /* caller must hold wl->mutex */
@@ -153,8 +260,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
-       u8 idx;
        u32 total_len;
+       u8 hlid;
 
        if (!skb)
                return -EINVAL;
@@ -166,29 +273,43 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
                extra = WL1271_TKIP_IV_SPACE;
 
        if (info->control.hw_key) {
-               idx = info->control.hw_key->hw_key_idx;
+               bool is_wep;
+               u8 idx = info->control.hw_key->hw_key_idx;
+               u32 cipher = info->control.hw_key->cipher;
 
-               /* FIXME: do we have to do this if we're not using WEP? */
-               if (unlikely(wl->default_key != idx)) {
-                       ret = wl1271_cmd_set_default_wep_key(wl, idx);
+               is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
+                        (cipher == WLAN_CIPHER_SUITE_WEP104);
+
+               if (unlikely(is_wep && wl->default_key != idx)) {
+                       ret = wl1271_set_default_wep_key(wl, idx);
                        if (ret < 0)
                                return ret;
                        wl->default_key = idx;
                }
        }
 
-       ret = wl1271_tx_allocate(wl, skb, extra, buf_offset);
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               hlid = wl1271_tx_get_hlid(skb);
+       else
+               hlid = TX_HW_DEFAULT_AID;
+
+       ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
        if (ret < 0)
                return ret;
 
-       wl1271_tx_fill_hdr(wl, skb, extra, info);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               wl1271_tx_ap_update_inconnection_sta(wl, skb);
+               wl1271_tx_regulate_link(wl, hlid);
+       }
+
+       wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
 
        /*
         * The length of each packet is stored in terms of words. Thus, we must
         * pad the skb data to make sure its length is aligned.
         * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
         */
-       total_len = WL1271_TX_ALIGN(skb->len);
+       total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
        memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
        memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
 
@@ -222,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
        return enabled_rates;
 }
 
-static void handle_tx_low_watermark(struct wl1271 *wl)
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
 {
        unsigned long flags;
 
@@ -236,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
        }
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
 {
        struct sk_buff *skb = NULL;
        unsigned long flags;
@@ -262,12 +383,69 @@ out:
        return skb;
 }
 
+static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+{
+       struct sk_buff *skb = NULL;
+       unsigned long flags;
+       int i, h, start_hlid;
+
+       /* start from the link after the last one */
+       start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+
+       /* dequeue according to AC, round robin on each link */
+       for (i = 0; i < AP_MAX_LINKS; i++) {
+               h = (start_hlid + i) % AP_MAX_LINKS;
+
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
+               if (skb)
+                       goto out;
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
+               if (skb)
+                       goto out;
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
+               if (skb)
+                       goto out;
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+               if (skb)
+                       goto out;
+       }
+
+out:
+       if (skb) {
+               wl->last_tx_hlid = h;
+               spin_lock_irqsave(&wl->wl_lock, flags);
+               wl->tx_queue_count--;
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
+       } else {
+               wl->last_tx_hlid = 0;
+       }
+
+       return skb;
+}
+
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+{
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               return wl1271_ap_skb_dequeue(wl);
+
+       return wl1271_sta_skb_dequeue(wl);
+}
+
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
-       skb_queue_head(&wl->tx_queue[q], skb);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               u8 hlid = wl1271_tx_get_hlid(skb);
+               skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
+
+               /* make sure we dequeue the same packet next time */
+               wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
+       } else {
+               skb_queue_head(&wl->tx_queue[q], skb);
+       }
+
        spin_lock_irqsave(&wl->wl_lock, flags);
        wl->tx_queue_count++;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
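
The per-link round robin in wl1271_ap_skb_dequeue() and the rewind in wl1271_skb_queue_head() are designed to resume from the right place. A worked example with AP_MAX_LINKS = 7 (5 stations plus the global and broadcast links, see wl12xx.h):

    /* last frame came from link 3: the next dequeue scan starts at link 4 */
    start_hlid = (3 + 1) % AP_MAX_LINKS;                       /* == 4 */

    /* a frame taken from link 5 is pushed back to the head of its queue: */
    wl->last_tx_hlid = (5 + AP_MAX_LINKS - 1) % AP_MAX_LINKS;  /* == 4 */
    /* so the next scan starts at (4 + 1) % 7 == 5 and picks it up first */
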
@@ -277,38 +455,16 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 {
        struct sk_buff *skb;
        bool woken_up = false;
-       u32 sta_rates = 0;
        u32 buf_offset = 0;
        bool sent_packets = false;
        int ret;
 
-       /* check if the rates supported by the AP have changed */
-       if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
-                                       &wl->flags))) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               sta_rates = wl->sta_rate_set;
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-       }
-
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       /* if rates have changed, re-configure the rate policy */
-       if (unlikely(sta_rates)) {
-               ret = wl1271_ps_elp_wakeup(wl, false);
-               if (ret < 0)
-                       goto out;
-               woken_up = true;
-
-               wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
-               wl1271_acx_rate_policies(wl);
-       }
-
        while ((skb = wl1271_skb_dequeue(wl))) {
                if (!woken_up) {
-                       ret = wl1271_ps_elp_wakeup(wl, false);
+                       ret = wl1271_ps_elp_wakeup(wl);
                        if (ret < 0)
                                goto out_ack;
                        woken_up = true;
@@ -350,9 +506,15 @@ out_ack:
                sent_packets = true;
        }
        if (sent_packets) {
-               /* interrupt the firmware with the new packets */
-               wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
-               handle_tx_low_watermark(wl);
+               /*
+                * Interrupt the firmware with the new packets. This is only
+                * required for older hardware revisions
+                */
+               if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+                       wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
+                                      wl->tx_packets_count);
+
+               wl1271_handle_tx_low_watermark(wl);
        }
 
 out:
@@ -427,7 +589,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
                     result->rate_class_index, result->status);
 
        /* return the packet to the stack */
-       ieee80211_tx_status(wl->hw, skb);
+       skb_queue_tail(&wl->deferred_tx_queue, skb);
+       ieee80211_queue_work(wl->hw, &wl->netstack_work);
        wl1271_free_tx_id(wl, result->id);
 }
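
Completed frames are no longer handed back to mac80211 inline; they are parked on wl->deferred_tx_queue, and the netstack_work worker is scheduled to report them later. The worker itself lives outside this hunk, but a minimal sketch of what such a drain loop could look like (hypothetical, using the process-context mac80211 helpers):

    static void demo_netstack_work(struct work_struct *work)
    {
            struct wl1271 *wl = container_of(work, struct wl1271, netstack_work);
            struct sk_buff *skb;

            /* hand deferred RX frames to the stack */
            while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
                    ieee80211_rx_ni(wl->hw, skb);

            /* report deferred TX completions */
            while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
                    ieee80211_tx_status_ni(wl->hw, skb);
    }
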
 
@@ -469,34 +632,92 @@ void wl1271_tx_complete(struct wl1271 *wl)
        }
 }
 
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
+{
+       struct sk_buff *skb;
+       int i, total = 0;
+       unsigned long flags;
+       struct ieee80211_tx_info *info;
+
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+                       wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
+                       info = IEEE80211_SKB_CB(skb);
+                       info->status.rates[0].idx = -1;
+                       info->status.rates[0].count = 0;
+                       ieee80211_tx_status(wl->hw, skb);
+                       total++;
+               }
+       }
+
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       wl->tx_queue_count -= total;
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       wl1271_handle_tx_low_watermark(wl);
+}
+
 /* caller must hold wl->mutex */
 void wl1271_tx_reset(struct wl1271 *wl)
 {
        int i;
        struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
 
        /* TX failure */
-       for (i = 0; i < NUM_TX_QUEUES; i++) {
-               while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-                       wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
-                       ieee80211_tx_status(wl->hw, skb);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               for (i = 0; i < AP_MAX_LINKS; i++) {
+                       wl1271_tx_reset_link_queues(wl, i);
+                       wl->links[i].allocated_blks = 0;
+                       wl->links[i].prev_freed_blks = 0;
+               }
+
+               wl->last_tx_hlid = 0;
+       } else {
+               for (i = 0; i < NUM_TX_QUEUES; i++) {
+                       while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
+                               wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
+                                            skb);
+                               info = IEEE80211_SKB_CB(skb);
+                               info->status.rates[0].idx = -1;
+                               info->status.rates[0].count = 0;
+                               ieee80211_tx_status(wl->hw, skb);
+                       }
                }
        }
+
        wl->tx_queue_count = 0;
 
        /*
         * Make sure the driver is at a consistent state, in case this
         * function is called from a context other than interface removal.
         */
-       handle_tx_low_watermark(wl);
+       wl1271_handle_tx_low_watermark(wl);
 
-       for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
-               if (wl->tx_frames[i] != NULL) {
-                       skb = wl->tx_frames[i];
-                       wl1271_free_tx_id(wl, i);
-                       wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
-                       ieee80211_tx_status(wl->hw, skb);
+       for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
+               if (wl->tx_frames[i] == NULL)
+                       continue;
+
+               skb = wl->tx_frames[i];
+               wl1271_free_tx_id(wl, i);
+               wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
+
+               /* Remove private headers before passing the skb to mac80211 */
+               info = IEEE80211_SKB_CB(skb);
+               skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+               if (info->control.hw_key &&
+                   info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+                       int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+                       memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data,
+                               hdrlen);
+                       skb_pull(skb, WL1271_TKIP_IV_SPACE);
                }
+
+               info->status.rates[0].idx = -1;
+               info->status.rates[0].count = 0;
+
+               ieee80211_tx_status(wl->hw, skb);
+       }
 }
 
 #define WL1271_TX_FLUSH_TIMEOUT 500000
@@ -509,8 +730,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
 
        while (!time_after(jiffies, timeout)) {
                mutex_lock(&wl->mutex);
-               wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
-                            wl->tx_frames_cnt);
+               wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+                            wl->tx_frames_cnt, wl->tx_queue_count);
                if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
                        mutex_unlock(&wl->mutex);
                        return;
@@ -521,3 +742,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
 
        wl1271_warning("Unable to flush all TX buffers, timed out.");
 }
+
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+{
+       int i;
+       u32 rate = 0;
+
+       if (!wl->basic_rate_set) {
+               WARN_ON(1);
+               wl->basic_rate_set = wl->conf.tx.basic_rate;
+       }
+
+       for (i = 0; !rate; i++) {
+               if ((wl->basic_rate_set >> i) & 0x1)
+                       rate = 1 << i;
+       }
+
+       return rate;
+}
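
wl1271_tx_min_rate_get() returns the lowest bit set in basic_rate_set. Since the WARN_ON fallback above guarantees a non-zero set, the loop is equivalent to the usual find-first-set idiom; a sketch (not part of the patch):

    /* e.g. basic_rate_set == 0x0150 -> lowest set bit is 0x0010 */
    u32 min_rate = BIT(__ffs(wl->basic_rate_set));
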
index 903e5dc..02f07fa 100644 (file)
@@ -29,6 +29,7 @@
 #define TX_HW_BLOCK_SIZE                 252
 
 #define TX_HW_MGMT_PKT_LIFETIME_TU       2000
+#define TX_HW_AP_MODE_PKT_LIFETIME_TU    8000
 /* The chipset reference driver states, that the "aid" value 1
  * is for infra-BSS, but is still always used */
 #define TX_HW_DEFAULT_AID                1
@@ -52,8 +53,6 @@
 #define TX_HW_RESULT_QUEUE_LEN_MASK      0xf
 
 #define WL1271_TX_ALIGN_TO 4
-#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
-                            ~(WL1271_TX_ALIGN_TO - 1))
 #define WL1271_TKIP_IV_SPACE 4
 
 struct wl1271_tx_hw_descr {
@@ -77,8 +76,12 @@ struct wl1271_tx_hw_descr {
        u8 id;
        /* The packet TID value (as User-Priority) */
        u8 tid;
-       /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
-       u8 aid;
+       union {
+               /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
+               u8 aid;
+               /* AP - host link ID (HLID) */
+               u8 hlid;
+       } __packed;
        u8 reserved;
 } __packed;
 
@@ -146,5 +149,9 @@ void wl1271_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
+u8 wl1271_tx_get_hlid(struct sk_buff *skb);
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 
 #endif
index 9050dd9..86be83e 100644 (file)
 #define DRIVER_NAME "wl1271"
 #define DRIVER_PREFIX DRIVER_NAME ": "
 
+/*
+ * FW versions that support 11n BA sessions are
+ * marked x.x.x.50-60.x
+ */
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_START    50
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_END      60
+
 enum {
        DEBUG_NONE      = 0,
        DEBUG_IRQ       = BIT(0),
@@ -57,6 +64,8 @@ enum {
        DEBUG_SDIO      = BIT(14),
        DEBUG_FILTERS   = BIT(15),
        DEBUG_ADHOC     = BIT(16),
+       DEBUG_AP        = BIT(17),
+       DEBUG_MASTER    = (DEBUG_ADHOC | DEBUG_AP),
        DEBUG_ALL       = ~0,
 };
 
@@ -103,17 +112,28 @@ extern u32 wl12xx_debug_level;
                                       true);                           \
        } while (0)
 
-#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN |  \
+#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN |      \
                                  CFG_BSSID_FILTER_EN | \
                                  CFG_MC_FILTER_EN)
 
-#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
+#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
                                  CFG_RX_MGMT_EN | CFG_RX_DATA_EN |   \
                                  CFG_RX_CTL_EN | CFG_RX_BCN_EN |     \
                                  CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
 
-#define WL1271_FW_NAME "wl1271-fw.bin"
-#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_DEFAULT_AP_RX_CONFIG  0
+
+#define WL1271_DEFAULT_AP_RX_FILTER  (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
+                                 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
+                                 CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
+                                 CFG_RX_ASSOC_EN)
+
+
+
+#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
+#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
+
+#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
 
 #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
 #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@@ -129,6 +149,25 @@ extern u32 wl12xx_debug_level;
 #define WL1271_DEFAULT_BEACON_INT  100
 #define WL1271_DEFAULT_DTIM_PERIOD 1
 
+#define WL1271_AP_GLOBAL_HLID      0
+#define WL1271_AP_BROADCAST_HLID   1
+#define WL1271_AP_STA_HLID_START   2
+
+/*
+ * When in AP-mode, we allow (at least) this number of mem-blocks
+ * to be transmitted to the FW for a STA in PS-mode. The FW wakes the
+ * sleeping STA only when packets are present in its buffers, so we want
+ * to queue enough packets for the driver to transmit all of its buffered
+ * data before the STA goes back to sleep. But we don't want to take too
+ * many mem-blocks, as that might hurt the throughput of active STAs.
+ * The number of blocks (18) is enough for 2 large packets.
+ */
+#define WL1271_PS_STA_MAX_BLOCKS  (2 * 9)
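
Rough sizing behind the "(2 * 9)" above, assuming the mem-blocks counted here are the 252-byte TX_HW_BLOCK_SIZE units from tx.h:

    /*  9 blocks * 252 bytes ~= 2.2 KB  -> roughly one large frame  */
    /* 18 blocks * 252 bytes ~= 4.5 KB  -> roughly two large frames */
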
+
+#define WL1271_AP_BSS_INDEX        0
+#define WL1271_AP_DEF_INACTIV_SEC  300
+#define WL1271_AP_DEF_BEACON_EXP   20
+
 #define ACX_TX_DESCRIPTORS         32
 
 #define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +200,13 @@ struct wl1271_partition_set {
 
 struct wl1271;
 
+#define WL12XX_NUM_FW_VER 5
+
 /* FIXME: I'm not sure about this structure name */
 struct wl1271_chip {
        u32 id;
-       char fw_ver[21];
+       char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+       unsigned int fw_ver[WL12XX_NUM_FW_VER];
 };
 
 struct wl1271_stats {
@@ -178,8 +220,13 @@ struct wl1271_stats {
 #define NUM_TX_QUEUES              4
 #define NUM_RX_PKT_DESC            8
 
-/* FW status registers */
-struct wl1271_fw_status {
+#define AP_MAX_STATIONS            5
+
+/* Broadcast and Global links + links to stations */
+#define AP_MAX_LINKS               (AP_MAX_STATIONS + 2)
+
+/* FW status registers common for AP/STA */
+struct wl1271_fw_common_status {
        __le32 intr;
        u8  fw_rx_counter;
        u8  drv_rx_counter;
@@ -188,9 +235,43 @@ struct wl1271_fw_status {
        __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
        __le32 tx_released_blks[NUM_TX_QUEUES];
        __le32 fw_localtime;
-       __le32 padding[2];
 } __packed;
 
+/* FW status registers for AP */
+struct wl1271_fw_ap_status {
+       struct wl1271_fw_common_status common;
+
+       /* Next fields valid only in AP FW */
+
+       /*
+        * A bitmap (where each bit represents a single HLID)
+        * to indicate if the station is in PS mode.
+        */
+       __le32 link_ps_bitmap;
+
+       /* Number of freed MBs per HLID */
+       u8 tx_lnk_free_blks[AP_MAX_LINKS];
+       u8 padding_1[1];
+} __packed;
+
+/* FW status registers for STA */
+struct wl1271_fw_sta_status {
+       struct wl1271_fw_common_status common;
+
+       u8  tx_total;
+       u8  reserved1;
+       __le16 reserved2;
+} __packed;
+
+struct wl1271_fw_full_status {
+       union {
+               struct wl1271_fw_common_status common;
+               struct wl1271_fw_sta_status sta;
+               struct wl1271_fw_ap_status ap;
+       };
+} __packed;
+
+
 struct wl1271_rx_mem_pool_addr {
        u32 addr;
        u32 addr_extra;
@@ -218,6 +299,48 @@ struct wl1271_if_operations {
        void (*disable_irq)(struct wl1271 *wl);
 };
 
+#define MAX_NUM_KEYS 14
+#define MAX_KEY_SIZE 32
+
+struct wl1271_ap_key {
+       u8 id;
+       u8 key_type;
+       u8 key_size;
+       u8 key[MAX_KEY_SIZE];
+       u8 hlid;
+       u32 tx_seq_32;
+       u16 tx_seq_16;
+};
+
+enum wl12xx_flags {
+       WL1271_FLAG_STA_ASSOCIATED,
+       WL1271_FLAG_JOINED,
+       WL1271_FLAG_GPIO_POWER,
+       WL1271_FLAG_TX_QUEUE_STOPPED,
+       WL1271_FLAG_TX_PENDING,
+       WL1271_FLAG_IN_ELP,
+       WL1271_FLAG_PSM,
+       WL1271_FLAG_PSM_REQUESTED,
+       WL1271_FLAG_IRQ_RUNNING,
+       WL1271_FLAG_IDLE,
+       WL1271_FLAG_IDLE_REQUESTED,
+       WL1271_FLAG_PSPOLL_FAILURE,
+       WL1271_FLAG_STA_STATE_SENT,
+       WL1271_FLAG_FW_TX_BUSY,
+       WL1271_FLAG_AP_STARTED
+};
+
+struct wl1271_link {
+       /* AP-mode - TX queue per AC in link */
+       struct sk_buff_head tx_queue[NUM_TX_QUEUES];
+
+       /* accounting for allocated / available TX blocks in FW */
+       u8 allocated_blks;
+       u8 prev_freed_blks;
+
+       u8 addr[ETH_ALEN];
+};
+
 struct wl1271 {
        struct platform_device *plat_dev;
        struct ieee80211_hw *hw;
@@ -236,21 +359,6 @@ struct wl1271 {
        enum wl1271_state state;
        struct mutex mutex;
 
-#define WL1271_FLAG_STA_RATES_CHANGED  (0)
-#define WL1271_FLAG_STA_ASSOCIATED     (1)
-#define WL1271_FLAG_JOINED             (2)
-#define WL1271_FLAG_GPIO_POWER         (3)
-#define WL1271_FLAG_TX_QUEUE_STOPPED   (4)
-#define WL1271_FLAG_IN_ELP             (5)
-#define WL1271_FLAG_PSM                (6)
-#define WL1271_FLAG_PSM_REQUESTED      (7)
-#define WL1271_FLAG_IRQ_PENDING        (8)
-#define WL1271_FLAG_IRQ_RUNNING        (9)
-#define WL1271_FLAG_IDLE              (10)
-#define WL1271_FLAG_IDLE_REQUESTED    (11)
-#define WL1271_FLAG_PSPOLL_FAILURE    (12)
-#define WL1271_FLAG_STA_STATE_SENT    (13)
-#define WL1271_FLAG_FW_TX_BUSY        (14)
        unsigned long flags;
 
        struct wl1271_partition_set part;
@@ -262,6 +370,7 @@ struct wl1271 {
 
        u8 *fw;
        size_t fw_len;
+       u8 fw_bss_type;
        struct wl1271_nvs_file *nvs;
        size_t nvs_len;
 
@@ -295,6 +404,12 @@ struct wl1271 {
        struct sk_buff_head tx_queue[NUM_TX_QUEUES];
        int tx_queue_count;
 
+       /* Frames received, not handled yet by mac80211 */
+       struct sk_buff_head deferred_rx_queue;
+
+       /* Frames sent, not returned yet to mac80211 */
+       struct sk_buff_head deferred_tx_queue;
+
        struct work_struct tx_work;
 
        /* Pending TX frames */
@@ -315,8 +430,8 @@ struct wl1271 {
        /* Intermediate buffer, used for packet aggregation */
        u8 *aggr_buf;
 
-       /* The target interrupt mask */
-       struct work_struct irq_work;
+       /* Network stack work  */
+       struct work_struct netstack_work;
 
        /* Hardware recovery work */
        struct work_struct recovery_work;
@@ -343,7 +458,6 @@ struct wl1271 {
         *      bits 16-23 - 802.11n   MCS index mask
         * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
         */
-       u32 sta_rate_set;
        u32 basic_rate_set;
        u32 basic_rate;
        u32 rate_set;
@@ -378,13 +492,12 @@ struct wl1271 {
        int last_rssi_event;
 
        struct wl1271_stats stats;
-       struct dentry *rootdir;
 
        __le32 buffer_32;
        u32 buffer_cmd;
        u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 
-       struct wl1271_fw_status *fw_status;
+       struct wl1271_fw_full_status *fw_status;
        struct wl1271_tx_hw_res_if *tx_res_if;
 
        struct ieee80211_vif *vif;
@@ -400,6 +513,41 @@ struct wl1271 {
 
        /* Most recently reported noise in dBm */
        s8 noise;
+
+       /* map for HLIDs of associated stations - when operating in AP mode */
+       unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
+
+       /* recorded keys for AP-mode - set here before AP startup */
+       struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
+
+       /* bands supported by this instance of wl12xx */
+       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+       /* RX BA constraint value */
+       bool ba_support;
+       u8 ba_rx_bitmap;
+
+       /*
+        * AP-mode - links indexed by HLID. The global and broadcast links
+        * are always active.
+        */
+       struct wl1271_link links[AP_MAX_LINKS];
+
+       /* the hlid of the link where the last transmitted skb came from */
+       int last_tx_hlid;
+
+       /* AP-mode - a bitmap of links currently in PS mode according to FW */
+       u32 ap_fw_ps_map;
+
+       /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
+       unsigned long ap_ps_map;
+
+       /* Quirks of specific hardware revisions */
+       unsigned int quirks;
+};
+
+struct wl1271_station {
+       u8 hlid;
 };
 
 int wl1271_plt_start(struct wl1271 *wl);
@@ -414,6 +562,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
 #define WL1271_TX_QUEUE_LOW_WATERMARK  10
 #define WL1271_TX_QUEUE_HIGH_WATERMARK 25
 
+#define WL1271_DEFERRED_QUEUE_LIMIT    64
+
 /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
    on in case it has been shut down shortly before */
 #define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
@@ -423,4 +573,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
 #define HW_BG_RATES_MASK       0xffff
 #define HW_HT_RATES_OFFSET     16
 
+/* Quirks */
+
+/* Each RX/TX transaction requires an end-of-transaction transfer */
+#define WL12XX_QUIRK_END_OF_TRANSACTION        BIT(0)
+
 #endif
index be21032..18fe542 100644 (file)
@@ -55,7 +55,6 @@
 
 /* This really should be 8, but not for our firmware */
 #define MAX_SUPPORTED_RATES 32
-#define COUNTRY_STRING_LEN 3
 #define MAX_COUNTRY_TRIPLETS 32
 
 /* Headers */
@@ -99,7 +98,7 @@ struct country_triplet {
 
 struct wl12xx_ie_country {
        struct wl12xx_ie_header header;
-       u8 country_string[COUNTRY_STRING_LEN];
+       u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
        struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
 } __packed;
 
@@ -138,13 +137,13 @@ struct wl12xx_arp_rsp_template {
        struct ieee80211_hdr_3addr hdr;
 
        u8 llc_hdr[sizeof(rfc1042_header)];
-       u16 llc_type;
+       __be16 llc_type;
 
        struct arphdr arp_hdr;
        u8 sender_hw[ETH_ALEN];
-       u32 sender_ip;
+       __be32 sender_ip;
        u8 target_hw[ETH_ALEN];
-       u32 target_ip;
+       __be32 target_ip;
 } __packed;
 
 
@@ -160,4 +159,9 @@ struct wl12xx_probe_resp_template {
        struct wl12xx_ie_country country;
 } __packed;
 
+struct wl12xx_disconn_template {
+       struct ieee80211_header header;
+       __le16 disconn_reason;
+} __packed;
+
 #endif
index 6a9b660..a73a305 100644 (file)
@@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
 {
        int r;
        int i;
-       zd_addr_t *a16;
-       u16 *v16;
+       zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
+       u16 v16[USB_MAX_IOREAD32_COUNT * 2];
        unsigned int count16;
 
        if (count > USB_MAX_IOREAD32_COUNT)
                return -EINVAL;
 
-       /* Allocate a single memory block for values and addresses. */
-       count16 = 2*count;
-       /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
-       a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
-                                  GFP_KERNEL);
-       if (!a16) {
-               dev_dbg_f(zd_chip_dev(chip),
-                         "error ENOMEM in allocation of a16\n");
-               r = -ENOMEM;
-               goto out;
-       }
-       v16 = (u16 *)(a16 + count16);
+       /* Use stack for values and addresses. */
+       count16 = 2 * count;
+       BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
+       BUG_ON(count16 * sizeof(u16) > sizeof(v16));
 
        for (i = 0; i < count; i++) {
                int j = 2*i;
@@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
        if (r) {
                dev_dbg_f(zd_chip_dev(chip),
                          "error: zd_ioread16v_locked. Error number %d\n", r);
-               goto out;
+               return r;
        }
 
        for (i = 0; i < count; i++) {
@@ -147,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
                values[i] = (v16[j] << 16) | v16[j+1];
        }
 
-out:
-       kfree((void *)a16);
-       return r;
+       return 0;
 }
 
-int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
-                  unsigned int count)
+static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
+                                      const struct zd_ioreq32 *ioreqs,
+                                      unsigned int count)
 {
        int i, j, r;
-       struct zd_ioreq16 *ioreqs16;
+       struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
        unsigned int count16;
 
+       /* Use stack for values and addresses. */
+
        ZD_ASSERT(mutex_is_locked(&chip->mutex));
 
        if (count == 0)
@@ -166,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
        if (count > USB_MAX_IOWRITE32_COUNT)
                return -EINVAL;
 
-       /* Allocate a single memory block for values and addresses. */
-       count16 = 2*count;
-       ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
-       if (!ioreqs16) {
-               r = -ENOMEM;
-               dev_dbg_f(zd_chip_dev(chip),
-                         "error %d in ioreqs16 allocation\n", r);
-               goto out;
-       }
+       count16 = 2 * count;
+       BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
 
        for (i = 0; i < count; i++) {
                j = 2*i;
@@ -185,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
                ioreqs16[j+1].addr  = ioreqs[i].addr;
        }
 
-       r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16);
+       r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
 #ifdef DEBUG
        if (r) {
                dev_dbg_f(zd_chip_dev(chip),
                          "error %d in zd_usb_write16v\n", r);
        }
 #endif /* DEBUG */
-out:
-       kfree(ioreqs16);
        return r;
 }
 
+int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+                         unsigned int count)
+{
+       int r;
+
+       zd_usb_iowrite16v_async_start(&chip->usb);
+       r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
+       if (r) {
+               zd_usb_iowrite16v_async_end(&chip->usb, 0);
+               return r;
+       }
+       return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
+}
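
The 32-bit write path now batches its 16-bit USB writes between zd_usb_iowrite16v_async_start() and zd_usb_iowrite16v_async_end(); zd_iowrite16a_locked() and zd_iowrite32a_locked() below follow the same shape. Condensed, the calling pattern used throughout this patch is:

    zd_usb_iowrite16v_async_start(&chip->usb);
    r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);  /* queue only */
    if (r) {
            zd_usb_iowrite16v_async_end(&chip->usb, 0);          /* end the batch, 0 timeout */
            return r;
    }
    return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */); /* end the batch, 50 ms timeout */
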
+
 int zd_iowrite16a_locked(struct zd_chip *chip,
                   const struct zd_ioreq16 *ioreqs, unsigned int count)
 {
@@ -204,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
        unsigned int i, j, t, max;
 
        ZD_ASSERT(mutex_is_locked(&chip->mutex));
+       zd_usb_iowrite16v_async_start(&chip->usb);
+
        for (i = 0; i < count; i += j + t) {
                t = 0;
                max = count-i;
@@ -216,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
                        }
                }
 
-               r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j);
+               r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
                if (r) {
+                       zd_usb_iowrite16v_async_end(&chip->usb, 0);
                        dev_dbg_f(zd_chip_dev(chip),
                                  "error zd_usb_iowrite16v. Error number %d\n",
                                  r);
@@ -225,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
                }
        }
 
-       return 0;
+       return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
 }
 
 /* Writes a variable number of 32 bit registers. The functions will split
@@ -238,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
        int r;
        unsigned int i, j, t, max;
 
+       zd_usb_iowrite16v_async_start(&chip->usb);
+
        for (i = 0; i < count; i += j + t) {
                t = 0;
                max = count-i;
@@ -250,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
                        }
                }
 
-               r = _zd_iowrite32v_locked(chip, &ioreqs[i], j);
+               r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
                if (r) {
+                       zd_usb_iowrite16v_async_end(&chip->usb, 0);
                        dev_dbg_f(zd_chip_dev(chip),
                                "error _zd_iowrite32v_locked."
                                " Error number %d\n", r);
@@ -259,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
                }
        }
 
-       return 0;
+       return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
 }
 
 int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
@@ -370,16 +374,12 @@ error:
        return r;
 }
 
-/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
- *              CR_MAC_ADDR_P2 must be overwritten
- */
-int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
+                                   const struct zd_ioreq32 *in_reqs,
+                                   const char *type)
 {
        int r;
-       struct zd_ioreq32 reqs[2] = {
-               [0] = { .addr = CR_MAC_ADDR_P1 },
-               [1] = { .addr = CR_MAC_ADDR_P2 },
-       };
+       struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
 
        if (mac_addr) {
                reqs[0].value = (mac_addr[3] << 24)
@@ -388,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
                              |  mac_addr[0];
                reqs[1].value = (mac_addr[5] <<  8)
                              |  mac_addr[4];
-               dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr);
+               dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
        } else {
-               dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n");
+               dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
        }
 
        mutex_lock(&chip->mutex);
@@ -399,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
        return r;
 }
 
+/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
+ *              CR_MAC_ADDR_P2 must be overwritten
+ */
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+{
+       static const struct zd_ioreq32 reqs[2] = {
+               [0] = { .addr = CR_MAC_ADDR_P1 },
+               [1] = { .addr = CR_MAC_ADDR_P2 },
+       };
+
+       return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
+}
+
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
+{
+       static const struct zd_ioreq32 reqs[2] = {
+               [0] = { .addr = CR_BSSID_P1 },
+               [1] = { .addr = CR_BSSID_P2 },
+       };
+
+       return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
+}
+
 int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
 {
        int r;
@@ -849,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
 static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
 {
        struct zd_ioreq32 reqs[3];
+       u16 b_interval = s->beacon_interval & 0xffff;
 
-       if (s->beacon_interval <= 5)
-               s->beacon_interval = 5;
-       if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
-               s->pre_tbtt = s->beacon_interval - 1;
+       if (b_interval <= 5)
+               b_interval = 5;
+       if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
+               s->pre_tbtt = b_interval - 1;
        if (s->atim_wnd_period >= s->pre_tbtt)
                s->atim_wnd_period = s->pre_tbtt - 1;
 
@@ -862,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
        reqs[1].addr = CR_PRE_TBTT;
        reqs[1].value = s->pre_tbtt;
        reqs[2].addr = CR_BCN_INTERVAL;
-       reqs[2].value = s->beacon_interval;
+       reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
 
        return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
 }
 
 
-static int set_beacon_interval(struct zd_chip *chip, u32 interval)
+static int set_beacon_interval(struct zd_chip *chip, u16 interval,
+                              u8 dtim_period, int type)
 {
        int r;
        struct aw_pt_bi s;
+       u32 b_interval, mode_flag;
 
        ZD_ASSERT(mutex_is_locked(&chip->mutex));
+
+       if (interval > 0) {
+               switch (type) {
+               case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_MESH_POINT:
+                       mode_flag = BCN_MODE_IBSS;
+                       break;
+               case NL80211_IFTYPE_AP:
+                       mode_flag = BCN_MODE_AP;
+                       break;
+               default:
+                       mode_flag = 0;
+                       break;
+               }
+       } else {
+               dtim_period = 0;
+               mode_flag = 0;
+       }
+
+       b_interval = mode_flag | (dtim_period << 16) | interval;
+
+       r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
+       if (r)
+               return r;
        r = get_aw_pt_bi(chip, &s);
        if (r)
                return r;
-       s.beacon_interval = interval;
        return set_aw_pt_bi(chip, &s);
 }
 
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+                          int type)
 {
        int r;
 
        mutex_lock(&chip->mutex);
-       r = set_beacon_interval(chip, interval);
+       r = set_beacon_interval(chip, interval, dtim_period, type);
        mutex_unlock(&chip->mutex);
        return r;
 }
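
set_beacon_interval() now packs CR_BCN_INTERVAL as mode_flag | (dtim_period << 16) | interval, with BCN_MODE_AP (0x1000000) or BCN_MODE_IBSS (0x2000000) chosen from the interface type. A worked example with hypothetical values:

    /* AP interface, beacon interval 100 TU, DTIM period 1 */
    u32 val = BCN_MODE_AP | (1 << 16) | 100;  /* 0x01000000 | 0x00010000 | 0x64 == 0x01010064 */
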
@@ -905,7 +955,7 @@ static int hw_init(struct zd_chip *chip)
        if (r)
                return r;
 
-       return set_beacon_interval(chip, 100);
+       return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
 }
 
 static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1407,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
        mutex_lock(&chip->mutex);
        zd_usb_disable_int(&chip->usb);
        mutex_unlock(&chip->mutex);
+
+       /* cancel pending interrupt work */
+       cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
 }
 
 int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1416,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
        mutex_lock(&chip->mutex);
        zd_usb_enable_tx(&chip->usb);
        r = zd_usb_enable_rx(&chip->usb);
+       zd_tx_watchdog_enable(&chip->usb);
        mutex_unlock(&chip->mutex);
        return r;
 }
@@ -1423,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
 void zd_chip_disable_rxtx(struct zd_chip *chip)
 {
        mutex_lock(&chip->mutex);
+       zd_tx_watchdog_disable(&chip->usb);
        zd_usb_disable_rx(&chip->usb);
        zd_usb_disable_tx(&chip->usb);
        mutex_unlock(&chip->mutex);
index f8bbf7d..14e4402 100644 (file)
@@ -546,6 +546,7 @@ enum {
 #define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
        RX_FILTER_CFEND | RX_FILTER_CFACK)
 
+#define BCN_MODE_AP                    0x1000000
 #define BCN_MODE_IBSS                  0x2000000
 
 /* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
 u8  zd_chip_get_channel(struct zd_chip *chip);
 int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
 int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
 int zd_chip_switch_radio_on(struct zd_chip *chip);
 int zd_chip_switch_radio_off(struct zd_chip *chip);
 int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
 
 int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
 
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+                          int type);
 
 static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
 {
index 6ac597f..5463ca9 100644 (file)
@@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t;
 #ifdef DEBUG
 #  define ZD_ASSERT(x) \
 do { \
-       if (!(x)) { \
+       if (unlikely(!(x))) { \
                pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
                        __FILE__, __LINE__, __stringify(x)); \
                dump_stack(); \
index 6107304..5037c8b 100644 (file)
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
 static void housekeeping_init(struct zd_mac *mac);
 static void housekeeping_enable(struct zd_mac *mac);
 static void housekeeping_disable(struct zd_mac *mac);
+static void beacon_init(struct zd_mac *mac);
+static void beacon_enable(struct zd_mac *mac);
+static void beacon_disable(struct zd_mac *mac);
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
+static int zd_mac_config_beacon(struct ieee80211_hw *hw,
+                               struct sk_buff *beacon);
 
 static int zd_reg2alpha2(u8 regdomain, char *alpha2)
 {
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
        return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
 }
 
+static int set_mac_and_bssid(struct zd_mac *mac)
+{
+       int r;
+
+       if (!mac->vif)
+               return -1;
+
+       r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
+       if (r)
+               return r;
+
+       /* After setting the MAC address, the vendor driver sets either the
+        * BSSID (for AP mode) or the RX filter (for other modes).
+        */
+       if (mac->type != NL80211_IFTYPE_AP)
+               return set_rx_filter(mac);
+       else
+               return zd_write_bssid(&mac->chip, mac->vif->addr);
+}
+
 static int set_mc_hash(struct zd_mac *mac)
 {
        struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
        return zd_chip_set_multicast_hash(&mac->chip, &hash);
 }
 
-static int zd_op_start(struct ieee80211_hw *hw)
+int zd_op_start(struct ieee80211_hw *hw)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
                goto disable_rxtx;
 
        housekeeping_enable(mac);
+       beacon_enable(mac);
+       set_bit(ZD_DEVICE_RUNNING, &mac->flags);
        return 0;
 disable_rxtx:
        zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
        return r;
 }
 
-static void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct zd_chip *chip = &mac->chip;
        struct sk_buff *skb;
        struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
 
+       clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
        /* The order here deliberately is a little different from the open()
         * method, since we need to make sure there is no opportunity for RX
         * frames to be processed by mac80211 after we have stopped it.
         */
 
        zd_chip_disable_rxtx(chip);
+       beacon_disable(mac);
        housekeeping_disable(mac);
        flush_workqueue(zd_workqueue);
 
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
                dev_kfree_skb_any(skb);
 }
 
+int zd_restore_settings(struct zd_mac *mac)
+{
+       struct sk_buff *beacon;
+       struct zd_mc_hash multicast_hash;
+       unsigned int short_preamble;
+       int r, beacon_interval, beacon_period;
+       u8 channel;
+
+       dev_dbg_f(zd_mac_dev(mac), "\n");
+
+       spin_lock_irq(&mac->lock);
+       multicast_hash = mac->multicast_hash;
+       short_preamble = mac->short_preamble;
+       beacon_interval = mac->beacon.interval;
+       beacon_period = mac->beacon.period;
+       channel = mac->channel;
+       spin_unlock_irq(&mac->lock);
+
+       r = set_mac_and_bssid(mac);
+       if (r < 0) {
+               dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
+               return r;
+       }
+
+       r = zd_chip_set_channel(&mac->chip, channel);
+       if (r < 0) {
+               dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
+                         r);
+               return r;
+       }
+
+       set_rts_cts(mac, short_preamble);
+
+       r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
+       if (r < 0) {
+               dev_dbg_f(zd_mac_dev(mac),
+                         "zd_chip_set_multicast_hash failed, %d\n", r);
+               return r;
+       }
+
+       if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+           mac->type == NL80211_IFTYPE_ADHOC ||
+           mac->type == NL80211_IFTYPE_AP) {
+               if (mac->vif != NULL) {
+                       beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+                       if (beacon) {
+                               zd_mac_config_beacon(mac->hw, beacon);
+                               kfree_skb(beacon);
+                       }
+               }
+
+               zd_set_beacon_interval(&mac->chip, beacon_interval,
+                                       beacon_period, mac->type);
+
+               spin_lock_irq(&mac->lock);
+               mac->beacon.last_update = jiffies;
+               spin_unlock_irq(&mac->lock);
+       }
+
+       return 0;
+}
+
 /**
  * zd_mac_tx_status - reports tx status of a packet if required
  * @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
 static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
-       int r;
+       int r, ret, num_cmds, req_pos = 0;
        u32 tmp, j = 0;
        /* 4 more bytes for tail CRC */
        u32 full_len = beacon->len + 4;
+       unsigned long end_jiffies, message_jiffies;
+       struct zd_ioreq32 *ioreqs;
 
-       r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0);
+       /* Alloc memory for full beacon write at once. */
+       num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
+       ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
+       if (!ioreqs)
+               return -ENOMEM;
+
+       mutex_lock(&mac->chip.mutex);
+
+       r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
        if (r < 0)
-               return r;
-       r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+               goto out;
+       r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
        if (r < 0)
-               return r;
+               goto release_sema;
 
+       end_jiffies = jiffies + HZ / 2; /*~500ms*/
+       message_jiffies = jiffies + HZ / 10; /*~100ms*/
        while (tmp & 0x2) {
-               r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+               r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
                if (r < 0)
-                       return r;
-               if ((++j % 100) == 0) {
-                       printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n");
-                       if (j >= 500)  {
-                               printk(KERN_ERR "Giving up beacon config.\n");
-                               return -ETIMEDOUT;
+                       goto release_sema;
+               if (time_is_before_eq_jiffies(message_jiffies)) {
+                       message_jiffies = jiffies + HZ / 10;
+                       dev_err(zd_mac_dev(mac),
+                                       "CR_BCN_FIFO_SEMAPHORE not ready\n");
+                       if (time_is_before_eq_jiffies(end_jiffies))  {
+                               dev_err(zd_mac_dev(mac),
+                                               "Giving up beacon config.\n");
+                               r = -ETIMEDOUT;
+                               goto reset_device;
                        }
                }
-               msleep(1);
+               msleep(20);
        }
 
-       r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1);
-       if (r < 0)
-               return r;
+       ioreqs[req_pos].addr = CR_BCN_FIFO;
+       ioreqs[req_pos].value = full_len - 1;
+       req_pos++;
        if (zd_chip_is_zd1211b(&mac->chip)) {
-               r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1);
-               if (r < 0)
-                       return r;
+               ioreqs[req_pos].addr = CR_BCN_LENGTH;
+               ioreqs[req_pos].value = full_len - 1;
+               req_pos++;
        }
 
        for (j = 0 ; j < beacon->len; j++) {
-               r = zd_iowrite32(&mac->chip, CR_BCN_FIFO,
-                               *((u8 *)(beacon->data + j)));
-               if (r < 0)
-                       return r;
+               ioreqs[req_pos].addr = CR_BCN_FIFO;
+               ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
+               req_pos++;
        }
 
        for (j = 0; j < 4; j++) {
-               r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0);
-               if (r < 0)
-                       return r;
+               ioreqs[req_pos].addr = CR_BCN_FIFO;
+               ioreqs[req_pos].value = 0x0;
+               req_pos++;
        }
 
-       r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1);
-       if (r < 0)
-               return r;
+       BUG_ON(req_pos != num_cmds);
+
+       r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
+
+release_sema:
+       /*
+        * Try very hard to release device beacon semaphore, as otherwise
+        * device/driver can be left in unusable state.
+        */
+       end_jiffies = jiffies + HZ / 2; /*~500ms*/
+       ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+       while (ret < 0) {
+               if (time_is_before_eq_jiffies(end_jiffies)) {
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               msleep(20);
+               ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+       }
+
+       if (ret < 0)
+               dev_err(zd_mac_dev(mac), "Could not release "
+                                        "CR_BCN_FIFO_SEMAPHORE!\n");
+       if (r < 0 || ret < 0) {
+               if (r >= 0)
+                       r = ret;
+               goto out;
+       }
 
        /* 802.11b/g 2.4G CCK 1Mb
         * 802.11a, not yet implemented, uses different values (see GPL vendor
         * driver)
         */
-       return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 |
-                       (full_len << 19));
+       r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
+                               CR_BCN_PLCP_CFG);
+out:
+       mutex_unlock(&mac->chip.mutex);
+       kfree(ioreqs);
+       return r;
+
+reset_device:
+       mutex_unlock(&mac->chip.mutex);
+       kfree(ioreqs);
+
+       /* semaphore stuck, reset device to avoid fw freeze later */
+       dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
+                                 "resetting device...");
+       usb_queue_reset_device(mac->chip.usb.intf);
+
+       return r;
 }
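
The rewritten zd_mac_config_beacon() above no longer issues one USB transfer per register: it collects every write into a single array of struct zd_ioreq32 and pushes the whole batch with one zd_iowrite32a_locked() call while mac->chip.mutex is held. A minimal sketch of that batching pattern follows, assuming the driver's internal headers; the helper name write_beacon_tail() is hypothetical, while struct zd_ioreq32, CR_BCN_FIFO and zd_iowrite32a_locked() are the interfaces used in the hunk above.

/* Sketch only: batch the four-byte zero CRC tail into one locked
 * multi-register write.  Assumes the caller already holds chip->mutex,
 * exactly as zd_mac_config_beacon() does above.
 */
static int write_beacon_tail(struct zd_chip *chip)
{
        struct zd_ioreq32 ioreqs[] = {
                { .addr = CR_BCN_FIFO, .value = 0x0 },
                { .addr = CR_BCN_FIFO, .value = 0x0 },
                { .addr = CR_BCN_FIFO, .value = 0x0 },
                { .addr = CR_BCN_FIFO, .value = 0x0 },
        };

        return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
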
 
 static int fill_ctrlset(struct zd_mac *mac,
@@ -701,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -716,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        r = zd_usb_tx(&mac->chip.usb, skb);
        if (r)
                goto fail;
-       return 0;
+       return;
 
 fail:
        dev_kfree_skb(skb);
-       return 0;
 }
 
 /**
@@ -779,6 +927,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 
                mac->ack_pending = 1;
                mac->ack_signal = stats->signal;
+
+               /* Prevent pending tx-packet on AP-mode */
+               if (mac->type == NL80211_IFTYPE_AP) {
+                       skb = __skb_dequeue(q);
+                       zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
+                       mac->ack_pending = 0;
+               }
        }
 
        spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1037,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
                mac->type = vif->type;
                break;
        default:
                return -EOPNOTSUPP;
        }
 
-       return zd_write_mac_addr(&mac->chip, vif->addr);
+       mac->vif = vif;
+
+       return set_mac_and_bssid(mac);
 }
 
 static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1054,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        mac->type = NL80211_IFTYPE_UNSPECIFIED;
-       zd_set_beacon_interval(&mac->chip, 0);
+       mac->vif = NULL;
+       zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
        zd_write_mac_addr(&mac->chip, NULL);
 }
 
@@ -905,49 +1064,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_conf *conf = &hw->conf;
 
+       spin_lock_irq(&mac->lock);
+       mac->channel = conf->channel->hw_value;
+       spin_unlock_irq(&mac->lock);
+
        return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
 }
 
-static void zd_process_intr(struct work_struct *work)
+static void zd_beacon_done(struct zd_mac *mac)
 {
-       u16 int_status;
-       struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
+       struct sk_buff *skb, *beacon;
 
-       int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
-       if (int_status & INT_CFG_NEXT_BCN)
-               dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
-       else
-               dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
-
-       zd_chip_enable_hwint(&mac->chip);
-}
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               return;
+       if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
+               return;
 
+       /*
+        * Send out buffered broad- and multicast frames.
+        */
+       while (!ieee80211_queue_stopped(mac->hw, 0)) {
+               skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
+               if (!skb)
+                       break;
+               zd_op_tx(mac->hw, skb);
+       }
 
-static void set_multicast_hash_handler(struct work_struct *work)
-{
-       struct zd_mac *mac =
-               container_of(work, struct zd_mac, set_multicast_hash_work);
-       struct zd_mc_hash hash;
+       /*
+        * Fetch next beacon so that tim_count is updated.
+        */
+       beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+       if (beacon) {
+               zd_mac_config_beacon(mac->hw, beacon);
+               kfree_skb(beacon);
+       }
 
        spin_lock_irq(&mac->lock);
-       hash = mac->multicast_hash;
+       mac->beacon.last_update = jiffies;
        spin_unlock_irq(&mac->lock);
-
-       zd_chip_set_multicast_hash(&mac->chip, &hash);
 }
 
-static void set_rx_filter_handler(struct work_struct *work)
+static void zd_process_intr(struct work_struct *work)
 {
-       struct zd_mac *mac =
-               container_of(work, struct zd_mac, set_rx_filter_work);
-       int r;
+       u16 int_status;
+       unsigned long flags;
+       struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
 
-       dev_dbg_f(zd_mac_dev(mac), "\n");
-       r = set_rx_filter(mac);
-       if (r)
-               dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r);
+       spin_lock_irqsave(&mac->lock, flags);
+       int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
+       spin_unlock_irqrestore(&mac->lock, flags);
+
+       if (int_status & INT_CFG_NEXT_BCN) {
+               /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
+               zd_beacon_done(mac);
+       } else {
+               dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
+       }
+
+       zd_chip_enable_hwint(&mac->chip);
 }
 
+
 static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
                                   struct netdev_hw_addr_list *mc_list)
 {
@@ -979,6 +1156,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
        };
        struct zd_mac *mac = zd_hw_mac(hw);
        unsigned long flags;
+       int r;
 
        /* Only deal with supported flags */
        changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1178,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
        mac->multicast_hash = hash;
        spin_unlock_irqrestore(&mac->lock, flags);
 
-       /* XXX: these can be called here now, can sleep now! */
-       queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+       zd_chip_set_multicast_hash(&mac->chip, &hash);
 
-       if (changed_flags & FIF_CONTROL)
-               queue_work(zd_workqueue, &mac->set_rx_filter_work);
+       if (changed_flags & FIF_CONTROL) {
+               r = set_rx_filter(mac);
+               if (r)
+                       dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
+       }
 
        /* no handling required for FIF_OTHER_BSS as we don't currently
         * do BSSID filtering */
@@ -1016,20 +1196,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
         * time. */
 }
 
-static void set_rts_cts_work(struct work_struct *work)
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
 {
-       struct zd_mac *mac =
-               container_of(work, struct zd_mac, set_rts_cts_work);
-       unsigned long flags;
-       unsigned int short_preamble;
-
        mutex_lock(&mac->chip.mutex);
-
-       spin_lock_irqsave(&mac->lock, flags);
-       mac->updating_rts_rate = 0;
-       short_preamble = mac->short_preamble;
-       spin_unlock_irqrestore(&mac->lock, flags);
-
        zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
        mutex_unlock(&mac->chip.mutex);
 }
@@ -1040,33 +1209,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
                                   u32 changes)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
-       unsigned long flags;
        int associated;
 
        dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
 
        if (mac->type == NL80211_IFTYPE_MESH_POINT ||
-           mac->type == NL80211_IFTYPE_ADHOC) {
+           mac->type == NL80211_IFTYPE_ADHOC ||
+           mac->type == NL80211_IFTYPE_AP) {
                associated = true;
                if (changes & BSS_CHANGED_BEACON) {
                        struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
 
                        if (beacon) {
+                               zd_chip_disable_hwint(&mac->chip);
                                zd_mac_config_beacon(hw, beacon);
+                               zd_chip_enable_hwint(&mac->chip);
                                kfree_skb(beacon);
                        }
                }
 
                if (changes & BSS_CHANGED_BEACON_ENABLED) {
-                       u32 interval;
+                       u16 interval = 0;
+                       u8 period = 0;
 
-                       if (bss_conf->enable_beacon)
-                               interval = BCN_MODE_IBSS |
-                                               bss_conf->beacon_int;
-                       else
-                               interval = 0;
+                       if (bss_conf->enable_beacon) {
+                               period = bss_conf->dtim_period;
+                               interval = bss_conf->beacon_int;
+                       }
 
-                       zd_set_beacon_interval(&mac->chip, interval);
+                       spin_lock_irq(&mac->lock);
+                       mac->beacon.period = period;
+                       mac->beacon.interval = interval;
+                       mac->beacon.last_update = jiffies;
+                       spin_unlock_irq(&mac->lock);
+
+                       zd_set_beacon_interval(&mac->chip, interval, period,
+                                              mac->type);
                }
        } else
                associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1256,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
        /* TODO: do hardware bssid filtering */
 
        if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-               spin_lock_irqsave(&mac->lock, flags);
+               spin_lock_irq(&mac->lock);
                mac->short_preamble = bss_conf->use_short_preamble;
-               if (!mac->updating_rts_rate) {
-                       mac->updating_rts_rate = 1;
-                       /* FIXME: should disable TX here, until work has
-                        * completed and RTS_CTS reg is updated */
-                       queue_work(zd_workqueue, &mac->set_rts_cts_work);
-               }
-               spin_unlock_irqrestore(&mac->lock, flags);
+               spin_unlock_irq(&mac->lock);
+
+               set_rts_cts(mac, bss_conf->use_short_preamble);
        }
 }
 
@@ -1138,12 +1312,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
        hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
 
        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-                   IEEE80211_HW_SIGNAL_UNSPEC;
+                   IEEE80211_HW_SIGNAL_UNSPEC |
+                   IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
 
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_MESH_POINT) |
                BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC);
+               BIT(NL80211_IFTYPE_ADHOC) |
+               BIT(NL80211_IFTYPE_AP);
 
        hw->max_signal = 100;
        hw->queues = 1;
@@ -1160,15 +1336,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 
        zd_chip_init(&mac->chip, hw, intf);
        housekeeping_init(mac);
-       INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
-       INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
-       INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
+       beacon_init(mac);
        INIT_WORK(&mac->process_intr, zd_process_intr);
 
        SET_IEEE80211_DEV(hw, &intf->dev);
        return hw;
 }
 
+#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
+
+static void beacon_watchdog_handler(struct work_struct *work)
+{
+       struct zd_mac *mac =
+               container_of(work, struct zd_mac, beacon.watchdog_work.work);
+       struct sk_buff *beacon;
+       unsigned long timeout;
+       int interval, period;
+
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               goto rearm;
+       if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
+               goto rearm;
+
+       spin_lock_irq(&mac->lock);
+       interval = mac->beacon.interval;
+       period = mac->beacon.period;
+       timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
+       spin_unlock_irq(&mac->lock);
+
+       if (interval > 0 && time_is_before_jiffies(timeout)) {
+               dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
+                                          "restarting. "
+                                          "(interval: %d, dtim: %d)\n",
+                                          interval, period);
+
+               zd_chip_disable_hwint(&mac->chip);
+
+               beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+               if (beacon) {
+                       zd_mac_config_beacon(mac->hw, beacon);
+                       kfree_skb(beacon);
+               }
+
+               zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
+
+               zd_chip_enable_hwint(&mac->chip);
+
+               spin_lock_irq(&mac->lock);
+               mac->beacon.last_update = jiffies;
+               spin_unlock_irq(&mac->lock);
+       }
+
+rearm:
+       queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+                          BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_init(struct zd_mac *mac)
+{
+       INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
+}
+
+static void beacon_enable(struct zd_mac *mac)
+{
+       dev_dbg_f(zd_mac_dev(mac), "\n");
+
+       mac->beacon.last_update = jiffies;
+       queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+                          BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_disable(struct zd_mac *mac)
+{
+       dev_dbg_f(zd_mac_dev(mac), "\n");
+       cancel_delayed_work_sync(&mac->beacon.watchdog_work);
+}
+
 #define LINK_LED_WORK_DELAY HZ
 
 static void link_led_handler(struct work_struct *work)
@@ -1179,6 +1422,9 @@ static void link_led_handler(struct work_struct *work)
        int is_associated;
        int r;
 
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               goto requeue;
+
        spin_lock_irq(&mac->lock);
        is_associated = mac->associated;
        spin_unlock_irq(&mac->lock);
@@ -1188,6 +1434,7 @@ static void link_led_handler(struct work_struct *work)
        if (r)
                dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
 
+requeue:
        queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
                           LINK_LED_WORK_DELAY);
 }
index a6d86b9..f8c93c3 100644 (file)
@@ -163,6 +163,17 @@ struct housekeeping {
        struct delayed_work link_led_work;
 };
 
+struct beacon {
+       struct delayed_work watchdog_work;
+       unsigned long last_update;
+       u16 interval;
+       u8 period;
+};
+
+enum zd_device_flags {
+       ZD_DEVICE_RUNNING,
+};
+
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
 #define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
        spinlock_t lock;
        spinlock_t intr_lock;
        struct ieee80211_hw *hw;
+       struct ieee80211_vif *vif;
        struct housekeeping housekeeping;
-       struct work_struct set_multicast_hash_work;
+       struct beacon beacon;
        struct work_struct set_rts_cts_work;
-       struct work_struct set_rx_filter_work;
        struct work_struct process_intr;
        struct zd_mc_hash multicast_hash;
        u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
        u8 regdomain;
        u8 default_regdomain;
+       u8 channel;
        int type;
        int associated;
+       unsigned long flags;
        struct sk_buff_head ack_wait_queue;
        struct ieee80211_channel channels[14];
        struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
        /* Short preamble (used for RTS/CTS) */
        unsigned int short_preamble:1;
 
-       /* flags to indicate update in progress */
-       unsigned int updating_rts_rate:1;
-
        /* whether to pass frames with CRC errors to stack */
        unsigned int pass_failed_fcs:1;
 
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
 void zd_mac_tx_failed(struct urb *urb);
 void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
 
+int zd_op_start(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw);
+int zd_restore_settings(struct zd_mac *mac);
+
 #ifdef DEBUG
 void zd_dump_rx_status(const struct rx_status *status);
 #else
index 06041cb..81e8048 100644 (file)
@@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
        int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
        if (int_num == CR_INTERRUPT) {
                struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
+               spin_lock(&mac->lock);
                memcpy(&mac->intr_buffer, urb->transfer_buffer,
                                USB_MAX_EP_INT_BUFFER);
+               spin_unlock(&mac->lock);
                schedule_work(&mac->process_intr);
        } else if (intr->read_regs_enabled) {
                intr->read_regs.length = len = urb->actual_length;
@@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
        case -ENOENT:
        case -ECONNRESET:
        case -EPIPE:
-               goto kfree;
+               dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+               return;
        default:
+               dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
                goto resubmit;
        }
 
@@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
 resubmit:
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r) {
-               dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb);
-               goto kfree;
+               dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
+                         urb, r);
+               /* TODO: add worker to reset intr->urb */
        }
        return;
-kfree:
-       kfree(urb->transfer_buffer);
 }
 
 static inline int int_urb_interval(struct usb_device *udev)
@@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
 int zd_usb_enable_int(struct zd_usb *usb)
 {
        int r;
-       struct usb_device *udev;
+       struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct zd_usb_interrupt *intr = &usb->intr;
-       void *transfer_buffer = NULL;
        struct urb *urb;
 
        dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
        intr->urb = urb;
        spin_unlock_irq(&intr->lock);
 
-       /* TODO: make it a DMA buffer */
        r = -ENOMEM;
-       transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL);
-       if (!transfer_buffer) {
+       intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
+                                         GFP_KERNEL, &intr->buffer_dma);
+       if (!intr->buffer) {
                dev_dbg_f(zd_usb_dev(usb),
                        "couldn't allocate transfer_buffer\n");
                goto error_set_urb_null;
        }
 
-       udev = zd_usb_to_usbdev(usb);
        usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
-                        transfer_buffer, USB_MAX_EP_INT_BUFFER,
+                        intr->buffer, USB_MAX_EP_INT_BUFFER,
                         int_urb_complete, usb,
                         intr->interval);
+       urb->transfer_dma = intr->buffer_dma;
+       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
        dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
        r = usb_submit_urb(urb, GFP_KERNEL);
@@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
 
        return 0;
 error:
-       kfree(transfer_buffer);
+       usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+                         intr->buffer, intr->buffer_dma);
 error_set_urb_null:
        spin_lock_irq(&intr->lock);
        intr->urb = NULL;
@@ -539,8 +543,11 @@ out:
 void zd_usb_disable_int(struct zd_usb *usb)
 {
        unsigned long flags;
+       struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct zd_usb_interrupt *intr = &usb->intr;
        struct urb *urb;
+       void *buffer;
+       dma_addr_t buffer_dma;
 
        spin_lock_irqsave(&intr->lock, flags);
        urb = intr->urb;
@@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
                return;
        }
        intr->urb = NULL;
+       buffer = intr->buffer;
+       buffer_dma = intr->buffer_dma;
+       intr->buffer = NULL;
        spin_unlock_irqrestore(&intr->lock, flags);
 
        usb_kill_urb(urb);
        dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
        usb_free_urb(urb);
+
+       if (buffer)
+               usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+                                 buffer, buffer_dma);
 }
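
The interrupt endpoint above now reads into a DMA-coherent buffer from usb_alloc_coherent(), handed to the URB through transfer_dma together with URB_NO_TRANSFER_DMA_MAP so the USB core does not remap the buffer on every submission, and zd_usb_disable_int() frees it with usb_free_coherent() once the URB is killed. A stripped-down sketch of the allocation side, with my_int_dev, MY_INT_EP, MY_BUF_LEN and the completion handler as hypothetical stand-ins for the driver's own structures:

#include <linux/usb.h>

#define MY_INT_EP      0x03    /* hypothetical interrupt-in endpoint */
#define MY_BUF_LEN     64      /* hypothetical transfer size */

struct my_int_dev {
        struct usb_device *udev;
        struct urb *urb;
        void *buffer;
        dma_addr_t buffer_dma;
};

/* Sketch only, mirroring zd_usb_enable_int() above: allocate a coherent
 * buffer, pre-set its DMA address on the URB, then submit.
 */
static int my_enable_int(struct my_int_dev *dev, int interval,
                         usb_complete_t complete_fn)
{
        int r;

        dev->buffer = usb_alloc_coherent(dev->udev, MY_BUF_LEN, GFP_KERNEL,
                                         &dev->buffer_dma);
        if (!dev->buffer)
                return -ENOMEM;

        usb_fill_int_urb(dev->urb, dev->udev,
                         usb_rcvintpipe(dev->udev, MY_INT_EP),
                         dev->buffer, MY_BUF_LEN, complete_fn, dev, interval);
        /* buffer is already mapped; tell the core not to map it again */
        dev->urb->transfer_dma = dev->buffer_dma;
        dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

        r = usb_submit_urb(dev->urb, GFP_KERNEL);
        if (r)
                usb_free_coherent(dev->udev, MY_BUF_LEN, dev->buffer,
                                  dev->buffer_dma);
        return r;
}
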
 
 static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
 
 static void rx_urb_complete(struct urb *urb)
 {
+       int r;
        struct zd_usb *usb;
        struct zd_usb_rx *rx;
        const u8 *buffer;
@@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
        case -ENOENT:
        case -ECONNRESET:
        case -EPIPE:
+               dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
                return;
        default:
                dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
        usb = urb->context;
        rx = &usb->rx;
 
+       zd_usb_reset_rx_idle_timer(usb);
+
        if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
                /* If there is an old first fragment, we don't care. */
                dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
        }
 
 resubmit:
-       usb_submit_urb(urb, GFP_ATOMIC);
+       r = usb_submit_urb(urb, GFP_ATOMIC);
+       if (r)
+               dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
 }
 
 static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
        usb_free_urb(urb);
 }
 
-int zd_usb_enable_rx(struct zd_usb *usb)
+static int __zd_usb_enable_rx(struct zd_usb *usb)
 {
        int i, r;
        struct zd_usb_rx *rx = &usb->rx;
@@ -742,7 +762,21 @@ error:
        return r;
 }
 
-void zd_usb_disable_rx(struct zd_usb *usb)
+int zd_usb_enable_rx(struct zd_usb *usb)
+{
+       int r;
+       struct zd_usb_rx *rx = &usb->rx;
+
+       mutex_lock(&rx->setup_mutex);
+       r = __zd_usb_enable_rx(usb);
+       mutex_unlock(&rx->setup_mutex);
+
+       zd_usb_reset_rx_idle_timer(usb);
+
+       return r;
+}
+
+static void __zd_usb_disable_rx(struct zd_usb *usb)
 {
        int i;
        unsigned long flags;
@@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
        spin_unlock_irqrestore(&rx->lock, flags);
 }
 
+void zd_usb_disable_rx(struct zd_usb *usb)
+{
+       struct zd_usb_rx *rx = &usb->rx;
+
+       mutex_lock(&rx->setup_mutex);
+       __zd_usb_disable_rx(usb);
+       mutex_unlock(&rx->setup_mutex);
+
+       cancel_delayed_work_sync(&rx->idle_work);
+}
+
+static void zd_usb_reset_rx(struct zd_usb *usb)
+{
+       bool do_reset;
+       struct zd_usb_rx *rx = &usb->rx;
+       unsigned long flags;
+
+       mutex_lock(&rx->setup_mutex);
+
+       spin_lock_irqsave(&rx->lock, flags);
+       do_reset = rx->urbs != NULL;
+       spin_unlock_irqrestore(&rx->lock, flags);
+
+       if (do_reset) {
+               __zd_usb_disable_rx(usb);
+               __zd_usb_enable_rx(usb);
+       }
+
+       mutex_unlock(&rx->setup_mutex);
+
+       if (do_reset)
+               zd_usb_reset_rx_idle_timer(usb);
+}
+
 /**
  * zd_usb_disable_tx - disable transmission
  * @usb: the zd1211rw-private USB structure
@@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
        unsigned long flags;
-       struct list_head *pos, *n;
+
+       atomic_set(&tx->enabled, 0);
+
+       /* kill all submitted tx-urbs */
+       usb_kill_anchored_urbs(&tx->submitted);
 
        spin_lock_irqsave(&tx->lock, flags);
-       list_for_each_safe(pos, n, &tx->free_urb_list) {
-               list_del(pos);
-               usb_free_urb(list_entry(pos, struct urb, urb_list));
-       }
-       tx->enabled = 0;
+       WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+       WARN_ON(tx->submitted_urbs != 0);
        tx->submitted_urbs = 0;
+       spin_unlock_irqrestore(&tx->lock, flags);
+
        /* The stopped state is ignored, relying on ieee80211_wake_queues()
         * in a potentially following zd_usb_enable_tx().
         */
-       spin_unlock_irqrestore(&tx->lock, flags);
 }
 
 /**
@@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
        struct zd_usb_tx *tx = &usb->tx;
 
        spin_lock_irqsave(&tx->lock, flags);
-       tx->enabled = 1;
+       atomic_set(&tx->enabled, 1);
        tx->submitted_urbs = 0;
        ieee80211_wake_queues(zd_usb_to_hw(usb));
        tx->stopped = 0;
        spin_unlock_irqrestore(&tx->lock, flags);
 }
 
-/**
- * alloc_tx_urb - provides an tx URB
- * @usb: a &struct zd_usb pointer
- *
- * Allocates a new URB. If possible takes the urb from the free list in
- * usb->tx.
- */
-static struct urb *alloc_tx_urb(struct zd_usb *usb)
-{
-       struct zd_usb_tx *tx = &usb->tx;
-       unsigned long flags;
-       struct list_head *entry;
-       struct urb *urb;
-
-       spin_lock_irqsave(&tx->lock, flags);
-       if (list_empty(&tx->free_urb_list)) {
-               urb = usb_alloc_urb(0, GFP_ATOMIC);
-               goto out;
-       }
-       entry = tx->free_urb_list.next;
-       list_del(entry);
-       urb = list_entry(entry, struct urb, urb_list);
-out:
-       spin_unlock_irqrestore(&tx->lock, flags);
-       return urb;
-}
-
-/**
- * free_tx_urb - frees a used tx URB
- * @usb: a &struct zd_usb pointer
- * @urb: URB to be freed
- *
- * Frees the transmission URB, which means to put it on the free URB
- * list.
- */
-static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
-{
-       struct zd_usb_tx *tx = &usb->tx;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tx->lock, flags);
-       if (!tx->enabled) {
-               usb_free_urb(urb);
-               goto out;
-       }
-       list_add(&urb->urb_list, &tx->free_urb_list);
-out:
-       spin_unlock_irqrestore(&tx->lock, flags);
-}
-
 static void tx_dec_submitted_urbs(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
@@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
        struct zd_usb *usb;
+       struct zd_usb_tx *tx;
+
+       skb = (struct sk_buff *)urb->context;
+       info = IEEE80211_SKB_CB(skb);
+       /*
+        * grab 'usb' pointer before handing off the skb (since
+        * it might be freed by zd_mac_tx_to_dev or mac80211)
+        */
+       usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+       tx = &usb->tx;
 
        switch (urb->status) {
        case 0:
@@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
                goto resubmit;
        }
 free_urb:
-       skb = (struct sk_buff *)urb->context;
-       /*
-        * grab 'usb' pointer before handing off the skb (since
-        * it might be freed by zd_mac_tx_to_dev or mac80211)
-        */
-       info = IEEE80211_SKB_CB(skb);
-       usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+       skb_unlink(skb, &usb->tx.submitted_skbs);
        zd_mac_tx_to_dev(skb, urb->status);
-       free_tx_urb(usb, urb);
+       usb_free_urb(urb);
        tx_dec_submitted_urbs(usb);
        return;
 resubmit:
+       usb_anchor_urb(urb, &tx->submitted);
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r) {
+               usb_unanchor_urb(urb);
                dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
                goto free_urb;
        }
@@ -956,10 +982,17 @@ resubmit:
 int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
 {
        int r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct urb *urb;
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (!atomic_read(&tx->enabled)) {
+               r = -ENOENT;
+               goto out;
+       }
 
-       urb = alloc_tx_urb(usb);
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
                r = -ENOMEM;
                goto out;
@@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
        usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
                          skb->data, skb->len, tx_urb_complete, skb);
 
+       info->rate_driver_data[1] = (void *)jiffies;
+       skb_queue_tail(&tx->submitted_skbs, skb);
+       usb_anchor_urb(urb, &tx->submitted);
+
        r = usb_submit_urb(urb, GFP_ATOMIC);
-       if (r)
+       if (r) {
+               dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
+               usb_unanchor_urb(urb);
+               skb_unlink(skb, &tx->submitted_skbs);
                goto error;
+       }
        tx_inc_submitted_urbs(usb);
        return 0;
 error:
-       free_tx_urb(usb, urb);
+       usb_free_urb(urb);
 out:
        return r;
 }
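
zd_usb_tx() above shows the new anchored TX path: the skb is queued on submitted_skbs, the URB is anchored on tx->submitted before usb_submit_urb(), unanchored again on failure, and zd_usb_disable_tx() earlier in this file cancels whatever is still in flight with usb_kill_anchored_urbs(). A minimal self-contained sketch of that anchor lifecycle, with my_tx_dev, MY_BULK_OUT_EP and the completion callback as hypothetical stand-ins; the anchor calls themselves are the stock USB core API used by the patch:

#include <linux/usb.h>

#define MY_BULK_OUT_EP 0x04    /* hypothetical bulk-out endpoint */

struct my_tx_dev {
        struct usb_device *udev;
        struct usb_anchor submitted;    /* init_usb_anchor() at probe time */
};

/* Sketch only: anchor before submit, unanchor on failure, drop the local
 * reference afterwards (the anchor holds its own reference until the URB
 * completes and is given back).
 */
static int my_tx(struct my_tx_dev *dev, void *buf, int len,
                 usb_complete_t complete_fn)
{
        struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
        int r;

        if (!urb)
                return -ENOMEM;

        usb_fill_bulk_urb(urb, dev->udev,
                          usb_sndbulkpipe(dev->udev, MY_BULK_OUT_EP),
                          buf, len, complete_fn, dev);

        usb_anchor_urb(urb, &dev->submitted);
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r)
                usb_unanchor_urb(urb);  /* never went in flight */
        usb_free_urb(urb);
        return r;
}

/* Teardown: cancel every URB still owned by the anchor. */
static void my_tx_disable(struct my_tx_dev *dev)
{
        usb_kill_anchored_urbs(&dev->submitted);
}
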
 
+static bool zd_tx_timeout(struct zd_usb *usb)
+{
+       struct zd_usb_tx *tx = &usb->tx;
+       struct sk_buff_head *q = &tx->submitted_skbs;
+       struct sk_buff *skb, *skbnext;
+       struct ieee80211_tx_info *info;
+       unsigned long flags, trans_start;
+       bool have_timedout = false;
+
+       spin_lock_irqsave(&q->lock, flags);
+       skb_queue_walk_safe(q, skb, skbnext) {
+               info = IEEE80211_SKB_CB(skb);
+               trans_start = (unsigned long)info->rate_driver_data[1];
+
+               if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
+                       have_timedout = true;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&q->lock, flags);
+
+       return have_timedout;
+}
+
+static void zd_tx_watchdog_handler(struct work_struct *work)
+{
+       struct zd_usb *usb =
+               container_of(work, struct zd_usb, tx.watchdog_work.work);
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
+               goto out;
+       if (!zd_tx_timeout(usb))
+               goto out;
+
+       /* TX halted, try reset */
+       dev_warn(zd_usb_dev(usb), "TX-stall detected, resetting device...");
+
+       usb_queue_reset_device(usb->intf);
+
+       /* reset will stop this worker, don't rearm */
+       return;
+out:
+       queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+                          ZD_TX_WATCHDOG_INTERVAL);
+}
+
+void zd_tx_watchdog_enable(struct zd_usb *usb)
+{
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (!tx->watchdog_enabled) {
+               dev_dbg_f(zd_usb_dev(usb), "\n");
+               queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+                                  ZD_TX_WATCHDOG_INTERVAL);
+               tx->watchdog_enabled = 1;
+       }
+}
+
+void zd_tx_watchdog_disable(struct zd_usb *usb)
+{
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (tx->watchdog_enabled) {
+               dev_dbg_f(zd_usb_dev(usb), "\n");
+               tx->watchdog_enabled = 0;
+               cancel_delayed_work_sync(&tx->watchdog_work);
+       }
+}
+
+static void zd_rx_idle_timer_handler(struct work_struct *work)
+{
+       struct zd_usb *usb =
+               container_of(work, struct zd_usb, rx.idle_work.work);
+       struct zd_mac *mac = zd_usb_to_mac(usb);
+
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               return;
+
+       dev_dbg_f(zd_usb_dev(usb), "\n");
+
+       /* 30 seconds since last rx, reset rx */
+       zd_usb_reset_rx(usb);
+}
+
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
+{
+       struct zd_usb_rx *rx = &usb->rx;
+
+       cancel_delayed_work(&rx->idle_work);
+       queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+}
+
 static inline void init_usb_interrupt(struct zd_usb *usb)
 {
        struct zd_usb_interrupt *intr = &usb->intr;
@@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
 {
        struct zd_usb_rx *rx = &usb->rx;
        spin_lock_init(&rx->lock);
+       mutex_init(&rx->setup_mutex);
        if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
                rx->usb_packet_size = 512;
        } else {
                rx->usb_packet_size = 64;
        }
        ZD_ASSERT(rx->fragment_length == 0);
+       INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
 }
 
 static inline void init_usb_tx(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
        spin_lock_init(&tx->lock);
-       tx->enabled = 0;
+       atomic_set(&tx->enabled, 0);
        tx->stopped = 0;
-       INIT_LIST_HEAD(&tx->free_urb_list);
+       skb_queue_head_init(&tx->submitted_skbs);
+       init_usb_anchor(&tx->submitted);
        tx->submitted_urbs = 0;
+       tx->watchdog_enabled = 0;
+       INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
 }
 
 void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1017,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
        memset(usb, 0, sizeof(*usb));
        usb->intf = usb_get_intf(intf);
        usb_set_intfdata(usb->intf, hw);
+       init_usb_anchor(&usb->submitted_cmds);
        init_usb_interrupt(usb);
        init_usb_tx(usb);
        init_usb_rx(usb);
@@ -1240,6 +1380,7 @@ static void disconnect(struct usb_interface *intf)
        ieee80211_unregister_hw(hw);
 
        /* Just in case something has gone wrong! */
+       zd_usb_disable_tx(usb);
        zd_usb_disable_rx(usb);
        zd_usb_disable_int(usb);
 
@@ -1255,11 +1396,92 @@ static void disconnect(struct usb_interface *intf)
        dev_dbg(&intf->dev, "disconnected\n");
 }
 
+static void zd_usb_resume(struct zd_usb *usb)
+{
+       struct zd_mac *mac = zd_usb_to_mac(usb);
+       int r;
+
+       dev_dbg_f(zd_usb_dev(usb), "\n");
+
+       r = zd_op_start(zd_usb_to_hw(usb));
+       if (r < 0) {
+               dev_warn(zd_usb_dev(usb), "Device resume failed "
+                        "with error code %d. Retrying...\n", r);
+               if (usb->was_running)
+                       set_bit(ZD_DEVICE_RUNNING, &mac->flags);
+               usb_queue_reset_device(usb->intf);
+               return;
+       }
+
+       if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+               r = zd_restore_settings(mac);
+               if (r < 0) {
+                       dev_dbg(zd_usb_dev(usb),
+                               "failed to restore settings, %d\n", r);
+                       return;
+               }
+       }
+}
+
+static void zd_usb_stop(struct zd_usb *usb)
+{
+       dev_dbg_f(zd_usb_dev(usb), "\n");
+
+       zd_op_stop(zd_usb_to_hw(usb));
+
+       zd_usb_disable_tx(usb);
+       zd_usb_disable_rx(usb);
+       zd_usb_disable_int(usb);
+
+       usb->initialized = 0;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+       struct ieee80211_hw *hw = usb_get_intfdata(intf);
+       struct zd_mac *mac;
+       struct zd_usb *usb;
+
+       if (!hw || intf->condition != USB_INTERFACE_BOUND)
+               return 0;
+
+       mac = zd_hw_mac(hw);
+       usb = &mac->chip.usb;
+
+       usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
+       zd_usb_stop(usb);
+
+       mutex_lock(&mac->chip.mutex);
+       return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+       struct ieee80211_hw *hw = usb_get_intfdata(intf);
+       struct zd_mac *mac;
+       struct zd_usb *usb;
+
+       if (!hw || intf->condition != USB_INTERFACE_BOUND)
+               return 0;
+
+       mac = zd_hw_mac(hw);
+       usb = &mac->chip.usb;
+
+       mutex_unlock(&mac->chip.mutex);
+
+       if (usb->was_running)
+               zd_usb_resume(usb);
+       return 0;
+}
+
 static struct usb_driver driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = usb_ids,
        .probe          = probe,
        .disconnect     = disconnect,
+       .pre_reset      = pre_reset,
+       .post_reset     = post_reset,
 };
 
 struct workqueue_struct *zd_workqueue;
@@ -1393,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
                return -EWOULDBLOCK;
        }
        if (!usb_int_enabled(usb)) {
-                dev_dbg_f(zd_usb_dev(usb),
+               dev_dbg_f(zd_usb_dev(usb),
                          "error: usb interrupt not enabled\n");
                return -EWOULDBLOCK;
        }
 
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
+                    sizeof(__le16) > sizeof(usb->req_buf));
+       BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
+              sizeof(usb->req_buf));
+
        req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
-       req = kmalloc(req_len, GFP_KERNEL);
-       if (!req)
-               return -ENOMEM;
+       req = (void *)usb->req_buf;
+
        req->id = cpu_to_le16(USB_REQ_READ_REGS);
        for (i = 0; i < count; i++)
                req->addr[i] = cpu_to_le16((u16)addresses[i]);
 
        udev = zd_usb_to_usbdev(usb);
        prepare_read_regs_int(usb);
-       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
-                        req, req_len, &actual_req_len, 1000 /* ms */);
+       r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                             req, req_len, &actual_req_len, 50 /* ms */);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg(). Error number %d\n", r);
+                       "error in usb_interrupt_msg(). Error number %d\n", r);
                goto error;
        }
        if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n"
+               dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
                        " req_len %d != actual_req_len %d\n",
                        req_len, actual_req_len);
                r = -EIO;
@@ -1424,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
        }
 
        timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
-                                             msecs_to_jiffies(1000));
+                                             msecs_to_jiffies(50));
        if (!timeout) {
                disable_read_regs_int(usb);
                dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1434,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
 
        r = get_results(usb, values, req, count);
 error:
-       kfree(req);
        return r;
 }
 
-int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
-                     unsigned int count)
+static void iowrite16v_urb_complete(struct urb *urb)
+{
+       struct zd_usb *usb = urb->context;
+
+       if (urb->status && !usb->cmd_error)
+               usb->cmd_error = urb->status;
+}
+
+static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
+{
+       int r = 0;
+       struct urb *urb = usb->urb_async_waiting;
+
+       if (!urb)
+               return 0;
+
+       usb->urb_async_waiting = NULL;
+
+       if (!last)
+               urb->transfer_flags |= URB_NO_INTERRUPT;
+
+       usb_anchor_urb(urb, &usb->submitted_cmds);
+       r = usb_submit_urb(urb, GFP_KERNEL);
+       if (r) {
+               usb_unanchor_urb(urb);
+               dev_dbg_f(zd_usb_dev(usb),
+                       "error in usb_submit_urb(). Error number %d\n", r);
+               goto error;
+       }
+
+       /* fall-through with r == 0 */
+error:
+       usb_free_urb(urb);
+       return r;
+}
+
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
+{
+       ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
+       ZD_ASSERT(usb->urb_async_waiting == NULL);
+       ZD_ASSERT(!usb->in_async);
+
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+
+       usb->in_async = 1;
+       usb->cmd_error = 0;
+       usb->urb_async_waiting = NULL;
+}
+
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
+{
+       int r;
+
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       ZD_ASSERT(usb->in_async);
+
+       /* Submit last iowrite16v URB */
+       r = zd_submit_waiting_urb(usb, true);
+       if (r) {
+               dev_dbg_f(zd_usb_dev(usb),
+                       "error in zd_submit_waiting_urb(). "
+                       "Error number %d\n", r);
+
+               usb_kill_anchored_urbs(&usb->submitted_cmds);
+               goto error;
+       }
+
+       if (timeout)
+               timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
+                                                       timeout);
+       if (!timeout) {
+               usb_kill_anchored_urbs(&usb->submitted_cmds);
+               if (usb->cmd_error == -ENOENT) {
+                       dev_dbg_f(zd_usb_dev(usb), "timed out");
+                       r = -ETIMEDOUT;
+                       goto error;
+               }
+       }
+
+       r = usb->cmd_error;
+error:
+       usb->in_async = 0;
+       return r;
+}
+
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+                           unsigned int count)
 {
        int r;
        struct usb_device *udev;
        struct usb_req_write_regs *req = NULL;
-       int i, req_len, actual_req_len;
+       int i, req_len;
+       struct urb *urb;
+       struct usb_host_endpoint *ep;
+
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       ZD_ASSERT(usb->in_async);
 
        if (count == 0)
                return 0;
@@ -1460,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                return -EWOULDBLOCK;
        }
 
+       udev = zd_usb_to_usbdev(usb);
+
+       ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
+       if (!ep)
+               return -ENOENT;
+
+       urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!urb)
+               return -ENOMEM;
+
        req_len = sizeof(struct usb_req_write_regs) +
                  count * sizeof(struct reg_data);
        req = kmalloc(req_len, GFP_KERNEL);
-       if (!req)
-               return -ENOMEM;
+       if (!req) {
+               r = -ENOMEM;
+               goto error;
+       }
 
        req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
        for (i = 0; i < count; i++) {
@@ -1473,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                rw->value = cpu_to_le16(ioreqs[i].value);
        }
 
-       udev = zd_usb_to_usbdev(usb);
-       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
-                        req, req_len, &actual_req_len, 1000 /* ms */);
+       usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                        req, req_len, iowrite16v_urb_complete, usb,
+                        ep->desc.bInterval);
+       urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+
+       /* Submit previous URB */
+       r = zd_submit_waiting_urb(usb, false);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg(). Error number %d\n", r);
-               goto error;
-       }
-       if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg()"
-                       " req_len %d != actual_req_len %d\n",
-                       req_len, actual_req_len);
-               r = -EIO;
+                       "error in zd_submit_waiting_urb(). "
+                       "Error number %d\n", r);
                goto error;
        }
 
-       /* FALL-THROUGH with r == 0 */
+       /* Delay submit so that the URB_NO_INTERRUPT flag can be set for all
+        * URBs of the current batch except for the very last one.
+        */
+       usb->urb_async_waiting = urb;
+       return 0;
 error:
-       kfree(req);
+       usb_free_urb(urb);
        return r;
 }
 
+int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+                       unsigned int count)
+{
+       int r;
+
+       zd_usb_iowrite16v_async_start(usb);
+       r = zd_usb_iowrite16v_async(usb, ioreqs, count);
+       if (r) {
+               zd_usb_iowrite16v_async_end(usb, 0);
+               return r;
+       }
+       return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
+}
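
The synchronous wrapper above is the simplest user of the new asynchronous interface: one start, one queued batch, one end with a 50 ms timeout. When several register batches should share a single completion interrupt, the calls chain as in the sketch below; write_two_batches() and the regs_a/regs_b arrays are hypothetical, while the three zd_usb_iowrite16v_async_*() calls and the requirement that the caller hold zd_usb_to_chip(usb)->mutex come from the code above.

/* Sketch only: queue two batches so every URB except the last carries
 * URB_NO_INTERRUPT, then wait up to 50 ms for the whole sequence.
 */
static int write_two_batches(struct zd_usb *usb,
                             const struct zd_ioreq16 *regs_a, unsigned int na,
                             const struct zd_ioreq16 *regs_b, unsigned int nb)
{
        int r;

        zd_usb_iowrite16v_async_start(usb);

        r = zd_usb_iowrite16v_async(usb, regs_a, na);
        if (r)
                goto error;
        r = zd_usb_iowrite16v_async(usb, regs_b, nb);
        if (r)
                goto error;

        return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
error:
        /* a zero timeout just cleans up any pending URB state */
        zd_usb_iowrite16v_async_end(usb, 0);
        return r;
}
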
+
 int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
 {
        int r;
@@ -1537,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
                        "error %d: Couldn't read CR203\n", r);
-               goto out;
+               return r;
        }
        bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
 
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
+                    USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
+                    sizeof(usb->req_buf));
+       BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
+              sizeof(usb->req_buf));
+
        req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
-       req = kmalloc(req_len, GFP_KERNEL);
-       if (!req)
-               return -ENOMEM;
+       req = (void *)usb->req_buf;
 
        req->id = cpu_to_le16(USB_REQ_WRITE_RF);
        /* 1: 3683a, but not used in ZYDAS driver */
@@ -1559,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
        }
 
        udev = zd_usb_to_usbdev(usb);
-       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
-                        req, req_len, &actual_req_len, 1000 /* ms */);
+       r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                             req, req_len, &actual_req_len, 50 /* ms */);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg(). Error number %d\n", r);
+                       "error in usb_interrupt_msg(). Error number %d\n", r);
                goto out;
        }
        if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()"
+               dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
                        " req_len %d != actual_req_len %d\n",
                        req_len, actual_req_len);
                r = -EIO;
@@ -1576,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
 
        /* FALL-THROUGH with r == 0 */
 out:
-       kfree(req);
        return r;
 }
index 1b1655c..b3df2c8 100644 (file)
 #define ZD_USB_TX_HIGH  5
 #define ZD_USB_TX_LOW   2
 
+#define ZD_TX_TIMEOUT          (HZ * 5)
+#define ZD_TX_WATCHDOG_INTERVAL        round_jiffies_relative(HZ)
+#define ZD_RX_IDLE_INTERVAL    round_jiffies_relative(30 * HZ)
+
 enum devicetype {
        DEVICE_ZD1211  = 0,
        DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
        struct read_regs_int read_regs;
        spinlock_t lock;
        struct urb *urb;
+       void *buffer;
+       dma_addr_t buffer_dma;
        int interval;
        u8 read_regs_enabled:1;
 };
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
 
 struct zd_usb_rx {
        spinlock_t lock;
-       u8 fragment[2*USB_MAX_RX_SIZE];
+       struct mutex setup_mutex;
+       struct delayed_work idle_work;
+       u8 fragment[2 * USB_MAX_RX_SIZE];
        unsigned int fragment_length;
        unsigned int usb_packet_size;
        struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
 
 /**
  * struct zd_usb_tx - structure used for transmitting frames
+ * @enabled: atomic enabled flag, indicates whether tx is enabled
  * @lock: lock for transmission
- * @free_urb_list: list of free URBs, contains all the URBs, which can be used
+ * @submitted: anchor for URBs sent to device
  * @submitted_urbs: atomic integer that counts the URBs having sent to the
  *     device, which haven't been completed
- * @enabled: enabled flag, indicates whether tx is enabled
  * @stopped: indicates whether higher level tx queues are stopped
  */
 struct zd_usb_tx {
+       atomic_t enabled;
        spinlock_t lock;
-       struct list_head free_urb_list;
+       struct delayed_work watchdog_work;
+       struct sk_buff_head submitted_skbs;
+       struct usb_anchor submitted;
        int submitted_urbs;
-       int enabled;
-       int stopped;
+       u8 stopped:1, watchdog_enabled:1;
 };
 
 /* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,11 @@ struct zd_usb {
        struct zd_usb_rx rx;
        struct zd_usb_tx tx;
        struct usb_interface *intf;
-       u8 is_zd1211b:1, initialized:1;
+       struct usb_anchor submitted_cmds;
+       struct urb *urb_async_waiting;
+       int cmd_error;
+       u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
+       u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1;
 };
 
 #define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb);
 
 int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
 
+void zd_tx_watchdog_enable(struct zd_usb *usb);
+void zd_tx_watchdog_disable(struct zd_usb *usb);
+
 int zd_usb_enable_int(struct zd_usb *usb);
 void zd_usb_disable_int(struct zd_usb *usb);
 
 int zd_usb_enable_rx(struct zd_usb *usb);
 void zd_usb_disable_rx(struct zd_usb *usb);
 
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
+
 void zd_usb_enable_tx(struct zd_usb *usb);
 void zd_usb_disable_tx(struct zd_usb *usb);
 
@@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
        return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
 }
 
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout);
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+                           unsigned int count);
 int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                      unsigned int count);
 
index 546de57..da1f121 100644 (file)
@@ -120,6 +120,9 @@ struct netfront_info {
        unsigned long rx_pfn_array[NET_RX_RING_SIZE];
        struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
        struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+       /* Statistics */
+       int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
        return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
        struct iphdr *iph;
        unsigned char *th;
        int err = -EPROTO;
+       int recalculate_partial_csum = 0;
+
+       /*
+        * A GSO SKB must be CHECKSUM_PARTIAL. However, some buggy
+        * peers can fail to set NETRXF_csum_blank when sending a GSO
+        * frame. In this case, force the SKB to CHECKSUM_PARTIAL and
+        * recalculate the partial checksum.
+        */
+       if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+               struct netfront_info *np = netdev_priv(dev);
+               np->rx_gso_checksum_fixup++;
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               recalculate_partial_csum = 1;
+       }
+
+       /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
 
        if (skb->protocol != htons(ETH_P_IP))
                goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
        switch (iph->protocol) {
        case IPPROTO_TCP:
                skb->csum_offset = offsetof(struct tcphdr, check);
+
+               if (recalculate_partial_csum) {
+                       struct tcphdr *tcph = (struct tcphdr *)th;
+                       tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        skb->len - iph->ihl*4,
+                                                        IPPROTO_TCP, 0);
+               }
                break;
        case IPPROTO_UDP:
                skb->csum_offset = offsetof(struct udphdr, check);
+
+               if (recalculate_partial_csum) {
+                       struct udphdr *udph = (struct udphdr *)th;
+                       udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        skb->len - iph->ihl*4,
+                                                        IPPROTO_UDP, 0);
+               }
                break;
        default:
                if (net_ratelimit())
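
For reference, the seed written into tcph->check / udph->check above is the
folded pseudo-header sum: csum_tcpudp_magic() returns the one's complement of
that sum, and the patch stores the complement of its result, so the field ends
up holding the plain folded sum for the device (or skb_checksum_help()) to
continue from. A small, self-contained user-space sketch of the same
arithmetic; host-order values and made-up addresses purely for illustration,
whereas the kernel helpers operate on network-order quantities:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Fold a wide accumulator down to a 16-bit one's-complement sum. */
static uint16_t csum_fold16(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Folded pseudo-header sum over source, destination, length and protocol.
 * csum_tcpudp_magic() returns the one's complement of this value, so the
 * ~csum_tcpudp_magic(...) stored above equals this sum. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint32_t len, uint8_t proto)
{
	uint64_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += len;
	sum += proto;
	return csum_fold16(sum);
}

int main(void)
{
	/* Example values only: 10.0.0.1 -> 10.0.0.2, 1480-byte TCP segment. */
	uint32_t s = ntohl(inet_addr("10.0.0.1"));
	uint32_t d = ntohl(inet_addr("10.0.0.2"));
	uint16_t seed = pseudo_hdr_sum(s, d, 1480, 6 /* IPPROTO_TCP */);

	printf("tcph->check seed = 0x%04x\n", (unsigned int)seed);
	return 0;
}
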
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);
 
-               if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       if (skb_checksum_setup(skb)) {
-                               kfree_skb(skb);
-                               packets_dropped++;
-                               dev->stats.rx_errors++;
-                               continue;
-                       }
+               if (checksum_setup(dev, skb)) {
+                       kfree_skb(skb);
+                       packets_dropped++;
+                       dev->stats.rx_errors++;
+                       continue;
                }
 
                dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
        }
 }
 
+static const struct xennet_stat {
+       char name[ETH_GSTRING_LEN];
+       u16 offset;
+} xennet_stats[] = {
+       {
+               "rx_gso_checksum_fixup",
+               offsetof(struct netfront_info, rx_gso_checksum_fixup)
+       },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+       switch (string_set) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(xennet_stats);
+       default:
+               return -EINVAL;
+       }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+                                    struct ethtool_stats *stats, u64 * data)
+{
+       void *np = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+               data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                              xennet_stats[i].name, ETH_GSTRING_LEN);
+               break;
+       }
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
        .set_tx_csum = ethtool_op_set_tx_csum,
        .set_sg = xennet_set_sg,
        .set_tso = xennet_set_tso,
        .get_link = ethtool_op_get_link,
+
+       .get_sset_count = xennet_get_sset_count,
+       .get_ethtool_stats = xennet_get_ethtool_stats,
+       .get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
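
The stats table added above pairs an ethtool string with an offsetof() into
struct netfront_info, so xennet_get_ethtool_stats can stay a generic loop and
`ethtool -S <iface>` reports rx_gso_checksum_fixup. A self-contained
user-space sketch of the same table-of-offsets pattern; the struct and field
names here are invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NAME_LEN 32			/* stand-in for ETH_GSTRING_LEN */

struct demo_priv {			/* stand-in for struct netfront_info */
	int rx_gso_checksum_fixup;
	int rx_drops;
};

static const struct demo_stat {
	char name[NAME_LEN];
	uint16_t offset;
} demo_stats[] = {
	{ "rx_gso_checksum_fixup", offsetof(struct demo_priv, rx_gso_checksum_fixup) },
	{ "rx_drops",              offsetof(struct demo_priv, rx_drops) },
};

int main(void)
{
	struct demo_priv priv = { .rx_gso_checksum_fixup = 3, .rx_drops = 1 };
	uint64_t data[2];
	size_t i;

	/* Same pattern as xennet_get_ethtool_stats: walk the table and read
	 * each counter through its byte offset into the private struct. */
	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		data[i] = *(int *)((char *)&priv + demo_stats[i].offset);

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%-24s %llu\n", demo_stats[i].name,
		       (unsigned long long)data[i]);
	return 0;
}
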
index ffedfd4..ea15800 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig NFC_DEVICES
-       bool "NFC devices"
+       bool "Near Field Communication (NFC) devices"
        default n
        ---help---
          You'll have to say Y if your computer contains an NFC device that
index bae6472..724f65d 100644 (file)
@@ -60,7 +60,7 @@ enum pn544_irq {
 struct pn544_info {
        struct miscdevice miscdev;
        struct i2c_client *i2c_dev;
-       struct regulator_bulk_data regs[2];
+       struct regulator_bulk_data regs[3];
 
        enum pn544_state state;
        wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
 
 static const char reg_vdd_io[] = "Vdd_IO";
 static const char reg_vbat[]   = "VBat";
+static const char reg_vsim[]   = "VSim";
 
 /* sysfs interface */
 static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
 
        info->regs[0].supply = reg_vdd_io;
        info->regs[1].supply = reg_vbat;
+       info->regs[2].supply = reg_vsim;
        r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
                                 info->regs);
        if (r < 0)
index 28295d0..4d87b5d 100644 (file)
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
        (p)->unique_id = of_pdt_unique_id++; \
 } while (0)
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-       return dp->path_component_name;
+       int len, ourlen, plen;
+       char *n;
+
+       dp->path_component_name = build_path_component(dp);
+
+       plen = strlen(dp->parent->full_name);
+       ourlen = strlen(dp->path_component_name);
+       len = ourlen + plen + 2;
+
+       n = prom_early_alloc(len);
+       strcpy(n, dp->parent->full_name);
+       if (!of_node_is_root(dp->parent)) {
+               strcpy(n + plen, "/");
+               plen++;
+       }
+       strcpy(n + plen, dp->path_component_name);
+
+       return n;
 }
 
-#else
+#else /* CONFIG_SPARC */
 
 static inline void of_pdt_incr_unique_id(void *p) { }
 static inline void irq_trans_init(struct device_node *dp) { }
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-       return dp->name;
+       static int failsafe_id = 0; /* for generating unique names on failure */
+       char *buf;
+       int len;
+
+       if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+               goto failsafe;
+
+       buf = prom_early_alloc(len + 1);
+       if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+               goto failsafe;
+       return buf;
+
+ failsafe:
+       buf = prom_early_alloc(strlen(dp->parent->full_name) +
+                              strlen(dp->name) + 16);
+       sprintf(buf, "%s/%s@unknown%i",
+               of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+               dp->name, failsafe_id++);
+       pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+       return buf;
 }
 
 #endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
        return buf;
 }
 
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
-       char *res, *buf = NULL;
-       int len;
-
-       if (!of_pdt_prom_ops->pkg2path)
-               return NULL;
-
-       if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
-               return NULL;
-       buf = prom_early_alloc(len + 1);
-       if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
-               pr_err("%s: package-to-path failed\n", __func__);
-               return NULL;
-       }
-
-       res = strrchr(buf, '/');
-       if (!res) {
-               pr_err("%s: couldn't find / in %s\n", __func__, buf);
-               return NULL;
-       }
-       return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
-       char *buf;
-
-       buf = of_pdt_try_pkg2path(node);
-       if (!buf)
-               buf = of_pdt_get_one_property(node, "name");
-
-       return buf;
-}
-
 static struct device_node * __init of_pdt_create_node(phandle node,
                                                    struct device_node *parent)
 {
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
 
        kref_init(&dp->kref);
 
-       dp->name = of_pdt_build_name(node);
+       dp->name = of_pdt_get_one_property(node, "name");
        dp->type = of_pdt_get_one_property(node, "device_type");
        dp->phandle = node;
 
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
        return dp;
 }
 
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
-       int len, ourlen, plen;
-       char *n;
-
-       plen = strlen(dp->parent->full_name);
-       ourlen = strlen(of_pdt_node_name(dp));
-       len = ourlen + plen + 2;
-
-       n = prom_early_alloc(len);
-       strcpy(n, dp->parent->full_name);
-       if (!of_node_is_root(dp->parent)) {
-               strcpy(n + plen, "/");
-               plen++;
-       }
-       strcpy(n + plen, of_pdt_node_name(dp));
-
-       return n;
-}
-
 static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
                                                   phandle node,
                                                   struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
                *(*nextp) = dp;
                *nextp = &dp->allnext;
 
-#if defined(CONFIG_SPARC)
-               dp->path_component_name = build_path_component(dp);
-#endif
                dp->full_name = of_pdt_build_full_name(dp);
 
                dp->child = of_pdt_build_tree(dp,
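
On the non-SPARC side, of_pdt_build_full_name now uses the usual PROM
ask-then-fill idiom: call pkg2path once to learn the required length, then
again with a buffer of exactly that size, and fall back to a generated
"@unknownN" name if either call fails. A user-space analogue of the same
idiom built on snprintf; all names and values here are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* Build "<parent>/<name>" by first measuring, then allocating exactly
 * enough and formatting for real -- the same two-pass pattern. */
static char *build_full_name(const char *parent, const char *name)
{
	int len = snprintf(NULL, 0, "%s/%s", parent, name);
	char *buf;

	if (len < 0)
		return NULL;
	buf = malloc(len + 1);
	if (!buf)
		return NULL;
	snprintf(buf, len + 1, "%s/%s", parent, name);
	return buf;
}

int main(void)
{
	char *full = build_full_name("/pci@1f,0", "ethernet@1");

	printf("%s\n", full ? full : "(failed)");
	free(full);
	return 0;
}
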
index a2d9d1e..a848e02 100644 (file)
@@ -678,7 +678,7 @@ void parport_unregister_device(struct pardevice *dev)
 
        /* Make sure we haven't left any pointers around in the wait
         * list. */
-       spin_lock (&port->waitlist_lock);
+       spin_lock_irq(&port->waitlist_lock);
        if (dev->waitprev || dev->waitnext || port->waithead == dev) {
                if (dev->waitprev)
                        dev->waitprev->waitnext = dev->waitnext;
@@ -689,7 +689,7 @@ void parport_unregister_device(struct pardevice *dev)
                else
                        port->waittail = dev->waitprev;
        }
-       spin_unlock (&port->waitlist_lock);
+       spin_unlock_irq(&port->waitlist_lock);
 
        kfree(dev->state);
        kfree(dev);
index 8ecaac9..ea25e5b 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include "pci.h"
@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
        u8 *data = (u8*) buf;
 
        /* Several chips lock up trying to read undefined config space */
-       if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
+       if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
                size = dev->cfg_size;
        } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
                size = 128;
index 0bdda5b..42fbf1a 100644 (file)
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
                flags |= CONF_ENABLE_IOCARD;
        if (flags & CONF_ENABLE_IOCARD)
                s->socket.flags |= SS_IOCARD;
+       if (flags & CONF_ENABLE_ZVCARD)
+               s->socket.flags |= SS_ZVCARD | SS_IOCARD;
        if (flags & CONF_ENABLE_SPKR) {
                s->socket.flags |= SS_SPKR_ENA;
                status = CCSR_AUDIO_ENA;
index 3755e7c..2c54054 100644 (file)
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
 {
        struct pcmcia_low_level *ops = dev->platform_data;
        /*
index bb62ea8..b609b45 100644 (file)
@@ -1,3 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
 
index c3f7219..a520395 100644 (file)
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void)
 {
        int ret;
 
+       if (!machine_is_colibri() && !machine_is_colibri320())
+               return -ENODEV;
+
        colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!colibri_pcmcia_device)
                return -ENOMEM;
index b9f8c8f..25afe63 100644 (file)
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
                lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
                pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+               pxa2xx_configure_sockets(&sadev->dev);
                ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
                                pxa2xx_drv_pcmcia_add_one);
        }
index d163bc2..a59af5b 100644 (file)
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
 config IDEAPAD_LAPTOP
        tristate "Lenovo IdeaPad Laptop Extras"
        depends on ACPI
-       depends on RFKILL
+       depends on RFKILL && INPUT
        select INPUT_SPARSEKMAP
        help
          This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
index c5c4b8c..38b34a7 100644 (file)
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
  */
 #define AMW0_GUID1             "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
 #define AMW0_GUID2             "431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1             "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1             "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
 #define WMID_GUID2             "95764E09-FB56-4e83-B31A-37761F60994A"
 #define WMID_GUID3             "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
 
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
                        return -EINVAL;
        return count;
 }
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
        set_bool_threeg);
 
 static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
index 4633fd8..fe49593 100644 (file)
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
        struct proc_dir_entry *proc;
        mode_t mode;
 
-       /*
-        * If parameter uid or gid is not changed, keep the default setting for
-        * our proc entries (-rw-rw-rw-) else, it means we care about security,
-        * and then set to -rw-rw----
-        */
-
        if ((asus_uid == 0) && (asus_gid == 0)) {
-               mode = S_IFREG | S_IRUGO | S_IWUGO;
+               mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
        } else {
                mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
                printk(KERN_WARNING "  asus_uid and asus_gid parameters are "
index 34657f9..ad24ef3 100644 (file)
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
        dell_send_request(buffer, 17, 11);
 
        /* If the hardware switch controls this radio, and the hardware
-          switch is disabled, don't allow changing the software state */
+          switch is disabled, don't allow changing the software state.
+          If the hardware switch is reported as not supported, always
+          fire the SMI to toggle the killswitch. */
        if ((hwswitch_state & BIT(hwswitch_bit)) &&
-           !(buffer->output[1] & BIT(16))) {
+           !(buffer->output[1] & BIT(16)) &&
+           (buffer->output[1] & BIT(0))) {
                ret = -EINVAL;
                goto out;
        }
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
 
 static void dell_update_rfkill(struct work_struct *ignored)
 {
+       int status;
+
+       get_buffer();
+       dell_send_request(buffer, 17, 11);
+       status = buffer->output[1];
+       release_buffer();
+
+       /* if hardware rfkill is not supported, set it explicitly */
+       if (!(status & BIT(0))) {
+               if (wifi_rfkill)
+                       dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
+               if (bluetooth_rfkill)
+                       dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
+               if (wwan_rfkill)
+                       dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
+       }
+
        if (wifi_rfkill)
                dell_rfkill_query(wifi_rfkill, (void *)1);
        if (bluetooth_rfkill)
index 930e627..61433d4 100644 (file)
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
 #define GPOSW_DOU 0x08
 #define GPOSW_RDRV 0x30
 
+#define GPIO_UPDATE_TYPE       0x80000000
 
 #define NUM_GPIO 24
 
-struct pmic_gpio_irq {
-       spinlock_t lock;
-       u32 trigger[NUM_GPIO];
-       u32 dirty;
-       struct work_struct work;
-};
-
-
 struct pmic_gpio {
+       struct mutex            buslock;
        struct gpio_chip        chip;
-       struct pmic_gpio_irq    irqtypes;
        void                    *gpiointr;
        int                     irq;
        unsigned                irq_base;
+       unsigned int            update_type;
+       u32                     trigger_type;
 };
 
-static void pmic_program_irqtype(int gpio, int type)
-{
-       if (type & IRQ_TYPE_EDGE_RISING)
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
-       else
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
-       if (type & IRQ_TYPE_EDGE_FALLING)
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
-       else
-               intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static void pmic_irqtype_work(struct work_struct *work)
-{
-       struct pmic_gpio_irq *t =
-               container_of(work, struct pmic_gpio_irq, work);
-       unsigned long flags;
-       int i;
-       u16 type;
-
-       spin_lock_irqsave(&t->lock, flags);
-       /* As we drop the lock, we may need multiple scans if we race the
-          pmic_irq_type function */
-       while (t->dirty) {
-               /*
-                *      For each pin that has the dirty bit set send an IPC
-                *      message to configure the hardware via the PMIC
-                */
-               for (i = 0; i < NUM_GPIO; i++) {
-                       if (!(t->dirty & (1 << i)))
-                               continue;
-                       t->dirty &= ~(1 << i);
-                       /* We can't trust the array entry or dirty
-                          once the lock is dropped */
-                       type = t->trigger[i];
-                       spin_unlock_irqrestore(&t->lock, flags);
-                       pmic_program_irqtype(i, type);
-                       spin_lock_irqsave(&t->lock, flags);
-               }
-       }
-       spin_unlock_irqrestore(&t->lock, flags);
-}
-
 static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
                        1 << (offset - 16));
 }
 
-static int pmic_irq_type(unsigned irq, unsigned type)
+/*
+ * This is called from genirq with pg->buslock locked and
+ * irq_desc->lock held. We cannot access the SCU bus here, so we
+ * store the change and apply it in the bus_sync_unlock() function below.
+ */
+static int pmic_irq_type(struct irq_data *data, unsigned type)
 {
-       struct pmic_gpio *pg = get_irq_chip_data(irq);
-       u32 gpio = irq - pg->irq_base;
-       unsigned long flags;
+       struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+       u32 gpio = data->irq - pg->irq_base;
 
        if (gpio >= pg->chip.ngpio)
                return -EINVAL;
 
-       spin_lock_irqsave(&pg->irqtypes.lock, flags);
-       pg->irqtypes.trigger[gpio] = type;
-       pg->irqtypes.dirty |=  (1 << gpio);
-       spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
-       schedule_work(&pg->irqtypes.work);
+       pg->trigger_type = type;
+       pg->update_type = gpio | GPIO_UPDATE_TYPE;
        return 0;
 }
 
-
-
 static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
        struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 }
 
 /* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(unsigned irq)
-{
-};
+static void pmic_irq_unmask(struct irq_data *data) { }
 
-static void pmic_irq_mask(unsigned irq)
-{
-};
+static void pmic_irq_mask(struct irq_data *data) { }
 
 static struct irq_chip pmic_irqchip = {
        .name           = "PMIC-GPIO",
-       .mask           = pmic_irq_mask,
-       .unmask         = pmic_irq_unmask,
-       .set_type       = pmic_irq_type,
+       .irq_mask       = pmic_irq_mask,
+       .irq_unmask     = pmic_irq_unmask,
+       .irq_set_type   = pmic_irq_type,
 };
 
-static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
+static irqreturn_t pmic_irq_handler(int irq, void *data)
 {
-       struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
+       struct pmic_gpio *pg = data;
        u8 intsts = *((u8 *)pg->gpiointr + 4);
        int gpio;
+       irqreturn_t ret = IRQ_NONE;
 
        for (gpio = 0; gpio < 8; gpio++) {
                if (intsts & (1 << gpio)) {
                        pr_debug("pmic pin %d triggered\n", gpio);
                        generic_handle_irq(pg->irq_base + gpio);
+                       ret = IRQ_HANDLED;
                }
        }
-
-       if (desc->chip->irq_eoi)
-               desc->chip->irq_eoi(irq_get_irq_data(irq));
-       else
-               dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
+       return ret;
 }
 
 static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
        pg->chip.can_sleep = 1;
        pg->chip.dev = dev;
 
-       INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
-       spin_lock_init(&pg->irqtypes.lock);
+       mutex_init(&pg->buslock);
 
        pg->chip.dev = dev;
        retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
                printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
                goto err;
        }
-       set_irq_data(pg->irq, pg);
-       set_irq_chained_handler(pg->irq, pmic_irq_handler);
+
+       retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
+       if (retval) {
+               printk(KERN_WARNING "pmic: Interrupt request failed\n");
+               goto err;
+       }
+
        for (i = 0; i < 8; i++) {
                set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
                                        handle_simple_irq, "demux");
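
The comment in pmic_irq_type above explains the deferred-update scheme: the
trigger change is only recorded while the interrupt-path locks are held, and
the sleeping SCU bus write happens later from the irq_chip bus-sync path of
the full patch. A single-threaded user-space analogue of that
record-under-fast-lock, apply-after-unlock split; every name here is
invented, and the usleep() stands in for sleeping bus I/O (build with
-pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t fast_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int pending_gpio, pending_type, have_update;

/* Fast path: just record what should change (mirrors pmic_irq_type). */
static void request_trigger(unsigned int gpio, unsigned int type)
{
	pthread_mutex_lock(&fast_lock);
	pending_gpio = gpio;
	pending_type = type;
	have_update = 1;
	pthread_mutex_unlock(&fast_lock);
}

/* Slow path: pick up the recorded change outside the fast lock and do the
 * "bus" write, which is allowed to sleep here. */
static void sync_to_bus(void)
{
	unsigned int gpio, type, doit;

	pthread_mutex_lock(&fast_lock);
	gpio = pending_gpio;
	type = pending_type;
	doit = have_update;
	have_update = 0;
	pthread_mutex_unlock(&fast_lock);

	if (doit) {
		usleep(1000);	/* stand-in for a sleeping IPC register write */
		printf("programmed gpio %u trigger 0x%x\n", gpio, type);
	}
}

int main(void)
{
	request_trigger(3, 0x3);	/* e.g. both edges */
	sync_to_bus();
	return 0;
}
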
index 1752ef0..a91d510 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/sfi.h>
 #include <asm/mrst.h>
 #include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
 
 /* IPC defines the following message types */
 #define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
@@ -161,7 +160,7 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
 {
        int i, nc, bytes, d;
        u32 offset = 0;
-       u32 err = 0;
+       int err;
        u8 cbuf[IPC_WWBUF_SIZE] = { };
        u32 *wbuf = (u32 *)&cbuf;
 
@@ -404,7 +403,7 @@ EXPORT_SYMBOL(intel_scu_ipc_update_register);
  */
 int intel_scu_ipc_simple_command(int cmd, int sub)
 {
-       u32 err = 0;
+       int err;
 
        mutex_lock(&ipclock);
        if (ipcdev.pdev == NULL) {
@@ -434,8 +433,7 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
                                                        u32 *out, int outlen)
 {
-       u32 err = 0;
-       int i = 0;
+       int i, err;
 
        mutex_lock(&ipclock);
        if (ipcdev.pdev == NULL) {
index ba3231d..b93a032 100644 (file)
@@ -128,6 +128,6 @@ static void __exit ipc_module_exit(void)
 module_init(ipc_module_init);
 module_exit(ipc_module_exit);
 
-MODULE_LICENSE("GPL V2");
+MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Utility driver for intel scu ipc");
 MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
index 1fe0f1f..865ef78 100644 (file)
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
                        return -EINVAL; \
        return count; \
 } \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
        show_bool_##value, set_bool_##value);
 
 show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
index dd59958..eb99223 100644 (file)
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
        if (keycode != KEY_RESERVED) {
                mutex_lock(&tpacpi_inputdev_send_mutex);
 
+               input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
                input_report_key(tpacpi_inputdev, keycode, 1);
-               if (keycode == KEY_UNKNOWN)
-                       input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-                                   scancode);
                input_sync(tpacpi_inputdev);
 
+               input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
                input_report_key(tpacpi_inputdev, keycode, 0);
-               if (keycode == KEY_UNKNOWN)
-                       input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-                                   scancode);
                input_sync(tpacpi_inputdev);
 
                mutex_unlock(&tpacpi_inputdev_send_mutex);
index 2728469..82583b0 100644 (file)
@@ -46,8 +46,6 @@ static void pps_ktimer_event(unsigned long ptr)
        /* First of all we get the time stamp... */
        pps_get_ts(&ts);
 
-       dev_info(pps->dev, "PPS event at %lu\n", jiffies);
-
        pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
 
        mod_timer(&ktimer, jiffies + HZ);
index 32221ef..c571d6d 100644 (file)
@@ -163,7 +163,7 @@ static void parport_attach(struct parport *port)
        }
 
        device->pardev = parport_register_device(port, KBUILD_MODNAME,
-                       NULL, NULL, parport_irq, 0, device);
+                       NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
        if (!device->pardev) {
                pr_err("couldn't register with %s\n", port->name);
                goto err_free;
index f3a73dd..e4c4f3d 100644 (file)
@@ -6,7 +6,7 @@ comment "PPS generators support"
 
 config PPS_GENERATOR_PARPORT
        tristate "Parallel port PPS signal generator"
-       depends on PARPORT
+       depends on PARPORT && BROKEN
        help
          If you say yes here you get support for a PPS signal generator which
          utilizes the STROBE pin of a parallel port to send PPS signals. It uses
index 5c32f8d..b93af3e 100644 (file)
@@ -198,7 +198,7 @@ static void parport_attach(struct parport *port)
        }
 
        device.pardev = parport_register_device(port, KBUILD_MODNAME,
-                       NULL, NULL, NULL, 0, &device);
+                       NULL, NULL, NULL, PARPORT_FLAG_EXCL, &device);
        if (!device.pardev) {
                pr_err("couldn't register with %s\n", port->name);
                return;
index cba1b43..a4e8eb9 100644 (file)
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
 {
        unsigned long flags;
        int captured = 0;
-       struct pps_ktime ts_real;
+       struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
 
        /* check event type */
        BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
index 467e82b..a50391b 100644 (file)
@@ -943,6 +943,8 @@ static int rio_enum_complete(struct rio_mport *port)
  * @port: Master port to send transactions
  * @destid: Current destination ID in network
  * @hopcount: Number of hops into the network
+ * @prev: previous rio_dev
+ * @prev_port: previous port number
  *
  * Recursively discovers a RIO network.  Transactions are sent via the
  * master port passed in @port.
index 76b4185..1269fbd 100644 (file)
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
 
        /* Several chips lock up trying to read undefined config space */
        if (capable(CAP_SYS_ADMIN))
-               size = 0x200000;
+               size = RIO_MAINT_SPACE_SZ;
 
-       if (off > size)
+       if (off >= size)
                return 0;
        if (off + count > size) {
                size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
        loff_t init_off = off;
        u8 *data = (u8 *) buf;
 
-       if (off > 0x200000)
+       if (off >= RIO_MAINT_SPACE_SZ)
                return 0;
-       if (off + count > 0x200000) {
-               size = 0x200000 - off;
+       if (off + count > RIO_MAINT_SPACE_SZ) {
+               size = RIO_MAINT_SPACE_SZ - off;
                count = size;
        }
 
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
                 .name = "config",
                 .mode = S_IRUGO | S_IWUSR,
                 },
-       .size = 0x200000,
+       .size = RIO_MAINT_SPACE_SZ,
        .read = rio_read_config,
        .write = rio_write_config,
 };
index f53d31b..2bb5de1 100644 (file)
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
 
        dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
 
-       BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+       BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
 
        return mc13xxx_regulators[id].voltages[val];
 }
index 8b0d2c4..06df898 100644 (file)
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_IDLE;
        default:
                BUG();
+               return -EINVAL;
        }
 }
 
index 9583cbc..c404b61 100644 (file)
@@ -143,6 +143,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
        rtc->id = id;
        rtc->ops = ops;
        rtc->owner = owner;
+       rtc->irq_freq = 1;
        rtc->max_user_freq = 64;
        rtc->dev.parent = dev;
        rtc->dev.class = rtc_class;
index 90384b9..cb2f072 100644 (file)
@@ -16,6 +16,9 @@
 #include <linux/log2.h>
 #include <linux/workqueue.h>
 
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
 static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
        int err;
@@ -120,12 +123,18 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
-       alarm->enabled = rtc->aie_timer.enabled;
-       if (alarm->enabled)
+       if (rtc->ops == NULL)
+               err = -ENODEV;
+       else if (!rtc->ops->read_alarm)
+               err = -EINVAL;
+       else {
+               memset(alarm, 0, sizeof(struct rtc_wkalrm));
+               alarm->enabled = rtc->aie_timer.enabled;
                alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
+       }
        mutex_unlock(&rtc->ops_lock);
 
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
@@ -175,16 +184,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                return err;
        if (rtc->aie_timer.enabled) {
                rtc_timer_remove(rtc, &rtc->aie_timer);
-               rtc->aie_timer.enabled = 0;
        }
        rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
        rtc->aie_timer.period = ktime_set(0, 0);
        if (alarm->enabled) {
-               rtc->aie_timer.enabled = 1;
-               rtc_timer_enqueue(rtc, &rtc->aie_timer);
+               err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
        }
        mutex_unlock(&rtc->ops_lock);
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
@@ -195,16 +202,15 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
                return err;
 
        if (rtc->aie_timer.enabled != enabled) {
-               if (enabled) {
-                       rtc->aie_timer.enabled = 1;
-                       rtc_timer_enqueue(rtc, &rtc->aie_timer);
-               } else {
+               if (enabled)
+                       err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+               else
                        rtc_timer_remove(rtc, &rtc->aie_timer);
-                       rtc->aie_timer.enabled = 0;
-               }
        }
 
-       if (!rtc->ops)
+       if (err)
+               /* nothing */;
+       else if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->alarm_irq_enable)
                err = -EINVAL;
@@ -222,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
        if (err)
                return err;
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       if (enabled == 0 && rtc->uie_irq_active) {
+               mutex_unlock(&rtc->ops_lock);
+               return rtc_dev_update_irq_enable_emul(rtc, 0);
+       }
+#endif
        /* make sure we're changing state */
        if (rtc->uie_rtctimer.enabled == enabled)
                goto out;
@@ -235,15 +247,22 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
                now = rtc_tm_to_ktime(tm);
                rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
                rtc->uie_rtctimer.period = ktime_set(1, 0);
-               rtc->uie_rtctimer.enabled = 1;
-               rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
-       } else {
+               err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+       } else
                rtc_timer_remove(rtc, &rtc->uie_rtctimer);
-               rtc->uie_rtctimer.enabled = 0;
-       }
 
 out:
        mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       /*
+        * Enable emulation if the driver did not provide an
+        * update_irq_enable op, or if it returned -EINVAL to signal
+        * that update interrupts are either not configured or not
+        * available at the moment.
+        */
+       if (err == -EINVAL)
+               err = rtc_dev_update_irq_enable_emul(rtc, enabled);
+#endif
        return err;
 
 }
@@ -259,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
  *
  * Triggers the registered irq_task function callback.
  */
-static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
 {
        unsigned long flags;
 
@@ -460,6 +479,9 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
        int err = 0;
        unsigned long flags;
 
+       if (freq <= 0)
+               return -EINVAL;
+
        spin_lock_irqsave(&rtc->irq_task_lock, flags);
        if (rtc->irq_task != NULL && task == NULL)
                err = -EBUSY;
@@ -488,10 +510,13 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
  * Enqueues a timer onto the rtc device's timerqueue and sets
  * the next alarm event appropriately.
  *
+ * Sets the enabled bit on the added timer.
+ *
  * Must hold ops_lock for proper serialization of timerqueue
  */
-void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
 {
+       timer->enabled = 1;
        timerqueue_add(&rtc->timerqueue, &timer->node);
        if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
                struct rtc_wkalrm alarm;
@@ -501,7 +526,13 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        schedule_work(&rtc->irqwork);
+               else if (err) {
+                       timerqueue_del(&rtc->timerqueue, &timer->node);
+                       timer->enabled = 0;
+                       return err;
+               }
        }
+       return 0;
 }
 
 /**
@@ -512,13 +543,15 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
  * Removes a timer from the rtc device's timerqueue and sets
  * the next alarm event appropriately.
  *
+ * Clears the enabled bit on the removed timer.
+ *
  * Must hold ops_lock for proper serialization of timerqueue
  */
-void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
 {
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
        timerqueue_del(&rtc->timerqueue, &timer->node);
-
+       timer->enabled = 0;
        if (next == &timer->node) {
                struct rtc_wkalrm alarm;
                int err;
@@ -626,8 +659,7 @@ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
        timer->node.expires = expires;
        timer->period = period;
 
-       timer->enabled = 1;
-       rtc_timer_enqueue(rtc, timer);
+       ret = rtc_timer_enqueue(rtc, timer);
 
        mutex_unlock(&rtc->ops_lock);
        return ret;
@@ -645,7 +677,6 @@ int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
        mutex_lock(&rtc->ops_lock);
        if (timer->enabled)
                rtc_timer_remove(rtc, timer);
-       timer->enabled = 0;
        mutex_unlock(&rtc->ops_lock);
        return ret;
 }
index b2752b6..e725d51 100644 (file)
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        return ret;
 }
 
-static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
-                       unsigned long arg)
+static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
        int ret = 0;
 
        spin_lock_irq(&rtc->lock);
 
-       switch (cmd) {
-       case RTC_AIE_ON:
+       if (enabled) {
                if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
                        ret = -EINVAL;
-                       break;
+                       goto out;
                }
                rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
                                | RTC_BIT(CTRL_TOPEN));
                rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
                rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
-               break;
-       case RTC_AIE_OFF:
+       } else {
                rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
                                & ~RTC_BIT(CTRL_TOPEN));
                rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
                rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
-               break;
-       default:
-               ret = -ENOIOCTLCMD;
-               break;
        }
-
+out:
        spin_unlock_irq(&rtc->lock);
 
        return ret;
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
 }
 
 static struct rtc_class_ops at32_rtc_ops = {
-       .ioctl          = at32_rtc_ioctl,
        .read_time      = at32_rtc_readtime,
        .set_time       = at32_rtc_settime,
        .read_alarm     = at32_rtc_readalarm,
        .set_alarm      = at32_rtc_setalarm,
+       .alarm_irq_enable = at32_rtc_alarm_irq_enable,
 };
 
 static int __init at32_rtc_probe(struct platform_device *pdev)
index bc8bbca..26d1cf5 100644 (file)
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
 
        /* important:  scrub old status before enabling IRQs */
        switch (cmd) {
-       case RTC_AIE_OFF:       /* alarm off */
-               at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
-               break;
-       case RTC_AIE_ON:        /* alarm on */
-               at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
-               at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
-               break;
        case RTC_UIE_OFF:       /* update off */
                at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
                break;
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
        return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       pr_debug("%s(): cmd=%08x\n", __func__, enabled);
+
+       if (enabled) {
+               at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+               at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+       } else
+               at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+
+       return 0;
+}
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
        .read_alarm     = at91_rtc_readalarm,
        .set_alarm      = at91_rtc_setalarm,
        .proc           = at91_rtc_proc,
+       .alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
index f677e07..5469c52 100644 (file)
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
        dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
 
        switch (cmd) {
-       case RTC_AIE_OFF:               /* alarm off */
-               rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
-               break;
-       case RTC_AIE_ON:                /* alarm on */
-               rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
-               break;
        case RTC_UIE_OFF:               /* update off */
                rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
                break;
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
        return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct sam9_rtc *rtc = dev_get_drvdata(dev);
+       u32 mr = rtt_readl(rtc, MR);
+
+       dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
+       if (enabled)
+               rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+       else
+               rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+       return 0;
+}
+
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
        .read_alarm     = at91_rtc_readalarm,
        .set_alarm      = at91_rtc_setalarm,
        .proc           = at91_rtc_proc,
+       .alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
index b4b6087..17971d9 100644 (file)
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
                bfin_rtc_int_clear(~RTC_ISTAT_SEC);
                break;
 
-       case RTC_AIE_ON:
-               dev_dbg_stamp(dev);
-               bfin_rtc_int_set_alarm(rtc);
-               break;
-       case RTC_AIE_OFF:
-               dev_dbg_stamp(dev);
-               bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
-               break;
-
        default:
                dev_dbg_stamp(dev);
                ret = -ENOIOCTLCMD;
@@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
        return ret;
 }
 
+static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct bfin_rtc *rtc = dev_get_drvdata(dev);
+
+       dev_dbg_stamp(dev);
+       if (enabled)
+               bfin_rtc_int_set_alarm(rtc);
+       else
+               bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+       return 0;
+}
+
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = {
        .read_alarm    = bfin_rtc_read_alarm,
        .set_alarm     = bfin_rtc_set_alarm,
        .proc          = bfin_rtc_proc,
+       .alarm_irq_enable = bfin_rtc_alarm_irq_enable,
 };
 
 static int __devinit bfin_rtc_probe(struct platform_device *pdev)
index 212b16e..d0e06ed 100644 (file)
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
        return err;
 }
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+/*
+ * Routine to poll the RTC seconds field for a change as often as possible;
+ * after the first RTC_UIE event, a timer is used to reduce the polling rate.
+ */
+static void rtc_uie_task(struct work_struct *work)
+{
+       struct rtc_device *rtc =
+               container_of(work, struct rtc_device, uie_task);
+       struct rtc_time tm;
+       int num = 0;
+       int err;
+
+       err = rtc_read_time(rtc, &tm);
+
+       spin_lock_irq(&rtc->irq_lock);
+       if (rtc->stop_uie_polling || err) {
+               rtc->uie_task_active = 0;
+       } else if (rtc->oldsecs != tm.tm_sec) {
+               num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
+               rtc->oldsecs = tm.tm_sec;
+               rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+               rtc->uie_timer_active = 1;
+               rtc->uie_task_active = 0;
+               add_timer(&rtc->uie_timer);
+       } else if (schedule_work(&rtc->uie_task) == 0) {
+               rtc->uie_task_active = 0;
+       }
+       spin_unlock_irq(&rtc->irq_lock);
+       if (num)
+               rtc_handle_legacy_irq(rtc, num, RTC_UF);
+}
+static void rtc_uie_timer(unsigned long data)
+{
+       struct rtc_device *rtc = (struct rtc_device *)data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rtc->irq_lock, flags);
+       rtc->uie_timer_active = 0;
+       rtc->uie_task_active = 1;
+       if ((schedule_work(&rtc->uie_task) == 0))
+               rtc->uie_task_active = 0;
+       spin_unlock_irqrestore(&rtc->irq_lock, flags);
+}
+
+static int clear_uie(struct rtc_device *rtc)
+{
+       spin_lock_irq(&rtc->irq_lock);
+       if (rtc->uie_irq_active) {
+               rtc->stop_uie_polling = 1;
+               if (rtc->uie_timer_active) {
+                       spin_unlock_irq(&rtc->irq_lock);
+                       del_timer_sync(&rtc->uie_timer);
+                       spin_lock_irq(&rtc->irq_lock);
+                       rtc->uie_timer_active = 0;
+               }
+               if (rtc->uie_task_active) {
+                       spin_unlock_irq(&rtc->irq_lock);
+                       flush_scheduled_work();
+                       spin_lock_irq(&rtc->irq_lock);
+               }
+               rtc->uie_irq_active = 0;
+       }
+       spin_unlock_irq(&rtc->irq_lock);
+       return 0;
+}
+
+static int set_uie(struct rtc_device *rtc)
+{
+       struct rtc_time tm;
+       int err;
+
+       err = rtc_read_time(rtc, &tm);
+       if (err)
+               return err;
+       spin_lock_irq(&rtc->irq_lock);
+       if (!rtc->uie_irq_active) {
+               rtc->uie_irq_active = 1;
+               rtc->stop_uie_polling = 0;
+               rtc->oldsecs = tm.tm_sec;
+               rtc->uie_task_active = 1;
+               if (schedule_work(&rtc->uie_task) == 0)
+                       rtc->uie_task_active = 0;
+       }
+       rtc->irq_data = 0;
+       spin_unlock_irq(&rtc->irq_lock);
+       return 0;
+}
+
+int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
+{
+       if (enabled)
+               return set_uie(rtc);
+       else
+               return clear_uie(rtc);
+}
+EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
+
+#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -154,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file,
        if (err)
                goto done;
 
-       /* try the driver's ioctl interface */
-       if (ops->ioctl) {
-               err = ops->ioctl(rtc->dev.parent, cmd, arg);
-               if (err != -ENOIOCTLCMD) {
-                       mutex_unlock(&rtc->ops_lock);
-                       return err;
-               }
-       }
-
-       /* if the driver does not provide the ioctl interface
-        * or if that particular ioctl was not implemented
-        * (-ENOIOCTLCMD), we will try to emulate here.
-        *
+       /*
         * Drivers *SHOULD NOT* provide ioctl implementations
         * for these requests.  Instead, provide methods to
         * support the following code, so that the RTC's main
@@ -329,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file,
                return err;
 
        default:
-               err = -ENOTTY;
+               /* Finally try the driver's ioctl interface */
+               if (ops->ioctl) {
+                       err = ops->ioctl(rtc->dev.parent, cmd, arg);
+                       if (err == -ENOIOCTLCMD)
+                               err = -ENOTTY;
+               }
                break;
        }
 
@@ -394,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
 
        rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       INIT_WORK(&rtc->uie_task, rtc_uie_task);
+       setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+#endif
+
        cdev_init(&rtc->char_dev, &rtc_dev_fops);
        rtc->char_dev.owner = rtc->owner;
 }
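
The re-added UIE emulation polls the RTC seconds field and, once it changes,
re-arms its timer at jiffies + HZ - HZ/10 so the next poll lands just before
the following rollover. A rough user-space sketch of the same pacing using
time() and a 0.9 s sleep; this only illustrates the idea, it is not the
kernel mechanism:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	time_t old = time(NULL);
	int updates = 0;

	while (updates < 3) {
		time_t now = time(NULL);

		if (now != old) {
			/* Seconds rolled over: report an "update" event and
			 * sleep ~0.9 s so the next check lands just before
			 * the following tick. */
			old = now;
			updates++;
			printf("update event #%d\n", updates);
			usleep(900000);
		} else {
			usleep(10000);	/* short nap instead of busy-waiting */
		}
	}
	return 0;
}
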
index bf430f9..60ce696 100644 (file)
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
        __raw_writel(data, &priv->rtcregs[reg]);
 }
 
+
+static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct ds1286_priv *priv = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned char val;
+
+       /* Allow or mask alarm interrupts */
+       spin_lock_irqsave(&priv->lock, flags);
+       val = ds1286_rtc_read(priv, RTC_CMD);
+       if (enabled)
+               val &=  ~RTC_TDM;
+       else
+               val |=  RTC_TDM;
+       ds1286_rtc_write(priv, val, RTC_CMD);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return 0;
+}
+
 #ifdef CONFIG_RTC_INTF_DEV
 
 static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
        unsigned char val;
 
        switch (cmd) {
-       case RTC_AIE_OFF:
-               /* Mask alarm int. enab. bit    */
-               spin_lock_irqsave(&priv->lock, flags);
-               val = ds1286_rtc_read(priv, RTC_CMD);
-               val |=  RTC_TDM;
-               ds1286_rtc_write(priv, val, RTC_CMD);
-               spin_unlock_irqrestore(&priv->lock, flags);
-               break;
-       case RTC_AIE_ON:
-               /* Allow alarm interrupts.      */
-               spin_lock_irqsave(&priv->lock, flags);
-               val = ds1286_rtc_read(priv, RTC_CMD);
-               val &=  ~RTC_TDM;
-               ds1286_rtc_write(priv, val, RTC_CMD);
-               spin_unlock_irqrestore(&priv->lock, flags);
-               break;
        case RTC_WIE_OFF:
                /* Mask watchdog int. enab. bit */
                spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 }
 
 static const struct rtc_class_ops ds1286_ops = {
-       .ioctl          = ds1286_ioctl,
-       .proc           = ds1286_proc,
+       .ioctl          = ds1286_ioctl,
+       .proc           = ds1286_proc,
        .read_time      = ds1286_read_time,
        .set_time       = ds1286_set_time,
        .read_alarm     = ds1286_read_alarm,
        .set_alarm      = ds1286_set_alarm,
+       .alarm_irq_enable = ds1286_alarm_irq_enable,
 };
 
 static int __devinit ds1286_probe(struct platform_device *pdev)
index 077af1d..57fbcc1 100644 (file)
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour)
  * Interface to RTC framework
  */
 
-#ifdef CONFIG_RTC_INTF_DEV
-
-/*
- * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
- */
-static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct ds1305   *ds1305 = dev_get_drvdata(dev);
        u8              buf[2];
-       int             status = -ENOIOCTLCMD;
+       long            err = -EINVAL;
 
        buf[0] = DS1305_WRITE | DS1305_CONTROL;
        buf[1] = ds1305->ctrl[0];
 
-       switch (cmd) {
-       case RTC_AIE_OFF:
-               status = 0;
-               if (!(buf[1] & DS1305_AEI0))
-                       goto done;
-               buf[1] &= ~DS1305_AEI0;
-               break;
-
-       case RTC_AIE_ON:
-               status = 0;
+       if (enabled) {
                if (ds1305->ctrl[0] & DS1305_AEI0)
                        goto done;
                buf[1] |= DS1305_AEI0;
-               break;
-       }
-       if (status == 0) {
-               status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
-                               NULL, 0);
-               if (status >= 0)
-                       ds1305->ctrl[0] = buf[1];
+       } else {
+               if (!(buf[1] & DS1305_AEI0))
+                       goto done;
+               buf[1] &= ~DS1305_AEI0;
        }
-
+       err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
+       if (err >= 0)
+               ds1305->ctrl[0] = buf[1];
 done:
-       return status;
+       return err;
+
 }
 
-#else
-#define ds1305_ioctl   NULL
-#endif
 
 /*
  * Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@ done:
 #endif
 
 static const struct rtc_class_ops ds1305_ops = {
-       .ioctl          = ds1305_ioctl,
        .read_time      = ds1305_get_time,
        .set_time       = ds1305_set_time,
        .read_alarm     = ds1305_get_alarm,
        .set_alarm      = ds1305_set_alarm,
        .proc           = ds1305_proc,
+       .alarm_irq_enable = ds1305_alarm_irq_enable,
 };
 
 static void ds1305_work(struct work_struct *work)
index 0d559b6..4724ba3 100644 (file)
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
        return 0;
 }
 
-static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct i2c_client       *client = to_i2c_client(dev);
        struct ds1307           *ds1307 = i2c_get_clientdata(client);
        int                     ret;
 
-       switch (cmd) {
-       case RTC_AIE_OFF:
-               if (!test_bit(HAS_ALARM, &ds1307->flags))
-                       return -ENOTTY;
-
-               ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~DS1337_BIT_A1IE;
-
-               ret = i2c_smbus_write_byte_data(client,
-                                               DS1337_REG_CONTROL, ret);
-               if (ret < 0)
-                       return ret;
-
-               break;
-
-       case RTC_AIE_ON:
-               if (!test_bit(HAS_ALARM, &ds1307->flags))
-                       return -ENOTTY;
+       if (!test_bit(HAS_ALARM, &ds1307->flags))
+               return -ENOTTY;
 
-               ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-               if (ret < 0)
-                       return ret;
+       ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+       if (ret < 0)
+               return ret;
 
+       if (enabled)
                ret |= DS1337_BIT_A1IE;
+       else
+               ret &= ~DS1337_BIT_A1IE;
 
-               ret = i2c_smbus_write_byte_data(client,
-                                               DS1337_REG_CONTROL, ret);
-               if (ret < 0)
-                       return ret;
-
-               break;
-
-       default:
-               return -ENOIOCTLCMD;
-       }
+       ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
        .set_time       = ds1307_set_time,
        .read_alarm     = ds1337_read_alarm,
        .set_alarm      = ds1337_set_alarm,
-       .ioctl          = ds1307_ioctl,
+       .alarm_irq_enable = ds1307_alarm_irq_enable,
 };
 
 /*----------------------------------------------------------------------*/
index 47fb635..d834a63 100644 (file)
@@ -307,42 +307,25 @@ unlock:
        mutex_unlock(&ds1374->mutex);
 }
 
-static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct ds1374 *ds1374 = i2c_get_clientdata(client);
-       int ret = -ENOIOCTLCMD;
+       int ret;
 
        mutex_lock(&ds1374->mutex);
 
-       switch (cmd) {
-       case RTC_AIE_OFF:
-               ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-               if (ret < 0)
-                       goto out;
-
-               ret &= ~DS1374_REG_CR_WACE;
-
-               ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-               if (ret < 0)
-                       goto out;
-
-               break;
-
-       case RTC_AIE_ON:
-               ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-               if (ret < 0)
-                       goto out;
+       ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+       if (ret < 0)
+               goto out;
 
+       if (enabled) {
                ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
                ret &= ~DS1374_REG_CR_WDALM;
-
-               ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-               if (ret < 0)
-                       goto out;
-
-               break;
+       } else {
+               ret &= ~DS1374_REG_CR_WACE;
        }
+       ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
 
 out:
        mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
        .set_time = ds1374_set_time,
        .read_alarm = ds1374_read_alarm,
        .set_alarm = ds1374_set_alarm,
-       .ioctl = ds1374_ioctl,
+       .alarm_irq_enable = ds1374_alarm_irq_enable,
 };
 
 static int ds1374_probe(struct i2c_client *client,
index 23a9ee1..9507354 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
  * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
                time->tm_hour = bcd2bin(hour);
        }
 
-       time->tm_wday = bcd2bin(week);
+       /* Day of the week: Linux tm_wday is 0-6 while the RTC chip uses 1-7 */
+       time->tm_wday = bcd2bin(week) - 1;
        time->tm_mday = bcd2bin(day);
-       time->tm_mon = bcd2bin(month & 0x7F);
+       /* Month: Linux tm_mon is 0-11 while the RTC chip uses 1-12 */
+       time->tm_mon = bcd2bin(month & 0x7F) - 1;
        if (century)
                add_century = 100;
 
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
        buf[0] = bin2bcd(time->tm_sec);
        buf[1] = bin2bcd(time->tm_min);
        buf[2] = bin2bcd(time->tm_hour);
-       buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+       /* Day of the week: Linux tm_wday is 0-6 while the RTC chip uses 1-7 */
+       buf[3] = bin2bcd(time->tm_wday + 1);
        buf[4] = bin2bcd(time->tm_mday); /* Date */
-       buf[5] = bin2bcd(time->tm_mon);
+       /* Month: Linux tm_mon is 0-11 while the RTC chip uses 1-12 */
+       buf[5] = bin2bcd(time->tm_mon + 1);
        if (time->tm_year >= 100) {
                buf[5] |= 0x80;
                buf[6] = bin2bcd(time->tm_year - 100);
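The two ds3232 hunks above correct the offset between struct rtc_time (tm_wday 0-6, tm_mon 0-11) and the chip's BCD registers (1-7 and 1-12). A standalone illustration of that mapping; bin2bcd()/bcd2bin() are re-implemented here only to keep the sketch self-contained (the kernel's helpers live in <linux/bcd.h>):

/* Standalone sketch of the range fix above. */
#include <stdio.h>

static unsigned char bin2bcd(unsigned int val) { return ((val / 10) << 4) | (val % 10); }
static unsigned int bcd2bin(unsigned char val) { return ((val >> 4) * 10) + (val & 0x0f); }

int main(void)
{
	unsigned int tm_mon = 2;			/* March in Linux terms (0-11) */
	unsigned char reg = bin2bcd(tm_mon + 1);	/* 0x03 written to the chip */
	unsigned int back = bcd2bin(reg) - 1;		/* 2 again when reading back */

	printf("tm_mon=%u -> reg=0x%02x -> tm_mon=%u\n", tm_mon, (unsigned)reg, back);
	return 0;
}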
index 5a8daa3..69fe664 100644 (file)
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
        return m41t80_set_datetime(to_i2c_client(dev), tm);
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-static int
-m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct i2c_client *client = to_i2c_client(dev);
        int rc;
 
-       switch (cmd) {
-       case RTC_AIE_OFF:
-       case RTC_AIE_ON:
-               break;
-       default:
-               return -ENOIOCTLCMD;
-       }
-
        rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
        if (rc < 0)
                goto err;
-       switch (cmd) {
-       case RTC_AIE_OFF:
-               rc &= ~M41T80_ALMON_AFE;
-               break;
-       case RTC_AIE_ON:
+
+       if (enabled)
                rc |= M41T80_ALMON_AFE;
-               break;
-       }
+       else
+               rc &= ~M41T80_ALMON_AFE;
+
        if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
                goto err;
+
        return 0;
 err:
        return -EIO;
 }
-#else
-#define        m41t80_rtc_ioctl NULL
-#endif
 
 static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = {
        .read_alarm = m41t80_rtc_read_alarm,
        .set_alarm = m41t80_rtc_set_alarm,
        .proc = m41t80_rtc_proc,
-       .ioctl = m41t80_rtc_ioctl,
+       .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
index a99a0b5..3978f4c 100644 (file)
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 /*
  * Handle commands from user-space
  */
-static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
-                       unsigned long arg)
+static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct m48t59_plat_data *pdata = pdev->dev.platform_data;
        struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
        unsigned long flags;
-       int ret = 0;
 
        spin_lock_irqsave(&m48t59->lock, flags);
-       switch (cmd) {
-       case RTC_AIE_OFF:       /* alarm interrupt off */
-               M48T59_WRITE(0x00, M48T59_INTR);
-               break;
-       case RTC_AIE_ON:        /* alarm interrupt on */
+       if (enabled)
                M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
-               break;
-       default:
-               ret = -ENOIOCTLCMD;
-               break;
-       }
+       else
+               M48T59_WRITE(0x00, M48T59_INTR);
        spin_unlock_irqrestore(&m48t59->lock, flags);
 
-       return ret;
+       return 0;
 }
 
 static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
 }
 
 static const struct rtc_class_ops m48t59_rtc_ops = {
-       .ioctl          = m48t59_rtc_ioctl,
        .read_time      = m48t59_rtc_read_time,
        .set_time       = m48t59_rtc_set_time,
        .read_alarm     = m48t59_rtc_readalarm,
        .set_alarm      = m48t59_rtc_setalarm,
        .proc           = m48t59_rtc_proc,
+       .alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
 };
 
 static const struct rtc_class_ops m48t02_rtc_ops = {
index bcd0cf6..1db62db 100644 (file)
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled)
        return 0;
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
 /* Currently, the vRTC doesn't support UIE ON/OFF */
-static int
-mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct mrst_rtc *mrst = dev_get_drvdata(dev);
        unsigned long   flags;
 
-       switch (cmd) {
-       case RTC_AIE_OFF:
-       case RTC_AIE_ON:
-               if (!mrst->irq)
-                       return -EINVAL;
-               break;
-       default:
-               /* PIE ON/OFF is handled by mrst_irq_set_state() */
-               return -ENOIOCTLCMD;
-       }
-
        spin_lock_irqsave(&rtc_lock, flags);
-       switch (cmd) {
-       case RTC_AIE_OFF:       /* alarm off */
-               mrst_irq_disable(mrst, RTC_AIE);
-               break;
-       case RTC_AIE_ON:        /* alarm on */
+       if (enabled)
                mrst_irq_enable(mrst, RTC_AIE);
-               break;
-       }
+       else
+               mrst_irq_disable(mrst, RTC_AIE);
        spin_unlock_irqrestore(&rtc_lock, flags);
        return 0;
 }
 
-#else
-#define        mrst_rtc_ioctl  NULL
-#endif
 
 #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
 
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq)
 #endif
 
 static const struct rtc_class_ops mrst_rtc_ops = {
-       .ioctl          = mrst_rtc_ioctl,
        .read_time      = mrst_read_time,
        .set_time       = mrst_set_time,
        .read_alarm     = mrst_read_alarm,
        .set_alarm      = mrst_set_alarm,
        .proc           = mrst_procfs,
        .irq_set_state  = mrst_irq_set_state,
+       .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
 };
 
 static struct mrst_rtc mrst_rtc;
index b2fff0c..6782062 100644 (file)
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv,
 static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
                                unsigned int reg)
 {
-       return __raw_writel(val, &priv->regs[reg]);
+       __raw_writel(val, &priv->regs[reg]);
 }
 
 static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
index bcca472..60627a7 100644 (file)
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        return 0;
 }
 
-static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
-                       unsigned long arg)
+static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
 
        if (pdata->irq < 0)
-               return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
-       switch (cmd) {
-       case RTC_AIE_OFF:
-               writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-               break;
-       case RTC_AIE_ON:
+               return -EINVAL; /* fall back into rtc-dev's emulation */
+
+       if (enabled)
                writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-               break;
-       default:
-               return -ENOIOCTLCMD;
-       }
+       else
+               writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
        return 0;
 }
 
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
        .set_time       = mv_rtc_set_time,
        .read_alarm     = mv_rtc_read_alarm,
        .set_alarm      = mv_rtc_set_alarm,
-       .ioctl          = mv_rtc_ioctl,
+       .alarm_irq_enable = mv_rtc_alarm_irq_enable,
 };
 
 static int __devinit mv_rtc_probe(struct platform_device *pdev)
index e72b523..b4dbf3a 100644 (file)
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
        u8 reg;
 
        switch (cmd) {
-       case RTC_AIE_OFF:
-       case RTC_AIE_ON:
        case RTC_UIE_OFF:
        case RTC_UIE_ON:
                break;
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
        rtc_wait_not_busy();
        reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
        switch (cmd) {
-       /* AIE = Alarm Interrupt Enable */
-       case RTC_AIE_OFF:
-               reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
-               break;
-       case RTC_AIE_ON:
-               reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
-               break;
        /* UIE = Update Interrupt Enable (1/second) */
        case RTC_UIE_OFF:
                reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 #define        omap_rtc_ioctl  NULL
 #endif
 
+static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       u8 reg;
+
+       local_irq_disable();
+       rtc_wait_not_busy();
+       reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+       if (enabled)
+               reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+       else
+               reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+       rtc_wait_not_busy();
+       rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+       local_irq_enable();
+
+       return 0;
+}
+
 /* this hardware doesn't support "don't care" alarm fields */
 static int tm2bcd(struct rtc_time *tm)
 {
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = {
        .set_time       = omap_rtc_set_time,
        .read_alarm     = omap_rtc_read_alarm,
        .set_alarm      = omap_rtc_set_alarm,
+       .alarm_irq_enable = omap_rtc_alarm_irq_enable,
 };
 
 static int omap_rtc_alarm;
index c086fc3..242bbf8 100644 (file)
@@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
 
 static int rtc_proc_open(struct inode *inode, struct file *file)
 {
+       int ret;
        struct rtc_device *rtc = PDE(inode)->data;
 
        if (!try_module_get(THIS_MODULE))
                return -ENODEV;
 
-       return single_open(file, rtc_proc_show, rtc);
+       ret = single_open(file, rtc_proc_show, rtc);
+       if (ret)
+               module_put(THIS_MODULE);
+       return ret;
 }
 
 static int rtc_proc_release(struct inode *inode, struct file *file)
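The rtc-proc hunk above closes a reference leak: try_module_get() has already succeeded when single_open() fails, so the failure path must give the reference back. The same "undo what was already taken" shape in a standalone sketch with made-up stand-ins (grab_ref/do_open are not kernel APIs):

#include <stdio.h>

/* stand-ins: grab_ref() ~ try_module_get(), do_open() ~ single_open() */
static int grab_ref(void)  { return 1; }
static void drop_ref(void) { puts("reference dropped"); }
static int do_open(void)   { return -1; /* pretend the second step fails */ }

static int proc_open_like(void)
{
	int ret;

	if (!grab_ref())
		return -1;

	ret = do_open();
	if (ret)
		drop_ref();	/* the fix: undo the reference on failure */
	return ret;
}

int main(void)
{
	printf("proc_open_like() = %d\n", proc_open_like());
	return 0;
}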
index 36eb661..694da39 100644 (file)
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
 static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
                                unsigned int reg)
 {
-       return __raw_writel(val, &priv->regs[reg]);
+       __raw_writel(val, &priv->regs[reg]);
 }
 
 static void rp5c01_lock(struct rp5c01_priv *priv)
index dd14e20..6aaa155 100644 (file)
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
                if (rs5c->type == rtc_rs5c372a
                                && (buf & RS5C372A_CTRL1_SL1))
                        return -ENOIOCTLCMD;
-       case RTC_AIE_OFF:
-       case RTC_AIE_ON:
-               /* these irq management calls only make sense for chips
-                * which are wired up to an IRQ.
-                */
-               if (!rs5c->has_irq)
-                       return -ENOIOCTLCMD;
-               break;
        default:
                return -ENOIOCTLCMD;
        }
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 
        addr = RS5C_ADDR(RS5C_REG_CTRL1);
        switch (cmd) {
-       case RTC_AIE_OFF:       /* alarm off */
-               buf &= ~RS5C_CTRL1_AALE;
-               break;
-       case RTC_AIE_ON:        /* alarm on */
-               buf |= RS5C_CTRL1_AALE;
-               break;
        case RTC_UIE_OFF:       /* update off */
                buf &= ~RS5C_CTRL1_CT_MASK;
                break;
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 #endif
 
 
+static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct i2c_client       *client = to_i2c_client(dev);
+       struct rs5c372          *rs5c = i2c_get_clientdata(client);
+       unsigned char           buf;
+       int                     status, addr;
+
+       buf = rs5c->regs[RS5C_REG_CTRL1];
+
+       if (!rs5c->has_irq)
+               return -EINVAL;
+
+       status = rs5c_get_regs(rs5c);
+       if (status < 0)
+               return status;
+
+       addr = RS5C_ADDR(RS5C_REG_CTRL1);
+       if (enabled)
+               buf |= RS5C_CTRL1_AALE;
+       else
+               buf &= ~RS5C_CTRL1_AALE;
+
+       if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
+               printk(KERN_WARNING "%s: can't update alarm\n",
+                       rs5c->rtc->name);
+               status = -EIO;
+       } else
+               rs5c->regs[RS5C_REG_CTRL1] = buf;
+
+       return status;
+}
+
+
 /* NOTE:  Since RTC_WKALM_{RD,SET} were originally defined for EFI,
  * which only exposes a polled programming interface; and since
  * these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
        .set_time       = rs5c372_rtc_set_time,
        .read_alarm     = rs5c_read_alarm,
        .set_alarm      = rs5c_set_alarm,
+       .alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
index cf953ec..b80fa28 100644 (file)
@@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
 }
 
 /* Update control registers */
-static void s3c_rtc_setaie(int to)
+static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
 {
        unsigned int tmp;
 
-       pr_debug("%s: aie=%d\n", __func__, to);
+       pr_debug("%s: aie=%d\n", __func__, enabled);
 
        tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
 
-       if (to)
+       if (enabled)
                tmp |= S3C2410_RTCALM_ALMEN;
 
        writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
+
+       return 0;
 }
 
 static int s3c_rtc_setpie(struct device *dev, int enabled)
@@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        writeb(alrm_en, base + S3C2410_RTCALM);
 
-       s3c_rtc_setaie(alrm->enabled);
+       s3c_rtc_setaie(dev, alrm->enabled);
 
        return 0;
 }
@@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
        rtc_device_unregister(rtc);
 
        s3c_rtc_setpie(&dev->dev, 0);
-       s3c_rtc_setaie(0);
+       s3c_rtc_setaie(&dev->dev, 0);
 
        clk_disable(rtc_clk);
        clk_put(rtc_clk);
index 88ea52b..5dfe5ff 100644 (file)
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
                unsigned long arg)
 {
        switch (cmd) {
-       case RTC_AIE_OFF:
-               spin_lock_irq(&sa1100_rtc_lock);
-               RTSR &= ~RTSR_ALE;
-               spin_unlock_irq(&sa1100_rtc_lock);
-               return 0;
-       case RTC_AIE_ON:
-               spin_lock_irq(&sa1100_rtc_lock);
-               RTSR |= RTSR_ALE;
-               spin_unlock_irq(&sa1100_rtc_lock);
-               return 0;
        case RTC_UIE_OFF:
                spin_lock_irq(&sa1100_rtc_lock);
                RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
        return -ENOIOCTLCMD;
 }
 
+static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       spin_lock_irq(&sa1100_rtc_lock);
+       if (enabled)
+               RTSR |= RTSR_ALE;
+       else
+               RTSR &= ~RTSR_ALE;
+       spin_unlock_irq(&sa1100_rtc_lock);
+       return 0;
+}
+
 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
        .proc = sa1100_rtc_proc,
        .irq_set_freq = sa1100_irq_set_freq,
        .irq_set_state = sa1100_irq_set_state,
+       .alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
 };
 
 static int sa1100_rtc_probe(struct platform_device *pdev)
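Several of these conversions (m48t59, mrst, sa1100) share the same shape: take the device lock, set or clear a single alarm-enable bit in a status register, release the lock. A generic standalone sketch of that read-modify-write-under-a-lock pattern, using a pthread mutex in place of the kernel spinlock (all names and the register bit are invented):

#include <pthread.h>
#include <stdio.h>

#define ALARM_ENABLE	0x80		/* invented bit in an invented register */

static unsigned int status_reg;
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static int alarm_irq_enable(unsigned int enabled)
{
	pthread_mutex_lock(&reg_lock);
	if (enabled)
		status_reg |= ALARM_ENABLE;
	else
		status_reg &= ~ALARM_ENABLE;
	pthread_mutex_unlock(&reg_lock);
	return 0;
}

int main(void)
{
	alarm_irq_enable(1);
	printf("status register: 0x%02x\n", status_reg);
	alarm_irq_enable(0);
	printf("status register: 0x%02x\n", status_reg);
	return 0;
}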
index 06e41ed..93314a9 100644 (file)
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
        unsigned int ret = 0;
 
        switch (cmd) {
-       case RTC_AIE_OFF:
-       case RTC_AIE_ON:
-               sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
-               break;
        case RTC_UIE_OFF:
                rtc->periodic_freq &= ~PF_OXS;
                sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
        return ret;
 }
 
+static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       sh_rtc_setaie(dev, enabled);
+       return 0;
+}
+
 static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = {
        .irq_set_state  = sh_rtc_irq_set_state,
        .irq_set_freq   = sh_rtc_irq_set_freq,
        .proc           = sh_rtc_proc,
+       .alarm_irq_enable = sh_rtc_alarm_irq_enable,
 };
 
 static int __init sh_rtc_probe(struct platform_device *pdev)
index 51725f7..a82d6fe 100644 (file)
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq)
        return 0;
 }
 
-static int test_rtc_ioctl(struct device *dev, unsigned int cmd,
-       unsigned long arg)
+static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
-       /* We do support interrupts, they're generated
-        * using the sysfs interface.
-        */
-       switch (cmd) {
-       case RTC_PIE_ON:
-       case RTC_PIE_OFF:
-       case RTC_UIE_ON:
-       case RTC_UIE_OFF:
-       case RTC_AIE_ON:
-       case RTC_AIE_OFF:
-               return 0;
-
-       default:
-               return -ENOIOCTLCMD;
-       }
+       return 0;
 }
 
 static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = {
        .read_alarm = test_rtc_read_alarm,
        .set_alarm = test_rtc_set_alarm,
        .set_mmss = test_rtc_set_mmss,
-       .ioctl = test_rtc_ioctl,
+       .alarm_irq_enable = test_rtc_alarm_irq_enable,
 };
 
 static ssize_t test_irq_show(struct device *dev,
index c324424..769190a 100644 (file)
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
 static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
        switch (cmd) {
-       case RTC_AIE_ON:
-               spin_lock_irq(&rtc_lock);
-
-               if (!alarm_enabled) {
-                       enable_irq(aie_irq);
-                       alarm_enabled = 1;
-               }
-
-               spin_unlock_irq(&rtc_lock);
-               break;
-       case RTC_AIE_OFF:
-               spin_lock_irq(&rtc_lock);
-
-               if (alarm_enabled) {
-                       disable_irq(aie_irq);
-                       alarm_enabled = 0;
-               }
-
-               spin_unlock_irq(&rtc_lock);
-               break;
        case RTC_EPOCH_READ:
                return put_user(epoch, (unsigned long __user *)arg);
        case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
        return 0;
 }
 
+static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       spin_lock_irq(&rtc_lock);
+       if (enabled) {
+               if (!alarm_enabled) {
+                       enable_irq(aie_irq);
+                       alarm_enabled = 1;
+               }
+       } else {
+               if (alarm_enabled) {
+                       disable_irq(aie_irq);
+                       alarm_enabled = 0;
+               }
+       }
+       spin_unlock_irq(&rtc_lock);
+       return 0;
+}
+
 static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
 {
        struct platform_device *pdev = (struct platform_device *)dev_id;
index 4155805..2b771f1 100644 (file)
@@ -319,6 +319,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 
        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
+       /* nothing to do if already disconnected */
+       if (!lcu)
+               return;
        device->discipline->get_uid(device, &uid);
        spin_lock_irqsave(&lcu->lock, flags);
        list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@ int dasd_alias_remove_device(struct dasd_device *device)
 
        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
+       /* nothing to do if already removed */
+       if (!lcu)
+               return 0;
        spin_lock_irqsave(&lcu->lock, flags);
        _remove_device_from_lcu(lcu, device);
        spin_unlock_irqrestore(&lcu->lock, flags);
index 318672d..a9fe23d 100644 (file)
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
 static struct ccw_device_id dasd_eckd_ids[] = {
        { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
        { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
-       { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+       { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
        { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
        { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
        { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
index c881a14..1f6a4d8 100644 (file)
@@ -62,8 +62,8 @@ static int xpram_devs;
 /*
  * Parameter parsing functions.
  */
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
 
 module_param(devs, int, 0);
 module_param_array(sizes, charp, NULL, 0);
index 8cd58e4..5ad44da 100644 (file)
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
          unsigned int cmd, unsigned long arg)
 {
        void __user *argp;
-       int ct, perm;
+       unsigned int ct;
+       int perm;
 
        argp = (void __user *)arg;
 
index 7a242f0..267b54e 100644 (file)
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
        return rc;
 }
 
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+       request->callback = (void *) tape_free_request;
+       request->callback_data = NULL;
+       tape_do_io_async(device, request);
+}
+
 extern int tape_oper_handler(int irq, int status);
 extern void tape_noper_handler(int irq, int status);
 extern int tape_open(struct tape_device *);
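tape_do_io_async_free() is the helper the asynchronous conversions below build on: the request is handed off with a completion callback that frees it, so the submitter never sleeps waiting for the channel program to finish. A loose userspace analogy of that fire-and-forget shape, using pthreads rather than the s390 channel I/O API (all names here are invented):

/* Sketch only: "submit and let the completion callback free the request".
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct request {
	int op;
	void (*callback)(struct request *);
};

static void free_request(struct request *req)
{
	printf("request op=%d finished, freeing it\n", req->op);
	free(req);
}

static void *do_io(void *arg)			/* stands in for the real I/O */
{
	struct request *req = arg;

	sleep(1);
	req->callback(req);			/* completion path cleans up */
	return NULL;
}

static void do_io_async_free(struct request *req)
{
	pthread_t tid;

	req->callback = free_request;
	if (pthread_create(&tid, NULL, do_io, req) == 0)
		pthread_detach(tid);		/* nobody joins: fire and forget */
	else
		free_request(req);		/* fall back to immediate cleanup */
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->op = 42;
	do_io_async_free(req);			/* returns immediately */
	sleep(2);				/* give the worker time to run */
	return 0;
}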
index c17f35b..c265111 100644 (file)
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
  * Medium sense for 34xx tapes. There is no 'real' medium sense call.
  * So we just do a normal sense.
  */
-static int
-tape_34xx_medium_sense(struct tape_device *device)
+static void __tape_34xx_medium_sense(struct tape_request *request)
 {
-       struct tape_request *request;
-       unsigned char       *sense;
-       int                  rc;
-
-       request = tape_alloc_request(1, 32);
-       if (IS_ERR(request)) {
-               DBF_EXCEPTION(6, "MSEN fail\n");
-               return PTR_ERR(request);
-       }
-
-       request->op = TO_MSEN;
-       tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+       struct tape_device *device = request->device;
+       unsigned char *sense;
 
-       rc = tape_do_io_interruptible(device, request);
        if (request->rc == 0) {
                sense = request->cpdata;
 
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
                        device->tape_generic_status |= GMT_WR_PROT(~0);
                else
                        device->tape_generic_status &= ~GMT_WR_PROT(~0);
-       } else {
+       } else
                DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
                        request->rc);
-       }
        tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+       struct tape_request *request;
+       int rc;
+
+       request = tape_alloc_request(1, 32);
+       if (IS_ERR(request)) {
+               DBF_EXCEPTION(6, "MSEN fail\n");
+               return PTR_ERR(request);
+       }
 
+       request->op = TO_MSEN;
+       tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+       rc = tape_do_io_interruptible(device, request);
+       __tape_34xx_medium_sense(request);
        return rc;
 }
 
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = tape_alloc_request(1, 32);
+       if (IS_ERR(request)) {
+               DBF_EXCEPTION(6, "MSEN fail\n");
+               return;
+       }
+
+       request->op = TO_MSEN;
+       tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+       request->callback = (void *) __tape_34xx_medium_sense;
+       request->callback_data = NULL;
+       tape_do_io_async(device, request);
+}
+
 struct tape_34xx_work {
        struct tape_device      *device;
        enum tape_op             op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
  * is inserted but cannot call tape_do_io* from an interrupt context.
  * Maybe that's useful for other actions we want to start from the
  * interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur, e.g. in case of a deferred cc=1 (see __tape_do_irq).
  */
 static void
 tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
 
        switch(p->op) {
                case TO_MSEN:
-                       tape_34xx_medium_sense(device);
+                       tape_34xx_medium_sense_async(device);
                        break;
                default:
                        DBF_EVENT(3, "T34XX: internal error: unknown work\n");
index fbe361f..de2e99e 100644 (file)
@@ -329,17 +329,17 @@ out:
 /*
  * Enable encryption
  */
-static int tape_3592_enable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
 {
        struct tape_request *request;
        char *data;
 
        DBF_EVENT(6, "tape_3592_enable_crypt\n");
        if (!crypt_supported(device))
-               return -ENOSYS;
+               return ERR_PTR(-ENOSYS);
        request = tape_alloc_request(2, 72);
        if (IS_ERR(request))
-               return PTR_ERR(request);
+               return request;
        data = request->cpdata;
        memset(data,0,72);
 
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
        request->op = TO_CRYPT_ON;
        tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
        tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+       return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_enable_crypt(device);
+       if (IS_ERR(request))
+               return PTR_ERR(request);
        return tape_do_io_free(device, request);
 }
 
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_enable_crypt(device);
+       if (!IS_ERR(request))
+               tape_do_io_async_free(device, request);
+}
+
 /*
  * Disable encryption
  */
-static int tape_3592_disable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
 {
        struct tape_request *request;
        char *data;
 
        DBF_EVENT(6, "tape_3592_disable_crypt\n");
        if (!crypt_supported(device))
-               return -ENOSYS;
+               return ERR_PTR(-ENOSYS);
        request = tape_alloc_request(2, 72);
        if (IS_ERR(request))
-               return PTR_ERR(request);
+               return request;
        data = request->cpdata;
        memset(data,0,72);
 
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
        tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
        tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
 
+       return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_disable_crypt(device);
+       if (IS_ERR(request))
+               return PTR_ERR(request);
        return tape_do_io_free(device, request);
 }
 
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = __tape_3592_disable_crypt(device);
+       if (!IS_ERR(request))
+               tape_do_io_async_free(device, request);
+}
+
 /*
  * IOCTL: Set encryption status
  */
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
 /*
  * SENSE Medium: Get Sense data about medium state
  */
-static int
-tape_3590_sense_medium(struct tape_device *device)
+static int tape_3590_sense_medium(struct tape_device *device)
 {
        struct tape_request *request;
 
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
        return tape_do_io_free(device, request);
 }
 
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+       struct tape_request *request;
+
+       request = tape_alloc_request(1, 128);
+       if (IS_ERR(request))
+               return;
+       request->op = TO_MSEN;
+       tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+       tape_do_io_async_free(device, request);
+}
+
 /*
  * MTTELL: Tell block. Return the number of block relative to current file.
  */
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
  * 2. The attention msg is written to the "read subsystem data" buffer.
  * In this case we probably should print it to the console.
  */
-static int
-tape_3590_read_attmsg(struct tape_device *device)
+static void tape_3590_read_attmsg_async(struct tape_device *device)
 {
        struct tape_request *request;
        char *buf;
 
        request = tape_alloc_request(3, 4096);
        if (IS_ERR(request))
-               return PTR_ERR(request);
+               return;
        request->op = TO_READ_ATTMSG;
        buf = request->cpdata;
        buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
        tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
        tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
        tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
-       return tape_do_io_free(device, request);
+       tape_do_io_async_free(device, request);
 }
 
 /*
  * These functions are used to schedule follow-up actions from within an
  * interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur, e.g. in case of a deferred cc=1 (see __tape_do_irq).
  */
 struct work_handler_data {
        struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
 
        switch (p->op) {
        case TO_MSEN:
-               tape_3590_sense_medium(p->device);
+               tape_3590_sense_medium_async(p->device);
                break;
        case TO_READ_ATTMSG:
-               tape_3590_read_attmsg(p->device);
+               tape_3590_read_attmsg_async(p->device);
                break;
        case TO_CRYPT_ON:
-               tape_3592_enable_crypt(p->device);
+               tape_3592_enable_crypt_async(p->device);
                break;
        case TO_CRYPT_OFF:
-               tape_3592_disable_crypt(p->device);
+               tape_3592_disable_crypt_async(p->device);
                break;
        default:
                DBF_EVENT(3, "T3590: work handler undefined for "
index e9fff2b..5640c89 100644 (file)
@@ -476,7 +476,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 static int get_inbound_buffer_frontier(struct qdio_q *q)
 {
        int count, stop;
-       unsigned char state;
+       unsigned char state = 0;
 
        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@ void qdio_inbound_processing(unsigned long data)
 static int get_outbound_buffer_frontier(struct qdio_q *q)
 {
        int count, stop;
-       unsigned char state;
+       unsigned char state = 0;
 
        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
index 65ebee0..b6a6356 100644 (file)
@@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path,
        struct iucv_event ev;
        int rc;
 
-       if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
+       if (memcmp(iucvMagic, ipuser, 16))
                /* ipuser must match iucvMagic. */
                return -EINVAL;
        rc = -EINVAL;
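The netiucv one-liner above replaces sizeof(ipuser) with the literal 16: ipuser is a parameter of the callback, so its array type has decayed to a pointer and sizeof() yields the pointer size (8 on 64-bit s390), meaning only part of the 16-byte magic string was compared. A standalone demonstration of that C pitfall:

/* Demonstrates why sizeof() on an array parameter is wrong: the parameter
 * decays to a pointer, and most compilers warn about exactly this. */
#include <stdio.h>

static void in_callee(unsigned char ipuser[16])
{
	/* prints the size of a pointer (typically 8), not 16 */
	printf("sizeof(ipuser) in callee:  %zu\n", sizeof(ipuser));
}

int main(void)
{
	unsigned char ipuser[16] = { 0 };

	printf("sizeof(ipuser) at caller:  %zu\n", sizeof(ipuser));	/* 16 */
	in_callee(ipuser);
	return 0;
}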
index f47a714..af3f7b0 100644 (file)
@@ -225,7 +225,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 /*****************************************************************************/
 #define QETH_MAX_QUEUES 4
 #define QETH_IN_BUF_SIZE_DEFAULT 65536
-#define QETH_IN_BUF_COUNT_DEFAULT 16
+#define QETH_IN_BUF_COUNT_DEFAULT 64
+#define QETH_IN_BUF_COUNT_HSDEFAULT 128
 #define QETH_IN_BUF_COUNT_MIN 8
 #define QETH_IN_BUF_COUNT_MAX 128
 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
@@ -741,7 +742,6 @@ struct qeth_card {
        /* QDIO buffer handling */
        struct qeth_qdio_info qdio;
        struct qeth_perf_stats perf_stats;
-       int use_hard_stop;
        int read_or_write_problem;
        struct qeth_osn_info osn_info;
        struct qeth_discipline discipline;
index 29f848b..25eef30 100644 (file)
@@ -302,12 +302,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
        if (rc)
-               QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n",
-                               ipa_name, com, QETH_CARD_IFNAME(card),
-                                       rc, qeth_get_ipa_msg(rc));
+               QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
+                               "x%X \"%s\"\n",
+                               ipa_name, com, dev_name(&card->gdev->dev),
+                               QETH_CARD_IFNAME(card), rc,
+                               qeth_get_ipa_msg(rc));
        else
-               QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n",
-                               ipa_name, com, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
+                               ipa_name, com, dev_name(&card->gdev->dev),
+                               QETH_CARD_IFNAME(card));
 }
 
 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -988,16 +991,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
        chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
        if (chp_dsc != NULL) {
                /* CHPP field bit 6 == 1 -> single queue */
-               if ((chp_dsc->chpp & 0x02) == 0x02)
+               if ((chp_dsc->chpp & 0x02) == 0x02) {
+                       if ((atomic_read(&card->qdio.state) !=
+                               QETH_QDIO_UNINITIALIZED) &&
+                           (card->qdio.no_out_queues == 4))
+                               /* change from 4 to 1 outbound queues */
+                               qeth_free_qdio_buffers(card);
                        card->qdio.no_out_queues = 1;
+                       if (card->qdio.default_out_queue != 0)
+                               dev_info(&card->gdev->dev,
+                                       "Priority Queueing not supported\n");
+                       card->qdio.default_out_queue = 0;
+               } else {
+                       if ((atomic_read(&card->qdio.state) !=
+                               QETH_QDIO_UNINITIALIZED) &&
+                           (card->qdio.no_out_queues == 1)) {
+                               /* change from 1 to 4 outbound queues */
+                               qeth_free_qdio_buffers(card);
+                               card->qdio.default_out_queue = 2;
+                       }
+                       card->qdio.no_out_queues = 4;
+               }
                card->info.func_level = 0x4100 + chp_dsc->desc;
                kfree(chp_dsc);
        }
-       if (card->qdio.no_out_queues == 1) {
-               card->qdio.default_out_queue = 0;
-               dev_info(&card->gdev->dev,
-                       "Priority Queueing not supported\n");
-       }
        QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
        QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
        return;
@@ -1009,7 +1026,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
        atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
        /* inbound */
        card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
-       card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
+       if (card->info.type == QETH_CARD_TYPE_IQD)
+               card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
+       else
+               card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
        card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
        INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
        INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
@@ -1069,7 +1089,6 @@ static int qeth_setup_card(struct qeth_card *card)
        card->data.state  = CH_STATE_DOWN;
        card->state = CARD_STATE_DOWN;
        card->lan_online = 0;
-       card->use_hard_stop = 0;
        card->read_or_write_problem = 0;
        card->dev = NULL;
        spin_lock_init(&card->vlanlock);
@@ -1718,20 +1737,22 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                };
        }
 
+       if (reply->rc == -EIO)
+               goto error;
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
 
 time_err:
+       reply->rc = -ETIME;
        spin_lock_irqsave(&reply->card->lock, flags);
        list_del_init(&reply->list);
        spin_unlock_irqrestore(&reply->card->lock, flags);
-       reply->rc = -ETIME;
        atomic_inc(&reply->received);
+error:
        atomic_set(&card->write.irq_pending, 0);
        qeth_release_buffer(iob->channel, iob);
        card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
-       wake_up(&reply->wait_q);
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
@@ -1832,33 +1853,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
        }
 }
 
-static inline int qeth_get_max_mtu_for_card(int cardtype)
-{
-       switch (cardtype) {
-
-       case QETH_CARD_TYPE_UNKNOWN:
-       case QETH_CARD_TYPE_OSD:
-       case QETH_CARD_TYPE_OSN:
-       case QETH_CARD_TYPE_OSM:
-       case QETH_CARD_TYPE_OSX:
-               return 61440;
-       case QETH_CARD_TYPE_IQD:
-               return 57344;
-       default:
-               return 1500;
-       }
-}
-
-static inline int qeth_get_mtu_out_of_mpc(int cardtype)
-{
-       switch (cardtype) {
-       case QETH_CARD_TYPE_IQD:
-               return 1;
-       default:
-               return 0;
-       }
-}
-
 static inline int qeth_get_mtu_outof_framesize(int framesize)
 {
        switch (framesize) {
@@ -1881,10 +1875,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
        case QETH_CARD_TYPE_OSD:
        case QETH_CARD_TYPE_OSM:
        case QETH_CARD_TYPE_OSX:
-               return ((mtu >= 576) && (mtu <= 61440));
        case QETH_CARD_TYPE_IQD:
                return ((mtu >= 576) &&
-                       (mtu <= card->info.max_mtu + 4096 - 32));
+                       (mtu <= card->info.max_mtu));
        case QETH_CARD_TYPE_OSN:
        case QETH_CARD_TYPE_UNKNOWN:
        default:
@@ -1907,7 +1900,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
        memcpy(&card->token.ulp_filter_r,
               QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
               QETH_MPC_TOKEN_LENGTH);
-       if (qeth_get_mtu_out_of_mpc(card->info.type)) {
+       if (card->info.type == QETH_CARD_TYPE_IQD) {
                memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
                mtu = qeth_get_mtu_outof_framesize(framesize);
                if (!mtu) {
@@ -1915,12 +1908,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
                        QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
                        return 0;
                }
-               card->info.max_mtu = mtu;
+               if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
+                       /* frame size has changed */
+                       if (card->dev &&
+                           ((card->dev->mtu == card->info.initial_mtu) ||
+                            (card->dev->mtu > mtu)))
+                               card->dev->mtu = mtu;
+                       qeth_free_qdio_buffers(card);
+               }
                card->info.initial_mtu = mtu;
+               card->info.max_mtu = mtu;
                card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
        } else {
                card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
-               card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
+               card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
+                       iob->data);
                card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
        }
 
@@ -2495,45 +2497,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
 
-static int qeth_send_startstoplan(struct qeth_card *card,
-               enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
-{
-       int rc;
-       struct qeth_cmd_buffer *iob;
-
-       iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
-       rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-
-       return rc;
-}
-
 int qeth_send_startlan(struct qeth_card *card)
 {
        int rc;
+       struct qeth_cmd_buffer *iob;
 
        QETH_DBF_TEXT(SETUP, 2, "strtlan");
 
-       rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
+       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+       rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_send_startlan);
 
-int qeth_send_stoplan(struct qeth_card *card)
-{
-       int rc = 0;
-
-       /*
-        * TODO: according to the IPA format document page 14,
-        * TCP/IP (we!) never issue a STOPLAN
-        * is this right ?!?
-        */
-       QETH_DBF_TEXT(SETUP, 2, "stoplan");
-
-       rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
-       return rc;
-}
-EXPORT_SYMBOL_GPL(qeth_send_stoplan);
-
 int qeth_default_setadapterparms_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
@@ -3775,6 +3751,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card)
        }
 }
 
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+       int rc;
+       int length;
+       char *prcd;
+       struct ccw_device *ddev;
+       int ddev_offline = 0;
+
+       QETH_DBF_TEXT(SETUP, 2, "detcapab");
+       ddev = CARD_DDEV(card);
+       if (!ddev->online) {
+               ddev_offline = 1;
+               rc = ccw_device_set_online(ddev);
+               if (rc) {
+                       QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+                       goto out;
+               }
+       }
+
+       rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+       if (rc) {
+               QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+                       dev_name(&card->gdev->dev), rc);
+               QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+               goto out_offline;
+       }
+       qeth_configure_unitaddr(card, prcd);
+       qeth_configure_blkt_default(card, prcd);
+       kfree(prcd);
+
+       rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+       if (rc)
+               QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+       if (ddev_offline == 1)
+               ccw_device_set_offline(ddev);
+out:
+       return;
+}
+
 static int qeth_qdio_establish(struct qeth_card *card)
 {
        struct qdio_initialize init_data;
@@ -3905,6 +3922,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
 
        QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
        atomic_set(&card->force_alloc_skb, 0);
+       qeth_get_channel_path_desc(card);
 retry:
        if (retries)
                QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3933,6 +3951,7 @@ retriable:
                else
                        goto retry;
        }
+       qeth_determine_capabilities(card);
        qeth_init_tokens(card);
        qeth_init_func_level(card);
        rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -4202,41 +4221,6 @@ void qeth_core_free_discipline(struct qeth_card *card)
        card->discipline.ccwgdriver = NULL;
 }
 
-static void qeth_determine_capabilities(struct qeth_card *card)
-{
-       int rc;
-       int length;
-       char *prcd;
-
-       QETH_DBF_TEXT(SETUP, 2, "detcapab");
-       rc = ccw_device_set_online(CARD_DDEV(card));
-       if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-               goto out;
-       }
-
-
-       rc = qeth_read_conf_data(card, (void **) &prcd, &length);
-       if (rc) {
-               QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
-                       dev_name(&card->gdev->dev), rc);
-               QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
-               goto out_offline;
-       }
-       qeth_configure_unitaddr(card, prcd);
-       qeth_configure_blkt_default(card, prcd);
-       kfree(prcd);
-
-       rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
-       if (rc)
-               QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-
-out_offline:
-       ccw_device_set_offline(CARD_DDEV(card));
-out:
-       return;
-}
-
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card;
index 2ac8f6a..6fbaacb 100644 (file)
@@ -202,17 +202,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
                kfree(mc);
 }
 
-static void qeth_l2_del_all_mc(struct qeth_card *card)
+static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
 {
        struct qeth_mc_mac *mc, *tmp;
 
        spin_lock_bh(&card->mclock);
        list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
-               if (mc->is_vmac)
-                       qeth_l2_send_setdelmac(card, mc->mc_addr,
+               if (del) {
+                       if (mc->is_vmac)
+                               qeth_l2_send_setdelmac(card, mc->mc_addr,
                                        IPA_CMD_DELVMAC, NULL);
-               else
-                       qeth_l2_send_delgroupmac(card, mc->mc_addr);
+                       else
+                               qeth_l2_send_delgroupmac(card, mc->mc_addr);
+               }
                list_del(&mc->list);
                kfree(mc);
        }
@@ -288,18 +290,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
                                 qeth_l2_send_setdelvlan_cb, NULL);
 }
 
-static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
+static void qeth_l2_process_vlans(struct qeth_card *card)
 {
        struct qeth_vlan_vid *id;
        QETH_CARD_TEXT(card, 3, "L2prcvln");
        spin_lock_bh(&card->vlanlock);
        list_for_each_entry(id, &card->vid_list, list) {
-               if (clear)
-                       qeth_l2_send_setdelvlan(card, id->vid,
-                               IPA_CMD_DELVLAN);
-               else
-                       qeth_l2_send_setdelvlan(card, id->vid,
-                               IPA_CMD_SETVLAN);
+               qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
        }
        spin_unlock_bh(&card->vlanlock);
 }
@@ -379,19 +376,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
                        dev_close(card->dev);
                        rtnl_unlock();
                }
-               if (!card->use_hard_stop ||
-                       recovery_mode) {
-                       __u8 *mac = &card->dev->dev_addr[0];
-                       rc = qeth_l2_send_delmac(card, mac);
-                       QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
-               }
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
                card->state = CARD_STATE_SOFTSETUP;
        }
        if (card->state == CARD_STATE_SOFTSETUP) {
-               qeth_l2_process_vlans(card, 1);
-               if (!card->use_hard_stop ||
-                       recovery_mode)
-                       qeth_l2_del_all_mc(card);
+               qeth_l2_del_all_mc(card, 0);
                qeth_clear_ipacmd_list(card);
                card->state = CARD_STATE_HARDSETUP;
        }
@@ -405,7 +394,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
                qeth_clear_cmd_buffers(&card->read);
                qeth_clear_cmd_buffers(&card->write);
        }
-       card->use_hard_stop = 0;
        return rc;
 }
 
@@ -573,13 +561,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
                case IPA_RC_L2_DUP_LAYER3_MAC:
                        dev_warn(&card->gdev->dev,
                                "MAC address %pM already exists\n",
-                               card->dev->dev_addr);
+                               cmd->data.setdelmac.mac);
                        break;
                case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
                case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
                        dev_warn(&card->gdev->dev,
                                "MAC address %pM is not authorized\n",
-                               card->dev->dev_addr);
+                               cmd->data.setdelmac.mac);
                        break;
                default:
                        break;
@@ -705,7 +693,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
        if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
            (card->state != CARD_STATE_UP))
                return;
-       qeth_l2_del_all_mc(card);
+       qeth_l2_del_all_mc(card, 1);
        spin_lock_bh(&card->mclock);
        netdev_for_each_mc_addr(ha, dev)
                qeth_l2_add_mc(card, ha->addr, 0);
@@ -907,10 +895,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
        qeth_set_allowed_threads(card, 0, 1);
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
-       if (cgdev->state == CCWGROUP_ONLINE) {
-               card->use_hard_stop = 1;
+       if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l2_set_offline(cgdev);
-       }
 
        if (card->dev) {
                unregister_netdev(card->dev);
@@ -1040,7 +1026,7 @@ contin:
 
        if (card->info.type != QETH_CARD_TYPE_OSN &&
            card->info.type != QETH_CARD_TYPE_OSM)
-               qeth_l2_process_vlans(card, 0);
+               qeth_l2_process_vlans(card);
 
        netif_tx_disable(card->dev);
 
@@ -1076,7 +1062,6 @@ contin:
        return 0;
 
 out_remove:
-       card->use_hard_stop = 1;
        qeth_l2_stop_card(card, 0);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
@@ -1144,7 +1129,6 @@ static int qeth_l2_recover(void *ptr)
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
-       card->use_hard_stop = 1;
        __qeth_l2_set_offline(card->gdev, 1);
        rc = __qeth_l2_set_online(card->gdev, 1);
        if (!rc)
@@ -1191,7 +1175,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
        if (gdev->state == CCWGROUP_OFFLINE)
                return 0;
        if (card->state == CARD_STATE_UP) {
-               card->use_hard_stop = 1;
                __qeth_l2_set_offline(card->gdev, 1);
        } else
                __qeth_l2_set_offline(card->gdev, 0);
index d09b0c4..142e5f6 100644 (file)
@@ -510,8 +510,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
        kfree(tbd_list);
 }
 
-static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
-                                       int recover)
+static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
 {
        struct qeth_ipaddr *addr, *tmp;
        unsigned long flags;
@@ -530,11 +529,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
                addr = list_entry(card->ip_list.next,
                                  struct qeth_ipaddr, entry);
                list_del_init(&addr->entry);
-               if (clean) {
-                       spin_unlock_irqrestore(&card->ip_lock, flags);
-                       qeth_l3_deregister_addr_entry(card, addr);
-                       spin_lock_irqsave(&card->ip_lock, flags);
-               }
                if (!recover || addr->is_multicast) {
                        kfree(addr);
                        continue;
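With the clean argument gone, qeth_l3_clear_ip_list() no longer deregisters addresses while emptying the list; it only decides per entry whether to free it or keep it for re-registration after recovery. The purge pattern in isolation, over a plain singly linked list (illustrative types, not the driver's struct qeth_ipaddr):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct ipaddr {
        bool is_multicast;
        struct ipaddr *next;
};

/* Free entries that are not worth re-adding after recovery (here: all
 * multicast entries when 'recover' is set, everything otherwise). */
static struct ipaddr *purge_ip_list(struct ipaddr *head, bool recover)
{
        struct ipaddr *keep = NULL, *next;

        while (head) {
                next = head->next;
                if (!recover || head->is_multicast) {
                        free(head);
                } else {
                        head->next = keep;      /* keep for re-registration */
                        keep = head;
                }
                head = next;
        }
        return keep;
}

int main(void)
{
        struct ipaddr *a = calloc(1, sizeof(*a));
        struct ipaddr *b = calloc(1, sizeof(*b));

        a->is_multicast = true;
        a->next = b;
        struct ipaddr *kept = purge_ip_list(a, true);
        printf("kept %s\n", kept ? "one unicast entry" : "nothing");
        free(kept);
        return 0;
}
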
@@ -1611,29 +1605,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
        return 0;
 }
 
-static int qeth_l3_put_unique_id(struct qeth_card *card)
-{
-
-       int rc = 0;
-       struct qeth_cmd_buffer *iob;
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 2, "puniqeid");
-
-       if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
-               UNIQUE_ID_NOT_BY_CARD)
-               return -1;
-       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
-                                    QETH_PROT_IPV6);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
-                               card->info.unique_id;
-       memcpy(&cmd->data.create_destroy_addr.unique_id[0],
-              card->dev->dev_addr, OSA_ADDR_LEN);
-       rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-       return rc;
-}
-
 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
@@ -2324,25 +2295,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
                        dev_close(card->dev);
                        rtnl_unlock();
                }
-               if (!card->use_hard_stop) {
-                       rc = qeth_send_stoplan(card);
-                       if (rc)
-                               QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-               }
                card->state = CARD_STATE_SOFTSETUP;
        }
        if (card->state == CARD_STATE_SOFTSETUP) {
-               qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
+               qeth_l3_clear_ip_list(card, 1);
                qeth_clear_ipacmd_list(card);
                card->state = CARD_STATE_HARDSETUP;
        }
        if (card->state == CARD_STATE_HARDSETUP) {
-               if (!card->use_hard_stop &&
-                   (card->info.type != QETH_CARD_TYPE_IQD)) {
-                       rc = qeth_l3_put_unique_id(card);
-                       if (rc)
-                               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
-               }
                qeth_qdio_clear_card(card, 0);
                qeth_clear_qdio_buffers(card);
                qeth_clear_working_pool_list(card);
@@ -2352,7 +2312,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
                qeth_clear_cmd_buffers(&card->read);
                qeth_clear_cmd_buffers(&card->write);
        }
-       card->use_hard_stop = 0;
        return rc;
 }
 
@@ -3433,6 +3392,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
                                card->dev->dev_id = card->info.unique_id &
                                                         0xffff;
+                       if (!card->info.guestlan)
+                               card->dev->features |= NETIF_F_GRO;
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
                card->dev = alloc_netdev(0, "hsi%d", ether_setup);
@@ -3471,6 +3432,9 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
        card->discipline.output_handler = (qdio_handler_t *)
                qeth_qdio_output_handler;
        card->discipline.recover = qeth_l3_recover;
+       if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+           (card->info.type == QETH_CARD_TYPE_OSX))
+               card->options.checksum_type = HW_CHECKSUMMING;
        return 0;
 }
 
@@ -3483,17 +3447,15 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        qeth_set_allowed_threads(card, 0, 1);
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
-       if (cgdev->state == CCWGROUP_ONLINE) {
-               card->use_hard_stop = 1;
+       if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l3_set_offline(cgdev);
-       }
 
        if (card->dev) {
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
 
-       qeth_l3_clear_ip_list(card, 0, 0);
+       qeth_l3_clear_ip_list(card, 0);
        qeth_l3_clear_ipato_list(card);
        return;
 }
@@ -3594,7 +3556,6 @@ contin:
        mutex_unlock(&card->discipline_mutex);
        return 0;
 out_remove:
-       card->use_hard_stop = 1;
        qeth_l3_stop_card(card, 0);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
@@ -3663,7 +3624,6 @@ static int qeth_l3_recover(void *ptr)
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
-       card->use_hard_stop = 1;
        __qeth_l3_set_offline(card->gdev, 1);
        rc = __qeth_l3_set_online(card->gdev, 1);
        if (!rc)
@@ -3684,7 +3644,6 @@ static int qeth_l3_recover(void *ptr)
 static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       qeth_l3_clear_ip_list(card, 0, 0);
        qeth_qdio_clear_card(card, 0);
        qeth_clear_qdio_buffers(card);
 }
@@ -3700,7 +3659,6 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
        if (gdev->state == CCWGROUP_OFFLINE)
                return 0;
        if (card->state == CARD_STATE_UP) {
-               card->use_hard_stop = 1;
                __qeth_l3_set_offline(card->gdev, 1);
        } else
                __qeth_l3_set_offline(card->gdev, 0);
index 65e1cf1..207b7d7 100644 (file)
@@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = {
 static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
                             u8 ipuser[16])
 {
-       if (strncmp(ipvmid, "*MSG    ", sizeof(ipvmid)) != 0)
+       if (strncmp(ipvmid, "*MSG    ", 8) != 0)
                return -EINVAL;
        /* Path pending from *MSG. */
        return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL);
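The small smsgiucv change replaces sizeof(ipvmid) with the literal 8. Because an array parameter decays to a pointer, sizeof() there yields the pointer size rather than the 8-byte VM id length, so the old comparison length depended on the architecture. A self-contained demonstration of the pitfall (most compilers also warn about exactly this):

#include <stdio.h>

/* 'id[8]' in a parameter list is really 'unsigned char *id',
 * so sizeof(id) is sizeof(void *), not 8. */
static void show(unsigned char id[8])
{
        printf("sizeof(parameter) = %zu\n", sizeof(id));
}

int main(void)
{
        unsigned char vmid[8] = "*MSG    ";     /* exactly 8 bytes, no NUL kept */

        printf("sizeof(array)     = %zu\n", sizeof(vmid));  /* 8 */
        show(vmid);                             /* pointer size: 4 or 8 */
        return 0;
}
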
index 475c31a..77b26f5 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr.h
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: SCSI RAID Device Driver for
 **                ARECA RAID Host adapter
 *******************************************************************************
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
 #define ARCMSR_MAX_OUTSTANDING_CMD                                             256
-#define ARCMSR_MAX_FREECCB_NUM                                                 320
-#define ARCMSR_DRIVER_VERSION               "Driver Version 1.20.00.15 2010/02/02"
+#ifdef CONFIG_XEN
+       #define ARCMSR_MAX_FREECCB_NUM  160
+#else
+       #define ARCMSR_MAX_FREECCB_NUM  320
+#endif
+#define ARCMSR_DRIVER_VERSION               "Driver Version 1.20.00.15 2010/08/05"
 #define ARCMSR_SCSI_INITIATOR_ID                                               255
 #define ARCMSR_MAX_XFER_SECTORS                                                        512
 #define ARCMSR_MAX_XFER_SECTORS_B                                              4096
@@ -60,7 +64,6 @@ struct device_attribute;
 #define ARCMSR_MAX_HBB_POSTQUEUE                                               264
 #define ARCMSR_MAX_XFER_LEN                                                    0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH                                              256 
-#define SCSI_CMD_ARECA_SPECIFIC                                                0xE1
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880 0x1880
  #endif
index a4e04c5..acdae33 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_attr.c
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: attributes exported to sysfs and device host
 *******************************************************************************
 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
index 1cadcd6..984bd52 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_hba.c
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: SCSI RAID Device Driver for
 **                ARECA RAID Host adapter
 *******************************************************************************
@@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 static int sleeptime = 10;
-static int retrycount = 30;
+static int retrycount = 12;
 wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
                                        struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
                if (isleep > 0) {
                        msleep(isleep*1000);
                }
-               printk(KERN_NOTICE "wake-up\n");
                return 0;
 }
 
@@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
 }
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
-
 {
        int id, lun;
        if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
                                , pCCB->startdone
                                , atomic_read(&acb->ccboutstandingcount));
                  return;
-               }
+       }
        arcmsr_report_ccb_state(acb, pCCB, error);
 }
 
@@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
        case ACB_ADAPTER_TYPE_B: {
                struct MessageUnit_B *reg = acb->pmuB;
                /*clear all outbound posted Q*/
-               writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
+               writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
                for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
                        if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
                                writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
                arcmsr_drain_donequeue(acb, pCCB, error);
        }
 }
-
 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 {
        uint32_t index;
@@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
        if (atomic_read(&acb->ccboutstandingcount) >=
                        ARCMSR_MAX_OUTSTANDING_CMD)
                return SCSI_MLQUEUE_HOST_BUSY;
-       if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
-               printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
-               return 0;
-       }
        ccb = arcmsr_get_freeccb(acb);
        if (!ccb)
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
        int index, rtn;
        bool error;
        polling_hbb_ccb_retry:
+
        poll_count++;
        /* clear doorbell interrupt */
        writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
 {
        struct MessageUnit_A __iomem *reg = acb->pmuA;
        if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                return;
        } else {
                acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
                        atomic_set(&acb->rq_map_token, 16);
                }
                atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-               if (atomic_dec_and_test(&acb->rq_map_token))
+               if (atomic_dec_and_test(&acb->rq_map_token)) {
+                       mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                        return;
+               }
                writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
                mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
        }
@@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
 {
        struct MessageUnit_B __iomem *reg = acb->pmuB;
        if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                return;
        } else {
                acb->fw_flag = FW_NORMAL;
                if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
-                       atomic_set(&acb->rq_map_token,16);
+                       atomic_set(&acb->rq_map_token, 16);
                }
                atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-               if(atomic_dec_and_test(&acb->rq_map_token))
+               if (atomic_dec_and_test(&acb->rq_map_token)) {
+                       mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                        return;
+               }
                writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
                mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
        }
@@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
 {
        struct MessageUnit_C __iomem *reg = acb->pmuC;
        if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                return;
        } else {
                acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
                        atomic_set(&acb->rq_map_token, 16);
                }
                atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-               if (atomic_dec_and_test(&acb->rq_map_token))
+               if (atomic_dec_and_test(&acb->rq_map_token)) {
+                       mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                        return;
+               }
                writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
                writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
                mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
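The three arcmsr request_*_device_map() hunks add mod_timer() calls on the early-return paths so the periodic device-map poll keeps rearming itself even when a round is skipped. The shape of the fix, reduced to a standalone sketch in which rearm() merely stands in for mod_timer():

#include <stdio.h>
#include <stdbool.h>

static int rearms;

static void rearm(void)                 /* stands in for mod_timer(...) */
{
        rearms++;
}

/* Every exit path must rearm, otherwise the periodic poll silently dies
 * the first time a round is skipped (bus reset, token exhausted, ...). */
static void poll_device_map(bool bus_reset, int *token)
{
        if (bus_reset) {
                rearm();
                return;
        }
        if (--(*token) == 0) {
                rearm();
                return;
        }
        printf("issue GET_CONFIG message\n");
        rearm();
}

int main(void)
{
        int token = 1;

        poll_device_map(true, &token);  /* skipped round still rearms */
        poll_device_map(false, &token); /* token hits 0, still rearms */
        printf("timer rearmed %d times\n", rearms);
        return 0;
}
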
@@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
        uint32_t intmask_org;
        uint8_t rtnval = 0x00;
        int i = 0;
+       unsigned long flags;
+
        if (atomic_read(&acb->ccboutstandingcount) != 0) {
                /* disable all outbound interrupt */
                intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
                for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
                        ccb = acb->pccb_pool[i];
                        if (ccb->startdone == ARCMSR_CCB_START) {
-                               arcmsr_ccb_complete(ccb);
+                               scsi_dma_unmap(ccb->pcmd);
+                               ccb->startdone = ARCMSR_CCB_DONE;
+                               ccb->ccb_flags = 0;
+                               spin_lock_irqsave(&acb->ccblist_lock, flags);
+                               list_add_tail(&ccb->list, &acb->ccb_free_list);
+                               spin_unlock_irqrestore(&acb->ccblist_lock, flags);
                        }
                }
                atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 
 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
 {
-       struct AdapterControlBlock *acb =
-               (struct AdapterControlBlock *)cmd->device->host->hostdata;
+       struct AdapterControlBlock *acb;
        uint32_t intmask_org, outbound_doorbell;
        int retry_count = 0;
        int rtn = FAILED;
@@ -2971,31 +2980,16 @@ sleep_again:
                                atomic_set(&acb->rq_map_token, 16);
                                atomic_set(&acb->ante_token_value, 16);
                                acb->fw_flag = FW_NORMAL;
-                               init_timer(&acb->eternal_timer);
-                               acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-                               acb->eternal_timer.data = (unsigned long) acb;
-                               acb->eternal_timer.function = &arcmsr_request_device_map;
-                               add_timer(&acb->eternal_timer);
+                               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                                acb->acb_flags &= ~ACB_F_BUS_RESET;
                                rtn = SUCCESS;
                                printk(KERN_ERR "arcmsr: scsi  bus reset eh returns with success\n");
                        } else {
                                acb->acb_flags &= ~ACB_F_BUS_RESET;
-                               if (atomic_read(&acb->rq_map_token) == 0) {
-                                       atomic_set(&acb->rq_map_token, 16);
-                                       atomic_set(&acb->ante_token_value, 16);
-                                       acb->fw_flag = FW_NORMAL;
-                                       init_timer(&acb->eternal_timer);
-                                               acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-                                       acb->eternal_timer.data = (unsigned long) acb;
-                                       acb->eternal_timer.function = &arcmsr_request_device_map;
-                                       add_timer(&acb->eternal_timer);
-                               } else {
-                                       atomic_set(&acb->rq_map_token, 16);
-                                       atomic_set(&acb->ante_token_value, 16);
-                                       acb->fw_flag = FW_NORMAL;
-                                       mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-                               }
+                               atomic_set(&acb->rq_map_token, 16);
+                               atomic_set(&acb->ante_token_value, 16);
+                               acb->fw_flag = FW_NORMAL;
+                               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
                                rtn = SUCCESS;
                        }
                        break;
@@ -3007,21 +3001,10 @@ sleep_again:
                                rtn = FAILED;
                        } else {
                                acb->acb_flags &= ~ACB_F_BUS_RESET;
-                               if (atomic_read(&acb->rq_map_token) == 0) {
-                                       atomic_set(&acb->rq_map_token, 16);
-                                       atomic_set(&acb->ante_token_value, 16);
-                                       acb->fw_flag = FW_NORMAL;
-                                       init_timer(&acb->eternal_timer);
-                                               acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-                                       acb->eternal_timer.data = (unsigned long) acb;
-                                       acb->eternal_timer.function = &arcmsr_request_device_map;
-                                       add_timer(&acb->eternal_timer);
-                               } else {
-                                       atomic_set(&acb->rq_map_token, 16);
-                                       atomic_set(&acb->ante_token_value, 16);
-                                       acb->fw_flag = FW_NORMAL;
-                                       mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-                               }
+                               atomic_set(&acb->rq_map_token, 16);
+                               atomic_set(&acb->ante_token_value, 16);
+                               acb->fw_flag = FW_NORMAL;
+                               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                                rtn = SUCCESS;
                        }
                        break;
@@ -3067,31 +3050,16 @@ sleep:
                                atomic_set(&acb->rq_map_token, 16);
                                atomic_set(&acb->ante_token_value, 16);
                                acb->fw_flag = FW_NORMAL;
-                               init_timer(&acb->eternal_timer);
-                               acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-                               acb->eternal_timer.data = (unsigned long) acb;
-                               acb->eternal_timer.function = &arcmsr_request_device_map;
-                               add_timer(&acb->eternal_timer);
+                               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
                                acb->acb_flags &= ~ACB_F_BUS_RESET;
                                rtn = SUCCESS;
                                printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
                        } else {
                                acb->acb_flags &= ~ACB_F_BUS_RESET;
-                               if (atomic_read(&acb->rq_map_token) == 0) {
-                                       atomic_set(&acb->rq_map_token, 16);
-                                       atomic_set(&acb->ante_token_value, 16);
-                                       acb->fw_flag = FW_NORMAL;
-                                       init_timer(&acb->eternal_timer);
-                                               acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-                                       acb->eternal_timer.data = (unsigned long) acb;
-                                       acb->eternal_timer.function = &arcmsr_request_device_map;
-                                       add_timer(&acb->eternal_timer);
-                               } else {
-                                       atomic_set(&acb->rq_map_token, 16);
-                                       atomic_set(&acb->ante_token_value, 16);
-                                       acb->fw_flag = FW_NORMAL;
-                                       mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-                               }
+                               atomic_set(&acb->rq_map_token, 16);
+                               atomic_set(&acb->ante_token_value, 16);
+                               acb->fw_flag = FW_NORMAL;
+                               mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
                                rtn = SUCCESS;
                        }
                        break;
index 5cf4e98..11dff23 100644 (file)
@@ -1,6 +1,8 @@
 config SCSI_CXGB3_ISCSI
        tristate "Chelsio T3 iSCSI support"
-       depends on CHELSIO_T3_DEPENDS
+       depends on PCI && INET
+       select NETDEVICES
+       select NETDEV_10000
        select CHELSIO_T3
        select SCSI_ISCSI_ATTRS
        ---help---
index bb94b39..d5302c2 100644 (file)
@@ -1,6 +1,8 @@
 config SCSI_CXGB4_ISCSI
        tristate "Chelsio T4 iSCSI support"
-       depends on CHELSIO_T4_DEPENDS
+       depends on PCI && INET
+       select NETDEVICES
+       select NETDEV_10000
        select CHELSIO_T4
        select SCSI_ISCSI_ATTRS
        ---help---
index d2ad3d6..a24dff9 100644 (file)
@@ -451,26 +451,13 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
 }
 
 static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
-                                       __be16 sport, __be16 dport, u8 tos)
+                                     __be16 sport, __be16 dport, u8 tos)
 {
        struct rtable *rt;
-       struct flowi fl = {
-               .oif = 0,
-               .nl_u = {
-                       .ip4_u = {
-                               .daddr = daddr,
-                               .saddr = saddr,
-                               .tos = tos }
-                       },
-               .proto = IPPROTO_TCP,
-               .uli_u = {
-                       .ports = {
-                               .sport = sport,
-                               .dport = dport }
-                       }
-       };
 
-       if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+       rt = ip_route_output_ports(&init_net, NULL, daddr, saddr,
+                                  dport, sport, IPPROTO_TCP, tos, 0);
+       if (IS_ERR(rt))
                return NULL;
 
        return rt;
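The cxgbi hunk replaces the hand-filled struct flowi with ip_route_output_ports(), whose return value is an error-encoded pointer and must be tested with IS_ERR() instead of against NULL. A tiny userspace imitation of that ERR_PTR()/IS_ERR() convention (a sketch of the idea, not the kernel's implementation):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Encode a small negative errno at the top of the address space,
 * the same trick the kernel's ERR_PTR()/IS_ERR() macros rely on. */
#define MAX_ERRNO 4095
static inline void *err_ptr(long error)    { return (void *)error; }
static inline int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static inline long  ptr_err(const void *p) { return (long)p; }

static void *find_route(int reachable)
{
        static int route = 42;

        return reachable ? (void *)&route : err_ptr(-ENETUNREACH);
}

int main(void)
{
        void *rt = find_route(0);

        if (is_err(rt))
                printf("no route: error %ld\n", ptr_err(rt));
        else
                printf("got route %d\n", *(int *)rt);
        return 0;
}
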
index 9f9600b..3becc6a 100644 (file)
@@ -285,9 +285,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
        }
 
        /* Do not support for bonding device */
-       if ((netdev->priv_flags & IFF_MASTER_ALB) ||
-           (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
-           (netdev->priv_flags & IFF_MASTER_8023AD)) {
+       if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
                FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
                return -EOPNOTSUPP;
        }
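The fcoe check collapses three priv_flags tests into priv_flags & IFF_BONDING && flags & IFF_MASTER, relying on bitwise & binding tighter than && so no extra parentheses are required. A quick check of the parse, with flag values chosen only for illustration:

#include <stdio.h>

#define IFF_MASTER  0x1         /* values illustrative, not the kernel's */
#define IFF_BONDING 0x2

int main(void)
{
        unsigned int flags = IFF_MASTER;
        unsigned int priv_flags = IFF_BONDING;

        /* '&' binds tighter than '&&', so this is
         * (priv_flags & IFF_BONDING) && (flags & IFF_MASTER). */
        if (priv_flags & IFF_BONDING && flags & IFF_MASTER)
                printf("bonding master: reject\n");
        return 0;
}
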
index 5815cbe..9a7aaf5 100644 (file)
@@ -646,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
 
        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+       shost->host_eh_scheduled = 0;
        spin_unlock_irqrestore(shost->host_lock, flags);
 
        SAS_DPRINTK("Enter %s\n", __func__);
index b2a8170..9ead039 100644 (file)
@@ -2176,9 +2176,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
                /* adjust hba_queue_depth, reply_free_queue_depth,
                 * and queue_size
                 */
-               ioc->hba_queue_depth -= queue_diff;
-               ioc->reply_free_queue_depth -= queue_diff;
-               queue_size -= queue_diff;
+               ioc->hba_queue_depth -= (queue_diff / 2);
+               ioc->reply_free_queue_depth -= (queue_diff / 2);
+               queue_size = facts->MaxReplyDescriptorPostQueueDepth;
        }
        ioc->reply_post_queue_depth = queue_size;
 
@@ -3941,6 +3941,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
 static void
 _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 {
+       mpt2sas_scsih_reset_handler(ioc, reset_phase);
+       mpt2sas_ctl_reset_handler(ioc, reset_phase);
        switch (reset_phase) {
        case MPT2_IOC_PRE_RESET:
                dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
                    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
                break;
        }
-       mpt2sas_scsih_reset_handler(ioc, reset_phase);
-       mpt2sas_ctl_reset_handler(ioc, reset_phase);
 }
 
 /**
@@ -4026,6 +4026,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 {
        int r;
        unsigned long flags;
+       u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
        dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
            __func__));
@@ -4068,6 +4069,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
        if (r)
                goto out;
        _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+       /* If this hard reset is called while port enable is active, then
+        * there is no reason to call make_ioc_operational
+        */
+       if (pe_complete) {
+               r = -EFAULT;
+               goto out;
+       }
        r = _base_make_ioc_operational(ioc, sleep_flag);
        if (!r)
                _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
index eda347c..5ded3db 100644 (file)
@@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
 }
 
 /**
- * mptscsih_get_scsi_lookup - returns scmd entry
+ * _scsih_scsi_lookup_get - returns scmd entry
  * @ioc: per adapter object
  * @smid: system request message index
  *
@@ -831,6 +831,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
        return ioc->scsi_lookup[smid - 1].scmd;
 }
 
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the scmd pointer stored for this smid and then clears
+ * the stored entry so the command cannot be completed twice.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+       unsigned long flags;
+       struct scsi_cmnd *scmd;
+
+       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+       scmd = ioc->scsi_lookup[smid - 1].scmd;
+       ioc->scsi_lookup[smid - 1].scmd = NULL;
+       spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+       return scmd;
+}
+
 /**
  * _scsih_scsi_lookup_find_by_scmd - scmd lookup
  * @ioc: per adapter object
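The new _scsih_scsi_lookup_get_clear() helper reads and clears the lookup slot inside a single scsi_lookup_lock critical section, so two completion paths can never both claim the same scmd. The same take-and-clear idiom in portable form, using a pthread mutex purely for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static void *lookup[4];

/* Return the stored pointer and clear the slot atomically with respect
 * to other callers; a second caller for the same slot gets NULL. */
static void *lookup_get_clear(int slot)
{
        void *p;

        pthread_mutex_lock(&lookup_lock);
        p = lookup[slot];
        lookup[slot] = NULL;
        pthread_mutex_unlock(&lookup_lock);
        return p;
}

int main(void)
{
        int cmd = 7;

        lookup[2] = &cmd;
        printf("first  take: %p\n", lookup_get_clear(2));
        printf("second take: %p\n", lookup_get_clear(2));      /* NULL */
        return 0;
}
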
@@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
        u16 handle;
 
        for (i = 0 ; i < event_data->NumEntries; i++) {
-               if (event_data->PHY[i].PhyStatus &
-                   MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
-                       continue;
                handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
                if (!handle)
                        continue;
@@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
        u16 count = 0;
 
        for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-               scmd = _scsih_scsi_lookup_get(ioc, smid);
+               scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
                if (!scmd)
                        continue;
                count++;
@@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        u32 response_code = 0;
 
        mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
-       scmd = _scsih_scsi_lookup_get(ioc, smid);
+       scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
        if (scmd == NULL)
                return 1;
 
@@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
                     event_data);
 #endif
 
+       /* In MPI Revision K (0xC), the internal device reset complete was
+        * implemented, so avoid setting tm_busy flag for older firmware.
+        */
+       if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+               return;
+
        if (event_data->ReasonCode !=
            MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
           event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
     struct fw_event_work *fw_event)
 {
        struct scsi_cmnd *scmd;
+       struct scsi_device *sdev;
        u16 smid, handle;
        u32 lun;
        struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
        Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
 #endif
        u16 ioc_status;
+       unsigned long flags;
+       int r;
+
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
            "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
            event_data->PortWidth));
        dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
            __func__));
 
+       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+       ioc->broadcast_aen_busy = 0;
        termination_count = 0;
        query_count = 0;
        mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
                scmd = _scsih_scsi_lookup_get(ioc, smid);
                if (!scmd)
                        continue;
-               sas_device_priv_data = scmd->device->hostdata;
+               sdev = scmd->device;
+               sas_device_priv_data = sdev->hostdata;
                if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
                        continue;
                 /* skip hidden raid components */
@@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
                lun = sas_device_priv_data->lun;
                query_count++;
 
+               spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
                mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
                    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
                ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
                    (mpi_reply->ResponseCode ==
                     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
                     mpi_reply->ResponseCode ==
-                    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+                    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+                       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
                        continue;
-
-               mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-                   MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
+               }
+               r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+                   sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+                   scmd);
+               if (r == FAILED)
+                       sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+                           "scmd(%p)\n", scmd);
                termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+               spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        }
-       ioc->broadcast_aen_busy = 0;
+       spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
        dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
            "%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev)
                destroy_workqueue(wq);
 
        /* release all the volumes */
+       _scsih_ir_shutdown(ioc);
        list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
            list) {
                if (raid_device->starget) {
index 44578b5..d3e58d7 100644 (file)
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
        struct Scsi_Host *host = rport_to_shost(rport);
        fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+       unsigned long flags;
 
        if (!fcport)
                return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
         * Transport has effectively 'deleted' the rport, clear
         * all local references.
         */
-       spin_lock_irq(host->host_lock);
+       spin_lock_irqsave(host->host_lock, flags);
        fcport->rport = fcport->drport = NULL;
        *((fc_port_t **)rport->dd_data) = NULL;
-       spin_unlock_irq(host->host_lock);
+       spin_unlock_irqrestore(host->host_lock, flags);
 
        if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
                return;
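This and the following qla2xxx hunks convert spin_lock_irq()/spin_unlock_irq() pairs to the irqsave/irqrestore variants because these callbacks can be reached with interrupts already disabled; unconditionally re-enabling interrupts on unlock would clobber the caller's state. A toy model of why restoring the saved state matters (hand-rolled flag, no real locking involved):

#include <stdio.h>
#include <stdbool.h>

static bool irqs_enabled = true;

static void lock_irq(void)                { irqs_enabled = false; }
static void unlock_irq(void)              { irqs_enabled = true; }  /* always re-enables */
static void lock_irqsave(bool *flags)     { *flags = irqs_enabled; irqs_enabled = false; }
static void unlock_irqrestore(bool flags) { irqs_enabled = flags; }

int main(void)
{
        bool outer, inner;

        /* Caller already has interrupts off. */
        lock_irqsave(&outer);

        /* A nested section using the _irq variant re-enables too early... */
        lock_irq();
        unlock_irq();
        printf("after _irq pair:     irqs %s\n", irqs_enabled ? "ON (wrong)" : "off");

        /* ...while the save/restore variant puts the old state back. */
        irqs_enabled = false;
        lock_irqsave(&inner);
        unlock_irqrestore(inner);
        printf("after _irqsave pair: irqs %s\n", irqs_enabled ? "ON" : "off (correct)");

        unlock_irqrestore(outer);
        return 0;
}
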
index f948e1a..d9479c3 100644 (file)
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
        struct fc_rport *rport;
+       unsigned long flags;
 
-       spin_lock_irq(fcport->vha->host->host_lock);
+       spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
        fcport->drport = NULL;
-       spin_unlock_irq(fcport->vha->host->host_lock);
+       spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
        if (rport)
                fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
        struct fc_rport_identifiers rport_ids;
        struct fc_rport *rport;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
 
        qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
                    "Unable to allocate fc remote port!\n");
                return;
        }
-       spin_lock_irq(fcport->vha->host->host_lock);
+       spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        *((fc_port_t **)rport->dd_data) = fcport;
-       spin_unlock_irq(fcport->vha->host->host_lock);
+       spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
        rport->supported_classes = fcport->supported_classes;
 
index c194c23..f27724d 100644 (file)
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
        }
        if (atomic_read(&fcport->state) != FCS_ONLINE) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-                       atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        cmd->result = DID_NO_CONNECT << 16;
                        goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 {
        struct fc_rport *rport;
        scsi_qla_host_t *base_vha;
+       unsigned long flags;
 
        if (!fcport->rport)
                return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
        rport = fcport->rport;
        if (defer) {
                base_vha = pci_get_drvdata(vha->hw->pdev);
-               spin_lock_irq(vha->host->host_lock);
+               spin_lock_irqsave(vha->host->host_lock, flags);
                fcport->drport = rport;
-               spin_unlock_irq(vha->host->host_lock);
+               spin_unlock_irqrestore(vha->host->host_lock, flags);
                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
        } else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
 
        set_user_nice(current, -20);
 
+       set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-               set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                __set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
                qla2x00_do_dpc_all_vps(base_vha);
 
                ha->dpc_active = 0;
+               set_current_state(TASK_INTERRUPTIBLE);
        } /* End of while(1) */
+       __set_current_state(TASK_RUNNING);
 
        DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
index 7b31093..a6b2d72 100644 (file)
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
                            unsigned long long lba, unsigned int num, int write)
 {
        int ret;
-       unsigned int block, rest = 0;
+       unsigned long long block, rest = 0;
        int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
        func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
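The scsi_debug change widens block and rest from unsigned int to unsigned long long; with a 32-bit type, any logical block address at or above 2^32 silently wraps and the wrong block is accessed. The truncation in two lines:

#include <stdio.h>

int main(void)
{
        unsigned long long lba = 0x100000001ULL;        /* block past 4 Gi */
        unsigned int truncated = lba;                   /* old field width */

        printf("lba=%llu truncated=%u\n", lba, truncated);  /* truncated=1 */
        return 0;
}
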
index 9045c52..fb2bb35 100644 (file)
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue);
+               __blk_run_queue(sdev->request_queue, false);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
index 998c01b..5c3ccfc 100644 (file)
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
        if (flagset)
                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q);
+       __blk_run_queue(rport->rqst_q, false);
        if (flagset)
                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
index de885a0..f33e2dd 100644 (file)
@@ -173,7 +173,8 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
        return 0;
 }
 
-#define VALID(x) (x | 0x80)
+#define SENSE_VALID_FLAG 0x80
+#define VALID(x) (x | SENSE_VALID_FLAG)
 
 static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
@@ -201,7 +202,8 @@ static int intc_set_type(struct irq_data *data, unsigned int type)
        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
-               intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
+               intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
+                                                   value & ~SENSE_VALID_FLAG);
        }
 
        return 0;
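The sh intc hunk gives the 0x80 marker a name, SENSE_VALID_FLAG, and strips it before the value reaches the sense register: the flag only distinguishes configured entries from the zero-initialized holes in intc_irq_sense_table and is not part of the hardware encoding. A reduced model of the lookup and masking:

#include <stdio.h>

#define SENSE_VALID_FLAG 0x80
#define VALID(x) ((x) | SENSE_VALID_FLAG)

/* Indexed by trigger type; unset entries stay 0 and are rejected. */
static const unsigned char sense_table[4] = {
        [0] = VALID(0),         /* falling edge */
        [1] = VALID(1),         /* rising edge  */
        /* [2], [3] left invalid */
};

int main(void)
{
        unsigned char value = sense_table[1];

        if (!value) {
                printf("unsupported trigger type\n");
                return 1;
        }
        /* Only the low bits go to the register; the valid marker is stripped. */
        printf("write 0x%x to the sense register\n", value & ~SENSE_VALID_FLAG);
        return 0;
}
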
index 351d8a3..19752b0 100644 (file)
@@ -7,10 +7,9 @@
 #include <linux/of_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 
-struct awesome_struct {
+struct ce4100_info {
        struct ssp_device ssp;
-       struct platform_device spi_pdev;
-       struct pxa2xx_spi_master spi_pdata;
+       struct platform_device *spi_pdev;
 };
 
 static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
 }
 EXPORT_SYMBOL_GPL(pxa_ssp_free);
 
-static void plat_dev_release(struct device *dev)
-{
-       struct awesome_struct *as = container_of(dev,
-                       struct awesome_struct, spi_pdev.dev);
-
-       of_device_node_put(&as->spi_pdev.dev);
-}
-
 static int __devinit ce4100_spi_probe(struct pci_dev *dev,
                const struct pci_device_id *ent)
 {
        int ret;
        resource_size_t phys_beg;
        resource_size_t phys_len;
-       struct awesome_struct *spi_info;
+       struct ce4100_info *spi_info;
        struct platform_device *pdev;
-       struct pxa2xx_spi_master *spi_pdata;
+       struct pxa2xx_spi_master spi_pdata;
        struct ssp_device *ssp;
 
        ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
                return ret;
        }
 
+       pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
        spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
-       if (!spi_info) {
+       if (!pdev || !spi_info ) {
                ret = -ENOMEM;
-               goto err_kz;
+               goto err_nomem;
        }
-       ssp = &spi_info->ssp;
-       pdev = &spi_info->spi_pdev;
-       spi_pdata =  &spi_info->spi_pdata;
+       memset(&spi_pdata, 0, sizeof(spi_pdata));
+       spi_pdata.num_chipselect = dev->devfn;
 
-       pdev->name = "pxa2xx-spi";
-       pdev->id = dev->devfn;
-       pdev->dev.parent = &dev->dev;
-       pdev->dev.platform_data = &spi_info->spi_pdata;
+       ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+       if (ret)
+               goto err_nomem;
 
+       pdev->dev.parent = &dev->dev;
 #ifdef CONFIG_OF
        pdev->dev.of_node = dev->dev.of_node;
 #endif
-       pdev->dev.release = plat_dev_release;
-
-       spi_pdata->num_chipselect = dev->devfn;
-
+       ssp = &spi_info->ssp;
        ssp->phys_base = pci_resource_start(dev, 0);
        ssp->mmio_base = ioremap(phys_beg, phys_len);
        if (!ssp->mmio_base) {
                dev_err(&pdev->dev, "failed to ioremap() registers\n");
                ret = -EIO;
-               goto err_remap;
+               goto err_nomem;
        }
        ssp->irq = dev->irq;
        ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 
        pci_set_drvdata(dev, spi_info);
 
-       ret = platform_device_register(pdev);
+       ret = platform_device_add(pdev);
        if (ret)
                goto err_dev_add;
 
@@ -135,27 +123,21 @@ err_dev_add:
        mutex_unlock(&ssp_lock);
        iounmap(ssp->mmio_base);
 
-err_remap:
-       kfree(spi_info);
-
-err_kz:
+err_nomem:
        release_mem_region(phys_beg, phys_len);
-
+       platform_device_put(pdev);
+       kfree(spi_info);
        return ret;
 }
 
 static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 {
-       struct awesome_struct *spi_info;
-       struct platform_device *pdev;
+       struct ce4100_info *spi_info;
        struct ssp_device *ssp;
 
        spi_info = pci_get_drvdata(dev);
-
        ssp = &spi_info->ssp;
-       pdev = &spi_info->spi_pdev;
-
-       platform_device_unregister(pdev);
+       platform_device_unregister(spi_info->spi_pdev);
 
        iounmap(ssp->mmio_base);
        release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 }
 
 static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
        { },
 };
index 56f60c8..2c665fc 100644 (file)
@@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
        bytes_done = 0;
 
        while (bytes_done < t->len) {
+               void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
+               const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
                n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
-                                          t->tx_buf + bytes_done,
-                                          t->rx_buf + bytes_done,
+                                          tx_buf,
+                                          rx_buf,
                                           words, bits);
                if (n < 0)
                        break;
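The sh_msiof change offsets tx_buf/rx_buf only when the transfer actually has that buffer; computing t->rx_buf + bytes_done on a NULL rx_buf is undefined behaviour in C even if the result is never dereferenced. The guarded-advance idiom in isolation:

#include <stdio.h>
#include <stddef.h>

static void do_chunk(const void *tx, void *rx, size_t len)
{
        printf("chunk: tx=%p rx=%p len=%zu\n", tx, rx, len);
}

int main(void)
{
        const char payload[64] = "payload";
        const char *tx_buf = payload;
        char *rx_buf = NULL;            /* transmit-only transfer */
        size_t done = 0, len = 64, chunk = 16;

        while (done < len) {
                /* Only offset a buffer that exists; NULL + done is UB. */
                const void *tx = tx_buf ? tx_buf + done : NULL;
                void *rx = rx_buf ? rx_buf + done : NULL;

                do_chunk(tx, rx, chunk);
                done += chunk;
        }
        return 0;
}
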
index 3918d2c..e05ba6e 100644 (file)
@@ -1192,10 +1192,10 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
 }
 EXPORT_SYMBOL(ssb_device_enable);
 
-/* Wait for a bit in a register to get set or unset.
+/* Wait for bitmask in a register to get set or cleared.
  * timeout is in units of ten-microseconds */
-static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
-                       int timeout, int set)
+static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
+                        int timeout, int set)
 {
        int i;
        u32 val;
@@ -1203,7 +1203,7 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
        for (i = 0; i < timeout; i++) {
                val = ssb_read32(dev, reg);
                if (set) {
-                       if (val & bitmask)
+                       if ((val & bitmask) == bitmask)
                                return 0;
                } else {
                        if (!(val & bitmask))
@@ -1220,20 +1220,38 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
 
 void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags)
 {
-       u32 reject;
+       u32 reject, val;
 
        if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET)
                return;
 
        reject = ssb_tmslow_reject_bitmask(dev);
-       ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
-       ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1);
-       ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
-       ssb_write32(dev, SSB_TMSLOW,
-                   SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
-                   reject | SSB_TMSLOW_RESET |
-                   core_specific_flags);
-       ssb_flush_tmslow(dev);
+
+       if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_CLOCK) {
+               ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
+               ssb_wait_bits(dev, SSB_TMSLOW, reject, 1000, 1);
+               ssb_wait_bits(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
+
+               if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
+                       val = ssb_read32(dev, SSB_IMSTATE);
+                       val |= SSB_IMSTATE_REJECT;
+                       ssb_write32(dev, SSB_IMSTATE, val);
+                       ssb_wait_bits(dev, SSB_IMSTATE, SSB_IMSTATE_BUSY, 1000,
+                                     0);
+               }
+
+               ssb_write32(dev, SSB_TMSLOW,
+                       SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+                       reject | SSB_TMSLOW_RESET |
+                       core_specific_flags);
+               ssb_flush_tmslow(dev);
+
+               if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
+                       val = ssb_read32(dev, SSB_IMSTATE);
+                       val &= ~SSB_IMSTATE_REJECT;
+                       ssb_write32(dev, SSB_IMSTATE, val);
+               }
+       }
 
        ssb_write32(dev, SSB_TMSLOW,
                    reject | SSB_TMSLOW_RESET |
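Renaming ssb_wait_bit() to ssb_wait_bits() matches the new behaviour: in the set case the poll now succeeds only once every bit of the mask is set, not as soon as any one of them is. The difference in a few lines:

#include <stdio.h>

#define MASK 0x06u      /* wait for bits 1 and 2 */

static int all_bits_set(unsigned int val, unsigned int mask)
{
        return (val & mask) == mask;    /* new: whole mask must be set */
}

static int any_bit_set(unsigned int val, unsigned int mask)
{
        return (val & mask) != 0;       /* old: a single bit was enough */
}

int main(void)
{
        unsigned int reg = 0x02;        /* only one of the two bits up */

        printf("any: %d  all: %d\n", any_bit_set(reg, MASK), all_bits_set(reg, MASK));
        return 0;
}
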
index 158449e..a467b20 100644 (file)
@@ -468,10 +468,14 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
                SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
                SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
                SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
+               SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
+               SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
        } else {
                SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
                SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
                SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
+               SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
+               SPEX(boardflags2_hi, SSB_SPROM5_BFL2HI, 0xFFFF, 0);
        }
        SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
             SSB_SPROM4_ANTAVAIL_A_SHIFT);
@@ -641,7 +645,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
                break;
        default:
                ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
-                          "  revision %d detected. Will extract"
+                          " revision %d detected. Will extract"
                           " v1\n", out->revision);
                out->revision = 1;
                sprom_extract_r123(out, in);
index c7345db..f853379 100644 (file)
@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
 
        /* Fetch the vendor specific tuples. */
        res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
-                               ssb_pcmcia_do_get_invariants, sprom);
+                               ssb_pcmcia_do_get_invariants, iv);
        if ((res == 0) || (res == -ENOSPC))
                return 0;
 
index 0e298db..29b8ab4 100644 (file)
@@ -360,8 +360,8 @@ int PSSendOps(void *arg)
                status = 1;
                goto complete;
        }
-        len = (firmware->size > MAX_BDADDR_FORMAT_LENGTH)? MAX_BDADDR_FORMAT_LENGTH: firmware->size;
-       memcpy(config_bdaddr, firmware->data,len);
+       len = min(firmware->size, MAX_BDADDR_FORMAT_LENGTH - 1);
+       memcpy(config_bdaddr, firmware->data, len);
        config_bdaddr[len] = '\0';
        write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
                A_RELEASE_FIRMWARE(firmware);
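The ath6kl fix clamps the copy length to one byte less than the destination so the terminating NUL always fits; previously a firmware blob of MAX_BDADDR_FORMAT_LENGTH bytes made the config_bdaddr[len] = '\0' store land one past the end of the buffer. The same pattern outside the driver (the buffer size here is only illustrative):

#include <stdio.h>
#include <string.h>

#define BDADDR_BUF_LEN 18       /* e.g. "00:11:22:33:44:55" + NUL */

int main(void)
{
        char config_bdaddr[BDADDR_BUF_LEN];
        const char *fw_data = "00:11:22:33:44:55 plus trailing firmware bytes";
        size_t fw_size = strlen(fw_data);

        /* Leave room for the terminator: copy at most LEN - 1 bytes. */
        size_t len = fw_size < BDADDR_BUF_LEN - 1 ? fw_size : BDADDR_BUF_LEN - 1;

        memcpy(config_bdaddr, fw_data, len);
        config_bdaddr[len] = '\0';
        printf("%s\n", config_bdaddr);
        return 0;
}
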
index 991463f..9b7b71c 100644 (file)
@@ -2313,7 +2313,9 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
        notif_bss_info->frame_len =
            offsetof(struct ieee80211_mgmt,
                     u.beacon.variable) + wl_get_ielen(wl);
-       freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+       freq = ieee80211_channel_to_frequency(notif_bss_info->channel,
+                                             band->band);
+
        channel = ieee80211_get_channel(wiphy, freq);
 
        WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
index bdd629d..6363077 100644 (file)
@@ -104,9 +104,6 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
 static void wl_release_fw(struct wl_info *wl);
 
 /* local prototypes */
-static int wl_start(struct sk_buff *skb, struct wl_info *wl);
-static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
-                       struct sk_buff *skb);
 static void wl_dpc(unsigned long data);
 
 MODULE_AUTHOR("Broadcom Corporation");
@@ -135,7 +132,6 @@ module_param(phymsglevel, int, 0);
 
 #define HW_TO_WL(hw)    (hw->priv)
 #define WL_TO_HW(wl)     (wl->pub->ieee_hw)
-static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 static int wl_ops_start(struct ieee80211_hw *hw);
 static void wl_ops_stop(struct ieee80211_hw *hw);
 static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -173,20 +169,18 @@ static int wl_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           enum ieee80211_ampdu_mlme_action action,
                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 
-static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       int status;
        struct wl_info *wl = hw->priv;
        WL_LOCK(wl);
        if (!wl->pub->up) {
                WL_ERROR("ops->tx called while down\n");
-               status = -ENETDOWN;
+               kfree_skb(skb);
                goto done;
        }
-       status = wl_start(skb, wl);
+       wlc_sendpkt_mac80211(wl->wlc, skb, hw);
  done:
        WL_UNLOCK(wl);
-       return status;
 }
 
 static int wl_ops_start(struct ieee80211_hw *hw)
@@ -209,11 +203,8 @@ static void wl_ops_stop(struct ieee80211_hw *hw)
        struct wl_info *wl = hw->priv;
        ASSERT(wl);
        WL_LOCK(wl);
-       wl_down(wl);
        ieee80211_stop_queues(hw);
        WL_UNLOCK(wl);
-
-       return;
 }
 
 static int
@@ -246,7 +237,14 @@ wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 static void
 wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-       return;
+       struct wl_info *wl;
+
+       wl = HW_TO_WL(hw);
+
+       /* put driver in down state */
+       WL_LOCK(wl);
+       wl_down(wl);
+       WL_UNLOCK(wl);
 }
 
 static int
@@ -259,9 +257,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
        switch (type) {
        case NL80211_CHAN_HT20:
        case NL80211_CHAN_NO_HT:
-               WL_LOCK(wl);
                err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
-               WL_UNLOCK(wl);
                break;
        case NL80211_CHAN_HT40MINUS:
        case NL80211_CHAN_HT40PLUS:
@@ -281,6 +277,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
        int err = 0;
        int new_int;
 
+       WL_LOCK(wl);
        if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
                WL_NONE("%s: Setting listen interval to %d\n",
                        __func__, conf->listen_interval);
@@ -337,6 +334,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
        }
 
  config_out:
+       WL_UNLOCK(wl);
        return err;
 }
 
@@ -455,13 +453,21 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
 
 static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
 {
+       struct wl_info *wl = hw->priv;
        WL_NONE("Scan Start\n");
+       WL_LOCK(wl);
+       wlc_scan_start(wl->wlc);
+       WL_UNLOCK(wl);
        return;
 }
 
 static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
 {
+       struct wl_info *wl = hw->priv;
        WL_NONE("Scan Complete\n");
+       WL_LOCK(wl);
+       wlc_scan_stop(wl->wlc);
+       WL_UNLOCK(wl);
        return;
 }
 
@@ -779,7 +785,7 @@ static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
        wl_found++;
        return wl;
 
- fail:
+fail:
        wl_free(wl);
 fail1:
        return NULL;
@@ -1090,7 +1096,6 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 }
 
-#ifdef LINUXSTA_PS
 static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct wl_info *wl;
@@ -1105,11 +1110,12 @@ static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
                return -ENODEV;
        }
 
+       /* only need to flag that hw is down for a proper resume */
        WL_LOCK(wl);
-       wl_down(wl);
        wl->pub->hw_up = false;
        WL_UNLOCK(wl);
-       pci_save_state(pdev, wl->pci_psstate);
+
+       pci_save_state(pdev);
        pci_disable_device(pdev);
        return pci_set_power_state(pdev, PCI_D3hot);
 }
@@ -1133,7 +1139,7 @@ static int wl_resume(struct pci_dev *pdev)
        if (err)
                return err;
 
-       pci_restore_state(pdev, wl->pci_psstate);
+       pci_restore_state(pdev);
 
        err = pci_enable_device(pdev);
        if (err)
@@ -1145,13 +1151,12 @@ static int wl_resume(struct pci_dev *pdev)
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-       WL_LOCK(wl);
-       err = wl_up(wl);
-       WL_UNLOCK(wl);
-
+       /*
+        * Done. The driver will be put in the up state
+        * by the wl_ops_add_interface() call.
+        */
        return err;
 }
-#endif                         /* LINUXSTA_PS */
 
 static void wl_remove(struct pci_dev *pdev)
 {
@@ -1184,14 +1189,12 @@ static void wl_remove(struct pci_dev *pdev)
 }
 
 static struct pci_driver wl_pci_driver = {
- .name  = "brcm80211",
- .probe = wl_pci_probe,
-#ifdef LINUXSTA_PS
- .suspend = wl_suspend,
- .resume  = wl_resume,
-#endif                         /* LINUXSTA_PS */
- .remove   = __devexit_p(wl_remove),
- .id_table = wl_id_table,
+       .name = "brcm80211",
+       .probe = wl_pci_probe,
+       .suspend = wl_suspend,
+       .resume = wl_resume,
+       .remove = __devexit_p(wl_remove),
+       .id_table = wl_id_table,
 };
 
 /**
@@ -1316,22 +1319,6 @@ void wl_free(struct wl_info *wl)
        osl_detach(osh);
 }
 
-/* transmit a packet */
-static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
-{
-       if (!wl)
-               return -ENETDOWN;
-
-       return wl_start_int(wl, WL_TO_HW(wl), skb);
-}
-
-static int BCMFASTPATH
-wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       wlc_sendpkt_mac80211(wl->wlc, skb, hw);
-       return NETDEV_TX_OK;
-}
-
 void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
                      int prio)
 {
index 1d5d01a..aa12d1a 100644 (file)
@@ -5126,7 +5126,6 @@ wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
        fifo = prio2fifo[prio];
 
        ASSERT((uint) skb_headroom(sdu) >= TXOFF);
-       ASSERT(!(sdu->cloned));
        ASSERT(!(sdu->next));
        ASSERT(!(sdu->prev));
        ASSERT(fifo < NFIFO);
@@ -6819,11 +6818,14 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
        ratespec_t rspec;
        unsigned char *plcp;
 
+#if 0
+       /* Clearly, this is bogus -- reading the TSF now is wrong */
        wlc_read_tsf(wlc, &tsf_l, &tsf_h);      /* mactime */
        rx_status->mactime = tsf_h;
        rx_status->mactime <<= 32;
        rx_status->mactime |= tsf_l;
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU; /* clearly wrong */
+#endif
 
        channel = WLC_CHAN_CHANNEL(rxh->RxChan);
 
@@ -8462,3 +8464,16 @@ static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
 
        kfree(qi);
 }
+
+/*
+ * Flag 'scan in progress' to withhold dynamic phy calibration
+ */
+void wlc_scan_start(struct wlc_info *wlc)
+{
+       wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
+}
+
+void wlc_scan_stop(struct wlc_info *wlc)
+{
+       wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
+}
index 146a690..aff4130 100644 (file)
@@ -570,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc);
 extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
 extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
 extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
+extern void wlc_scan_start(struct wlc_info *wlc);
+extern void wlc_scan_stop(struct wlc_info *wlc);
 
 static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
                                    uint *arg)
index aad4732..1502d80 100644 (file)
@@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO
 config COMEDI_NI_ATMIO
        tristate "NI AT-MIO E series ISA-PNP card support"
        depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
+       select COMEDI_8255
        default N
        ---help---
          Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO
 config COMEDI_NI_PCIMIO
        tristate "NI PCI-MIO-E series and M series support"
        depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+       select COMEDI_8255
+       select COMEDI_FC
        default N
        ---help---
          Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS
 config COMEDI_NI_MIO_CS
        tristate "NI DAQCard E series PCMCIA support"
        depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+       select COMEDI_8255
        select COMEDI_FC
        default N
        ---help---
@@ -1268,7 +1272,6 @@ config COMEDI_MITE
 config COMEDI_NI_TIO
        tristate "NI general purpose counter support"
        depends on COMEDI_MITE
-       select COMEDI_8255
        default N
        ---help---
          Enable support for National Instruments general purpose counters.
index cd25b24..fd274e9 100644 (file)
@@ -61,8 +61,6 @@
 #define PCI_DAQ_SIZE           4096
 #define PCI_DAQ_SIZE_660X       8192
 
-MODULE_LICENSE("GPL");
-
 struct mite_struct *mite_devices;
 EXPORT_SYMBOL(mite_devices);
 
index 14e716e..54741c9 100644 (file)
@@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void)
 
 module_init(driver_ni6527_init_module);
 module_exit(driver_ni6527_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
index 8b8e2aa..403fc09 100644 (file)
@@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void)
 
 module_init(driver_ni_65xx_init_module);
 module_exit(driver_ni_65xx_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
index 6612b08..ca2aeaa 100644 (file)
@@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
        };
        return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
index e9f034e..d8d91f9 100644 (file)
@@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot)
        mite_list_devices();
        return -EIO;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
index 4d1868d..0728c3c 100644 (file)
@@ -575,7 +575,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
        /* grab our IRQ */
        if (irq) {
                isr_flags = 0;
-               if (thisboard->bustype == pci_bustype)
+               if (thisboard->bustype == pci_bustype
+                   || thisboard->bustype == pcmcia_bustype)
                        isr_flags |= IRQF_SHARED;
                if (request_irq(irq, labpc_interrupt, isr_flags,
                                driver_labpc.driver_name, dev)) {
index 84a15c3..005d2fe 100644 (file)
@@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void)
 
 module_init(driver_pcidio_init_module);
 module_exit(driver_pcidio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
index 23a3812..9148abd 100644 (file)
@@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev,
 
        return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
index b3d05fc..4fb8094 100644 (file)
@@ -368,6 +368,7 @@ static int blkvsc_probe(struct device *device)
                blkdev->gd->first_minor = 0;
        blkdev->gd->fops = &block_ops;
        blkdev->gd->private_data = blkdev;
+       blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
        sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
 
        blkvsc_do_inquiry(blkdev);
index df9cd13..0edbe74 100644 (file)
@@ -1279,7 +1279,7 @@ static void netvsc_channel_cb(void *context)
        /* ASSERT(device); */
 
        packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
-                        GFP_KERNEL);
+                        GFP_ATOMIC);
        if (!packet)
                return;
        buffer = packet;
index 0147b40..b41c964 100644 (file)
@@ -236,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
        if (status == 1) {
                netif_carrier_on(net);
                netif_wake_queue(net);
+               netif_notify_peers(net);
        } else {
                netif_carrier_off(net);
                netif_stop_queue(net);
@@ -358,7 +359,6 @@ static int netvsc_probe(struct device *device)
 
        /* Set initial state */
        netif_carrier_off(net);
-       netif_stop_queue(net);
 
        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = device_ctx;
index deb68c8..b8b54da 100644 (file)
@@ -68,7 +68,7 @@ static ssize_t ad7476_show_scale(struct device *dev,
        /* Corresponds to Vref / 2^(bits) */
        unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-       return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+       return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0);
 
index 6859089..5d85efa 100644 (file)
@@ -68,7 +68,7 @@ static ssize_t ad7887_show_scale(struct device *dev,
        /* Corresponds to Vref / 2^(bits) */
        unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-       return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+       return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0);
 
index 6309d52..89ccf37 100644 (file)
@@ -432,7 +432,7 @@ static ssize_t ad799x_show_scale(struct device *dev,
        /* Corresponds to Vref / 2^(bits) */
        unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-       return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+       return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0);
index e3387cd..0f87eca 100644 (file)
@@ -87,7 +87,7 @@ static ssize_t ad5446_show_scale(struct device *dev,
        /* Corresponds to Vref / 2^(bits) */
        unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
 
-       return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+       return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
 
index e38e89d..e2f6d6a 100644 (file)
@@ -874,7 +874,10 @@ static int nc_set_selected_input_dev(u8 value)
                sc_access[3].reg_addr = 0x109;
                sc_access[3].mask = MASK6;
                sc_access[3].value = 0x00;
-               num_val = 4;
+               sc_access[4].reg_addr = 0x104;
+               sc_access[4].value = 0x3C;
+               sc_access[4].mask = 0xff;
+               num_val = 5;
                break;
        default:
                return -EINVAL;
index 3fe5f41..0aad0d7 100644 (file)
@@ -495,7 +495,7 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
 /* send boot data to the IR TX device */
 static int send_boot_data(struct IR_tx *tx)
 {
-       int ret;
+       int ret, i;
        unsigned char buf[4];
 
        /* send the boot block */
@@ -503,7 +503,7 @@ static int send_boot_data(struct IR_tx *tx)
        if (ret != 0)
                return ret;
 
-       /* kick it off? */
+       /* Hit the go button to activate the new boot data */
        buf[0] = 0x00;
        buf[1] = 0x20;
        ret = i2c_master_send(tx->c, buf, 2);
@@ -511,7 +511,19 @@ static int send_boot_data(struct IR_tx *tx)
                zilog_error("i2c_master_send failed with %d\n", ret);
                return ret < 0 ? ret : -EFAULT;
        }
-       ret = i2c_master_send(tx->c, buf, 1);
+
+       /*
+        * Wait for the Zilog to settle after hitting go following the boot
+        * block upload. Without this delay, the HD-PVR and HVR-1950 both
+        * return -EIO when trying to get the firmware revision; tx probe fails.
+        */
+       for (i = 0; i < 10; i++) {
+               ret = i2c_master_send(tx->c, buf, 1);
+               if (ret == 1)
+                       break;
+               udelay(100);
+       }
+
        if (ret != 1) {
                zilog_error("i2c_master_send failed with %d\n", ret);
                return ret < 0 ? ret : -EFAULT;
@@ -523,8 +535,8 @@ static int send_boot_data(struct IR_tx *tx)
                zilog_error("i2c_master_recv failed with %d\n", ret);
                return 0;
        }
-       if (buf[0] != 0x80) {
-               zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+       if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
+               zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
                return 0;
        }
        zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -827,7 +839,15 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
                zilog_error("i2c_master_send failed with %d\n", ret);
                return ret < 0 ? ret : -EFAULT;
        }
-       ret = i2c_master_send(tx->c, buf, 1);
+
+       /* Give the z8 a moment to process the data block */
+       for (i = 0; i < 10; i++) {
+               ret = i2c_master_send(tx->c, buf, 1);
+               if (ret == 1)
+                       break;
+               udelay(100);
+       }
+
        if (ret != 1) {
                zilog_error("i2c_master_send failed with %d\n", ret);
                return ret < 0 ? ret : -EFAULT;
index 23fa049..a2f29d4 100644 (file)
@@ -347,7 +347,7 @@ static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
        if ((!mfd) || (mfd->key != MFD_KEY))
                return 0;
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(mfd->fbi, 1);
 
        ret = msm_fb_suspend_sub(mfd);
@@ -358,7 +358,7 @@ static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
                pdev->dev.power.power_state = state;
        }
 
-       release_console_sem();
+       console_unlock();
        return ret;
 }
 #else
@@ -431,11 +431,11 @@ static int msm_fb_resume(struct platform_device *pdev)
        if ((!mfd) || (mfd->key != MFD_KEY))
                return 0;
 
-       acquire_console_sem();
+       console_lock();
        ret = msm_fb_resume_sub(mfd);
        pdev->dev.power.power_state = PMSG_ON;
        fb_set_suspend(mfd->fbi, 1);
-       release_console_sem();
+       console_unlock();
 
        return ret;
 }
index 9f26dc9..56a283d 100644 (file)
@@ -373,17 +373,17 @@ static void dcon_source_switch(struct work_struct *work)
                 *
                 * For now, we just hope..
                 */
-               acquire_console_sem();
+               console_lock();
                ignore_fb_events = 1;
                if (fb_blank(fbinfo, FB_BLANK_UNBLANK)) {
                        ignore_fb_events = 0;
-                       release_console_sem();
+                       console_unlock();
                        printk(KERN_ERR "olpc-dcon:  Failed to enter CPU mode\n");
                        dcon_pending = DCON_SOURCE_DCON;
                        return;
                }
                ignore_fb_events = 0;
-               release_console_sem();
+               console_unlock();
 
                /* And turn off the DCON */
                pdata->set_dconload(1);
@@ -435,12 +435,12 @@ static void dcon_source_switch(struct work_struct *work)
                        }
                }
 
-               acquire_console_sem();
+               console_lock();
                ignore_fb_events = 1;
                if (fb_blank(fbinfo, FB_BLANK_POWERDOWN))
                        printk(KERN_ERR "olpc-dcon:  couldn't blank fb!\n");
                ignore_fb_events = 0;
-               release_console_sem();
+               console_unlock();
 
                printk(KERN_INFO "olpc-dcon: The DCON has control\n");
                break;
index 89279ba..39413b7 100644 (file)
@@ -525,7 +525,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
 {
        int err;
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
                return;
 
        switch (msg->flags) {
index 701561d..236dd36 100644 (file)
@@ -484,8 +484,6 @@ struct net_device *RtmpPhyNetDevInit(struct rt_rtmp_adapter *pAd,
        net_dev->ml_priv = (void *)pAd;
        pAd->net_dev = net_dev;
 
-       netif_stop_queue(net_dev);
-
        return net_dev;
 
 }
index ee68d51..322bf49 100644 (file)
@@ -106,6 +106,7 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x0411, 0x016f)},   /* MelCo.,Inc. WLI-UC-G301N */
        {USB_DEVICE(0x1737, 0x0070)},   /* Linksys WUSB100 */
        {USB_DEVICE(0x1737, 0x0071)},   /* Linksys WUSB600N */
+       {USB_DEVICE(0x1737, 0x0078)},   /* Linksys WUSB100v2 */
        {USB_DEVICE(0x0411, 0x00e8)},   /* Buffalo WLI-UC-G300N */
        {USB_DEVICE(0x050d, 0x815c)},   /* Belkin F5D8053 */
        {USB_DEVICE(0x100D, 0x9031)},   /* Motorola 2770 */
index 32088a6..84be383 100644 (file)
@@ -128,12 +128,13 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
        u8 *ptmpchar = NULL, *ppayload, *ptr;
        struct tx_desc *ptx_desc;
        u32 txdscp_sz = sizeof(struct tx_desc);
+       u8 ret = _FAIL;
 
        ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
        if (pmappedfw && (ulfilelength > 0)) {
                update_fwhdr(&fwhdr, pmappedfw);
                if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
-                       goto exit_fail;
+                       goto firmware_rel;
                fill_fwpriv(padapter, &fwhdr.fwpriv);
                /* firmware check ok */
                maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -141,7 +142,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
                maxlen += txdscp_sz;
                ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
                if (ptmpchar == NULL)
-                       return _FAIL;
+                       goto firmware_rel;
 
                ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
                            ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -273,11 +274,13 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
                        goto exit_fail;
        } else
                goto exit_fail;
-       return _SUCCESS;
+       ret = _SUCCESS;
 
 exit_fail:
        kfree(ptmpchar);
-       return _FAIL;
+firmware_rel:
+       release_firmware((struct firmware *)phfwfile_hdl);
+       return ret;
 }
 
 uint rtl8712_hal_init(struct _adapter *padapter)
index a692ee8..21ce2af 100644 (file)
@@ -47,54 +47,123 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
 static void r871xu_dev_remove(struct usb_interface *pusb_intf);
 
 static struct usb_device_id rtl871x_usb_id_tbl[] = {
-       /*92SU
-        * Realtek */
-       {USB_DEVICE(0x0bda, 0x8171)},
-       {USB_DEVICE(0x0bda, 0x8172)},
+
+/* RTL8188SU */
+       /* Realtek */
+       {USB_DEVICE(0x0BDA, 0x8171)},
        {USB_DEVICE(0x0bda, 0x8173)},
-       {USB_DEVICE(0x0bda, 0x8174)},
        {USB_DEVICE(0x0bda, 0x8712)},
        {USB_DEVICE(0x0bda, 0x8713)},
        {USB_DEVICE(0x0bda, 0xC512)},
-       /* Abocom  */
+       /* Abocom */
        {USB_DEVICE(0x07B8, 0x8188)},
+       /* ASUS */
+       {USB_DEVICE(0x0B05, 0x1786)},
+       {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
+       /* Belkin */
+       {USB_DEVICE(0x050D, 0x945A)},
        /* Corega */
-       {USB_DEVICE(0x07aa, 0x0047)},
-       /* Dlink */
-       {USB_DEVICE(0x07d1, 0x3303)},
-       {USB_DEVICE(0x07d1, 0x3302)},
-       {USB_DEVICE(0x07d1, 0x3300)},
-       /* Dlink for Skyworth */
-       {USB_DEVICE(0x14b2, 0x3300)},
-       {USB_DEVICE(0x14b2, 0x3301)},
-       {USB_DEVICE(0x14b2, 0x3302)},
+       {USB_DEVICE(0x07AA, 0x0047)},
+       /* D-Link */
+       {USB_DEVICE(0x2001, 0x3306)},
+       {USB_DEVICE(0x07D1, 0x3306)}, /* 11n mode disable */
+       /* Edimax */
+       {USB_DEVICE(0x7392, 0x7611)},
        /* EnGenius */
        {USB_DEVICE(0x1740, 0x9603)},
-       {USB_DEVICE(0x1740, 0x9605)},
+       /* Hawking */
+       {USB_DEVICE(0x0E66, 0x0016)},
+       /* Hercules */
+       {USB_DEVICE(0x06F8, 0xE034)},
+       {USB_DEVICE(0x06F8, 0xE032)},
+       /* Logitec */
+       {USB_DEVICE(0x0789, 0x0167)},
+       /* PCI */
+       {USB_DEVICE(0x2019, 0xAB28)},
+       {USB_DEVICE(0x2019, 0xED16)},
+       /* Sitecom */
+       {USB_DEVICE(0x0DF6, 0x0057)},
+       {USB_DEVICE(0x0DF6, 0x0045)},
+       {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
+       {USB_DEVICE(0x0DF6, 0x004B)},
+       {USB_DEVICE(0x0DF6, 0x0063)},
+       /* Sweex */
+       {USB_DEVICE(0x177F, 0x0154)},
+       /* Thinkware */
+       {USB_DEVICE(0x0BDA, 0x5077)},
+       /* Toshiba */
+       {USB_DEVICE(0x1690, 0x0752)},
+       /* - */
+       {USB_DEVICE(0x20F4, 0x646B)},
+       {USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8191SU */
+       /* Realtek */
+       {USB_DEVICE(0x0BDA, 0x8172)},
+       /* Amigo */
+       {USB_DEVICE(0x0EB0, 0x9061)},
+       /* ASUS/EKB */
+       {USB_DEVICE(0x0BDA, 0x8172)},
+       {USB_DEVICE(0x13D3, 0x3323)},
+       {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
+       {USB_DEVICE(0x13D3, 0x3342)},
+       /* ASUS/EKBLenovo */
+       {USB_DEVICE(0x13D3, 0x3333)},
+       {USB_DEVICE(0x13D3, 0x3334)},
+       {USB_DEVICE(0x13D3, 0x3335)}, /* 11n mode disable */
+       {USB_DEVICE(0x13D3, 0x3336)}, /* 11n mode disable */
+       /* ASUS/Media BOX */
+       {USB_DEVICE(0x13D3, 0x3309)},
        /* Belkin */
-       {USB_DEVICE(0x050d, 0x815F)},
-       {USB_DEVICE(0x050d, 0x945A)},
-       {USB_DEVICE(0x050d, 0x845A)},
-       /* Guillemot */
-       {USB_DEVICE(0x06f8, 0xe031)},
+       {USB_DEVICE(0x050D, 0x815F)},
+       /* D-Link */
+       {USB_DEVICE(0x07D1, 0x3302)},
+       {USB_DEVICE(0x07D1, 0x3300)},
+       {USB_DEVICE(0x07D1, 0x3303)},
        /* Edimax */
-       {USB_DEVICE(0x7392, 0x7611)},
        {USB_DEVICE(0x7392, 0x7612)},
-       {USB_DEVICE(0x7392, 0x7622)},
-       /* Sitecom */
-       {USB_DEVICE(0x0DF6, 0x0045)},
+       /* EnGenius */
+       {USB_DEVICE(0x1740, 0x9605)},
+       /* Guillemot */
+       {USB_DEVICE(0x06F8, 0xE031)},
        /* Hawking */
        {USB_DEVICE(0x0E66, 0x0015)},
-       {USB_DEVICE(0x0E66, 0x0016)},
-       {USB_DEVICE(0x0b05, 0x1786)},
-       {USB_DEVICE(0x0b05, 0x1791)},    /* 11n mode disable */
-
+       /* Mediao */
        {USB_DEVICE(0x13D3, 0x3306)},
-       {USB_DEVICE(0x13D3, 0x3309)},
+       /* PCI */
+       {USB_DEVICE(0x2019, 0xED18)},
+       {USB_DEVICE(0x2019, 0x4901)},
+       /* Sitecom */
+       {USB_DEVICE(0x0DF6, 0x0058)},
+       {USB_DEVICE(0x0DF6, 0x0049)},
+       {USB_DEVICE(0x0DF6, 0x004C)},
+       {USB_DEVICE(0x0DF6, 0x0064)},
+       /* Skyworth */
+       {USB_DEVICE(0x14b2, 0x3300)},
+       {USB_DEVICE(0x14b2, 0x3301)},
+       {USB_DEVICE(0x14B2, 0x3302)},
+       /* - */
+       {USB_DEVICE(0x04F2, 0xAFF2)},
+       {USB_DEVICE(0x04F2, 0xAFF5)},
+       {USB_DEVICE(0x04F2, 0xAFF6)},
+       {USB_DEVICE(0x13D3, 0x3339)},
+       {USB_DEVICE(0x13D3, 0x3340)}, /* 11n mode disable */
+       {USB_DEVICE(0x13D3, 0x3341)}, /* 11n mode disable */
        {USB_DEVICE(0x13D3, 0x3310)},
-       {USB_DEVICE(0x13D3, 0x3311)},    /* 11n mode disable */
        {USB_DEVICE(0x13D3, 0x3325)},
-       {USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8192SU */
+       /* Realtek */
+       {USB_DEVICE(0x0BDA, 0x8174)},
+       {USB_DEVICE(0x0BDA, 0x8174)},
+       /* Belkin */
+       {USB_DEVICE(0x050D, 0x845A)},
+       /* Corega */
+       {USB_DEVICE(0x07AA, 0x0051)},
+       /* Edimax */
+       {USB_DEVICE(0x7392, 0x7622)},
+       /* NEC */
+       {USB_DEVICE(0x0409, 0x02B6)},
        {}
 };
 
@@ -103,8 +172,20 @@ MODULE_DEVICE_TABLE(usb, rtl871x_usb_id_tbl);
 static struct specific_device_id specific_device_id_tbl[] = {
        {.idVendor = 0x0b05, .idProduct = 0x1791,
                 .flags = SPEC_DEV_ID_DISABLE_HT},
+       {.idVendor = 0x0df6, .idProduct = 0x0059,
+                .flags = SPEC_DEV_ID_DISABLE_HT},
+       {.idVendor = 0x13d3, .idProduct = 0x3306,
+                .flags = SPEC_DEV_ID_DISABLE_HT},
        {.idVendor = 0x13D3, .idProduct = 0x3311,
                 .flags = SPEC_DEV_ID_DISABLE_HT},
+       {.idVendor = 0x13d3, .idProduct = 0x3335,
+                .flags = SPEC_DEV_ID_DISABLE_HT},
+       {.idVendor = 0x13d3, .idProduct = 0x3336,
+                .flags = SPEC_DEV_ID_DISABLE_HT},
+       {.idVendor = 0x13d3, .idProduct = 0x3340,
+                .flags = SPEC_DEV_ID_DISABLE_HT},
+       {.idVendor = 0x13d3, .idProduct = 0x3341,
+                .flags = SPEC_DEV_ID_DISABLE_HT},
        {}
 };
 
index 0bc113c..d007e4a 100644 (file)
@@ -1044,9 +1044,9 @@ static int __maybe_unused smtcfb_suspend(struct pci_dev *pdev, pm_message_t msg)
 
        /* when doing suspend, call fb apis and pci apis */
        if (msg.event == PM_EVENT_SUSPEND) {
-               acquire_console_sem();
+               console_lock();
                fb_set_suspend(&sfb->fb, 1);
-               release_console_sem();
+               console_unlock();
                retv = pci_save_state(pdev);
                pci_disable_device(pdev);
                retv = pci_choose_state(pdev, msg);
@@ -1105,9 +1105,9 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
 
        smtcfb_setmode(sfb);
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(&sfb->fb, 0);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index 408bb9b..07a7f54 100644 (file)
@@ -332,7 +332,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
        unsigned long flags;
 
        len = strlen(buf);
-       if (len > 0 || len < 3) {
+       if (len > 0 && len < 3) {
                ch = buf[0];
                if (ch == '\n')
                        ch = '0';
index e8f047e..80183a7 100644 (file)
@@ -986,12 +986,6 @@ static int __devinit synaptics_rmi4_probe
        input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
                                                MAX_TOUCH_MAJOR, 0, 0);
 
-       retval = input_register_device(rmi4_data->input_dev);
-       if (retval) {
-               dev_err(&client->dev, "%s:input register failed\n", __func__);
-               goto err_input_register;
-       }
-
        /* Clear interrupts */
        synaptics_rmi4_i2c_block_read(rmi4_data,
                        rmi4_data->fn01_data_base_addr + 1, intr_status,
@@ -1003,15 +997,20 @@ static int __devinit synaptics_rmi4_probe
        if (retval) {
                dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
                                __func__, platformdata->irq_number);
-               goto err_request_irq;
+               goto err_unset_clientdata;
+       }
+
+       retval = input_register_device(rmi4_data->input_dev);
+       if (retval) {
+               dev_err(&client->dev, "%s:input register failed\n", __func__);
+               goto err_free_irq;
        }
 
        return retval;
 
-err_request_irq:
+err_free_irq:
        free_irq(platformdata->irq_number, rmi4_data);
-       input_unregister_device(rmi4_data->input_dev);
-err_input_register:
+err_unset_clientdata:
        i2c_set_clientdata(client, NULL);
 err_query_dev:
        if (platformdata->regulator_en) {
index 5718645..27e0aa8 100644 (file)
@@ -949,7 +949,7 @@ func_end:
  *      Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
  *      schedules a DPC to dispatch I/O.
  */
-void io_mbox_msg(u32 msg)
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
 {
        struct io_mgr *pio_mgr;
        struct dev_object *dev_obj;
@@ -959,9 +959,9 @@ void io_mbox_msg(u32 msg)
        dev_get_io_mgr(dev_obj, &pio_mgr);
 
        if (!pio_mgr)
-               return;
+               return NOTIFY_BAD;
 
-       pio_mgr->intr_val = (u16)msg;
+       pio_mgr->intr_val = (u16)((u32)msg);
        if (pio_mgr->intr_val & MBX_PM_CLASS)
                io_dispatch_pm(pio_mgr);
 
@@ -973,7 +973,7 @@ void io_mbox_msg(u32 msg)
                spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
                tasklet_schedule(&pio_mgr->dpc_tasklet);
        }
-       return;
+       return NOTIFY_OK;
 }
 
 /*
index a3b0a18..a3f69f6 100644 (file)
@@ -223,6 +223,10 @@ static struct bridge_drv_interface drv_interface_fxns = {
        bridge_msg_set_queue_id,
 };
 
+static struct notifier_block dsp_mbox_notifier = {
+       .notifier_call = io_mbox_msg,
+};
+
 static inline void flush_all(struct bridge_dev_context *dev_context)
 {
        if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
@@ -553,7 +557,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
                 * Enable Mailbox events and also drain any pending
                 * stale messages.
                 */
-               dev_context->mbox = omap_mbox_get("dsp");
+               dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
                if (IS_ERR(dev_context->mbox)) {
                        dev_context->mbox = NULL;
                        pr_err("%s: Failed to get dsp mailbox handle\n",
@@ -563,8 +567,6 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
        }
        if (!status) {
-               dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
-
 /*PM_IVA2GRPSEL_PER = 0xC0;*/
                temp = readl(resources->dw_per_pm_base + 0xA8);
                temp = (temp & 0xFFFFFF30) | 0xC0;
@@ -685,7 +687,7 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
        /* Disable the mailbox interrupts */
        if (dev_context->mbox) {
                omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
-               omap_mbox_put(dev_context->mbox);
+               omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
                dev_context->mbox = NULL;
        }
        /* Reset IVA2 clocks*/
@@ -786,10 +788,7 @@ static int bridge_dev_create(struct bridge_dev_context
 
        pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
        if (pt_attrs != NULL) {
-               /* Assuming that we use only DSP's memory map
-                * until 0x4000:0000 , we would need only 1024
-                * L1 enties i.e L1 size = 4K */
-               pt_attrs->l1_size = 0x1000;
+               pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
                align_size = pt_attrs->l1_size;
                /* Align sizes are expected to be power of 2 */
                /* we like to get aligned on L1 table size */
index 18aec55..8242c70 100644 (file)
@@ -72,22 +72,17 @@ extern void io_dpc(unsigned long ref_data);
 /*
  *  ======== io_mbox_msg ========
  *  Purpose:
- *      Main interrupt handler for the shared memory Bridge channel manager.
- *      Calls the Bridge's chnlsm_isr to determine if this interrupt is ours,
- *      then schedules a DPC to dispatch I/O.
+ *     Main message handler for the shared memory Bridge channel manager.
+ *     Determines if this message is ours, then schedules a DPC to
+ *     dispatch I/O.
  *  Parameters:
- *      ref_data:   Pointer to the channel manager object for this board.
- *                  Set in an initial call to ISR_Install().
+ *     self:   Pointer to its own notifier_block struct.
+ *     len:    Length of message.
+ *     msg:    Message code received.
  *  Returns:
- *      TRUE if interrupt handled; FALSE otherwise.
- *  Requires:
- *      Must be in locked memory if executing in kernel mode.
- *      Must only call functions which are in locked memory if Kernel mode.
- *      Must only call asynchronous services.
- *      Interrupts are disabled and EOI for this interrupt has been sent.
- *  Ensures:
+ *     NOTIFY_OK if handled; NOTIFY_BAD otherwise.
  */
-void io_mbox_msg(u32 msg);
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg);
 
 /*
  *  ======== io_request_chnl ========
index 30dbfb6..d732679 100644 (file)
@@ -32,6 +32,7 @@
 
 struct stub_device {
        struct usb_interface *interface;
+       struct usb_device *udev;
        struct list_head list;
 
        struct usbip_device ud;
index b186b5f..a7ce51c 100644 (file)
@@ -258,10 +258,11 @@ static void stub_shutdown_connection(struct usbip_device *ud)
 static void stub_device_reset(struct usbip_device *ud)
 {
        struct stub_device *sdev = container_of(ud, struct stub_device, ud);
-       struct usb_device *udev = interface_to_usbdev(sdev->interface);
+       struct usb_device *udev = sdev->udev;
        int ret;
 
        usbip_udbg("device reset");
+
        ret = usb_lock_device_for_reset(udev, sdev->interface);
        if (ret < 0) {
                dev_err(&udev->dev, "lock for reset\n");
@@ -309,7 +310,8 @@ static void stub_device_unusable(struct usbip_device *ud)
  *
  * Allocates and initializes a new stub_device struct.
  */
-static struct stub_device *stub_device_alloc(struct usb_interface *interface)
+static struct stub_device *stub_device_alloc(struct usb_device *udev,
+                                            struct usb_interface *interface)
 {
        struct stub_device *sdev;
        int busnum = interface_to_busnum(interface);
@@ -324,7 +326,8 @@ static struct stub_device *stub_device_alloc(struct usb_interface *interface)
                return NULL;
        }
 
-       sdev->interface = interface;
+       sdev->interface = usb_get_intf(interface);
+       sdev->udev = usb_get_dev(udev);
 
        /*
         * devid is defined with devnum when this driver is first allocated.
@@ -450,11 +453,12 @@ static int stub_probe(struct usb_interface *interface,
                        return err;
                }
 
+               usb_get_intf(interface);
                return 0;
        }
 
        /* ok. this is my device. */
-       sdev = stub_device_alloc(interface);
+       sdev = stub_device_alloc(udev, interface);
        if (!sdev)
                return -ENOMEM;
 
@@ -476,6 +480,8 @@ static int stub_probe(struct usb_interface *interface,
                dev_err(&interface->dev, "create sysfs files for %s\n",
                        udev_busid);
                usb_set_intfdata(interface, NULL);
+               usb_put_intf(interface);
+
                busid_priv->interf_count = 0;
 
                busid_priv->sdev = NULL;
@@ -545,6 +551,7 @@ static void stub_disconnect(struct usb_interface *interface)
        if (busid_priv->interf_count > 1) {
                busid_priv->interf_count--;
                shutdown_busid(busid_priv);
+               usb_put_intf(interface);
                return;
        }
 
@@ -554,6 +561,9 @@ static void stub_disconnect(struct usb_interface *interface)
        /* 1. shutdown the current connection */
        shutdown_busid(busid_priv);
 
+       usb_put_dev(sdev->udev);
+       usb_put_intf(interface);
+
        /* 3. free sdev */
        busid_priv->sdev = NULL;
        stub_device_free(sdev);
index 3de6fd2..ae6ac82 100644 (file)
@@ -364,7 +364,7 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
 
 static int get_pipe(struct stub_device *sdev, int epnum, int dir)
 {
-       struct usb_device *udev = interface_to_usbdev(sdev->interface);
+       struct usb_device *udev = sdev->udev;
        struct usb_host_endpoint *ep;
        struct usb_endpoint_descriptor *epd = NULL;
 
@@ -484,7 +484,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
        int ret;
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
-       struct usb_device *udev = interface_to_usbdev(sdev->interface);
+       struct usb_device *udev = sdev->udev;
        int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
 
 
index 41a1fe5..afc3b1a 100644 (file)
@@ -100,9 +100,6 @@ struct vhci_hcd {
         * But, the index of this array begins from 0.
         */
        struct vhci_device vdev[VHCI_NPORTS];
-
-       /* vhci_device which has not been assiged its address yet */
-       int pending_port;
 };
 
 
@@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport);
 void vhci_rx_loop(struct usbip_task *ut);
 void vhci_tx_loop(struct usbip_task *ut);
 
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+                                           __u32 seqnum);
+
 #define hardware               (&the_controller->pdev.dev)
 
 static inline struct vhci_device *port_to_vdev(__u32 port)
index 08bd26a..a35fe61 100644 (file)
@@ -138,8 +138,6 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
         * the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
         * spin_unlock(&the_controller->vdev[rhport].ud.lock); */
 
-       the_controller->pending_port = rhport;
-
        spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
@@ -559,6 +557,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
        struct device *dev = &urb->dev->dev;
        int ret = 0;
        unsigned long flags;
+       struct vhci_device *vdev;
 
        usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
                    hcd, urb, mem_flags);
@@ -574,6 +573,18 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                return urb->status;
        }
 
+       vdev = port_to_vdev(urb->dev->portnum-1);
+
+       /* refuse enqueue for dead connection */
+       spin_lock(&vdev->ud.lock);
+       if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
+               usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
+               spin_unlock(&vdev->ud.lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
+               return -ENODEV;
+       }
+       spin_unlock(&vdev->ud.lock);
+
        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto no_need_unlink;
@@ -592,8 +603,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                __u8 type = usb_pipetype(urb->pipe);
                struct usb_ctrlrequest *ctrlreq =
                                (struct usb_ctrlrequest *) urb->setup_packet;
-               struct vhci_device *vdev =
-                               port_to_vdev(the_controller->pending_port);
 
                if (type != PIPE_CONTROL || !ctrlreq) {
                        dev_err(dev, "invalid request to devnum 0\n");
@@ -607,7 +616,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                        dev_info(dev, "SetAddress Request (%d) to port %d\n",
                                 ctrlreq->wValue, vdev->rhport);
 
-                       vdev->udev = urb->dev;
+                       if (vdev->udev)
+                               usb_put_dev(vdev->udev);
+                       vdev->udev = usb_get_dev(urb->dev);
 
                        spin_lock(&vdev->ud.lock);
                        vdev->ud.status = VDEV_ST_USED;
@@ -627,8 +638,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                                                "Get_Descriptor to device 0 "
                                                "(get max pipe size)\n");
 
-                       /* FIXME: reference count? (usb_get_dev()) */
-                       vdev->udev = urb->dev;
+                       if (vdev->udev)
+                               usb_put_dev(vdev->udev);
+                       vdev->udev = usb_get_dev(urb->dev);
                        goto out;
 
                default:
@@ -805,7 +817,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
        return 0;
 }
 
-
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
        struct vhci_unlink *unlink, *tmp;
@@ -813,11 +824,34 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
        spin_lock(&vdev->priv_lock);
 
        list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+               usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
                list_del(&unlink->list);
                kfree(unlink);
        }
 
        list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
+               struct urb *urb;
+
+               /* give back URB of unanswered unlink request */
+               usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
+
+               urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+               if (!urb) {
+                       usbip_uinfo("the urb (seqnum %lu) was already given back\n",
+                                                       unlink->unlink_seqnum);
+                       list_del(&unlink->list);
+                       kfree(unlink);
+                       continue;
+               }
+
+               urb->status = -ENODEV;
+
+               spin_lock(&the_controller->lock);
+               usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+               spin_unlock(&the_controller->lock);
+
+               usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+
                list_del(&unlink->list);
                kfree(unlink);
        }
@@ -887,6 +921,10 @@ static void vhci_device_reset(struct usbip_device *ud)
        vdev->speed  = 0;
        vdev->devid  = 0;
 
+       if (vdev->udev)
+               usb_put_dev(vdev->udev);
+       vdev->udev = NULL;
+
        ud->tcp_socket = NULL;
 
        ud->status = VDEV_ST_NULL;
index 8147d72..bf69914 100644 (file)
 #include "vhci.h"
 
 
-/* get URB from transmitted urb queue */
-static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
                                            __u32 seqnum)
 {
        struct vhci_priv *priv, *tmp;
        struct urb *urb = NULL;
        int status;
 
-       spin_lock(&vdev->priv_lock);
-
        list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
                if (priv->seqnum == seqnum) {
                        urb = priv->urb;
@@ -63,8 +61,6 @@ static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
                }
        }
 
-       spin_unlock(&vdev->priv_lock);
-
        return urb;
 }
 
@@ -74,9 +70,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
 
+       spin_lock(&vdev->priv_lock);
 
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
 
+       spin_unlock(&vdev->priv_lock);
 
        if (!urb) {
                usbip_uerr("cannot find a urb of seqnum %u\n",
@@ -161,7 +159,12 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                return;
        }
 
+       spin_lock(&vdev->priv_lock);
+
        urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+
+       spin_unlock(&vdev->priv_lock);
+
        if (!urb) {
                /*
                 * I get the result of a unlink request. But, it seems that I
@@ -190,6 +193,19 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
        return;
 }
 
+static int vhci_priv_tx_empty(struct vhci_device *vdev)
+{
+       int empty = 0;
+
+       spin_lock(&vdev->priv_lock);
+
+       empty = list_empty(&vdev->priv_rx);
+
+       spin_unlock(&vdev->priv_lock);
+
+       return empty;
+}
+
 /* recv a pdu */
 static void vhci_rx_pdu(struct usbip_device *ud)
 {
@@ -202,11 +218,29 @@ static void vhci_rx_pdu(struct usbip_device *ud)
 
        memset(&pdu, 0, sizeof(pdu));
 
-
        /* 1. receive a pdu header */
        ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+       if (ret < 0) {
+               if (ret == -ECONNRESET)
+                       usbip_uinfo("connection reset by peer\n");
+               else if (ret == -EAGAIN) {
+                       /* ignore if connection was idle */
+                       if (vhci_priv_tx_empty(vdev))
+                               return;
+                       usbip_uinfo("connection timed out with pending urbs\n");
+               } else if (ret != -ERESTARTSYS)
+                       usbip_uinfo("xmit failed %d\n", ret);
+
+               usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+               return;
+       }
+       if (ret == 0) {
+               usbip_uinfo("connection closed");
+               usbip_event_add(ud, VDEV_EVENT_DOWN);
+               return;
+       }
        if (ret != sizeof(pdu)) {
-               usbip_uerr("receiving pdu failed! size is %d, should be %d\n",
+               usbip_uerr("received pdu size is %d, should be %d\n",
                                        ret, (unsigned int)sizeof(pdu));
                usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
                return;
diff --git a/drivers/staging/vme/bridges/Module.symvers b/drivers/staging/vme/bridges/Module.symvers
deleted file mode 100644 (file)
index e69de29..0000000
index 2163d60..3724e1e 100644 (file)
@@ -118,13 +118,14 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
        *total_flags = new_flags;
 }
 
-static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct wbsoft_priv *priv = dev->priv;
 
        if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) {
                priv->sMlmeFrame.wNumTxMMPDUDiscarded++;
-               return NETDEV_TX_BUSY;
+               kfree_skb(skb);
+               return;
        }
 
        priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
@@ -140,8 +141,6 @@ static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
         */
 
        Mds_Tx(priv);
-
-       return NETDEV_TX_OK;
 }
 
 static int wbsoft_start(struct ieee80211_hw *dev)
index 7016fdd..e19b932 100644 (file)
@@ -3954,8 +3954,8 @@ void XGI_GetCRT2ResInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
 unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
 {
 
-       if ((((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA))
-                       && (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+       if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+                       (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
                return 1;
 
        return 0;
@@ -8773,7 +8773,7 @@ unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
 
        if (pVBInfo->IF_DEF_LVDS == 0) {
                CRT2Index = CRT2Index >> 6; /*  for LCD */
-               if (((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA)) { /*301b*/
+               if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
                        if (pVBInfo->LCDResInfo != Panel1024x768)
                                VCLKIndex = LCDXlat2VCLK[CRT2Index];
                        else
index 5415712..4bd8cbd 100644 (file)
@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
 
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        handle_zero_page(page);
+                       index++;
                        continue;
                }
 
@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
                        pr_debug("Read before write: sector=%lu, size=%u",
                                (ulong)(bio->bi_sector), bio->bi_size);
                        /* Do nothing */
+                       index++;
                        continue;
                }
 
                /* Page is stored uncompressed since it's incompressible */
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                        handle_uncompressed_page(zram, page, index);
+                       index++;
                        continue;
                }
 
@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
                        mutex_unlock(&zram->lock);
                        zram_stat_inc(&zram->stats.pages_zero);
                        zram_set_flag(zram, index, ZRAM_ZERO);
+                       index++;
                        continue;
                }
 
index 5cfd708..973bb19 100644 (file)
@@ -13,8 +13,7 @@ target_core_mod-y             := target_core_configfs.o \
                                   target_core_transport.o \
                                   target_core_cdb.o \
                                   target_core_ua.o \
-                                  target_core_rd.o \
-                                  target_core_mib.o
+                                  target_core_rd.o
 
 obj-$(CONFIG_TARGET_CORE)      += target_core_mod.o
 
index 2764510..caf8dc1 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/parser.h>
 #include <linux/syscalls.h>
 #include <linux/configfs.h>
-#include <linux/proc_fs.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
 {
        struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
                                struct se_subsystem_dev, se_dev_group);
-       struct config_group *dev_cg;
-
-       if (!(se_dev))
-               return;
+       struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+       struct se_subsystem_api *t = hba->transport;
+       struct config_group *dev_cg = &se_dev->se_dev_group;
 
-       dev_cg = &se_dev->se_dev_group;
        kfree(dev_cg->default_groups);
+       /*
+        * This pointer will be set when the storage is enabled with:
+        * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+        */
+       if (se_dev->se_dev_ptr) {
+               printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+                       "virtual_device() for se_dev_ptr: %p\n",
+                       se_dev->se_dev_ptr);
+
+               se_free_virtual_device(se_dev->se_dev_ptr, hba);
+       } else {
+               /*
+                * Release struct se_subsystem_dev->se_dev_su_ptr..
+                */
+               printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+                       "device() for se_dev_su_ptr: %p\n",
+                       se_dev->se_dev_su_ptr);
+
+               t->free_device(se_dev->se_dev_su_ptr);
+       }
+
+       printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+                       "_dev_t: %p\n", se_dev);
+       kfree(se_dev);
 }
 
 static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
        NULL,
 };
 
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+       struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+                       struct t10_alua_lu_gp, lu_gp_group);
+
+       core_alua_free_lu_gp(lu_gp);
+}
+
 static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+       .release                = target_core_alua_lu_gp_release,
        .show_attribute         = target_core_alua_lu_gp_attr_show,
        .store_attribute        = target_core_alua_lu_gp_attr_store,
 };
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
        printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
                " Group: core/alua/lu_gps/%s, ID: %hu\n",
                config_item_name(item), lu_gp->lu_gp_id);
-
+       /*
+        * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+        * -> target_core_alua_lu_gp_release()
+        */
        config_item_put(item);
-       core_alua_free_lu_gp(lu_gp);
 }
 
 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
        NULL,
 };
 
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+                       struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+       core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+       .release                = target_core_alua_tg_pt_gp_release,
        .show_attribute         = target_core_alua_tg_pt_gp_attr_show,
        .store_attribute        = target_core_alua_tg_pt_gp_attr_store,
 };
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
        printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
                " Group: alua/tg_pt_gps/%s, ID: %hu\n",
                config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+       /*
+        * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+        * -> target_core_alua_tg_pt_gp_release().
+        */
        config_item_put(item);
-       core_alua_free_tg_pt_gp(tg_pt_gp);
 }
 
 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
        struct se_subsystem_api *t;
        struct config_item *df_item;
        struct config_group *dev_cg, *tg_pt_gp_cg;
-       int i, ret;
+       int i;
 
        hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
 
-       if (mutex_lock_interruptible(&hba->hba_access_mutex))
-               goto out;
-
+       mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
        spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
                config_item_put(df_item);
        }
        kfree(tg_pt_gp_cg->default_groups);
-       core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+       /*
+        * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is now called
+        * directly from target_core_alua_tg_pt_gp_release().
+        */
        T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
 
        dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
                dev_cg->default_groups[i] = NULL;
                config_item_put(df_item);
        }
-
-       config_item_put(item);
        /*
-        * This pointer will set when the storage is enabled with:
-        * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+        * The release of se_dev and the associated se_dev->se_dev_ptr is done
+        * from target_core_dev_item_ops->release() -> target_core_dev_release().
         */
-       if (se_dev->se_dev_ptr) {
-               printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
-                       "virtual_device() for se_dev_ptr: %p\n",
-                               se_dev->se_dev_ptr);
-
-               ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
-               if (ret < 0)
-                       goto hba_out;
-       } else {
-               /*
-                * Release struct se_subsystem_dev->se_dev_su_ptr..
-                */
-               printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
-                       "device() for se_dev_su_ptr: %p\n",
-                       se_dev->se_dev_su_ptr);
-
-               t->free_device(se_dev->se_dev_su_ptr);
-       }
-
-       printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
-               "_dev_t: %p\n", se_dev);
-
-hba_out:
+       config_item_put(item);
        mutex_unlock(&hba->hba_access_mutex);
-out:
-       kfree(se_dev);
 }
 
 static struct configfs_group_operations target_core_hba_group_ops = {
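The mutex change in target_core_drop_subdev() above follows from configfs semantics: ->drop_item() returns void, so a failed mutex_lock_interruptible() had nowhere to report its error and simply skipped the teardown. Taking the mutex unconditionally keeps the drop path non-failing. The following is a hedged sketch of the resulting shape of such a callback; the mutex name and the locked section are illustrative, not the driver's actual code.

#include <linux/configfs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_access_mutex);      /* hypothetical */

static void example_drop_item(
        struct config_group *group,
        struct config_item *item)
{
        /* drop_item() cannot fail, so take the mutex unconditionally. */
        mutex_lock(&example_access_mutex);
        /* ... unlink the item from driver-private lists ... */
        mutex_unlock(&example_access_mutex);

        /* The actual kfree() happens later, in ->release(). */
        config_item_put(item);
}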
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
 
+static void target_core_hba_release(struct config_item *item)
+{
+       struct se_hba *hba = container_of(to_config_group(item),
+                               struct se_hba, hba_group);
+       core_delete_hba(hba);
+}
+
 static struct configfs_attribute *target_core_hba_attrs[] = {
        &target_core_hba_hba_info.attr,
        &target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
 };
 
 static struct configfs_item_operations target_core_hba_item_ops = {
+       .release                = target_core_hba_release,
        .show_attribute         = target_core_hba_attr_show,
        .store_attribute        = target_core_hba_attr_store,
 };
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_hba *hba = item_to_hba(item);
-
+       /*
+        * core_delete_hba() is called from target_core_hba_item_ops->release()
+        * -> target_core_hba_release()
+        */
        config_item_put(item);
-       core_delete_hba(hba);
 }
 
 static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
        struct config_group *lu_gp_cg = NULL;
        struct configfs_subsystem *subsys;
-       struct proc_dir_entry *scsi_target_proc = NULL;
        struct t10_alua_lu_gp *lu_gp;
        int ret;
 
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
        if (core_dev_setup_virtual_lun0() < 0)
                goto out;
 
-       scsi_target_proc = proc_mkdir("scsi_target", 0);
-       if (!(scsi_target_proc)) {
-               printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
-               goto out;
-       }
-       ret = init_scsi_target_mib();
-       if (ret < 0)
-               goto out;
-
        return 0;
 
 out:
        configfs_unregister_subsystem(subsys);
-       if (scsi_target_proc)
-               remove_proc_entry("scsi_target", 0);
        core_dev_release_virtual_lun0();
        rd_module_exit();
 out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
                config_item_put(item);
        }
        kfree(lu_gp_cg->default_groups);
-       core_alua_free_lu_gp(se_global->default_lu_gp);
-       se_global->default_lu_gp = NULL;
+       lu_gp_cg->default_groups = NULL;
 
        alua_cg = &se_global->alua_group;
        for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
                config_item_put(item);
        }
        kfree(alua_cg->default_groups);
+       alua_cg->default_groups = NULL;
 
        hba_cg = &se_global->target_core_hbagroup;
        for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
                config_item_put(item);
        }
        kfree(hba_cg->default_groups);
-
-       for (i = 0; subsys->su_group.default_groups[i]; i++) {
-               item = &subsys->su_group.default_groups[i]->cg_item;
-               subsys->su_group.default_groups[i] = NULL;
-               config_item_put(item);
-       }
+       hba_cg->default_groups = NULL;
+       /*
+        * We expect subsys->su_group.default_groups to be released
+        * by the configfs subsystem provider logic.
+        */
+       configfs_unregister_subsystem(subsys);
        kfree(subsys->su_group.default_groups);
 
-       configfs_unregister_subsystem(subsys);
+       core_alua_free_lu_gp(se_global->default_lu_gp);
+       se_global->default_lu_gp = NULL;
+
        printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
                        " Infrastructure\n");
 
-       remove_scsi_target_mib();
-       remove_proc_entry("scsi_target", 0);
        core_dev_release_virtual_lun0();
        rd_module_exit();
        release_se_global();
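The reworked exit path above pairs each kfree() of a default_groups array with setting the pointer to NULL, and calls configfs_unregister_subsystem() before the final kfree() and before releasing the default LU group, so nothing walked during unregistration points at freed memory. The free-then-NULL idiom in isolation, as a plain-C sketch with hypothetical types:

#include <stdlib.h>

struct group {
        void **default_groups;  /* NULL-terminated array, as in configfs */
};

static void drop_default_groups(struct group *g)
{
        free(g->default_groups);
        g->default_groups = NULL;  /* later walkers see "empty", not stale memory */
}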
index 317ce58..5da051a 100644 (file)
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
                /*
                 * deve->se_lun_acl will be NULL for demo-mode created LUNs
                 * that have not been explicitly converted to MappedLUNs ->
-                * struct se_lun_acl.
+                * struct se_lun_acl, but we remove deve->alua_port_list from
+                * port->sep_alua_list. This also means that active UAs and
+                * NodeACL context specific PR metadata for demo-mode
+                * MappedLUN *deve will be released below.
                 */
-               if (!(deve->se_lun_acl))
-                       return 0;
-
                spin_lock_bh(&port->sep_alua_lock);
                list_del(&deve->alua_port_list);
                spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
                                printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
                                        " already set for demo mode -> explict"
                                        " LUN ACL transition\n");
+                               spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        if (deve->se_lun != lun) {
                                printk(KERN_ERR "struct se_dev_entry->se_lun does"
                                        " match passed struct se_lun for demo mode"
                                        " -> explict LUN ACL transition\n");
+                               spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
                }
        }
        spin_unlock(&hba->device_lock);
-
-       while (atomic_read(&hba->dev_mib_access_count))
-               cpu_relax();
 }
 
 int se_dev_check_online(struct se_device *dev)
index 32b148d..b65d1c8 100644 (file)
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
 
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+       struct se_lun_acl *lacl = container_of(to_config_group(item),
+                               struct se_lun_acl, se_lun_group);
+       struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+       core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
        &target_fabric_mappedlun_write_protect.attr,
        NULL,
 };
 
 static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+       .release                = target_fabric_mappedlun_release,
        .show_attribute         = target_fabric_mappedlun_attr_show,
        .store_attribute        = target_fabric_mappedlun_attr_store,
        .allow_link             = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_lun_acl *lacl = container_of(to_config_group(item),
-                       struct se_lun_acl, se_lun_group);
-       struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
        config_item_put(item);
-       core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+       struct se_node_acl *se_nacl = container_of(to_config_group(item),
+                       struct se_node_acl, acl_group);
+       struct se_portal_group *se_tpg = se_nacl->se_tpg;
+       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+       .release                = target_fabric_nacl_base_release,
        .show_attribute         = target_fabric_nacl_base_attr_show,
        .store_attribute        = target_fabric_nacl_base_attr_store,
 };
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_portal_group *se_tpg = container_of(group,
-                       struct se_portal_group, tpg_acl_group);
-       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
        struct se_node_acl *se_nacl = container_of(to_config_group(item),
                        struct se_node_acl, acl_group);
        struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
                nacl_cg->default_groups[i] = NULL;
                config_item_put(df_item);
        }
-
+       /*
+        * struct se_node_acl is freed in target_fabric_nacl_base_release()
+        */
        config_item_put(item);
-       tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
 
 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
 
+static void target_fabric_np_base_release(struct config_item *item)
+{
+       struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+                               struct se_tpg_np, tpg_np_group);
+       struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
 static struct configfs_item_operations target_fabric_np_base_item_ops = {
+       .release                = target_fabric_np_base_release,
        .show_attribute         = target_fabric_np_base_attr_show,
        .store_attribute        = target_fabric_np_base_attr_store,
 };
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
        if (!(se_tpg_np) || IS_ERR(se_tpg_np))
                return ERR_PTR(-EINVAL);
 
+       se_tpg_np->tpg_np_parent = se_tpg;
        config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
                        &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
 
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_portal_group *se_tpg = container_of(group,
-                               struct se_portal_group, tpg_np_group);
-       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
-       struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
-                               struct se_tpg_np, tpg_np_group);
-
+       /*
+        * struct se_tpg_np is released via target_fabric_np_base_release()
+        */
        config_item_put(item);
-       tf->tf_ops.fabric_drop_np(se_tpg_np);
 }
 
 static struct configfs_group_operations target_fabric_np_group_ops = {
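A second consequence of moving teardown into ->release() shows up in the network-portal code above: the release callback receives only the config_item, so the owning se_portal_group can no longer be recovered from the enclosing config_group the way the old drop path did, and target_fabric_make_np() now stores a tpg_np_parent back-pointer at creation time for target_fabric_np_base_release() to use. A compilable stand-alone sketch of that idea (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct parent {
        const char *name;
};

struct child {
        struct parent *parent;  /* back-pointer stored at creation time */
};

/* "make" time: the parent is in scope, so remember it for later. */
static struct child *make_child(struct parent *p)
{
        struct child *c = calloc(1, sizeof(*c));

        if (c)
                c->parent = p;
        return c;
}

/* "release" time: only the child is passed in, but the parent is still
 * reachable through the stored back-pointer.
 */
static void release_child(struct child *c)
{
        printf("releasing child of %s\n", c->parent->name);
        free(c);
}

int main(void)
{
        struct parent p = { .name = "tpg" };
        struct child *c = make_child(&p);

        if (c)
                release_child(c);
        return 0;
}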
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
  */
 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
 
+static void target_fabric_tpg_release(struct config_item *item)
+{
+       struct se_portal_group *se_tpg = container_of(to_config_group(item),
+                       struct se_portal_group, tpg_group);
+       struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+       struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+       .release                = target_fabric_tpg_release,
        .show_attribute         = target_fabric_tpg_attr_show,
        .store_attribute        = target_fabric_tpg_attr_store,
 };
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
-       struct target_fabric_configfs *tf = wwn->wwn_tf;
        struct se_portal_group *se_tpg = container_of(to_config_group(item),
                                struct se_portal_group, tpg_group);
        struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
        }
 
        config_item_put(item);
-       tf->tf_ops.fabric_drop_tpg(se_tpg);
 }
 
+static void target_fabric_release_wwn(struct config_item *item)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item),
+                               struct se_wwn, wwn_group);
+       struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+       tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+       .release        = target_fabric_release_wwn,
+};
+
 static struct configfs_group_operations target_fabric_tpg_group_ops = {
        .make_group     = target_fabric_make_tpg,
        .drop_item      = target_fabric_drop_tpg,
 };
 
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+               NULL);
 
 /* End of tfc_tpg_cit */
 
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
        struct config_group *group,
        struct config_item *item)
 {
-       struct target_fabric_configfs *tf = container_of(group,
-                               struct target_fabric_configfs, tf_group);
-       struct se_wwn *wwn = container_of(to_config_group(item),
-                               struct se_wwn, wwn_group);
-
        config_item_put(item);
-       tf->tf_ops.fabric_drop_wwn(wwn);
 }
 
 static struct configfs_group_operations target_fabric_wwn_group_ops = {
index c6e0d75..67f0c09 100644 (file)
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
 
        bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
-       if (!(bd))
+       if (IS_ERR(bd))
                goto failed;
        /*
         * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
 {
        struct iblock_dev *ib_dev = p;
 
-       blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-       bioset_free(ib_dev->ibd_bio_set);
+       if (ib_dev->ibd_bd != NULL)
+               blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+       if (ib_dev->ibd_bio_set != NULL)
+               bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
 }
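Both blkdev_get_by_path() fixes in this series (here and in the pSCSI backend further down) rest on the same convention: the function returns an ERR_PTR()-encoded errno on failure, never NULL, so the old NULL test let error pointers through to be dereferenced later. A minimal sketch of the check the patch switches to, assuming the 2.6.38-era blkdev_get_by_path() signature; the surrounding helper and its error handling are hypothetical:

#include <linux/err.h>
#include <linux/fs.h>

static struct block_device *claim_backing_dev(const char *path, void *holder)
{
        struct block_device *bd;

        bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL,
                                holder);
        if (IS_ERR(bd)) {
                /* never NULL on failure: decode the errno from the pointer */
                printk(KERN_ERR "blkdev_get_by_path(%s) failed: %ld\n",
                                path, PTR_ERR(bd));
                return NULL;
        }
        return bd;
}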
 
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644 (file)
index d5a48aa..0000000
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename:  target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX             1
-#define SCSI_TRANSPORT_INDEX           1
-
-#define NONE           "None"
-#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
-                               const struct list_head *head)
-{
-       return list->prev == head;
-}
-
-static void *locate_hba_start(
-       struct seq_file *seq,
-       loff_t *pos)
-{
-       spin_lock(&se_global->g_device_lock);
-       return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
-       struct seq_file *seq,
-       void *v,
-       loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
-       struct seq_file *seq,
-       loff_t *pos)
-{
-       spin_lock(&se_global->hba_lock);
-       return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
-       struct seq_file *seq,
-       void *v,
-       loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
-       if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
-               seq_puts(seq, "inst sw_indx\n");
-
-       seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
-       seq_printf(seq, "plugin: %s version: %s\n",
-                       hba->transport->name, TARGET_CORE_VERSION);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
-       .start  = scsi_inst_seq_start,
-       .next   = scsi_inst_seq_next,
-       .stop   = scsi_inst_seq_stop,
-       .show   = scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_inst_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       char str[28];
-       int k;
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst indx role ports\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
-                  dev->dev_index, "Target", dev->dev_port_count);
-
-       memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
-       /* vendor */
-       for (k = 0; k < 8; k++)
-               str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
-                               DEV_T10_WWN(dev)->vendor[k] : 0x20;
-       str[k] = 0x20;
-
-       /* model */
-       for (k = 0; k < 16; k++)
-               str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
-                               DEV_T10_WWN(dev)->model[k] : 0x20;
-       str[k + 9] = 0;
-
-       seq_printf(seq, "dev_alias: %s\n", str);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
-       .start  = scsi_dev_seq_start,
-       .next   = scsi_dev_seq_next,
-       .stop   = scsi_dev_seq_stop,
-       .show   = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_dev_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       struct se_port *sep, *sep_tmp;
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst device indx role busy_count\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       /* FIXME: scsiPortBusyStatuses count */
-       spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-               seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
-                       dev->dev_index, sep->sep_index, "Device",
-                       dev->dev_index, 0);
-       }
-       spin_unlock(&dev->se_port_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
-       .start  = scsi_port_seq_start,
-       .next   = scsi_port_seq_next,
-       .stop   = scsi_port_seq_stop,
-       .show   = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_port_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       struct se_port *se, *se_tmp;
-       struct se_portal_group *tpg;
-       struct t10_wwn *wwn;
-       char buf[64];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst device indx dev_name\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       wwn = DEV_T10_WWN(dev);
-
-       spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
-               tpg = se->sep_tpg;
-               sprintf(buf, "scsiTransport%s",
-                               TPG_TFO(tpg)->get_fabric_name());
-
-               seq_printf(seq, "%u %s %u %s+%s\n",
-                       hba->hba_index, /* scsiTransportIndex */
-                       buf,  /* scsiTransportType */
-                       (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
-                       TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
-                       0,
-                       TPG_TFO(tpg)->tpg_get_wwn(tpg),
-                       (strlen(wwn->unit_serial)) ?
-                       /* scsiTransportDevName */
-                       wwn->unit_serial : wwn->vendor);
-       }
-       spin_unlock(&dev->se_port_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
-       .start  = scsi_transport_seq_start,
-       .next   = scsi_transport_seq_next,
-       .stop   = scsi_transport_seq_stop,
-       .show   = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_transport_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT       1  /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       int non_accessible_lus = 0;
-       char status[16];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst indx num_LUs status non_access_LUs"
-                       " resets\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       switch (dev->dev_status) {
-       case TRANSPORT_DEVICE_ACTIVATED:
-               strcpy(status, "activated");
-               break;
-       case TRANSPORT_DEVICE_DEACTIVATED:
-               strcpy(status, "deactivated");
-               non_accessible_lus = 1;
-               break;
-       case TRANSPORT_DEVICE_SHUTDOWN:
-               strcpy(status, "shutdown");
-               non_accessible_lus = 1;
-               break;
-       case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-       case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-               strcpy(status, "offline");
-               non_accessible_lus = 1;
-               break;
-       default:
-               sprintf(status, "unknown(%d)", dev->dev_status);
-               non_accessible_lus = 1;
-       }
-
-       seq_printf(seq, "%u %u %u %s %u %u\n",
-                  hba->hba_index, dev->dev_index, LU_COUNT,
-                  status, non_accessible_lus, dev->num_resets);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_tgt_dev_seq_ops = {
-       .start  = scsi_tgt_dev_seq_start,
-       .next   = scsi_tgt_dev_seq_next,
-       .stop   = scsi_tgt_dev_seq_stop,
-       .show   = scsi_tgt_dev_seq_show
-};
-
-static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_tgt_dev_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_dev_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_tgt_dev_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Target Port Table
- */
-static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       struct se_port *sep, *sep_tmp;
-       struct se_portal_group *tpg;
-       u32 rx_mbytes, tx_mbytes;
-       unsigned long long num_cmds;
-       char buf[64];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst device indx name port_index in_cmds"
-                       " write_mbytes read_mbytes hs_in_cmds\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-               tpg = sep->sep_tpg;
-               sprintf(buf, "%sPort#",
-                       TPG_TFO(tpg)->get_fabric_name());
-
-               seq_printf(seq, "%u %u %u %s%d %s%s%d ",
-                    hba->hba_index,
-                    dev->dev_index,
-                    sep->sep_index,
-                    buf, sep->sep_index,
-                    TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
-                    TPG_TFO(tpg)->tpg_get_tag(tpg));
-
-               spin_lock(&sep->sep_lun->lun_sep_lock);
-               num_cmds = sep->sep_stats.cmd_pdus;
-               rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
-               tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
-               spin_unlock(&sep->sep_lun->lun_sep_lock);
-
-               seq_printf(seq, "%llu %u %u %u\n", num_cmds,
-                       rx_mbytes, tx_mbytes, 0);
-       }
-       spin_unlock(&dev->se_port_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_tgt_port_seq_ops = {
-       .start  = scsi_tgt_port_seq_start,
-       .next   = scsi_tgt_port_seq_next,
-       .stop   = scsi_tgt_port_seq_stop,
-       .show   = scsi_tgt_port_seq_show
-};
-
-static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_tgt_port_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_port_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_tgt_port_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Authorized Initiator Table:
- * It contains the SCSI Initiators authorized to be attached to one of the
- * local Target ports.
- * Iterates through all active TPGs and extracts the info from the ACLs
- */
-static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       spin_lock_bh(&se_global->se_tpg_lock);
-       return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
-                                        loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-                                               se_tpg_list);
-       struct se_dev_entry *deve;
-       struct se_lun *lun;
-       struct se_node_acl *se_nacl;
-       int j;
-
-       if (list_is_first(&se_tpg->se_tpg_list,
-                         &se_global->g_se_tpg_list))
-               seq_puts(seq, "inst dev port indx dev_or_port intr_name "
-                        "map_indx att_count num_cmds read_mbytes "
-                        "write_mbytes hs_num_cmds creation_time row_status\n");
-
-       if (!(se_tpg))
-               return 0;
-
-       spin_lock(&se_tpg->acl_node_lock);
-       list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
-
-               atomic_inc(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_inc();
-               spin_unlock(&se_tpg->acl_node_lock);
-
-               spin_lock_irq(&se_nacl->device_list_lock);
-               for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-                       deve = &se_nacl->device_list[j];
-                       if (!(deve->lun_flags &
-                                       TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-                           (!deve->se_lun))
-                               continue;
-                       lun = deve->se_lun;
-                       if (!lun->lun_se_dev)
-                               continue;
-
-                       seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
-                                       " %u %s\n",
-                               /* scsiInstIndex */
-                               (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-                               TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-                               0,
-                               /* scsiDeviceIndex */
-                               lun->lun_se_dev->dev_index,
-                               /* scsiAuthIntrTgtPortIndex */
-                               TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-                               /* scsiAuthIntrIndex */
-                               se_nacl->acl_index,
-                               /* scsiAuthIntrDevOrPort */
-                               1,
-                               /* scsiAuthIntrName */
-                               se_nacl->initiatorname[0] ?
-                                       se_nacl->initiatorname : NONE,
-                               /* FIXME: scsiAuthIntrLunMapIndex */
-                               0,
-                               /* scsiAuthIntrAttachedTimes */
-                               deve->attach_count,
-                               /* scsiAuthIntrOutCommands */
-                               deve->total_cmds,
-                               /* scsiAuthIntrReadMegaBytes */
-                               (u32)(deve->read_bytes >> 20),
-                               /* scsiAuthIntrWrittenMegaBytes */
-                               (u32)(deve->write_bytes >> 20),
-                               /* FIXME: scsiAuthIntrHSOutCommands */
-                               0,
-                               /* scsiAuthIntrLastCreation */
-                               (u32)(((u32)deve->creation_time -
-                                           INITIAL_JIFFIES) * 100 / HZ),
-                               /* FIXME: scsiAuthIntrRowStatus */
-                               "Ready");
-               }
-               spin_unlock_irq(&se_nacl->device_list_lock);
-
-               spin_lock(&se_tpg->acl_node_lock);
-               atomic_dec(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_dec();
-       }
-       spin_unlock(&se_tpg->acl_node_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_auth_intr_seq_ops = {
-       .start  = scsi_auth_intr_seq_start,
-       .next   = scsi_auth_intr_seq_next,
-       .stop   = scsi_auth_intr_seq_stop,
-       .show   = scsi_auth_intr_seq_show
-};
-
-static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_auth_intr_seq_ops);
-}
-
-static const struct file_operations scsi_auth_intr_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_auth_intr_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Attached Initiator Port Table:
- * It lists the SCSI Initiators attached to one of the local Target ports.
- * Iterates through all active TPGs and use active sessions from each TPG
- * to list the info fo this table.
- */
-static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       spin_lock_bh(&se_global->se_tpg_lock);
-       return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
-                                        loff_t *pos)
-{
-       return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
-{
-       spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-                                               se_tpg_list);
-       struct se_dev_entry *deve;
-       struct se_lun *lun;
-       struct se_node_acl *se_nacl;
-       struct se_session *se_sess;
-       unsigned char buf[64];
-       int j;
-
-       if (list_is_first(&se_tpg->se_tpg_list,
-                         &se_global->g_se_tpg_list))
-               seq_puts(seq, "inst dev port indx port_auth_indx port_name"
-                       " port_ident\n");
-
-       if (!(se_tpg))
-               return 0;
-
-       spin_lock(&se_tpg->session_lock);
-       list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
-               if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
-                   (!se_sess->se_node_acl) ||
-                   (!se_sess->se_node_acl->device_list))
-                       continue;
-
-               atomic_inc(&se_sess->mib_ref_count);
-               smp_mb__after_atomic_inc();
-               se_nacl = se_sess->se_node_acl;
-               atomic_inc(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_inc();
-               spin_unlock(&se_tpg->session_lock);
-
-               spin_lock_irq(&se_nacl->device_list_lock);
-               for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-                       deve = &se_nacl->device_list[j];
-                       if (!(deve->lun_flags &
-                                       TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-                          (!deve->se_lun))
-                               continue;
-
-                       lun = deve->se_lun;
-                       if (!lun->lun_se_dev)
-                               continue;
-
-                       memset(buf, 0, 64);
-                       if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
-                               TPG_TFO(se_tpg)->sess_get_initiator_sid(
-                                       se_sess, (unsigned char *)&buf[0], 64);
-
-                       seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
-                               /* scsiInstIndex */
-                               (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-                               TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-                               0,
-                               /* scsiDeviceIndex */
-                               lun->lun_se_dev->dev_index,
-                               /* scsiPortIndex */
-                               TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-                               /* scsiAttIntrPortIndex */
-                               (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
-                               TPG_TFO(se_tpg)->sess_get_index(se_sess) :
-                               0,
-                               /* scsiAttIntrPortAuthIntrIdx */
-                               se_nacl->acl_index,
-                               /* scsiAttIntrPortName */
-                               se_nacl->initiatorname[0] ?
-                                       se_nacl->initiatorname : NONE,
-                               /* scsiAttIntrPortIdentifier */
-                               buf);
-               }
-               spin_unlock_irq(&se_nacl->device_list_lock);
-
-               spin_lock(&se_tpg->session_lock);
-               atomic_dec(&se_nacl->mib_ref_count);
-               smp_mb__after_atomic_dec();
-               atomic_dec(&se_sess->mib_ref_count);
-               smp_mb__after_atomic_dec();
-       }
-       spin_unlock(&se_tpg->session_lock);
-
-       return 0;
-}
-
-static const struct seq_operations scsi_att_intr_port_seq_ops = {
-       .start  = scsi_att_intr_port_seq_start,
-       .next   = scsi_att_intr_port_seq_next,
-       .stop   = scsi_att_intr_port_seq_stop,
-       .show   = scsi_att_intr_port_seq_show
-};
-
-static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_att_intr_port_seq_ops);
-}
-
-static const struct file_operations scsi_att_intr_port_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_att_intr_port_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/*
- * SCSI Logical Unit Table
- */
-static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return locate_hba_start(seq, pos);
-}
-
-static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
-{
-       locate_hba_stop(seq, v);
-}
-
-#define SCSI_LU_INDEX          1
-static int scsi_lu_seq_show(struct seq_file *seq, void *v)
-{
-       struct se_hba *hba;
-       struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-                                               g_se_dev_list);
-       struct se_device *dev = se_dev->se_dev_ptr;
-       int j;
-       char str[28];
-
-       if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-               seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
-               " dev_type status state-bit num_cmds read_mbytes"
-               " write_mbytes resets full_stat hs_num_cmds creation_time\n");
-
-       if (!(dev))
-               return 0;
-
-       hba = dev->se_hba;
-       if (!(hba)) {
-               /* Log error ? */
-               return 0;
-       }
-
-       /* Fix LU state, if we can read it from the device */
-       seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
-                       dev->dev_index, SCSI_LU_INDEX,
-                       (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
-                       (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
-                       /* scsiLuWwnName */
-                       (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
-                       "None");
-
-       memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-       /* scsiLuVendorId */
-       for (j = 0; j < 8; j++)
-               str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
-                       DEV_T10_WWN(dev)->vendor[j] : 0x20;
-       str[8] = 0;
-       seq_printf(seq, " %s", str);
-
-       /* scsiLuProductId */
-       for (j = 0; j < 16; j++)
-               str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
-                       DEV_T10_WWN(dev)->model[j] : 0x20;
-       str[16] = 0;
-       seq_printf(seq, " %s", str);
-
-       /* scsiLuRevisionId */
-       for (j = 0; j < 4; j++)
-               str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
-                       DEV_T10_WWN(dev)->revision[j] : 0x20;
-       str[4] = 0;
-       seq_printf(seq, " %s", str);
-
-       seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
-               /* scsiLuPeripheralType */
-                  TRANSPORT(dev)->get_device_type(dev),
-                  (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
-               "available" : "notavailable", /* scsiLuStatus */
-               "exposed",      /* scsiLuState */
-               (unsigned long long)dev->num_cmds,
-               /* scsiLuReadMegaBytes */
-               (u32)(dev->read_bytes >> 20),
-               /* scsiLuWrittenMegaBytes */
-               (u32)(dev->write_bytes >> 20),
-               dev->num_resets, /* scsiLuInResets */
-               0, /* scsiLuOutTaskSetFullStatus */
-               0, /* scsiLuHSInCommands */
-               (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
-                                                       100 / HZ));
-
-       return 0;
-}
-
-static const struct seq_operations scsi_lu_seq_ops = {
-       .start  = scsi_lu_seq_start,
-       .next   = scsi_lu_seq_next,
-       .stop   = scsi_lu_seq_stop,
-       .show   = scsi_lu_seq_show
-};
-
-static int scsi_lu_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &scsi_lu_seq_ops);
-}
-
-static const struct file_operations scsi_lu_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = scsi_lu_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-/****************************************************************************/
-
-/*
- * Remove proc fs entries
- */
-void remove_scsi_target_mib(void)
-{
-       remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_port", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
-       remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
-       remove_proc_entry("scsi_target/mib", NULL);
-}
-
-/*
- * Create proc fs entries for the mib tables
- */
-int init_scsi_target_mib(void)
-{
-       struct proc_dir_entry *dir_entry;
-       struct proc_dir_entry *scsi_inst_entry;
-       struct proc_dir_entry *scsi_dev_entry;
-       struct proc_dir_entry *scsi_port_entry;
-       struct proc_dir_entry *scsi_transport_entry;
-       struct proc_dir_entry *scsi_tgt_dev_entry;
-       struct proc_dir_entry *scsi_tgt_port_entry;
-       struct proc_dir_entry *scsi_auth_intr_entry;
-       struct proc_dir_entry *scsi_att_intr_port_entry;
-       struct proc_dir_entry *scsi_lu_entry;
-
-       dir_entry = proc_mkdir("scsi_target/mib", NULL);
-       if (!(dir_entry)) {
-               printk(KERN_ERR "proc_mkdir() failed.\n");
-               return -1;
-       }
-
-       scsi_inst_entry =
-               create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
-       if (scsi_inst_entry)
-               scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
-       else
-               goto error;
-
-       scsi_dev_entry =
-               create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
-       if (scsi_dev_entry)
-               scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
-       else
-               goto error;
-
-       scsi_port_entry =
-               create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
-       if (scsi_port_entry)
-               scsi_port_entry->proc_fops = &scsi_port_seq_fops;
-       else
-               goto error;
-
-       scsi_transport_entry =
-               create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
-       if (scsi_transport_entry)
-               scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
-       else
-               goto error;
-
-       scsi_tgt_dev_entry =
-               create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
-       if (scsi_tgt_dev_entry)
-               scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
-       else
-               goto error;
-
-       scsi_tgt_port_entry =
-               create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
-       if (scsi_tgt_port_entry)
-               scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
-       else
-               goto error;
-
-       scsi_auth_intr_entry =
-               create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
-       if (scsi_auth_intr_entry)
-               scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
-       else
-               goto error;
-
-       scsi_att_intr_port_entry =
-             create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
-       if (scsi_att_intr_port_entry)
-               scsi_att_intr_port_entry->proc_fops =
-                               &scsi_att_intr_port_seq_fops;
-       else
-               goto error;
-
-       scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
-       if (scsi_lu_entry)
-               scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
-       else
-               goto error;
-
-       return 0;
-
-error:
-       printk(KERN_ERR "create_proc_entry() failed.\n");
-       remove_scsi_target_mib();
-       return -1;
-}
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables
- */
-void init_scsi_index_table(void)
-{
-       memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
-       spin_lock_init(&scsi_index_table.lock);
-}
-
-/*
- * Allocate a new row index for the entry type specified
- */
-u32 scsi_get_new_index(scsi_index_t type)
-{
-       u32 new_index;
-
-       if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
-               printk(KERN_ERR "Invalid index type %d\n", type);
-               return -1;
-       }
-
-       spin_lock(&scsi_index_table.lock);
-       new_index = ++scsi_index_table.scsi_mib_index[type];
-       if (new_index == 0)
-               new_index = ++scsi_index_table.scsi_mib_index[type];
-       spin_unlock(&scsi_index_table.lock);
-
-       return new_index;
-}
-EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644 (file)
index 2772046..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_MIB_H
-#define TARGET_CORE_MIB_H
-
-typedef enum {
-       SCSI_INST_INDEX,
-       SCSI_DEVICE_INDEX,
-       SCSI_AUTH_INTR_INDEX,
-       SCSI_INDEX_TYPE_MAX
-} scsi_index_t;
-
-struct scsi_index_table {
-       spinlock_t      lock;
-       u32             scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
-/* SCSI Port stats */
-struct scsi_port_stats {
-       u64     cmd_pdus;
-       u64     tx_data_octets;
-       u64     rx_data_octets;
-} ____cacheline_aligned;
-
-extern int init_scsi_target_mib(void);
-extern void remove_scsi_target_mib(void);
-extern void init_scsi_index_table(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-
-#endif   /*** TARGET_CORE_MIB_H ***/
index 742d246..f2a0847 100644 (file)
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
         */
        bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
-       if (!(bd)) {
-               printk("pSCSI: blkdev_get_by_path() failed\n");
+       if (IS_ERR(bd)) {
+               printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
                scsi_device_put(sd);
                return NULL;
        }
index abfa81a..c26f674 100644 (file)
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
-       atomic_set(&acl->mib_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
                cpu_relax();
 }
 
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
-       while (atomic_read(&nacl->mib_ref_count) != 0)
-               cpu_relax();
-}
-
 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
 {
        int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
        spin_unlock_bh(&tpg->session_lock);
 
        core_tpg_wait_for_nacl_pr_ref(acl);
-       core_tpg_wait_for_mib_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);
 
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+       struct se_node_acl *nacl, *nacl_tmp;
+
        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
+       /*
+        * Release any remaining demo-mode generated se_node_acls that have
+        * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+        * in transport_deregister_session().
+        */
+       spin_lock_bh(&se_tpg->acl_node_lock);
+       list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+                       acl_list) {
+               list_del(&nacl->acl_list);
+               se_tpg->num_node_acls--;
+               spin_unlock_bh(&se_tpg->acl_node_lock);
+
+               core_tpg_wait_for_nacl_pr_ref(nacl);
+               core_free_device_list_for_node(nacl, se_tpg);
+               TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+               spin_lock_bh(&se_tpg->acl_node_lock);
+       }
+       spin_unlock_bh(&se_tpg->acl_node_lock);
 
        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);
index 28b6292..236e22d 100644 (file)
@@ -379,6 +379,40 @@ void release_se_global(void)
        se_global = NULL;
 }
 
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+       memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+       spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+       u32 new_index;
+
+       if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+               printk(KERN_ERR "Invalid index type %d\n", type);
+               return -EINVAL;
+       }
+
+       spin_lock(&scsi_index_table.lock);
+       new_index = ++scsi_index_table.scsi_mib_index[type];
+       if (new_index == 0)
+               new_index = ++scsi_index_table.scsi_mib_index[type];
+       spin_unlock(&scsi_index_table.lock);
+
+       return new_index;
+}
+
 void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
        atomic_set(&qobj->queue_cnt, 0);
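
The helper added above hands out per-type row indexes under a spinlock and skips 0 on wrap-around, so callers can treat 0 as "not yet assigned". A hedged usage sketch; idx is illustrative and type stands for any scsi_index_t value below SCSI_INDEX_TYPE_MAX:

	u32 idx;

	init_scsi_index_table();	/* once, during subsystem setup */
	idx = scsi_get_new_index(type);	/* type < SCSI_INDEX_TYPE_MAX */
	/* idx is never 0: the counter is bumped a second time on wrap. */
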
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
        }
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
-       atomic_set(&se_sess->mib_ref_count, 0);
 
        return se_sess;
 }
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
                transport_free_session(se_sess);
                return;
        }
-       /*
-        * Wait for possible reference in drivers/target/target_core_mib.c:
-        * scsi_att_intr_port_seq_show()
-        */
-       while (atomic_read(&se_sess->mib_ref_count) != 0)
-               cpu_relax();
 
        spin_lock_bh(&se_tpg->session_lock);
        list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
                                spin_unlock_bh(&se_tpg->acl_node_lock);
 
                                core_tpg_wait_for_nacl_pr_ref(se_nacl);
-                               core_tpg_wait_for_mib_ref(se_nacl);
                                core_free_device_list_for_node(se_nacl, se_tpg);
                                TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
                                                se_nacl);
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
 
                return ret;
        }
+
+       BUG_ON(list_empty(se_mem_list));
        /*
         * This is the normal path for all normal non BIDI and BIDI-COMMAND
         * WRITE payloads..  If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
                struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
                u32 se_mem_cnt = 0, task_offset = 0;
 
-               BUG_ON(list_empty(cmd->t_task->t_mem_list));
+               if (!list_empty(T_TASK(cmd)->t_mem_list))
+                       se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+                                       struct se_mem, se_list);
 
                ret = transport_do_se_mem_map(dev, task,
                                cmd->t_task->t_mem_list, NULL, se_mem,
index f7a5dba..bf7c687 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig THERMAL
        tristate "Generic Thermal sysfs driver"
-       depends on NET
        help
          Generic Thermal Sysfs driver offers a generic mechanism for
          thermal management. Usually it's made up of one or more thermal
index 7d0e63c..713b7ea 100644 (file)
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
 
 static unsigned int thermal_event_seqnum;
 
-static struct genl_family thermal_event_genl_family = {
-       .id = GENL_ID_GENERATE,
-       .name = THERMAL_GENL_FAMILY_NAME,
-       .version = THERMAL_GENL_VERSION,
-       .maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
-       .name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
        int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .name = THERMAL_GENL_FAMILY_NAME,
+       .version = THERMAL_GENL_VERSION,
+       .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+       .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
 int generate_netlink_event(u32 orig, enum events event)
 {
        struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
        return result;
 }
 
+static void genetlink_exit(void)
+{
+       genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
 static int __init thermal_init(void)
 {
        int result = 0;
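
With the netlink pieces wrapped in CONFIG_NET above and no-op static inline stubs supplied otherwise, the Kconfig hunk earlier can drop the "depends on NET" line without breaking !NET builds. The general shape of that compile-out pattern, with placeholder names (CONFIG_FOO, foo_notify):

#ifdef CONFIG_FOO
int foo_notify(u32 event);				/* real implementation */
#else
static inline int foo_notify(u32 event) { return 0; }	/* compiles away */
#endif
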
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
        return result;
 }
 
-static void genetlink_exit(void)
-{
-       genl_unregister_family(&thermal_event_genl_family);
-}
-
 static void __exit thermal_exit(void)
 {
        class_unregister(&thermal_class);
index e6bed5f..d79e7e9 100644 (file)
@@ -10,4 +10,3 @@ obj-$(CONFIG_HVC_XEN)         += hvc_xen.o
 obj-$(CONFIG_HVC_IUCV)         += hvc_iucv.o
 obj-$(CONFIG_HVC_UDBG)         += hvc_udbg.o
 obj-$(CONFIG_HVCS)             += hvcs.o
-obj-$(CONFIG_VIRTIO_CONSOLE)   += virtio_console.o
index 44b8412..aa2e5d3 100644 (file)
@@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
 
        gsm->initiator = c->initiator;
        gsm->mru = c->mru;
+       gsm->mtu = c->mtu;
        gsm->encoding = c->encapsulation;
        gsm->adaption = c->adaption;
        gsm->n2 = c->n2;
index 47d3228..52fc0c9 100644 (file)
@@ -581,8 +581,9 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                           __u8 __user *buf, size_t nr)
 {
        struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
-       int ret;
+       int ret = 0;
        struct n_hdlc_buf *rbuf;
+       DECLARE_WAITQUEUE(wait, current);
 
        if (debuglevel >= DEBUG_LEVEL_INFO)     
                printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
@@ -598,57 +599,55 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                return -EFAULT;
        }
 
-       tty_lock();
+       add_wait_queue(&tty->read_wait, &wait);
 
        for (;;) {
                if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-                       tty_unlock();
-                       return -EIO;
+                       ret = -EIO;
+                       break;
                }
+               if (tty_hung_up_p(file))
+                       break;
 
-               n_hdlc = tty2n_hdlc (tty);
-               if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
-                        tty != n_hdlc->tty) {
-                       tty_unlock();
-                       return 0;
-               }
+               set_current_state(TASK_INTERRUPTIBLE);
 
                rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
-               if (rbuf)
+               if (rbuf) {
+                       if (rbuf->count > nr) {
+                               /* too large for caller's buffer */
+                               ret = -EOVERFLOW;
+                       } else {
+                               if (copy_to_user(buf, rbuf->buf, rbuf->count))
+                                       ret = -EFAULT;
+                               else
+                                       ret = rbuf->count;
+                       }
+
+                       if (n_hdlc->rx_free_buf_list.count >
+                           DEFAULT_RX_BUF_COUNT)
+                               kfree(rbuf);
+                       else
+                               n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
                        break;
+               }
                        
                /* no data */
                if (file->f_flags & O_NONBLOCK) {
-                       tty_unlock();
-                       return -EAGAIN;
+                       ret = -EAGAIN;
+                       break;
                }
-                       
-               interruptible_sleep_on (&tty->read_wait);
+
+               schedule();
+
                if (signal_pending(current)) {
-                       tty_unlock();
-                       return -EINTR;
+                       ret = -EINTR;
+                       break;
                }
        }
-               
-       if (rbuf->count > nr)
-               /* frame too large for caller's buffer (discard frame) */
-               ret = -EOVERFLOW;
-       else {
-               /* Copy the data to the caller's buffer */
-               if (copy_to_user(buf, rbuf->buf, rbuf->count))
-                       ret = -EFAULT;
-               else
-                       ret = rbuf->count;
-       }
-       
-       /* return HDLC buffer to free list unless the free list */
-       /* count has exceeded the default value, in which case the */
-       /* buffer is freed back to the OS to conserve memory */
-       if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
-               kfree(rbuf);
-       else    
-               n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
-       tty_unlock();
+
+       remove_wait_queue(&tty->read_wait, &wait);
+       __set_current_state(TASK_RUNNING);
+
        return ret;
        
 }      /* end of n_hdlc_tty_read() */
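
The rewritten read path above replaces interruptible_sleep_on(), which is racy because a wakeup arriving between the condition check and the sleep is lost, with the explicit wait-queue loop. A minimal sketch of that standard pattern, assuming a wait_queue_head_t waitq, a data_available() condition and a nonblocking flag that are placeholders rather than names from this driver:

	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (data_available())		/* re-check after arming */
			break;
		if (nonblocking) {
			ret = -EAGAIN;
			break;
		}
		schedule();			/* sleep until woken */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&waitq, &wait);
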
@@ -691,14 +690,15 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                count = maxframe;
        }
        
-       tty_lock();
-
        add_wait_queue(&tty->write_wait, &wait);
-       set_current_state(TASK_INTERRUPTIBLE);
+
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
        
-       /* Allocate transmit buffer */
-       /* sleep until transmit buffer available */             
-       while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) {
+               tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
+               if (tbuf)
+                       break;
+
                if (file->f_flags & O_NONBLOCK) {
                        error = -EAGAIN;
                        break;
@@ -719,7 +719,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                }
        }
 
-       set_current_state(TASK_RUNNING);
+       __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tty->write_wait, &wait);
 
        if (!error) {           
@@ -731,7 +731,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
                n_hdlc_send_frames(n_hdlc,tty);
        }
-       tty_unlock();
+
        return error;
        
 }      /* end of n_hdlc_tty_write() */
index be0ebce..de0160e 100644 (file)
@@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status)
 
 static void receive_chars(struct m68k_serial *info, unsigned short rx)
 {
-       struct tty_struct *tty = info->port.tty;
+       struct tty_struct *tty = info->tty;
        m68328_uart *uart = &uart_addr[info->line];
        unsigned char ch, flag;
 
@@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info)
                goto clear_and_return;
        }
 
-       if((info->xmit_cnt <= 0) || info->port.tty->stopped) {
+       if((info->xmit_cnt <= 0) || info->tty->stopped) {
                /* That's peculiar... TX ints off */
                uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
                goto clear_and_return;
@@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work)
        struct m68k_serial      *info = container_of(work, struct m68k_serial, tqueue);
        struct tty_struct       *tty;
        
-       tty = info->port.tty;
+       tty = info->tty;
        if (!tty)
                return;
 #if 0
@@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work)
        struct m68k_serial      *info = container_of(work, struct m68k_serial, tqueue_hangup);
        struct tty_struct       *tty;
        
-       tty = info->port.tty;
+       tty = info->tty;
        if (!tty)
                return;
 
@@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info)
        uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
 #endif
 
-       if (info->port.tty)
-               clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+       if (info->tty)
+               clear_bit(TTY_IO_ERROR, &info->tty->flags);
        info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
 
        /*
@@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info)
                info->xmit_buf = 0;
        }
 
-       if (info->port.tty)
-               set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+       if (info->tty)
+               set_bit(TTY_IO_ERROR, &info->tty->flags);
        
        info->flags &= ~S_INITIALIZED;
        local_irq_restore(flags);
@@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info)
        unsigned cflag;
        int     i;
 
-       if (!info->port.tty || !info->port.tty->termios)
+       if (!info->tty || !info->tty->termios)
                return;
-       cflag = info->port.tty->termios->c_cflag;
+       cflag = info->tty->termios->c_cflag;
        if (!(port = info->port))
                return;
 
@@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration)
 static int rs_ioctl(struct tty_struct *tty, struct file * file,
                    unsigned int cmd, unsigned long arg)
 {
-       int error;
        struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
        int retval;
 
@@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
        tty_ldisc_flush(tty);
        tty->closing = 0;
        info->event = 0;
-       info->port.tty = NULL;
+       info->tty = NULL;
 #warning "This is not and has never been valid so fix it"      
 #if 0
        if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty)
        info->event = 0;
        info->count = 0;
        info->flags &= ~S_NORMAL_ACTIVE;
-       info->port.tty = NULL;
+       info->tty = NULL;
        wake_up_interruptible(&info->open_wait);
 }
 
@@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp)
 
        info->count++;
        tty->driver_data = info;
-       info->port.tty = tty;
+       info->tty = tty;
 
        /*
         * Start up serial port
@@ -1338,7 +1337,7 @@ rs68328_init(void)
            info = &m68k_soft[i];
            info->magic = SERIAL_MAGIC;
            info->port = (int) &uart_addr[i];
-           info->port.tty = NULL;
+           info->tty = NULL;
            info->irq = uart_irqs[i];
            info->custom_divisor = 16;
            info->close_delay = 50;
index 88b1335..bc21eea 100644 (file)
@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = {
        /* .read_proc = rs_360_read_proc, */
        .tiocmget = rs_360_tiocmget,
        .tiocmset = rs_360_tiocmset,
+       .get_icount = rs_360_get_icount,
 };
 
 static int __init rs_360_init(void)
index b25e6e4..3975df6 100644 (file)
@@ -236,7 +236,8 @@ static const struct serial8250_config uart_config[] = {
                .fifo_size      = 128,
                .tx_loadsz      = 128,
                .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+               /* UART_CAP_EFR breaks Billionton CF bluetooth card. */
+               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP,
        },
        [PORT_16654] = {
                .name           = "ST16654",
index b1682d7..2b83346 100644 (file)
@@ -1518,6 +1518,7 @@ config SERIAL_BCM63XX_CONSOLE
 config SERIAL_GRLIB_GAISLER_APBUART
        tristate "GRLIB APBUART serial support"
        depends on OF
+       select SERIAL_CORE
        ---help---
        Add support for the GRLIB APBUART serial port.
 
index e381b89..9b1ff2b 100644 (file)
@@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
 {
        struct bfin_serial_port *uart = dev_id;
 
-       spin_lock(&uart->port.lock);
        while (UART_GET_LSR(uart) & DR)
                bfin_serial_rx_chars(uart);
-       spin_unlock(&uart->port.lock);
 
        return IRQ_HANDLED;
 }
@@ -490,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
 {
        int x_pos, pos;
 
-       dma_disable_irq(uart->tx_dma_channel);
-       dma_disable_irq(uart->rx_dma_channel);
-       spin_lock_bh(&uart->port.lock);
+       dma_disable_irq_nosync(uart->rx_dma_channel);
+       spin_lock_bh(&uart->rx_lock);
 
        /* 2D DMA RX buffer ring is used. Because curr_y_count and
         * curr_x_count can't be read as an atomic operation,
@@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
                uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
        }
 
-       spin_unlock_bh(&uart->port.lock);
-       dma_enable_irq(uart->tx_dma_channel);
+       spin_unlock_bh(&uart->rx_lock);
        dma_enable_irq(uart->rx_dma_channel);
 
        mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
        unsigned short irqstat;
        int x_pos, pos;
 
-       spin_lock(&uart->port.lock);
+       spin_lock(&uart->rx_lock);
        irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
        clear_dma_irqstat(uart->rx_dma_channel);
 
@@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
                uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
        }
 
-       spin_unlock(&uart->port.lock);
+       spin_unlock(&uart->rx_lock);
 
        return IRQ_HANDLED;
 }
@@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
                }
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
+               spin_lock_init(&uart->rx_lock);
                uart->tx_done       = 1;
                uart->tx_count      = 0;
 
index beb1afa..7b951ad 100644 (file)
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
        s->rts = 0;
 
        sprintf(b, "max3100-%d", s->minor);
-       s->workqueue = create_freezeable_workqueue(b);
+       s->workqueue = create_freezable_workqueue(b);
        if (!s->workqueue) {
                dev_warn(&s->spi->dev, "cannot create workqueue\n");
                return -EBUSY;
index 910870e..750b4f6 100644 (file)
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
        struct max3107_port *s = container_of(port, struct max3107_port, port);
 
        /* Initialize work queue */
-       s->workqueue = create_freezeable_workqueue("max3107");
+       s->workqueue = create_freezable_workqueue("max3107");
        if (!s->workqueue) {
                dev_err(&s->spi->dev, "Workqueue creation failed\n");
                return -EBUSY;
index a2f2b32..602d984 100644 (file)
@@ -829,7 +829,7 @@ static void __init sbd_probe_duarts(void)
 #ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
 /*
  * Serial console stuff.  Very basic, polling driver for doing serial
- * console output.  The console_sem is held by the caller, so we
+ * console output.  The console_lock is held by the caller, so we
  * shouldn't be interrupted for more console activity.
  */
 static void sbd_console_putchar(struct uart_port *uport, int ch)
index 93760b2..1ef4df9 100644 (file)
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+       PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
        PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
        PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
index c556ed9..81f1395 100644 (file)
@@ -46,7 +46,7 @@
 #include <asm/irq_regs.h>
 
 /* Whether we react on sysrq keys or just ignore them */
-static int __read_mostly sysrq_enabled = 1;
+static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
 
 static bool sysrq_on(void)
@@ -571,6 +571,7 @@ struct sysrq_state {
        unsigned int alt_use;
        bool active;
        bool need_reinject;
+       bool reinjecting;
 };
 
 static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
        unsigned int alt_code = sysrq->alt_use;
 
        if (sysrq->need_reinject) {
+               /* we do not want the assignment to be reordered */
+               sysrq->reinjecting = true;
+               mb();
+
                /* Simulate press and release of Alt + SysRq */
                input_inject_event(handle, EV_KEY, alt_code, 1);
                input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
                input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
                input_inject_event(handle, EV_KEY, alt_code, 0);
                input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+               mb();
+               sysrq->reinjecting = false;
        }
 }
 
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle,
        bool was_active = sysrq->active;
        bool suppress;
 
+       /*
+        * Do not filter anything if we are in the process of re-injecting
+        * Alt+SysRq combination.
+        */
+       if (sysrq->reinjecting)
+               return false;
+
        switch (type) {
 
        case EV_SYN:
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle,
                                sysrq->alt_use = sysrq->alt;
                                /*
                                 * If nothing else will be pressed we'll need
-                                * to re-inject Alt-SysRq keysroke.
+                                * to re-inject Alt-SysRq keystroke.
                                 */
                                sysrq->need_reinject = true;
                        }
index 464d09d..0065da4 100644 (file)
@@ -3256,8 +3256,8 @@ static ssize_t show_cons_active(struct device *dev,
        struct console *c;
        ssize_t count = 0;
 
-       acquire_console_sem();
-       for (c = console_drivers; c; c = c->next) {
+       console_lock();
+       for_each_console(c) {
                if (!c->device)
                        continue;
                if (!c->write)
@@ -3271,7 +3271,7 @@ static ssize_t show_cons_active(struct device *dev,
        while (i--)
                count += sprintf(buf + count, "%s%d%c",
                                 cs[i]->name, cs[i]->index, i ? ' ':'\n');
-       release_console_sem();
+       console_unlock();
 
        return count;
 }
@@ -3306,7 +3306,7 @@ int __init tty_init(void)
        if (IS_ERR(consdev))
                consdev = NULL;
        else
-               device_create_file(consdev, &dev_attr_active);
+               WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
 
 #ifdef CONFIG_VT
        vty_init(&console_fops);
index ebae344..c956ed6 100644 (file)
@@ -316,9 +316,9 @@ int paste_selection(struct tty_struct *tty)
        /* always called with BTM from vt_ioctl */
        WARN_ON(!tty_locked());
 
-       acquire_console_sem();
+       console_lock();
        poke_blanked_console();
-       release_console_sem();
+       console_unlock();
 
        ld = tty_ldisc_ref(tty);
        if (!ld) {
index eab3a1f..a672ed1 100644 (file)
@@ -202,7 +202,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        /* Select the proper current console and verify
         * sanity of the situation under the console lock.
         */
-       acquire_console_sem();
+       console_lock();
 
        attr = (currcons & 128);
        currcons = (currcons & 127);
@@ -336,9 +336,9 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                 * the pagefault handling code may want to call printk().
                 */
 
-               release_console_sem();
+               console_unlock();
                ret = copy_to_user(buf, con_buf_start, orig_count);
-               acquire_console_sem();
+               console_lock();
 
                if (ret) {
                        read += (orig_count - ret);
@@ -354,7 +354,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        if (read)
                ret = read;
 unlock_out:
-       release_console_sem();
+       console_unlock();
        mutex_unlock(&con_buf_mtx);
        return ret;
 }
@@ -379,7 +379,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
        /* Select the proper current console and verify
         * sanity of the situation under the console lock.
         */
-       acquire_console_sem();
+       console_lock();
 
        attr = (currcons & 128);
        currcons = (currcons & 127);
@@ -414,9 +414,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
                /* Temporarily drop the console lock so that we can read
                 * in the write data from userspace safely.
                 */
-               release_console_sem();
+               console_unlock();
                ret = copy_from_user(con_buf, buf, this_round);
-               acquire_console_sem();
+               console_lock();
 
                if (ret) {
                        this_round -= ret;
@@ -542,7 +542,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
                vcs_scr_updated(vc);
 
 unlock_out:
-       release_console_sem();
+       console_unlock();
 
        mutex_unlock(&con_buf_mtx);
 
index 76407ec..147ede3 100644 (file)
@@ -1003,9 +1003,9 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
        struct vc_data *vc = tty->driver_data;
        int ret;
 
-       acquire_console_sem();
+       console_lock();
        ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
-       release_console_sem();
+       console_unlock();
        return ret;
 }
 
@@ -1271,7 +1271,7 @@ static void default_attr(struct vc_data *vc)
        vc->vc_color = vc->vc_def_color;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_m(struct vc_data *vc)
 {
        int i;
@@ -1415,7 +1415,7 @@ int mouse_reporting(void)
        return vc_cons[fg_console].d->vc_report_mouse;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void set_mode(struct vc_data *vc, int on_off)
 {
        int i;
@@ -1485,7 +1485,7 @@ static void set_mode(struct vc_data *vc, int on_off)
                }
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void setterm_command(struct vc_data *vc)
 {
        switch(vc->vc_par[0]) {
@@ -1545,7 +1545,7 @@ static void setterm_command(struct vc_data *vc)
        }
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_at(struct vc_data *vc, unsigned int nr)
 {
        if (nr > vc->vc_cols - vc->vc_x)
@@ -1555,7 +1555,7 @@ static void csi_at(struct vc_data *vc, unsigned int nr)
        insert_char(vc, nr);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_L(struct vc_data *vc, unsigned int nr)
 {
        if (nr > vc->vc_rows - vc->vc_y)
@@ -1566,7 +1566,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr)
        vc->vc_need_wrap = 0;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_P(struct vc_data *vc, unsigned int nr)
 {
        if (nr > vc->vc_cols - vc->vc_x)
@@ -1576,7 +1576,7 @@ static void csi_P(struct vc_data *vc, unsigned int nr)
        delete_char(vc, nr);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_M(struct vc_data *vc, unsigned int nr)
 {
        if (nr > vc->vc_rows - vc->vc_y)
@@ -1587,7 +1587,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr)
        vc->vc_need_wrap = 0;
 }
 
-/* console_sem is held (except via vc_init->reset_terminal */
+/* console_lock is held (except via vc_init->reset_terminal) */
 static void save_cur(struct vc_data *vc)
 {
        vc->vc_saved_x          = vc->vc_x;
@@ -1603,7 +1603,7 @@ static void save_cur(struct vc_data *vc)
        vc->vc_saved_G1         = vc->vc_G1_charset;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void restore_cur(struct vc_data *vc)
 {
        gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
@@ -1625,7 +1625,7 @@ enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
        EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
        ESpalette };
 
-/* console_sem is held (except via vc_init()) */
+/* console_lock is held (except via vc_init()) */
 static void reset_terminal(struct vc_data *vc, int do_clear)
 {
        vc->vc_top              = 0;
@@ -1685,7 +1685,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
            csi_J(vc, 2);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
        /*
@@ -2119,7 +2119,7 @@ static int is_double_width(uint32_t ucs)
        return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
 }
 
-/* acquires console_sem */
+/* acquires console_lock */
 static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
 #ifdef VT_BUF_VRAM_ONLY
@@ -2147,11 +2147,11 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
 
        might_sleep();
 
-       acquire_console_sem();
+       console_lock();
        vc = tty->driver_data;
        if (vc == NULL) {
                printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
-               release_console_sem();
+               console_unlock();
                return 0;
        }
 
@@ -2159,7 +2159,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
        if (!vc_cons_allocated(currcons)) {
            /* could this happen? */
                printk_once("con_write: tty %d not allocated\n", currcons+1);
-           release_console_sem();
+           console_unlock();
            return 0;
        }
 
@@ -2375,7 +2375,7 @@ rescan_last_byte:
        }
        FLUSH
        console_conditional_schedule();
-       release_console_sem();
+       console_unlock();
        notify_update(vc);
        return n;
 #undef FLUSH
@@ -2388,11 +2388,11 @@ rescan_last_byte:
  * us to do the switches asynchronously (needed when we want
  * to switch due to a keyboard interrupt).  Synchronization
  * with other console code and prevention of re-entrancy is
- * ensured with console_sem.
+ * ensured with console_lock.
  */
 static void console_callback(struct work_struct *ignored)
 {
-       acquire_console_sem();
+       console_lock();
 
        if (want_console >= 0) {
                if (want_console != fg_console &&
@@ -2422,7 +2422,7 @@ static void console_callback(struct work_struct *ignored)
        }
        notify_update(vc_cons[fg_console].d);
 
-       release_console_sem();
+       console_unlock();
 }
 
 int set_console(int nr)
@@ -2603,7 +2603,7 @@ static struct console vt_console_driver = {
  */
 
 /*
- * Generally a bit racy with respect to console_sem().
+ * Generally a bit racy with respect to console_lock().
  *
  * There are some functions which don't need it.
  *
@@ -2629,17 +2629,17 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
        switch (type)
        {
                case TIOCL_SETSEL:
-                       acquire_console_sem();
+                       console_lock();
                        ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
-                       release_console_sem();
+                       console_unlock();
                        break;
                case TIOCL_PASTESEL:
                        ret = paste_selection(tty);
                        break;
                case TIOCL_UNBLANKSCREEN:
-                       acquire_console_sem();
+                       console_lock();
                        unblank_screen();
-                       release_console_sem();
+                       console_unlock();
                        break;
                case TIOCL_SELLOADLUT:
                        ret = sel_loadlut(p);
@@ -2688,10 +2688,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
                        }
                        break;
                case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
-                       acquire_console_sem();
+                       console_lock();
                        ignore_poke = 1;
                        do_blank_screen(0);
-                       release_console_sem();
+                       console_unlock();
                        break;
                case TIOCL_BLANKEDSCREEN:
                        ret = console_blanked;
@@ -2790,11 +2790,11 @@ static void con_flush_chars(struct tty_struct *tty)
                return;
 
        /* if we race with con_close(), vt may be null */
-       acquire_console_sem();
+       console_lock();
        vc = tty->driver_data;
        if (vc)
                set_cursor(vc);
-       release_console_sem();
+       console_unlock();
 }
 
 /*
@@ -2805,7 +2805,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
        unsigned int currcons = tty->index;
        int ret = 0;
 
-       acquire_console_sem();
+       console_lock();
        if (tty->driver_data == NULL) {
                ret = vc_allocate(currcons);
                if (ret == 0) {
@@ -2813,7 +2813,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
 
                        /* Still being freed */
                        if (vc->port.tty) {
-                               release_console_sem();
+                               console_unlock();
                                return -ERESTARTSYS;
                        }
                        tty->driver_data = vc;
@@ -2827,11 +2827,11 @@ static int con_open(struct tty_struct *tty, struct file *filp)
                                tty->termios->c_iflag |= IUTF8;
                        else
                                tty->termios->c_iflag &= ~IUTF8;
-                       release_console_sem();
+                       console_unlock();
                        return ret;
                }
        }
-       release_console_sem();
+       console_unlock();
        return ret;
 }
 
@@ -2844,9 +2844,9 @@ static void con_shutdown(struct tty_struct *tty)
 {
        struct vc_data *vc = tty->driver_data;
        BUG_ON(vc == NULL);
-       acquire_console_sem();
+       console_lock();
        vc->port.tty = NULL;
-       release_console_sem();
+       console_unlock();
        tty_shutdown(tty);
 }
 
@@ -2893,13 +2893,13 @@ static int __init con_init(void)
        struct vc_data *vc;
        unsigned int currcons = 0, i;
 
-       acquire_console_sem();
+       console_lock();
 
        if (conswitchp)
                display_desc = conswitchp->con_startup();
        if (!display_desc) {
                fg_console = 0;
-               release_console_sem();
+               console_unlock();
                return 0;
        }
 
@@ -2946,7 +2946,7 @@ static int __init con_init(void)
        printable = 1;
        printk("\n");
 
-       release_console_sem();
+       console_unlock();
 
 #ifdef CONFIG_VT_CONSOLE
        register_console(&vt_console_driver);
@@ -2994,7 +2994,7 @@ int __init vty_init(const struct file_operations *console_fops)
        if (IS_ERR(tty0dev))
                tty0dev = NULL;
        else
-               device_create_file(tty0dev, &dev_attr_active);
+               WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
 
        vcs_init();
 
@@ -3037,7 +3037,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
        if (!try_module_get(owner))
                return -ENODEV;
 
-       acquire_console_sem();
+       console_lock();
 
        /* check if driver is registered */
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3122,7 +3122,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
 
        retval = 0;
 err:
-       release_console_sem();
+       console_unlock();
        module_put(owner);
        return retval;
 };
@@ -3171,7 +3171,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
        if (!try_module_get(owner))
                return -ENODEV;
 
-       acquire_console_sem();
+       console_lock();
 
        /* check if driver is registered and if it is unbindable */
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3185,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
        }
 
        if (retval) {
-               release_console_sem();
+               console_unlock();
                goto err;
        }
 
@@ -3204,12 +3204,12 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
        }
 
        if (retval) {
-               release_console_sem();
+               console_unlock();
                goto err;
        }
 
        if (!con_is_bound(csw)) {
-               release_console_sem();
+               console_unlock();
                goto err;
        }
 
@@ -3238,7 +3238,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
        if (!con_is_bound(csw))
                con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
 
-       release_console_sem();
+       console_unlock();
        /* ignore return value, binding should not fail */
        bind_con_driver(defcsw, first, last, deflt);
 err:
@@ -3538,14 +3538,14 @@ int register_con_driver(const struct consw *csw, int first, int last)
        if (!try_module_get(owner))
                return -ENODEV;
 
-       acquire_console_sem();
+       console_lock();
 
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
                con_driver = &registered_con_driver[i];
 
                /* already registered */
                if (con_driver->con == csw)
-                       retval = -EINVAL;
+                       retval = -EBUSY;
        }
 
        if (retval)
@@ -3592,7 +3592,7 @@ int register_con_driver(const struct consw *csw, int first, int last)
        }
 
 err:
-       release_console_sem();
+       console_unlock();
        module_put(owner);
        return retval;
 }
@@ -3613,7 +3613,7 @@ int unregister_con_driver(const struct consw *csw)
 {
        int i, retval = -ENODEV;
 
-       acquire_console_sem();
+       console_lock();
 
        /* cannot unregister a bound driver */
        if (con_is_bound(csw))
@@ -3639,7 +3639,7 @@ int unregister_con_driver(const struct consw *csw)
                }
        }
 err:
-       release_console_sem();
+       console_unlock();
        return retval;
 }
 EXPORT_SYMBOL(unregister_con_driver);
@@ -3656,7 +3656,12 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
        int err;
 
        err = register_con_driver(csw, first, last);
-
+       /* if we get a busy error we still want to bind the console driver
+        * and return success, as we may have unbound the console driver
+        * but not unregistered it.
+        */
+       if (err == -EBUSY)
+               err = 0;
        if (!err)
                bind_con_driver(csw, first, last, deflt);
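
Because register_con_driver() above now returns -EBUSY for an already-registered driver, and take_over_console() treats that as success, a driver that was unbound but never unregistered can simply re-take the console. A hedged caller sketch; my_consw and the "mycon:" prefix are illustrative:

	int err;

	err = take_over_console(&my_consw, 0, MAX_NR_CONSOLES - 1, 1);
	if (err)
		pr_err("mycon: take_over_console failed: %d\n", err);
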
 
@@ -3934,9 +3939,9 @@ int con_set_cmap(unsigned char __user *arg)
 {
        int rc;
 
-       acquire_console_sem();
+       console_lock();
        rc = set_get_cmap (arg,1);
-       release_console_sem();
+       console_unlock();
 
        return rc;
 }
@@ -3945,9 +3950,9 @@ int con_get_cmap(unsigned char __user *arg)
 {
        int rc;
 
-       acquire_console_sem();
+       console_lock();
        rc = set_get_cmap (arg,0);
-       release_console_sem();
+       console_unlock();
 
        return rc;
 }
@@ -3994,12 +3999,12 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
        } else
                font.data = NULL;
 
-       acquire_console_sem();
+       console_lock();
        if (vc->vc_sw->con_font_get)
                rc = vc->vc_sw->con_font_get(vc, &font);
        else
                rc = -ENOSYS;
-       release_console_sem();
+       console_unlock();
 
        if (rc)
                goto out;
@@ -4076,12 +4081,12 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
        font.data = memdup_user(op->data, size);
        if (IS_ERR(font.data))
                return PTR_ERR(font.data);
-       acquire_console_sem();
+       console_lock();
        if (vc->vc_sw->con_font_set)
                rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
        else
                rc = -ENOSYS;
-       release_console_sem();
+       console_unlock();
        kfree(font.data);
        return rc;
 }
@@ -4103,12 +4108,12 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
        else
                name[MAX_FONT_NAME - 1] = 0;
 
-       acquire_console_sem();
+       console_lock();
        if (vc->vc_sw->con_font_default)
                rc = vc->vc_sw->con_font_default(vc, &font, s);
        else
                rc = -ENOSYS;
-       release_console_sem();
+       console_unlock();
        if (!rc) {
                op->width = font.width;
                op->height = font.height;
@@ -4124,7 +4129,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
        if (vc->vc_mode != KD_TEXT)
                return -EINVAL;
 
-       acquire_console_sem();
+       console_lock();
        if (!vc->vc_sw->con_font_copy)
                rc = -ENOSYS;
        else if (con < 0 || !vc_cons_allocated(con))
@@ -4133,7 +4138,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
                rc = 0;
        else
                rc = vc->vc_sw->con_font_copy(vc, con);
-       release_console_sem();
+       console_unlock();
        return rc;
 }
 
index 6b68a0f..1235ebd 100644 (file)
@@ -649,12 +649,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                /*
                 * explicitly blank/unblank the screen if switching modes
                 */
-               acquire_console_sem();
+               console_lock();
                if (arg == KD_TEXT)
                        do_unblank_screen(1);
                else
                        do_blank_screen(1);
-               release_console_sem();
+               console_unlock();
                break;
 
        case KDGETMODE:
@@ -893,7 +893,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        ret = -EINVAL;
                        goto out;
                }
-               acquire_console_sem();
+               console_lock();
                vc->vt_mode = tmp;
                /* the frsig is ignored, so we set it to 0 */
                vc->vt_mode.frsig = 0;
@@ -901,7 +901,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                vc->vt_pid = get_pid(task_pid(current));
                /* no switch is required -- saw@shade.msu.ru */
                vc->vt_newvt = -1;
-               release_console_sem();
+               console_unlock();
                break;
        }
 
@@ -910,9 +910,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                struct vt_mode tmp;
                int rc;
 
-               acquire_console_sem();
+               console_lock();
                memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
-               release_console_sem();
+               console_unlock();
 
                rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
                if (rc)
@@ -965,9 +965,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        ret =  -ENXIO;
                else {
                        arg--;
-                       acquire_console_sem();
+                       console_lock();
                        ret = vc_allocate(arg);
-                       release_console_sem();
+                       console_unlock();
                        if (ret)
                                break;
                        set_console(arg);
@@ -990,7 +990,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        ret = -ENXIO;
                else {
                        vsa.console--;
-                       acquire_console_sem();
+                       console_lock();
                        ret = vc_allocate(vsa.console);
                        if (ret == 0) {
                                struct vc_data *nvc;
@@ -1003,7 +1003,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                put_pid(nvc->vt_pid);
                                nvc->vt_pid = get_pid(task_pid(current));
                        }
-                       release_console_sem();
+                       console_unlock();
                        if (ret)
                                break;
                        /* Commence switch and lock */
@@ -1044,7 +1044,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                /*
                 * Switching-from response
                 */
-               acquire_console_sem();
+               console_lock();
                if (vc->vt_newvt >= 0) {
                        if (arg == 0)
                                /*
@@ -1063,7 +1063,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                vc->vt_newvt = -1;
                                ret = vc_allocate(newvt);
                                if (ret) {
-                                       release_console_sem();
+                                       console_unlock();
                                        break;
                                }
                                /*
@@ -1083,7 +1083,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        if (arg != VT_ACKACQ)
                                ret = -EINVAL;
                }
-               release_console_sem();
+               console_unlock();
                break;
 
         /*
@@ -1096,20 +1096,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                }
                if (arg == 0) {
                    /* deallocate all unused consoles, but leave 0 */
-                       acquire_console_sem();
+                       console_lock();
                        for (i=1; i<MAX_NR_CONSOLES; i++)
                                if (! VT_BUSY(i))
                                        vc_deallocate(i);
-                       release_console_sem();
+                       console_unlock();
                } else {
                        /* deallocate a single console, if possible */
                        arg--;
                        if (VT_BUSY(arg))
                                ret = -EBUSY;
                        else if (arg) {                       /* leave 0 */
-                               acquire_console_sem();
+                               console_lock();
                                vc_deallocate(arg);
-                               release_console_sem();
+                               console_unlock();
                        }
                }
                break;
@@ -1126,7 +1126,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                    get_user(cc, &vtsizes->v_cols))
                        ret = -EFAULT;
                else {
-                       acquire_console_sem();
+                       console_lock();
                        for (i = 0; i < MAX_NR_CONSOLES; i++) {
                                vc = vc_cons[i].d;
 
@@ -1135,7 +1135,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                        vc_resize(vc_cons[i].d, cc, ll);
                                }
                        }
-                       release_console_sem();
+                       console_unlock();
                }
                break;
        }
@@ -1187,14 +1187,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                for (i = 0; i < MAX_NR_CONSOLES; i++) {
                        if (!vc_cons[i].d)
                                continue;
-                       acquire_console_sem();
+                       console_lock();
                        if (vlin)
                                vc_cons[i].d->vc_scan_lines = vlin;
                        if (clin)
                                vc_cons[i].d->vc_font.height = clin;
                        vc_cons[i].d->vc_resize_user = 1;
                        vc_resize(vc_cons[i].d, cc, ll);
-                       release_console_sem();
+                       console_unlock();
                }
                break;
        }
@@ -1367,7 +1367,7 @@ void vc_SAK(struct work_struct *work)
        struct vc_data *vc;
        struct tty_struct *tty;
 
-       acquire_console_sem();
+       console_lock();
        vc = vc_con->d;
        if (vc) {
                tty = vc->port.tty;
@@ -1379,7 +1379,7 @@ void vc_SAK(struct work_struct *work)
                        __do_SAK(tty);
                reset_vc(vc);
        }
-       release_console_sem();
+       console_unlock();
 }
 
 #ifdef CONFIG_COMPAT
@@ -1737,10 +1737,10 @@ int vt_move_to_console(unsigned int vt, int alloc)
 {
        int prev;
 
-       acquire_console_sem();
+       console_lock();
        /* Graphics mode - up to X */
        if (disable_vt_switch) {
-               release_console_sem();
+               console_unlock();
                return 0;
        }
        prev = fg_console;
@@ -1748,7 +1748,7 @@ int vt_move_to_console(unsigned int vt, int alloc)
        if (alloc && vc_allocate(vt)) {
                /* we can't have a free VC for now. Too bad,
                 * we don't want to mess the screen for now. */
-               release_console_sem();
+               console_unlock();
                return -ENOSPC;
        }
 
@@ -1758,10 +1758,10 @@ int vt_move_to_console(unsigned int vt, int alloc)
                 * Let the calling function know so it can decide
                 * what to do.
                 */
-               release_console_sem();
+               console_unlock();
                return -EIO;
        }
-       release_console_sem();
+       console_unlock();
        tty_lock();
        if (vt_waitactive(vt + 1)) {
                pr_debug("Suspend: Can't switch VCs.");
@@ -1781,8 +1781,8 @@ int vt_move_to_console(unsigned int vt, int alloc)
  */
 void pm_set_vt_switch(int do_switch)
 {
-       acquire_console_sem();
+       console_lock();
        disable_vt_switch = !do_switch;
-       release_console_sem();
+       console_unlock();
 }
 EXPORT_SYMBOL(pm_set_vt_switch);
index d6ede98..4ab49d4 100644 (file)
@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
        { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
        { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
        { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
        { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
index 6ee4451..47085e5 100644 (file)
@@ -342,7 +342,7 @@ static ssize_t wdm_write
                goto outnp;
        }
 
-       if (!file->f_flags && O_NONBLOCK)
+       if (!(file->f_flags & O_NONBLOCK))
                r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
                                                                &desc->flags));
        else
index 9da2505..df502a9 100644 (file)
@@ -192,12 +192,12 @@ int usb_create_ep_devs(struct device *parent,
        ep_dev->dev.parent = parent;
        ep_dev->dev.release = ep_device_release;
        dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
-       device_enable_async_suspend(&ep_dev->dev);
 
        retval = device_register(&ep_dev->dev);
        if (retval)
                goto error_register;
 
+       device_enable_async_suspend(&ep_dev->dev);
        endpoint->ep_dev = ep_dev;
        return retval;
 
index b55d460..f71e8e3 100644 (file)
@@ -405,7 +405,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
                        return retval;
        }
 
-       synchronize_irq(pci_dev->irq);
+       /* If MSI-X is enabled, the driver will have synchronized all vectors
+        * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
+        * synchronized here.
+        */
+       if (!hcd->msix_enabled)
+               synchronize_irq(pci_dev->irq);
 
        /* Downstream ports from this root hub should already be quiesced, so
         * there will be no DMA activity.  Now we can shut down the upstream
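
As the comment above notes, the remaining synchronize_irq() only matters for the shared legacy/MSI vector; once it returns, no handler registered on that IRQ is still executing on any CPU. A hedged, generic illustration of that guarantee in any PCI driver's suspend path, where pdev is a hypothetical struct pci_dev pointer:

	/* After this returns, no handler on pdev->irq is still running,
	 * so device state can be torn down safely. */
	synchronize_irq(pdev->irq);
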
index 6a95017..e935f71 100644 (file)
@@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 
        dev_dbg(&rhdev->dev, "usb %s%s\n",
                        (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
-       clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
        if (!hcd->driver->bus_resume)
                return -ENOENT;
        if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 
        hcd->state = HC_STATE_RESUMING;
        status = hcd->driver->bus_resume(hcd);
+       clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
        if (status == 0) {
                /* TRSMRCY = 10 msec */
                msleep(10);
index b98efae..0f299b7 100644 (file)
@@ -676,6 +676,8 @@ static void hub_init_func3(struct work_struct *ws);
 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 {
        struct usb_device *hdev = hub->hdev;
+       struct usb_hcd *hcd;
+       int ret;
        int port1;
        int status;
        bool need_debounce_delay = false;
@@ -714,6 +716,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                        usb_autopm_get_interface_no_resume(
                                        to_usb_interface(hub->intfdev));
                        return;         /* Continues at init2: below */
+               } else if (type == HUB_RESET_RESUME) {
+                       /* The internal host controller state for the hub device
+                        * may be gone after a host power loss on system resume.
+                        * Update the device's info so the HW knows it's a hub.
+                        */
+                       hcd = bus_to_hcd(hdev->bus);
+                       if (hcd->driver->update_hub_device) {
+                               ret = hcd->driver->update_hub_device(hcd, hdev,
+                                               &hub->tt, GFP_NOIO);
+                               if (ret < 0) {
+                                       dev_err(hub->intfdev, "Host not "
+                                                       "accepting hub info "
+                                                       "update.\n");
+                                       dev_err(hub->intfdev, "LS/FS devices "
+                                                       "and hubs may not work "
+                                                       "under this hub.\n");
+                               }
+                       }
+                       hub_power_on(hub, true);
                } else {
                        hub_power_on(hub, true);
                }
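
The reset-resume branch above relies on update_hub_device being an optional hc_driver callback (xHCI provides one, for example), so the NULL check is required before replaying the hub's TT information. A condensed sketch of that optional-callback pattern, reusing the names from the hunk:

	if (hcd->driver->update_hub_device) {
		ret = hcd->driver->update_hub_device(hcd, hdev, &hub->tt,
						     GFP_NOIO);
		if (ret < 0)
			dev_err(hub->intfdev,
				"hub info update failed: %d\n", ret);
	}
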
@@ -2660,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 
        mutex_lock(&usb_address0_mutex);
 
-       if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-               /* Don't reset USB 3.0 devices during an initial setup */
-               usb_set_device_state(udev, USB_STATE_DEFAULT);
-       } else {
-               /* Reset the device; full speed may morph to high speed */
-               /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-               retval = hub_port_reset(hub, port1, udev, delay);
-               if (retval < 0)         /* error or disconnect */
-                       goto fail;
-               /* success, speed is known */
-       }
+       /* Reset the device; full speed may morph to high speed */
+       /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+       retval = hub_port_reset(hub, port1, udev, delay);
+       if (retval < 0)         /* error or disconnect */
+               goto fail;
+       /* success, speed is known */
+
        retval = -ENODEV;
 
        if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2732,6 +2749,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
                udev->ttport = hdev->ttport;
        } else if (udev->speed != USB_SPEED_HIGH
                        && hdev->speed == USB_SPEED_HIGH) {
+               if (!hub->tt.hub) {
+                       dev_err(&udev->dev, "parent hub has no TT\n");
+                       retval = -EINVAL;
+                       goto fail;
+               }
                udev->tt = &hub->tt;
                udev->ttport = port1;
        }
index 44c5954..81ce6a8 100644 (file)
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04b4, 0x0526), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Samsung Android phone modem - ID conflict with SPH-I500 */
+       { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* Roland SC-8820 */
        { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Keytouch QWERTY Panel keyboard */
+       { USB_DEVICE(0x0926, 0x3333), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
        { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
index 1dc9739..d500996 100644 (file)
@@ -509,7 +509,7 @@ config USB_LANGWELL
        select USB_GADGET_SELECTED
 
 config USB_GADGET_EG20T
-       boolean "Intel EG20T(Topcliff) USB Device controller"
+       boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
        depends on PCI
        select USB_GADGET_DUALSPEED
        help
@@ -525,6 +525,11 @@ config USB_GADGET_EG20T
          This driver does not support interrupt transfer or isochronous
          transfer modes.
 
+         This driver can also be used for OKI SEMICONDUCTOR's ML7213, which
+         is intended for IVI (In-Vehicle Infotainment) use.
+         The ML7213 is a companion chip for the Intel Atom E6xx series and
+         is fully compatible with the Intel EG20T PCH.
+
 config USB_EG20T
        tristate
        depends on USB_GADGET_EG20T
@@ -541,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM
          ci13xxx_udc core.
          This driver depends on OTG driver for PHY initialization,
          clock management, powering up VBUS, and power management.
+         This driver is not supported on boards like trout, which
+         have an external PHY.
 
          Say "y" to link the driver statically, or "m" to build a
          dynamically linked module called "ci13xxx_msm" and force all
index 31656a2..a1c67ae 100644 (file)
@@ -76,10 +76,21 @@ static DEFINE_SPINLOCK(udc_lock);
 
 /* control endpoint description */
 static const struct usb_endpoint_descriptor
-ctrl_endpt_desc = {
+ctrl_endpt_out_desc = {
        .bLength         = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
 
+       .bEndpointAddress = USB_DIR_OUT,
+       .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+       .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+       .bLength         = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType = USB_DT_ENDPOINT,
+
+       .bEndpointAddress = USB_DIR_IN,
        .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
        .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
 };
@@ -265,10 +276,10 @@ static int hw_device_init(void __iomem *base)
        hw_bank.size /= sizeof(u32);
 
        reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
-       if (reg == 0 || reg > ENDPT_MAX)
-               return -ENODEV;
+       hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
 
-       hw_ep_max = reg;   /* cache hw ENDPT_MAX */
+       if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+               return -ENODEV;
 
        /* setup lock mode ? */
 
@@ -1197,16 +1208,17 @@ static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
        }
 
        spin_lock_irqsave(udc->lock, flags);
-       for (i = 0; i < hw_ep_max; i++) {
-               struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+       for (i = 0; i < hw_ep_max/2; i++) {
+               struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+               struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
                n += scnprintf(buf + n, PAGE_SIZE - n,
                               "EP=%02i: RX=%08X TX=%08X\n",
-                              i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+                              i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
                for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
                        n += scnprintf(buf + n, PAGE_SIZE - n,
                                       " %04X:    %08X    %08X\n", j,
-                                      *((u32 *)mEp->qh[RX].ptr + j),
-                                      *((u32 *)mEp->qh[TX].ptr + j));
+                                      *((u32 *)mEpRx->qh.ptr + j),
+                                      *((u32 *)mEpTx->qh.ptr + j));
                }
        }
        spin_unlock_irqrestore(udc->lock, flags);
@@ -1293,7 +1305,7 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
        unsigned long flags;
        struct list_head   *ptr = NULL;
        struct ci13xxx_req *req = NULL;
-       unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+       unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
 
        dbg_trace("[%s] %p\n", __func__, buf);
        if (attr == NULL || buf == NULL) {
@@ -1303,22 +1315,20 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
 
        spin_lock_irqsave(udc->lock, flags);
        for (i = 0; i < hw_ep_max; i++)
-               for (k = RX; k <= TX; k++)
-                       list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
-                       {
-                               req = list_entry(ptr,
-                                                struct ci13xxx_req, queue);
+               list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+               {
+                       req = list_entry(ptr, struct ci13xxx_req, queue);
+
+                       n += scnprintf(buf + n, PAGE_SIZE - n,
+                                       "EP=%02i: TD=%08X %s\n",
+                                       i % hw_ep_max/2, (u32)req->dma,
+                                       ((i < hw_ep_max/2) ? "RX" : "TX"));
 
+                       for (j = 0; j < qSize; j++)
                                n += scnprintf(buf + n, PAGE_SIZE - n,
-                                              "EP=%02i: TD=%08X %s\n",
-                                              i, (u32)req->dma,
-                                              ((k == RX) ? "RX" : "TX"));
-
-                               for (j = 0; j < qSize; j++)
-                                       n += scnprintf(buf + n, PAGE_SIZE - n,
-                                                      " %04X:    %08X\n", j,
-                                                      *((u32 *)req->ptr + j));
-                       }
+                                               " %04X:    %08X\n", j,
+                                               *((u32 *)req->ptr + j));
+               }
        spin_unlock_irqrestore(udc->lock, flags);
 
        return n;
@@ -1467,12 +1477,12 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
         *  At this point it's guaranteed exclusive access to qhead
         *  (endpt is not primed) so it's no need to use tripwire
         */
-       mEp->qh[mEp->dir].ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
-       mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS;   /* clear status */
+       mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+       mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
        if (mReq->req.zero == 0)
-               mEp->qh[mEp->dir].ptr->cap |=  QH_ZLT;
+               mEp->qh.ptr->cap |=  QH_ZLT;
        else
-               mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+               mEp->qh.ptr->cap &= ~QH_ZLT;
 
        wmb();   /* synchronize before ep prime */
 
@@ -1542,11 +1552,11 @@ __acquires(mEp->lock)
 
        hw_ep_flush(mEp->num, mEp->dir);
 
-       while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+       while (!list_empty(&mEp->qh.queue)) {
 
                /* pop oldest request */
                struct ci13xxx_req *mReq = \
-                       list_entry(mEp->qh[mEp->dir].queue.next,
+                       list_entry(mEp->qh.queue.next,
                                   struct ci13xxx_req, queue);
                list_del_init(&mReq->queue);
                mReq->req.status = -ESHUTDOWN;
@@ -1571,8 +1581,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 {
        struct usb_ep *ep;
        struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
-       struct ci13xxx_ep *mEp = container_of(gadget->ep0,
-                                             struct ci13xxx_ep, ep);
 
        trace("%p", gadget);
 
@@ -1583,7 +1591,8 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
        gadget_for_each_ep(ep, gadget) {
                usb_ep_fifo_flush(ep);
        }
-       usb_ep_fifo_flush(gadget->ep0);
+       usb_ep_fifo_flush(&udc->ep0out.ep);
+       usb_ep_fifo_flush(&udc->ep0in.ep);
 
        udc->driver->disconnect(gadget);
 
@@ -1591,11 +1600,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
        gadget_for_each_ep(ep, gadget) {
                usb_ep_disable(ep);
        }
-       usb_ep_disable(gadget->ep0);
+       usb_ep_disable(&udc->ep0out.ep);
+       usb_ep_disable(&udc->ep0in.ep);
 
-       if (mEp->status != NULL) {
-               usb_ep_free_request(gadget->ep0, mEp->status);
-               mEp->status = NULL;
+       if (udc->status != NULL) {
+               usb_ep_free_request(&udc->ep0in.ep, udc->status);
+               udc->status = NULL;
        }
 
        return 0;
@@ -1614,7 +1624,6 @@ static void isr_reset_handler(struct ci13xxx *udc)
 __releases(udc->lock)
 __acquires(udc->lock)
 {
-       struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
        int retval;
 
        trace("%p", udc);
@@ -1635,11 +1644,15 @@ __acquires(udc->lock)
        if (retval)
                goto done;
 
-       retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+       retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
+       if (retval)
+               goto done;
+
+       retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
        if (!retval) {
-               mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
-               if (mEp->status == NULL) {
-                       usb_ep_disable(&mEp->ep);
+               udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
+               if (udc->status == NULL) {
+                       usb_ep_disable(&udc->ep0out.ep);
                        retval = -ENOMEM;
                }
        }
@@ -1672,16 +1685,17 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
 
 /**
  * isr_get_status_response: get_status request response
- * @ep:    endpoint
+ * @udc: udc struct
  * @setup: setup request packet
  *
  * This function returns an error code
  */
-static int isr_get_status_response(struct ci13xxx_ep *mEp,
+static int isr_get_status_response(struct ci13xxx *udc,
                                   struct usb_ctrlrequest *setup)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
+       struct ci13xxx_ep *mEp = &udc->ep0in;
        struct usb_request *req = NULL;
        gfp_t gfp_flags = GFP_ATOMIC;
        int dir, num, retval;
@@ -1736,27 +1750,23 @@ __acquires(mEp->lock)
 
 /**
  * isr_setup_status_phase: queues the status phase of a setup transaction
- * @mEp: endpoint
+ * @udc: udc struct
  *
  * This function returns an error code
  */
-static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+static int isr_setup_status_phase(struct ci13xxx *udc)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
        int retval;
+       struct ci13xxx_ep *mEp;
 
-       trace("%p", mEp);
-
-       /* mEp is always valid & configured */
-
-       if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-               mEp->dir = (mEp->dir == TX) ? RX : TX;
+       trace("%p", udc);
 
-       mEp->status->no_interrupt = 1;
+       mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
 
        spin_unlock(mEp->lock);
-       retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+       retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
        spin_lock(mEp->lock);
 
        return retval;
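
Selecting ep0out for an IN data stage (and ep0in otherwise) follows the USB rule that the status stage of a control transfer runs opposite to the data stage, and is always IN when there is no data stage. A small illustrative helper expressing that rule; it is not part of the driver:

#include <linux/kernel.h>
#include <linux/usb/ch9.h>

/* Illustration only: true if the status stage of the control transfer
 * described by 'setup' is an IN transaction (no data stage, or an OUT
 * data stage); false if the status stage is OUT. */
static inline bool ctrl_status_stage_is_in(const struct usb_ctrlrequest *setup)
{
	return le16_to_cpu(setup->wLength) == 0 ||
	       !(setup->bRequestType & USB_DIR_IN);
}
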
@@ -1778,11 +1788,11 @@ __acquires(mEp->lock)
 
        trace("%p", mEp);
 
-       if (list_empty(&mEp->qh[mEp->dir].queue))
+       if (list_empty(&mEp->qh.queue))
                return -EINVAL;
 
        /* pop oldest request */
-       mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+       mReq = list_entry(mEp->qh.queue.next,
                          struct ci13xxx_req, queue);
        list_del_init(&mReq->queue);
 
@@ -1794,10 +1804,10 @@ __acquires(mEp->lock)
 
        dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
 
-       if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+       if (!list_empty(&mEp->qh.queue)) {
                struct ci13xxx_req* mReqEnq;
 
-               mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
+               mReqEnq = list_entry(mEp->qh.queue.next,
                                  struct ci13xxx_req, queue);
                _hardware_enqueue(mEp, mReqEnq);
        }
@@ -1836,16 +1846,14 @@ __acquires(udc->lock)
                int type, num, err = -EINVAL;
                struct usb_ctrlrequest req;
 
-
                if (mEp->desc == NULL)
                        continue;   /* not configured */
 
-               if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
-                   (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+               if (hw_test_and_clear_complete(i)) {
                        err = isr_tr_complete_low(mEp);
                        if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
                                if (err > 0)   /* needs status phase */
-                                       err = isr_setup_status_phase(mEp);
+                                       err = isr_setup_status_phase(udc);
                                if (err < 0) {
                                        dbg_event(_usb_addr(mEp),
                                                  "ERROR", err);
@@ -1866,15 +1874,22 @@ __acquires(udc->lock)
                        continue;
                }
 
+               /*
+                * Flush data and handshake transactions of previous
+                * setup packet.
+                */
+               _ep_nuke(&udc->ep0out);
+               _ep_nuke(&udc->ep0in);
+
                /* read_setup_packet */
                do {
                        hw_test_and_set_setup_guard();
-                       memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+                       memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
                } while (!hw_test_and_clear_setup_guard());
 
                type = req.bRequestType;
 
-               mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+               udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
 
                dbg_setup(_usb_addr(mEp), &req);
 
@@ -1895,7 +1910,7 @@ __acquires(udc->lock)
                                if (err)
                                        break;
                        }
-                       err = isr_setup_status_phase(mEp);
+                       err = isr_setup_status_phase(udc);
                        break;
                case USB_REQ_GET_STATUS:
                        if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
@@ -1905,7 +1920,7 @@ __acquires(udc->lock)
                        if (le16_to_cpu(req.wLength) != 2 ||
                            le16_to_cpu(req.wValue)  != 0)
                                break;
-                       err = isr_get_status_response(mEp, &req);
+                       err = isr_get_status_response(udc, &req);
                        break;
                case USB_REQ_SET_ADDRESS:
                        if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
@@ -1916,7 +1931,7 @@ __acquires(udc->lock)
                        err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
                        if (err)
                                break;
-                       err = isr_setup_status_phase(mEp);
+                       err = isr_setup_status_phase(udc);
                        break;
                case USB_REQ_SET_FEATURE:
                        if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
@@ -1932,12 +1947,12 @@ __acquires(udc->lock)
                        spin_lock(udc->lock);
                        if (err)
                                break;
-                       err = isr_setup_status_phase(mEp);
+                       err = isr_setup_status_phase(udc);
                        break;
                default:
 delegate:
                        if (req.wLength == 0)   /* no data phase */
-                               mEp->dir = TX;
+                               udc->ep0_dir = TX;
 
                        spin_unlock(udc->lock);
                        err = udc->driver->setup(&udc->gadget, &req);
@@ -1968,7 +1983,7 @@ static int ep_enable(struct usb_ep *ep,
                     const struct usb_endpoint_descriptor *desc)
 {
        struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-       int direction, retval = 0;
+       int retval = 0;
        unsigned long flags;
 
        trace("%p, %p", ep, desc);
@@ -1982,7 +1997,7 @@ static int ep_enable(struct usb_ep *ep,
 
        mEp->desc = desc;
 
-       if (!list_empty(&mEp->qh[mEp->dir].queue))
+       if (!list_empty(&mEp->qh.queue))
                warn("enabling a non-empty endpoint!");
 
        mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
@@ -1991,29 +2006,22 @@ static int ep_enable(struct usb_ep *ep,
 
        mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
 
-       direction = mEp->dir;
-       do {
-               dbg_event(_usb_addr(mEp), "ENABLE", 0);
+       dbg_event(_usb_addr(mEp), "ENABLE", 0);
 
-               mEp->qh[mEp->dir].ptr->cap = 0;
+       mEp->qh.ptr->cap = 0;
 
-               if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-                       mEp->qh[mEp->dir].ptr->cap |=  QH_IOS;
-               else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
-                       mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
-               else
-                       mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
-
-               mEp->qh[mEp->dir].ptr->cap |=
-                       (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
-               mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE;   /* needed? */
-
-               retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+       if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+               mEp->qh.ptr->cap |=  QH_IOS;
+       else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+               mEp->qh.ptr->cap &= ~QH_MULT;
+       else
+               mEp->qh.ptr->cap &= ~QH_ZLT;
 
-               if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-                       mEp->dir = (mEp->dir == TX) ? RX : TX;
+       mEp->qh.ptr->cap |=
+               (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+       mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
 
-       } while (mEp->dir != direction);
+       retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
 
        spin_unlock_irqrestore(mEp->lock, flags);
        return retval;
@@ -2146,7 +2154,7 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
        spin_lock_irqsave(mEp->lock, flags);
 
        if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
-           !list_empty(&mEp->qh[mEp->dir].queue)) {
+           !list_empty(&mEp->qh.queue)) {
                _ep_nuke(mEp);
                retval = -EOVERFLOW;
                warn("endpoint ctrl %X nuked", _usb_addr(mEp));
@@ -2170,9 +2178,9 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
        /* push request */
        mReq->req.status = -EINPROGRESS;
        mReq->req.actual = 0;
-       list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+       list_add_tail(&mReq->queue, &mEp->qh.queue);
 
-       if (list_is_singular(&mEp->qh[mEp->dir].queue))
+       if (list_is_singular(&mEp->qh.queue))
                retval = _hardware_enqueue(mEp, mReq);
 
        if (retval == -EALREADY) {
@@ -2199,7 +2207,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
        trace("%p, %p", ep, req);
 
        if (ep == NULL || req == NULL || mEp->desc == NULL ||
-           list_empty(&mReq->queue)  || list_empty(&mEp->qh[mEp->dir].queue))
+           list_empty(&mReq->queue)  || list_empty(&mEp->qh.queue))
                return -EINVAL;
 
        spin_lock_irqsave(mEp->lock, flags);
@@ -2244,7 +2252,7 @@ static int ep_set_halt(struct usb_ep *ep, int value)
 #ifndef STALL_IN
        /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
        if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-           !list_empty(&mEp->qh[mEp->dir].queue)) {
+           !list_empty(&mEp->qh.queue)) {
                spin_unlock_irqrestore(mEp->lock, flags);
                return -EAGAIN;
        }
@@ -2355,7 +2363,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
                if (is_active) {
                        pm_runtime_get_sync(&_gadget->dev);
                        hw_device_reset(udc);
-                       hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+                       hw_device_state(udc->ep0out.qh.dma);
                } else {
                        hw_device_state(0);
                        if (udc->udc_driver->notify_event)
@@ -2390,7 +2398,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
                int (*bind)(struct usb_gadget *))
 {
        struct ci13xxx *udc = _udc;
-       unsigned long i, k, flags;
+       unsigned long flags;
+       int i, j;
        int retval = -ENOMEM;
 
        trace("%p", driver);
@@ -2427,45 +2436,46 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 
        info("hw_ep_max = %d", hw_ep_max);
 
-       udc->driver = driver;
        udc->gadget.dev.driver = NULL;
 
        retval = 0;
-       for (i = 0; i < hw_ep_max; i++) {
-               struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+       for (i = 0; i < hw_ep_max/2; i++) {
+               for (j = RX; j <= TX; j++) {
+                       int k = i + j * hw_ep_max/2;
+                       struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
 
-               scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
+                       scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+                                       (j == TX)  ? "in" : "out");
 
-               mEp->lock         = udc->lock;
-               mEp->device       = &udc->gadget.dev;
-               mEp->td_pool      = udc->td_pool;
+                       mEp->lock         = udc->lock;
+                       mEp->device       = &udc->gadget.dev;
+                       mEp->td_pool      = udc->td_pool;
 
-               mEp->ep.name      = mEp->name;
-               mEp->ep.ops       = &usb_ep_ops;
-               mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+                       mEp->ep.name      = mEp->name;
+                       mEp->ep.ops       = &usb_ep_ops;
+                       mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
 
-               /* this allocation cannot be random */
-               for (k = RX; k <= TX; k++) {
-                       INIT_LIST_HEAD(&mEp->qh[k].queue);
+                       INIT_LIST_HEAD(&mEp->qh.queue);
                        spin_unlock_irqrestore(udc->lock, flags);
-                       mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
-                                                       GFP_KERNEL,
-                                                       &mEp->qh[k].dma);
+                       mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+                                       &mEp->qh.dma);
                        spin_lock_irqsave(udc->lock, flags);
-                       if (mEp->qh[k].ptr == NULL)
+                       if (mEp->qh.ptr == NULL)
                                retval = -ENOMEM;
                        else
-                               memset(mEp->qh[k].ptr, 0,
-                                      sizeof(*mEp->qh[k].ptr));
-               }
-               if (i == 0)
-                       udc->gadget.ep0 = &mEp->ep;
-               else
+                               memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+                       /* skip ep0 out and in endpoints */
+                       if (i == 0)
+                               continue;
+
                        list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+               }
        }
        if (retval)
                goto done;
 
+       udc->gadget.ep0 = &udc->ep0in.ep;
        /* bind gadget */
        driver->driver.bus     = NULL;
        udc->gadget.dev.driver = &driver->driver;
@@ -2479,6 +2489,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
                goto done;
        }
 
+       udc->driver = driver;
        pm_runtime_get_sync(&udc->gadget.dev);
        if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
                if (udc->vbus_active) {
@@ -2490,14 +2501,12 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
                }
        }
 
-       retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+       retval = hw_device_state(udc->ep0out.qh.dma);
        if (retval)
                pm_runtime_put_sync(&udc->gadget.dev);
 
  done:
        spin_unlock_irqrestore(udc->lock, flags);
-       if (retval)
-               usb_gadget_unregister_driver(driver);
        return retval;
 }
 EXPORT_SYMBOL(usb_gadget_probe_driver);
@@ -2510,7 +2519,7 @@ EXPORT_SYMBOL(usb_gadget_probe_driver);
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
        struct ci13xxx *udc = _udc;
-       unsigned long i, k, flags;
+       unsigned long i, flags;
 
        trace("%p", driver);
 
@@ -2546,17 +2555,14 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
        for (i = 0; i < hw_ep_max; i++) {
                struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
 
-               if (i == 0)
-                       udc->gadget.ep0 = NULL;
-               else if (!list_empty(&mEp->ep.ep_list))
+               if (!list_empty(&mEp->ep.ep_list))
                        list_del_init(&mEp->ep.ep_list);
 
-               for (k = RX; k <= TX; k++)
-                       if (mEp->qh[k].ptr != NULL)
-                               dma_pool_free(udc->qh_pool,
-                                             mEp->qh[k].ptr, mEp->qh[k].dma);
+               if (mEp->qh.ptr != NULL)
+                       dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
        }
 
+       udc->gadget.ep0 = NULL;
        udc->driver = NULL;
 
        spin_unlock_irqrestore(udc->lock, flags);
index f61fed0..a2492b6 100644 (file)
@@ -20,7 +20,7 @@
  * DEFINE
  *****************************************************************************/
 #define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
-#define ENDPT_MAX          (16)
+#define ENDPT_MAX          (32)
 #define CTRL_PAYLOAD_MAX   (64)
 #define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
 #define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
@@ -88,8 +88,7 @@ struct ci13xxx_ep {
                struct list_head   queue;
                struct ci13xxx_qh *ptr;
                dma_addr_t         dma;
-       }                                      qh[2];
-       struct usb_request                    *status;
+       }                                      qh;
        int                                    wedge;
 
        /* global resources */
@@ -119,9 +118,13 @@ struct ci13xxx {
 
        struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
        struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
+       struct usb_request        *status;    /* ep0 status request */
 
        struct usb_gadget          gadget;     /* USB slave device */
        struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+       u32                        ep0_dir;    /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in  ci13xxx_ep[16]
 
        struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
        struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
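
With the two-entry qh[] array gone, each hardware endpoint number now owns two ci13xxx_ep slots, RX/OUT in the lower half of the array and TX/IN in the upper half, which is why ENDPT_MAX doubles to 32 and ep0in maps to slot 16. A hedged sketch of the index arithmetic; the helper name is made up, and hw_ep_max is the value cached by hw_device_init():

/* Illustration only: array index of the ci13xxx_ep for endpoint 'num'
 * in direction 'dir' (RX or TX as defined above). */
static inline unsigned ci13xxx_ep_index(unsigned num, unsigned dir,
					unsigned hw_ep_max)
{
	return num + (dir == TX ? hw_ep_max / 2 : 0);
}

/* e.g. with hw_ep_max == 32: ep1out is slot 1, ep1in is slot 17. */
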
index f6ff845..1ba4bef 100644 (file)
@@ -928,8 +928,9 @@ unknown:
                 */
                switch (ctrl->bRequestType & USB_RECIP_MASK) {
                case USB_RECIP_INTERFACE:
-                       if (cdev->config)
-                               f = cdev->config->interface[intf];
+                       if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+                               break;
+                       f = cdev->config->interface[intf];
                        break;
 
                case USB_RECIP_ENDPOINT:
index b5dbb23..6d8e533 100644 (file)
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
 
 #include "gadget_chips.h"
 
@@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
                        return ERR_PTR(-ENOMEM);
                common->free_storage_on_release = 1;
        } else {
-               memset(common, 0, sizeof common);
+               memset(common, 0, sizeof *common);
                common->free_storage_on_release = 0;
        }
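
The one-character change above fixes a classic C mistake: sizeof common is the size of the pointer, not of the structure it points to, so the old memset() cleared only the first 4 or 8 bytes of *common. A self-contained user-space illustration (the struct name is made up):

#include <stdio.h>
#include <string.h>

struct common_like { char buf[512]; };

int main(void)
{
	struct common_like c, *common = &c;

	printf("sizeof common  = %zu\n", sizeof common);   /* pointer size */
	printf("sizeof *common = %zu\n", sizeof *common);  /* 512 */

	memset(common, 0, sizeof *common);  /* zeroes the whole structure */
	return 0;
}
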
 
index 3c6e1a0..5e14950 100644 (file)
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
 
                if (unlikely(!skb))
                        break;
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
-                               req->actual);
-               page = NULL;
 
-               if (req->actual < req->length) { /* Last fragment */
+               if (skb->len == 0) { /* First fragment */
                        skb->protocol = htons(ETH_P_PHONET);
                        skb_reset_mac_header(skb);
-                       pskb_pull(skb, 1);
+                       /* Can't use pskb_pull() on page in IRQ */
+                       memcpy(skb_put(skb, 1), page_address(page), 1);
+               }
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               skb->len == 0, req->actual);
+               page = NULL;
+
+               if (req->actual < req->length) { /* Last fragment */
                        skb->dev = dev;
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += skb->len;
index 0c8dd81..b120dbb 100644 (file)
 #define PCH_UDC_BRLEN          0x0F    /* Burst length */
 #define PCH_UDC_THLEN          0x1F    /* Threshold length */
 /* Value of EP Buffer Size */
-#define UDC_EP0IN_BUFF_SIZE    64
-#define UDC_EPIN_BUFF_SIZE     512
-#define UDC_EP0OUT_BUFF_SIZE   64
-#define UDC_EPOUT_BUFF_SIZE    512
+#define UDC_EP0IN_BUFF_SIZE    16
+#define UDC_EPIN_BUFF_SIZE     256
+#define UDC_EP0OUT_BUFF_SIZE   16
+#define UDC_EPOUT_BUFF_SIZE    256
 /* Value of EP maximum packet size */
 #define UDC_EP0IN_MAX_PKT_SIZE 64
 #define UDC_EP0OUT_MAX_PKT_SIZE        64
@@ -351,7 +351,7 @@ struct pch_udc_dev {
        struct pci_pool         *data_requests;
        struct pci_pool         *stp_requests;
        dma_addr_t                      dma_addr;
-       unsigned long                   ep0out_buf[64];
+       void                            *ep0out_buf;
        struct usb_ctrlrequest          setup_data;
        unsigned long                   phys_addr;
        void __iomem                    *base_addr;
@@ -361,6 +361,8 @@ struct pch_udc_dev {
 
 #define PCH_UDC_PCI_BAR                        1
 #define PCI_DEVICE_ID_INTEL_EG20T_UDC  0x8808
+#define PCI_VENDOR_ID_ROHM             0x10DB
+#define PCI_DEVICE_ID_ML7213_IOH_UDC   0x801D
 
 static const char      ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);    /* stall spin lock */
@@ -1219,11 +1221,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
        dev = ep->dev;
        if (req->dma_mapped) {
                if (ep->in)
-                       pci_unmap_single(dev->pdev, req->req.dma,
-                                        req->req.length, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&dev->pdev->dev, req->req.dma,
+                                        req->req.length, DMA_TO_DEVICE);
                else
-                       pci_unmap_single(dev->pdev, req->req.dma,
-                                        req->req.length, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&dev->pdev->dev, req->req.dma,
+                                        req->req.length, DMA_FROM_DEVICE);
                req->dma_mapped = 0;
                req->req.dma = DMA_ADDR_INVALID;
        }
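
This hunk (and the queue path further down) moves from the PCI-specific pci_map_single()/pci_unmap_single() wrappers to the generic DMA API against &pdev->dev. The usual streaming-mapping idiom, sketched with invented names and with the dma_mapping_error() check included only as common practice, not as part of the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustration only: map a request buffer for one DMA transfer and
 * unmap it once the hardware is done with it. */
static int example_map_request(struct device *dev, void *buf, size_t len,
			       bool to_device, dma_addr_t *dma)
{
	enum dma_data_direction dir = to_device ? DMA_TO_DEVICE
						: DMA_FROM_DEVICE;

	*dma = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;

	/* ... program *dma into the controller, wait for completion ... */

	dma_unmap_single(dev, *dma, len, dir);
	return 0;
}
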
@@ -1414,7 +1416,6 @@ static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
 
        pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
        td_data = req->td_data;
-       ep->td_data = req->td_data;
        /* Set the status bits for all descriptors */
        while (1) {
                td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
@@ -1613,15 +1614,19 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
        if (usbreq->length &&
            ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
                if (ep->in)
-                       usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
-                                       usbreq->length, PCI_DMA_TODEVICE);
+                       usbreq->dma = dma_map_single(&dev->pdev->dev,
+                                                    usbreq->buf,
+                                                    usbreq->length,
+                                                    DMA_TO_DEVICE);
                else
-                       usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
-                                       usbreq->length, PCI_DMA_FROMDEVICE);
+                       usbreq->dma = dma_map_single(&dev->pdev->dev,
+                                                    usbreq->buf,
+                                                    usbreq->length,
+                                                    DMA_FROM_DEVICE);
                req->dma_mapped = 1;
        }
        if (usbreq->length > 0) {
-               retval = prepare_dma(ep, req, gfp);
+               retval = prepare_dma(ep, req, GFP_ATOMIC);
                if (retval)
                        goto probe_end;
        }
@@ -1646,7 +1651,6 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
                        pch_udc_wait_ep_stall(ep);
                        pch_udc_ep_clear_nak(ep);
                        pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
-                       pch_udc_set_dma(dev, DMA_DIR_TX);
                }
        }
        /* Now add this request to the ep's pending requests */
@@ -1926,6 +1930,7 @@ static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
            PCH_UDC_BS_DMA_DONE)
                return;
        pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+       pch_udc_ep_set_ddptr(ep, 0);
        if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
            PCH_UDC_RTS_SUCC) {
                dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
@@ -1963,7 +1968,7 @@ static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
        u32     epsts;
        struct pch_udc_ep       *ep;
 
-       ep = &dev->ep[2*ep_num];
+       ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
        epsts = ep->epsts;
        ep->epsts = 0;
 
@@ -2008,7 +2013,7 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
        struct pch_udc_ep               *ep;
        struct pch_udc_request          *req = NULL;
 
-       ep = &dev->ep[2*ep_num + 1];
+       ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
        epsts = ep->epsts;
        ep->epsts = 0;
 
@@ -2025,10 +2030,11 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
        }
        if (epsts & UDC_EPSTS_HE)
                return;
-       if (epsts & UDC_EPSTS_RSS)
+       if (epsts & UDC_EPSTS_RSS) {
                pch_udc_ep_set_stall(ep);
                pch_udc_enable_ep_interrupts(ep->dev,
                                             PCH_UDC_EPINT(ep->in, ep->num));
+       }
        if (epsts & UDC_EPSTS_RCS) {
                if (!dev->prot_stall) {
                        pch_udc_ep_clear_stall(ep);
@@ -2060,8 +2066,10 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
 {
        u32     epsts;
        struct pch_udc_ep       *ep;
+       struct pch_udc_ep       *ep_out;
 
        ep = &dev->ep[UDC_EP0IN_IDX];
+       ep_out = &dev->ep[UDC_EP0OUT_IDX];
        epsts = ep->epsts;
        ep->epsts = 0;
 
@@ -2073,8 +2081,16 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
                return;
        if (epsts & UDC_EPSTS_HE)
                return;
-       if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
+       if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
                pch_udc_complete_transfer(ep);
+               pch_udc_clear_dma(dev, DMA_DIR_RX);
+               ep_out->td_data->status = (ep_out->td_data->status &
+                                       ~PCH_UDC_BUFF_STS) |
+                                       PCH_UDC_BS_HST_RDY;
+               pch_udc_ep_clear_nak(ep_out);
+               pch_udc_set_dma(dev, DMA_DIR_RX);
+               pch_udc_ep_set_rrdy(ep_out);
+       }
        /* On IN interrupt, provide data if we have any */
        if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
             !(epsts & UDC_EPSTS_TXEMPTY))
@@ -2102,11 +2118,9 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
                dev->stall = 0;
                dev->ep[UDC_EP0IN_IDX].halted = 0;
                dev->ep[UDC_EP0OUT_IDX].halted = 0;
-               /* In data not ready */
-               pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
                dev->setup_data = ep->td_stp->request;
                pch_udc_init_setup_buff(ep->td_stp);
-               pch_udc_clear_dma(dev, DMA_DIR_TX);
+               pch_udc_clear_dma(dev, DMA_DIR_RX);
                pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
                                      dev->ep[UDC_EP0IN_IDX].in);
                if ((dev->setup_data.bRequestType & USB_DIR_IN))
@@ -2122,14 +2136,23 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
                setup_supported = dev->driver->setup(&dev->gadget,
                                                     &dev->setup_data);
                spin_lock(&dev->lock);
+
+               if (dev->setup_data.bRequestType & USB_DIR_IN) {
+                       ep->td_data->status = (ep->td_data->status &
+                                               ~PCH_UDC_BUFF_STS) |
+                                               PCH_UDC_BS_HST_RDY;
+                       pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+               }
                /* ep0 in returns data on IN phase */
                if (setup_supported >= 0 && setup_supported <
                                            UDC_EP0IN_MAX_PKT_SIZE) {
                        pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
                        /* Gadget would have queued a request when
                         * we called the setup */
-                       pch_udc_set_dma(dev, DMA_DIR_RX);
-                       pch_udc_ep_clear_nak(ep);
+                       if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
+                               pch_udc_set_dma(dev, DMA_DIR_RX);
+                               pch_udc_ep_clear_nak(ep);
+                       }
                } else if (setup_supported < 0) {
                        /* if unsupported request, then stall */
                        pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
@@ -2142,22 +2165,13 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
                }
        } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
                     UDC_EPSTS_OUT_DATA) && !dev->stall) {
-               if (list_empty(&ep->queue)) {
-                       dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
-                       ep->td_data->status = (ep->td_data->status &
-                                              ~PCH_UDC_BUFF_STS) |
-                                              PCH_UDC_BS_HST_RDY;
-                       pch_udc_set_dma(dev, DMA_DIR_RX);
-               } else {
-                       /* control write */
-                       /* next function will pickuo an clear the status */
+               pch_udc_clear_dma(dev, DMA_DIR_RX);
+               pch_udc_ep_set_ddptr(ep, 0);
+               if (!list_empty(&ep->queue)) {
                        ep->epsts = stat;
-
-                       pch_udc_svc_data_out(dev, 0);
-                       /* re-program desc. pointer for possible ZLPs */
-                       pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
-                       pch_udc_set_dma(dev, DMA_DIR_RX);
+                       pch_udc_svc_data_out(dev, PCH_UDC_EP0);
                }
+               pch_udc_set_dma(dev, DMA_DIR_RX);
        }
        pch_udc_ep_set_rrdy(ep);
 }
@@ -2174,7 +2188,7 @@ static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
        struct pch_udc_ep       *ep;
        struct pch_udc_request *req;
 
-       ep = &dev->ep[2*ep_num];
+       ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
        if (!list_empty(&ep->queue)) {
                req = list_entry(ep->queue.next, struct pch_udc_request, queue);
                pch_udc_enable_ep_interrupts(ep->dev,
@@ -2196,13 +2210,13 @@ static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
        for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
                /* IN */
                if (ep_intr & (0x1 << i)) {
-                       ep = &dev->ep[2*i];
+                       ep = &dev->ep[UDC_EPIN_IDX(i)];
                        ep->epsts = pch_udc_read_ep_status(ep);
                        pch_udc_clear_ep_status(ep, ep->epsts);
                }
                /* OUT */
                if (ep_intr & (0x10000 << i)) {
-                       ep = &dev->ep[2*i+1];
+                       ep = &dev->ep[UDC_EPOUT_IDX(i)];
                        ep->epsts = pch_udc_read_ep_status(ep);
                        pch_udc_clear_ep_status(ep, ep->epsts);
                }
@@ -2563,9 +2577,6 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
        dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
        dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
 
-       dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
-                                 PCI_DMA_FROMDEVICE);
-
        /* remove ep0 in and out from the list.  They have own pointer */
        list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
        list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
@@ -2637,6 +2648,13 @@ static int init_dma_pools(struct pch_udc_dev *dev)
        dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
        dev->ep[UDC_EP0IN_IDX].td_data = NULL;
        dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+
+       dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
+       if (!dev->ep0out_buf)
+               return -ENOMEM;
+       dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
+                                      UDC_EP0OUT_BUFF_SIZE * 4,
+                                      DMA_FROM_DEVICE);
        return 0;
 }
 
@@ -2700,7 +2718,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 
        pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
 
-       /* Assues that there are no pending requets with this driver */
+       /* Assures that there are no pending requests with this driver */
+       driver->disconnect(&dev->gadget);
        driver->unbind(&dev->gadget);
        dev->gadget.dev.driver = NULL;
        dev->driver = NULL;
@@ -2750,6 +2769,11 @@ static void pch_udc_remove(struct pci_dev *pdev)
                pci_pool_destroy(dev->stp_requests);
        }
 
+       if (dev->dma_addr)
+               dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
+                                UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
+       kfree(dev->ep0out_buf);
+
        pch_udc_exit(dev);
 
        if (dev->irq_registered)
@@ -2792,11 +2816,7 @@ static int pch_udc_resume(struct pci_dev *pdev)
        int ret;
 
        pci_set_power_state(pdev, PCI_D0);
-       ret = pci_restore_state(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
-               return ret;
-       }
+       pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
@@ -2914,6 +2934,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
                .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
                .class_mask = 0xffffffff,
        },
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
+               .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+               .class_mask = 0xffffffff,
+       },
        { 0 },
 };
 
index 2fc8636..12ff6cf 100644 (file)
@@ -131,31 +131,31 @@ static struct printer_dev usb_printer_gadget;
  * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
  */
 
-static ushort __initdata idVendor;
+static ushort idVendor;
 module_param(idVendor, ushort, S_IRUGO);
 MODULE_PARM_DESC(idVendor, "USB Vendor ID");
 
-static ushort __initdata idProduct;
+static ushort idProduct;
 module_param(idProduct, ushort, S_IRUGO);
 MODULE_PARM_DESC(idProduct, "USB Product ID");
 
-static ushort __initdata bcdDevice;
+static ushort bcdDevice;
 module_param(bcdDevice, ushort, S_IRUGO);
 MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
 
-static char *__initdata iManufacturer;
+static char *iManufacturer;
 module_param(iManufacturer, charp, S_IRUGO);
 MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
 
-static char *__initdata iProduct;
+static char *iProduct;
 module_param(iProduct, charp, S_IRUGO);
 MODULE_PARM_DESC(iProduct, "USB Product string");
 
-static char *__initdata iSerialNum;
+static char *iSerialNum;
 module_param(iSerialNum, charp, S_IRUGO);
 MODULE_PARM_DESC(iSerialNum, "1");
 
-static char *__initdata iPNPstring;
+static char *iPNPstring;
 module_param(iPNPstring, charp, S_IRUGO);
 MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
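
Dropping __initdata from these module parameters matters because their storage is read not only while the arguments are parsed but also later through /sys/module/g_printer/parameters/, after init memory has been freed. A minimal hedged example of the corrected pattern (the parameter name is invented):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Parameter storage must last for the whole module lifetime, so it is a
 * plain static variable rather than __initdata. */
static ushort example_vid;
module_param(example_vid, ushort, S_IRUGO);
MODULE_PARM_DESC(example_vid, "example vendor ID (illustration only)");
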
 
@@ -1596,13 +1596,12 @@ cleanup(void)
        int status;
 
        mutex_lock(&usb_printer_gadget.lock_printer_io);
-       class_destroy(usb_gadget_class);
-       unregister_chrdev_region(g_printer_devno, 2);
-
        status = usb_gadget_unregister_driver(&printer_driver);
        if (status)
                ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
 
+       unregister_chrdev_region(g_printer_devno, 2);
+       class_destroy(usb_gadget_class);
        mutex_unlock(&usb_printer_gadget.lock_printer_io);
 }
 module_exit(cleanup);
index 20d43da..0151185 100644 (file)
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597,
                break;
        case R8A66597_BULK:
                /* isochronous pipes may be used as bulk pipes */
-               if (info->pipe > R8A66597_BASE_PIPENUM_BULK)
+               if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
                        bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
                else
                        bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
index 24046c0..0e6afa2 100644 (file)
@@ -151,6 +151,8 @@ config USB_EHCI_MSM
          Qualcomm chipsets. Root Hub has inbuilt TT.
          This driver depends on OTG driver for PHY initialization,
          clock management, powering up VBUS, and power management.
+         This driver is not supported on boards like trout, which
+         have an external PHY.
 
 config USB_EHCI_HCD_PPC_OF
        bool "EHCI support for PPC USB controller on OF platform bus"
index 2baf8a8..a869e3c 100644 (file)
@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
         * mark HW unaccessible.  The PM and USB cores make sure that
         * the root hub is either suspended or stopped.
         */
-       spin_lock_irqsave(&ehci->lock, flags);
        ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
+       spin_lock_irqsave(&ehci->lock, flags);
        ehci_writel(ehci, 0, &ehci->regs->intr_enable);
        (void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
index 86e4289..5c761df 100644 (file)
@@ -52,7 +52,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
        struct resource *res;
        int irq;
        int retval;
-       unsigned int temp;
 
        pr_debug("initializing FSL-SOC USB Controller\n");
 
@@ -126,18 +125,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
                goto err3;
        }
 
-       /*
-        * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
-        * flag for 83xx or 8536 system interface registers.
-        */
-       if (pdata->big_endian_mmio)
-               temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
-       else
-               temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
-
-       if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
-               pdata->have_sysif_regs = 1;
-
        /* Enable USB controller, 83xx or 8536 */
        if (pdata->have_sysif_regs)
                setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
index 2c83537..3fabed3 100644 (file)
@@ -19,9 +19,6 @@
 #define _EHCI_FSL_H
 
 /* offsets for the non-ehci registers in the FSL SOC USB controller */
-#define FSL_SOC_USB_ID         0x0
-#define ID_MSK                 0x3f
-#define NID_MSK                        0x3f00
 #define FSL_SOC_USB_ULPIVP     0x170
 #define FSL_SOC_USB_PORTSC1    0x184
 #define PORT_PTS_MSK           (3<<30)
index 6fee3cd..74dcf49 100644 (file)
@@ -572,6 +572,8 @@ static int ehci_init(struct usb_hcd *hcd)
        ehci->iaa_watchdog.function = ehci_iaa_watchdog;
        ehci->iaa_watchdog.data = (unsigned long) ehci;
 
+       hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
        /*
         * hw default: 1K periodic list heads, one per frame.
         * periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -579,11 +581,20 @@ static int ehci_init(struct usb_hcd *hcd)
        ehci->periodic_size = DEFAULT_I_TDPS;
        INIT_LIST_HEAD(&ehci->cached_itd_list);
        INIT_LIST_HEAD(&ehci->cached_sitd_list);
+
+       if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+               /* periodic schedule size can be smaller than default */
+               switch (EHCI_TUNE_FLS) {
+               case 0: ehci->periodic_size = 1024; break;
+               case 1: ehci->periodic_size = 512; break;
+               case 2: ehci->periodic_size = 256; break;
+               default:        BUG();
+               }
+       }
        if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
                return retval;
 
        /* controllers may cache some of the periodic schedule ... */
-       hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
        if (HCC_ISOC_CACHE(hcc_params))         // full frame cache
                ehci->i_thresh = 2 + 8;
        else                                    // N microframes cached
@@ -637,12 +648,6 @@ static int ehci_init(struct usb_hcd *hcd)
                /* periodic schedule size can be smaller than default */
                temp &= ~(3 << 2);
                temp |= (EHCI_TUNE_FLS << 2);
-               switch (EHCI_TUNE_FLS) {
-               case 0: ehci->periodic_size = 1024; break;
-               case 1: ehci->periodic_size = 512; break;
-               case 2: ehci->periodic_size = 256; break;
-               default:        BUG();
-               }
        }
        if (HCC_LPM(hcc_params)) {
                /* support link power management EHCI 1.1 addendum */
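
Reading hcc_params before ehci_mem_init() lets the periodic frame list be sized correctly before its memory is allocated; the 2-bit programmable frame list size simply halves the default 1024 entries per step. An illustrative helper, not part of the patch:

/* Illustration only: EHCI frame list sizes selectable when
 * HCC_PGM_FRAMELISTLEN is set.  FLS values 0, 1 and 2 give 1024, 512
 * and 256 elements; 3 is reserved. */
static inline unsigned ehci_fls_to_periodic_size(unsigned fls)
{
	return (fls <= 2) ? (1024u >> fls) : 0;  /* 0 = reserved/invalid */
}
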
index 796ea0c..8a515f0 100644 (file)
@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
 {
        int             port;
        u32             temp;
+       unsigned long   flags;
 
        /* If remote wakeup is enabled for the root hub but disabled
         * for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
        if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
                return;
 
+       spin_lock_irqsave(&ehci->lock, flags);
+
        /* clear phy low-power mode before changing wakeup flags */
        if (ehci->has_hostpc) {
                port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
                        temp = ehci_readl(ehci, hostpc_reg);
                        ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
                }
+               spin_unlock_irqrestore(&ehci->lock, flags);
                msleep(5);
+               spin_lock_irqsave(&ehci->lock, flags);
        }
 
        port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
        /* Does the root hub have a port wakeup pending? */
        if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
                usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+       spin_unlock_irqrestore(&ehci->lock, flags);
 }
 
 static int ehci_bus_suspend (struct usb_hcd *hcd)
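
Both this hunk and the au1xxx suspend hunk earlier follow the same locking rule: ehci->lock is taken with interrupts disabled, so it must be dropped around anything that can sleep, such as the msleep(5) that lets the PHY leave low-power mode, and state touched while the lock was released has to be treated as possibly stale. A generic hedged sketch of that pattern:

#include <linux/spinlock.h>
#include <linux/delay.h>

/* Illustration only: never sleep while holding a spinlock; drop it
 * around the sleeping call and re-check state afterwards. */
static void example_locked_settle(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... write registers that the lock protects ... */
	spin_unlock_irqrestore(lock, flags);

	msleep(5);			/* may sleep: lock not held */

	spin_lock_irqsave(lock, flags);
	/* ... state may have changed while unlocked; revalidate it ... */
	spin_unlock_irqrestore(lock, flags);
}
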
index fa59b26..c8e360d 100644 (file)
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
 #include <linux/slab.h>
 
 #include <mach/mxc_ehci.h>
 
+#include <asm/mach-types.h>
+
 #define ULPI_VIEWPORT_OFFSET   0x170
 
 struct ehci_mxc_priv {
@@ -114,6 +117,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
        struct usb_hcd *hcd;
        struct resource *res;
        int irq, ret;
+       unsigned int flags;
        struct ehci_mxc_priv *priv;
        struct device *dev = &pdev->dev;
        struct ehci_hcd *ehci;
@@ -177,8 +181,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                clk_enable(priv->ahbclk);
        }
 
-       /* "dr" device has its own clock */
-       if (pdev->id == 0) {
+       /* "dr" device has its own clock on i.MX51 */
+       if (cpu_is_mx51() && (pdev->id == 0)) {
                priv->phy1clk = clk_get(dev, "usb_phy1");
                if (IS_ERR(priv->phy1clk)) {
                        ret = PTR_ERR(priv->phy1clk);
@@ -240,6 +244,23 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
        if (ret)
                goto err_add;
 
+       if (pdata->otg) {
+               /*
+                * efikamx and efikasb have a hardware bug that prevents
+                * USB from working unless CHRGVBUS is set, in violation
+                * of the USB specs.
+                */
+               if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
+                       flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
+                       flags |= ULPI_OTG_CTRL_CHRGVBUS;
+                       ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
+                       if (ret) {
+                               dev_err(dev, "unable to set CHRVBUS\n");
+                               goto err_add;
+                       }
+               }
+       }
+
        return 0;
 
 err_add:
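
The Efika quirk above is a plain read-modify-write of the ULPI OTG Control register through the transceiver's I/O ops. A standalone sketch of the same sequence; ulpi_read()/ulpi_write() stand in for otg_io_read()/otg_io_write(), and the register offset and bit position below are assumptions for illustration, not taken from the headers:

#include <stdio.h>
#include <stdint.h>

#define ULPI_OTG_CTRL           0x0a            /* assumed register offset */
#define ULPI_OTG_CTRL_CHRGVBUS  (1 << 4)        /* assumed bit position */

static uint8_t fake_reg;                        /* models the PHY register */

static uint8_t ulpi_read(unsigned reg)           { (void)reg; return fake_reg; }
static int ulpi_write(uint8_t val, unsigned reg) { (void)reg; fake_reg = val; return 0; }

int main(void)
{
        uint8_t flags = ulpi_read(ULPI_OTG_CTRL);

        flags |= ULPI_OTG_CTRL_CHRGVBUS;        /* force VBUS charging on */
        if (ulpi_write(flags, ULPI_OTG_CTRL))
                fprintf(stderr, "unable to set CHRGVBUS\n");
        else
                printf("OTG_CTRL = 0x%02x\n", fake_reg);
        return 0;
}
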
index 680f2ef..f784ceb 100644 (file)
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
                        dev_name(&pdev->dev));
        if (!hcd) {
-               dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+               dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
                ret = -ENOMEM;
                goto err_create_hcd;
        }
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 
        ret = omap_start_ehc(omap, hcd);
        if (ret) {
-               dev_dbg(&pdev->dev, "failed to start ehci\n");
+               dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
                goto err_start;
        }
 
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 
        ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
        if (ret) {
-               dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+               dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
                goto err_add_hcd;
        }
 
index 76179c3..07bb982 100644 (file)
@@ -44,28 +44,35 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
        return 0;
 }
 
-static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
+static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
 {
        struct pci_dev *amd_smbus_dev;
        u8 rev = 0;
 
        amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
-       if (!amd_smbus_dev)
-               return 0;
-
-       pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
-       if (rev < 0x40) {
-               pci_dev_put(amd_smbus_dev);
-               amd_smbus_dev = NULL;
-               return 0;
+       if (amd_smbus_dev) {
+               pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+               if (rev < 0x40) {
+                       pci_dev_put(amd_smbus_dev);
+                       amd_smbus_dev = NULL;
+                       return 0;
+               }
+       } else {
+               amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
+               if (!amd_smbus_dev)
+                       return 0;
+               pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+               if (rev < 0x11 || rev > 0x18) {
+                       pci_dev_put(amd_smbus_dev);
+                       amd_smbus_dev = NULL;
+                       return 0;
+               }
        }
 
        if (!amd_nb_dev)
                amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
-       if (!amd_nb_dev)
-               ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
 
-       ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
+       ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
 
        pci_dev_put(amd_smbus_dev);
        amd_smbus_dev = NULL;
@@ -131,7 +138,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
        /* cache this readonly data; minimize chip reads */
        ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
-       if (ehci_quirk_amd_SB800(ehci))
+       if (ehci_quirk_amd_hudson(ehci))
                ehci->amd_l1_fix = 1;
 
        retval = ehci_halt(ehci);
@@ -360,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * mark HW unaccessible.  The PM and USB cores make sure that
         * the root hub is either suspended or stopped.
         */
-       spin_lock_irqsave (&ehci->lock, flags);
        ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+       spin_lock_irqsave (&ehci->lock, flags);
        ehci_writel(ehci, 0, &ehci->regs->intr_enable);
        (void)ehci_readl(ehci, &ehci->regs->intr_enable);
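
The renamed quirk above keys off which SMBus device is present and its revision: the ATI 0x4385 device at revision 0x40 or above, or the AMD 0x780b device at revisions 0x11 through 0x18. A small sketch that captures just that decision, with the PCI lookups and refcounting elided:

#include <stdbool.h>
#include <stdio.h>

static bool needs_l1_fix(bool ati_smbus, bool amd_smbus, unsigned char rev)
{
        if (ati_smbus)                  /* SB800-era SMBus device (0x4385) */
                return rev >= 0x40;
        if (amd_smbus)                  /* Hudson SMBus device (0x780b) */
                return rev >= 0x11 && rev <= 0x18;
        return false;
}

int main(void)
{
        printf("%d\n", needs_l1_fix(true, false, 0x42));        /* 1 */
        printf("%d\n", needs_l1_fix(false, true, 0x15));        /* 1 */
        printf("%d\n", needs_l1_fix(false, true, 0x20));        /* 0 */
        return 0;
}
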
 
index e8f4f36..a6f21b8 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
 
 /**
  * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
index 574b99e..79a66d6 100644 (file)
@@ -262,19 +262,24 @@ static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
        }
 }
 
-struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
        .big_endian_desc = 1,
        .big_endian_mmio = 1,
        .es = 1,
+       .have_sysif_regs = 0,
        .le_setup_buf = 1,
        .init = fsl_usb2_mpc5121_init,
        .exit = fsl_usb2_mpc5121_exit,
 };
 #endif /* CONFIG_PPC_MPC512x */
 
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+       .have_sysif_regs = 1,
+};
+
 static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
-       { .compatible = "fsl-usb2-mph", },
-       { .compatible = "fsl-usb2-dr", },
+       { .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+       { .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
 #ifdef CONFIG_PPC_MPC512x
        { .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
 #endif
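
The match table above attaches per-compatible platform data through the .data pointer, so probe can pick the right fsl_usb2_platform_data for the node it bound to; in the kernel that lookup would typically go through of_match_device(). A standalone sketch of the idea, using a plain table walk in place of the OF helpers:

#include <stdio.h>
#include <string.h>

struct fsl_usb2_platform_data { int have_sysif_regs; };

static const struct fsl_usb2_platform_data mpc8xxx_pd = { .have_sysif_regs = 1 };
static const struct fsl_usb2_platform_data mpc5121_pd = { .have_sysif_regs = 0 };

struct of_id { const char *compatible; const void *data; };

static const struct of_id match_table[] = {
        { "fsl-usb2-mph",        &mpc8xxx_pd },
        { "fsl-usb2-dr",         &mpc8xxx_pd },
        { "fsl,mpc5121-usb2-dr", &mpc5121_pd },
        { NULL, NULL },
};

static const void *match_data(const char *compatible)
{
        const struct of_id *id;

        for (id = match_table; id->compatible; id++)
                if (!strcmp(id->compatible, compatible))
                        return id->data;
        return NULL;
}

int main(void)
{
        const struct fsl_usb2_platform_data *pd = match_data("fsl-usb2-dr");

        printf("have_sysif_regs = %d\n", pd ? pd->have_sysif_regs : -1);
        return 0;
}
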
index 990f06b..2e9602a 100644 (file)
@@ -861,6 +861,7 @@ static int sl811h_urb_enqueue(
                        DBG("dev %d ep%d maxpacket %d\n",
                                udev->devnum, epnum, ep->maxpacket);
                        retval = -EINVAL;
+                       kfree(ep);
                        goto fail;
                }
 
index fcbf4ab..0231814 100644 (file)
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
        }
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-       void *addr;
+       struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+       void __iomem *addr;
        u32 temp;
        u64 temp_64;
 
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
        }
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
        /* Fields are 32 bits wide, DMA addresses are in bytes */
        int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
                dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
                     struct xhci_container_ctx *ctx,
                     unsigned int last_ep)
 {
index 1d0f45f..a953439 100644 (file)
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs,
                struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs, dma_addr_t *dma,
                gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        val &= DBOFF_MASK;
        xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                        " from cap regs base addr\n", val);
-       xhci->dba = (void *) xhci->cap_regs + val;
+       xhci->dba = (void __iomem *) xhci->cap_regs + val;
        xhci_dbg_regs(xhci);
        xhci_print_run_regs(xhci);
        /* Set ir_set to interrupt register set 0 */
-       xhci->ir_set = (void *) xhci->run_regs->ir_set;
+       xhci->ir_set = &xhci->run_regs->ir_set[0];
 
        /*
         * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        /* Set the event ring dequeue address */
        xhci_set_hc_event_deq(xhci);
        xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+       xhci_print_ir_set(xhci, 0);
 
        /*
         * XXX: Might need to set the Interrupter Moderation Register to
index df558f6..3289bf4 100644 (file)
@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-       u32 temp;
-
        xhci_dbg(xhci, "// Ding dong!\n");
-       temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
-       xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+       xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int ep_index,
                unsigned int stream_id)
 {
-       struct xhci_virt_ep *ep;
-       unsigned int ep_state;
-       u32 field;
        __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+       struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+       unsigned int ep_state = ep->ep_state;
 
-       ep = &xhci->devs[slot_id]->eps[ep_index];
-       ep_state = ep->ep_state;
        /* Don't ring the doorbell for this endpoint if there are pending
-        * cancellations because the we don't want to interrupt processing.
+        * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         * FIXME - check all the stream rings for pending cancellations.
         */
-       if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
-                       && !(ep_state & EP_HALTED)) {
-               field = xhci_readl(xhci, db_addr) & DB_MASK;
-               field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
-               xhci_writel(xhci, field, db_addr);
-       }
+       if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+           (ep_state & EP_HALTED))
+               return;
+       xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+       /* The CPU has better things to do at this point than wait for a
+        * write-posting flush.  It'll get there soon enough.
+        */
 }
 
 /* Ring the doorbell for any rings with pending URBs */
@@ -479,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        dev->eps[ep_index].stopped_trb,
                        &state->new_cycle_state);
-       if (!state->new_deq_seg)
-               BUG();
+       if (!state->new_deq_seg) {
+               WARN_ON(1);
+               return;
+       }
+
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg(xhci, "Finding endpoint context\n");
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -491,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
-       if (!state->new_deq_seg)
-               BUG();
+       if (!state->new_deq_seg) {
+               WARN_ON(1);
+               return;
+       }
 
        trb = &state->new_deq_ptr->generic;
        if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -1188,7 +1188,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
        addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
        temp = xhci_readl(xhci, addr);
-       if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+       if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);
        }
@@ -1710,8 +1710,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                /* Others already handled above */
                break;
        }
-       dev_dbg(&td->urb->dev->dev,
-                       "ep %#x - asked for %d bytes, "
+       xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
                        "%d bytes untransferred\n",
                        td->urb->ep->desc.bEndpointAddress,
                        td->urb->transfer_buffer_length,
@@ -2369,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
                /* Scatter gather list entries may cross 64KB boundaries */
                running_total = TRB_MAX_BUFF_SIZE -
-                       (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                       (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+               running_total &= TRB_MAX_BUFF_SIZE - 1;
                if (running_total != 0)
                        num_trbs++;
 
                /* How many more 64KB chunks to transfer, how many more TRBs? */
-               while (running_total < sg_dma_len(sg)) {
+               while (running_total < sg_dma_len(sg) && running_total < temp) {
                        num_trbs++;
                        running_total += TRB_MAX_BUFF_SIZE;
                }
@@ -2389,7 +2389,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
        }
        xhci_dbg(xhci, "\n");
        if (!in_interrupt())
-               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+               xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+                               "num_trbs = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
                                num_trbs);
@@ -2399,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
        if (num_trbs != 0)
-               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+               dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                                "TRBs, %d left\n", __func__,
                                urb->ep->desc.bEndpointAddress, num_trbs);
        if (running_total != urb->transfer_buffer_length)
-               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+               dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                                "queued %#x (%d), asked for %#x (%d)\n",
                                __func__,
                                urb->ep->desc.bEndpointAddress,
@@ -2414,14 +2415,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id, int start_cycle,
-               struct xhci_generic_trb *start_trb, struct xhci_td *td)
+               struct xhci_generic_trb *start_trb)
 {
        /*
         * Pass all the TRBs to the hardware at once and make sure this write
         * isn't reordered.
         */
        wmb();
-       start_trb->field[3] |= start_cycle;
+       if (start_cycle)
+               start_trb->field[3] |= start_cycle;
+       else
+               start_trb->field[3] &= ~0x1;
        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -2449,7 +2453,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         * to set the polling interval (once the API is added).
         */
        if (xhci_interval != ep_interval) {
-               if (!printk_ratelimit())
+               if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",
@@ -2535,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        sg = urb->sg;
        addr = (u64) sg_dma_address(sg);
        this_sg_len = sg_dma_len(sg);
-       trb_buff_len = TRB_MAX_BUFF_SIZE -
-               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
        if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;
@@ -2551,9 +2554,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                u32 remainder = 0;
 
                /* Don't change the cycle bit of the first TRB until later */
-               if (first_trb)
+               if (first_trb) {
                        first_trb = false;
-               else
+                       if (start_cycle == 0)
+                               field |= 0x1;
+               } else
                        field |= ep_ring->cycle_state;
 
                /* Chain all the TRBs together; clear the chain bit in the last
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                                (unsigned int) addr + trb_buff_len);
                if (TRB_MAX_BUFF_SIZE -
-                               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+                               (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
                        xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
                        xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                                        (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
 
                trb_buff_len = TRB_MAX_BUFF_SIZE -
-                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                       (addr & (TRB_MAX_BUFF_SIZE - 1));
                trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
                if (running_total + trb_buff_len > urb->transfer_buffer_length)
                        trb_buff_len =
@@ -2625,7 +2630,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-                       start_cycle, start_trb, td);
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        num_trbs = 0;
        /* How much data is (potentially) left before the 64KB boundary? */
        running_total = TRB_MAX_BUFF_SIZE -
-               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+               (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+       running_total &= TRB_MAX_BUFF_SIZE - 1;
 
        /* If there's some data on this 64KB chunk, or we have to send a
         * zero-length transfer, we need at least one TRB
@@ -2671,7 +2677,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
        if (!in_interrupt())
-               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+               xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+                               "addr = %#llx, num_trbs = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
                                urb->transfer_buffer_length,
@@ -2699,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* How much data is in the first TRB? */
        addr = (u64) urb->transfer_dma;
        trb_buff_len = TRB_MAX_BUFF_SIZE -
-               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-       if (urb->transfer_buffer_length < trb_buff_len)
+               (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+       if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;
 
        first_trb = true;
@@ -2711,9 +2718,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                field = 0;
 
                /* Don't change the cycle bit of the first TRB until later */
-               if (first_trb)
+               if (first_trb) {
                        first_trb = false;
-               else
+                       if (start_cycle == 0)
+                               field |= 0x1;
+               } else
                        field |= ep_ring->cycle_state;
 
                /* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2766,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-                       start_cycle, start_trb, td);
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -2818,13 +2827,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* Queue setup TRB - see section 6.4.1.2.1 */
        /* FIXME better way to translate setup_packet into two u32 fields? */
        setup = (struct usb_ctrlrequest *) urb->setup_packet;
+       field = 0;
+       field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+       if (start_cycle == 0)
+               field |= 0x1;
        queue_trb(xhci, ep_ring, false, true,
                        /* FIXME endianness is probably going to bite my ass here. */
                        setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
                        setup->wIndex | setup->wLength << 16,
                        TRB_LEN(8) | TRB_INTR_TARGET(0),
                        /* Immediate data in pointer */
-                       TRB_IDT | TRB_TYPE(TRB_SETUP));
+                       field);
 
        /* If there's data, queue data TRBs */
        field = 0;
@@ -2859,7 +2872,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
        giveback_first_trb(xhci, slot_id, ep_index, 0,
-                       start_cycle, start_trb, td);
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -2872,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
        td_len = urb->iso_frame_desc[i].length;
 
-       running_total = TRB_MAX_BUFF_SIZE -
-                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+       running_total &= TRB_MAX_BUFF_SIZE - 1;
        if (running_total != 0)
                num_trbs++;
 
@@ -2900,6 +2913,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        int running_total, trb_buff_len, td_len, td_remain_len, ret;
        u64 start_addr, addr;
        int i, j;
+       bool more_trbs_coming;
 
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -2910,7 +2924,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        }
 
        if (!in_interrupt())
-               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+               xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
                                " addr = %#llx, num_tds = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
@@ -2950,7 +2964,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                field |= TRB_TYPE(TRB_ISOC);
                                /* Assume URB_ISO_ASAP is set */
                                field |= TRB_SIA;
-                               if (i > 0)
+                               if (i == 0) {
+                                       if (start_cycle == 0)
+                                               field |= 0x1;
+                               } else
                                        field |= ep_ring->cycle_state;
                                first_trb = false;
                        } else {
@@ -2965,9 +2982,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         */
                        if (j < trbs_per_td - 1) {
                                field |= TRB_CHAIN;
+                               more_trbs_coming = true;
                        } else {
                                td->last_trb = ep_ring->enqueue;
                                field |= TRB_IOC;
+                               more_trbs_coming = false;
                        }
 
                        /* Calculate TRB length */
@@ -2980,7 +2999,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        length_field = TRB_LEN(trb_buff_len) |
                                remainder |
                                TRB_INTR_TARGET(0);
-                       queue_trb(xhci, ep_ring, false, false,
+                       queue_trb(xhci, ep_ring, false, more_trbs_coming,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3003,10 +3022,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
-       wmb();
-       start_trb->field[3] |= start_cycle;
-
-       xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+       giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -3064,7 +3081,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
         * to set the polling interval (once the API is added).
         */
        if (xhci_interval != ep_interval) {
-               if (!printk_ratelimit())
+               if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",
index 45e4a31..2083fc2 100644 (file)
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
        u32 temp;
        int ret;
@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 static int xhci_setup_msix(struct xhci_hcd *xhci)
 {
        int i, ret = 0;
-       struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+       struct usb_hcd *hcd = xhci_to_hcd(xhci);
+       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
        /*
         * calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
                        goto disable_msix;
        }
 
+       hcd->msix_enabled = 1;
        return ret;
 
 disable_msix:
@@ -280,7 +282,8 @@ free_entries:
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
-       struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+       struct usb_hcd *hcd = xhci_to_hcd(xhci);
+       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
        xhci_free_irq(xhci);
 
@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
                pci_disable_msi(pdev);
        }
 
+       hcd->msix_enabled = 0;
        return;
 }
 
@@ -325,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
        unsigned long flags;
        int temp;
@@ -469,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
-       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+       xhci_print_ir_set(xhci, 0);
 
        if (NUM_TEST_NOOPS > 0)
                doorbell = xhci_setup_one_noop(xhci);
@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        xhci_reset(xhci);
-       xhci_cleanup_msix(xhci);
        spin_unlock_irq(&xhci->lock);
 
+       xhci_cleanup_msix(xhci);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        xhci->zombie = 1;
@@ -523,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
-       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+       xhci_print_ir_set(xhci, 0);
 
        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
-       xhci_cleanup_msix(xhci);
        spin_unlock_irq(&xhci->lock);
 
+       xhci_cleanup_msix(xhci);
+
        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                    xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
        int                     rc = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
+       int                     i;
 
        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
-       /* step 5: remove core well power */
-       xhci_cleanup_msix(xhci);
        spin_unlock_irq(&xhci->lock);
 
+       /* step 5: remove core well power */
+       /* synchronize irq when using MSI-X */
+       if (xhci->msix_entries) {
+               for (i = 0; i < xhci->msix_count; i++)
+                       synchronize_irq(xhci->msix_entries[i].vector);
+       }
+
        return rc;
 }
 
@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
        u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
-       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
        int     old_state, retval;
 
        old_state = hcd->state;
@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_reset(xhci);
-               if (hibernated)
-                       xhci_cleanup_msix(xhci);
                spin_unlock_irq(&xhci->lock);
+               xhci_cleanup_msix(xhci);
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
                /* Tell the event ring poll function not to reschedule */
@@ -745,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
                xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                                &xhci->ir_set->irq_pending);
-               xhci_print_ir_set(xhci, xhci->ir_set, 0);
+               xhci_print_ir_set(xhci, 0);
 
                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                return retval;
        }
 
-       spin_unlock_irq(&xhci->lock);
-       /* Re-setup MSI-X */
-       if (hcd->irq)
-               free_irq(hcd->irq, hcd);
-       hcd->irq = -1;
-
-       retval = xhci_setup_msix(xhci);
-       if (retval)
-               /* fall back to msi*/
-               retval = xhci_setup_msi(xhci);
-
-       if (retval) {
-               /* fall back to legacy interrupt*/
-               retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-                                       hcd->irq_descr, hcd);
-               if (retval) {
-                       xhci_err(xhci, "request interrupt %d failed\n",
-                                       pdev->irq);
-                       return retval;
-               }
-               hcd->irq = pdev->irq;
-       }
-
-       spin_lock_irq(&xhci->lock);
        /* step 4: set Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RUN;
@@ -871,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 /* Returns 1 if the arguments are OK;
  * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func) {
        struct xhci_hcd *xhci;
@@ -1707,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
        xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
 {
@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
                xhci_err(xhci, "Error while assigning device slot ID\n");
                return 0;
        }
-       /* xhci_alloc_virt_device() does not touch rings; no need to lock */
-       if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+       /* xhci_alloc_virt_device() does not touch rings; no need to lock.
+        * Use GFP_NOIO, since this function can be called from
+        * xhci_discover_or_reset_device(), which may be called as part of
+        * mass storage driver error handling.
+        */
+       if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
                /* Disable slot, if we can do it without mem alloc */
                xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
                spin_lock_irqsave(&xhci->lock, flags);
index 170c367..7f127df 100644 (file)
@@ -436,22 +436,18 @@ struct xhci_run_regs {
 /**
  * struct doorbell_array
  *
+ * Bits  0 -  7: Endpoint target
+ * Bits  8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
  * Section 5.6
  */
 struct xhci_doorbell_array {
        u32     doorbell[256];
 };
 
-#define        DB_TARGET_MASK          0xFFFFFF00
-#define        DB_STREAM_ID_MASK       0x0000FFFF
-#define        DB_TARGET_HOST          0x0
-#define        DB_STREAM_ID_HOST       0x0
-#define        DB_MASK                 (0xff << 8)
-
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p)           (((p) + 1) & 0xff)
-#define STREAM_ID_TO_DB(p)     (((p) & 0xffff) << 16)
-
+#define DB_VALUE(ep, stream)   ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST          0x00000000
 
 /**
  * struct xhci_protocol_caps
@@ -1352,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
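
The new DB_VALUE()/DB_VALUE_HOST macros above encode the doorbell layout documented in the struct comment: endpoint target (ep_index + 1) in bits 0-7 and stream ID in bits 16-31, with the host doorbell written as 0. A standalone sketch showing the packed values:

#include <stdio.h>
#include <stdint.h>

#define DB_VALUE(ep, stream)    ((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST           0x00000000

int main(void)
{
        /* doorbell word for ep_index 2 (target 3), stream 1 */
        uint32_t db = DB_VALUE(2, 1);

        printf("ep doorbell   = 0x%08x\n", (unsigned int)db);          /* 0x00010003 */
        printf("host doorbell = 0x%08x\n", (unsigned int)DB_VALUE_HOST);
        return 0;
}
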
index 1732d9b..1616ad1 100644 (file)
@@ -45,7 +45,7 @@ struct usb_led {
 
 static void change_color(struct usb_led *led)
 {
-       int retval;
+       int retval = 0;
        unsigned char *buffer;
 
        buffer = kmalloc(8, GFP_KERNEL);
index 4ff2158..f7a2057 100644 (file)
@@ -776,7 +776,6 @@ static const struct usb_device_id uss720_table[] = {
        { USB_DEVICE(0x0557, 0x2001) },
        { USB_DEVICE(0x0729, 0x1284) },
        { USB_DEVICE(0x1293, 0x0002) },
-       { USB_DEVICE(0x1293, 0x0002) },
        { USB_DEVICE(0x050d, 0x0002) },
        { }                                             /* Terminating entry */
 };
index eeba228..9d49d1c 100644 (file)
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
                musb->xceiv->set_power = bfin_musb_set_power;
 
        musb->isr = blackfin_interrupt;
+       musb->double_buffer_not_ok = true;
 
        return 0;
 }
index 07cf394..c292d5c 100644 (file)
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
 
 static inline struct musb *dev_to_musb(struct device *dev)
 {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-       /* usbcore insists dev->driver_data is a "struct hcd *" */
-       return hcd_to_musb(dev_get_drvdata(dev));
-#else
        return dev_get_drvdata(dev);
-#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1869,6 +1864,7 @@ allocate_instance(struct device *dev,
        INIT_LIST_HEAD(&musb->out_bulk);
 
        hcd->uses_new_polling = 1;
+       hcd->has_tt = 1;
 
        musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
        musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -1876,10 +1872,9 @@ allocate_instance(struct device *dev,
        musb = kzalloc(sizeof *musb, GFP_KERNEL);
        if (!musb)
                return NULL;
-       dev_set_drvdata(dev, musb);
 
 #endif
-
+       dev_set_drvdata(dev, musb);
        musb->mregs = mbase;
        musb->ctrl_base = mbase;
        musb->nIrq = -ENODEV;
@@ -2191,7 +2186,7 @@ static int __init musb_probe(struct platform_device *pdev)
        void __iomem    *base;
 
        iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!iomem || irq == 0)
+       if (!iomem || irq <= 0)
                return -ENODEV;
 
        base = ioremap(iomem->start, resource_size(iomem));
index d0c236f..e6400be 100644 (file)
@@ -497,6 +497,19 @@ struct musb {
        struct usb_gadget_driver *gadget_driver;        /* its driver */
 #endif
 
+       /*
+        * FIXME: Remove this flag.
+        *
+        * This is only added to allow Blackfin to work
+        * with the current driver. For some unknown reason
+        * Blackfin doesn't work with double buffering,
+        * which is enabled by default.
+        *
+        * We added this flag to forcefully disable double
+        * buffering until we get it working.
+        */
+       unsigned                double_buffer_not_ok:1 __deprecated;
+
        struct musb_hdrc_config *config;
 
 #ifdef MUSB_CONFIG_PROC_FS
index 916065b..3a97c4e 100644 (file)
@@ -169,6 +169,9 @@ struct dma_controller {
                                                        dma_addr_t dma_addr,
                                                        u32 length);
        int                     (*channel_abort)(struct dma_channel *);
+       int                     (*is_compatible)(struct dma_channel *channel,
+                                                       u16 maxpacket,
+                                                       void *buf, u32 length);
 };
 
 /* called after channel_program(), may indicate a fault */
index ed58c6c..2fe3046 100644 (file)
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+                                       (req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-                               struct musb *musb)
+                       struct musb *musb, struct musb_ep *musb_ep)
 {
+       int compatible = true;
+       struct dma_controller *dma = musb->dma_controller;
+
+       request->map_state = UN_MAPPED;
+
+       if (!is_dma_capable() || !musb_ep->dma)
+               return;
+
+       /* Check if DMA engine can handle this request.
+        * DMA code must reject the USB request explicitly.
+        * Default behaviour is to map the request.
+        */
+       if (dma->is_compatible)
+               compatible = dma->is_compatible(musb_ep->dma,
+                               musb_ep->packet_sz, request->request.buf,
+                               request->request.length);
+       if (!compatible)
+               return;
+
        if (request->request.dma == DMA_ADDR_INVALID) {
                request->request.dma = dma_map_single(
                                musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
                                request->tx
                                        ? DMA_TO_DEVICE
                                        : DMA_FROM_DEVICE);
-               request->mapped = 1;
+               request->map_state = MUSB_MAPPED;
        } else {
                dma_sync_single_for_device(musb->controller,
                        request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
-               request->mapped = 0;
+               request->map_state = PRE_MAPPED;
        }
 }
 
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
 {
+       if (!is_buffer_mapped(request))
+               return;
+
        if (request->request.dma == DMA_ADDR_INVALID) {
                DBG(20, "not unmapping a never mapped buffer\n");
                return;
        }
-       if (request->mapped) {
+       if (request->map_state == MUSB_MAPPED) {
                dma_unmap_single(musb->controller,
                        request->request.dma,
                        request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->request.dma = DMA_ADDR_INVALID;
-               request->mapped = 0;
-       } else {
+       } else { /* PRE_MAPPED */
                dma_sync_single_for_cpu(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
-
        }
+       request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
 
        ep->busy = 1;
        spin_unlock(&musb->lock);
-       if (is_dma_capable() && ep->dma)
-               unmap_dma_buffer(req, musb);
+       unmap_dma_buffer(req, musb);
        if (request->status == 0)
                DBG(5, "%s done request %p,  %d/%d\n",
                                ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
                        csr);
 
 #ifndef        CONFIG_MUSB_PIO_ONLY
-       if (is_dma_capable() && musb_ep->dma) {
+       if (is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                size_t request_size;
 
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
                 * Unmap the dma buffer back to cpu if dma channel
                 * programming fails
                 */
-               if (is_dma_capable() && musb_ep->dma)
-                       unmap_dma_buffer(req, musb);
+               unmap_dma_buffer(req, musb);
 
                musb_write_fifo(musb_ep->hw_ep, fifo_count,
                                (u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                return;
        }
 
-       if (is_cppi_enabled() && musb_ep->dma) {
+       if (is_cppi_enabled() && is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                struct dma_channel      *channel = musb_ep->dma;
 
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                len = musb_readw(epio, MUSB_RXCOUNT);
                if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-                       if (is_dma_capable() && musb_ep->dma) {
+                       if (is_buffer_mapped(req)) {
                                struct dma_controller   *c;
                                struct dma_channel      *channel;
                                int                     use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                        fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-                       if (tusb_dma_omap() && musb_ep->dma) {
+                       if (tusb_dma_omap() && is_buffer_mapped(req)) {
                                struct dma_controller *c = musb->dma_controller;
                                struct dma_channel *channel = musb_ep->dma;
                                u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                         * programming fails. This buffer is mapped if the
                         * channel allocation is successful
                         */
-                        if (is_dma_capable() && musb_ep->dma) {
+                        if (is_buffer_mapped(req)) {
                                unmap_dma_buffer(req, musb);
 
                                /*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
                /* Set TXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
-               musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+               if (musb->double_buffer_not_ok)
+                       musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+               else
+                       musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+                                       | (musb_ep->hb_mult << 11));
 
                csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
                if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
                /* Set RXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
-               musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+               if (musb->double_buffer_not_ok)
+                       musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+               else
+                       musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+                                       | (musb_ep->hb_mult << 11));
 
                /* force shared fifo to OUT-only mode */
                if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
        request->epnum = musb_ep->current_epnum;
        request->tx = musb_ep->is_in;
 
-       if (is_dma_capable() && musb_ep->dma)
-               map_dma_buffer(request, musb);
-       else
-               request->mapped = 0;
+       map_dma_buffer(request, musb, musb_ep);
 
        spin_lock_irqsave(&musb->lock, lockflags);
 
index dec8dc0..a55354f 100644 (file)
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+       UN_MAPPED = 0,
+       PRE_MAPPED,
+       MUSB_MAPPED
+};
+
 struct musb_request {
        struct usb_request      request;
        struct musb_ep          *ep;
        struct musb             *musb;
        u8 tx;                  /* endpoint direction */
        u8 epnum;
-       u8 mapped;
+       enum buffer_map_state map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)
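
The map_state enum above replaces the old 0/1 "mapped" flag with three states, so the unmap path knows whether there is nothing to undo, a driver-supplied mapping that only needs a sync, or a musb-created mapping that must be torn down. A standalone sketch of that bookkeeping; the dma_* helpers below are printf stand-ins for the kernel DMA API:

#include <stdio.h>

enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

struct request { enum buffer_map_state map_state; int has_dma_addr; };

static void dma_map(struct request *r)      { (void)r; printf("dma_map_single()\n"); }
static void dma_unmap(struct request *r)    { (void)r; printf("dma_unmap_single()\n"); }
static void dma_sync_dev(struct request *r) { (void)r; printf("dma_sync_single_for_device()\n"); }
static void dma_sync_cpu(struct request *r) { (void)r; printf("dma_sync_single_for_cpu()\n"); }

static void map_buffer(struct request *r, int dma_ok)
{
        r->map_state = UN_MAPPED;
        if (!dma_ok)
                return;                 /* PIO path: nothing to undo later */
        if (!r->has_dma_addr) {
                dma_map(r);             /* we created the mapping */
                r->map_state = MUSB_MAPPED;
        } else {
                dma_sync_dev(r);        /* caller pre-mapped the buffer */
                r->map_state = PRE_MAPPED;
        }
}

static void unmap_buffer(struct request *r)
{
        if (r->map_state == UN_MAPPED)
                return;
        if (r->map_state == MUSB_MAPPED)
                dma_unmap(r);
        else                            /* PRE_MAPPED */
                dma_sync_cpu(r);
        r->map_state = UN_MAPPED;
}

int main(void)
{
        struct request r = { UN_MAPPED, 0 };

        map_buffer(&r, 1);
        unmap_buffer(&r);
        return 0;
}
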
index 4d5bcb4..0f523d7 100644 (file)
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
        /* Set RXMAXP with the FIFO size of the endpoint
         * to disable double buffer mode.
         */
-       if (musb->hwvers < MUSB_HWVERS_2000)
+       if (musb->double_buffer_not_ok)
                musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
        else
                musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                /* protocol/endpoint/interval/NAKlimit */
                if (epnum) {
                        musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-                       if (can_bulk_split(musb, qh->type))
+                       if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
-                                       packet_sz
-                                       | ((hw_ep->max_packet_sz_tx /
-                                               packet_sz) - 1) << 11);
+                                               hw_ep->max_packet_sz_tx);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
-                                       packet_sz);
+                                               qh->maxpacket |
+                                               ((qh->hb_mult - 1) << 11));
                        musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
                } else {
                        musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
index f763d62..21056c9 100644 (file)
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
 {
        musb_writew(mbase,
                MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
-               ((u16)((u32) dma_addr & 0xFFFF)));
+               dma_addr);
        musb_writew(mbase,
                MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
-               ((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+               (dma_addr >> 16));
 }
 
 static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
 {
-       return musb_readl(mbase,
+       u32 count = musb_readw(mbase,
                MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+       count = count << 16;
+
+       count |= musb_readw(mbase,
+               MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+       return count;
 }
 
 static inline void musb_write_hsdma_count(void __iomem *mbase,
                                u8 bchannel, u32 len)
 {
-       musb_writel(mbase,
+       musb_writew(mbase,
+               MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW), len);
+       musb_writew(mbase,
                MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
-               len);
+               (len >> 16));
 }
 
 #endif /* CONFIG_BLACKFIN */
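
The Blackfin DMA count helpers above are rewritten as two 16-bit accesses to the COUNT_LOW/COUNT_HIGH registers instead of a single 32-bit access. A standalone sketch of the split and reassembly against an emulated pair of 16-bit registers:

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[2];                /* [0] = COUNT_LOW, [1] = COUNT_HIGH */

static void write_count(uint32_t len)
{
        regs[0] = (uint16_t)len;        /* low 16 bits */
        regs[1] = (uint16_t)(len >> 16);/* high 16 bits */
}

static uint32_t read_count(void)
{
        return ((uint32_t)regs[1] << 16) | regs[0];
}

int main(void)
{
        write_count(0x0001fffe);
        printf("count = 0x%08x\n", (unsigned int)read_count());        /* 0x0001fffe */
        return 0;
}
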
index a3f1233..bc8badd 100644 (file)
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
 
 static int omap2430_musb_exit(struct musb *musb)
 {
+       del_timer_sync(&musb_idle_timer);
 
        omap2430_low_level_exit(musb);
        otg_put_transceiver(musb->xceiv);
index 9fb875d..9ffc823 100644 (file)
@@ -103,6 +103,8 @@ config USB_MSM_OTG_72K
          required after resetting the hardware and power management.
          This driver is required even for peripheral only or host only
          mode configurations.
+         This driver is not supported on boards like trout, which
+         have an external PHY.
 
 config AB8500_USB
         tristate "AB8500 USB Transceiver Driver"
index e70014a..8acf165 100644 (file)
@@ -132,6 +132,8 @@ static int __devinit nop_usb_xceiv_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, nop);
 
+       BLOCKING_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
+
        return 0;
 exit:
        kfree(nop);
index 059d9ac..770d799 100644 (file)
@@ -45,7 +45,7 @@ struct ulpi_info {
 /* ULPI hardcoded IDs, used for probing */
 static struct ulpi_info ulpi_ids[] = {
        ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
-       ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB3319"),
+       ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
 };
 
 static int ulpi_set_otg_flags(struct otg_transceiver *otg)
index 63f7cc4..7b8815d 100644 (file)
@@ -486,12 +486,22 @@ static void ch341_read_int_callback(struct urb *urb)
        if (actual_length >= 4) {
                struct ch341_private *priv = usb_get_serial_port_data(port);
                unsigned long flags;
+               u8 prev_line_status = priv->line_status;
 
                spin_lock_irqsave(&priv->lock, flags);
                priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
                if ((data[1] & CH341_MULT_STAT))
                        priv->multi_status_change = 1;
                spin_unlock_irqrestore(&priv->lock, flags);
+
+               if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
+                       struct tty_struct *tty = tty_port_tty_get(&port->port);
+                       if (tty)
+                               usb_serial_handle_dcd_change(port, tty,
+                                           priv->line_status & CH341_BIT_DCD);
+                       tty_kref_put(tty);
+               }
+
                wake_up_interruptible(&priv->delta_msr_wait);
        }
 
index 8d7731d..735ea03 100644 (file)
@@ -49,7 +49,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
 static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int cp210x_carrier_raised(struct usb_serial_port *p);
 
 static int debug;
 
@@ -87,7 +86,6 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
        { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
        { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
-       { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
        { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
@@ -110,7 +108,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
        { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
        { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+       { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
        { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+       { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -165,8 +165,7 @@ static struct usb_serial_driver cp210x_device = {
        .tiocmget               = cp210x_tiocmget,
        .tiocmset               = cp210x_tiocmset,
        .attach                 = cp210x_startup,
-       .dtr_rts                = cp210x_dtr_rts,
-       .carrier_raised         = cp210x_carrier_raised
+       .dtr_rts                = cp210x_dtr_rts
 };
 
 /* Config request types */
@@ -765,15 +764,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
        return result;
 }
 
-static int cp210x_carrier_raised(struct usb_serial_port *p)
-{
-       unsigned int control;
-       cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
-       if (control & CONTROL_DCD)
-               return 1;
-       return 0;
-}
-
 static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
 {
        struct usb_serial_port *port = tty->driver_data;
index b92070c..666e5a6 100644 (file)
@@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty);
 static int digi_chars_in_buffer(struct tty_struct *tty);
 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
 static void digi_close(struct usb_serial_port *port);
-static int digi_carrier_raised(struct usb_serial_port *port);
 static void digi_dtr_rts(struct usb_serial_port *port, int on);
 static int digi_startup_device(struct usb_serial *serial);
 static int digi_startup(struct usb_serial *serial);
@@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
        .open =                         digi_open,
        .close =                        digi_close,
        .dtr_rts =                      digi_dtr_rts,
-       .carrier_raised =               digi_carrier_raised,
        .write =                        digi_write,
        .write_room =                   digi_write_room,
        .write_bulk_callback =          digi_write_bulk_callback,
@@ -1339,14 +1337,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on)
        digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
 }
 
-static int digi_carrier_raised(struct usb_serial_port *port)
-{
-       struct digi_port *priv = usb_get_serial_port_data(port);
-       if (priv->dp_modem_signals & TIOCM_CD)
-               return 1;
-       return 0;
-}
-
 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
        int ret;
index a2668d0..f349a36 100644 (file)
@@ -100,6 +100,7 @@ struct ftdi_sio_quirk {
 static int   ftdi_jtag_probe(struct usb_serial *serial);
 static int   ftdi_mtxorb_hack_setup(struct usb_serial *serial);
 static int   ftdi_NDI_device_setup(struct usb_serial *serial);
+static int   ftdi_stmclite_probe(struct usb_serial *serial);
 static void  ftdi_USB_UIRT_setup(struct ftdi_private *priv);
 static void  ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
 
@@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
        .port_probe = ftdi_HE_TIRA1_setup,
 };
 
+static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+       .probe  = ftdi_stmclite_probe,
+};
+
 /*
  * The 8U232AM has the same API as the sio except for:
  * - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
        { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+       { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
        { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -676,7 +682,17 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
-       { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
+       { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -800,6 +816,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
        { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -1698,6 +1716,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
        return 0;
 }
 
+/*
+ * The first and second ports on STMCLite adapters are reserved for the JTAG
+ * interface and the fourth port for PIO.
+ */
+static int ftdi_stmclite_probe(struct usb_serial *serial)
+{
+       struct usb_device *udev = serial->dev;
+       struct usb_interface *interface = serial->interface;
+
+       dbg("%s", __func__);
+
+       if (interface == udev->actconfig->interface[2])
+               return 0;
+
+       dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+
+       return -ENODEV;
+}
+
 /*
  * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
  * We have to correct it if we want to read from it.
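For context on how the new ftdi_stmclite_quirk above takes effect: the id_table entry stores the quirk in driver_info, and ftdi_sio's own probe hook, judging from the existing ftdi_jtag_quirk handling, runs the quirk's .probe before binding so it can veto individual interfaces. A hedged sketch of that dispatch, simplified rather than copied from the driver:

	/* Sketch only: simplified quirk dispatch; assumes driver_info holds a
	 * struct ftdi_sio_quirk pointer, as the table entries above set up. */
	static int ftdi_sio_probe(struct usb_serial *serial,
				  const struct usb_device_id *id)
	{
		struct ftdi_sio_quirk *quirk =
			(struct ftdi_sio_quirk *)id->driver_info;

		if (quirk && quirk->probe) {
			int ret = quirk->probe(serial); /* e.g. ftdi_stmclite_probe */
			if (ret != 0)
				return ret;	/* -ENODEV rejects the interface */
		}

		usb_set_serial_data(serial, (void *)id->driver_info);
		return 0;
	}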
index bf08672..117e8e6 100644 (file)
 #define RATOC_VENDOR_ID                0x0584
 #define RATOC_PRODUCT_ID_USB60F        0xb020
 
+/*
+ * Acton Research Corp.
+ */
+#define ACTON_VID              0x0647  /* Vendor ID */
+#define ACTON_SPECTRAPRO_PID   0x0100
+
 /*
  * Contec products (http://www.contec.com)
  * Submitted by Daniel Sangorrin
 #define OCT_US101_PID          0x0421  /* OCT US101 USB to RS-232 */
 
 /*
- * Icom ID-1 digital transceiver
+ * Definitions for Icom Inc. devices
  */
-
-#define ICOM_ID1_VID            0x0C26
-#define ICOM_ID1_PID            0x0004
+#define ICOM_VID               0x0C26 /* Icom vendor ID */
+/* Note: ID-1 is a communications transceiver for HAM-radio operators */
+#define ICOM_ID_1_PID          0x0004 /* ID-1 USB to RS-232 */
+/* Note: OPC is an optional cable to connect an Icom transceiver */
+#define ICOM_OPC_U_UC_PID      0x0018 /* OPC-478UC, OPC-1122U cloning cable */
+/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
+#define ICOM_ID_RP2C1_PID      0x0009 /* ID-RP2C Asset 1 to RS-232 */
+#define ICOM_ID_RP2C2_PID      0x000A /* ID-RP2C Asset 2 to RS-232 */
+#define ICOM_ID_RP2D_PID       0x000B /* ID-RP2D configuration port*/
+#define ICOM_ID_RP2VT_PID      0x000C /* ID-RP2V Transmit config port */
+#define ICOM_ID_RP2VR_PID      0x000D /* ID-RP2V Receive config port */
+#define ICOM_ID_RP4KVT_PID     0x0010 /* ID-RP4000V Transmit config port */
+#define ICOM_ID_RP4KVR_PID     0x0011 /* ID-RP4000V Receive config port */
+#define ICOM_ID_RP2KVT_PID     0x0012 /* ID-RP2000V Transmit config port */
+#define ICOM_ID_RP2KVR_PID     0x0013 /* ID-RP2000V Receive config port */
 
 /*
  * GN Otometrics (http://www.otometrics.com)
 #define STB_PID                        0x0001 /* Sensor Terminal Board */
 #define WHT_PID                        0x0004 /* Wireless Handheld Terminal */
 
+/*
+ * STMicroelectronics
+ */
+#define ST_VID                 0x0483
+#define ST_STMCLT1030_PID      0x3747 /* ST Micro Connect Lite STMCLT1030 */
+
 /*
  * Papouch products (http://www.papouch.com/)
  * Submitted by Folkert van Heusden
index e6833e2..e4db5ad 100644 (file)
@@ -479,6 +479,26 @@ int usb_serial_handle_break(struct usb_serial_port *port)
 }
 EXPORT_SYMBOL_GPL(usb_serial_handle_break);
 
+/**
+ *     usb_serial_handle_dcd_change - handle a change of carrier detect state
+ *	@usb_port: usb_serial_port structure for the open port
+ *     @tty: tty_struct structure for the port
+ *     @status: new carrier detect status, nonzero if active
+ */
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+                               struct tty_struct *tty, unsigned int status)
+{
+       struct tty_port *port = &usb_port->port;
+
+       dbg("%s - port %d, status %d", __func__, usb_port->number, status);
+
+       if (status)
+               wake_up_interruptible(&port->open_wait);
+       else if (tty && !C_CLOCAL(tty))
+               tty_hangup(tty);
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
+
 int usb_serial_generic_resume(struct usb_serial *serial)
 {
        struct usb_serial_port *port;
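The usb_serial_handle_dcd_change() helper added above gives port drivers one place to report carrier transitions: it wakes any open() blocked on the port when DCD rises and hangs up non-CLOCAL ttys when it falls. A minimal sketch of a status handler feeding it, modelled on the ch341 and pl2303 hunks in this series; xyz_private, XYZ_DCD and the field names are illustrative assumptions, not taken from any patch here:

	/* Sketch only: hypothetical modem-status handler that caches the
	 * previous status word and reports DCD edges via the new helper. */
	static void xyz_update_modem_status(struct usb_serial_port *port, u8 msr)
	{
		struct xyz_private *priv = usb_get_serial_port_data(port);
		struct tty_struct *tty;
		unsigned long flags;
		u8 prev;

		spin_lock_irqsave(&priv->lock, flags);
		prev = priv->line_status;
		priv->line_status = msr;
		spin_unlock_irqrestore(&priv->lock, flags);

		/* Only a change in DCD matters to the tty port layer. */
		if ((priv->line_status ^ prev) & XYZ_DCD) {
			tty = tty_port_tty_get(&port->port);
			if (tty)
				usb_serial_handle_dcd_change(port, tty,
						priv->line_status & XYZ_DCD);
			tty_kref_put(tty);
		}

		wake_up_interruptible(&priv->delta_msr_wait);
	}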
index cd769ef..3b246d9 100644 (file)
@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
 
        dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
 
-       edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
-       edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
+       edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
+       edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
        edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
 
        for (rec = ihex_next_binrec(rec); rec;
index 6ab2a3f..178b22e 100644 (file)
@@ -199,6 +199,7 @@ static struct usb_serial_driver epic_device = {
                .name           = "epic",
        },
        .description            = "EPiC device",
+       .usb_driver             = &io_driver,
        .id_table               = Epic_port_id_table,
        .num_ports              = 1,
        .open                   = edge_open,
index 12ed594..99b97c0 100644 (file)
@@ -1275,6 +1275,7 @@ static struct usb_serial_driver iuu_device = {
                   .name = "iuu_phoenix",
                   },
        .id_table = id_table,
+       .usb_driver = &iuu_driver,
        .num_ports = 1,
        .bulk_in_size = 512,
        .bulk_out_size = 512,
index 2d8baf6..ce134dc 100644 (file)
@@ -546,6 +546,7 @@ static struct usb_serial_driver keyspan_pre_device = {
                .name           = "keyspan_no_firm",
        },
        .description            = "Keyspan - (without firmware)",
+       .usb_driver             = &keyspan_driver,
        .id_table               = keyspan_pre_ids,
        .num_ports              = 1,
        .attach                 = keyspan_fake_startup,
@@ -557,6 +558,7 @@ static struct usb_serial_driver keyspan_1port_device = {
                .name           = "keyspan_1",
        },
        .description            = "Keyspan 1 port adapter",
+       .usb_driver             = &keyspan_driver,
        .id_table               = keyspan_1port_ids,
        .num_ports              = 1,
        .open                   = keyspan_open,
@@ -579,6 +581,7 @@ static struct usb_serial_driver keyspan_2port_device = {
                .name           = "keyspan_2",
        },
        .description            = "Keyspan 2 port adapter",
+       .usb_driver             = &keyspan_driver,
        .id_table               = keyspan_2port_ids,
        .num_ports              = 2,
        .open                   = keyspan_open,
@@ -601,6 +604,7 @@ static struct usb_serial_driver keyspan_4port_device = {
                .name           = "keyspan_4",
        },
        .description            = "Keyspan 4 port adapter",
+       .usb_driver             = &keyspan_driver,
        .id_table               = keyspan_4port_ids,
        .num_ports              = 4,
        .open                   = keyspan_open,
index a10dd56..554a869 100644 (file)
@@ -679,22 +679,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
        }
 }
 
-static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
-{
-       struct usb_serial *serial = port->serial;
-       unsigned char modembits;
-
-       /* If we can read the modem status and the DCD is low then
-          carrier is not raised yet */
-       if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
-               if (!(modembits & (1>>6)))
-                       return 0;
-       }
-       /* Carrier raised, or we failed (eg disconnected) so
-          progress accordingly */
-       return 1;
-}
-
 
 static int keyspan_pda_open(struct tty_struct *tty,
                                        struct usb_serial_port *port)
@@ -881,7 +865,6 @@ static struct usb_serial_driver keyspan_pda_device = {
        .id_table =             id_table_std,
        .num_ports =            1,
        .dtr_rts =              keyspan_pda_dtr_rts,
-       .carrier_raised =       keyspan_pda_carrier_raised,
        .open =                 keyspan_pda_open,
        .close =                keyspan_pda_close,
        .write =                keyspan_pda_write,
index cf17183..653465f 100644 (file)
@@ -44,6 +44,7 @@ static struct usb_serial_driver moto_device = {
                .name =         "moto-modem",
        },
        .id_table =             id_table,
+       .usb_driver =           &moto_driver,
        .num_ports =            1,
 };
 
index 7487782..5f46838 100644 (file)
@@ -382,7 +382,16 @@ static void option_instat_callback(struct urb *urb);
 #define HAIER_VENDOR_ID                                0x201e
 #define HAIER_PRODUCT_CE100                    0x2009
 
-#define CINTERION_VENDOR_ID                    0x0681
+/* Cinterion (formerly Siemens) products */
+#define SIEMENS_VENDOR_ID                              0x0681
+#define CINTERION_VENDOR_ID                            0x1e2d
+#define CINTERION_PRODUCT_HC25_MDM             0x0047
+#define CINTERION_PRODUCT_HC25_MDMNET  0x0040
+#define CINTERION_PRODUCT_HC28_MDM             0x004C
+#define CINTERION_PRODUCT_HC28_MDMNET  0x004A /* same for HC28J */
+#define CINTERION_PRODUCT_EU3_E                        0x0051
+#define CINTERION_PRODUCT_EU3_P                        0x0052
+#define CINTERION_PRODUCT_PH8                  0x0053
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -944,7 +953,17 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-       { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+       /* Cinterion */
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+       { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+       { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+       { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+       { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
index 5be866b..7361320 100644 (file)
@@ -157,6 +157,7 @@ static struct usb_serial_driver oti6858_device = {
                .name =         "oti6858",
        },
        .id_table =             id_table,
+       .usb_driver =           &oti6858_driver,
        .num_ports =            1,
        .open =                 oti6858_open,
        .close =                oti6858_close,
index 8ae4c6c..08c9181 100644 (file)
@@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+       { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -677,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
 {
 
        struct pl2303_private *priv = usb_get_serial_port_data(port);
+       struct tty_struct *tty;
        unsigned long flags;
        u8 status_idx = UART_STATE;
        u8 length = UART_STATE + 1;
+       u8 prev_line_status;
        u16 idv, idp;
 
        idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
@@ -701,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
 
        /* Save off the uart status for others to look at */
        spin_lock_irqsave(&priv->lock, flags);
+       prev_line_status = priv->line_status;
        priv->line_status = data[status_idx];
        spin_unlock_irqrestore(&priv->lock, flags);
        if (priv->line_status & UART_BREAK_ERROR)
                usb_serial_handle_break(port);
        wake_up_interruptible(&priv->delta_msr_wait);
+
+       tty = tty_port_tty_get(&port->port);
+       if (!tty)
+               return;
+       if ((priv->line_status ^ prev_line_status) & UART_DCD)
+               usb_serial_handle_dcd_change(port, tty,
+                               priv->line_status & UART_DCD);
+       tty_kref_put(tty);
 }
 
 static void pl2303_read_int_callback(struct urb *urb)
index 43eb9bd..1b025f7 100644 (file)
@@ -21,6 +21,7 @@
 #define PL2303_PRODUCT_ID_MMX          0x0612
 #define PL2303_PRODUCT_ID_GPRS         0x0609
 #define PL2303_PRODUCT_ID_HCR331       0x331a
+#define PL2303_PRODUCT_ID_MOTOROLA     0x0307
 
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
index 214a3e5..30b73e6 100644 (file)
@@ -36,6 +36,7 @@
 #define UTSTARCOM_PRODUCT_UM175_V1             0x3712
 #define UTSTARCOM_PRODUCT_UM175_V2             0x3714
 #define UTSTARCOM_PRODUCT_UM175_ALLTEL         0x3715
+#define PANTECH_PRODUCT_UML290_VZW             0x3718
 
 /* CMOTECH devices */
 #define CMOTECH_VENDOR_ID                      0x16d8
@@ -66,6 +67,7 @@ static struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -84,6 +86,7 @@ static struct usb_serial_driver qcaux_device = {
                .name =         "qcaux",
        },
        .id_table =             id_table,
+       .usb_driver =           &qcaux_driver,
        .num_ports =            1,
 };
 
index cb8195c..74cd4cc 100644 (file)
@@ -42,6 +42,7 @@ static struct usb_serial_driver siemens_usb_mpi_device = {
                .name =         "siemens_mpi",
        },
        .id_table =             id_table,
+       .usb_driver =           &siemens_usb_mpi_driver,
        .num_ports =            1,
 };
 
index 7481ff8..0457813 100644 (file)
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
        { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
 
        { }
index 765aa98..cbfb70b 100644 (file)
@@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg {
 
 /* how come ??? */
 #define UART_STATE                     0x08
-#define UART_STATE_TRANSIENT_MASK      0x74
+#define UART_STATE_TRANSIENT_MASK      0x75
 #define UART_DCD                       0x01
 #define UART_DSR                       0x02
 #define UART_BREAK_ERROR               0x04
@@ -525,6 +525,10 @@ static void spcp8x5_process_read_urb(struct urb *urb)
                /* overrun is special, not associated with a char */
                if (status & UART_OVERRUN_ERROR)
                        tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+               if (status & UART_DCD)
+                       usb_serial_handle_dcd_change(port, tty,
+                                  priv->line_status & MSR_STATUS_LINE_DCD);
        }
 
        tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
@@ -645,6 +649,7 @@ static struct usb_serial_driver spcp8x5_device = {
                .name =         "SPCP8x5",
        },
        .id_table               = id_table,
+       .usb_driver             = &spcp8x5_driver,
        .num_ports              = 1,
        .open                   = spcp8x5_open,
        .dtr_rts                = spcp8x5_dtr_rts,
index b2902f3..a910004 100644 (file)
@@ -369,9 +369,9 @@ failed_1port:
 
 static void __exit ti_exit(void)
 {
+       usb_deregister(&ti_usb_driver);
        usb_serial_deregister(&ti_1port_device);
        usb_serial_deregister(&ti_2port_device);
-       usb_deregister(&ti_usb_driver);
 }
 
 
index 6954de5..546a521 100644 (file)
@@ -1344,11 +1344,15 @@ int usb_serial_register(struct usb_serial_driver *driver)
                return -ENODEV;
 
        fixup_generic(driver);
-       if (driver->usb_driver)
-               driver->usb_driver->supports_autosuspend = 1;
 
        if (!driver->description)
                driver->description = driver->driver.name;
+       if (!driver->usb_driver) {
+               WARN(1, "Serial driver %s has no usb_driver\n",
+                               driver->description);
+               return -EINVAL;
+       }
+       driver->usb_driver->supports_autosuspend = 1;
 
        /* Add this device to our list of devices */
        mutex_lock(&table_lock);
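usb_serial_register() now refuses, with a WARN and -EINVAL, any usb_serial_driver that leaves .usb_driver unset, instead of silently skipping the autosuspend setup; that is why so many hunks in this series add the field. A hedged sketch of the expected pairing, with foo used purely as a placeholder name:

	/* Sketch only: foo_driver, foo_device and id_table are placeholders. */
	static struct usb_driver foo_driver = {
		.name		= "foo",
		.probe		= usb_serial_probe,
		.disconnect	= usb_serial_disconnect,
		.id_table	= id_table,
	};

	static struct usb_serial_driver foo_device = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "foo",
		},
		.id_table	= id_table,
		.usb_driver	= &foo_driver,	/* now required at registration */
		.num_ports	= 1,
	};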
index f2ed6a3..95a8214 100644 (file)
@@ -75,6 +75,7 @@ static struct usb_serial_driver debug_device = {
                .name =         "debug",
        },
        .id_table =             id_table,
+       .usb_driver =           &debug_driver,
        .num_ports =            1,
        .bulk_out_size =        USB_DEBUG_MAX_PACKET_SIZE,
        .break_ctl =            usb_debug_break_ctl,
index b004b2a..9c014e2 100644 (file)
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
                    __func__, status, endpoint);
        } else {
                tty = tty_port_tty_get(&port->port);
-               if (urb->actual_length) {
-                       tty_insert_flip_string(tty, data, urb->actual_length);
-                       tty_flip_buffer_push(tty);
-               } else
-                       dbg("%s: empty read urb received", __func__);
-               tty_kref_put(tty);
+               if (tty) {
+                       if (urb->actual_length) {
+                               tty_insert_flip_string(tty, data,
+                                               urb->actual_length);
+                               tty_flip_buffer_push(tty);
+                       } else
+                               dbg("%s: empty read urb received", __func__);
+                       tty_kref_put(tty);
+               }
 
                /* Resubmit urb so we continue receiving */
                if (status != -ESHUTDOWN) {
index 15a5d89..1c11959 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
 #include "visor.h"
 
 /*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
 
        dbg("%s", __func__);
 
+       /*
+	 * Some Samsung Android phones in modem mode have the same ID
+	 * as the SPH-I500, but they are ACM devices, so don't bind to them
+        */
+       if (id->idVendor == SAMSUNG_VENDOR_ID &&
+               id->idProduct == SAMSUNG_SPH_I500_ID &&
+               serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+               serial->dev->descriptor.bDeviceSubClass ==
+                       USB_CDC_SUBCLASS_ACM)
+               return -ENODEV;
+
        if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
                dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
                        serial->dev->actconfig->desc.bConfigurationValue);
index c854fde..2c85530 100644 (file)
@@ -31,4 +31,9 @@ UNUSUAL_DEV(  0x04b4, 0x6831, 0x0000, 0x9999,
                "Cypress ISD-300LP",
                USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
 
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
+               "Super Top",
+               "USB 2.0  SATA BRIDGE",
+               USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
 #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
index fcc1e32..c1602b8 100644 (file)
@@ -1044,6 +1044,15 @@ UNUSUAL_DEV(  0x084d, 0x0011, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BULK32),
 
+/* Reported by <ttkspam@free.fr>
+ * The device reports a vendor-specific device class, requiring an
+ * explicit vendor/product match.
+ */
+UNUSUAL_DEV(  0x0851, 0x1542, 0x0002, 0x0002,
+               "MagicPixel",
+               "FW_Omega2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
+
 /* Andrew Lunn <andrew@lunn.ch>
  * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
  * on LUN 4.
@@ -1388,6 +1397,13 @@ UNUSUAL_DEV(  0x0f19, 0x0105, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Submitted by Nick Holloway */
+UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+               "VTech",
+               "Kidizoom",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY ),
+
 /* Reported by Michael Stattmann <michael@stattmann.com> */
 UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
                "Sony Ericsson",
@@ -1872,6 +1888,22 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
 
+/* Patch by Richard Schütz <r.schtz@t-online.de>
+ * This external hard drive enclosure uses a JMicron chip which
+ * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
+UNUSUAL_DEV(  0x1e68, 0x001b, 0x0000, 0x0000,
+               "TrekStor GmbH & Co. KG",
+               "DataStation maxi g.u",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+
+/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
+UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+               "Coby Electronics",
+               "MP3 Player",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
                "ST",
                "2A",
index 9b3ca10..f616cef 100644 (file)
@@ -128,8 +128,7 @@ static void handle_tx(struct vhost_net *net)
        size_t hdr_size;
        struct socket *sock;
 
-       /* TODO: check that we are running from vhost_worker?
-        * Not sure it's worth it, it's straight-forward enough. */
+       /* TODO: check that we are running from vhost_worker? */
        sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock)
                return;
@@ -306,7 +305,8 @@ static void handle_rx_big(struct vhost_net *net)
        size_t len, total_len = 0;
        int err;
        size_t hdr_size;
-       struct socket *sock = rcu_dereference(vq->private_data);
+       /* TODO: check that we are running from vhost_worker? */
+       struct socket *sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
                return;
 
@@ -415,7 +415,8 @@ static void handle_rx_mergeable(struct vhost_net *net)
        int err, headcount;
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
-       struct socket *sock = rcu_dereference(vq->private_data);
+       /* TODO: check that we are running from vhost_worker? */
+       struct socket *sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
                return;
 
index 2af44b7..b3363ae 100644 (file)
@@ -173,9 +173,9 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
 {
        unsigned acked_features;
 
-       acked_features =
-               rcu_dereference_index_check(dev->acked_features,
-                                           lockdep_is_held(&dev->mutex));
+       /* TODO: check that we are running from vhost_worker or dev mutex is
+        * held? */
+       acked_features = rcu_dereference_index_check(dev->acked_features, 1);
        return acked_features & (1 << bit);
 }
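The vhost hunks replace the lockdep expressions in rcu_dereference_check() and rcu_dereference_index_check() with a constant 1, which keeps the RCU annotation but makes the lockdep half of the check trivially true until the TODOs are resolved. For contrast, a hedged sketch of the fully annotated reader; vq->mutex is an assumption for illustration, not taken from these hunks:

	/* Sketch only: reader annotated with the lock the updater is assumed
	 * to hold while publishing vq->private_data. */
	static struct socket *vhost_vq_get_sock(struct vhost_virtqueue *vq)
	{
		return rcu_dereference_check(vq->private_data,
					     lockdep_is_held(&vq->mutex));
	}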
 
index d583bea..391ac93 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -1091,12 +1091,12 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
 
        dev_info(info->device, "suspend\n");
 
-       acquire_console_sem();
+       console_lock();
        mutex_lock(&(par->open_lock));
 
        if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
                mutex_unlock(&(par->open_lock));
-               release_console_sem();
+               console_unlock();
                return 0;
        }
 
@@ -1107,7 +1107,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
        pci_set_power_state(dev, pci_choose_state(dev, state));
 
        mutex_unlock(&(par->open_lock));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -1122,7 +1122,7 @@ static int ark_pci_resume (struct pci_dev* dev)
 
        dev_info(info->device, "resume\n");
 
-       acquire_console_sem();
+       console_lock();
        mutex_lock(&(par->open_lock));
 
        if (par->ref_count == 0)
@@ -1141,7 +1141,7 @@ static int ark_pci_resume (struct pci_dev* dev)
 
 fail:
        mutex_unlock(&(par->open_lock));
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 #else
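The same mechanical rename runs through the remaining framebuffer hunks: acquire_console_sem()/release_console_sem() become console_lock()/console_unlock(), and try_acquire_console_sem(), which returned zero on success, becomes console_trylock(), which returns nonzero on success, so the sense of the test flips. A hedged sketch of the resulting suspend pattern, with bar_* names as placeholders:

	/* Sketch only: bar_pci_suspend() and bar_powerdown() are placeholders. */
	static int bar_pci_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		struct fb_info *info = pci_get_drvdata(pdev);

		console_lock();			/* was acquire_console_sem() */
		fb_set_suspend(info, 1);
		bar_powerdown(info);		/* hypothetical hardware hook */
		console_unlock();		/* was release_console_sem() */

		return 0;
	}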
index dd9de2e..4cb6a57 100644 (file)
@@ -1860,11 +1860,11 @@ static void aty128_early_resume(void *data)
 {
         struct aty128fb_par *par = data;
 
-       if (try_acquire_console_sem())
+       if (!console_trylock())
                return;
        pci_restore_state(par->pdev);
        aty128_do_resume(par->pdev);
-       release_console_sem();
+       console_unlock();
 }
 #endif /* CONFIG_PPC_PMAC */
 
@@ -2438,7 +2438,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 
        printk(KERN_DEBUG "aty128fb: suspending...\n");
        
-       acquire_console_sem();
+       console_lock();
 
        fb_set_suspend(info, 1);
 
@@ -2470,7 +2470,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        if (state.event != PM_EVENT_ON)
                aty128_set_suspend(par, 1);
 
-       release_console_sem();
+       console_unlock();
 
        pdev->dev.power.power_state = state;
 
@@ -2527,9 +2527,9 @@ static int aty128_pci_resume(struct pci_dev *pdev)
 {
        int rc;
 
-       acquire_console_sem();
+       console_lock();
        rc = aty128_do_resume(pdev);
-       release_console_sem();
+       console_unlock();
 
        return rc;
 }
index 767ab4f..94e293f 100644 (file)
@@ -2069,7 +2069,7 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        if (state.event == pdev->dev.power.power_state.event)
                return 0;
 
-       acquire_console_sem();
+       console_lock();
 
        fb_set_suspend(info, 1);
 
@@ -2097,14 +2097,14 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                par->lock_blank = 0;
                atyfb_blank(FB_BLANK_UNBLANK, info);
                fb_set_suspend(info, 0);
-               release_console_sem();
+               console_unlock();
                return -EIO;
        }
 #else
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
 #endif
 
-       release_console_sem();
+       console_unlock();
 
        pdev->dev.power.power_state = state;
 
@@ -2133,7 +2133,7 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
        if (pdev->dev.power.power_state.event == PM_EVENT_ON)
                return 0;
 
-       acquire_console_sem();
+       console_lock();
 
        /*
         * PCI state will have been restored by the core, so
@@ -2161,7 +2161,7 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
        par->lock_blank = 0;
        atyfb_blank(FB_BLANK_UNBLANK, info);
 
-       release_console_sem();
+       console_unlock();
 
        pdev->dev.power.power_state = PMSG_ON;
 
index c4e1764..92bda58 100644 (file)
@@ -2626,7 +2626,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
                goto done;
        }
 
-       acquire_console_sem();
+       console_lock();
 
        fb_set_suspend(info, 1);
 
@@ -2690,7 +2690,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
        if (rinfo->pm_mode & radeon_pm_d2)
                radeon_set_suspend(rinfo, 1);
 
-       release_console_sem();
+       console_unlock();
 
  done:
        pdev->dev.power.power_state = mesg;
@@ -2715,10 +2715,10 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
                return 0;
 
        if (rinfo->no_schedule) {
-               if (try_acquire_console_sem())
+               if (!console_trylock())
                        return 0;
        } else
-               acquire_console_sem();
+               console_lock();
 
        printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
               pci_name(pdev), pdev->dev.power.power_state.event);
@@ -2783,7 +2783,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
        pdev->dev.power.power_state = PMSG_ON;
 
  bail:
-       release_console_sem();
+       console_unlock();
 
        return rc;
 }
index 8010aae..dd0e84a 100644 (file)
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
        lcd->spi = spi;
        lcd->power = FB_BLANK_POWERDOWN;
        lcd->buffer = kzalloc(8, GFP_KERNEL);
+       if (!lcd->buffer) {
+               ret = -ENOMEM;
+               goto out_free_lcd;
+       }
 
        ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
        if (IS_ERR(ld)) {
                ret = PTR_ERR(ld);
-               goto out_free_lcd;
+               goto out_free_buffer;
        }
        lcd->ld = ld;
 
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
 
 out_unregister:
        lcd_device_unregister(ld);
+out_free_buffer:
+       kfree(lcd->buffer);
 out_free_lcd:
        kfree(lcd);
        return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
 
        ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
        lcd_device_unregister(lcd->ld);
+       kfree(lcd->buffer);
        kfree(lcd);
 
        return 0;
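The ltv350qv fix above, like the larger bfin_lq035 rework that follows, converts the probe() error paths to the usual goto-unwind style: check every allocation and registration, and release what was acquired in reverse order under labels so each failure point jumps to exactly the cleanup it needs. A hedged sketch of the shape, with baz_* names as placeholders:

	/* Sketch only: baz_* names and the resources are illustrative. */
	static int baz_probe(struct spi_device *spi)
	{
		struct baz *baz;
		int ret;

		baz = kzalloc(sizeof(*baz), GFP_KERNEL);
		if (!baz)
			return -ENOMEM;

		baz->buffer = kzalloc(8, GFP_KERNEL);
		if (!baz->buffer) {
			ret = -ENOMEM;
			goto out_free_baz;
		}

		ret = baz_register(baz);	/* hypothetical registration step */
		if (ret)
			goto out_free_buffer;

		return 0;

	out_free_buffer:
		kfree(baz->buffer);		/* undo in reverse order */
	out_free_baz:
		kfree(baz);
		return ret;
	}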
index 18c5078..47c21fb 100644 (file)
@@ -696,6 +696,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
 {
        struct backlight_properties props;
        dma_addr_t dma_handle;
+       int ret;
 
        if (request_dma(CH_PPI, KBUILD_MODNAME)) {
                pr_err("couldn't request PPI DMA\n");
@@ -704,17 +705,16 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
 
        if (request_ports()) {
                pr_err("couldn't request gpio port\n");
-               free_dma(CH_PPI);
-               return -EFAULT;
+               ret = -EFAULT;
+               goto out_ports;
        }
 
        fb_buffer = dma_alloc_coherent(NULL, TOTAL_VIDEO_MEM_SIZE,
                                       &dma_handle, GFP_KERNEL);
        if (fb_buffer == NULL) {
                pr_err("couldn't allocate dma buffer\n");
-               free_dma(CH_PPI);
-               free_ports();
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_dma_coherent;
        }
 
        if (L1_DATA_A_LENGTH)
@@ -725,10 +725,8 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
 
        if (dma_desc_table == NULL) {
                pr_err("couldn't allocate dma descriptor\n");
-               free_dma(CH_PPI);
-               free_ports();
-               dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_table;
        }
 
        bfin_lq035_fb.screen_base = (void *)fb_buffer;
@@ -771,31 +769,21 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
        bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
        if (bfin_lq035_fb.pseudo_palette == NULL) {
                pr_err("failed to allocate pseudo_palette\n");
-               free_dma(CH_PPI);
-               free_ports();
-               dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_palette;
        }
 
        if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) {
                pr_err("failed to allocate colormap (%d entries)\n",
                        NBR_PALETTE);
-               free_dma(CH_PPI);
-               free_ports();
-               dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-               kfree(bfin_lq035_fb.pseudo_palette);
-               return -EFAULT;
+               ret = -EFAULT;
+               goto out_cmap;
        }
 
        if (register_framebuffer(&bfin_lq035_fb) < 0) {
                pr_err("unable to register framebuffer\n");
-               free_dma(CH_PPI);
-               free_ports();
-               dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-               fb_buffer = NULL;
-               kfree(bfin_lq035_fb.pseudo_palette);
-               fb_dealloc_cmap(&bfin_lq035_fb.cmap);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_reg;
        }
 
        i2c_add_driver(&ad5280_driver);
@@ -807,11 +795,31 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
 
        lcd_dev = lcd_device_register(KBUILD_MODNAME, &pdev->dev, NULL,
                                      &bfin_lcd_ops);
+       if (IS_ERR(lcd_dev)) {
+               pr_err("unable to register lcd\n");
+               ret = PTR_ERR(lcd_dev);
+               goto out_lcd;
+       }
        lcd_dev->props.max_contrast = 255,
 
        pr_info("initialized");
 
        return 0;
+out_lcd:
+       unregister_framebuffer(&bfin_lq035_fb);
+out_reg:
+       fb_dealloc_cmap(&bfin_lq035_fb.cmap);
+out_cmap:
+       kfree(bfin_lq035_fb.pseudo_palette);
+out_palette:
+out_table:
+       dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
+       fb_buffer = NULL;
+out_dma_coherent:
+       free_ports();
+out_ports:
+       free_dma(CH_PPI);
+       return ret;
 }
 
 static int __devexit bfin_lq035_remove(struct platform_device *pdev)
index d637e1f..cff742a 100644 (file)
@@ -460,10 +460,10 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        if (!(state.event & PM_EVENT_SLEEP))
                goto done;
 
-       acquire_console_sem();
+       console_lock();
        chipsfb_blank(1, p);
        fb_set_suspend(p, 1);
-       release_console_sem();
+       console_unlock();
  done:
        pdev->dev.power.power_state = state;
        return 0;
@@ -473,10 +473,10 @@ static int chipsfb_pci_resume(struct pci_dev *pdev)
 {
         struct fb_info *p = pci_get_drvdata(pdev);
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(p, 0);
        chipsfb_blank(0, p);
-       release_console_sem();
+       console_unlock();
 
        pdev->dev.power.power_state = PMSG_ON;
        return 0;
index 7ccc967..9c092b8 100644 (file)
@@ -375,14 +375,14 @@ static void fb_flashcursor(struct work_struct *work)
        int c;
        int mode;
 
-       acquire_console_sem();
+       console_lock();
        if (ops && ops->currcon != -1)
                vc = vc_cons[ops->currcon].d;
 
        if (!vc || !CON_IS_VISIBLE(vc) ||
            registered_fb[con2fb_map[vc->vc_num]] != info ||
            vc->vc_deccm != 1) {
-               release_console_sem();
+               console_unlock();
                return;
        }
 
@@ -392,7 +392,7 @@ static void fb_flashcursor(struct work_struct *work)
                CM_ERASE : CM_DRAW;
        ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
                    get_color(vc, info, c, 0));
-       release_console_sem();
+       console_unlock();
 }
 
 static void cursor_timer_handler(unsigned long dev_addr)
@@ -836,7 +836,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
 
        found = search_fb_in_map(newidx);
 
-       acquire_console_sem();
+       console_lock();
        con2fb_map[unit] = newidx;
        if (!err && !found)
                err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -863,7 +863,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
        if (!search_fb_in_map(info_idx))
                info_idx = newidx;
 
-       release_console_sem();
+       console_unlock();
        return err;
 }
 
@@ -3321,7 +3321,7 @@ static ssize_t store_rotate(struct device *device,
        if (fbcon_has_exited)
                return count;
 
-       acquire_console_sem();
+       console_lock();
        idx = con2fb_map[fg_console];
 
        if (idx == -1 || registered_fb[idx] == NULL)
@@ -3331,7 +3331,7 @@ static ssize_t store_rotate(struct device *device,
        rotate = simple_strtoul(buf, last, 0);
        fbcon_rotate(info, rotate);
 err:
-       release_console_sem();
+       console_unlock();
        return count;
 }
 
@@ -3346,7 +3346,7 @@ static ssize_t store_rotate_all(struct device *device,
        if (fbcon_has_exited)
                return count;
 
-       acquire_console_sem();
+       console_lock();
        idx = con2fb_map[fg_console];
 
        if (idx == -1 || registered_fb[idx] == NULL)
@@ -3356,7 +3356,7 @@ static ssize_t store_rotate_all(struct device *device,
        rotate = simple_strtoul(buf, last, 0);
        fbcon_rotate_all(info, rotate);
 err:
-       release_console_sem();
+       console_unlock();
        return count;
 }
 
@@ -3369,7 +3369,7 @@ static ssize_t show_rotate(struct device *device,
        if (fbcon_has_exited)
                return 0;
 
-       acquire_console_sem();
+       console_lock();
        idx = con2fb_map[fg_console];
 
        if (idx == -1 || registered_fb[idx] == NULL)
@@ -3378,7 +3378,7 @@ static ssize_t show_rotate(struct device *device,
        info = registered_fb[idx];
        rotate = fbcon_get_rotate(info);
 err:
-       release_console_sem();
+       console_unlock();
        return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
 }
 
@@ -3392,7 +3392,7 @@ static ssize_t show_cursor_blink(struct device *device,
        if (fbcon_has_exited)
                return 0;
 
-       acquire_console_sem();
+       console_lock();
        idx = con2fb_map[fg_console];
 
        if (idx == -1 || registered_fb[idx] == NULL)
@@ -3406,7 +3406,7 @@ static ssize_t show_cursor_blink(struct device *device,
 
        blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
 err:
-       release_console_sem();
+       console_unlock();
        return snprintf(buf, PAGE_SIZE, "%d\n", blink);
 }
 
@@ -3421,7 +3421,7 @@ static ssize_t store_cursor_blink(struct device *device,
        if (fbcon_has_exited)
                return count;
 
-       acquire_console_sem();
+       console_lock();
        idx = con2fb_map[fg_console];
 
        if (idx == -1 || registered_fb[idx] == NULL)
@@ -3443,7 +3443,7 @@ static ssize_t store_cursor_blink(struct device *device,
        }
 
 err:
-       release_console_sem();
+       console_unlock();
        return count;
 }
 
@@ -3482,7 +3482,7 @@ static void fbcon_start(void)
        if (num_registered_fb) {
                int i;
 
-               acquire_console_sem();
+               console_lock();
 
                for (i = 0; i < FB_MAX; i++) {
                        if (registered_fb[i] != NULL) {
@@ -3491,7 +3491,7 @@ static void fbcon_start(void)
                        }
                }
 
-               release_console_sem();
+               console_unlock();
                fbcon_takeover(0);
        }
 }
@@ -3552,7 +3552,7 @@ static int __init fb_console_init(void)
 {
        int i;
 
-       acquire_console_sem();
+       console_lock();
        fb_register_client(&fbcon_event_notifier);
        fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
                                     "fbcon");
@@ -3568,7 +3568,7 @@ static int __init fb_console_init(void)
        for (i = 0; i < MAX_NR_CONSOLES; i++)
                con2fb_map[i] = -1;
 
-       release_console_sem();
+       console_unlock();
        fbcon_start();
        return 0;
 }
@@ -3591,12 +3591,12 @@ static void __exit fbcon_deinit_device(void)
 
 static void __exit fb_console_exit(void)
 {
-       acquire_console_sem();
+       console_lock();
        fb_unregister_client(&fbcon_event_notifier);
        fbcon_deinit_device();
        device_destroy(fb_class, MKDEV(0, 0));
        fbcon_exit();
-       release_console_sem();
+       console_unlock();
        unregister_con_driver(&fb_con);
 }      
 
index c97491b..915fd74 100644 (file)
@@ -202,11 +202,7 @@ static void vgacon_scrollback_init(int pitch)
        }
 }
 
-/*
- * Called only duing init so call of alloc_bootmen is ok.
- * Marked __init_refok to silence modpost.
- */
-static void __init_refok vgacon_scrollback_startup(void)
+static void vgacon_scrollback_startup(void)
 {
        vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
        vgacon_scrollback_init(vga_video_num_columns * 2);
index c265aed..8d61ef9 100644 (file)
@@ -1092,9 +1092,10 @@ static int __init fb_probe(struct platform_device *device)
 
 irq_freq:
 #ifdef CONFIG_CPU_FREQ
+       lcd_da8xx_cpufreq_deregister(par);
+#endif
 err_cpu_freq:
        unregister_framebuffer(da8xx_fb_info);
-#endif
 
 err_dealloc_cmap:
        fb_dealloc_cmap(&da8xx_fb_info->cmap);
@@ -1130,14 +1131,14 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
        struct fb_info *info = platform_get_drvdata(dev);
        struct da8xx_fb_par *par = info->par;
 
-       acquire_console_sem();
+       console_lock();
        if (par->panel_power_ctrl)
                par->panel_power_ctrl(0);
 
        fb_set_suspend(info, 1);
        lcd_disable_raster();
        clk_disable(par->lcdc_clk);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -1146,14 +1147,14 @@ static int fb_resume(struct platform_device *dev)
        struct fb_info *info = platform_get_drvdata(dev);
        struct da8xx_fb_par *par = info->par;
 
-       acquire_console_sem();
+       console_lock();
        if (par->panel_power_ctrl)
                par->panel_power_ctrl(1);
 
        clk_enable(par->lcdc_clk);
        lcd_enable_raster();
        fb_set_suspend(info, 0);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index 4ac1201..e2bf953 100644 (file)
@@ -1036,11 +1036,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
                        return -EFAULT;
                if (!lock_fb_info(info))
                        return -ENODEV;
-               acquire_console_sem();
+               console_lock();
                info->flags |= FBINFO_MISC_USEREVENT;
                ret = fb_set_var(info, &var);
                info->flags &= ~FBINFO_MISC_USEREVENT;
-               release_console_sem();
+               console_unlock();
                unlock_fb_info(info);
                if (!ret && copy_to_user(argp, &var, sizeof(var)))
                        ret = -EFAULT;
@@ -1072,9 +1072,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
                        return -EFAULT;
                if (!lock_fb_info(info))
                        return -ENODEV;
-               acquire_console_sem();
+               console_lock();
                ret = fb_pan_display(info, &var);
-               release_console_sem();
+               console_unlock();
                unlock_fb_info(info);
                if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
                        return -EFAULT;
@@ -1119,11 +1119,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
        case FBIOBLANK:
                if (!lock_fb_info(info))
                        return -ENODEV;
-               acquire_console_sem();
+               console_lock();
                info->flags |= FBINFO_MISC_USEREVENT;
                ret = fb_blank(info, arg);
                info->flags &= ~FBINFO_MISC_USEREVENT;
-               release_console_sem();
+               console_unlock();
                unlock_fb_info(info);
                break;
        default:
index 0a08f13..f4a3277 100644 (file)
@@ -90,11 +90,11 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
        int err;
 
        var->activate |= FB_ACTIVATE_FORCE;
-       acquire_console_sem();
+       console_lock();
        fb_info->flags |= FBINFO_MISC_USEREVENT;
        err = fb_set_var(fb_info, var);
        fb_info->flags &= ~FBINFO_MISC_USEREVENT;
-       release_console_sem();
+       console_unlock();
        if (err)
                return err;
        return 0;
@@ -175,7 +175,7 @@ static ssize_t store_modes(struct device *device,
        if (i * sizeof(struct fb_videomode) != count)
                return -EINVAL;
 
-       acquire_console_sem();
+       console_lock();
        list_splice(&fb_info->modelist, &old_list);
        fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
                                 &fb_info->modelist);
@@ -185,7 +185,7 @@ static ssize_t store_modes(struct device *device,
        } else
                fb_destroy_modelist(&old_list);
 
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -301,11 +301,11 @@ static ssize_t store_blank(struct device *device,
        char *last = NULL;
        int err;
 
-       acquire_console_sem();
+       console_lock();
        fb_info->flags |= FBINFO_MISC_USEREVENT;
        err = fb_blank(fb_info, simple_strtoul(buf, &last, 0));
        fb_info->flags &= ~FBINFO_MISC_USEREVENT;
-       release_console_sem();
+       console_unlock();
        if (err < 0)
                return err;
        return count;
@@ -364,9 +364,9 @@ static ssize_t store_pan(struct device *device,
                return -EINVAL;
        var.yoffset = simple_strtoul(last, &last, 0);
 
-       acquire_console_sem();
+       console_lock();
        err = fb_pan_display(fb_info, &var);
-       release_console_sem();
+       console_unlock();
 
        if (err < 0)
                return err;
@@ -399,9 +399,9 @@ static ssize_t store_fbstate(struct device *device,
 
        state = simple_strtoul(buf, &last, 0);
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(fb_info, (int)state);
-       release_console_sem();
+       console_unlock();
 
        return count;
 }
index 70b1d9d..b4f19db 100644 (file)
@@ -344,10 +344,10 @@ static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
        struct fb_info *info = pci_get_drvdata(pdev);
 
        if (state.event == PM_EVENT_SUSPEND) {
-               acquire_console_sem();
+               console_lock();
                gx_powerdown(info);
                fb_set_suspend(info, 1);
-               release_console_sem();
+               console_unlock();
        }
 
        /* there's no point in setting PCI states; we emulate PCI, so
@@ -361,7 +361,7 @@ static int gxfb_resume(struct pci_dev *pdev)
        struct fb_info *info = pci_get_drvdata(pdev);
        int ret;
 
-       acquire_console_sem();
+       console_lock();
        ret = gx_powerup(info);
        if (ret) {
                printk(KERN_ERR "gxfb:  power up failed!\n");
@@ -369,7 +369,7 @@ static int gxfb_resume(struct pci_dev *pdev)
        }
 
        fb_set_suspend(info, 0);
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 #endif
index 39bdbed..416851c 100644 (file)
@@ -465,10 +465,10 @@ static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
        struct fb_info *info = pci_get_drvdata(pdev);
 
        if (state.event == PM_EVENT_SUSPEND) {
-               acquire_console_sem();
+               console_lock();
                lx_powerdown(info);
                fb_set_suspend(info, 1);
-               release_console_sem();
+               console_unlock();
        }
 
        /* there's no point in setting PCI states; we emulate PCI, so
@@ -482,7 +482,7 @@ static int lxfb_resume(struct pci_dev *pdev)
        struct fb_info *info = pci_get_drvdata(pdev);
        int ret;
 
-       acquire_console_sem();
+       console_lock();
        ret = lx_powerup(info);
        if (ret) {
                printk(KERN_ERR "lxfb:  power up failed!\n");
@@ -490,7 +490,7 @@ static int lxfb_resume(struct pci_dev *pdev)
        }
 
        fb_set_suspend(info, 0);
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 #else
index 5743ea2..318f6fb 100644 (file)
@@ -1574,7 +1574,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
                return 0;
        }
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(info, 1);
 
        if (info->fbops->fb_sync)
@@ -1587,7 +1587,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
        pci_save_state(dev);
        pci_disable_device(dev);
        pci_set_power_state(dev, pci_choose_state(dev, mesg));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -1605,7 +1605,7 @@ static int i810fb_resume(struct pci_dev *dev)
                return 0;
        }
 
-       acquire_console_sem();
+       console_lock();
        pci_set_power_state(dev, PCI_D0);
        pci_restore_state(dev);
 
@@ -1621,7 +1621,7 @@ static int i810fb_resume(struct pci_dev *dev)
        fb_set_suspend (info, 0);
        info->fbops->fb_blank(VESA_NO_BLANKING, info);
 fail:
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 /***********************************************************************
index 670ecaa..de36693 100644 (file)
@@ -778,9 +778,9 @@ static int jzfb_suspend(struct device *dev)
 {
        struct jzfb *jzfb = dev_get_drvdata(dev);
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(jzfb->fb, 1);
-       release_console_sem();
+       console_unlock();
 
        mutex_lock(&jzfb->lock);
        if (jzfb->is_enabled)
@@ -800,9 +800,9 @@ static int jzfb_resume(struct device *dev)
                jzfb_enable(jzfb);
        mutex_unlock(&jzfb->lock);
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(jzfb->fb, 0);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index cb01391..7e3a490 100644 (file)
@@ -1177,9 +1177,9 @@ static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
        struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
        struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(mx3fb->fbi, 1);
-       release_console_sem();
+       console_unlock();
 
        if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
                sdc_disable_channel(mx3_fbi);
@@ -1202,9 +1202,9 @@ static int mx3fb_resume(struct platform_device *pdev)
                sdc_set_brightness(mx3fb, mx3fb->backlight_level);
        }
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(mx3fb->fbi, 0);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index 62498bd..f838d9e 100644 (file)
@@ -696,6 +696,8 @@ static int nuc900fb_remove(struct platform_device *pdev)
        nuc900fb_stop_lcd(fbinfo);
        msleep(1);
 
+       unregister_framebuffer(fbinfo);
+       nuc900fb_cpufreq_deregister(fbi);
        nuc900fb_unmap_video_memory(fbinfo);
 
        iounmap(fbi->io);
@@ -723,7 +725,7 @@ static int nuc900fb_suspend(struct platform_device *dev, pm_message_t state)
        struct fb_info     *fbinfo = platform_get_drvdata(dev);
        struct nuc900fb_info *info = fbinfo->par;
 
-       nuc900fb_stop_lcd();
+       nuc900fb_stop_lcd(fbinfo);
        msleep(1);
        clk_disable(info->clk);
        return 0;
@@ -740,7 +742,7 @@ static int nuc900fb_resume(struct platform_device *dev)
        msleep(1);
 
        nuc900fb_init_registers(fbinfo);
-       nuc900fb_activate_var(bfinfo);
+       nuc900fb_activate_var(fbinfo);
 
        return 0;
 }
index efe10ff..081dc47 100644 (file)
@@ -1057,7 +1057,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
 
        if (mesg.event == PM_EVENT_PRETHAW)
                mesg.event = PM_EVENT_FREEZE;
-       acquire_console_sem();
+       console_lock();
        par->pm_state = mesg.event;
 
        if (mesg.event & PM_EVENT_SLEEP) {
@@ -1070,7 +1070,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
        }
        dev->dev.power.power_state = mesg;
 
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 
@@ -1079,7 +1079,7 @@ static int nvidiafb_resume(struct pci_dev *dev)
        struct fb_info *info = pci_get_drvdata(dev);
        struct nvidia_par *par = info->par;
 
-       acquire_console_sem();
+       console_lock();
        pci_set_power_state(dev, PCI_D0);
 
        if (par->pm_state != PM_EVENT_FREEZE) {
@@ -1097,7 +1097,7 @@ static int nvidiafb_resume(struct pci_dev *dev)
        nvidiafb_blank(FB_BLANK_UNBLANK, info);
 
 fail:
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 #else
index 9c0144e..65560a1 100644 (file)
@@ -513,9 +513,9 @@ static int ps3fb_release(struct fb_info *info, int user)
        if (atomic_dec_and_test(&ps3fb.f_count)) {
                if (atomic_read(&ps3fb.ext_flip)) {
                        atomic_set(&ps3fb.ext_flip, 0);
-                       if (!try_acquire_console_sem()) {
+                       if (console_trylock()) {
                                ps3fb_sync(info, 0);    /* single buffer */
-                               release_console_sem();
+                               console_unlock();
                        }
                }
        }
@@ -830,14 +830,14 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
                        if (vmode) {
                                var = info->var;
                                fb_videomode_to_var(&var, vmode);
-                               acquire_console_sem();
+                               console_lock();
                                info->flags |= FBINFO_MISC_USEREVENT;
                                /* Force, in case only special bits changed */
                                var.activate |= FB_ACTIVATE_FORCE;
                                par->new_mode_id = val;
                                retval = fb_set_var(info, &var);
                                info->flags &= ~FBINFO_MISC_USEREVENT;
-                               release_console_sem();
+                               console_unlock();
                        }
                        break;
                }
@@ -881,9 +881,9 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
                        break;
 
                dev_dbg(info->device, "PS3FB_IOCTL_FSEL:%d\n", val);
-               acquire_console_sem();
+               console_lock();
                retval = ps3fb_sync(info, val);
-               release_console_sem();
+               console_unlock();
                break;
 
        default:
@@ -903,9 +903,9 @@ static int ps3fbd(void *arg)
                set_current_state(TASK_INTERRUPTIBLE);
                if (ps3fb.is_kicked) {
                        ps3fb.is_kicked = 0;
-                       acquire_console_sem();
+                       console_lock();
                        ps3fb_sync(info, 0);    /* single buffer */
-                       release_console_sem();
+                       console_unlock();
                }
                schedule();
        }
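
Note the inverted return convention visible in the ps3fb hunk above: try_acquire_console_sem() reported success with 0, while console_trylock() returns nonzero when the lock was taken, so `if (!try_acquire_console_sem())` becomes `if (console_trylock())`. A minimal sketch of the non-blocking form (the function name is illustrative):

	#include <linux/console.h>   /* console_trylock(), console_unlock() */

	static void example_sync_if_console_free(void)
	{
		/* console_trylock() returns nonzero only when the console lock was taken */
		if (console_trylock()) {
			/* ... safe to touch the framebuffer here ... */
			console_unlock();
		}
	}
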
index cea6403..35f61dd 100644 (file)
@@ -701,16 +701,12 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
         */
        pxa168fb_init_mode(info, mi);
 
-       ret = pxa168fb_check_var(&info->var, info);
-       if (ret)
-               goto failed_free_fbmem;
-
        /*
         * Fill in sane defaults.
         */
        ret = pxa168fb_check_var(&info->var, info);
        if (ret)
-               goto failed;
+               goto failed_free_fbmem;
 
        /*
         * enable controller clock
index b81168d..cf4beb9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  pxa3xx-gc.c - Linux kernel module for PXA3xx graphics controllers
+ *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
  *
  *  This driver needs a DirectFB counterpart in user space, communication
  *  is handled via mmap()ed memory areas and an ioctl.
@@ -421,7 +421,7 @@ pxa3xx_gcu_misc_write(struct file *filp, const char *buff,
                buffer->next = priv->free;
                priv->free = buffer;
                spin_unlock_irqrestore(&priv->spinlock, flags);
-               return ret;
+               return -EFAULT;
        }
 
        buffer->length = words;
index dce8c97..75738a9 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -1113,12 +1113,12 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
 
        dev_info(info->device, "suspend\n");
 
-       acquire_console_sem();
+       console_lock();
        mutex_lock(&(par->open_lock));
 
        if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
                mutex_unlock(&(par->open_lock));
-               release_console_sem();
+               console_unlock();
                return 0;
        }
 
@@ -1129,7 +1129,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
        pci_set_power_state(dev, pci_choose_state(dev, state));
 
        mutex_unlock(&(par->open_lock));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -1145,12 +1145,12 @@ static int s3_pci_resume(struct pci_dev* dev)
 
        dev_info(info->device, "resume\n");
 
-       acquire_console_sem();
+       console_lock();
        mutex_lock(&(par->open_lock));
 
        if (par->ref_count == 0) {
                mutex_unlock(&(par->open_lock));
-               release_console_sem();
+               console_unlock();
                return 0;
        }
 
@@ -1159,7 +1159,7 @@ static int s3_pci_resume(struct pci_dev* dev)
        err = pci_enable_device(dev);
        if (err) {
                mutex_unlock(&(par->open_lock));
-               release_console_sem();
+               console_unlock();
                dev_err(info->device, "error %d enabling device for resume\n", err);
                return err;
        }
@@ -1169,7 +1169,7 @@ static int s3_pci_resume(struct pci_dev* dev)
        fb_set_suspend(info, 0);
 
        mutex_unlock(&(par->open_lock));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index 842d157..487911e 100644 (file)
@@ -2373,7 +2373,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
        if (mesg.event == PM_EVENT_FREEZE)
                return 0;
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(info, 1);
 
        if (info->fbops->fb_sync)
@@ -2385,7 +2385,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
        pci_save_state(dev);
        pci_disable_device(dev);
        pci_set_power_state(dev, pci_choose_state(dev, mesg));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -2409,7 +2409,7 @@ static int savagefb_resume(struct pci_dev* dev)
                return 0;
        }
 
-       acquire_console_sem();
+       console_lock();
 
        pci_set_power_state(dev, PCI_D0);
        pci_restore_state(dev);
@@ -2423,7 +2423,7 @@ static int savagefb_resume(struct pci_dev* dev)
        savagefb_set_par(info);
        fb_set_suspend(info, 0);
        savagefb_blank(FB_BLANK_UNBLANK, info);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index 74d9f54..2b9e56a 100644 (file)
@@ -1151,7 +1151,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
 
                ch = info->par;
 
-               acquire_console_sem();
+               console_lock();
 
                /* HDMI plug in */
                if (!sh_hdmi_must_reconfigure(hdmi) &&
@@ -1171,7 +1171,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
                        fb_set_suspend(info, 0);
                }
 
-               release_console_sem();
+               console_unlock();
        } else {
                ret = 0;
                if (!hdmi->info)
@@ -1181,12 +1181,12 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
                fb_destroy_modedb(hdmi->monspec.modedb);
                hdmi->monspec.modedb = NULL;
 
-               acquire_console_sem();
+               console_lock();
 
                /* HDMI disconnect */
                fb_set_suspend(hdmi->info, 1);
 
-               release_console_sem();
+               console_unlock();
                pm_runtime_put(hdmi->dev);
        }
 
index bd4840a..bf12e53 100644 (file)
@@ -912,9 +912,9 @@ static int sh_mobile_release(struct fb_info *info, int user)
 
        /* Nothing to reconfigure, when called from fbcon */
        if (user) {
-               acquire_console_sem();
+               console_lock();
                sh_mobile_fb_reconfig(info);
-               release_console_sem();
+               console_unlock();
        }
 
        mutex_unlock(&ch->open_lock);
index b7dc180..bcb44a5 100644 (file)
@@ -2010,9 +2010,9 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
 
        /* tell console/fb driver we are suspending */
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(fbi, 1);
-       release_console_sem();
+       console_unlock();
 
        /* backup copies in case chip is powered down over suspend */
 
@@ -2069,9 +2069,9 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
                memcpy_toio(par->cursor.k_addr, par->store_cursor,
                            par->cursor.size);
 
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(fbi, 0);
-       release_console_sem();
+       console_unlock();
 
        vfree(par->store_fb);
        vfree(par->store_cursor);
index 6913fe1..dfef88c 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/fb.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-/* Why should fb driver call console functions? because acquire_console_sem() */
+/* Why should fb driver call console functions? because console_lock() */
 #include <linux/console.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -944,7 +944,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
        struct mfd_cell *cell = dev->dev.platform_data;
        int retval = 0;
 
-       acquire_console_sem();
+       console_lock();
 
        fb_set_suspend(info, 1);
 
@@ -965,7 +965,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
        if (cell->suspend)
                retval = cell->suspend(dev);
 
-       release_console_sem();
+       console_unlock();
 
        return retval;
 }
@@ -976,7 +976,7 @@ static int tmiofb_resume(struct platform_device *dev)
        struct mfd_cell *cell = dev->dev.platform_data;
        int retval = 0;
 
-       acquire_console_sem();
+       console_lock();
 
        if (cell->resume) {
                retval = cell->resume(dev);
@@ -992,7 +992,7 @@ static int tmiofb_resume(struct platform_device *dev)
 
        fb_set_suspend(info, 0);
 out:
-       release_console_sem();
+       console_unlock();
        return retval;
 }
 #else
index 52ec095..5180a21 100644 (file)
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
        struct uvesafb_task *utask;
        struct uvesafb_ktask *task;
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
                return;
 
        if (msg->seq >= UVESAFB_TASKS_MAX)
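
The uvesafb hunk above switches the privilege check from the capability bits carried in the netlink skb parms to those of the task currently executing. A minimal sketch of that check (the wrapper name is illustrative, not part of the driver):

	#include <linux/capability.h>  /* cap_raised(), CAP_SYS_ADMIN */
	#include <linux/cred.h>        /* current_cap() */

	static bool example_caller_may_configure(void)
	{
		/* test the effective capability set of the current task,
		 * not capability bits recorded elsewhere */
		return cap_raised(current_cap(), CAP_SYS_ADMIN);
	}
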
index 289edd5..4e66349 100644 (file)
@@ -1674,17 +1674,17 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
 #ifdef CONFIG_PM
 static int viafb_suspend(void *unused)
 {
-       acquire_console_sem();
+       console_lock();
        fb_set_suspend(viafbinfo, 1);
        viafb_sync(viafbinfo);
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
 
 static int viafb_resume(void *unused)
 {
-       acquire_console_sem();
+       console_lock();
        if (viaparinfo->shared->vdev->engine_mmio)
                viafb_reset_engine(viaparinfo);
        viafb_set_par(viafbinfo);
@@ -1692,7 +1692,7 @@ static int viafb_resume(void *unused)
                viafb_set_par(viafbinfo1);
        fb_set_suspend(viafbinfo, 0);
 
-       release_console_sem();
+       console_unlock();
        return 0;
 }
 
index 85d76ec..a2965ab 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -819,12 +819,12 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
 
        dev_info(info->device, "suspend\n");
 
-       acquire_console_sem();
+       console_lock();
        mutex_lock(&(par->open_lock));
 
        if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
                mutex_unlock(&(par->open_lock));
-               release_console_sem();
+               console_unlock();
                return 0;
        }
 
@@ -835,7 +835,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
        pci_set_power_state(dev, pci_choose_state(dev, state));
 
        mutex_unlock(&(par->open_lock));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
@@ -850,7 +850,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
 
        dev_info(info->device, "resume\n");
 
-       acquire_console_sem();
+       console_lock();
        mutex_lock(&(par->open_lock));
 
        if (par->ref_count == 0)
@@ -869,7 +869,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
 
 fail:
        mutex_unlock(&(par->open_lock));
-       release_console_sem();
+       console_unlock();
 
        return 0;
 }
index 3e6934d..a20218c 100644 (file)
@@ -491,12 +491,12 @@ xenfb_make_preferred_console(void)
        if (console_set_on_cmdline)
                return;
 
-       acquire_console_sem();
+       console_lock();
        for_each_console(c) {
                if (!strcmp(c->name, "tty") && c->index == 0)
                        break;
        }
-       release_console_sem();
+       console_unlock();
        if (c) {
                unregister_console(c);
                c->flags |= CON_CONSDEV;
index 3a7e9ff..38e96ab 100644 (file)
@@ -593,19 +593,17 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
 
        /* get interface & functional clock objects */
        hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
-       hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+       if (IS_ERR(hdq_data->hdq_ick)) {
+               dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
+               ret = PTR_ERR(hdq_data->hdq_ick);
+               goto err_ick;
+       }
 
-       if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
-               dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
-               if (IS_ERR(hdq_data->hdq_ick)) {
-                       ret = PTR_ERR(hdq_data->hdq_ick);
-                       goto err_clk;
-               }
-               if (IS_ERR(hdq_data->hdq_fck)) {
-                       ret = PTR_ERR(hdq_data->hdq_fck);
-                       clk_put(hdq_data->hdq_ick);
-                       goto err_clk;
-               }
+       hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+       if (IS_ERR(hdq_data->hdq_fck)) {
+               dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
+               ret = PTR_ERR(hdq_data->hdq_fck);
+               goto err_fck;
        }
 
        hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@ err_fnclk:
        clk_disable(hdq_data->hdq_ick);
 
 err_intfclk:
-       clk_put(hdq_data->hdq_ick);
        clk_put(hdq_data->hdq_fck);
 
-err_clk:
+err_fck:
+       clk_put(hdq_data->hdq_ick);
+
+err_ick:
        iounmap(hdq_data->hdq_base);
 
 err_ioremap:
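
The omap_hdq_probe() hunk above splits one combined clock lookup into two sequential get/check steps with matching unwind labels, so each failure path releases exactly what was acquired before it. A generic sketch of that acquire-in-order, release-in-reverse pattern (the names here are illustrative, not the driver's):

	#include <linux/clk.h>              /* clk_get(), clk_put() */
	#include <linux/err.h>              /* IS_ERR(), PTR_ERR() */
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *ick, *fck;
		int ret;

		ick = clk_get(&pdev->dev, "ick");
		if (IS_ERR(ick))
			return PTR_ERR(ick);        /* nothing acquired yet, nothing to undo */

		fck = clk_get(&pdev->dev, "fck");
		if (IS_ERR(fck)) {
			ret = PTR_ERR(fck);
			goto err_fck;               /* undo only the steps that succeeded */
		}

		/* ... remainder of probe ... */
		return 0;

	err_fck:
		clk_put(ick);
		return ret;
	}
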
index 2e2400e..31649b7 100644 (file)
@@ -862,12 +862,12 @@ config SBC_EPX_C3_WATCHDOG
 
 # M68K Architecture
 
-config M548x_WATCHDOG
-       tristate "MCF548x watchdog support"
+config M54xx_WATCHDOG
+       tristate "MCF54xx watchdog support"
        depends on M548x
        help
          To compile this driver as a module, choose M here: the
-         module will be called m548x_wdt.
+         module will be called m54xx_wdt.
 
 # MIPS Architecture
 
index dd77665..20e44c4 100644 (file)
@@ -106,7 +106,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
 # M32R Architecture
 
 # M68K Architecture
-obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o
+obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
 
 # MIPS Architecture
 obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
similarity index 80%
rename from drivers/watchdog/m548x_wdt.c
rename to drivers/watchdog/m54xx_wdt.c
index cabbcfe..4d43286 100644 (file)
@@ -1,7 +1,7 @@
 /*
- * drivers/watchdog/m548x_wdt.c
+ * drivers/watchdog/m54xx_wdt.c
  *
- * Watchdog driver for ColdFire MCF548x processors
+ * Watchdog driver for ColdFire MCF547x & MCF548x processors
  * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
  *
  * Adapted from the IXP4xx watchdog driver, which carries these notices:
@@ -29,8 +29,8 @@
 #include <linux/uaccess.h>
 
 #include <asm/coldfire.h>
-#include <asm/m548xsim.h>
-#include <asm/m548xgpt.h>
+#include <asm/m54xxsim.h>
+#include <asm/m54xxgpt.h>
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 static unsigned int heartbeat = 30;    /* (secs) Default is 0.5 minute */
@@ -76,7 +76,7 @@ static void wdt_keepalive(void)
        __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
 }
 
-static int m548x_wdt_open(struct inode *inode, struct file *file)
+static int m54xx_wdt_open(struct inode *inode, struct file *file)
 {
        if (test_and_set_bit(WDT_IN_USE, &wdt_status))
                return -EBUSY;
@@ -86,7 +86,7 @@ static int m548x_wdt_open(struct inode *inode, struct file *file)
        return nonseekable_open(inode, file);
 }
 
-static ssize_t m548x_wdt_write(struct file *file, const char *data,
+static ssize_t m54xx_wdt_write(struct file *file, const char *data,
                                                size_t len, loff_t *ppos)
 {
        if (len) {
@@ -112,10 +112,10 @@ static ssize_t m548x_wdt_write(struct file *file, const char *data,
 static const struct watchdog_info ident = {
        .options        = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
                                WDIOF_KEEPALIVEPING,
-       .identity       = "Coldfire M548x Watchdog",
+       .identity       = "Coldfire M54xx Watchdog",
 };
 
-static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
+static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
                                                         unsigned long arg)
 {
        int ret = -ENOTTY;
@@ -161,7 +161,7 @@ static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
        return ret;
 }
 
-static int m548x_wdt_release(struct inode *inode, struct file *file)
+static int m54xx_wdt_release(struct inode *inode, struct file *file)
 {
        if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
                wdt_disable();
@@ -177,45 +177,45 @@ static int m548x_wdt_release(struct inode *inode, struct file *file)
 }
 
 
-static const struct file_operations m548x_wdt_fops = {
+static const struct file_operations m54xx_wdt_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
-       .write          = m548x_wdt_write,
-       .unlocked_ioctl = m548x_wdt_ioctl,
-       .open           = m548x_wdt_open,
-       .release        = m548x_wdt_release,
+       .write          = m54xx_wdt_write,
+       .unlocked_ioctl = m54xx_wdt_ioctl,
+       .open           = m54xx_wdt_open,
+       .release        = m54xx_wdt_release,
 };
 
-static struct miscdevice m548x_wdt_miscdev = {
+static struct miscdevice m54xx_wdt_miscdev = {
        .minor          = WATCHDOG_MINOR,
        .name           = "watchdog",
-       .fops           = &m548x_wdt_fops,
+       .fops           = &m54xx_wdt_fops,
 };
 
-static int __init m548x_wdt_init(void)
+static int __init m54xx_wdt_init(void)
 {
        if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
-                                               "Coldfire M548x Watchdog")) {
+                                               "Coldfire M54xx Watchdog")) {
                printk(KERN_WARNING
-                               "Coldfire M548x Watchdog : I/O region busy\n");
+                               "Coldfire M54xx Watchdog : I/O region busy\n");
                return -EBUSY;
        }
        printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
 
-       return misc_register(&m548x_wdt_miscdev);
+       return misc_register(&m54xx_wdt_miscdev);
 }
 
-static void __exit m548x_wdt_exit(void)
+static void __exit m54xx_wdt_exit(void)
 {
-       misc_deregister(&m548x_wdt_miscdev);
+       misc_deregister(&m54xx_wdt_miscdev);
        release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
 }
 
-module_init(m548x_wdt_init);
-module_exit(m548x_wdt_exit);
+module_init(m54xx_wdt_init);
+module_exit(m54xx_wdt_exit);
 
 MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
-MODULE_DESCRIPTION("Coldfire M548x Watchdog");
+MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
 
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
index c7d67e9..7990625 100644 (file)
@@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = {
 static int __init fitpc2_wdt_init(void)
 {
        int err;
+       const char *brd_name;
 
-       if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
+       brd_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+       if (!brd_name || !strstr(brd_name, "SBC-FITPC2"))
                return -ENODEV;
 
-       pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));
+       pr_info("%s found\n", brd_name);
 
        if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
                pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
index db8c4c4..2417727 100644 (file)
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
 #ifdef CONFIG_PM_SLEEP
 static int xen_hvm_suspend(void *data)
 {
+       int err;
        struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
        int *cancelled = data;
 
        BUG_ON(!irqs_disabled());
 
+       err = sysdev_suspend(PMSG_SUSPEND);
+       if (err) {
+               printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+                      err);
+               return err;
+       }
+
        *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
 
        xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
                xen_timer_resume();
        }
 
+       sysdev_resume();
+
        return 0;
 }
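
The xen hunk above brackets the SCHEDOP_shutdown hypercall with sysdev_suspend(PMSG_SUSPEND) and sysdev_resume(), bailing out before the hypercall if the system devices refuse to suspend. A minimal sketch of that bracketing (the function name is illustrative; the declaring headers are assumed for this kernel era):

	#include <linux/pm.h>      /* PMSG_SUSPEND (header assumed) */
	#include <linux/sysdev.h>  /* sysdev_suspend(), sysdev_resume() (header assumed) */

	static int example_suspend_with_sysdevs(void)
	{
		int err;

		err = sysdev_suspend(PMSG_SUSPEND);  /* quiesce system devices first */
		if (err)
			return err;                  /* do not attempt the platform suspend */

		/* ... platform-specific suspend step goes here ... */

		sysdev_resume();                     /* resume sysdevs even if the suspend was cancelled */
		return 0;
	}
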
 
index 0a9d1e2..0384afa 100644 (file)
@@ -35,7 +35,7 @@ fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
 fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.5.0.fw \
                              bnx2x/bnx2x-e1h-6.2.5.0.fw \
                              bnx2x/bnx2x-e2-6.2.5.0.fw
-fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1.fw \
+fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
                             bnx2/bnx2-rv2p-09-6.0.17.fw \
                             bnx2/bnx2-rv2p-09ax-6.0.17.fw \
                             bnx2/bnx2-mips-06-6.2.1.fw \
index 14aaee8..76404f9 100644 (file)
@@ -701,7 +701,7 @@ Driver: BNX2 - Broadcom NetXtremeII
 
 File: bnx2/bnx2-mips-06-6.2.1.fw
 File: bnx2/bnx2-rv2p-06-6.0.15.fw
-File: bnx2/bnx2-mips-09-6.2.1.fw
+File: bnx2/bnx2-mips-09-6.2.1a.fw
 File: bnx2/bnx2-rv2p-09-6.0.17.fw
 File: bnx2/bnx2-rv2p-09ax-6.0.17.fw
 
similarity index 62%
rename from firmware/bnx2/bnx2-mips-09-6.2.1.fw.ihex
rename to firmware/bnx2/bnx2-mips-09-6.2.1a.fw.ihex
index 68279b5..05e7102 100644 (file)
@@ -3,17 +3,17 @@
 :10002000000000380000565C080000A00800000036
 :100030000000574400005694080059200000008436
 :100040000000ADD808005744000001C00000AE5CBD
-:100050000800321008000000000092F80000B01CF8
-:10006000000000000000000000000000080092F8FE
-:100070000000033C00014314080004900800040041
-:10008000000012FC000146500000000000000000CB
-:1000900000000000080016FC000000040001594C9C
-:1000A000080000A80800000000003D280001595089
-:1000B00000000000000000000000000008003D28D3
-:0800C0000000003000019678F9
+:100050000800321008000000000092340000B01CBC
+:1000600000000000000000000000000008009234C2
+:100070000000033C00014250080004900800040006
+:10008000000012FC0001458C000000000000000090
+:1000900000000000080016FC000000040001588861
+:1000A000080000A80800000000003D000001588C76
+:1000B00000000000000000000000000008003D00FB
+:0800C000000000300001958CE6
 :0800C8000A00004600000000E0
 :1000D000000000000000000D636F6D362E322E31DF
-:1000E0000000000006020102000000000000000302
+:1000E00061000000060201020000000000000003A1
 :1000F000000000C800000032000000030000000003
 :1001000000000000000000000000000000000000EF
 :1001100000000010000001360000EA600000000549
 :1056800008001028080010748008010080080080BD
 :04569000800800008E
 :0C5694000A0000280000000000000000D8
-:1056A0000000000D6370362E322E31000000000025
+:1056A0000000000D6370362E322E316100000000C4
 :1056B00006020104000000000000000000000000DD
 :1056C000000000000000000038003C000000000066
 :1056D00000000000000000000000000000000020AA
 :0CB01000080049B808004C6408005050CB
 :04B01C000A000C8496
 :10B0200000000000000000000000000D7278703683
-:10B030002E322E3100000000060201030000000045
+:10B030002E322E31610000000602010300000000E4
 :10B0400000000001000000000000000000000000FF
 :10B0500000000000000000000000000000000000F0
 :10B0600000000000000000000000000000000000E0
 :10E2100000000000000000000000000000000000FE
 :10E220000000000A000000000000000000000000E4
 :10E2300010000003000000000000000D0000000DB1
-:10E240003C020801244296603C0308012463989C28
+:10E240003C020801244295A03C030801246397DCAA
 :10E25000AC4000000043202B1480FFFD244200044A
 :10E260003C1D080037BD9FFC03A0F0213C100800B6
-:10E27000261032103C1C0801279C96600E0012BE2E
+:10E27000261032103C1C0801279C95A00E0012BEEF
 :10E28000000000000000000D3C02800030A5FFFFF0
 :10E2900030C600FF344301803C0880008D0901B87E
 :10E2A0000520FFFE00000000AC6400002404000212
 :10E3000000062B0200051080004448210109182B4B
 :10E310001060001100000000910300002C6400094F
 :10E320005080000991190001000360803C0D080134
-:10E3300025AD92F8018D58218D67000000E000089E
+:10E3300025AD9234018D58218D67000000E0000862
 :10E340000000000091190001011940210109302B42
 :10E3500054C0FFF29103000003E000080000102108
 :10E360000A000CCC25080001910F0001240E000AC0
 :10E45000910400038D43000000072A0000A410254A
 :10E460003466000425080004AD42000C0A000CCC00
 :10E47000AD46000003E000082402000127BDFFE8CC
-:10E48000AFBF0014AFB000100E00167F00808021D7
+:10E48000AFBF0014AFB000100E00164E0080802108
 :10E490003C0480083485008090A600052403FFFE1C
 :10E4A0000200202100C310248FBF00148FB0001081
-:10E4B000A0A200050A00168927BD001827BDFFE8A5
+:10E4B000A0A200050A00165827BD001827BDFFE8D6
 :10E4C000AFB00010AFBF00140E000FD40080802149
 :10E4D0003C06800834C5008090A40000240200504F
 :10E4E000308300FF106200073C09800002002021F9
 :10E5B000AFA0005090CD00002406002031A400FF41
 :10E5C00010860018240E0050108E009300000000EA
 :10E5D0003C1008008E1000DC260F00013C010800F2
-:10E5E000AC2F00DC0E0016F80000000000401821DF
+:10E5E000AC2F00DC0E0016C7000000000040182110
 :10E5F0008FBF00848FBE00808FB7007C8FB60078FD
 :10E600008FB500748FB400708FB3006C8FB2006848
 :10E610008FB100648FB000600060102103E000083B
 :10E760008FB200688FB100648FB00060006010212C
 :10E7700003E0000827BD00880E000D2800002021BE
 :10E780000A000D75004018210A000D9500C02021D7
-:10E790000E00174802C020211440FFE100000000D5
+:10E790000E00171702C020211440FFE10000000006
 :10E7A0003C0B8008356400808C8A003402CA482300
 :10E7B0000520001D000000003C1E08008FDE310017
 :10E7C00027D700013C010800AC3731001260000679
 :10E7D000024020213C1408008E9431F42690000160
-:10E7E0003C010800AC3031F40E00167F3C1E80085E
+:10E7E0003C010800AC3031F40E00164E3C1E80088F
 :10E7F00037CD008091B700250240202136EE00047D
-:10E800000E001689A1AE00250E000CAC024020219E
-:10E810000A000DCA240300013C17080126F797607F
+:10E800000E001658A1AE00250E000CAC02402021CF
+:10E810000A000DCA240300013C17080126F796A040
 :10E820000A000D843C1F80008C86003002C66023E5
 :10E830001980000C2419000C908F004F3C14080024
 :10E840008E94310032B500FC35ED0001268E0001BA
 :10E8900000B410233044FFFFAFA4005832A8000298
 :10E8A0001100002E32AB00103C15800836B00080FD
 :10E8B0009216000832D30040526000FB8EE200083E
-:10E8C0000E00167F02402021240A0018A20A000927
+:10E8C0000E00164E02402021240A0018A20A000958
 :10E8D000921100052409FFFE024020210229902404
-:10E8E0000E001689A2120005240400390000282118
-:10E8F0000E001723240600180A000DCA2403000185
+:10E8E0000E001658A2120005240400390000282149
+:10E8F0000E0016F2240600180A000DCA24030001B7
 :10E9000092FE000C3C0A800835490080001EBB00C6
 :10E910008D27003836F10081024020213225F08118
 :10E920000E000C9B30C600FF0A000DC10000000065
 :10E930003AA7000130E300011460FFA402D4B02123
-:10E940000A000E1D00000000024020210E00176585
+:10E940000A000E1D00000000024020210E001734B6
 :10E95000020028210A000D75004018211160FF7087
 :10E960003C0F80083C0D800835EE00808DC40038D7
 :10E970008FA300548DA60004006660231D80FF68ED
 :10E98000000000000064C02307020001AFA400548F
 :10E990003C1F08008FFF31E433F9000113200015FC
 :10E9A0008FAC00583C07800094E3011A10600012FD
-:10E9B0003C0680080E002192024020213C03080101
-:10E9C00090639791306400021480014500000000BC
+:10E9B0003C0680080E002161024020213C03080132
+:10E9C000906396D13064000214800145000000007D
 :10E9D000306C0004118000078FAC0058306600FBDB
-:10E9E0003C010801A026979132B500FCAFA0005869
+:10E9E0003C010801A02696D132B500FCAFA000582A
 :10E9F0008FAC00583C06800834D30080AFB40018B8
 :10EA0000AFB60010AFAC00143C088000950B01209D
 :10EA10008E6F0030966A005C8FA3005C8FBF003061
 :10EAD000522E0001241E00088FAF002425E40001FF
 :10EAE000AFA400248FAA00143C0B80083565008079
 :10EAF000008A48218CB10030ACA9003090A4004EAF
-:10EB00008CA700303408FFFC0088180400E3F821CB
+:10EB00008CA700303408FFFF0088180400E3F821C8
 :10EB1000ACBF00348FA600308FB900548FB8005CB2
 :10EB200030C200081040000B033898218CAC002044
 :10EB3000119300D330C600FF92EE000C8FA7003473
 :10EB600001F1302318C00097264800803C070800B8
 :10EB70008CE731E42404FF80010418243118007F5D
 :10EB80003C1F80003C19800430F10001AFE300908D
-:10EB900012200006031928213C0308019063979175
+:10EB900012200006031928213C030801906396D136
 :10EBA00030690008152000C6306A00F73C10800864
 :10EBB00036040080908C004F318B000115600042BC
 :10EBC000000000003C0608008CC6319830CE0010D2
 :10EC30008C67000002C7782319E000978FBF00544B
 :10EC4000AC93002024130001AC760000AFB3005059
 :10EC5000AC7F000417C0004E000000008FA90050D8
-:10EC60001520000B000000003C0308019063979101
+:10EC60001520000B000000003C030801906396D1C2
 :10EC7000306A00011140002E8FAB0058306400FE56
-:10EC80003C010801A02497910A000D75000018218D
+:10EC80003C010801A02496D10A000D75000018214E
 :10EC90000E000CAC024020210A000F1300000000FF
 :10ECA0000A000E200000A0210040F80924040017EB
 :10ECB0000A000DCA240300010040F80924040016CC
 :10ECC0000A000DCA240300019094004F240DFFFE9A
 :10ECD000028D2824A085004F30F900011320000682
-:10ECE0003C0480083C03080190639791307F00103A
+:10ECE0003C0480083C030801906396D1307F0010FB
 :10ECF00017E00051306800EF34900080240A0001D2
-:10ED0000024020210E00167FA60A00129203002561
+:10ED0000024020210E00164EA60A00129203002592
 :10ED100024090001AFA90050346200010240202103
-:10ED20000E001689A20200250A000EF93C0D80088B
+:10ED20000E001658A20200250A000EF93C0D8008BC
 :10ED30001160FE83000018218FA5003030AC000464
 :10ED40001180FE2C8FBF00840A000DCB240300012C
 :10ED500027A500380E000CB6AFA000385440FF4382
 :10ED60008EE200048FB40038329001005200FF3F61
 :10ED70008EE200048FA3003C8E6E0058006E682364
 :10ED800005A3FF39AE6300580A000E948EE200041A
-:10ED90000E00167F024020213C038008346800806A
-:10EDA000024020210E001689A11E000903C0302157
-:10EDB000240400370E001723000028210A000F1139
+:10ED90000E00164E024020213C038008346800809B
+:10EDA000024020210E001658A11E000903C0302188
+:10EDB000240400370E0016F2000028210A000F116B
 :10EDC0008FA900508FAB00185960FF8D3C0D800853
-:10EDD0000E00167F02402021920C00252405000120
-:10EDE000AFA5005035820004024020210E00168994
+:10EDD0000E00164E02402021920C00252405000151
+:10EDE000AFA5005035820004024020210E001658C5
 :10EDF000A20200250A000EF93C0D800812240059D9
 :10EE00002A2300151060004D240900162408000C68
 :10EE10005628FF2732B000013C0A8008914C001BA5
 :10EE20002406FFBD241E000E01865824A14B001BA2
-:10EE30000A000EA532B000013C010801A0289791FC
+:10EE30000A000EA532B000013C010801A02896D1BD
 :10EE40000A000EF93C0D80088CB500308EFE0008DB
 :10EE50002404001826B6000103C0F809ACB600303F
-:10EE60003C030801906397913077000116E0FF8121
+:10EE60003C030801906396D13077000116E0FF81E2
 :10EE7000306A00018FB200300A000D753243000481
 :10EE80003C1080009605011A50A0FF2B34C60010DC
 :10EE90000A000EC892EE000C8C6200001456FF6D42
 :10EEA000000000008C7800048FB9005403388823D8
 :10EEB0000621FF638FBF00540A000F0E0000000000
-:10EEC0003C010801A02A97910A000F3030F9000197
+:10EEC0003C010801A02A96D10A000F3030F9000158
 :10EED0001633FF028FAF00240A000EB0241E00106C
-:10EEE0000E00167F024020213C0B80083568008010
+:10EEE0000E00164E024020213C0B80083568008041
 :10EEF00091090025240A0001AFAA0050353300040F
-:10EF0000024020210E001689A11300253C050801AE
-:10EF100090A5979130A200FD3C010801A022979195
+:10EF0000024020210E001658A11300253C050801DF
+:10EF100090A596D130A200FD3C010801A02296D117
 :10EF20000A000E6D004018212411000E53D1FEEA94
 :10EF3000241E00100A000EAF241E00165629FEDC07
 :10EF400032B000013C0A8008914C001B2406FFBD32
 :10F250000A0010588FA9000827BDFFE03C07800076
 :10F2600034E60100AFBF001CAFB20018AFB100140C
 :10F27000AFB0001094C5000E8F87000030A4FFFFD0
-:10F280002483000430E2400010400010AF83002CC3
+:10F280002483000430E2400010400010AF830028C7
 :10F290003C09002000E940241100000D30EC800002
 :10F2A0008F8A0004240BBFFF00EB38243543100085
 :10F2B000AF87000030F220001640000B3C1900041C
 :10F2D000158000423C0E002030F220001240FFF862
 :10F2E0008F8300043C19000400F9C0241300FFF5CB
 :10F2F000241FFFBF34620040AF82000430E20100EF
-:10F300001040001130F010008F83003010600006B4
+:10F300001040001130F010008F83002C10600006B8
 :10F310003C0F80003C05002000E52024148000C044
 :10F320003C0800043C0F800035EE010095CD001E26
 :10F3300095CC001C31AAFFFF000C5C00014B482556
 :10F3D0000060102103E0000827BD002000EE682433
 :10F3E00011A0FFBE30F220008F8F00043C11FFFF00
 :10F3F00036307FFF00F0382435E380000A0010A685
-:10F40000AF87000000EB102450400065AF8000285B
-:10F410008F8C00303C0D0F0000ED18241580008803
+:10F40000AF87000000EB102450400065AF8000245F
+:10F410008F8C002C3C0D0F0000ED18241580008807
 :10F42000AF83001030E8010011000086938F0010B8
 :10F430003C0A0200106A00833C1280003650010032
-:10F44000920500139789002E3626000230AF00FF88
+:10F44000920500139789002A3626000230AF00FF8C
 :10F4500025EE0004000E19C03C0480008C9801B811
 :10F460000700FFFE34880180AD0300003C198008CE
 :10F47000AC830020973100483225FFFF10A0015CCB
 :10F5700000E41824345FFFFF03E3C82B5320FF7B14
 :10F58000241100013C0608008CC6002C24C5000193
 :10F590003C010800AC25002C0A0010D42411000501
-:10F5A0008F85002810A0002FAF80001090A30000CE
+:10F5A0008F85002410A0002FAF80001090A30000D2
 :10F5B000146000792419000310A0002A30E601002D
 :10F5C00010C000CC8F860010241F000210DF00C97D
 :10F5D0008F8B000C3C0708008CE7003824E4FFFF09
 :10F5E00014E0000201641824000018213C0D0800FA
-:10F5F00025AD0038006D1021904C00048F85002C43
+:10F5F00025AD0038006D1021904C00048F85002847
 :10F6000025830004000321C030A5FFFF3626000239
 :10F610000E000FDB000000000A00114D0000182151
 :10F6200000E8302414C0FF403C0F80000E00103D65
 :10F63000000000008F8700000A0010CAAF82000C93
-:10F64000938F00103C180801271896E0000F90C017
-:10F6500002588021AF9000288F85002814A0FFD386
+:10F64000938F00103C18080127189620000F90C0D7
+:10F6500002588021AF9000248F85002414A0FFD38E
 :10F66000AF8F00103C0480008C86400030C5010044
 :10F6700010A000BC322300043C0C08008D8C002438
 :10F6800024120004106000C23190000D3C04800080
 :10F690008C8D40003402FFFF11A201003231FFFBCC
 :10F6A0008C884000310A01005540000124110010EF
-:10F6B00030EE080011C000BE2419FFFB8F98002C0B
+:10F6B00030EE080011C000BE2419FFFB8F9800280F
 :10F6C0002F0F03EF51E000010219802430E90100FF
-:10F6D00011200014320800018F87003014E000FB75
+:10F6D00011200014320800018F87002C14E000FB79
 :10F6E0008F8C000C3C05800034AB0100917F00132F
 :10F6F00033E300FF246A00042403FFFE0203802496
 :10F70000000A21C012000002023230253226FFFF1B
-:10F710000E000FDB9785002E1200FF290000182134
+:10F710000E000FDB9785002A1200FF290000182138
 :10F72000320800011100000D32180004240E0001FF
-:10F73000120E0002023230253226FFFF9785002E7E
+:10F73000120E0002023230253226FFFF9785002A82
 :10F740000E000FDB00002021240FFFFE020F80249B
 :10F750001200FF1B00001821321800045300FF188C
 :10F760002403000102323025241200045612000145
-:10F770003226FFFF9785002E0E000FDB24040100C8
+:10F770003226FFFF9785002A0E000FDB24040100CC
 :10F780002419FFFB021988241220FF0D0000182104
 :10F790000A0010E9240300011079009C00003021C8
 :10F7A00090AD00012402000211A200BE30EA004028
 :10F7B00090B90001241800011338007F30E900409F
-:10F7C0008CA600049785002E00C020210E000FDBC0
+:10F7C0008CA600049785002A00C020210E000FDBC4
 :10F7D0003626000200004021010018218FBF001CC6
 :10F7E0008FB200188FB100148FB00010006010218C
 :10F7F00003E0000827BD0020360F010095EE000C45
 :10F8000031CD020015A0FEE63C0900013C1880083D
-:10F81000971200489789002E362600023248FFFFD3
+:10F81000971200489789002A362600023248FFFFD7
 :10F82000AF8800083C0380008C7101B80620FFFE01
 :10F83000346A0180AD4000001100008E3C0F800052
 :10F84000253F0012011FC82B1320008B240E00033C
 :10F8C00034640100949F000E3C1908008F3900D861
 :10F8D0002404008033E5FFFF273100013C010800CC
 :10F8E000AC3100D80E000FDB240600030A00114DD6
-:10F8F00000001821240A000210CA00598F85002C2C
+:10F8F00000001821240A000210CA00598F85002830
 :10F900003C0308008C6300D0240E0001106E005EE2
 :10F910002CCF000C24D2FFFC2E5000041600002136
 :10F9200000002021241800021078001B2CD9000CA4
 :10F9400030EB020051600004000621C054C00022C8
 :10F9500030A5FFFF000621C030A5FFFF0A00117D82
 :10F96000362600023C0908008D29002431300001B0
-:10F970005200FEF7000018219785002E362600025F
+:10F970005200FEF7000018219785002A3626000263
 :10F980000E000FDB000020210A00114D000018219D
 :10F990000A00119C241200021320FFE624DFFFF866
 :10F9A0000000202130A5FFFF0A00117D362600024D
 :10F9B0000A0011AC021980245120FF828CA6000499
-:10F9C0003C05080190A596E110A0FF7E24080001E7
+:10F9C0003C05080190A5962110A0FF7E24080001A7
 :10F9D0000A0011F0010018210E000FDB3226000191
-:10F9E0008F8600108F85002C0A00124F000621C060
+:10F9E0008F8600108F8500280A00124F000621C064
 :10F9F0008F8500043C18800024120003371001801A
 :10FA0000A212000B0A00112E3C08800090A30001F6
 :10FA1000241100011071FF70240800012409000264
 :10FB3000AFB50024AFB40020AFB3001CAFB20018C3
 :10FB4000AFB100148E0E5000240FFF7F3C068000E2
 :10FB500001CF682435AC380C240B0003AE0C5000E8
-:10FB6000ACCB00083C010800AC2000200E00184A75
+:10FB6000ACCB00083C010800AC2000200E001819A6
 :10FB7000000000003C0A0010354980513C06601628
 :10FB8000AE09537C8CC700003C0860148D0500A0B2
 :10FB90003C03FFFF00E320243C02535300051FC237
 :10FBA0001482000634C57C000003A08002869821E0
 :10FBB0008E7200043C116000025128218CBF007C31
 :10FBC0008CA200783C1E600037C420203C05080150
-:10FBD00024A59328AF820018AF9F001C0E00170EBB
-:10FBE0002406000A3C190001273996E03C01080070
-:10FBF000AC3931DC0E002105AF8000148FD7080826
-:10FC00002418FFF03C15570902F8B02412D503243C
-:10FC100024040001AF8000303C148000369701803E
-:10FC20003C1E080127DE96E0369301008E9000000E
+:10FBD00024A59264AF820018AF9F001C0E0016DDB2
+:10FBE0002406000A3C190001273996203C01080030
+:10FBF000AC3931DC0E0020D4AF8000148FD7080858
+:10FC00002418FFF03C15570902F8B02412D502F56C
+:10FC100024040001AF80002C3C1480003697018042
+:10FC20003C1E080127DE9624369301008E900000CA
 :10FC30003205000310A0FFFD3207000110E000882C
 :10FC4000320600028E7100283C048000AE91002034
 :10FC50008E6500048E66000000A0382100C040219F
 :10FC60008C8301B80460FFFE3C0B0010240A0800DE
-:10FC700000AB4824AC8A01B8552000E2240ABFFF3B
+:10FC700000AB4824AC8A01B8552000E0240BBFFF3C
 :10FC80009675000E3C1208008E52002030AC4000E9
 :10FC900032AFFFFF264E000125ED00043C010800B5
-:10FCA000AC2E0020118000EAAF8D002C3C18002003
-:10FCB00000B8B02412C000E730B980002408BFFFAC
+:10FCA000AC2E0020118000E8AF8D00283C18002009
+:10FCB00000B8B02412C000E530B980002408BFFFAE
 :10FCC00000A8382434C81000AF87000030E62000B8
-:10FCD00010C000EB2409FFBF3C03000400E328240C
+:10FCD00010C000E92409FFBF3C03000400E328240E
 :10FCE00010A00002010910243502004030EA010092
-:10FCF00011400010AF8200048F8B003011600007AC
+:10FCF00011400010AF8200048F8B002C11600007B0
 :10FD00003C0D002000ED6024118000043C0F000435
 :10FD100000EF702411C00239000000009668001E38
 :10FD20009678001C3115FFFF0018B40002B690252C
 :10FD90003C0500018F85000430AE400055C00007CF
 :10FDA0003C0500013C161F0100F690243C0F10009A
 :10FDB000124F01CE000000003C05000100E5302498
-:10FDC00010C000B13C0C10003C1F08008FFF002445
-:10FDD00033E90002152000732403000100601021A4
-:10FDE000104000083C0680003C188000370F0100DE
-:10FDF0008DEE00243C056020ACAE00140000000035
-:10FE00003C0680003C084000ACC8013800000000FF
-:10FE10005220001332060002262A0140262B0080C1
-:10FE2000240DFF80014D2024016D6024000C194039
-:10FE30003162007F0004A9403152007F3C1620004F
-:10FE400036C900020062F82502B2382500E988258B
-:10FE500003E9C825ACD90830ACD10830320600021D
-:10FE600010C0FF723C0F800035E501408CB80000E7
-:10FE700024100040ADF8002090AE000831C300709F
-:10FE8000107000D628680041510000082405006069
-:10FE9000241100201071000E3C0B40003C06800035
-:10FEA000ACCB01780A001304000000001465FFFBCE
-:10FEB0003C0B40000E002022000000003C0B4000E4
-:10FEC0003C068000ACCB01780A001304000000005F
-:10FED00090BF0009241900048CA7000033E900FF3B
-:10FEE000113901B22523FFFA2C72000612400016C8
-:10FEF0003C0680008CAB00048F86002494A2000A8C
-:10FF0000000B5602312500FF10C000053044FFFFF2
-:10FF10002D4C000815800002254A0004240A000325
-:10FF20002410000910B001F828AE000A11C001DC4D
-:10FF3000240F000A2404000810A40028000A41C06D
-:10FF4000010038213C0680008CC801B80500FFFE86
-:10FF500034D00180AE07000034C401409085000811
-:10FF6000240A00023C0B400030B900FF00198A004F
-:10FF70000229C025A6180008A20A000B948F000AC7
-:10FF80003C091000A60F00108C8E0004AE0E002459
-:10FF9000ACC901B83C068000ACCB01780A00130460
-:10FFA000000000003C0A8000354401009483000EEC
-:10FFB0003C0208008C4200D8240400803065FFFF1A
-:10FFC000245500013C010800AC3500D80E000FDBC1
-:10FFD000240600030A00137000001821000BC2025F
-:10FFE000330300FF240A0001146AFFD60100382100
-:10FFF0008F910020AF830024262B00010A0013CA32
+:10FDC00010C000AF3C0C10003C1F08008FFF002447
+:10FDD00033E90002152000712403000100601021A6
+:10FDE000104000083C0680003C08800035180100E7
+:10FDF0008F0F00243C056020ACAF00140000000011
+:10FE00003C0680003C194000ACD9013800000000DD
+:10FE10005220001332060002262B0140262C0080BF
+:10FE2000240EFF80016E2024018E6824000D1940ED
+:10FE3000318A007F0004A9403172007F3C16200007
+:10FE400036C20002006A482502B2382500E2882541
+:10FE50000122F825ACDF0830ACD1083032060002B0
+:10FE600010C0FF723C188000370501408CA80000CC
+:10FE700024100040AF08002090AF000831E300706C
+:10FE8000107000D428790041532000082405006038
+:10FE9000241100201071000E3C0A40003C09800033
+:10FEA000AD2A01780A001304000000001465FFFB6E
+:10FEB0003C0A40000E001FF1000000003C0A400018
+:10FEC0003C098000AD2A01780A00130400000000FC
+:10FED00090A90009241F00048CA70000312800FF0E
+:10FEE000111F01B22503FFFA2C7200061240001404
+:10FEF0003C0680008CA9000494A4000A310500FF90
+:10FF000000095E022D6A00083086FFFF15400002DE
+:10FF10002567000424070003240C000910AC01FA33
+:10FF200028AD000A11A001DE2410000A240E0008EA
+:10FF300010AE0028000731C000C038213C06800008
+:10FF40008CD501B806A0FFFE34D20180AE47000078
+:10FF500034CB0140916E0008240300023C0A4000AB
+:10FF600031C400FF00046A0001A86025A64C000807
+:10FF7000A243000B9562000A3C0810003C09800077
+:10FF8000A64200108D670004AE470024ACC801B83B
+:10FF9000AD2A01780A001304000000003C0A80002A
+:10FFA000354401009483000E3C0208008C4200D8C6
+:10FFB000240400803065FFFF245500013C01080047
+:10FFC000AC3500D80E000FDB240600030A001370C6
+:10FFD000000018210009320230D900FF2418000166
+:10FFE0001738FFD5000731C08F910020262200016D
+:10FFF000AF8200200A0013C800C0382100CB2024A3
 :020000040001F9
-:10000000AF8B002000CA2024AF85000010800008BC
-:10001000AF860004240C87FF00CC5824156000082C
-:100020003C0D006000AD302410C000050000000051
-:100030000E000D42000000000A00137100000000D5
-:100040000E001636000000000A00137100000000C8
-:1000500030B980005320FF1DAF8500003C02002016
-:1000600000A2F82453E0FF19AF8500003C07FFFF12
-:1000700034E47FFF00A438240A00132B34C8800026
-:100080000A0013340109102400EC58245160005A6E
-:10009000AF8000288F8D00303C0E0F0000EE18243A
-:1000A00015A00075AF83001030EF010011E0007360
-:1000B000939800103C120200107200703C06800001
-:1000C00034D90100932800139789002E36A6000228
-:1000D000311800FF27160004001619C03C048000E8
-:1000E0008C8501B804A0FFFE34880180AD030000B8
-:1000F0003C158008AC83002096BF004833E5FFFF25
-:1001000010A001EBAF8500082523001200A3102BDF
-:10011000504001E88F850004348D010095AC00202B
-:10012000240B001A30E44000318AFFFFA10B000BC2
-:10013000108001E92543FFFE00A3702B15C001E7E5
-:100140008F9600048F8F0004A503001435E500018D
-:10015000AF8500043C08800035150180A6A9000E7B
-:10016000A6A9001A8F89000C30BF8000A6A7001036
-:10017000AEA90028A6A6000813E0000F3C0F8000DF
-:10018000350C0100958B0016316AFFFC25440004F4
-:10019000008818218C6240003046FFFF14C0000721
-:1001A0002416BFFF3C0EFFFF35CD7FFF00AD282496
-:1001B000AF8500043C0F80002416BFFF00B69024DA
-:1001C00035E50180A4B20026ACA7002C3C07100046
-:1001D000ADE701B80A001370000018210E00168E5A
-:1001E000000000003C0B40003C068000ACCB0178D6
-:1001F0000A001304000000008F85002810A00025CD
-:10020000AF80001090A3000010600072241F000354
-:10021000107F00FF0000302190AD0001240C00028F
-:1002200011AC015930EE004090BF000124190001CB
-:1002300013F9000930E900408CA600049785002ED0
-:1002400000C020210E000FDB36A600020000402176
-:100250000A001370010018215120FFF88CA6000439
-:100260003C07080190E796E110E0FFF42408000144
-:100270000A00137001001821939800100018C8C0DC
-:10028000033E4021AF8800288F85002814A0FFDDA1
+:10000000AF85000010800008AF860004240D87FF34
+:1000100000CD6024158000083C0E006000AE302446
+:1000200010C00005000000000E000D42000000009E
+:100030000A001371000000000E0016050000000009
+:100040000A0013710000000030B980005320FF1F28
+:10005000AF8500003C02002000A2F82453E0FF1B03
+:10006000AF8500003C07FFFF34E47FFF00A4382485
+:100070000A00132B34C880000A001334010910242D
+:1000800000EC58245160005AAF8000248F8D002C62
+:100090003C0E0F0000EE182415A00075AF83001071
+:1000A00030EF010011E00073939800103C12020041
+:1000B000107200703C06800034D9010093280013B0
+:1000C0009789002A36A60002311800FF271600047F
+:1000D000001619C03C0480008C8501B804A0FFFE06
+:1000E00034880180AD0300003C158008AC830020FB
+:1000F00096BF004833E5FFFF10A001BCAF850008A4
+:100100002523001200A3102B504001B98F85000455
+:10011000348D010095AC0020240B001A30E440001F
+:10012000318AFFFFA10B000B108001BA2543FFFEAF
+:1001300000A3702B15C001B88F9600048F8F0004A8
+:10014000A503001435E50001AF8500043C088000DC
+:1001500035150180A6A9000EA6A9001A8F89000CEA
+:1001600030BF8000A6A70010AEA90028A6A60008F0
+:1001700013E0000F3C0F8000350C0100958B00163A
+:10018000316AFFFC25440004008818218C6240007D
+:100190003046FFFF14C000072416BFFF3C0EFFFFD0
+:1001A00035CD7FFF00AD2824AF8500043C0F8000D3
+:1001B0002416BFFF00B6902435E50180A4B20026C6
+:1001C000ACA7002C3C071000ADE701B80A00137083
+:1001D000000018210E00165D000000003C0A4000DF
+:1001E0003C098000AD2A01780A00130400000000D9
+:1001F0008F85002410A00027AF80001090A300007E
+:10020000106000742409000310690101000030210E
+:1002100090AE0001240D000211CD014230EF0040EC
+:1002200090A90001241F0001113F000930E20040A5
+:100230008CA600049785002A00C020210E000FDB49
+:1002400036A60002000040210A00137001001821A8
+:100250005040FFF88CA600043C07080190E7962167
+:1002600010E0FFF4240800010A00137001001821B7
+:10027000939800103C1F080127FF96200018C8C063
+:10028000033F4021AF8800248F85002414A0FFDBAA
 :10029000AF9800103C0480008C86400030C50100FF
-:1002A00010A0008732AA00043C0B08008D6B0024CC
-:1002B00024160004154000033172000D24160002BC
-:1002C0003C0480008C8D4000340CFFFF11AC012CED
-:1002D00032B5FFFB8C8F400031EE010055C00001AC
-:1002E0002415001030F8080013000038241FFFFB0D
-:1002F0008F99002C2F2803EF51000001025F9024FA
-:1003000030E9010011200014325900018F870030BC
-:1003100014E001278F8B000C3C0480003486010020
-:1003200090C5001330A300FF24620004000221C026
-:100330002408FFFE024890241240000202B6302535
-:1003400032A6FFFF0E000FDB9785002E1240FEA3A2
-:1003500000001821325900011320000D324700041B
-:10036000241F0001125F000202B6302532A6FFFFF3
-:100370009785002E0E000FDB000020212409FFFED0
-:10038000024990241240FE950000182132470004D3
+:1002A00010A0008732AB00043C0C08008D8C0024A9
+:1002B00024160004156000033192000D241600027C
+:1002C0003C0480008C8E4000340DFFFF11CD0113E3
+:1002D00032B5FFFB8C984000330F010055E0000160
+:1002E0002415001030E80800110000382409FFFB35
+:1002F0008F9F00282FF903EF53200001024990241B
+:1003000030E2010010400014325F00018F87002CA2
+:1003100014E0010E8F8C000C3C0480003486010038
+:1003200090C5001330AA00FF25430004000321C03C
+:100330002419FFFE025990241240000202B6302513
+:1003400032A6FFFF0E000FDB9785002A1240FEA3A6
+:1003500000001821325F000113E0000D3247000455
+:10036000240900011249000202B6302532A6FFFF1F
+:100370009785002A0E000FDB000020212402FFFEDB
+:10038000024290241240FE950000182132470004DA
 :1003900050E0FE922403000102B63025241600042A
-:1003A0005656000132A6FFFF9785002E0E000FDB88
-:1003B000240401002402FFFB0242A82412A0FE87AD
+:1003A0005656000132A6FFFF9785002A0E000FDB8C
+:1003B000240401002403FFFB0243A82412A0FE87AB
 :1003C000000018210A001370240300010A0014B968
-:1003D000025F902410A0FFAF30E5010010A00017CD
-:1003E0008F8600102402000210C200148F84000CBB
-:1003F0003C0608008CC6003824C3FFFF14C000026E
-:1004000000831024000010213C0D080025AD0038A9
-:10041000004D6021918B00048F85002C256A00041B
-:10042000000A21C030A5FFFF36A600020E000FDB38
-:10043000000000000A00137000001821240E0002C2
-:1004400010CE0088241200013C0308008C6300D009
-:100450001072008D8F85002C24C8FFFC2D1800041D
-:100460001700006300002021241900021079005DAC
-:100470002CDF000C24C2FFF82C4900041520FFE9F2
-:100480000000202130E3020050600004000621C07B
+:1003D0000249902410A0FFAF30E5010010A00017E3
+:1003E0008F8600102403000210C300148F84000CB9
+:1003F0003C0608008CC6003824CAFFFF14C0000267
+:10040000008A1024000010213C0E080025CE003880
+:10041000004E682191AC00048F850028258B0004D4
+:10042000000B21C030A5FFFF36A600020E000FDB37
+:10043000000000000A00137000001821240F0002C1
+:1004400010CF0088241600013C0308008C6300D004
+:100450001076008D8F85002824D9FFFC2F280004FA
+:100460001500006300002021241F0002107F005DA2
+:100470002CC9000C24C3FFF82C6200041440FFE9CF
+:100480000000202130EA020051400004000621C093
 :1004900054C0000530A5FFFF000621C030A5FFFFB6
 :1004A0000A00150436A600020E000FDB32A600017A
-:1004B0008F8600108F85002C0A001520000621C0B1
-:1004C0003C0308008C630024307200015240FE435C
-:1004D000000018219785002E36A600020E000FDBC3
+:1004B0008F8600108F8500280A001520000621C0B5
+:1004C0003C0A08008D4A0024315200015240FE438C
+:1004D000000018219785002A36A600020E000FDBC7
 :1004E000000020210A001370000018219668000CFB
 :1004F000311802005700FE313C0500013C1F800806
-:1005000097F900489789002E36A600023328FFFF8E
+:1005000097F900489789002A36A600023328FFFF92
 :10051000AF8800083C0380008C7501B806A0FFFE80
-:100520003C04800034820180AC400000110000E7F0
-:1005300024180003252A0012010A182B106000E37A
+:100520003C04800034820180AC400000110000B621
+:1005300024180003252A0012010A182B106000B2AB
 :1005400000000000966F00203C0E8000240D001A71
 :1005500031ECFFFF35CA018030EB4000A14D000BAC
-:10056000116000E12583FFFE0103902B164000DFA0
+:10056000116000B02583FFFE0103902B164000AE02
 :100570002416FFFE34A50001A5430014AF85000436
 :100580002419BFFF00B94024A6E9000EA6E9001A0D
 :10059000A6E60008A6E80026A6E700103C07100023
 :1005A000AE8701B80A001370000018213C048000D7
-:1005B0008C8901B80520FFFE349601802415001CAB
+:1005B0008C8201B80440FFFE349601802415001C93
 :1005C000AEC70000A2D5000B3C071000AC8701B8F5
-:1005D0003C0B40003C068000ACCB01780A001304C1
-:1005E0000000000013E0FFA424C2FFF80000202157
+:1005D0003C0A40003C098000AD2A01780A0013045F
+:1005E000000000005120FFA424C3FFF800002021D8
 :1005F00030A5FFFF0A00150436A600020E00103DCC
 :10060000000000008F8700000A001346AF82000C34
-:1006100090A30001241500011075FF0D24080001AE
-:10062000240900021069000430E60040240800019B
-:100630000A0013700100182150C0FFFD24080001BA
-:100640003C0B8000356A01009543001094A4000221
-:100650003062FFFF5082FDE1010018210A0015857C
-:10066000240800018F85002C2CAF03EF11E0FDDB87
-:10067000240300013C0308008C6300D02412000115
-:100680001472FF7624C8FFFC2CD6000C12C0FF7237
+:1006100090A30001241500011075FF0B24080001B0
+:10062000240600021066000430E2004024080001A5
+:100630000A001370010018215040FFFD240800013A
+:100640003C0C8000358B0100956A001094A40002D8
+:100650003143FFFF5083FDE1010018210A00158599
+:10066000240800018F8500282CB203EF1240FDDB27
+:10067000240300013C0308008C6300D02416000111
+:100680001476FF7624D9FFFC2CD8000C1300FF72DF
 :10069000000621C030A5FFFF0A00150436A600029F
-:1006A00010AF005700043A022406000B14A6FE24E3
-:1006B000000A41C0316600FF0006FE00001F5E0315
-:1006C0000562007230C6007F000668C001BE382196
-:1006D000A0E00001A0E000003C1660008ED21820CF
-:1006E000240C000100CC100400021827000A41C0AD
-:1006F0000243A824A4E0000201003821AED518204E
-:100700000A0013CB3C06800014C000368F8D0020F9
-:10071000000A41C03C1F80003C058008AFE8002073
-:1007200094B9004013200002240500012405004173
-:100730003C0480008C8701B804E0FFFE3495018002
-:1007400024020003AEA80000A2A2000BA6A0000E87
-:10075000A6A0001AA6A00010AEA00028A6A500081A
-:1007600096A3002634720001A6B20026AEA0002C8B
-:100770003C161000AC9601B80A0013CA01003821DB
-:100780000A0014B22415002011C0FEB53C088000F8
-:10079000351801009716001094B2000232CF0FFFF7
-:1007A000164FFEAF000000000A00148490BF000145
-:1007B0003C0A08008D4A0038254CFFFF1540000216
-:1007C000016C1024000010213C1808002718003884
-:1007D0000058782191EE000425CD00040A0014C5CC
-:1007E000000D21C0000A41C025ACFFFF1580FDD4DB
-:1007F000AF8C0020010038210A0013CAAF8000240A
-:10080000240300FF10E3FDCE000A41C010C0001712
-:1008100000078600000720C0009E18212402000166
-:100820003C05080124A596E4009E7821000A41C0F9
-:100830000085C821000B8C02A0620000AF280000D8
-:10084000A1F100013C0E60008DC6182024180001A3
-:1008500000F8500400CA202501003821A5EB000251
-:10086000ADC418200A0013CB3C06800000104603DC
-:100870000502000D30E7007F11430006000720C08D
-:10088000009E18210A001601240200020A0015AB7E
-:10089000AF800020009E18210A00160124020003E8
-:1008A0000A0012FFAF8400300A001617AF8700203D
-:1008B0008F8500043C19800024080003373801802C
-:1008C000A308000B0A00144F3C088000A2F8000B9C
-:1008D0000A00155A2419BFFF8F9600042412FFFE48
-:1008E0000A00144D02D228242416FFFE0A001558CF
-:1008F00000B628243C038000346401008C8500008D
-:1009000030A2003E1440000800000000AC60004827
-:100910008C87000030E607C010C000050000000012
-:10092000AC60004CAC60005003E000082402000101
-:10093000AC600054AC6000408C880000310438008A
-:100940001080FFF9000000002402000103E000080D
-:10095000AC6000443C0380008C6201B80440FFFEA0
-:1009600034670180ACE4000024080001ACE000041E
-:10097000A4E5000824050002A0E8000A3464014050
-:10098000A0E5000B9483000A14C00008A4E3001043
-:10099000ACE000243C07800034E901803C041000F6
-:1009A000AD20002803E00008ACE401B88C86000408
-:1009B0003C041000ACE600243C07800034E90180D0
-:1009C000AD20002803E00008ACE401B83C0680003C
-:1009D0008CC201B80440FFFE34C701802409000224
-:1009E000ACE40000ACE40004A4E50008A0E9000ABF
-:1009F00034C50140A0E9000B94A8000A3C04100093
-:100A0000A4E80010ACE000248CA30004ACE30028B0
-:100A100003E00008ACC401B83C039000346200015C
-:100A2000008220253C038000AC6400208C650020FF
-:100A300004A0FFFE0000000003E00008000000002A
-:100A40003C028000344300010083202503E00008BD
-:100A5000AC44002027BDFFE03C098000AFBF001878
-:100A6000AFB10014AFB00010352801408D10000068
-:100A7000910400099107000891050008308400FFE7
-:100A800030E600FF00061A002C820081008330252A
-:100A90001040002A30A50080000460803C0D080151
-:100AA00025AD9350018D58218D6A0000014000084A
-:100AB000000000003C038000346201409445000ABD
-:100AC00014A0001E8F91FCC09227000530E60004A0
-:100AD00014C0001A000000000E00167F0200202142
-:100AE000922A000502002021354900040E001689D3
-:100AF000A229000592280005310400041480000298
-:100B0000000000000000000D922D0000240B0020CA
-:100B100031AC00FF158B00093C0580008CAE01B89C
-:100B200005C0FFFE34B10180AE3000003C0F100064
-:100B300024100005A230000BACAF01B80000000D7E
-:100B40008FBF00188FB100148FB0001003E00008B1
-:100B500027BD00200200202100C028218FBF0018DF
-:100B60008FB100148FB00010240600010A00164E49
-:100B700027BD00200000000D0200202100C0282118
-:100B80008FBF00188FB100148FB00010000030210B
-:100B90000A00164E27BD002014A0FFE80000000048
-:100BA000020020218FBF00188FB100148FB00010F9
-:100BB00000C028210A00166C27BD00203C078000D9
-:100BC0008CEE01B805C0FFFE34F00180241F000246
-:100BD000A21F000B34F80140A60600089719000A6E
-:100BE0003C0F1000A61900108F110004A61100126E
-:100BF000ACEF01B80A0016CA8FBF001827BDFFE886
-:100C0000AFBF00100E000FD4000000003C028000B7
-:100C10008FBF001000002021AC4001800A00108F1F
-:100C200027BD00183084FFFF30A5FFFF10800007AC
-:100C30000000182130820001104000020004204210
-:100C4000006518211480FFFB0005284003E0000820
-:100C50000060102110C00007000000008CA20000FE
-:100C600024C6FFFF24A50004AC82000014C0FFFBD3
-:100C70002484000403E000080000000010A0000825
-:100C800024A3FFFFAC86000000000000000000006D
-:100C90002402FFFF2463FFFF1462FFFA2484000490
-:100CA00003E00008000000003C03800027BDFFF8BF
-:100CB00034620180AFA20000308C00FF30AD00FF35
-:100CC00030CE00FF3C0B80008D6401B80480FFFE35
-:100CD000000000008FA900008D6801288FAA000085
-:100CE0008FA700008FA40000240500012402000249
-:100CF000A085000A8FA30000359940003C05100034
-:100D0000A062000B8FB800008FAC00008FA600001F
-:100D10008FAF000027BD0008AD280000AD400004E3
-:100D2000AD800024ACC00028A4F90008A70D001075
-:100D3000A5EE001203E00008AD6501B83C0680088E
-:100D400027BDFFE834C50080AFBF001090A70009A1
-:100D50002402001230E300FF1062000B00803021FB
-:100D60008CA8005000882023048000088FBF00104A
-:100D70008CAA0034240400390000282100CA48232A
-:100D800005200005240600128FBF00102402000178
-:100D900003E0000827BD00180E0017230000000024
-:100DA0008FBF00102402000103E0000827BD0018D7
-:100DB00027BDFFC8AFB20030AFB00028AFBF0034CE
-:100DC000AFB1002C00A0802190A5000D30A600102E
-:100DD00010C00010008090213C0280088C44000468
-:100DE0008E0300081064000C30A7000530A6000533
-:100DF00010C00093240400018FBF00348FB2003074
-:100E00008FB1002C8FB000280080102103E0000873
-:100E100027BD003830A7000510E0000F30AB0012EE
-:100E200010C00006240400013C0980088E08000858
-:100E30008D2500045105009C240400388FBF003428
-:100E40008FB200308FB1002C8FB0002800801021AD
-:100E500003E0000827BD0038240A0012156AFFE6E7
-:100E6000240400010200202127A500100E000CB66A
-:100E7000AFA000101440007C3C198008372400808B
-:100E800090980008331100081220000A8FA7001064
-:100E900030FF010013E000A48FA300148C860058DB
-:100EA00000661023044000043C0A8008AC8300580C
-:100EB0008FA700103C0A800835480080910900087F
-:100EC000312400081480000224080003000040219F
-:100ED0003C1F800893F1001193F9001237E600805F
-:100EE0008CCC0054333800FF03087821322D00FFEA
-:100EF000000F708001AE282100AC582B1160006FEC
-:100F00000000000094CA005C8CC900543144FFFF0B
-:100F1000012510230082182B1460006800000000D7
-:100F20008CCB00540165182330EC00041180006C58
-:100F3000000830808FA8001C0068102B1040006251
-:100F400030ED0004006610232C46008010C0000223
-:100F500000408821241100800E00167F02402021CD
-:100F60003C0D800835A6008024070001ACC7000CAA
-:100F700090C800080011484035A70100310C007FDF
-:100F8000A0CC00088E05000424AB0001ACCB0030DF
-:100F9000A4D1005C8CCA003C9602000E01422021C4
-:100FA000ACC400208CC3003C0069F821ACDF001CFD
-:100FB0008E190004ACF900008E180008ACF800048B
-:100FC0008FB10010322F000855E0004793A6002093
-:100FD000A0C0004E90D8004E2411FFDFA0F80008FA
-:100FE00090CF000801F17024A0CE00088E05000803
-:100FF0003C0B800835690080AD2500388D6A0014EF
-:101000008D2200302419005001422021AD240034EB
-:1010100091230000307F00FF13F90036264F0100B6
-:101020000E001689024020212404003800002821E7
-:101030000E0017232406000A0A0017882404000162
-:101040000E000D28000020218FBF00348FB2003029
-:101050008FB1002C8FB0002800402021008010218B
-:1010600003E0000827BD00388E0E00083C0F800802
-:1010700035F00080AE0E005402402021AE0000305A
-:101080000E00167F00000000920D00250240202176
-:1010900035AC00200E001689A20C00250E000CAC09
-:1010A00002402021240400382405008D0E0017235F
-:1010B000240600120A0017882404000194C5005C6D
-:1010C0000A0017C330A3FFFF2407021811A0FF9ED8
-:1010D00000E610238FAE001C0A0017CB01C61023B8
-:1010E0000A0017C82C620218A0E600080A0017F5CB
-:1010F0008E0500082406FF8001E6C0243C11800014
-:10110000AE3800288E0D000831E7007F3C0E800CC1
-:1011100000EE6021AD8D00E08E080008AF8C003C31
-:101120000A001801AD8800E4AC80005890850008E2
-:101130002403FFF700A33824A08700080A0017A69D
-:101140008FA700103C05080024A5616C3C04080032
-:10115000248470B83C020800244261742403000611
-:101160003C010801AC2597603C010801AC24976460
-:101170003C010801AC2297683C010801A023976C50
-:1011800003E000080000000003E000082402000162
-:101190003C028000308800FF344701803C0680001C
-:1011A0008CC301B80460FFFE000000008CC501285C
-:1011B0002418FF803C0D800A24AF010001F8702440
-:1011C00031EC007FACCE0024018D2021ACE5000085
-:1011D000948B00EA3509600024080002316AFFFFA1
-:1011E000ACEA000424020001A4E90008A0E8000B16
-:1011F000ACE000243C071000ACC701B8AF84003C51
-:1012000003E00008AF8500709388004C8F8900646C
-:101210008F82003C30C600FF0109382330E900FF0F
-:101220000122182130A500FF2468008810C00002A8
-:10123000012438210080382130E4000314800003A9
-:1012400030AA00031140000D312B000310A000094B
-:101250000000102190ED0000244E000131C200FF7B
-:101260000045602BA10D000024E700011580FFF967
-:101270002508000103E00008000000001560FFF3EE
-:101280000000000010A0FFFB000010218CF80000FF
-:1012900024590004332200FF0045782BAD180000CC
-:1012A00024E7000415E0FFF92508000403E0000826
-:1012B000000000009385004C9388005C8F870064D9
-:1012C000000432003103007F00E5102B30C47F00A2
-:1012D0001040000F006428258F84003C3C098000EA
-:1012E0008C8A00ECAD2A00A43C03800000A35825A2
-:1012F000AC6B00A08C6C00A00580FFFE000000001D
-:101300008C6D00ACAC8D00EC03E000088C6200A892
-:101310000A0018B38F84003C9388005D3C02800073
-:1013200000805021310300FEA383005D30ABFFFF3E
-:1013300030CC00FF30E7FFFF344801803C098000DB
-:101340008D2401B80480FFFE8F8D007024180016D4
-:10135000AD0D00008D2201248F8D003CAD020004F4
-:101360008D590020A5070008240201C4A119000A14
-:10137000A118000B952F01208D4E00088D47000409
-:10138000978300608D59002401CF302100C72821A8
-:1013900000A320232418FFFFA504000CA50B000EBA
-:1013A000A5020010A50C0012AD190018AD180024FC
-:1013B00095AF00E83C0B10002407FFF731EEFFFF6C
-:1013C000AD0E00288DAC0084AD0C002CAD2B01B807
-:1013D0008D46002000C7282403E00008AD4500200A
-:1013E0008F88003C0080582130E7FFFF910900D62C
-:1013F0003C02800030A5FFFF312400FF00041A00EA
-:101400000067502530C600FF344701803C0980004A
-:101410008D2C01B80580FFFE8F820070240F00170D
-:10142000ACE200008D390124ACF900048D78002075
-:10143000A4EA0008241901C4A0F8000AA0EF000BD8
-:10144000952301208D6E00088D6D00049784006047
-:1014500001C35021014D602101841023A4E2000C3E
-:10146000A4E5000EA4F90010A4E60012ACE00014FC
-:101470008D780024240DFFFFACF800188D0F007C40
-:10148000ACEF001C8D0E00783C0F1000ACEE00207D
-:10149000ACED0024950A00BE240DFFF73146FFFF96
-:1014A000ACE60028950C00809504008231837FFF14
-:1014B0000003CA003082FFFF0322C021ACF8002CD9
-:1014C000AD2F01B8950E00828D6A002000AE30214C
-:1014D000014D2824A506008203E00008AD65002028
-:1014E0003C028000344501803C0480008C8301B8BC
-:1014F0000460FFFE8F8A0048240600199549001CED
-:101500003128FFFF000839C0ACA70000A0A6000BDF
-:101510003C05100003E00008AC8501B88F8700503F
-:101520000080402130C400FF3C0680008CC201B81E
-:101530000440FFFE8F8900709383006C3499600033
-:10154000ACA90000A0A300058CE20010240F00024B
-:101550002403FFF7A4A20006A4B900088D180020F8
-:10156000A0B8000AA0AF000B8CEE0000ACAE0010DB
-:101570008CED0004ACAD00148CEC001CACAC002471
-:101580008CEB0020ACAB00288CEA002C3C07100050
-:10159000ACAA002C8D090024ACA90018ACC701B876
-:1015A0008D05002000A3202403E00008AD040020E6
-:1015B0008F86003C27BDFFE0AFB10014AFBF00181D
-:1015C000AFB0001090C300D430A500FF30620020FF
-:1015D00010400008008088218CCB00D02409FFDF58
-:1015E000256A0001ACCA00D090C800D40109382493
-:1015F000A0C700D414A000403C0C80008F84003CA5
-:10160000908700D42418FFBF2406FFEF30E3007F4B
-:10161000A08300D4979F00608F8200648F8D003C70
-:1016200003E2C823A7990060A5A000BC91AF00D435
-:1016300001F87024A1AE00D48F8C003CA18000D7AB
-:101640008F8A003CA5400082AD4000EC914500D45B
-:1016500000A65824A14B00D48F9000388F840064DA
-:10166000978600600204282110C0000FAF85003863
-:10167000A380005C3C0780008E2C000894ED0120C4
-:101680008E2B0004018D5021014B80210206202366
-:101690003086FFFF30C8000F3909000131310001E9
-:1016A00016200009A388005C9386004C8FBF0018A9
-:1016B0008FB100148FB0001027BD0020AF850068E7
-:1016C00003E00008AF86006400C870238FBF0018D5
-:1016D0009386004C8FB100148FB0001034EF0C00D3
-:1016E000010F282127BD0020ACEE0084AF850068E3
-:1016F00003E00008AF8600643590018002002821D5
-:101700000E001940240600828F84003C908600D48D
-:1017100030C5004050A0FFBAA380006C8F850050F8
-:101720003C0680008CCD01B805A0FFFE8F890070BB
-:101730002408608224070002AE090000A608000801
-:10174000A207000B8CA300083C0E1000AE03001093
-:101750008CA2000CAE0200148CBF0014AE1F001847
-:101760008CB90018AE1900248CB80024AE180028DB
-:101770008CAF0028AE0F002CACCE01B80A0019794E
-:10178000A380006C8F8A003C27BDFFE0AFB100143E
-:10179000AFB000108F880064AFBF0018938900407D
-:1017A000954200BC30D100FF0109182B0080802138
-:1017B00030AC00FF3047FFFF0000582114600003E9
-:1017C000310600FF01203021010958239783006072
-:1017D0000068202B148000270000000010680056CD
-:1017E000241900011199006334E708803165FFFF77
-:1017F0000E0018F1020020218F8300703C0780004A
-:1018000034E601803C0580008CAB01B80560FFFE2A
-:10181000240A00188F84003CACC30000A0CA000B4F
-:10182000948900BE3C081000A4C90010ACC0003070
-:10183000ACA801B89482008024430001A4830080F6
-:10184000949F00803C0608008CC6318833EC7FFFF3
-:101850001186005E000000000200202102202821E5
-:101860008FBF00188FB100148FB000100A001965E7
-:1018700027BD0020914400D42403FF8000838825E5
-:10188000A15100D4978400603088FFFF51000023ED
-:10189000938C00408F85003C2402EFFF008B78235F
-:1018A00094AE00BC0168502B31E900FF01C26824EE
-:1018B000A4AD00BC51400039010058213C1F8000FC
-:1018C00037E601008CD800043C19000103194024BC
-:1018D0005500000134E740008E0A00202403FFFB7E
-:1018E0002411000101432024AE0400201191002D99
-:1018F00034E7800002002021012030210E0018F181
-:101900003165FFFF978700608F890064A7800060C2
-:1019100001278023AF900064938C00408F8B003CA4
-:101920008FBF00188FB100148FB0001027BD0020AA
-:1019300003E00008A16C00D73C0D800035AA01002F
-:101940008D4800043C0900010109282454A000012D
-:1019500034E740008E0F00202418FFFB34E780009E
-:1019600001F8702424190001AE0E00201599FF9F84
-:1019700034E70880020020210E0018BF3165FFFF08
-:1019800002002021022028218FBF00188FB10014EF
-:101990008FB000100A00196527BD00200A001A2820
-:1019A0000000482102002021012030210E0018BF34
-:1019B0003165FFFF978700608F890064A780006012
-:1019C000012780230A001A3FAF900064948C0080A6
-:1019D000241F8000019F3024A4860080908B00800B
-:1019E000908F0080316700FF0007C9C20019C0272F
-:1019F000001871C031ED007F01AE2825A085008060
-:101A00000A001A10020020219385006C24030001B3
-:101A100027BDFFE800A330042CA20020AFB00010C7
-:101A2000AFBF001400C01821104000132410FFFEA7
-:101A30003C0708008CE7319000E610243C08800049
-:101A40003505018014400005240600848F89003C80
-:101A5000240A00042410FFFFA12A00FC0E001940F4
-:101A600000000000020010218FBF00148FB0001092
-:101A700003E0000827BD00183C0608008CC631941E
-:101A80000A001A8800C310248F87004827BDFFE092
-:101A9000AFB20018AFB10014AFB00010AFBF001C60
-:101AA00030D000FF90E6000D00A08821008090213A
-:101AB00030C5007FA0E5000D8F85003C8E23001807
-:101AC0008CA200D01062002E240A000E0E001A7B99
-:101AD000A38A006C2409FFFF104900222404FFFFA1
-:101AE00052000020000020218E2600003C0C001037
-:101AF00000CC5824156000393C0E000800CE682444
-:101B000055A0003F024020213C18000200D880244C
-:101B10001200001F3C0A00048F8700488CE200146A
-:101B20008CE300108CE500140043F82303E5C82B78
-:101B300013200005024020218E24002C8CF100107F
-:101B4000109100310240202124020012A382006C77
-:101B50000E001A7B2412FFFF105200022404FFFF24
-:101B6000000020218FBF001C8FB200188FB100141D
-:101B70008FB000100080102103E0000827BD002076
-:101B800090A800D4350400200A001AB1A0A400D403
-:101B900000CA48241520000B8F8B00488F8D004809
-:101BA0008DAC00101580000B024020218E2E002CE1
-:101BB00051C0FFEC00002021024020210A001ACC75
-:101BC000240200178D66001050C0FFE6000020219F
-:101BD000024020210A001ACC2402001102402021D8
-:101BE000240200150E001A7BA382006C240FFFFF55
-:101BF000104FFFDC2404FFFF0A001ABB8E260000F2
-:101C00000A001AF2240200143C08000400C8382418
-:101C100050E0FFD400002021024020210A001ACC0D
-:101C2000240200138F85003C27BDFFD8AFB3001CF2
-:101C3000AFB20018AFB10014AFB00010AFBF0020BA
-:101C400090A700D48F9000502412FFFF34E2004090
-:101C500092060000A0A200D48E03001000809821FC
-:101C60001072000630D1003F2408000D0E001A7BD0
-:101C7000A388006C105200252404FFFF8F8A003CCB
-:101C80008E0900188D4400D0112400070260202125
-:101C9000240C000E0E001A7BA38C006C240BFFFF9B
-:101CA000104B001A2404FFFF240400201224000417
-:101CB0008F8D003C91AF00D435EE0020A1AE00D452
-:101CC0008F85005810A00019000000001224004A5F
-:101CD0008F98003C8F92FCC0971000809651000AAC
-:101CE000523000488F9300443C1F08008FFF318C16
-:101CF00003E5C82B1720001E0260202100002821C8
-:101D00000E0019DA24060001000020218FBF0020F8
-:101D10008FB3001C8FB200188FB100148FB0001069
-:101D20000080102103E0000827BD00285224002A6B
-:101D30008E0500148F84003C948A008025490001A0
-:101D4000A4890080948800803C0208008C4231887D
-:101D500031077FFF10E2000E00000000026020212A
-:101D60000E001965240500010A001B3C000020211B
-:101D70002402002D0E001A7BA382006C2403FFFFB7
-:101D80001443FFE12404FFFF0A001B3D8FBF002026
-:101D900094990080241F800024050001033FC02483
-:101DA000A498008090920080908E0080325100FFB5
-:101DB000001181C200107827000F69C031CC007F6C
-:101DC000018D5825A08B00800E001965026020212E
-:101DD0000A001B3C000020212406FFFF54A6FFD66A
-:101DE0008F84003C026020210E001965240500014B
-:101DF0000A001B3C00002021026020210A001B5623
-:101E00002402000A2404FFFD0A001B3CAF93006477
-:101E10008F88003C27BDFFE8AFB00010AFBF0014B3
-:101E2000910A00D48F8700500080802135490040FE
-:101E30008CE60010A10900D43C0208008C4231B0AD
-:101E400030C53FFF00A2182B106000078F8500549B
-:101E5000240DFF8090AE000D01AE6024318B00FF99
-:101E6000156000080006C382020020212403000D33
-:101E70008FBF00148FB0001027BD00180A001A7B16
-:101E8000A383006C33060003240F000254CFFFF736
-:101E90000200202194A2001C8F85003C24190023FD
-:101EA000A4A200E88CE8000000081E02307F003F7A
-:101EB00013F900353C0A00838CE800188CA600D08A
-:101EC00011060008000000002405000E0E001A7B19
-:101ED000A385006C2407FFFF104700182404FFFFB0
-:101EE0008F85003C90A900D435240020A0A400D404
-:101EF0008F8C0048918E000D31CD007FA18D000D9B
-:101F00008F8300581060001C020020218F84005431
-:101F10008C9800100303782B11E0000D2419001891
-:101F200002002021A399006C0E001A7B2410FFFFF1
-:101F3000105000022404FFFF000020218FBF001476
-:101F40008FB000100080102103E0000827BD0018AA
-:101F50008C8600108F9F00480200202100C31023B0
-:101F6000AFE20010240500010E0019DA240600017A
-:101F70000A001BC8000020210E001965240500017D
-:101F80000A001BC800002021010A5824156AFFD945
-:101F90008F8C0048A0A600FC0A001BB5A386005E3B
-:101FA00030A500FF2406000124A9000100C9102B60
-:101FB0001040000C00004021240A000100A6182354
-:101FC000308B000124C60001006A3804000420425E
-:101FD0001160000200C9182B010740251460FFF8AA
-:101FE00000A6182303E000080100102127BDFFD838
-:101FF000AFB000188F900050AFB1001CAFBF0020F1
-:102000002403FFFF2411002FAFA30010920600004D
-:102010002405000826100001006620260E001BE1A2
-:10202000308400FF00021E003C021EDC34466F417B
-:102030000A001C090000102110A0000900801821CE
-:102040002445000130A2FFFF2C4500080461FFFA7F
-:10205000000320400086202614A0FFF900801821EC
-:102060000E001BE1240500208FA300102629FFFF8E
-:10207000313100FF00034202240700FF1627FFE270
-:102080000102182600035027AFAA0014AFAA0010BF
-:102090000000302127A8001027A7001400E67823AD
-:1020A00091ED000324CE000100C8602131C600FF7D
-:1020B0002CCB00041560FFF9A18D00008FA2001049
-:1020C0008FBF00208FB1001C8FB0001803E0000804
-:1020D00027BD002827BDFFD0AFB3001CAFB0001054
-:1020E000AFBF0028AFB50024AFB40020AFB20018D6
-:1020F000AFB100143C0C80008D880128240FFF80B4
-:102100003C06800A25100100250B0080020F682480
-:102110003205007F016F7024AD8E009000A628214B
-:10212000AD8D002490A600FC3169007F3C0A80043C
-:10213000012A1821A386005E9067007C0080982108
-:10214000AF83003430E20002AF880070AF85003CFE
-:1021500000A018211440000224040034240400309C
-:10216000A384004C8C7200DC30D100FF24040004F6
-:10217000AF92006412240004A380006C8E740004EB
-:102180001680001E3C0880009386005D30C7000169
-:1021900050E0000F8F8600648CA400848CA800841B
-:1021A0002413FF8000936024000C49403110007F0D
-:1021B000013078253C19200001F9682530DF00FE48
-:1021C0003C038000AC6D0830A39F005D8F860064E7
-:1021D0008FBF00288FB500248FB400208FB3001C60
-:1021E0008FB200188FB100148FB0001024020001CC
-:1021F00027BD003003E00008ACA600DC8E7F00089D
-:10220000950201208E67001003E2C8213326FFFFEC
-:1022100030D8000F33150001AF87003816A00058E2
-:10222000A398005C35090C000309382100D8182355
-:10223000AD030084AF8700688E6A00043148FFFF59
-:102240001100007EA78A006090AC00D42407FF80B4
-:1022500000EC302430CB00FF1560004B9786006007
-:10226000938E005E240D000230D5FFFF11CD02A237
-:102270000000A0218F85006402A5802B160000BC01
-:102280009388004C3C11800096240120310400FF0B
-:10229000148500888F8400688F98003833120003FB
-:1022A0005640008530A500FF8F900068310C00FF7C
-:1022B0002406003411860095AF900050920400046B
-:1022C000148001198F8E003CA38000408E0D000405
-:1022D0008DC800D83C0600FF34CCFFFF01AC302491
-:1022E0000106182B14600121AF8600588F87006407
-:1022F00097980060AF8700440307402310C000C7D1
-:10230000A78800608F91003430C300030003582376
-:10231000922A007C3171000302261021000A2082DB
-:10232000309200010012488000492821311FFFFF30
-:1023300003E5C82B132001208F88003C8F850038CF
-:102340008F8800681105025A3C0E3F018E0600007E
-:102350003C0C250000CE682411AC01638F84005032
-:1023600030E500FF0E00187B000030218F88003C14
-:102370008F8700648F8500380A001DE88F8600581B
-:102380000A001C87AF87006890AC00D400EC2024C2
-:10239000309000FF120000169386005D90B5008813
-:1023A00090B400D724A8008832A2003F2446FFE062
-:1023B0002CD10020A39400401220000CAF880050C4
-:1023C000240E000100CE2004308A00191540012B94
-:1023D0003C06800034D80002009858241560022E74
-:1023E0003092002016400234000000009386005D09
-:1023F00030CE000111C0000F978800608CA90084C6
-:102400008CAF00842410FF800130C82400191940CB
-:1024100031ED007F006D38253C1F200000FF902526
-:1024200030CB00FE3C188000AF120830A38B005D5B
-:10243000978800601500FF84000000008E63002074
-:10244000306C00041180FF519386005D2404FFFB73
-:10245000006430243C038000AE66002034660180B6
-:102460008C7301B80660FFFE8F8E0070346A010025
-:102470003C150001ACCE00008C620124240760856D
-:10248000ACC200048D54000402958824522000013F
-:1024900024076083241200023C1810003C0B8000CB
-:1024A000A4C70008A0D2000BAD7801B80A001C5CDC
-:1024B0009386005D30A500FF0E00187B2406000106
-:1024C0008F8800703C05800034A90900250201882E
-:1024D0009388004C304A0007304B00783C03408022
-:1024E0002407FF800163C825014980210047F824A3
-:1024F000310C00FF24060034ACBF0800AF90005040
-:10250000ACB908105586FF6E920400048F84003C1D
-:102510008E110030908E00D431CD001015A0001027
-:102520008F8300642C6F000515E000E400000000BC
-:10253000909800D42465FFFC331200101640000868
-:1025400030A400FF8F9F00688F99003813F90004B2
-:102550003887000130E20001144001C8000000008B
-:102560000E001BF4000000000A001E2900000000FD
-:102570008F84006830C500FF0E00187B2406000120
-:10258000938E004C240A003411CA00A08F85003CB1
-:102590008F860064978300603062FFFF00C288234B
-:1025A000AF910064A78000601280FF900280182124
-:1025B0002414FFFD5474FFA28E6300208E69000472
-:1025C0002403FFBF240BFFEF0135C823AE790004BD
-:1025D00090AF00D431ED007FA0AD00D48E66002016
-:1025E0008F98003CA780006034DF0002AE7F00209F
-:1025F000A70000BC931200D402434024A30800D4D7
-:102600008F95003CAEA000EC92AE00D401CB5024DC
-:10261000A2AA00D40A001D088F85003C8F910038C3
-:10262000AF80006402275821AF8B003800002021C2
-:102630002403FFFF108301B48F85003C8E0C001033
-:102640003C0D08008DAD31B09208000031843FFF91
-:10265000008D802B12000023310D003F3C19080033
-:102660008F3931A88F9F0070000479802408FF8083
-:10267000033F2021008FC8219385005D0328F824A3
-:102680003C0600803C0F800034D80001001F9140C0
-:102690003331007F8F86003C0251502535EE0940D2
-:1026A000332B0078333000073C0310003C02800CD1
-:1026B00001789025020E48210143C02502223821CD
-:1026C00034AE0001ADFF0804AF890054ADF2081428
-:1026D000AF870048ADFF0028ACD90084ADF80830C2
-:1026E000A38E005D9383005E2407000350670028DB
-:1026F00025A3FFE0240C0001146CFFAB8F85003C88
-:102700002411002311B10084000000002402000BFA
-:10271000026020210E001A7BA382006C0040A021E1
-:102720000A001D638F85003C02602021240B000CF1
-:102730000E001A7BA38B006C240AFFFF104AFFBC1B
-:102740002404FFFF8F8E003CA38000408E0D000408
-:102750008DC800D83C0600FF34CCFFFF01AC30240C
-:102760000106182B1060FEE1AF86005802602021A0
-:10277000241200190E001A7BA392006C240FFFFF95
-:10278000104FFFAB2404FFFF0A001CB48F860058D3
-:102790002C7400201280FFDE2402000B000328802E
-:1027A0003C1108012631955400B148218D2D0000BF
-:1027B00001A00008000000008F85003800A710214C
-:1027C00093850040AF82003802251821A383004082
-:1027D000951F00BC0226282137F91000A51900BC5E
-:1027E0005240FF92AF850064246A0004A38A00402F
-:1027F000950900BC24A40004AF8400643532200095
-:10280000A51200BC0A001D85000020218F860064EF
-:102810002CCB00051560FF60978300603072FFFFCE
-:1028200000D240232D18000513000003306400FF80
-:1028300024DFFFFC33E400FF8F8500688F860038BB
-:1028400010A60004388F000131ED000115A00138F9
-:10285000000000008F84003C908C00D4358700106D
-:10286000A08700D48F85003C8F860064978300602A
-:10287000ACA000EC0A001D603062FFFF8CAA00844F
-:102880008CB500843C0410000147102400028940EC
-:1028900032B4007F0234302500C460253C0880003B
-:1028A0002405000102602021240600010E0019DA2F
-:1028B000AD0C08300A001CF48F85003C8C8200ECC3
-:1028C0001222FE7E0260202124090005A389006CEB
-:1028D0000E001A7B2411FFFF1451FE782404FFFF21
-:1028E0000A001D862403FFFF8F8F00508F88003C55
-:1028F0008DF80000AD1800888DE70010AD07009836
-:102900008F8700640A001DE88F8600582407FFFFA8
-:1029100011870005000000000E001B7D02602021D1
-:102920000A001DC10040A0210E001B0202602021F0
-:102930000A001DC10040A0218F9000503C090800F2
-:102940008D2931B08E11001032323FFF0249682BC1
-:1029500011A0000C240AFF808F85005490AE000D5A
-:10296000014E1024304C00FF11800007026020212E
-:102970000011C38233030003240B0001106B010517
-:1029800000000000026020212418000D0E001A7BB8
-:10299000A398006C004020218F85003C0A001D6335
-:1029A0000080A0218F9000503C0A08008D4A31B071
-:1029B0008F8500548E0400100000A0218CB10014FB
-:1029C00030823FFF004A602B8CB200205180FFEE26
-:1029D0000260202190B8000D240BFF800178702444
-:1029E00031C300FF5060FFE80260202100044382F1
-:1029F0003106000314C0FFE40260202194BF001CD4
-:102A00008F99003C8E060028A73F00E88CAF00108D
-:102A1000022F202314C40139026020218F83005823
-:102A200000C36821022D382B14E001352402001860
-:102A30008F8A00488F820034024390218D4B001012
-:102A400001637023AD4E0010AD5200208C4C007419
-:102A50000192282B14A00156026020218F8400547B
-:102A60008E0800248C8600241106000702602021B5
-:102A70002419001C0E001A7BA399006C240FFFFF81
-:102A8000104FFFC52404FFFF8F8400488C8700246B
-:102A900024FF0001AC9F0024125101338F8D0034BC
-:102AA0008DB10074123201303C0B00808E0E00009C
-:102AB00001CB502415400075000000008E03001467
-:102AC0002411FFFF10710006026020212418001B52
-:102AD0000E001A7BA398006C1051FFAF2404FFFF77
-:102AE0008E0300003C0800010068302410C0001371
-:102AF0003C0400800064A024168000090200282104
-:102B0000026020212419001A0E001A7BA399006C80
-:102B1000240FFFFF104FFFA02404FFFF0200282115
-:102B2000026020210E001A9B240600012410FFFFE2
-:102B30001050FF992404FFFF241400018F9F0048C8
-:102B4000026020210280302197F100342405000129
-:102B500026270001A7E700340E0019DA0000000064
-:102B6000000020218F85003C0A001D630080A02109
-:102B70008F9000503C1408008E9431B08E070010E6
-:102B800030E83FFF0114302B10C000618F860054E5
-:102B9000241FFF8090C5000D03E52024309200FF24
-:102BA0005240005C026020218F8D005811A0000768
-:102BB00000078B828F85003C8F89FCC094AF00801A
-:102BC0009539000A132F00F68F870044322C00033A
-:102BD000158000630000000092020002104000D740
-:102BE000000000008E0A0024154000D80260202159
-:102BF0009204000324060002308800FF1506000539
-:102C0000308500FF8F940058528000F2026020212E
-:102C1000308500FF38AD00102DA400012CBF00014D
-:102C200003E43025020028210E001A9B02602021B7
-:102C30002410FFFF105000BE8F85003C8F8300588A
-:102C4000106000C4240500013C1908008F39318C44
-:102C50000323782B15E000B12409002D0260202108
-:102C6000000028210E0019DA240600018F85003C9F
-:102C7000000018210A001D630060A0210E0018A6A4
-:102C8000000000000A001E2900000000AC800020A7
-:102C90000A001EA98E0300140000282102602021D2
-:102CA0000E0019DA240600010A001CF48F85003C8E
-:102CB0000A001DE88F88003C8CB000848CB9008429
-:102CC0003C0310000207482400096940332F007FAD
-:102CD00001AFF82503E32825ACC5083091070001B2
-:102CE00024050001026020210E0019DA30E60001FF
-:102CF0000A001CF48F85003C938F004C2403FFFDD9
-:102D00000A001D65AF8F00640A001D652403FFFFE4
-:102D1000026020212410000D0E001A7BA390006C8D
-:102D2000004018218F85003C0A001D630060A0212F
-:102D30000E0018A600000000978300608F860064D4
-:102D40003070FFFF00D048232D3900051320FE12FC
-:102D50008F85003CACA200EC0A001D603062FFFFD2
-:102D600090C3000D307800085700FFA292040003C2
-:102D700002602021240200100E001A7BA382006C46
-:102D80002403FFFF5443FF9B920400030A001F43E8
-:102D90008F85003C90A8000D3106000810C00095FA
-:102DA0008F9400581680009E026020218E0F000C28
-:102DB0008CA4002055E40005026020218E1F00082D
-:102DC0008CB9002413F9003A02602021240200206B
-:102DD0000E001A7BA382006C2405FFFF1045FEEE57
-:102DE0002404FFFF8F8F0048240CFFF72403FF808B
-:102DF00091E9000D3C14800E3C0B8000012CC8248E
-:102E0000A1F9000D8F8F00343C0708008CE731AC2E
-:102E10008F8D007095E500788F99004800ED902126
-:102E200030BF7FFF001F20400244302130C8007FA8
-:102E300000C3C02401147021AD78002CA5D100007E
-:102E40008F2A002825420001AF2200288F29002C5C
-:102E50008E0C002C012C6821AF2D002C8E07002C2D
-:102E6000AF2700308E050014AF250034973F003A9D
-:102E700027E40001A724003A95F200783C100800EE
-:102E80008E1031B02643000130717FFF1230005C9C
-:102E9000006030218F83003402602021240500016E
-:102EA0000E001965A46600780A001ED200002021D9
-:102EB0008E0700142412FFFF10F200638F8C003C79
-:102EC0008E0900188D8D00D0152D005D0260202127
-:102ED0008E0A00248CA200281142005324020021F3
-:102EE0000E001A7BA382006C1452FFBE2404FFFF65
-:102EF0008F85003C0A001D630080A0212402001F72
-:102F00000E001A7BA382006C2409FFFF1049FEA269
-:102F10002404FFFF0A001E858F83005802602021D1
-:102F20000E001A7BA389006C1450FF518F85003C62
-:102F30002403FFFF0A001D630060A0218CCE002443
-:102F40008E0B0024116EFF2A026020210A001F57F9
-:102F50002402000F0E001965026020218F85003CBD
-:102F60000A001F16000018218E0900003C05008091
-:102F7000012590241640FF452402001A02602021FA
-:102F80000E001A7BA382006C240CFFFF144CFECBB6
-:102F90002404FFFF8F85003C0A001D630080A021F0
-:102FA0002403FFFD0060A0210A001D63AF870064B9
-:102FB0002418001D0E001A7BA398006C2403FFFF49
-:102FC0001443FEA62404FFFF8F85003C0A001D6306
-:102FD0000080A0212412002C0E001A7BA392006C0A
-:102FE0002403FFFF1043FF508F85003C0A001EFDA5
-:102FF00092040003026020210A001F6D24020024B5
-:10300000240B8000006B702431CAFFFF000A13C23A
-:10301000305100FF001180270A001F9E001033C0AE
-:103020000A001F6D240200278E0600288CAE002C9B
-:1030300010CE0008026020210A001FB12402001FE8
-:103040000A001FB12402000E026020210A001FB1F5
-:10305000240200258E04002C1080000D8F83003484
-:103060008C7800740304582B5560000C02602021FA
-:103070008CA800140086A0210114302B10C0FF5A28
-:103080008F8F0048026020210A001FB12402002215
-:10309000026020210A001FB1240200230A001FB190
-:1030A0002402002627BDFFD8AFB3001CAFB1001427
-:1030B000AFBF0020AFB20018AFB000103C028000DC
-:1030C0008C5201408C4B01483C048000000B8C0268
-:1030D000322300FF317300FF8C8501B804A0FFFE8E
-:1030E00034900180AE1200008C8701442464FFF00C
-:1030F000240600022C830013AE070004A61100086A
-:10310000A206000BAE1300241060004F8FBF0020FA
-:10311000000448803C0A0801254A95D4012A402130
-:103120008D04000000800008000000003C0308003F
-:103130008C6331A831693FFF0009998000728021BA
-:10314000021370212405FF80264D0100264C0080CB
-:103150003C02800031B1007F3198007F31CA007F8E
-:103160003C1F800A3C1980043C0F800C01C52024C0
-:1031700001A5302401853824014F1821AC460024D4
-:10318000023F402103194821AC470090AC4400287D
-:10319000AF830048AF88003CAF8900340E0019317E
-:1031A000016080213C0380008C6B01B80560FFFE4C
-:1031B0008F8700488F86003C3465018090E8000DC1
-:1031C000ACB20000A4B000060008260000041603FC
-:1031D00000029027001227C21080008124C20088BC
-:1031E000241F6082A4BF0008A0A0000524020002E2
-:1031F000A0A2000B8F8B0034000424003C082700A1
-:1032000000889025ACB20010ACA00014ACA0002443
-:10321000ACA00028ACA0002C8D6900382413FF80DE
-:10322000ACA9001890E3000D02638024320500FF72
-:1032300010A000058FBF002090ED000D31AC007F85
-:10324000A0EC000D8FBF00208FB3001C8FB20018C0
-:103250008FB100148FB000103C0A10003C0E8000AB
-:1032600027BD002803E00008ADCA01B8265F0100B1
-:103270002405FF8033F8007F3C06800003E57824B6
-:103280003C19800A03192021ACCF0024908E00D471
-:1032900000AE682431AC00FF11800024AF84003CF4
-:1032A000248E008895CD00123C0C08008D8C31A82E
-:1032B00031AB3FFF01924821000B5180012A402190
-:1032C00001052024ACC400283107007F3C06800C97
-:1032D00000E620219083000D00A31024304500FF5C
-:1032E00010A0FFD8AF8400489098000D330F001055
-:1032F00015E0FFD58FBF00200E001931000000003F
-:103300003C0380008C7901B80720FFFE000000001C
-:10331000AE1200008C7F0144AE1F0004A61100080D
-:1033200024110002A211000BAE1300243C1308016B
-:1033300092739790327000015200FFC38FBF00203C
-:103340000E00216E024020210A00208B8FBF00203A
-:103350003C1260008E452C083C03F0033462FFFFF2
-:1033600000A2F824AE5F2C088E582C083C1901C02E
-:1033700003199825AE532C080A00208B8FBF00201C
-:10338000264D010031AF007F3C10800A240EFF80E3
-:1033900001F0282101AE60243C0B8000AD6C0024BC
-:1033A0001660FFA8AF85003C24110003A0B100FC0B
-:1033B0000A00208B8FBF002026480100310A007FC1
-:1033C0003C0B800A2409FF80014B30210109202495
-:1033D0003C078000ACE400240A00208AAF86003C51
-:1033E000944E0012320C3FFF31CD3FFF15ACFF7DF4
-:1033F000241F608290D900D42418FF8003197824F8
-:1034000031EA00FF1140FF770000000024070004AC
-:10341000A0C700FC8F870048241160842406000D9B
-:10342000A4B10008A0A600050A002075240200022D
-:103430003C0400012484977C24030014240200FE31
-:103440003C010800AC2431EC3C010800AC2331E81D
-:103450003C010801A42297983C0408012484979811
-:103460000000182100643021A0C30004246300017F
-:103470002C6500FF54A0FFFC006430213C070800CD
-:1034800024E7010003E00008AF87007C00A058217A
-:10349000008048210000102114A0001200005021DB
-:1034A0000A00216A000000003C010801A42097984E
-:1034B0003C05080194A597988F82007C3C0C08017C
-:1034C000258C979800E2182100AC2021014B302B6D
-:1034D000A089000400001021A460000810C0003979
-:1034E000010048218F86007C0009384000E9402116
-:1034F0000008388000E6282190A8000B90B9000A47
-:103500000008204000881021000218800066C021B9
-:10351000A319000A8F85007C00E5782191EE000A4E
-:1035200091E6000B000E684001AE6021000C208087
-:1035300000851021A046000B3C0308019063979280
-:10354000106000222462FFFF8F83003C3C010801D1
-:10355000A0229792906C00FF1180000400000000F0
-:10356000906E00FF25CDFFFFA06D00FF3C19080104
-:1035700097399798272300013078FFFF2F0F00FF1E
-:1035800011E0FFC9254A00013C010801A4239798D6
-:103590003C05080194A597988F82007C3C0C08019B
-:1035A000258C979800E2182100AC2021014B302B8C
-:1035B000A089000400001021A460000814C0FFC905
-:1035C0000100482103E000080000000003E00008BB
-:1035D0002402000227BDFFE0248501002407FF80AC
-:1035E000AFB00010AFBF0018AFB1001400A718248F
-:1035F0003C10800030A4007F3C06800A0086282111
-:103600008E110024AE03002490A200FF1440000895
-:10361000AF85003CA0A000098FBF0018AE110024A8
-:103620008FB100148FB0001003E0000827BD002008
-:1036300090A900FD90A800FF312400FF0E00211C7E
-:10364000310500FF8F85003C8FBF0018A0A0000946
-:10365000AE1100248FB100148FB0001003E00008F9
-:1036600027BD002027BDFFD0AFB20020AFB1001CA6
-:10367000AFB00018AFBF002CAFB40028AFB3002428
-:103680003C0980009533011635320C00952F011A44
-:103690003271FFFF023280218E08000431EEFFFFFD
-:1036A000248B0100010E6821240CFF8025A5FFFF5B
-:1036B000016C50243166007F3C07800AAD2A00244B
-:1036C00000C73021AF850078AF8800743C01080145
-:1036D000A020979190C300090200D021008098217A
-:1036E000306300FF2862000510400048AF86003CB0
-:1036F000286400021480008E24140001240D0005AB
-:103700003C010801A02D977590CC00FD3C010801FB
-:10371000A02097763C010801A020977790CB000A63
-:10372000240AFF80318500FF014B4824312700FF28
-:1037300010E0000C000058213C1280083651008037
-:103740008E2F00308CD0005C01F0702305C0018EFC
-:103750008F87007490D4000A3284007FA0C4000ACE
-:103760008F86003C3C118008363000808E0F003080
-:103770008F87007400EF702319C000EE0000000076
-:1037800090D4000924120002328400FF10920247F4
-:10379000000000008CC2005800E2F82327F9FFFF68
-:1037A0001B2001300000000090C50009240800041F
-:1037B00030A300FF10680057240A00013C010801F3
-:1037C000A02A977590C900FF252700013C01080138
-:1037D000A02797743C0308019063977524060005A1
-:1037E0001066006A2C780005130000C400009021C8
-:1037F0000003F8803C0408012484962003E4C821D7
-:103800008F25000000A0000800000000241800FF21
-:103810001078005C0000000090CC000A90CA0009FB
-:103820003C080801910897913187008000EA4825FB
-:103830003C010801A029977C90C500FD3C140801BB
-:1038400092949792311100013C010801A025977DC7
-:1038500090DF00FE3C010801A03F977E90D200FF60
-:103860003C010801A032977F8CD900543C0108012B
-:10387000AC3997808CD000583C010801AC3097845B
-:103880008CC3005C3C010801AC34978C3C010801FE
-:10389000AC239788162000088FBF002C8FB4002817
-:1038A0008FB300248FB200208FB1001C8FB000189E
-:1038B00003E0000827BD00303C1180009624010E73
-:1038C0000E000FD43094FFFF3C0B08018D6B9794D2
-:1038D0000260382102802821AE2B01803C130801B0
-:1038E0008E73977401602021240600830E00102F30
-:1038F000AFB300108FBF002C8FB400288FB300240B
-:103900008FB200208FB1001C8FB0001803E00008B8
-:1039100027BD00303C1808008F1831FC270F00012C
-:103920003C010800AC2F31FC0A0021FF0000000020
-:103930001474FFB900000000A0C000FF3C0508009F
-:103940008CA531E43C0308008C6331E03C020800A4
-:103950008C4232048F99003C34A80001241F0002DD
-:103960003C010801AC2397943C010801A0289790E2
-:103970003C010801A0229793A33F00090A0021B847
-:103980008F86003C0E00216E000000000A0021FF1F
-:103990008F86003C3C1F080193FF97742419000197
-:1039A00013F902298F8700743C1008019210977850
-:1039B0003C06080190C6977610C000050200A021C1
-:1039C0003C04080190849779109001E48F87007C73
-:1039D000001088408F9F007C023048210009C88079
-:1039E000033F702195D80008270F0001A5CF0008DC
-:1039F0003C040801908497793C05080190A59776CE
-:103A00000E00211C000000008F87007C0230202166
-:103A10000004308000C720218C8500048F8200784C
-:103A200000A2402305020006AC8200048C8A00003C
-:103A30008F830074014310235C400001AC830000BD
-:103A40008F86003C90CB00FF2D6C00025580002D2E
-:103A5000241400010230F821001F408001072821B2
-:103A600090B9000B8CAE00040019C04003197821F6
-:103A7000000F1880006710218C4D000001AE8823D4
-:103A80002630FFFF5E00001F241400018C44000458
-:103A90008CAA0000008A482319200019240E000473
-:103AA0003C010801A02E977590AD000B8CAB000473
-:103AB000000D8840022D8021001010800047102149
-:103AC0008C440004016460230582020094430008D2
-:103AD00090DF00FE90B9000B33E500FF54B90004FD
-:103AE0000107A021A0D400FE8F87007C0107A02140
-:103AF0009284000B0E00211C240500018F86003CDF
-:103B000024140001125400962E50000116000042A9
-:103B10003C08FFFF241900021659FF3F0000000077
-:103B2000A0C000FF8F86003CA0D200090A0021FF40
-:103B30008F86003C90C700092404000230E300FF98
-:103B40001064016F24090004106901528F88007805
-:103B50008CCE0054010E682325B1000106200175AA
-:103B6000241800043C010801A03897753C010801A5
-:103B7000A020977490D400FD90D200FF2E4F000239
-:103B800015E0FF14328400FF000438408F89007C68
-:103B900090DF00FF00E41021000220800089C8218E
-:103BA0002FE500029324000B14A0FF0A2407000253
-:103BB0000004184000648021001058800169282109
-:103BC0008CAC0004010C50230540FF0200000000F3
-:103BD0003C0308019063977614600005246F000190
-:103BE0003C010801A02497793C010801A0279777A0
-:103BF0003C010801A02F977690CE00FF24E700013A
-:103C000031CD00FF01A7882B1220FFE990A4000B03
-:103C10000A0021EE000000003C0508018CA5977405
-:103C20003C12000400A8F82413F200062402000548
-:103C30003C09080191299775152000022402000310
-:103C4000240200053C010801A022979190C700FFC3
-:103C500014E0012024020002A0C200090A0021FF92
-:103C60008F86003C90CC00FF1180FEDA240A000110
-:103C70008F8C00788F89007C240F000301806821DD
-:103C80001160001E240E0002000540400105A02125
-:103C900000142080008990218E510004019180231E
-:103CA0000600FECC000000003C020801904297761E
-:103CB00014400005245800013C010801A02A977710
-:103CC0003C010801A02597793C010801A0389776AE
-:103CD00090DF00FF010510210002C88033E500FFDE
-:103CE000254A00010329202100AA402B1500FEB916
-:103CF0009085000B1560FFE5000540400005404041
-:103D000001051821000310803C010801A02A9774C6
-:103D10003C010801A0259778004918218C64000413
-:103D200000E4F82327F9FFFF1F20FFE9000000004F
-:103D30008C63000000E358230560013A01A3882347
-:103D400010E301170184C0231B00FEA20000000045
-:103D50003C010801A02E97750A00232D240B0001B9
-:103D6000240E0004A0CE00093C0D08008DAD31F8F2
-:103D70008F86003C25A200013C010800AC2231F8EE
-:103D80000A0021FF000000008CD9005C00F9C0236C
-:103D90001F00FE7B000000008CDF005C10FFFF6551
-:103DA0008F8400788CC3005C0083402325020001CF
-:103DB0001C40FF60000000008CC9005C24870001EB
-:103DC00000E9282B10A0FE943C0D80008DAB01046F
-:103DD0003C0C0001016C50241140FE8F24020010A5
-:103DE0003C010801A02297910A0021FF0000000079
-:103DF0008F9100788F86003C26220001ACC2005CC7
-:103E00000A0022BA241400018F87003C2404FF809A
-:103E10000000882190E9000A2414000101243025C3
-:103E2000A0E6000A3C05080190A597763C0408012D
-:103E3000908497790E00211C000000008F86003CC2
-:103E40008F85007C90C800FD310700FF00074040CF
-:103E50000107F821001FC0800305C8219323000B30
-:103E6000A0C300FD8F85007C8F86003C0305602188
-:103E7000918F000B000F704001CF6821000D8080F2
-:103E8000020510218C4B0000ACCB00548D84000443
-:103E90008F830078006450231940000224820001BF
-:103EA0002462000101074821ACC2005C0009308097
-:103EB00000C5402100E02021240500010E00211C46
-:103EC0009110000B8F86003C90C500FF10A0FF0CE6
-:103ED000001070408F85007C01D06821000D10809B
-:103EE000004558218D6400008F8C00780184502398
-:103EF0002547000104E0FF02263100013C030801D0
-:103F0000906397762E2F0002247800013C0108016F
-:103F1000A03897763C010801A034977711E0FEF8AD
-:103F2000020038210A00238D000740408F84003CA6
-:103F30008F8300788C85005800A340230502FE9AE9
-:103F4000AC8300580A002263000000003C0708010F
-:103F500090E79792240200FF10E200BE8F86003C9B
-:103F60003C1108019631979A3C0308012463979805
-:103F7000262500013230FFFF30ABFFFF0203602136
-:103F80002D6A00FF1540008D918700043C01080157
-:103F9000A420979A8F88003C0007484001272821D9
-:103FA000911800FF0005308024050001271400014E
-:103FB000A11400FF3C120801925297928F88007C56
-:103FC0008F8E0074264F000100C820213C0108019B
-:103FD000A02F9792AC8E00008F8D0078A4850008EA
-:103FE000AC8D00043C030801906397741460007763
-:103FF000000090213C010801A0259774A087000BC8
-:104000008F8C007C00CC5021A147000A8F82003C9D
-:10401000A04700FD8F84003CA08700FE8F86003CF7
-:104020008F9F0074ACDF00548F990078ACD9005892
-:104030008F8D007C0127C02100185880016DA021C0
-:10404000928F000A000F704001CF18210003888072
-:10405000022D8021A207000B8F86007C0166602163
-:10406000918A000B000A1040004A20210004288099
-:1040700000A64021A107000A3C07800834E900801F
-:104080008D2200308F86003CACC2005C0A0022BA50
-:104090002414000190CA00FF1540FEAD8F880078FF
-:1040A000A0C400090A0021FF8F86003CA0C000FDCB
-:1040B0008F98003C24060001A30000FE3C0108018B
-:1040C000A02697753C010801A02097740A0021EEF4
-:1040D0000000000090CB00FF3C04080190849793FF
-:1040E000316C00FF0184502B1540000F24020003A7
-:1040F00024020004A0C200090A0021FF8F86003CB0
-:1041000090C3000A2410FF8002035824316C00FF82
-:104110001180FDC1000000003C010801A02097753E
-:104120000A0021EE00000000A0C200090A0021FFE1
-:104130008F86003C90D4000A2412FF800254482449
-:10414000312800FF1500FFF4240200083C0108019B
-:10415000A02297910A0021FF000000000010884073
-:104160008F8B0074023018210003688001A7202182
-:10417000AC8B00008F8A0078240C0001A48C00080E
-:10418000AC8A00043C05080190A597762402000142
-:1041900010A2FE1E24A5FFFF0A0022799084000BC6
-:1041A0000184A0231A80FD8B000000003C0108015F
-:1041B000A02E97750A00232D240B00013C01080155
-:1041C000A425979A0A0023DF8F88003C240B000166
-:1041D000106B00228F98003C8F85003C90BF00FF41
-:1041E00033F900FF1079002B000000003C1F08018C
-:1041F00093FF9778001FC840033FC0210018A0809C
-:104200000288782191EE000AA08E000A8F8D007C32
-:104210003C0308019063977800CD88210A002405AB
-:10422000A223000B263000010600003101A49023D8
-:104230000640002B240200033C010801A02F9775C3
-:104240000A00232D240B00018F89003C0A00226301
-:10425000AD2700540A0022B924120001931400FD76
-:10426000A094000B8F88003C8F8F007C910E00FE85
-:1042700000CF6821A1AE000A8F91003CA22700FD6B
-:104280008F8300748F90003CAE0300540A00240614
-:104290008F8D007C90B000FEA090000A8F8B003CB8
-:1042A0008F8C007C916A00FD00CC1021A04A000B8D
-:1042B0008F84003CA08700FE8F8600788F85003CAD
-:1042C000ACA600580A0024068F8D007C94B8000824
-:1042D000ACA40004030378210A0022ADA4AF0008B7
-:1042E0003C010801A02297750A0021EE00000000A1
-:1042F00090CF0009240D000431EE00FF11CDFD85A3
-:10430000240200013C010801A02297750A0021EE59
-:0443100000000000A9
-:0C43140008003344080033440800342043
-:10432000080033F4080033D8080033280800332885
-:10433000080033280800334C800801008008008002
-:10434000800800005F865437E4AC62CC50103A45D8
-:1043500036621985BF14C0E81BC27A1E84F4B556B4
-:10436000094EA6FE7DDA01E7C04D748108005B3876
-:1043700008005B7C08005B2008005B2008005B20D5
-:1043800008005B2008005B3808005B2008005B2009
-:1043900008005B8408005B2008005A9808005B2036
-:1043A00008005B2008005B8408005B2008005B209D
-:1043B00008005B2008005B2008005B2008005B20F1
-:1043C00008005B2008005B2008005B2008005B20E1
-:1043D00008005B5808005B2008005B5808005B2061
-:1043E00008005B2008005B2008005B5C08005B584D
-:1043F00008005B2008005B2008005B2008005B20B1
-:1044000008005B2008005B2008005B2008005B20A0
-:1044100008005B2008005B2008005B2008005B2090
-:1044200008005B2008005B2008005B2008005B2080
-:1044300008005B2008005B2008005B2008005B2070
-:1044400008005B5C08005B5C08005B2008005B5CAC
-:1044500008005B2008005B2008005B2008005B2050
-:1044600008005B2008005B2008005B2008005B2040
-:1044700008005B2008005B2008005B2008005B2030
-:1044800008005B2008005B2008005B2008005B2020
-:1044900008005B2008005B2008005B2008005B2010
-:1044A00008005B2008005B2008005B2008005B2000
-:1044B00008005B2008005B2008005B2008005B20F0
-:1044C00008005B2008005B2008005B2008005B20E0
-:1044D00008005B2008005B2008005B2008005B20D0
-:1044E00008005B2008005B2008005B2008005B20C0
-:1044F00008005B2008005B2008005B2008005B20B0
-:1045000008005B2008005B2008005B2008005B209F
-:1045100008005B2008005B2008005B2008005B208F
-:1045200008005B2008005B2008005B2008005B207F
-:1045300008005B2008005B2008005B2008005B206F
-:1045400008005B2008005B2008005B2008005B205F
-:1045500008005B2008005B2008005B2008005B204F
-:1045600008005B2008005B2008005B2008005BA0BF
-:10457000080078F008007B54080078FC080076F00A
-:10458000080078FC08007988080078FC080076F0BC
-:10459000080076F0080076F0080076F0080076F063
-:1045A000080076F0080076F0080076F0080076F053
-:1045B000080076F00800791C0800790C080076F0F5
-:1045C000080076F0080076F0080076F0080076F033
-:1045D000080076F0080076F0080076F0080076F023
-:1045E000080076F0080076F0080076F00800790CF4
-:1045F0000800839C08008228080083640800822841
-:1046000008008334080081100800822808008228EE
-:1046100008008228080082280800822808008228D2
-:1046200008008228080082280800822808008228C2
-:1046300008008228080082280800825008008DD4D3
-:1046400008008F3008008F100800897808008DEC72
-:104650000A00012400000000000000000000000D1E
-:10466000747061362E322E31000000000602010106
-:10467000000000000000000000000000000000003A
-:10468000000000000000000000000000000000002A
-:10469000000000000000000000000000000000001A
-:1046A000000000000000000000000000000000000A
-:1046B00000000000000000000000000000000000FA
-:1046C00000000000000000000000000000000000EA
-:1046D00000000000000000000000000000000000DA
-:1046E0000000000010000003000000000000000DAA
-:1046F0000000000D3C020800244217203C03080083
-:1047000024632A10AC4000000043202B1480FFFDDE
-:10471000244200043C1D080037BD2FFC03A0F021FB
-:104720003C100800261004903C1C0800279C172011
-:104730000E000262000000000000000D2402FF8055
-:1047400027BDFFE000821024AFB00010AF42002070
-:10475000AFBF0018AFB10014936500043084007F30
-:10476000034418213C0200080062182130A50020F3
-:10477000036080213C080111277B000814A000027F
-:104780002466005C246600589202000497430104EA
-:10479000920400043047000F3063FFFF3084004074
-:1047A00000672823108000090000482192020005BC
-:1047B00030420004104000050000000010A000037B
-:1047C0000000000024A5FFFC24090004920200055B
-:1047D00030420004104000120000000010A0001041
-:1047E000000000009602000200A7202101044025DD
-:1047F0002442FFFEA7421016920300042402FF8009
-:1048000000431024304200FF104000033C0204002B
-:104810000A000174010240258CC20000AF4210184A
-:104820008F4201780440FFFE2402000AA7420140A3
-:1048300096020002240400093042000700021023FF
-:1048400030420007A7420142960200022442FFFEC6
-:10485000A7420144A740014697420104A7420148EC
-:104860008F42010830420020504000012404000122
-:104870009202000430420010144000023483001001
-:1048800000801821A743014A00000000000000003A
-:104890000000000000000000AF4810000000000011
-:1048A0000000000000000000000000008F42100027
-:1048B0000441FFFE3102FFFF10400007000000002E
-:1048C0009202000430420040144000030000000047
-:1048D0008F421018ACC20000960200063042FFFF63
-:1048E00024420002000210430002104003628821AB
-:1048F000962200001120000D3044FFFF00A7102178
-:104900008F8300388F45101C000210820002108037
-:1049100000431021AC45000030A6FFFF0E00058DBE
-:1049200000052C0200402021A62200009203000472
-:104930002402FF8000431024304200FF1040001F7B
-:104940000000000092020005304200021040001BEF
-:10495000000000009742100C2442FFFEA7421016F0
-:10496000000000003C02040034420030AF4210005E
-:104970000000000000000000000000000000000037
-:104980008F4210000441FFFE000000009742100C0F
-:104990008F45101C3042FFFF24420030000210827D
-:1049A00000021080005B1021AC45000030A6FFFF24
-:1049B0000E00058D00052C02A622000096040002C0
-:1049C000248400080E0001E93084FFFF97440104AD
-:1049D0000E0001F73084FFFF8FBF00188FB1001465
-:1049E0008FB000103C02100027BD002003E000083B
-:1049F000AF4201783084FFFF308200078F850024AA
-:104A000010400002248300073064FFF800A4102146
-:104A100030421FFF03421821247B4000AF8500284D
-:104A2000AF82002403E00008AF4200843084FFFF1F
-:104A30003082000F8F85002C8F86003410400002DA
-:104A40002483000F3064FFF000A410210046182BCF
-:104A5000AF8500300046202314600002AF82002C96
-:104A6000AF84002C8F82002C340480000342182174
-:104A700000641821AF83003803E00008AF420080D3
-:104A80008F820014104000088F8200048F82FFDCA8
-:104A9000144000058F8200043C02FFBF3442FFFF38
-:104AA000008220248F82000430430006240200028A
-:104AB0001062000F3C0201012C620003504000050F
-:104AC000240200041060000F3C0200010A000230C2
-:104AD0000000000010620005240200061462000CB1
-:104AE0003C0201110A000229008210253C0200113B
-:104AF00000821025AF421000240200010A0002309B
-:104B0000AF82000C00821025AF421000AF80000C75
-:104B100000000000000000000000000003E00008AA
-:104B2000000000008F82000C104000040000000014
-:104B30008F4210000441FFFE0000000003E0000867
-:104B4000000000008F8200102443F800000231C2F0
-:104B500024C2FFF02C630301106000030002104226
-:104B60000A000257AC8200008F85001800C5102B88
-:104B70001440000B0000182100C510232447000139
-:104B80008F82001C00A210212442FFFF0046102B40
-:104B9000544000042402FFFF0A000257AC870000C3
-:104BA0002402FFFF0A000260AC8200008C82000039
-:104BB00000021940006218210003188000621821C9
-:104BC000000318803C0208002442175C0062182190
-:104BD00003E000080060102127BDFFD8AFBF002010
-:104BE000AFB1001CAFB000183C0460088C825000CC
-:104BF0002403FF7F3C066000004310243442380C3D
-:104C0000AC8250008CC24C1C3C1A80000002160280
-:104C10003042000F10400007AF82001C8CC34C1CB8
-:104C20003C02001F3442FC0000621824000319C239
-:104C3000AF8300188F420008275B40003442000118
-:104C4000AF420008AF8000243C02601CAF400080EF
-:104C5000AF4000848C4500088CC3080834028000F3
-:104C6000034220212402FFF0006218243C0200804D
-:104C70003C010800AC2204203C025709AF840038F4
-:104C800014620004AF850034240200010A0002927D
-:104C9000AF820014AF8000148F4200003842000140
-:104CA000304200011440FFFC8F82001410400016B7
-:104CB0000000000097420104104000058F830000AF
-:104CC000146000072462FFFF0A0002A72C62000A9A
-:104CD0002C620010504000048F8300002462000109
-:104CE000AF8200008F8300002C62000A1440000392
-:104CF0002C6200070A0002AEAF80FFDC1040000209
-:104D000024020001AF82FFDC8F4301088F440100C1
-:104D100030622000AF83000410400008AF84001010
-:104D20003C0208008C42042C244200013C01080093
-:104D3000AC22042C0A00058A3C02400030650200C7
-:104D400014A0000324020F001482026024020D004C
-:104D500097420104104002C83C024000306240000B
-:104D6000144000AD8F8200388C4400088F420178D7
-:104D70000440FFFE24020800AF420178240200082C
-:104D8000A7420140A7400142974201048F840004DA
-:104D90003051FFFF308200011040000702208021C7
-:104DA0002623FFFE240200023070FFFFA7420146C7
-:104DB0000A0002DBA7430148A74001463C02080065
-:104DC0008C42043C1440000D8F8300103082002080
-:104DD0001440000224030009240300010060202184
-:104DE0008F83001024020900506200013484000403
-:104DF000A744014A0A0002F60000000024020F0046
-:104E00001462000530820020144000062403000DC7
-:104E10000A0002F5240300051440000224030009DF
-:104E200024030001A743014A3C0208008C420420ED
-:104E30003C0400480E00020C004420250E00023500
-:104E4000000000008F82000C1040003E00000000B7
-:104E50008F4210003C030020004310241040003912
-:104E60008F82000430420002104000360000000033
-:104E700097421014144000330000000097421008BD
-:104E80008F8800383042FFFF24420006000218825B
-:104E90000003388000E83021304300018CC400005A
-:104EA00010600004304200030000000D0A000337C8
-:104EB00000E81021544000103084FFFF3C05FFFF44
-:104EC00000852024008518260003182B0004102BD1
-:104ED0000043102410400005000000000000000006
-:104EE0000000000D00000000240002228CC200001F
-:104EF0000A000336004520253883FFFF0003182BE6
-:104F00000004102B00431024104000050000000096
-:104F1000000000000000000D000000002400022B33
-:104F20008CC200003444FFFF00E81021AC440000B4
-:104F30003C0208008C420430244200013C0108007D
-:104F4000AC2204308F6200008F840038AF820008EA
-:104F50008C8300003402FFFF1462000F0000102158
-:104F60003C0508008CA504543C0408008C840450C3
-:104F700000B0282100B0302B0082202100862021A3
-:104F80003C010800AC2504543C010800AC2404504A
-:104F90000A000580240400088C82000030420100D1
-:104FA0001040000F000010213C0508008CA5044CA7
-:104FB0003C0408008C84044800B0282100B0302B49
-:104FC00000822021008620213C010800AC25044CF1
-:104FD0003C010800AC2404480A00058024040008B1
-:104FE0003C0508008CA504443C0408008C84044063
-:104FF00000B0282100B0302B008220210086202123
-:105000003C010800AC2504443C010800AC240440E9
-:105010000A000580240400088F6200088F620000E7
-:1050200000021602304300F0240200301062000536
-:1050300024020040106200E08F8200200A000588F0
-:105040002442000114A00005000000000000000040
-:105050000000000D00000000240002568F4201787D
-:105060000440FFFE000000000E00023D27A40010D7
-:105070001440000500408021000000000000000DE9
-:10508000000000002400025D8E02000010400005B8
-:1050900000000000000000000000000D0000000003
-:1050A000240002608F62000C04430003240200010C
-:1050B0000A00042EAE000000AE0200008F8200380D
-:1050C0008C480008A20000078F65000C8F64000464
-:1050D00030A3FFFF0004240200852023308200FF5C
-:1050E0000043102124420005000230832CC20081BD
-:1050F000A605000A14400005A204000400000000F8
-:105100000000000D00000000240002788F850038A8
-:105110000E0005AB260400148F6200048F430108C3
-:10512000A60200083C02100000621824106000086B
-:105130000000000097420104920300072442FFECA4
-:10514000346300023045FFFF0A0003C3A2030007D7
-:10515000974201042442FFF03045FFFF9606000805
-:105160002CC200135440000592030007920200076E
-:1051700034420001A202000792030007240200014A
-:1051800010620005240200031062000B8F820038B9
-:105190000A0003E030C6FFFF8F8200383C04FFFFA7
-:1051A0008C43000C0064182400651825AC43000CE7
-:1051B0000A0003E030C6FFFF3C04FFFF8C430010F1
-:1051C0000064182400651825AC43001030C6FFFFAA
-:1051D00024C2000200021083A20200058F8300385F
-:1051E000304200FF00021080004328218CA80000FC
-:1051F0008CA20000240300040002170214430012D2
-:1052000000000000974201043C03FFFF0103182443
-:105210003042FFFF004610232442FFFE006240257B
-:10522000ACA8000092030005306200FF000210806D
-:1052300000501021904200143042000F0043102112
-:105240000A000415A20200068CA40004974201047F
-:105250009603000A3088FFFF3042FFFF004610230C
-:105260002442FFD60002140001024025ACA800042D
-:1052700092020007920400052463002800031883AB
-:105280000064182134420004A2030006A2020007B1
-:105290008F8200042403FFFB3442000200431024E9
-:1052A000AF820004920300068F8700380003188045
-:1052B000007010218C4400203C02FFF63442FFFFB6
-:1052C0000082402400671821AE04000CAC68000C7A
-:1052D000920500063C03FF7F8E02000C000528802B
-:1052E00000B020213463FFFF01033024948800269E
-:1052F00000A7282100431024AE02000CAC86002039
-:10530000AC880024ACA8001024020010A742014081
-:1053100024020002A7400142A7400144A7420146DF
-:10532000974201043C0400082442FFFEA7420148C2
-:10533000240200010E00020CA742014A9603000A53
-:105340009202000400431021244200023042000770
-:1053500000021023304200070E000235AE0200109A
-:105360008F6200003C0308008C6304442404001096
-:10537000AF820008974201043042FFFF2442FFFE43
-:1053800000403821000237C33C0208008C42044030
-:10539000006718210067282B0046102100451021C6
-:1053A0003C010800AC2304443C010800AC2204404A
-:1053B0000A0005150000000014A000050000000010
-:1053C000000000000000000D000000002400030A9F
-:1053D0008F4201780440FFFE000000000E00023DF5
-:1053E00027A40014144000050040802100000000A4
-:1053F0000000000D00000000240003118E020000D8
-:105400005440000692020007000000000000000D5A
-:10541000000000002400031C920200073042000438
-:10542000104000058F8200042403FFFB3442000279
-:1054300000431024AF8200048F620004044300087C
-:1054400092020007920200068E03000CAE000000DC
-:105450000002108000501021AC430020920200078F
-:1054600030420004544000099602000A92020005EE
-:105470003C03000100021080005010218C460018EF
-:1054800000C33021AC4600189602000A92060004C0
-:10549000277100080220202100C2302124C6000507
-:1054A000260500140E0005AB0006308292040006AB
-:1054B0008F6500043C027FFF0004208000912021C2
-:1054C0008C8300043442FFFF00A2282400651821C9
-:1054D000AC830004920200079204000592030004CA
-:1054E000304200041040001496070008308400FF8A
-:1054F00000042080009120218C8600049742010442
-:105500009605000A306300FF3042FFFF0043102180
-:105510000045102130E3FFFF004310232442FFD851
-:1055200030C6FFFF0002140000C23025AC86000424
-:105530000A0004C992030007308500FF0005288097
-:1055400000B128218CA4000097420104306300FFC1
-:105550003042FFFF00431021004710233C03FFFFB0
-:10556000008320243042FFFF00822025ACA40000ED
-:1055700092030007240200011062000600000000F0
-:105580002402000310620011000000000A0004EC75
-:105590008E03001097420104920300049605000A4E
-:1055A0008E24000C00431021004510212442FFF2FC
-:1055B0003C03FFFF008320243042FFFF00822025B0
-:1055C000AE24000C0A0004EC8E0300109742010484
-:1055D000920300049605000A8E2400100043102157
-:1055E000004510212442FFEE3C03FFFF00832024EE
-:1055F0003042FFFF00822025AE2400108E030010F1
-:105600002402000AA7420140A74301429603000A70
-:10561000920200043C04004000431021A7420144D0
-:10562000A740014697420104A74201482402000115
-:105630000E00020CA742014A0E00023500000000D5
-:105640008F6200009203000400002021AF82000856
-:10565000974201049606000A3042FFFF00621821BB
-:10566000006028213C0308008C6304443C020800CD
-:105670008C42044000651821004410210065382B3D
-:10568000004710213C010800AC2304443C01080001
-:10569000AC22044092040004008620212484000AE5
-:1056A0003084FFFF0E0001E9000000009744010470
-:1056B0003084FFFF0E0001F7000000003C021000E4
-:1056C000AF4201780A0005878F82002014820027EC
-:1056D0003062000697420104104000673C0240001F
-:1056E0003062400010400005000000000000000093
-:1056F0000000000D00000000240004208F4201780B
-:105700000440FFFE24020800AF4201782402000892
-:10571000A7420140A74001428F8200049743010441
-:1057200030420001104000073070FFFF2603FFFEEB
-:1057300024020002A7420146A74301480A00053F90
-:105740002402000DA74001462402000DA742014A91
-:105750008F62000024040008AF8200080E0001E9F7
-:10576000000000000A00051902002021104000423C
-:105770003C02400093620000304300F0240200101D
-:105780001062000524020070106200358F82002034
-:105790000A000588244200018F620000974301043B
-:1057A0003050FFFF3071FFFF8F4201780440FFFE51
-:1057B0003202000700021023304200072403000ACF
-:1057C0002604FFFEA7430140A7420142A74401442B
-:1057D000A7400146A75101488F42010830420020EE
-:1057E000144000022403000924030001A743014AD6
-:1057F0000E00020C3C0400400E00023500000000C8
-:105800003C0708008CE70444021110212442FFFEEB
-:105810003C0608008CC604400040182100E33821F3
-:10582000000010218F65000000E3402B00C23021F2
-:105830002604000800C830213084FFFFAF8500082F
-:105840003C010800AC2704443C010800AC2604409D
-:105850000E0001E9000000000A00051902202021C5
-:105860000E00013B000000008F8200202442000156
-:10587000AF8200203C024000AF4201380A00029291
-:10588000000000003084FFFF30C6FFFF00052C0041
-:1058900000A628253882FFFF004510210045282B4F
-:1058A0000045102100021C023042FFFF004310217E
-:1058B00000021C023042FFFF004310213842FFFF6C
-:1058C00003E000083042FFFF3084FFFF30A5FFFFF8
-:1058D0000000182110800007000000003082000145
-:1058E0001040000200042042006518210A0005A1B2
-:1058F0000005284003E000080060102110C00006E9
-:1059000024C6FFFF8CA2000024A50004AC82000086
-:105910000A0005AB2484000403E000080000000036
-:1059200010A0000824A3FFFFAC86000000000000C8
-:10593000000000002402FFFF2463FFFF1462FFFA4F
-:0C5940002484000403E0000800000000C4
-:04594C000000000156
-:105950000A00002A00000000000000000000000D06
-:10596000747870362E322E310000000006020100DD
-:1059700000000000000001360000EA6000000000A6
-:105980000000000000000000000000000000000017
-:105990000000000000000000000000000000000007
-:1059A00000000000000000000000000000000000F7
-:1059B00000000016000000000000000000000000D1
-:1059C00000000000000000000000000000000000D7
-:1059D00000000000000000000000000000000000C7
-:1059E000000000000000000000001388000000001C
-:1059F000000005DC000000000000000010000003B3
-:105A0000000000000000000D0000000D3C02080036
-:105A100024423D883C0308002463403CAC40000025
-:105A20000043202B1480FFFD244200043C1D08008D
-:105A300037BD7FFC03A0F0213C100800261000A811
-:105A40003C1C0800279C3D880E00044E000000000E
-:105A50000000000D27BDFFB4AFA10000AFA20004FD
-:105A6000AFA30008AFA4000CAFA50010AFA60014B0
-:105A7000AFA70018AFA8001CAFA90020AFAA002450
-:105A8000AFAB0028AFAC002CAFAD0030AFAE0034F0
-:105A9000AFAF0038AFB8003CAFB90040AFBC004476
-:105AA000AFBF00480E000591000000008FBF004806
-:105AB0008FBC00448FB900408FB8003C8FAF0038D6
-:105AC0008FAE00348FAD00308FAC002C8FAB002830
-:105AD0008FAA00248FA900208FA8001C8FA7001870
-:105AE0008FA600148FA500108FA4000C8FA30008B0
-:105AF0008FA200048FA1000027BD004C3C1B600456
-:105B00008F7A5030377B502803400008AF7A00006E
-:105B10008F86003C3C0390003C02800000862825D4
-:105B200000A32025AC4400203C0380008C670020AB
-:105B300004E0FFFE0000000003E000080000000099
-:105B40000A000070240400018F85003C3C048000A2
-:105B50003483000100A3102503E00008AC8200207C
-:105B600003E00008000010213084FFFF30A5FFFF94
-:105B70001080000700001821308200011040000250
-:105B800000042042006518211480FFFB0005284016
-:105B900003E000080060102110C0000700000000B2
-:105BA0008CA2000024C6FFFF24A50004AC820000E4
-:105BB00014C0FFFB2484000403E000080000000080
-:105BC00010A0000824A3FFFFAC8600000000000026
-:105BD000000000002402FFFF2463FFFF1462FFFAAD
-:105BE0002484000403E000080000000090AA0031B3
-:105BF0008FAB00108CAC00403C0300FF8D680004AC
-:105C0000AD6C00208CAD004400E060213462FFFFE9
-:105C1000AD6D00248CA700483C09FF000109C02499
-:105C2000AD6700288CAE004C0182C824031978258A
-:105C3000AD6F0004AD6E002C8CAD0038314A00FF12
-:105C4000AD6D001C94A900323128FFFFAD68001033
-:105C500090A70030A5600002A1600004A1670000C9
-:105C600090A30032306200FF00021982106000052C
-:105C7000240500011065000E0000000003E000088C
-:105C8000A16A00018CD80028354A0080AD78001840
-:105C90008CCF0014AD6F00148CCE0030AD6E0008B8
-:105CA0008CC4002CA16A000103E00008AD64000C64
-:105CB0008CCD001CAD6D00188CC90014AD690014AA
-:105CC0008CC80024AD6800088CC70020AD67000CAC
-:105CD0008CC200148C8300700043C82B1320000773
-:105CE000000000008CC20014144CFFE4000000000F
-:105CF000354A008003E00008A16A00018C82007030
-:105D00000A0000E6000000009089003027BDFFF87F
-:105D10008FA8001CA3A900008FA300003C0DFF80EA
-:105D200035A2FFFF8CAC002C00625824AFAB000002
-:105D3000A100000400C05821A7A000028D060004A5
-:105D400000A048210167C8218FA5000000805021D4
-:105D50003C18FF7F032C20263C0E00FF2C8C0001FA
-:105D6000370FFFFF35CDFFFF3C02FF0000AFC82417
-:105D700000EDC02400C27824000C1DC00323682558
-:105D800001F87025AD0D0000AD0E00048D24002437
-:105D9000AFAD0000AD0400088D2C00202404FFFFEF
-:105DA000AD0C000C9547003230E6FFFFAD06001049
-:105DB0009145004830A200FF000219C25060000166
-:105DC0008D240034AD0400148D4700388FAA0018CC
-:105DD00027BD0008AD0B0028AD0A0024AD07001C4C
-:105DE000AD00002CAD00001803E00008AD0000205D
-:105DF00027BDFFE0AFB20018AFB10014AFB0001084
-:105E0000AFBF001C9098003000C088213C0D00FFFF
-:105E1000330F007FA0CF0000908E003135ACFFFF24
-:105E20003C0AFF00A0CE000194A6001EA2200004A0
-:105E30008CAB00148E29000400A08021016C282462
-:105E4000012A40240080902101052025A626000279
-:105E5000AE24000426050020262400080E0000922F
-:105E6000240600029247003026050028262400144C
-:105E700000071E000003160324060004044000036C
-:105E80002403FFFF965900323323FFFF0E000092D8
-:105E9000AE230010262400248FBF001C8FB20018F0
-:105EA0008FB100148FB000102405000300003021D2
-:105EB0000A00009C27BD002027BDFFD8AFB1001C01
-:105EC000AFB00018AFBF002090A90030240200013D
-:105ED00000E050213123003F00A040218FB000405E
-:105EE0000080882100C04821106200148FA700386C
-:105EF000240B000500A0202100C02821106B0013F6
-:105F0000020030210E000128000000009225007CD4
-:105F100030A400021080000326030030AE000030E1
-:105F2000260300348FBF00208FB1001C8FB00018F3
-:105F30000060102103E0000827BD00280E0000A724
-:105F4000AFB000100A00016F000000008FA3003CFA
-:105F5000010020210120282101403021AFA30010A1
-:105F60000E0000EEAFB000140A00016F0000000048
-:105F70003C06800034C20E008C4400108F85004423
-:105F8000ACA400208C43001803E00008ACA300245C
-:105F90003C06800034C20E008C4400148F850044FF
-:105FA000ACA400208C43001C03E00008ACA3002438
-:105FB0009382000C1040001B2483000F2404FFF088
-:105FC0000064382410E00019978B00109784000EAD
-:105FD0009389000D3C0A601C0A0001AC0164402357
-:105FE00001037021006428231126000231C2FFFF43
-:105FF00030A2FFFF0047302B50C0000E00E44821C4
-:106000008D4D000C31A3FFFF00036400000C2C0336
-:1060100004A1FFF30000302130637FFF0A0001A4D8
-:106020002406000103E00008000000009784000E31
-:1060300000E448213123FFFF3168FFFF0068382B5F
-:1060400054E0FFF8A783000E938A000D114000056D
-:10605000240F0001006BC023A380000D03E00008A3
-:10606000A798000E006BC023A38F000D03E000086B
-:10607000A798000E03E000080000000027BDFFE81D
-:10608000AFB000103C10800036030140308BFFFFA2
-:1060900093AA002BAFBF0014A46B000436040E00BB
-:1060A0009488001630C600FF8FA90030A46800064F
-:1060B000AC650008A0660012A46A001AAC67002054
-:1060C0008FA5002CA4690018012020210E00019842
-:1060D000AC6500143C021000AE0201788FBF0014C2
-:1060E0008FB0001003E0000827BD00188F85000066
-:1060F0002484000727BDFFF83084FFF83C068000A9
-:1061000094CB008A316AFFFFAFAA00008FA900007C
-:10611000012540232507FFFF30E31FFF0064102BFC
-:106120001440FFF700056882000D288034CC400041
-:1061300000AC102103E0000827BD00088F8200009A
-:106140002486000730C5FFF800A2182130641FFF25
-:1061500003E00008AF8400008F87003C8F84004478
-:1061600027BDFFB0AFB70044AFB40038AFB1002CCB
-:10617000AFBF0048AFB60040AFB5003CAFB300348E
-:10618000AFB20030AFB000283C0B80008C860024FA
-:10619000AD6700808C8A002035670E0035690100EC
-:1061A000ACEA00108C8800248D2500040000B82182
-:1061B000ACE800188CE3001000A688230000A021A2
-:1061C000ACE300148CE20018ACE2001C122000FECC
-:1061D00000E0B021936C0008118000F40000000082
-:1061E000976F001031EEFFFF022E682B15A000EF15
-:1061F00000000000977200103250FFFFAED0000088
-:106200003C0380008C740000329300081260FFFD94
-:106210000000000096D800088EC700043305FFFF79
-:1062200030B5000112A000E4000000000000000DE5
-:1062300030BFA0402419004013F9011B30B4A00066
-:10624000128000DF00000000937300081260000855
-:1062500000000000976D001031ACFFFF00EC202B18
-:106260001080000330AE004011C000D500000000D7
-:10627000A7850040AF8700389363000802202821DB
-:10628000AFB10020146000F527B40020AF60000C0F
-:10629000978F004031F140001620000224030016C1
-:1062A0002403000E24054007A363000AAF65001411
-:1062B000938A00428F70001431550001001512407E
-:1062C00002024825AF690014979F00408F780014A0
-:1062D00033F9001003194025AF680014979200406D
-:1062E0003247000810E0016E000000008F670014C4
-:1062F0003C1210003C11800000F27825AF6F0014B2
-:1063000036230E00946E000A3C0D81002406000E18
-:1063100031CCFFFF018D2025AF640004A36600028D
-:106320009373000A3406FFFC266B0004A36B000A7B
-:1063300097980040330820001100015F0000000022
-:106340003C05800034A90E00979900409538000C58
-:1063500097870040001940423312C0003103000308
-:1063600000127B0330F11000006F682500117203EA
-:1063700001AE6025000C20C0A76400129793004076
-:10638000936A000A001359823175003C02AA102159
-:106390002450003CA3700009953F000C33F93FFFE7
-:1063A000A779001097700012936900090130F82155
-:1063B00027E5000230B900070019C02333080007A1
-:1063C000A368000B9371000997720012976F001079
-:1063D000322700FF8F910038978D004000F218217E
-:1063E000006F702101C6602131A6004010C0000579
-:1063F0003185FFFF00B1102B3C12800010400017C8
-:10640000000098210225A82B56A0013E8FA5002050
-:106410003C048000348A0E008D5300143C0680003A
-:10642000AD5300108D4B001CAD4B0018AD45000066
-:106430008CCD000031AC00081180FFFD34CE0E0081
-:1064400095C3000800A0882100009021A783004088
-:106450008DC6000424130001AF860038976F00102A
-:1064600031F5FFFF8E9F000003F1282310A0011FCC
-:10647000AE85000093620008144000DD00000000BB
-:106480000E0001E7240400108F90004800402821EE
-:106490003C023200320600FF000654000142F8259B
-:1064A00026090001AF890048ACBF000093790009BC
-:1064B00097780012936F000A332800FF3303FFFF21
-:1064C0000103382100076C0031EE00FF01AE6025AA
-:1064D000ACAC00048F840048978B0040316A2000E8
-:1064E0001140010AACA4000897640012308BFFFF32
-:1064F00006400108ACAB000C978E004031C5000887
-:1065000014A0000226280006262800023C1F800056
-:1065100037E70E0094F900148CE5001C8F67000427
-:10652000937800023324FFFF330300FFAFA3001072
-:106530008F6F0014AFA800180E0001CBAFAF00148E
-:10654000240400100E0001FB000000008E920000E9
-:1065500016400005000000008F7800142403FFBFE0
-:106560000303A024AF7400148F67000C00F5C8214A
-:10657000AF79000C9375000816A000080000000019
-:1065800012600006000000008F6800143C0AEFFF54
-:106590003549FFFE0109F824AF7F0014A3730008FA
-:1065A0008FA500200A00034F02202021AED1000059
-:1065B0000A00022D3C03800014E0FF1E30BFA04003
-:1065C0000E0001900000A0212E9100010237B0259D
-:1065D00012C000188FBF00488F87003C24170F009F
-:1065E00010F700D43C0680008CD901780720FFFE0C
-:1065F000241F0F0010FF00F634CA0E008D56001441
-:1066000034C7014024080240ACF600048D49001C48
-:106610003C141000ACE90008A0E00012A4E0001A4D
-:10662000ACE00020A4E00018ACE80014ACD4017881
-:106630008FBF00488FB700448FB600408FB5003C35
-:106640008FB400388FB300348FB200308FB1002C7C
-:106650008FB0002803E0000827BD00508F9100385C
-:10666000978800403C1280000220A821310700409A
-:1066700014E0FF7C00009821977900108F92003879
-:106680003338FFFF131200A8000020210080A02152
-:10669000108000F300A088211620FECE000000002C
-:1066A0000A00031F2E9100013C0380008C620178D8
-:1066B0000440FFFE240808008F860000AC680178C3
-:1066C0003C038000946D008A31ACFFFF01865823A3
-:1066D000256AFFFF31441FFF2C8900081520FFF9B0
-:1066E000000000008F8F0048347040008F83003C12
-:1066F00000E0A021240E0F0025E70001AF8700482D
-:1067000000D03021023488233C08800031F500FF9E
-:10671000106E000524070001939800423313000116
-:106720000013924036470001001524003C0A010086
-:10673000008A4825ACC900008F82004830BF00366F
-:1067400030B90008ACC200041320009900FF98255E
-:1067500035120E009650000A8F8700003C0F810012
-:106760003203FFFF24ED000835060140006F60256D
-:106770003C0E100031AB1FFF269200062405000ED0
-:10678000ACCC0020026E9825A4C5001AAF8B000087
-:10679000A4D20018162000083C1080008F89003C0D
-:1067A00024020F005122000224170001367300401A
-:1067B0000E0001883C10800036060E008CCB0014C1
-:1067C000360A014002402021AD4B00048CC5001C5C
-:1067D000AD450008A1550012AD5300140E000198FC
-:1067E0003C151000AE1501780A00035200000000AD
-:1067F000936F0009976E0012936D000B31E500FF57
-:1068000000AE202131AC00FF008C80212602000A5E
-:106810003050FFFF0E0001E7020020218F86004864
-:106820003C0341003C05800024CB0001AF8B0048B5
-:10683000936A00099769001230C600FF315F00FFBC
-:106840003128FFFF03E8382124F900020006C400C4
-:106850000319782501E37025AC4E00008F6D000C04
-:1068600034A40E00948B001401B26025AC4C0004DB
-:106870008C85001C8F670004936A00023164FFFF5F
-:10688000314900FFAFA900108F680014AFB10018A4
-:106890000E0001CBAFA800140A0002FD0200202167
-:1068A000AF600004A3600002979800403308200006
-:1068B0001500FEA300003021A7600012978400405D
-:1068C000936B000A3C10800030931F00001351832B
-:1068D000014BA82126A20028A362000936090E0058
-:1068E000953F000C0A000295A77F00108F700014DE
-:1068F000360900400E000188AF6900140A0002C981
-:10690000000000000A00034F000020210641FEFAAB
-:10691000ACA0000C8CAC000C3C0D8000018D9025CF
-:106920000A0002EAACB2000C000090210A0002C585
-:1069300024130001128000073C028000344B0E003B
-:106940009566000830D30040126000490000000046
-:106950003C0680008CD001780600FFFE34C50E0096
-:1069600094B500103C03050034CC014032B8FFFF61
-:1069700003039025AD92000C8CAF0014240D200071
-:106980003C041000AD8F00048CAE001CAD8E0008DE
-:10699000A1800012A580001AAD800020A5800018FB
-:1069A000AD8D0014ACC401780A0003263C068000BB
-:1069B0008F9F0000351801402692000227F9000839
-:1069C00033281FFFA71200180A000391AF880000A8
-:1069D0003C02800034450140ACA0000C1280001B3A
-:1069E00034530E0034510E008E370010ACB7000443
-:1069F0008E2400183C0B8000ACA4000835700140C8
-:106A000024040040A20000128FBF0048A600001A14
-:106A10008FB70044AE0000208FB60040A6000018DB
-:106A20008FB5003CAE0400148FB400388FB300342F
-:106A30008FB200308FB1002C8FB000283C021000C4
-:106A400027BD005003E00008AD6201788E66001497
-:106A5000ACA600048E64001C0A00042A3C0B8000D3
-:106A60000E0001902E9100010A0003200237B0258C
-:106A7000000000000000000D000000002400036979
-:106A80000A0004013C06800027BDFFD8AFBF0020EC
-:106A90003C0980003C1F20FFAFB200183C0760009B
-:106AA00035320E002402001037F9FFFDACE2300849
-:106AB000AFB3001CAFB10014AFB00010AE5900006E
-:106AC00000000000000000000000000000000000C6
-:106AD000000000003C1800FF3713FFFDAE5300001C
-:106AE0003C0B60048D7050002411FF7F3C0E0002AF
-:106AF0000211782435EC380C35CD0109ACED4C1879
-:106B0000240A0009AD6C50008CE80438AD2A000856
-:106B1000AD2000148CE54C1C3106FFFF38C42F71EA
-:106B200000051E023062000F2486C0B3104000072B
-:106B3000AF8200088CE54C1C3C09001F3528FC0086
-:106B400000A81824000321C2AF8400048CF10808B7
-:106B50003C0F57092412F0000232702435F0001067
-:106B600001D0602601CF68262DAA00012D8B0001DF
-:106B7000014B382550E00009A380000C3C1F601C2D
-:106B80008FF8000824190001A399000C33137C002E
-:106B9000A7930010A780000EA380000DAF800048CF
-:106BA00014C00003AF8000003C066000ACC0442C61
-:106BB0000E0005B93C1080000E000F2436110100B4
-:106BC0003C12080026523DF03C13080026733E702C
-:106BD0008E03000038640001308200011440FFFC85
-:106BE0003C0B800A8E2600002407FF8024C9024047
-:106BF000312A007F014B402101272824AE060020C6
-:106C0000AF880044AE0500243C048000AF86003C01
-:106C10008C8C01780580FFFE24180800922F000854
-:106C2000AC980178A38F0042938E004231CD0001D1
-:106C300011A0000F24050D0024DFF8002FF9030137
-:106C40001320001C000629C224A4FFF000041042F7
-:106C5000000231400E00020200D2D8213C02400066
-:106C60003C068000ACC201380A0004A0000000000D
-:106C700010C50023240D0F0010CD00273C1F8008F5
-:106C800037F9008093380000240E0050330F00FFC6
-:106C900015EEFFF33C0240000E000A400000000029
-:106CA0003C0240003C068000ACC201380A0004A04F
-:106CB000000000008F83000400A3402B1500000B90
-:106CC0008F8B0008006B50212547FFFF00E5482B04
-:106CD0001520000600A36023000C19400E000202DC
-:106CE0000073D8210A0004C43C0240000000000DDB
-:106CF0000E000202000000000A0004C43C02400032
-:106D00003C1B0800277B3F700E00020200000000C1
-:106D10000A0004C43C0240003C1B0800277B3F9053
-:106D20000E000202000000000A0004C43C02400001
-:106D30003C0660043C09080025290104ACC9502C1C
-:106D40008CC850003C0580003C02000235070080E2
-:106D5000ACC750003C040800248415A43C03080080
-:106D60002463155CACA50008ACA2000C3C01080033
-:106D7000AC243D803C010800AC233D8403E00008C6
-:106D80002402000100A030213C1C0800279C3D8803
-:106D90003C0C04003C0B0002008B3826008C402683
-:106DA0002CE200010007502B2D050001000A48804D
-:106DB0003C03080024633D80004520250123182161
-:106DC0001080000300001021AC66000024020001C6
-:106DD00003E00008000000003C1C0800279C3D88E0
-:106DE0003C0B04003C0A0002008A3026008B382647
-:106DF0002CC200010006482B2CE500010009408050
-:106E00003C03080024633D80004520250103182130
-:106E100010800005000010213C0C0800258C155C3A
-:106E2000AC6C00002402000103E000080000000038
-:106E30003C0900023C0804000088302600893826FE
-:106E40002CC30001008028212CE4000100831025C0
-:106E50001040000B000030213C1C0800279C3D889E
-:106E60003C0A80008D4E00082406000101CA6825F6
-:106E7000AD4D00088D4C000C01855825AD4B000C24
-:106E800003E0000800C010213C1C0800279C3D883E
-:106E90003C0580008CA6000C000420272402000181
-:106EA00000C4182403E00008ACA3000C3C0200025C
-:106EB0001082000B3C0560003C07040010870003B3
-:106EC0000000000003E00008000000008CA908D0CA
-:106ED000240AFFFD012A402403E00008ACA808D0E2
-:106EE0008CA408D02406FFFE0086182403E00008C6
-:106EF000ACA308D03C05601A34A600108CC30080F7
-:106F000027BDFFF88CC50084AFA3000093A4000048
-:106F10002402000110820003AFA5000403E0000872
-:106F200027BD000893A7000114E0001497AC0002ED
-:106F300097B800023C0F8000330EFFFC01CF6821A0
-:106F4000ADA50000A3A000003C0660008CC708D0DF
-:106F50002408FFFE3C04601A00E82824ACC508D0D1
-:106F60008FA300048FA200003499001027BD0008F1
-:106F7000AF22008003E00008AF2300843C0B8000B8
-:106F8000318AFFFC014B48218D2800000A00057D55
-:106F9000AFA8000427BDFFE8AFBF00103C1C0800ED
-:106FA000279C3D883C0580008CA4000C8CA200042A
-:106FB0003C0300020044282410A0000A00A3182467
-:106FC0003C0604003C0400021460000900A61024E2
-:106FD0001440000F3C0404000000000D3C1C08009D
-:106FE000279C3D888FBF001003E0000827BD0018D4
-:106FF0003C0208008C423D800040F809000000007F
-:107000003C1C0800279C3D880A0005A68FBF001085
-:107010003C0208008C423D840040F809000000005A
-:107020000A0005AC00000000000411C003E00008E5
-:10703000244202403C04080024843FD42405001A62
-:107040000A00009C0000302127BDFFE0AFB0001017
-:107050003C108000AFBF0018AFB100143611010022
-:10706000922200090E0005B63044007F8E3F0000DA
-:107070008F89003C3C0F008003E26021258800409E
-:107080000049F821240DFF80310E007831980078F6
-:1070900035F9000135F100020319382501D14825E1
-:1070A000010D302403ED5824018D2824240A0040CA
-:1070B00024040080240300C0AE0B0024AE0008109E
-:1070C000AE0A0814AE040818AE03081CAE05080486
-:1070D000AE070820AE060808AE09082436090900E4
-:1070E0009539000C3605098033ED007F3338FFFFFA
-:1070F000001889C0AE110800AE0F0828952C000CAE
-:107100008FBF00188FB10014318BFFFF000B51C0EF
-:10711000AE0A002C8CA400508FB000108CA3003C51
-:107120008D2700048CA8001C8CA600383C0E800A19
-:1071300001AE102127BD0020AF820044AF84005073
-:10714000AF830054AF87004CAF88005C03E00008B9
-:10715000AF8600603C09080091293FF924A800028D
-:107160003C05110000093C0000E8302500C5182549
-:1071700024820008AC83000003E00008AC80000417
-:107180003C098000352309009128010B906A001109
-:107190002402002800804821314700FF00A0702110
-:1071A00000C068213108004010E20002340C86DD86
-:1071B000240C08003C0A800035420A9A94470000DB
-:1071C000354B0A9C35460AA030F9FFFFAD39000067
-:1071D0008D780000354B0A8024040001AD3800048E
-:1071E0008CCF0000AD2F00089165001930A300037B
-:1071F0001064009A28640002148000B9240500027B
-:10720000106500A8240F0003106F00BE35450AA4C6
-:10721000240A0800118A004D0000000051000042BD
-:107220003C0B80003C04800034830900906700120E
-:1072300030E200FF004D7821000FC88027240001B4
-:107240003C0A8000354F090091E50019354C098052
-:107250008D87002830A300FF000315000047582544
-:107260000004C4003C19600001793025370806FF8E
-:10727000AD260000AD2800048DEA002C252800284A
-:10728000AD2A00088DEC0030AD2C000C8DE50034EB
-:10729000AD2500108DE40038AD2400148DE3001CF2
-:1072A000AD2300188DE70020AD27001C8DE20024DF
-:1072B000AD2200208DF90028AD3900243C09800062
-:1072C0003526093C8CCF0000352A0100AD0E0004A4
-:1072D000AD0F00008D4E000C3523090035250980C7
-:1072E000AD0E0008906C00128D47000C8CB9003474
-:1072F0003C18080093183FF8318200FF004D5821D8
-:1073000003277823000B37000018240000C47025E1
-:1073100031E9FFFC01C9682525020014AD0D000C00
-:1073200003E00008AD000010357809009306001254
-:107330003C05080094A53FE830C800FF010D50212E
-:10734000000A60800A00063C0185202115000060CB
-:10735000000000003C08080095083FEE3C060800CD
-:1073600094C63FE8010610213C0B800035790900E6
-:1073700093380011932A001935660A80330800FFFC
-:1073800094CF002A00086082314500FF978A005898
-:10739000000C1E00000524003047FFFF006410258C
-:1073A0000047C02501EA30213C0B4000030B40257B
-:1073B00000066400AD280000AD2C000493250018E1
-:1073C0003C0300062528001400053E0000E31025BC
-:1073D000AD2200088F24002C254F000131EB7FFFE8
-:1073E000AD24000C8F38001CA78B0058AD3800105E
-:1073F0003C0980003526093C8CCF0000352A01006D
-:10740000AD0E0004AD0F00008D4E000C35230900B9
-:1074100035250980AD0E0008906C00128D47000CD8
-:107420008CB900343C18080093183FF8318200FFF3
-:10743000004D582103277823000B37000018240043
-:1074400000C4702531E9FFFC01C96825250200143C
-:10745000AD0D000C03E00008AD0000103C02080078
-:1074600094423FF23C05080094A53FE835440AA445
-:107470003C07080094E73FE4948B00000045C821D6
-:107480000327C023000B1C002706FFF200665025CF
-:10749000AD2A000CAD200010AD2C00140A000630FF
-:1074A00025290018354F0AA495E5000095640028A9
-:1074B0000005140000043C003459810000EC5825FC
-:1074C000AD39000CAD2B00100A0006302529001440
-:1074D0003C0C0800958C3FEE0A00068625820001D0
-:1074E0005460FF4C240A080035580AA4970600008F
-:1074F00000061C00006C5025AD2A000C0A00063066
-:10750000252900103C03080094633FF23C07080063
-:1075100094E73FE83C0F080095EF3FE494A4000097
-:107520009579002800671021004F582300041C00A3
-:10753000001934002578FFEE00D87825346A8100E0
-:10754000AD2A000CAD2F0010AD200014AD2C00189A
-:107550000A0006302529001C03E00008240207D099
-:1075600027BDFFE0AFB20018AFB10014AFB00010FC
-:10757000AFBF001C0E00007C008088218F88005463
-:107580008F87004C3C05800834B20080011128210F
-:107590003C10800024020080240300C000A72023A8
-:1075A000AE0208183C068008AE03081C18800004D0
-:1075B000AF850054ACC500048CC90004AF89004CF1
-:1075C00012200009360409800E00070200000000A6
-:1075D000924C00278E0B007401825004014B302125
-:1075E000AE46000C360409808C8E001C8F8F005C28
-:1075F00001CF682319A000048FBF001C8C90001CD1
-:10760000AF90005C8FBF001C8FB200188FB10014C8
-:107610008FB000100A00007E27BD00208F8600502A
-:107620008F8300548F82004C3C05800834A4008076
-:10763000AC860050AC83003C03E00008ACA2000420
-:107640003C0308008C63005427BDFFF8308400FF22
-:107650002462000130A500FF3C010800AC22005468
-:1076600030C600FF3C0780008CE801780500FFFE73
-:107670003C0C7FFFA3A400038FAA0000358BFFFF03
-:10768000014B4824000627C001244025AFA8000074
-:1076900034E201009043000AA3A000023C1980FFDD
-:1076A000A3A300018FAF000030AE007F3738FFFF8B
-:1076B00001F86024000E6E003C0A002034E5014011
-:1076C000018D5825354920002406FF803C04100018
-:1076D00027BD0008ACAB000CACA90014A4A0001896
-:1076E000A0A6001203E00008ACE40178308800FF97
-:1076F00030A700FF3C0380008C6201780440FFFE4D
-:107700003C0C8000358A0A008D4B002035840140F6
-:1077100035850980AC8B00048D4900240007302B8F
-:1077200000061540AC890008A088001090A3004C0A
-:10773000A083002D03E00008A480001827BDFFE807
-:10774000308400FFAFBF00100E00076730A500FFB8
-:107750008F8300548FBF00103C06800034C5014069
-:10776000344700402404FF903C02100027BD00185D
-:10777000ACA3000CA0A40012ACA7001403E0000806
-:10778000ACC2017827BDFFE03C088008AFBF001CF9
-:10779000AFB20018AFB10014AFB0001035100080C8
-:1077A0008E0600183C078000309200FF00C720259D
-:1077B000AE0400180E00007C30B100FF92030005FB
-:1077C000346200080E00007EA20200050240202163
-:1077D0000E00077B02202821024020218FBF001CC1
-:1077E0008FB200188FB100148FB00010240500056F
-:1077F000240600010A00073C27BD00203C0580004C
-:1078000034A309809066000830C200081040000FC1
-:107810003C0A01013549080AAC8900008CA80074B3
-:10782000AC8800043C07080090E73FF830E5001002
-:1078300050A00008AC8000083C0D800835AC0080EA
-:107840008D8B0058AC8B00082484000C03E00008EA
-:10785000008010210A0007BF2484000C27BDFFE828
-:107860003C098000AFB00010AFBF0014352609807E
-:1078700090C800092402000600A05821310300FF2F
-:107880003527090000808021240500041062007B58
-:107890002408000294CF005C3C0E020431EDFFFF8F
-:1078A00001AE6025AE0C000090CA000831440020F3
-:1078B000108000080000000090C2004E3C1F010331
-:1078C00037F90300305800FF03193025240500085C
-:1078D000AE06000490F9001190E6001290E4001149
-:1078E000333800FF0018708230CF00FF01CF5021E5
-:1078F000014B6821308900FF31AAFFFF392300289E
-:10790000000A60801460002C020C482390E40012EE
-:107910003C198000372F0100308C00FF018B1821AB
-:10792000000310800045F821001F8400360706FF81
-:10793000AD270004373F090093EC001193EE0012CD
-:10794000372609800005C0828DE4000C8CC5003408
-:1079500031CD00FF01AB10210058182100A4F823FD
-:107960000008840000033F0000F0302533F9FFFFDA
-:10797000318F00FC00D970250158202101E96821D0
-:1079800000045080ADAE000C0E00007C012A802166
-:107990003C088008240B0004350500800E00007EA2
-:1079A000A0AB0009020010218FBF00148FB000109F
-:1079B00003E0000827BD001890EC001190E30019C7
-:1079C0003C18080097183FEE318200FF0002F88251
-:1079D000307000FF001FCE0000103C000327302550
-:1079E00000D870253C0F400001CF68253C1980006D
-:1079F000AD2D0000373F090093EC001193EE00120B
-:107A0000372F0100372609800005C0828DE4000C65
-:107A10008CC5003431CD00FF01AB10210058182176
-:107A200000A4F8230008840000033F0000F0302584
-:107A300033F9FFFF318F00FC00D970250158202158
-:107A400001E9682100045080ADAE000C0E00007CFE
-:107A5000012A80213C088008240B000435050080A1
-:107A60000E00007EA0AB0009020010218FBF0014A1
-:107A70008FB0001003E0000827BD00180A0007D1EE
-:107A80002408001227BDFFD03C038000AFB60028B9
-:107A9000AFB50024AFB40020AFB10014AFBF002CCD
-:107AA000AFB3001CAFB20018AFB0001034670100D4
-:107AB00090E6000B309400FF30B500FF30C200307C
-:107AC0000000B02110400099000088213464098032
-:107AD0009088000800082E0000051E03046000C006
-:107AE000240400048F8600543C010800A0243FF8C1
-:107AF0003C0C8000AD8000483C048000348E0100C6
-:107B000091CD000B31A5002010A000073C0780009C
-:107B100034930980927200080012860000107E03E0
-:107B200005E000C43C1F800834EC0100918A000B82
-:107B300034EB098091690008314400400004402B77
-:107B40003123000800C898231460000224120003A7
-:107B5000000090213C10800036180A80360409008D
-:107B6000970E002C90830011908900129305001845
-:107B7000307F00FF312800FF024810210002C8803A
-:107B8000930D0018033F782101F1302130B100FF3F
-:107B900000D11821A78E00583C010800A4263FEE12
-:107BA0003C010800A4233FF015A0000200000000E3
-:107BB0000000000D920B010B3065FFFF3C01080037
-:107BC000A4233FF2316A00403C010800A4203FE8B2
-:107BD0003C010800A4203FE41140000224A4000A54
-:107BE00024A4000B3091FFFF0E0001E702202021AA
-:107BF0009206010B3C0C0800958C3FF200402021BE
-:107C00000006698231A700010E00060101872821C4
-:107C100000402021026028210E00060C0240302185
-:107C20000E0007AB0040202116C000690040202153
-:107C30009212010B3256004012C000053C0500FFB5
-:107C40008C93000034AEFFFF026E8024AC900000E5
-:107C50000E0001FB022020213C0F080091EF3FF8AD
-:107C600031F10003122000163C1380088F8200546B
-:107C70003C09800835280080245F0001AD1F003CCE
-:107C80003C0580088CB9000403E02021033FC02399
-:107C90001B000002AF9F00548CA400040E000702DA
-:107CA000ACA400043C0780008CEB00743C0480080A
-:107CB00034830080004B5021AC6A000C3C138008D8
-:107CC000367000800280202102A02821A200006BD3
-:107CD0000E0007673C1480008F920054368C0140E0
-:107CE000AD92000C8F8600483C151000344D000604
-:107CF00024D60001AF9600488FBF002CA186001249
-:107D00008FB60028AD8D00148FB3001CAE9501789E
-:107D10008FB200188FB500248FB400208FB10014EB
-:107D20008FB0001003E0000827BD003034640980E4
-:107D3000908F0008000F7600000E6E0305A0003340
-:107D4000347F090093F8001B241900103C0108003F
-:107D5000A0393FF8331300021260FF678F8600548A
-:107D60008F8200601446FF653C0480000E00007C9A
-:107D7000000000003C0480083485008090A80009C1
-:107D800024060016310300FF1066000D00000000FD
-:107D900090AB00093C07080090E73FF82409000871
-:107DA000316400FF34EA00013C010800A02A3FF8DA
-:107DB0001089002F240C000A108C00282402000CCB
-:107DC0000E00007E000000000A00086A8F86005442
-:107DD0000E0007C3024028210A0008B800402021F5
-:107DE0003C0B8008356A00808D4600548CE9000CFD
-:107DF0001120FF3DAF860054240700143C01080009
-:107E0000A0273FF80A0008693C0C80009091000808
-:107E1000241200023C010800A0323FF8323000205A
-:107E20001200000B241600018F8600540A00086A15
-:107E30002411000837F800808F020038AFE20004F8
-:107E40008FF90004AF19003C0A0008763C07800057
-:107E50008F8600540A00086A24110004A0A20009B9
-:107E60000E00007E000000000A00086A8F860054A1
-:107E7000240200140A000944A0A2000927BDFFE85B
-:107E8000AFB000103C108000AFBF001436020100FC
-:107E9000904400090E000767240500013C04800897
-:107EA0009099000E34830080909F000F906F002601
-:107EB0009089000A33F800FF00196E000018740062
-:107EC00031EC00FF01AE5025000C5A00014B382563
-:107ED000312800FF360301403445600000E83025BA
-:107EE0002402FF813C041000AC66000C8FBF00141C
-:107EF000AC650014A0620012AE0401788FB00010CF
-:107F000003E0000827BD001827BDFFE8308400FF0C
-:107F1000AFBF00100E00076730A500FF3C058000D2
-:107F200034A40140344700402406FF92AC8700147B
-:107F3000A08600128F8300548FBF00103C021000F7
-:107F400027BD0018AC83000C03E00008ACA2017848
-:107F500027BDFFD8AFB00010308400FF30B000FF65
-:107F60003C058000AFB10014AFBF0020AFB3001CD0
-:107F7000AFB20018000410C234A6010032030002A0
-:107F8000305100011460000790D200093C098008BC
-:107F900035330080926800053107000810E0000CBE
-:107FA000308A0010024020210E00078D0220282177
-:107FB000240200018FBF00208FB3001C8FB2001875
-:107FC0008FB100148FB0001003E0000827BD002817
-:107FD0001540003434A50A008CB800248CAF00088A
-:107FE000130F004B000038213C0D800835B3008092
-:107FF000926C006824060002318B00FF1166008439
-:108000003C06800034C201009263004C9059000984
-:10801000307F00FF53F900043213007C10E0006948
-:10802000000000003213007C5660005C02402021FA
-:1080300016200009320D00013C0C8000358401003F
-:10804000358B0A008D6500248C86000414A6FFD9A8
-:1080500000001021320D000111A0000E024020216D
-:108060003C188000371001008E0F000C8F8E0050DE
-:1080700011EE0008000000000E00084D022028212B
-:108080008E19000C3C1F800837F00080AE1900509C
-:10809000024020210E00077B022028210A000999B6
-:1080A000240200013C0508008CA5006424A4000102
-:1080B0003C010800AC2400641600000D0000000024
-:1080C000022028210E00077B02402021926E0068CA
-:1080D000240C000231CD00FF11AC0022024020210F
-:1080E0000E00094B000000000A000999240200015B
-:1080F0000E00007024040001926B0025020B302555
-:108100000E00007EA26600250A0009DD022028215B
-:108110008E6200188CDF00048CB9002400021E025D
-:1081200017F9FFB13065007F9268004C26440001CA
-:108130003093007F12650040310300FF1464FFABF1
-:108140003C0D80082647000130F1007F30E200FF3F
-:108150001225000B24070001004090210A0009A607
-:1081600024110001240500040E00073C2406000130
-:108170000E00094B000000000A00099924020001CA
-:108180002405FF800245202400859026324200FF0E
-:10819000004090210A0009A6241100010E00084D9C
-:1081A000022028213207003010E0FFA132100082A7
-:1081B000024020210E00078D022028210A00099983
-:1081C000240200018E69001802402021022028218B
-:1081D000012640250E00096EAE6800189264004C1E
-:1081E00024050003240600010E00073C308400FF34
-:1081F0000E00007024040001927100250211502528
-:108200000E00007EA26A00250A00099924020001DE
-:108210008E6F00183C1880000240202101F8702564
-:10822000022028210E00077BAE6E00189264004CDD
-:108230000A000A2524050004324A008039490080DA
-:108240001469FF6A3C0D80080A0009FE26470001F8
-:1082500027BDFFC0AFB000183C108000AFBF003892
-:10826000AFB70034AFB60030AFB5002CAFB40028C4
-:10827000AFB30024AFB200200E0005BEAFB1001CAA
-:10828000360201009045000B0E0009809044000862
-:10829000144000E78FBF00383C0880083507008095
-:1082A000A0E0006B3606098090C500002403005052
-:1082B0003C17080026F73FB030A400FF3C1308002D
-:1082C00026733FC0108300033C1080000000B821DB
-:1082D00000009821241F00103611010036120A00F8
-:1082E000361509808E5800248E3400048EAF00208D
-:1082F0008F8C00543C010800A03F3FF836190A80DB
-:10830000972B002C8EF60000932A001802987023F9
-:1083100001EC68233C010800AC2E3FD43C0108006E
-:10832000AC2D3FD83C010800AC2C3FFCA78B00587B
-:1083300002C0F809315400FF30490002152000E95D
-:1083400030420001504000C49227000992A9000861
-:108350003128000815000002241500030000A821A0
-:108360003C0A80003543090035440A008C8D002406
-:108370009072001190700012907F0011325900FF2E
-:10838000321100FF02B110210002C08033EF00FF64
-:108390000319B021028F702102D4602125CB001077
-:1083A0003C010800A4363FEE3C010800AC2D400023
-:1083B0003C010800A42C3FF03C010800A42B3FEC3A
-:1083C000355601003554098035510E008F87005411
-:1083D0008F89005C8E850020240800060127302349
-:1083E0003C010800AC283FF400A7282304C000B5D6
-:1083F0000000902104A000B300C5502B114000B52F
-:10840000000000003C010800AC263FD88E6200004E
-:108410000040F809000000003046000214C000745B
-:1084200000408021304B0001556000118E62000435
-:108430003C0D08008DAD3FDC3C0EC0003C048000CC
-:1084400001AE6025AE2C00008C980000330F0008B0
-:1084500011E0FFFD00000000963F0008241200011B
-:10846000A79F00408E390004AF9900388E62000447
-:108470000040F809000000000202802532030002DB
-:10848000146000B3000000003C09080095293FE497
-:108490003C06080094C63FF03C0A0800954A3FE6B7
-:1084A0003C0708008CE73FDC012670213C030800F4
-:1084B0008C6340003C08080095083FFA01CA20215F
-:1084C0008ED9000C00E92821249F000200A8782101
-:1084D0000067C02133E4FFFFAF9900503C01080062
-:1084E000AC3840003C010800A42F3FE83C010800E4
-:1084F000A42E3FF20E0001E7000000008F8D00481F
-:10850000004020213C010800A02D3FF98E620008A8
-:1085100025AC0001AF8C00480040F80900000000C5
-:108520008F85005402A030210E00060C004020214F
-:108530000E0007AB004020218E6B000C0160F80993
-:10854000004020213C0A0800954A3FF23C06080002
-:1085500094C63FE601464821252800020E0001FB93
-:108560003104FFFF3C0508008CA53FD43C07080000
-:108570008CE73FDC00A720233C010800AC243FD45B
-:1085800014800006000000003C0208008C423FF40A
-:10859000344B00403C010800AC2B3FF41240004338
-:1085A0008F8E00448E2D00108F920044AE4D00201F
-:1085B0008E2C0018AE4C00243C04080094843FE844
-:1085C0000E000704000000008F9F00548E6700100B
-:1085D0003C010800AC3F3FFC00E0F809000000004F
-:1085E0003C1908008F393FD41720FF798F8700543A
-:1085F000979300583C11800E321601000E0007338D
-:10860000A633002C16C00045320300105460004C05
-:108610008EE50004320800405500001D8EF0000871
-:108620008EE4000C0080F809000000008FBF0038C5
-:108630008FB700348FB600308FB5002C8FB4002870
-:108640008FB300248FB200208FB1001C8FB00018B0
-:1086500003E0000827BD00408F86003C36110E0065
-:1086600000072E0000A62025AE0400808E430020C7
-:108670008E500024AFA30010AE2300148FB2001060
-:10868000AE320010AE30001C0A000A7FAE30001877
-:108690000200F809000000008EE4000C0080F809D8
-:1086A000000000000A000B388FBF003824180001BA
-:1086B000240F0001A5C00020A5D800220A000B1A33
-:1086C000ADCF00243C010800AC203FD80A000AB01E
-:1086D0008E6200003C010800AC253FD80A000AB0B9
-:1086E0008E620000922400090E00077B0000282102
-:1086F0008FBF00388FB700348FB600308FB5002C95
-:108700008FB400288FB300248FB200208FB1001CDB
-:108710008FB0001803E0000827BD00403C14800023
-:1087200092950109000028210E00084D32A400FF97
-:10873000320300105060FFB8320800408EE500049C
-:1087400000A0F809000000000A000B3232080040C7
-:108750005240FFA8979300588E3400148F93004422
-:10876000AE7400208E35001CAE7500240A000B2963
-:10877000979300588F8200140004218003E00008C2
-:10878000008210213C07800834E200809043006999
-:1087900000804021106000093C0401003C070800F3
-:1087A0008CE73FFC8F83003000E320230480000827
-:1087B0009389001C14E300030100202103E000085A
-:1087C000008010213C04010003E00008008010211B
-:1087D0001120000B006738233C0D800035AC098068
-:1087E000918B007C316A0002114000202409003482
-:1087F00000E9702B15C0FFF10100202100E93823AA
-:108800002403FFFC00A3C82400E3C02400F9782B54
-:1088100015E0FFEA0308202130C400030004102300
-:1088200014C00014304900030000302100A9782151
-:1088300001E6702100EE682B11A0FFE03C0401006E
-:108840002D3800010006C82B0105482103193824E2
-:1088500014E0FFDA2524FFFC2402FFFC00A2182408
-:108860000068202103E00008008010210A000BA806
-:10887000240900303C0C80003586098090CB007CB8
-:10888000316A00041540FFE9240600040A000BB712
-:10889000000030213C0308008C63005C8F820018CC
-:1088A00027BDFFE0AFBF0018AFB100141062000594
-:1088B000AFB00010000329C024A40280AF840014CC
-:1088C000AF8300183C10800036020A009445003245
-:1088D000361101000E000B8930A43FFF8E240000EA
-:1088E000241FFF803C1100800082C021031F6024F0
-:1088F0003309007F000CC94003294025330E00785E
-:10890000362F00033C0D1000010D502501CF5825D6
-:10891000AE0C002836080980AE0C080CAE0B082CF3
-:10892000AE0A0830910300693C06800C012638210C
-:1089300010600006AF8700348D09003C8D03006C89
-:108940000123382318E00082000000003C0B80085F
-:10895000356A00803C108000A1400069360609801D
-:108960008CC200383C06800034C50A0090A8003C48
-:10897000310C00201180001AAF820030240D00015C
-:108980003C0E800035D10A00A38D001CAF8000246E
-:108990008E2400248F850024240D0008AF80002041
-:1089A000AF8000283C010800A42D3FE63C010800F0
-:1089B000A4203FFA0E000B8D000030219228003CCD
-:1089C0008FBF00188FB100148FB0001000086142F3
-:1089D000AF82002C27BD002003E000083182000197
-:1089E00090B80032240E0001330F00FF000F2182E7
-:1089F000108E0041241900021099006434C40AC08A
-:108A00003C03800034640A008C8F002415E0001EB3
-:108A100034660900909F00302418000533F9003FA8
-:108A20001338004E240300018F860020A383001C0E
-:108A3000AF860028AF8600243C0E800035D10A00A6
-:108A40008E2400248F850024240D00083C0108009A
-:108A5000A42D3FE63C010800A4203FFA0E000B8D38
-:108A6000000000009228003C8FBF00188FB1001456
-:108A70008FB0001000086142AF82002C27BD00209B
-:108A800003E00008318200018C8A00088C8B0024EE
-:108A90008CD000643C0E800035D10A00014B2823A5
-:108AA000AF900024A380001CAF8500288E240024F2
-:108AB0008F8600208F850024240D00083C010800CB
-:108AC000A42D3FE63C010800A4203FFA0E000B8DC8
-:108AD000000000009228003C8FBF00188FB10014E6
-:108AE0008FB0001000086142AF82002C27BD00202B
-:108AF00003E000083182000190A200303051003FB5
-:108B00005224002834C50AC08CB00024160000226C
-:108B100034CB09008CA600483C0A7FFF3545FFFF97
-:108B200000C510243C0E8000AF82002035C509002E
-:108B30008F8800208CAD0060010D602B1580000235
-:108B4000010020218CA400600A000C2CAF840020BE
-:108B50008D02006C0A000C063C0680008C820048E6
-:108B60008F8600203C097FFF3527FFFF00478824C0
-:108B70003C04800824030001AF910028AC80006C05
-:108B8000A383001C0A000C3AAF8600248C9F0014BB
-:108B90000A000C2CAF9F00208D6200680A000C7642
-:108BA0003C0E800034C409808C8900708CA30014B2
-:108BB0000123382B10E00004000000008C820070BC
-:108BC0000A000C763C0E80008CA200140A000C7681
-:108BD0003C0E80008F85002427BDFFE0AFBF00184A
-:108BE000AFB1001414A00008AFB000103C04800026
-:108BF00034870A0090E600302402000530C3003FAD
-:108C0000106200B9348409008F91002000A08021F7
-:108C10003C048000348E0A008DCD00043C06080020
-:108C20008CC63FD831A73FFF00E6602B558000017E
-:108C300000E03021938F001C11E0007800D0282B39
-:108C4000349F098093F9007C3338000213000079C7
-:108C50002403003400C3102B144000D9000000008E
-:108C600000C3302300D0282B3C010800A4233FE49C
-:108C700014A0006E020018213C0408008C843FD42C
-:108C80000064402B55000001006020213C0580005D
-:108C900034A90A00912A003C3C010800AC243FDCC6
-:108CA00031430020146000030000482134AB0E0063
-:108CB0008D6900188F88002C0128202B1080005F00
-:108CC000000000003C0508008CA53FDC00A96821DD
-:108CD000010D602B1180005C00B0702B010938235E
-:108CE00000E028213C010800AC273FDC1200000313
-:108CF000240AFFFC10B0008D3224000300AA1824BF
-:108D00003C010800A4203FFA3C010800AC233FDCF2
-:108D1000006028218F840024120400063C0B800888
-:108D20008D6C006C02002021AF9100202590000185
-:108D3000AD70006C8F8D002800858823AF910024D2
-:108D400001A52023AF840028122000022407001868
-:108D5000240700103C1880083706008090CF006878
-:108D60003C010800A0273FF82407000131EE00FF76
-:108D700011C70047000000001480001800002821DF
-:108D80003C06800034D1098034CD010091A6000951
-:108D90008E2C001824C40001000C86023205007FCE
-:108DA000308B007F1165007F2407FF803C1980080D
-:108DB00037290080A124004C3C0808008D083FF4AE
-:108DC000241800023C010800A0384039350F000883
-:108DD0003C010800AC2F3FF4240500103C02800049
-:108DE00034440A009083003C307F002013E00005EB
-:108DF00000A02021240A00013C010800AC2A3FDC2D
-:108E000034A400018FBF00188FB100148FB0001080
-:108E10000080102103E0000827BD00203C0108006D
-:108E2000A4203FE410A0FF94020018210A000CCAFD
-:108E300000C018210A000CC1240300303C050800C2
-:108E40008CA53FDC00B0702B11C0FFA80000000013
-:108E50003C19080097393FE40325C0210307782B0C
-:108E600011E000072CAA00043C0360008C6254044B
-:108E7000305F003F17E0FFE3240400422CAA000407
-:108E80001140FF9A240400420A000D2E8FBF0018E3
-:108E90001528FFB9000000008CCA00183C1F800094
-:108EA00024020002015F1825ACC3001837F90A003C
-:108EB000A0C200689329003C2404000400A01021F3
-:108EC000312800203C010800A02440391100000294
-:108ED00024050010240200013C010800AC223FD40C
-:108EE0000A000D243C0280008F8800288C890060D5
-:108EF0000109282B14A00002010088218C91006038
-:108F00003C048000348B0E008D640018240A00019C
-:108F10000220282102203021A38A001C0E000B8D84
-:108F2000022080210A000CB0AF82002C00045823DC
-:108F300012200007316400033C0E800035C7098011
-:108F400090ED007C31AC000415800019248F0004E2
-:108F50003C010800A4243FFA3C1F080097FF3FFA99
-:108F600003E5C82100D9C02B1300FF6B8F840024B8
-:108F70002CA6000514C0FFA32404004230A2000365
-:108F80001440000200A2182324A3FFFC3C010800A7
-:108F9000AC233FDC3C010800A4203FFA0A000CF19E
-:108FA0000060282100C770240A000D1701C7202681
-:108FB0003C010800A42F3FFA0A000D8200000000C7
-:108FC0003C010800AC203FDC0A000D2D24040042C7
-:108FD0008F8300283C05800034AA0A001460000634
-:108FE00000001021914700302406000530E400FF06
-:108FF000108600030000000003E0000800000000ED
-:10900000914B0048316900FF000941C21500FFFA89
-:109010003C0680083C04080094843FE43C030800BC
-:109020008C633FFC3C1908008F393FDC3C0F080083
-:1090300095EF3FFA0064C0218CCD00040319702124
-:1090400001CF602134AB0E00018D282318A0001D34
-:1090500000000000914F004C8F8C0034956D001083
-:1090600031EE00FF8D89000401AE30238D8A0000AF
-:1090700030CEFFFF000E29000125C8210000382155
-:10908000014720210325182B0083C021AD9900043E
-:10909000AD980000918F000A01CF6821A18D000AD0
-:1090A000956500128F8A0034A5450008954B00385D
-:1090B00025690001A54900389148000D35070008D1
-:1090C000A147000D03E000080000000027BDFFD805
-:1090D000AFB000189388001C8FB000143C0A8000C9
-:1090E0003C197FFF8F8700243738FFFFAFBF002078
-:1090F000AFB1001C355F0A000218182493EB003C46
-:1091000000087FC03C02BFFF006F60252CF000010B
-:109110003449FFFF3C1F08008FFF3FFC8F99003050
-:109120003C18080097183FF2018978240010478006
-:109130003C07EFFF3C05F0FF01E818253C118000DB
-:109140003169002034E2FFFF34ADFFFF362E098085
-:1091500027A500102406000203F96023270B000254
-:10916000354A0E000062182400808021152000027C
-:10917000000040218D48001CA7AB0012058000397B
-:109180002407000030E800FF00083F000067582572
-:109190003C028008AFAB0014344F008091EA0068B5
-:1091A0003C08080091083FF93C09DFFF352CFFFF20
-:1091B000000AF82B3C02080094423FECA3A80011DF
-:1091C000016CC024001FCF40031918258FA7001081
-:1091D000AFA300143C0C0800918C3FFBA7A2001623
-:1091E0008FAB001400ED48243C0F01003C0A0FFF38
-:1091F000012FC82531980003355FFFFF016D402422
-:109200003C027000033F382400181E0000E248258D
-:1092100001037825AFAF0014AFA9001091CC007CFA
-:109220000E000092A3AC0015362D0A0091A6003C5A
-:1092300030C4002010800006260200083C110800FF
-:1092400096313FE8262EFFFF3C010800A42E3FE8A0
-:109250008FBF00208FB1001C8FB0001803E0000802
-:1092600027BD00288F8B002C010B502B5540FFC5CC
-:10927000240700010A000E0E30E800FF9383001C53
-:109280003C02800027BDFFD834480A0000805021EE
-:10929000AFBF002034460AC0010028211060000E34
-:1092A0003444098091070030240B00058F89002089
-:1092B00030EC003F118B000B00003821AFA90010EB
-:1092C0003C0B80088D69006CAFAA00180E00015A93
-:1092D000AFA90014A380001C8FBF002003E000088A
-:1092E00027BD00288D1F00483C1808008F183FDC60
-:1092F0008F9900283C027FFF8D0800443443FFFF14
-:10930000AFA900103C0B80088D69006C03E370244A
-:109310000319782101CF682301A83821AFAA0018CA
-:109320000E00015AAFA900140A000E62A380001CAF
-:109330003C05800034A60A0090C7003C3C060800AB
-:1093400094C63FFA3C0208008C423FF430E3002010
-:10935000000624001060001E004438253C088008E8
-:109360003505008090A30068000048212408000112
-:1093700000002821240400013C0680008CCD0178E7
-:1093800005A0FFFE34CF0140ADE800083C02080014
-:109390008C423FFCA5E50004A5E40006ADE2000C0C
-:1093A0003C04080090843FF93C0380083479008035
-:1093B000A1E40012ADE70014A5E900189338004CB1
-:1093C0003C0E1000A1F8002D03E00008ACCE01789F
-:1093D00034A90E008D28001C3C0C08008D8C3FDC4D
-:1093E000952B0016952A0014018648213164FFFF51
-:1093F0000A000E8A3145FFFF3C04800034830A00D6
-:109400009065003C30A200201040001934870E0007
-:109410000000402100003821000020213C0680008F
-:109420008CC901780520FFFE34CA014034CF010009
-:1094300091EB0009AD4800083C0E08008DCE3FFCC2
-:10944000240DFF91240C00403C081000A5440004AA
-:10945000A5470006AD4E000CA14D0012AD4C001406
-:10946000A5400018A14B002D03E00008ACC801780E
-:109470008CE8001894E6001294E4001030C7FFFF57
-:109480000A000EB33084FFFF3C04800034830A00DE
-:109490009065003C30A200201040002727BDFFF857
-:1094A0002409000100003821240800013C06800046
-:1094B0008CCA01780540FFFE3C0280FF34C40100E5
-:1094C000908D00093C0C0800918C4039A3AD00033D
-:1094D0008FAB00003185007F3459FFFF01665025B6
-:1094E000AFAA00009083000AA3A0000200057E003E
-:1094F000A3A300018FB8000034CB0140240C30003E
-:109500000319702401CF6825AD6D000C27BD00083C
-:10951000AD6C0014A5600018AD690008A5670004D3
-:109520002409FF80A56800063C081000A16900120C
-:1095300003E00008ACC8017834870E008CE90018FD
-:1095400094E6001294E4001030C8FFFF0A000ED722
-:109550003087FFFF27BDFFE0AFB100143C11800052
-:10956000AFB00010AFBF001836380A00970F0032B6
-:10957000363001000E000B8931E43FFF8E0E0000F3
-:10958000240DFF803C04200001C25821016D60249D
-:10959000000C4940316A007F012A4025010438252A
-:1095A0003C048008AE2708303486008090C50068EF
-:1095B0002403000230A200FF104300048F9F00200C
-:1095C0008F990024AC9F0068AC9900648FBF00188D
-:1095D0008FB100148FB0001003E0000827BD0020F9
-:1095E0003C0A0800254A3AA83C09080025293B38CE
-:1095F0003C08080025082F443C07080024E73C04E9
-:109600003C06080024C6392C3C05080024A53680F9
-:109610003C040800248432843C030800246339E0BD
-:109620003C0208002442377C3C010800AC2A3FB8C9
-:109630003C010800AC293FB43C010800AC283FB015
-:109640003C010800AC273FBC3C010800AC263FCCE5
-:109650003C010800AC253FC43C010800AC243FC0DD
-:109660003C010800AC233FD03C010800AC223FC8BD
-:0896700003E000080000000007
-:08967800800009408000090098
-:10968000800801008008008080080000800E000033
-:10969000800800808008000080000A8080000A00A6
-:0896A000800009808000090030
+:1006A00010B00037240F000B14AFFE23000731C039
+:1006B000312600FF00065600000A4E0305220047BF
+:1006C00030C6007F0006F8C03C16080126D69620EA
+:1006D00003F68021A2000001A20000003C0F600090
+:1006E0008DF918202405000100C588040011302769
+:1006F0000326C024000731C000C03821ADF81820FF
+:100700000A0013C8A60000028F850020000731C030
+:1007100024A2FFFF0A0013F6AF8200200A0014B2E1
+:100720002415002011E0FECC3C1980003728010080
+:100730009518001094B6000233120FFF16D2FEC6B1
+:10074000000000000A00148290A900013C0B080080
+:100750008D6B0038256DFFFF15600002018D1024A0
+:10076000000010213C080800250800380048C0217E
+:10077000930F000425EE00040A0014C5000E21C0EA
+:1007800000065202241F00FF115FFDEB000731C07D
+:10079000000A20C03C0E080125CE9620008EA8211C
+:1007A000009E602100095C02240D00013C076000EE
+:1007B000A2AD0000AD860000A2AB00018CF21820B3
+:1007C00024030001014310040242B025ACF61820B6
+:1007D00000C038210A0013C8A6A900020A0015AA01
+:1007E000AF8000200A0012FFAF84002C8F85000428
+:1007F0003C1980002408000337380180A308000B4F
+:100800000A00144D3C088000A2F8000B0A00155A9B
+:100810002419BFFF8F9600042412FFFE0A00144B18
+:1008200002D228242416FFFE0A00155800B62824F8
+:100830003C038000346401008C85000030A2003E3F
+:100840001440000800000000AC6000488C870000E5
+:1008500030E607C010C0000500000000AC60004C8E
+:10086000AC60005003E0000824020001AC600054BA
+:10087000AC6000408C880000310438001080FFF923
+:10088000000000002402000103E00008AC60004406
+:100890003C0380008C6201B80440FFFE3467018095
+:1008A000ACE4000024080001ACE00004A4E500086A
+:1008B00024050002A0E8000A34640140A0E5000B12
+:1008C0009483000A14C00008A4E30010ACE00024E4
+:1008D0003C07800034E901803C041000AD20002872
+:1008E00003E00008ACE401B88C8600043C0410006E
+:1008F000ACE600243C07800034E90180AD200028EC
+:1009000003E00008ACE401B83C0680008CC201B8EA
+:100910000440FFFE34C7018024090002ACE400005B
+:10092000ACE40004A4E50008A0E9000A34C50140D5
+:10093000A0E9000B94A8000A3C041000A4E80010F1
+:10094000ACE000248CA30004ACE3002803E0000822
+:10095000ACC401B83C039000346200010082202541
+:100960003C038000AC6400208C65002004A0FFFEE6
+:100970000000000003E00008000000003C028000CE
+:10098000344300010083202503E00008AC4400202C
+:1009900027BDFFE03C098000AFBF0018AFB10014D5
+:1009A000AFB00010352801408D10000091040009FF
+:1009B0009107000891050008308400FF30E600FF31
+:1009C00000061A002C820081008330251040002A86
+:1009D00030A50080000460803C0D080125AD928C9C
+:1009E000018D58218D6A00000140000800000000C0
+:1009F0003C038000346201409445000A14A0001EAC
+:100A00008F91FCBC9227000530E6000414C0001A48
+:100A1000000000000E00164E02002021922A000560
+:100A200002002021354900040E001658A2290005B5
+:100A30009228000531040004148000020000000028
+:100A40000000000D922D0000240B002031AC00FFAF
+:100A5000158B00093C0580008CAE01B805C0FFFE77
+:100A600034B10180AE3000003C0F100024100005AE
+:100A7000A230000BACAF01B80000000D8FBF001812
+:100A80008FB100148FB0001003E0000827BD0020D4
+:100A90000200202100C028218FBF00188FB1001450
+:100AA0008FB00010240600010A00161D27BD00208B
+:100AB0000000000D0200202100C028218FBF001877
+:100AC0008FB100148FB00010000030210A00161DF5
+:100AD00027BD002014A0FFE8000000000200202134
+:100AE0008FBF00188FB100148FB0001000C02821F4
+:100AF0000A00163B27BD00203C0780008CEE01B8A1
+:100B000005C0FFFE34F00180241F0002A21F000B6D
+:100B100034F80140A60600089719000A3C0F10009F
+:100B2000A61900108F110004A6110012ACEF01B835
+:100B30000A0016998FBF001827BDFFE8AFBF00104D
+:100B40000E000FD4000000003C0280008FBF001098
+:100B500000002021AC4001800A00108F27BD001842
+:100B60003084FFFF30A5FFFF108000070000182130
+:100B7000308200011040000200042042006518216C
+:100B80001480FFFB0005284003E0000800601021EE
+:100B900010C00007000000008CA2000024C6FFFF68
+:100BA00024A50004AC82000014C0FFFB24840004D0
+:100BB00003E000080000000010A0000824A3FFFFCD
+:100BC000AC86000000000000000000002402FFFFCF
+:100BD0002463FFFF1462FFFA2484000403E000088A
+:100BE000000000003C03800027BDFFF83462018054
+:100BF000AFA20000308C00FF30AD00FF30CE00FF10
+:100C00003C0B80008D6401B80480FFFE00000000F2
+:100C10008FA900008D6801288FAA00008FA700000F
+:100C20008FA400002405000124020002A085000A10
+:100C30008FA30000359940003C051000A062000B16
+:100C40008FB800008FAC00008FA600008FAF0000AF
+:100C500027BD0008AD280000AD400004AD80002491
+:100C6000ACC00028A4F90008A70D0010A5EE0012E2
+:100C700003E00008AD6501B83C06800827BDFFE829
+:100C800034C50080AFBF001090A7000924020012F5
+:100C900030E300FF1062000B008030218CA8005070
+:100CA00000882023048000088FBF00108CAA003425
+:100CB000240400390000282100CA4823052000052B
+:100CC000240600128FBF00102402000103E0000878
+:100CD00027BD00180E0016F2000000008FBF0010A4
+:100CE0002402000103E0000827BD001827BDFFC84B
+:100CF000AFB20030AFB00028AFBF0034AFB1002CAE
+:100D000000A0802190A5000D30A6001010C000109A
+:100D1000008090213C0280088C4400048E0300086F
+:100D20001064000C30A7000530A6000510C0009329
+:100D3000240400018FBF00348FB200308FB1002C2B
+:100D40008FB000280080102103E0000827BD003884
+:100D500030A7000510E0000F30AB001210C00006F5
+:100D6000240400013C0980088E0800088D25000439
+:100D70005105009C240400388FBF00348FB200302E
+:100D80008FB1002C8FB000280080102103E00008F4
+:100D900027BD0038240A0012156AFFE6240400016A
+:100DA0000200202127A500100E000CB6AFA00010F5
+:100DB0001440007C3C19800837240080909800087B
+:100DC000331100081220000A8FA7001030FF010025
+:100DD00013E000A48FA300148C8600580066102333
+:100DE000044000043C0A8008AC8300588FA7001020
+:100DF0003C0A800835480080910900083124000829
+:100E00001480000224080003000040213C1F8008D9
+:100E100093F1001193F9001237E600808CCC005456
+:100E2000333800FF03087821322D00FF000F708057
+:100E300001AE282100AC582B1160006F00000000AB
+:100E400094CA005C8CC900543144FFFF0125102373
+:100E50000082182B14600068000000008CCB005446
+:100E60000165182330EC00041180006C000830800C
+:100E70008FA8001C0068102B1040006230ED0004A9
+:100E8000006610232C46008010C00002004088211C
+:100E9000241100800E00164E024020213C0D8008D7
+:100EA00035A6008024070001ACC7000C90C80008DC
+:100EB0000011484035A70100310C007FA0CC00088C
+:100EC0008E05000424AB0001ACCB0030A4D1005C43
+:100ED0008CCA003C9602000E01422021ACC40020C6
+:100EE0008CC3003C0069F821ACDF001C8E190004A3
+:100EF000ACF900008E180008ACF800048FB10010A7
+:100F0000322F000855E0004793A60020A0C0004EF5
+:100F100090D8004E2411FFDFA0F8000890CF000801
+:100F200001F17024A0CE00088E0500083C0B80085B
+:100F300035690080AD2500388D6A00148D2200309F
+:100F40002419005001422021AD24003491230000D7
+:100F5000307F00FF13F90036264F01000E001658AF
+:100F60000240202124040038000028210E0016F23F
+:100F70002406000A0A001757240400010E000D2859
+:100F8000000020218FBF00348FB200308FB1002CC1
+:100F90008FB00028004020210080102103E00008CD
+:100FA00027BD00388E0E00083C0F800835F0008009
+:100FB000AE0E005402402021AE0000300E00164E4E
+:100FC00000000000920D00250240202135AC0020D9
+:100FD0000E001658A20C00250E000CAC0240202179
+:100FE000240400382405008D0E0016F22406001299
+:100FF0000A0017572404000194C5005C0A001792E8
+:1010000030A3FFFF2407021811A0FF9E00E6102363
+:101010008FAE001C0A00179A01C610230A0017970A
+:101020002C620218A0E600080A0017C48E0500080A
+:101030002406FF8001E6C0243C118000AE38002861
+:101040008E0D000831E7007F3C0E800C00EE602121
+:10105000AD8D00E08E080008AF8C00380A0017D074
+:10106000AD8800E4AC800058908500082403FFF7A9
+:1010700000A33824A08700080A0017758FA7001066
+:101080003C05080024A560A83C04080024846FF4F3
+:101090003C020800244260B0240300063C01080121
+:1010A000AC2596A03C010801AC2496A43C010801A3
+:1010B000AC2296A83C010801A02396AC03E00008EE
+:1010C0000000000003E00008240200013C02800050
+:1010D000308800FF344701803C0680008CC301B893
+:1010E0000460FFFE000000008CC501282418FF806A
+:1010F0003C0D800A24AF010001F8702431EC007F20
+:10110000ACCE0024018D2021ACE50000948B00EAD8
+:101110003509600024080002316AFFFFACEA0004D0
+:1011200024020001A4E90008A0E8000BACE00024C0
+:101130003C071000ACC701B8AF84003803E00008DA
+:10114000AF85006C938800488F8900608F820038DB
+:1011500030C600FF0109382330E900FF01221821C1
+:1011600030A500FF2468008810C000020124382147
+:101170000080382130E400031480000330AA00030B
+:101180001140000D312B000310A0000900001021B8
+:1011900090ED0000244E000131C200FF0045602B9D
+:1011A000A10D000024E700011580FFF925080001CA
+:1011B00003E00008000000001560FFF300000000DD
+:1011C00010A0FFFB000010218CF80000245900043F
+:1011D000332200FF0045782BAD18000024E70004FF
+:1011E00015E0FFF92508000403E0000800000000F6
+:1011F00093850048938800588F8700600004320070
+:101200003103007F00E5102B30C47F001040000F39
+:10121000006428258F8400383C0980008C8A00EC0B
+:10122000AD2A00A43C03800000A35825AC6B00A0AD
+:101230008C6C00A00580FFFE000000008C6D00ACEF
+:10124000AC8D00EC03E000088C6200A80A00188254
+:101250008F840038938800593C0280000080502120
+:10126000310300FEA383005930ABFFFF30CC00FFF9
+:1012700030E7FFFF344801803C0980008D2401B82D
+:101280000480FFFE8F8D006C24180016AD0D000049
+:101290008D2201248F8D0038AD0200048D5900206D
+:1012A000A5070008240201C4A119000AA118000B17
+:1012B000952F01208D4E00088D4700049783005C18
+:1012C0008D59002401CF302100C7282100A32023FD
+:1012D0002418FFFFA504000CA50B000EA5020010AA
+:1012E000A50C0012AD190018AD18002495AF00E848
+:1012F0003C0B10002407FFF731EEFFFFAD0E002876
+:101300008DAC0084AD0C002CAD2B01B88D460020B7
+:1013100000C7282403E00008AD4500208F8800386E
+:101320000080582130E7FFFF910900D63C02800081
+:1013300030A5FFFF312400FF00041A00006750258C
+:1013400030C600FF344701803C0980008D2C01B875
+:101350000580FFFE8F82006C240F0017ACE20000B6
+:101360008D390124ACF900048D780020A4EA00082E
+:10137000241901C4A0F8000AA0EF000B9523012056
+:101380008D6E00088D6D00049784005C01C35021B0
+:10139000014D602101841023A4E2000CA4E5000E9D
+:1013A000A4F90010A4E60012ACE000148D7800242B
+:1013B000240DFFFFACF800188D0F007CACEF001C73
+:1013C0008D0E00783C0F1000ACEE0020ACED002438
+:1013D000950A00BE240DFFF73146FFFFACE600285A
+:1013E000950C00809504008231837FFF0003CA00C2
+:1013F0003082FFFF0322C021ACF8002CAD2F01B8D2
+:10140000950E00828D6A002000AE3021014D282407
+:10141000A506008203E00008AD6500203C028000C4
+:10142000344501803C0480008C8301B80460FFFED9
+:101430008F8A0044240600199549001C3128FFFFBB
+:10144000000839C0ACA70000A0A6000B3C051000A6
+:1014500003E00008AC8501B88F87004C0080402174
+:1014600030C400FF3C0680008CC201B80440FFFE7F
+:101470008F89006C9383006834996000ACA90000E8
+:10148000A0A300058CE20010240F00022403FFF744
+:10149000A4A20006A4B900088D180020A0B8000A74
+:1014A000A0AF000B8CEE0000ACAE00108CED000481
+:1014B000ACAD00148CEC001CACAC00248CEB002018
+:1014C000ACAB00288CEA002C3C071000ACAA002C26
+:1014D0008D090024ACA90018ACC701B88D05002007
+:1014E00000A3202403E00008AD0400208F8600380C
+:1014F00027BDFFE0AFB10014AFBF0018AFB00010C0
+:1015000090C300D430A500FF3062002010400008D6
+:10151000008088218CCB00D02409FFDF256A0001E0
+:10152000ACCA00D090C800D401093824A0C700D4A8
+:1015300014A000403C0C80008F840038908700D4B9
+:101540002418FFBF2406FFEF30E3007FA08300D400
+:10155000979F005C8F8200608F8D003803E2C82364
+:10156000A799005CA5A000BC91AF00D401F870243D
+:10157000A1AE00D48F8C0038A18000D78F8A0038AC
+:10158000A5400082AD4000EC914500D400A658244F
+:10159000A14B00D48F9000348F8400609786005C4C
+:1015A0000204282110C0000FAF850034A38000582A
+:1015B0003C0780008E2C000894ED01208E2B000447
+:1015C000018D5021014B8021020620233086FFFF30
+:1015D00030C8000F3909000131310001162000091F
+:1015E000A3880058938600488FBF00188FB100145D
+:1015F0008FB0001027BD0020AF85006403E0000815
+:10160000AF86006000C870238FBF00189386004823
+:101610008FB100148FB0001034EF0C00010F28219F
+:1016200027BD0020ACEE0084AF85006403E0000815
+:10163000AF86006035900180020028210E00190F4E
+:10164000240600828F840038908600D430C5004084
+:1016500050A0FFBAA38000688F85004C3C06800034
+:101660008CCD01B805A0FFFE8F89006C2408608234
+:1016700024070002AE090000A6080008A207000B1C
+:101680008CA300083C0E1000AE0300108CA2000CCE
+:10169000AE0200148CBF0014AE1F00188CB90018E5
+:1016A000AE1900248CB80024AE1800288CAF002896
+:1016B000AE0F002CACCE01B80A001948A380006818
+:1016C0008F8A003827BDFFE0AFB10014AFB0001023
+:1016D0008F880060AFBF00189389003C954200BC22
+:1016E00030D100FF0109182B0080802130AC00FFB1
+:1016F0003047FFFF0000582114600003310600FF4F
+:1017000001203021010958239783005C0068202BB9
+:101710001480002700000000106800562419000102
+:101720001199006334E708803165FFFF0E0018C08F
+:10173000020020218F83006C3C07800034E601808A
+:101740003C0580008CAB01B80560FFFE240A001840
+:101750008F840038ACC30000A0CA000B948900BE7F
+:101760003C081000A4C90010ACC00030ACA801B8FF
+:101770009482008024430001A4830080949F008011
+:101780003C0608008CC6318833EC7FFF1186005E72
+:101790000000000002002021022028218FBF001835
+:1017A0008FB100148FB000100A00193427BD00203B
+:1017B000914400D42403FF8000838825A15100D4E4
+:1017C0009784005C3088FFFF51000023938C003C1D
+:1017D0008F8500382402EFFF008B782394AE00BC85
+:1017E0000168502B31E900FF01C26824A4AD00BCA0
+:1017F00051400039010058213C1F800037E60100AC
+:101800008CD800043C190001031940245500000144
+:1018100034E740008E0A00202403FFFB241100015E
+:1018200001432024AE0400201191002D34E78000F4
+:1018300002002021012030210E0018C03165FFFF79
+:101840009787005C8F890060A780005C0127802358
+:10185000AF900060938C003C8F8B00388FBF0018D6
+:101860008FB100148FB0001027BD002003E00008E6
+:10187000A16C00D73C0D800035AA01008D48000402
+:101880003C0900010109282454A0000134E740006C
+:101890008E0F00202418FFFB34E7800001F870242D
+:1018A00024190001AE0E00201599FF9F34E708802F
+:1018B000020020210E00188E3165FFFF020020215A
+:1018C000022028218FBF00188FB100148FB00010A4
+:1018D0000A00193427BD00200A0019F7000048212A
+:1018E00002002021012030210E00188E3165FFFFFB
+:1018F0009787005C8F890060A780005C01278023A8
+:101900000A001A0EAF900060948C0080241F8000A3
+:10191000019F3024A4860080908B0080908F0080EF
+:10192000316700FF0007C9C20019C027001871C045
+:1019300031ED007F01AE2825A08500800A0019DF67
+:1019400002002021938500682403000127BDFFE8E1
+:1019500000A330042CA20020AFB00010AFBF0014D1
+:1019600000C01821104000132410FFFE3C0708009F
+:101970008CE7319000E610243C088000350501809A
+:1019800014400005240600848F890038240A0004CE
+:101990002410FFFFA12A00FC0E00190F0000000018
+:1019A000020010218FBF00148FB0001003E0000868
+:1019B00027BD00183C0608008CC631940A001A574F
+:1019C00000C310248F87004427BDFFE0AFB200188A
+:1019D000AFB10014AFB00010AFBF001C30D000FF9B
+:1019E00090E6000D00A088210080902130C5007F86
+:1019F000A0E5000D8F8500388E2300188CA200D042
+:101A00001062002E240A000E0E001A4AA38A0068F3
+:101A10002409FFFF104900222404FFFF5200002088
+:101A2000000020218E2600003C0C001000CC582421
+:101A3000156000393C0E000800CE682455A0003F18
+:101A4000024020213C18000200D880241200001F10
+:101A50003C0A00048F8700448CE200148CE30010E1
+:101A60008CE500140043F82303E5C82B1320000580
+:101A7000024020218E24002C8CF1001010910031A6
+:101A80000240202124020012A38200680E001A4A9C
+:101A90002412FFFF105200022404FFFF0000202147
+:101AA0008FBF001C8FB200188FB100148FB00010D0
+:101AB0000080102103E0000827BD002090A800D47A
+:101AC000350400200A001A80A0A400D400CA4824CB
+:101AD0001520000B8F8B00448F8D00448DAC0010BF
+:101AE0001580000B024020218E2E002C51C0FFECEF
+:101AF00000002021024020210A001A9B2402001726
+:101B00008D66001050C0FFE6000020210240202119
+:101B10000A001A9B24020011024020212402001511
+:101B20000E001A4AA3820068240FFFFF104FFFDC4B
+:101B30002404FFFF0A001A8A8E2600000A001AC138
+:101B4000240200143C08000400C8382450E0FFD4EC
+:101B500000002021024020210A001A9B24020013C9
+:101B60008F85003827BDFFD8AFB3001CAFB2001877
+:101B7000AFB10014AFB00010AFBF002090A700D4E9
+:101B80008F90004C2412FFFF34E2004092060000C8
+:101B9000A0A200D48E0300100080982110720006CD
+:101BA00030D1003F2408000D0E001A4AA3880068B7
+:101BB000105200252404FFFF8F8A00388E09001878
+:101BC0008D4400D01124000702602021240C000E57
+:101BD0000E001A4AA38C0068240BFFFF104B001A5A
+:101BE0002404FFFF24040020122400048F8D0038F9
+:101BF00091AF00D435EE0020A1AE00D48F85005403
+:101C000010A00019000000001224004A8F9800382C
+:101C10008F92FCBC971000809651000A5230004809
+:101C20008F9300403C1F08008FFF318C03E5C82BC9
+:101C30001720001E02602021000028210E0019A993
+:101C400024060001000020218FBF00208FB3001C5C
+:101C50008FB200188FB100148FB0001000801021D7
+:101C600003E0000827BD00285224002A8E05001436
+:101C70008F840038948A008025490001A48900805F
+:101C8000948800803C0208008C42318831077FFF35
+:101C900010E2000E00000000026020210E00193446
+:101CA000240500010A001B0B000020212402002D46
+:101CB0000E001A4AA38200682403FFFF1443FFE1C9
+:101CC0002404FFFF0A001B0C8FBF002094990080A2
+:101CD000241F800024050001033FC024A498008035
+:101CE00090920080908E0080325100FF001181C2DE
+:101CF00000107827000F69C031CC007F018D582576
+:101D0000A08B00800E001934026020210A001B0BFA
+:101D1000000020212406FFFF54A6FFD68F84003840
+:101D2000026020210E001934240500010A001B0B5B
+:101D300000002021026020210A001B252402000A45
+:101D40002404FFFD0A001B0BAF9300608F8800384E
+:101D500027BDFFE8AFB00010AFBF0014910A00D458
+:101D60008F87004C00808021354900408CE60010B0
+:101D7000A10900D43C0208008C4231B030C53FFFBD
+:101D800000A2182B106000078F850050240DFF80E3
+:101D900090AE000D01AE6024318B00FF156000088D
+:101DA0000006C382020020212403000D8FBF00140F
+:101DB0008FB0001027BD00180A001A4AA3830068DC
+:101DC00033060003240F000254CFFFF70200202146
+:101DD00094A2001C8F85003824190023A4A200E8D7
+:101DE0008CE8000000081E02307F003F13F9003528
+:101DF0003C0A00838CE800188CA600D0110600086D
+:101E0000000000002405000E0E001A4AA385006899
+:101E10002407FFFF104700182404FFFF8F850038B8
+:101E200090A900D435240020A0A400D48F8C0044B5
+:101E3000918E000D31CD007FA18D000D8F83005458
+:101E40001060001C020020218F8400508C9800102C
+:101E50000303782B11E0000D241900180200202143
+:101E6000A39900680E001A4A2410FFFF10500002C8
+:101E70002404FFFF000020218FBF00148FB000104A
+:101E80000080102103E0000827BD00188C86001098
+:101E90008F9F00440200202100C31023AFE20010F6
+:101EA000240500010E0019A9240600010A001B9751
+:101EB000000020210E001934240500010A001B97A0
+:101EC00000002021010A5824156AFFD98F8C004494
+:101ED000A0A600FC0A001B84A386005A30A500FFC0
+:101EE0002406000124A9000100C9102B1040000C99
+:101EF00000004021240A000100A61823308B0001B5
+:101F000024C60001006A3804000420421160000267
+:101F100000C9182B010740251460FFF800A61823FC
+:101F200003E000080100102127BDFFD8AFB0001862
+:101F30008F90004CAFB1001CAFBF00202403FFFF07
+:101F40002411002FAFA30010920600002405000802
+:101F500026100001006620260E001BB0308400FF12
+:101F600000021E003C021EDC34466F410A001BD8F2
+:101F70000000102110A00009008018212445000154
+:101F800030A2FFFF2C4500080461FFFA0003204047
+:101F90000086202614A0FFF9008018210E001BB037
+:101FA000240500208FA300102629FFFF313100FFF8
+:101FB00000034202240700FF1627FFE20102182651
+:101FC00000035027AFAA0014AFAA00100000302170
+:101FD00027A8001027A7001400E6782391ED00033E
+:101FE00024CE000100C8602131C600FF2CCB0004C4
+:101FF0001560FFF9A18D00008FA200108FBF002097
+:102000008FB1001C8FB0001803E0000827BD002826
+:1020100027BDFFD0AFB3001CAFB00010AFBF00288A
+:10202000AFB50024AFB40020AFB20018AFB10014B8
+:102030003C0C80008D880128240FFF803C06800A1C
+:1020400025100100250B0080020F68243205007F57
+:10205000016F7024AD8E009000A62821AD8D002464
+:1020600090A600FC3169007F3C0A8004012A1821F7
+:10207000A386005A9067007C00809821AF830030CF
+:1020800030E20002AF88006CAF85003800A0182154
+:10209000144000022404003424040030A3840048C7
+:1020A0008C7200DC30D100FF24040004AF92006089
+:1020B00012240004A38000688E7400041680001EA1
+:1020C0003C0880009386005930C7000150E0000FA3
+:1020D0008F8600608CA400848CA800842413FF8069
+:1020E00000936024000C49403110007F01307825B6
+:1020F0003C19200001F9682530DF00FE3C03800018
+:10210000AC6D0830A39F00598F8600608FBF0028F8
+:102110008FB500248FB400208FB3001C8FB200183D
+:102120008FB100148FB000102402000127BD0030D1
+:1021300003E00008ACA600DC8E7F000895020120B9
+:102140008E67001003E2C8213326FFFF30D8000F4E
+:1021500033150001AF87003416A00058A39800582B
+:1021600035090C000309382100D81823AD03008479
+:10217000AF8700648E6A00043148FFFF1100007EC3
+:10218000A78A005C90AC00D42407FF8000EC3024C8
+:1021900030CB00FF1560004B9786005C938E005A91
+:1021A000240D000230D5FFFF11CD02A20000A021B6
+:1021B0008F85006002A5802B160000BC9388004824
+:1021C0003C11800096240120310400FF1485008812
+:1021D0008F8400648F9800343312000356400085CA
+:1021E00030A500FF8F900064310C00FF24060034FE
+:1021F00011860095AF90004C9204000414800119E0
+:102200008F8E0038A380003C8E0D00048DC800D84E
+:102210003C0600FF34CCFFFF01AC30240106182B34
+:1022200014600121AF8600548F8700609798005C8E
+:10223000AF8700400307402310C000C7A788005C99
+:102240008F91003030C3000300035823922A007C92
+:102250003171000302261021000A20823092000111
+:102260000012488000492821311FFFFF03E5C82BD9
+:10227000132001208F8800388F8500348F880064F8
+:102280001105025A3C0E3F018E0600003C0C250051
+:1022900000CE682411AC01638F84004C30E500FF50
+:1022A0000E00184A000030218F8800388F870060A8
+:1022B0008F8500340A001DB78F8600540A001C5613
+:1022C000AF87006490AC00D400EC2024309000FF75
+:1022D000120000169386005990B5008890B400D77C
+:1022E00024A8008832A2003F2446FFE02CD1002021
+:1022F000A394003C1220000CAF88004C240E000177
+:1023000000CE2004308A00191540012B3C068000C5
+:1023100034D80002009858241560022E3092002014
+:1023200016400234000000009386005930CE0001B0
+:1023300011C0000F9788005C8CA900848CAF0084CA
+:102340002410FF800130C8240019194031ED007FAE
+:10235000006D38253C1F200000FF902530CB00FE8B
+:102360003C188000AF120830A38B00599788005C9E
+:102370001500FF84000000008E630020306C000414
+:102380001180FF51938600592404FFFB0064302420
+:102390003C038000AE660020346601808C7301B877
+:1023A0000660FFFE8F8E006C346A01003C15000150
+:1023B000ACCE00008C62012424076085ACC200040E
+:1023C0008D54000402958824522000012407608364
+:1023D000241200023C1810003C0B8000A4C7000827
+:1023E000A0D2000BAD7801B80A001C2B93860059CF
+:1023F00030A500FF0E00184A240600018F88006CEB
+:102400003C05800034A90900250201889388004812
+:10241000304A0007304B00783C0340802407FF809F
+:102420000163C825014980210047F824310C00FFD1
+:1024300024060034ACBF0800AF90004CACB90810C3
+:102440005586FF6E920400048F8400388E11003090
+:10245000908E00D431CD001015A000108F83006045
+:102460002C6F000515E000E400000000909800D4F7
+:102470002465FFFC331200101640000830A400FF52
+:102480008F9F00648F99003413F90004388700018E
+:1024900030E20001144001C8000000000E001BC320
+:1024A000000000000A001DF8000000008F84006496
+:1024B00030C500FF0E00184A24060001938E004824
+:1024C000240A003411CA00A08F8500388F8600606E
+:1024D0009783005C3062FFFF00C28823AF910060E9
+:1024E000A780005C1280FF90028018212414FFFD59
+:1024F0005474FFA28E6300208E6900042403FFBF82
+:10250000240BFFEF0135C823AE79000490AF00D44F
+:1025100031ED007FA0AD00D48E6600208F9800388A
+:10252000A780005C34DF0002AE7F0020A70000BC63
+:10253000931200D402434024A30800D48F9500389E
+:10254000AEA000EC92AE00D401CB5024A2AA00D4DD
+:102550000A001CD78F8500388F910034AF8000604F
+:1025600002275821AF8B0034000020212403FFFFF5
+:10257000108301B48F8500388E0C00103C0D0800CC
+:102580008DAD31B09208000031843FFF008D802B6B
+:1025900012000023310D003F3C1908008F3931A88B
+:1025A0008F9F006C000479802408FF80033F202166
+:1025B000008FC821938500590328F8243C06008029
+:1025C0003C0F800034D80001001F91403331007F60
+:1025D0008F8600380251502535EE0940332B0078A4
+:1025E000333000073C0310003C02800C017890253A
+:1025F000020E48210143C0250222382134AE0001D9
+:10260000ADFF0804AF890050ADF20814AF87004455
+:10261000ADFF0028ACD90084ADF80830A38E005976
+:102620009383005A240700035067002825A3FFE086
+:10263000240C0001146CFFAB8F850038241100239B
+:1026400011B10084000000002402000B0260202170
+:102650000E001A4AA38200680040A0210A001D3221
+:102660008F85003802602021240B000C0E001A4ACE
+:10267000A38B0068240AFFFF104AFFBC2404FFFF5D
+:102680008F8E0038A380003C8E0D00048DC800D8CA
+:102690003C0600FF34CCFFFF01AC30240106182BB0
+:1026A0001060FEE1AF860054026020212412001960
+:1026B0000E001A4AA3920068240FFFFF104FFFABD1
+:1026C0002404FFFF0A001C838F8600542C74002012
+:1026D0001280FFDE2402000B000328803C11080159
+:1026E0002631949000B148218D2D000001A00008F2
+:1026F000000000008F85003400A710219385003C66
+:10270000AF82003402251821A383003C951F00BC32
+:102710000226282137F91000A51900BC5240FF926B
+:10272000AF850060246A0004A38A003C950900BCC0
+:1027300024A40004AF84006035322000A51200BC40
+:102740000A001D54000020218F8600602CCB00055C
+:102750001560FF609783005C3072FFFF00D240235A
+:102760002D18000513000003306400FF24DFFFFC78
+:1027700033E400FF8F8500648F86003410A60004C8
+:10278000388F000131ED000115A001380000000074
+:102790008F840038908C00D435870010A08700D437
+:1027A0008F8500388F8600609783005CACA000ECBA
+:1027B0000A001D2F3062FFFF8CAA00848CB50084B4
+:1027C0003C041000014710240002894032B4007F0D
+:1027D0000234302500C460253C0880002405000137
+:1027E00002602021240600010E0019A9AD0C08305A
+:1027F0000A001CC38F8500388C8200EC1222FE7EFA
+:102800000260202124090005A38900680E001A4AED
+:102810002411FFFF1451FE782404FFFF0A001D5508
+:102820002403FFFF8F8F004C8F8800388DF8000045
+:10283000AD1800888DE70010AD0700988F87006005
+:102840000A001DB78F8600542407FFFF118700057B
+:10285000000000000E001B4C026020210A001D90A9
+:102860000040A0210E001AD1026020210A001D9014
+:102870000040A0218F90004C3C0908008D2931B008
+:102880008E11001032323FFF0249682B11A0000C5C
+:10289000240AFF808F85005090AE000D014E102459
+:1028A000304C00FF11800007026020210011C3821C
+:1028B00033030003240B0001106B0105000000002E
+:1028C000026020212418000D0E001A4AA398006807
+:1028D000004020218F8500380A001D320080A02191
+:1028E0008F90004C3C0A08008D4A31B08F85005013
+:1028F0008E0400100000A0218CB1001430823FFF34
+:10290000004A602B8CB200205180FFEE0260202133
+:1029100090B8000D240BFF800178702431C300FFB4
+:102920005060FFE80260202100044382310600036A
+:1029300014C0FFE40260202194BF001C8F9900386E
+:102940008E060028A73F00E88CAF0010022F20233E
+:1029500014C40139026020218F83005400C3682110
+:10296000022D382B14E00135240200188F8A004410
+:102970008F820030024390218D4B00100163702341
+:10298000AD4E0010AD5200208C4C00740192282BEB
+:1029900014A00156026020218F8400508E0800246C
+:1029A0008C86002411060007026020212419001CD7
+:1029B0000E001A4AA3990068240FFFFF104FFFC5AD
+:1029C0002404FFFF8F8400448C87002424FF00012F
+:1029D000AC9F0024125101338F8D00308DB10074F3
+:1029E000123201303C0B00808E0E000001CB5024CF
+:1029F00015400075000000008E0300142411FFFF35
+:102A000010710006026020212418001B0E001A4AD3
+:102A1000A39800681051FFAF2404FFFF8E0300004D
+:102A20003C0800010068302410C000133C04008002
+:102A30000064A024168000090200282102602021E1
+:102A40002419001A0E001A4AA3990068240FFFFFE8
+:102A5000104FFFA02404FFFF020028210260202164
+:102A60000E001A6A240600012410FFFF1050FF997F
+:102A70002404FFFF241400018F9F004402602021E2
+:102A80000280302197F1003424050001262700013F
+:102A9000A7E700340E0019A9000000000000202163
+:102AA0008F8500380A001D320080A0218F90004CD5
+:102AB0003C1408008E9431B08E07001030E83FFFC0
+:102AC0000114302B10C000618F860050241FFF803E
+:102AD00090C5000D03E52024309200FF5240005CB9
+:102AE000026020218F8D005411A0000700078B8207
+:102AF0008F8500388F89FCBC94AF00809539000A1F
+:102B0000132F00F68F870040322C000315800063DE
+:102B10000000000092020002104000D700000000F8
+:102B20008E0A0024154000D8026020219204000380
+:102B300024060002308800FF15060005308500FFDE
+:102B40008F940054528000F202602021308500FFF3
+:102B500038AD00102DA400012CBF000103E4302586
+:102B6000020028210E001A6A026020212410FFFFB3
+:102B7000105000BE8F8500388F830054106000C451
+:102B8000240500013C1908008F39318C0323782B70
+:102B900015E000B12409002D026020210000282149
+:102BA0000E0019A9240600018F85003800001821A5
+:102BB0000A001D320060A0210E0018750000000000
+:102BC0000A001DF800000000AC8000200A001E78FA
+:102BD0008E03001400002821026020210E0019A994
+:102BE000240600010A001CC38F8500380A001DB7A7
+:102BF0008F8800388CB000848CB900843C031000AE
+:102C00000207482400096940332F007F01AFF825EF
+:102C100003E32825ACC50830910700012405000115
+:102C2000026020210E0019A930E600010A001CC331
+:102C30008F850038938F00482403FFFD0A001D3460
+:102C4000AF8F00600A001D342403FFFF02602021C3
+:102C50002410000D0E001A4AA390006800401821AD
+:102C60008F8500380A001D320060A0210E00187503
+:102C7000000000009783005C8F8600603070FFFFCB
+:102C800000D048232D3900051320FE128F8500380F
+:102C9000ACA200EC0A001D2F3062FFFF90C3000DB4
+:102CA000307800085700FFA2920400030260202140
+:102CB000240200100E001A4AA38200682403FFFFBA
+:102CC0005443FF9B920400030A001F128F850038B3
+:102CD00090A8000D3106000810C000958F94005494
+:102CE0001680009E026020218E0F000C8CA4002014
+:102CF00055E40005026020218E1F00088CB90024D5
+:102D000013F9003A02602021240200200E001A4A22
+:102D1000A38200682405FFFF1045FEEE2404FFFF98
+:102D20008F8F0044240CFFF72403FF8091E9000DEE
+:102D30003C14800E3C0B8000012CC824A1F9000D2E
+:102D40008F8F00303C0708008CE731AC8F8D006C12
+:102D500095E500788F99004400ED902130BF7FFF0A
+:102D6000001F20400244302130C8007F00C3C0242F
+:102D700001147021AD78002CA5D100008F2A002805
+:102D800025420001AF2200288F29002C8E0C002C38
+:102D9000012C6821AF2D002C8E07002CAF270030AE
+:102DA0008E050014AF250034973F003A27E4000158
+:102DB000A724003A95F200783C1008008E1031B03C
+:102DC0002643000130717FFF1230005C006030212B
+:102DD0008F83003002602021240500010E00193489
+:102DE000A46600780A001EA1000020218E070014AE
+:102DF0002412FFFF10F200638F8C00388E09001838
+:102E00008D8D00D0152D005D026020218E0A0024DA
+:102E10008CA2002811420053240200210E001A4AFD
+:102E2000A38200681452FFBE2404FFFF8F85003880
+:102E30000A001D320080A0212402001F0E001A4A41
+:102E4000A38200682409FFFF1049FEA22404FFFFAB
+:102E50000A001E548F830054026020210E001A4A7B
+:102E6000A38900681450FF518F8500382403FFFFA9
+:102E70000A001D320060A0218CCE00248E0B00249D
+:102E8000116EFF2A026020210A001F262402000F73
+:102E90000E001934026020218F8500380A001EE5DB
+:102EA000000018218E0900003C05008001259024B7
+:102EB0001640FF452402001A026020210E001A4A23
+:102EC000A3820068240CFFFF144CFECB2404FFFFF8
+:102ED0008F8500380A001D320080A0212403FFFDE9
+:102EE0000060A0210A001D32AF8700602418001D79
+:102EF0000E001A4AA39800682403FFFF1443FEA69D
+:102F00002404FFFF8F8500380A001D320080A021B5
+:102F10002412002C0E001A4AA39200682403FFFF1B
+:102F20001043FF508F8500380A001ECC9204000326
+:102F3000026020210A001F3C24020024240B800090
+:102F4000006B702431CAFFFF000A13C2305100FF2A
+:102F5000001180270A001F6D001033C00A001F3CBB
+:102F6000240200278E0600288CAE002C10CE00080C
+:102F7000026020210A001F802402001F0A001F8017
+:102F80002402000E026020210A001F802402002576
+:102F90008E04002C1080000D8F8300308C7800741C
+:102FA0000304582B5560000C026020218CA80014EB
+:102FB0000086A0210114302B10C0FF5A8F8F0044CF
+:102FC000026020210A001F802402002202602021CA
+:102FD0000A001F80240200230A001F80240200260A
+:102FE00027BDFFD8AFB3001CAFB10014AFBF0020A6
+:102FF000AFB20018AFB000103C0280008C5201400C
+:103000008C4B01483C048000000B8C02322300FFF3
+:10301000317300FF8C8501B804A0FFFE349001805D
+:10302000AE1200008C8701442464FFF024060002E5
+:103030002C830013AE070004A6110008A206000BA3
+:10304000AE1300241060004F8FBF002000044880A2
+:103050003C0A0801254A9510012A40218D040000F0
+:1030600000800008000000003C0308008C6331A8C9
+:1030700031693FFF0009998000728021021370219D
+:103080002405FF80264D0100264C00803C02800074
+:1030900031B1007F3198007F31CA007F3C1F800A28
+:1030A0003C1980043C0F800C01C5202401A530246C
+:1030B00001853824014F1821AC460024023F4021ED
+:1030C00003194821AC470090AC440028AF8300446A
+:1030D000AF880038AF8900300E00190001608021F0
+:1030E0003C0380008C6B01B80560FFFE8F870044B5
+:1030F0008F8600383465018090E8000DACB2000086
+:10310000A4B0000600082600000416030002902761
+:10311000001227C21080008124C20088241F608210
+:10312000A4BF0008A0A0000524020002A0A2000B7A
+:103130008F8B0030000424003C0827000088902575
+:10314000ACB20010ACA00014ACA00024ACA00028CD
+:10315000ACA0002C8D6900382413FF80ACA90018A6
+:1031600090E3000D02638024320500FF10A00005EB
+:103170008FBF002090ED000D31AC007FA0EC000D62
+:103180008FBF00208FB3001C8FB200188FB10014C6
+:103190008FB000103C0A10003C0E800027BD0028B4
+:1031A00003E00008ADCA01B8265F01002405FF80D6
+:1031B00033F8007F3C06800003E578243C19800A40
+:1031C00003192021ACCF0024908E00D400AE6824D7
+:1031D00031AC00FF11800024AF840038248E0088B9
+:1031E00095CD00123C0C08008D8C31A831AB3FFF0F
+:1031F00001924821000B5180012A40210105202421
+:10320000ACC400283107007F3C06800C00E620217A
+:103210009083000D00A31024304500FF10A0FFD8BC
+:10322000AF8400449098000D330F001015E0FFD5D7
+:103230008FBF00200E001900000000003C0380003A
+:103240008C7901B80720FFFE00000000AE120000DC
+:103250008C7F0144AE1F0004A61100082411000257
+:10326000A211000BAE1300243C130801927396D0F8
+:10327000327000015200FFC38FBF00200E00213DBD
+:10328000024020210A00205A8FBF00203C1260001B
+:103290008E452C083C03F0033462FFFF00A2F824A3
+:1032A000AE5F2C088E582C083C1901C003199825D4
+:1032B000AE532C080A00205A8FBF0020264D010073
+:1032C00031AF007F3C10800A240EFF8001F02821DE
+:1032D00001AE60243C0B8000AD6C00241660FFA89A
+:1032E000AF85003824110003A0B100FC0A00205A69
+:1032F0008FBF002026480100310A007F3C0B800A66
+:103300002409FF80014B3021010920243C07800063
+:10331000ACE400240A002059AF860038944E001215
+:10332000320C3FFF31CD3FFF15ACFF7D241F608283
+:1033300090D900D42418FF800319782431EA00FFC3
+:103340001140FF770000000024070004A0C700FC24
+:103350008F870044241160842406000DA4B1000866
+:10336000A0A600050A002044240200023C0400013B
+:10337000248496BC24030014240200FE3C010800AF
+:10338000AC2431EC3C010800AC2331E83C010801DD
+:10339000A42296D83C040801248496D80000182161
+:1033A00000643021A0C30004246300012C6500FFE9
+:1033B00054A0FFFC006430213C07080024E7010012
+:1033C00003E00008AF87007800A058210080482162
+:1033D0000000102114A00012000050210A00213921
+:1033E000000000003C010801A42096D83C0508011B
+:1033F00094A596D88F8200783C0C0801258C96D82D
+:1034000000E2182100AC2021014B302BA0890004E0
+:1034100000001021A460000810C0003901004821FC
+:103420008F8600780009384000E940210008388084
+:1034300000E6282190A8000B90B9000A000820405F
+:1034400000881021000218800066C021A319000A1C
+:103450008F85007800E5782191EE000A91E6000B57
+:10346000000E684001AE6021000C20800085102114
+:10347000A046000B3C030801906396D21060002226
+:103480002462FFFF8F8300383C010801A02296D2FE
+:10349000906C00FF1180000400000000906E00FF9F
+:1034A00025CDFFFFA06D00FF3C190801973996D884
+:1034B000272300013078FFFF2F0F00FF11E0FFC925
+:1034C000254A00013C010801A42396D83C050801C7
+:1034D00094A596D88F8200783C0C0801258C96D84C
+:1034E00000E2182100AC2021014B302BA089000400
+:1034F00000001021A460000814C0FFC90100482189
+:1035000003E000080000000003E0000824020002BD
+:1035100027BDFFE0248501002407FF80AFB0001025
+:10352000AFBF0018AFB1001400A718243C108000F2
+:1035300030A4007F3C06800A008628218E110024DA
+:10354000AE03002490A200FF14400008AF850038AD
+:10355000A0A000098FBF0018AE1100248FB1001485
+:103560008FB0001003E0000827BD002090A900FDE7
+:1035700090A800FF312400FF0E0020EB310500FF72
+:103580008F8500388FBF0018A0A00009AE1100245D
+:103590008FB100148FB0001003E0000827BD002099
+:1035A00027BDFFD0AFB20020AFB1001CAFB00018F4
+:1035B000AFBF002CAFB40028AFB300243C0980009B
+:1035C0009533011635320C00952F011A3271FFFF29
+:1035D000023280218E08000431EEFFFF248B0100AF
+:1035E000010E6821240CFF8025A5FFFF016C5024EB
+:1035F0003166007F3C07800AAD2A002400C73021D5
+:10360000AF850074AF8800703C010801A02096D1FE
+:1036100090C300090200D02100809821306300FF90
+:103620002862000510400048AF8600382864000278
+:103630001480008E24140001240D00053C010801B3
+:10364000A02D96B590CC00FD3C010801A02096B6B7
+:103650003C010801A02096B790CB000A240AFF8005
+:10366000318500FF014B4824312700FF10E0000C9A
+:10367000000058213C128008365100808E2F003007
+:103680008CD0005C01F0702305C0018E8F87007024
+:1036900090D4000A3284007FA0C4000A8F860038CC
+:1036A0003C118008363000808E0F00308F8700700C
+:1036B00000EF702319C000EE0000000090D4000954
+:1036C00024120002328400FF109202470000000022
+:1036D0008CC2005800E2F82327F9FFFF1B200130BD
+:1036E0000000000090C500092408000430A300FF7A
+:1036F00010680057240A00013C010801A02A96B571
+:1037000090C900FF252700013C010801A02796B4BD
+:103710003C030801906396B5240600051066006A14
+:103720002C780005130000C4000090210003F880ED
+:103730003C0408012484955C03E4C8218F25000023
+:1037400000A0000800000000241800FF1078005CB2
+:103750000000000090CC000A90CA00093C08080153
+:10376000910896D13187008000EA48253C01080184
+:10377000A02996BC90C500FD3C140801929496D2F5
+:10378000311100013C010801A02596BD90DF00FE2B
+:103790003C010801A03F96BE90D200FF3C01080109
+:1037A000A03296BF8CD900543C010801AC3996C0B8
+:1037B0008CD000583C010801AC3096C48CC3005C2E
+:1037C0003C010801AC3496CC3C010801AC2396C8FE
+:1037D000162000088FBF002C8FB400288FB3002460
+:1037E0008FB200208FB1001C8FB0001803E00008DA
+:1037F00027BD00303C1180009624010E0E000FD42E
+:103800003094FFFF3C0B08018D6B96D40260382189
+:1038100002802821AE2B01803C1308018E7396B4E0
+:1038200001602021240600830E00102FAFB300108A
+:103830008FBF002C8FB400288FB300248FB20020DC
+:103840008FB1001C8FB0001803E0000827BD0030C6
+:103850003C1808008F1831FC270F00013C010800BC
+:10386000AC2F31FC0A0021CE000000001474FFB917
+:1038700000000000A0C000FF3C0508008CA531E45A
+:103880003C0308008C6331E03C0208008C423204A7
+:103890008F99003834A80001241F00023C01080160
+:1038A000AC2396D43C010801A02896D03C01080125
+:1038B000A02296D3A33F00090A0021878F860038F3
+:1038C0000E00213D000000000A0021CE8F86003846
+:1038D0003C1F080193FF96B42419000113F9022933
+:1038E0008F8700703C100801921096B83C060801C2
+:1038F00090C696B610C000050200A0213C04080145
+:10390000908496B9109001E48F8700780010884069
+:103910008F9F0078023048210009C880033F702142
+:1039200095D80008270F0001A5CF00083C04080126
+:10393000908496B93C05080190A596B60E0020EB40
+:10394000000000008F8700780230202100043080C2
+:1039500000C720218C8500048F82007400A24023C0
+:1039600005020006AC8200048C8A00008F83007080
+:10397000014310235C400001AC8300008F860038B7
+:1039800090CB00FF2D6C00025580002D2414000107
+:103990000230F821001F40800107282190B9000B58
+:1039A0008CAE00040019C04003197821000F188064
+:1039B000006710218C4D000001AE88232630FFFFE8
+:1039C0005E00001F241400018C4400048CAA000037
+:1039D000008A482319200019240E00043C01080124
+:1039E000A02E96B590AD000B8CAB0004000D884066
+:1039F000022D802100101080004710218C4400040B
+:103A000001646023058202009443000890DF00FEF9
+:103A100090B9000B33E500FF54B900040107A02161
+:103A2000A0D400FE8F8700780107A0219284000BAC
+:103A30000E0020EB240500018F86003824140001BD
+:103A4000125400962E500001160000423C08FFFF61
+:103A5000241900021659FF3F00000000A0C000FF1B
+:103A60008F860038A0D200090A0021CE8F86003848
+:103A700090C700092404000230E300FF1064016FC6
+:103A800024090004106901528F8800748CCE005400
+:103A9000010E682325B100010620017524180004D9
+:103AA0003C010801A03896B53C010801A02096B45D
+:103AB00090D400FD90D200FF2E4F000215E0FF14BD
+:103AC000328400FF000438408F89007890DF00FFC7
+:103AD00000E41021000220800089C8212FE50002A7
+:103AE0009324000B14A0FF0A2407000200041840CE
+:103AF0000064802100105880016928218CAC0004EA
+:103B0000010C50230540FF02000000003C030801A7
+:103B1000906396B614600005246F00013C01080113
+:103B2000A02496B93C010801A02796B73C010801E2
+:103B3000A02F96B690CE00FF24E7000131CD00FF04
+:103B400001A7882B1220FFE990A4000B0A0021BDD9
+:103B5000000000003C0508018CA596B43C1200044E
+:103B600000A8F82413F20006240200053C0908010D
+:103B7000912996B5152000022402000324020005B5
+:103B80003C010801A02296D190C700FF14E001205B
+:103B900024020002A0C200090A0021CE8F8600384C
+:103BA00090CC00FF1180FEDA240A00018F8C007493
+:103BB0008F890078240F0003018068211160001EA6
+:103BC000240E0002000540400105A02100142080C1
+:103BD000008990218E510004019180230600FECCC3
+:103BE000000000003C020801904296B61440000517
+:103BF000245800013C010801A02A96B73C010801A5
+:103C0000A02596B93C010801A03896B690DF00FFC8
+:103C1000010510210002C88033E500FF254A00019C
+:103C20000329202100AA402B1500FEB99085000B26
+:103C30001560FFE5000540400005404001051821E2
+:103C4000000310803C010801A02A96B43C01080141
+:103C5000A02596B8004918218C64000400E4F823DC
+:103C600027F9FFFF1F20FFE9000000008C63000020
+:103C700000E358230560013A01A3882310E30117EC
+:103C80000184C0231B00FEA2000000003C010801CB
+:103C9000A02E96B50A0022FC240B0001240E00047D
+:103CA000A0CE00093C0D08008DAD31F88F8600389C
+:103CB00025A200013C010800AC2231F80A0021CE07
+:103CC000000000008CD9005C00F9C0231F00FE7BBF
+:103CD000000000008CDF005C10FFFF658F84007423
+:103CE0008CC3005C00834023250200011C40FF6060
+:103CF000000000008CC9005C2487000100E9282B2B
+:103D000010A0FE943C0D80008DAB01043C0C000122
+:103D1000016C50241140FE8F240200103C01080168
+:103D2000A02296D10A0021CE000000008F910074DD
+:103D30008F86003826220001ACC2005C0A0022896E
+:103D4000241400018F8700382404FF80000088219C
+:103D500090E9000A2414000101243025A0E6000A9D
+:103D60003C05080190A596B63C040801908496B9DC
+:103D70000E0020EB000000008F8600388F85007851
+:103D800090C800FD310700FF000740400107F821FF
+:103D9000001FC0800305C8219323000BA0C300FDB2
+:103DA0008F8500788F86003803056021918F000B86
+:103DB000000F704001CF6821000D808002051021A6
+:103DC0008C4B0000ACCB00548D8400048F830074B6
+:103DD0000064502319400002248200012462000183
+:103DE00001074821ACC2005C0009308000C54021B9
+:103DF00000E02021240500010E0020EB9110000BB3
+:103E00008F86003890C500FF10A0FF0C0010704096
+:103E10008F85007801D06821000D10800045582161
+:103E20008D6400008F8C00740184502325470001AD
+:103E300004E0FF02263100013C030801906396B6BE
+:103E40002E2F0002247800013C010801A03896B60C
+:103E50003C010801A03496B711E0FEF802003821B9
+:103E60000A00235C000740408F8400388F83007471
+:103E70008C85005800A340230502FE9AAC830058AD
+:103E80000A002232000000003C07080190E796D2A9
+:103E9000240200FF10E200BE8F8600383C110801AA
+:103EA000963196DA3C030801246396D82625000152
+:103EB0003230FFFF30ABFFFF020360212D6A00FFAD
+:103EC0001540008D918700043C010801A42096DA7A
+:103ED0008F8800380007484001272821911800FFEB
+:103EE000000530802405000127140001A11400FF03
+:103EF0003C120801925296D28F8800788F8E007003
+:103F0000264F000100C820213C010801A02F96D2B5
+:103F1000AC8E00008F8D0074A4850008AC8D000469
+:103F20003C030801906396B4146000770000902170
+:103F30003C010801A02596B4A087000B8F8C007867
+:103F400000CC5021A147000A8F820038A04700FD15
+:103F50008F840038A08700FE8F8600388F9F007006
+:103F6000ACDF00548F990074ACD900588F8D007865
+:103F70000127C02100185880016DA021928F000AEE
+:103F8000000F704001CF182100038880022D80218E
+:103F9000A207000B8F86007801666021918A000BD2
+:103FA000000A1040004A20210004288000A6402179
+:103FB000A107000A3C07800834E900808D22003008
+:103FC0008F860038ACC2005C0A00228924140001EC
+:103FD00090CA00FF1540FEAD8F880074A0C4000990
+:103FE0000A0021CE8F860038A0C000FD8F980038CF
+:103FF00024060001A30000FE3C010801A02696B59E
+:104000003C010801A02096B40A0021BD0000000078
+:1040100090CB00FF3C040801908496D3316C00FFE4
+:104020000184502B1540000F2402000324020004D9
+:10403000A0C200090A0021CE8F86003890C3000A72
+:104040002410FF8002035824316C00FF1180FDC151
+:10405000000000003C010801A02096B50A0021BD27
+:1040600000000000A0C200090A0021CE8F8600389F
+:1040700090D4000A2412FF8002544824312800FF03
+:104080001500FFF4240200083C010801A02296D18B
+:104090000A0021CE00000000001088408F8B0070C5
+:1040A000023018210003688001A72021AC8B00009A
+:1040B0008F8A0074240C0001A48C0008AC8A0004D0
+:1040C0003C05080190A596B62402000110A2FE1E30
+:1040D00024A5FFFF0A0022489084000B0184A0233E
+:1040E0001A80FD8B000000003C010801A02E96B54F
+:1040F0000A0022FC240B00013C010801A42596DAE9
+:104100000A0023AE8F880038240B0001106B0022B8
+:104110008F9800388F85003890BF00FF33F900FF7B
+:104120001079002B000000003C1F080193FF96B897
+:10413000001FC840033FC0210018A08002887821DA
+:1041400091EE000AA08E000A8F8D00783C030801D2
+:10415000906396B800CD88210A0023D4A223000BD7
+:10416000263000010600003101A490230640002BF8
+:10417000240200033C010801A02F96B50A0022FC8E
+:10418000240B00018F8900380A002232AD27005429
+:104190000A00228824120001931400FDA094000B51
+:1041A0008F8800388F8F0078910E00FE00CF682135
+:1041B000A1AE000A8F910038A22700FD8F83007006
+:1041C0008F900038AE0300540A0023D58F8D0078FD
+:1041D00090B000FEA090000A8F8B00388F8C007882
+:1041E000916A00FD00CC1021A04A000B8F8400389A
+:1041F000A08700FE8F8600748F850038ACA600581B
+:104200000A0023D58F8D007894B80008ACA4000470
+:10421000030378210A00227CA4AF00083C010801B6
+:10422000A02296B50A0021BD0000000090CF000931
+:10423000240D000431EE00FF11CDFD8524020001A4
+:104240003C010801A02296B50A0021BD0000000033
+:10425000080033440800334408003420080033F4D5
+:10426000080033D808003328080033280800332812
+:104270000800334C8008010080080080800800009E
+:104280005F865437E4AC62CC50103A4536621985EB
+:10429000BF14C0E81BC27A1E84F4B556094EA6FEB0
+:1042A0007DDA01E7C04D748108005A7408005AB8DD
+:1042B00008005A5C08005A5C08005A5C08005A5C06
+:1042C00008005A7408005A5C08005A5C08005AC07A
+:1042D00008005A5C080059D408005A5C08005A5C6F
+:1042E00008005AC008005A5C08005A5C08005A5C72
+:1042F00008005A5C08005A5C08005A5C08005A5CC6
+:1043000008005A5C08005A5C08005A5C08005A947D
+:1043100008005A5C08005A9408005A5C08005A5C6D
+:1043200008005A5C08005A9808005A9408005A5C21
+:1043300008005A5C08005A5C08005A5C08005A5C85
+:1043400008005A5C08005A5C08005A5C08005A5C75
+:1043500008005A5C08005A5C08005A5C08005A5C65
+:1043600008005A5C08005A5C08005A5C08005A5C55
+:1043700008005A5C08005A5C08005A5C08005A9809
+:1043800008005A9808005A5C08005A9808005A5CBD
+:1043900008005A5C08005A5C08005A5C08005A5C25
+:1043A00008005A5C08005A5C08005A5C08005A5C15
+:1043B00008005A5C08005A5C08005A5C08005A5C05
+:1043C00008005A5C08005A5C08005A5C08005A5CF5
+:1043D00008005A5C08005A5C08005A5C08005A5CE5
+:1043E00008005A5C08005A5C08005A5C08005A5CD5
+:1043F00008005A5C08005A5C08005A5C08005A5CC5
+:1044000008005A5C08005A5C08005A5C08005A5CB4
+:1044100008005A5C08005A5C08005A5C08005A5CA4
+:1044200008005A5C08005A5C08005A5C08005A5C94
+:1044300008005A5C08005A5C08005A5C08005A5C84
+:1044400008005A5C08005A5C08005A5C08005A5C74
+:1044500008005A5C08005A5C08005A5C08005A5C64
+:1044600008005A5C08005A5C08005A5C08005A5C54
+:1044700008005A5C08005A5C08005A5C08005A5C44
+:1044800008005A5C08005A5C08005A5C08005A5C34
+:1044900008005A5C08005A5C08005A5C08005A5C24
+:1044A00008005A5C08005A5C08005ADC0800782CA6
+:1044B00008007A90080078380800762C08007838D0
+:1044C000080078C4080078380800762C0800762C9C
+:1044D0000800762C0800762C0800762C0800762C34
+:1044E0000800762C0800762C0800762C0800762C24
+:1044F00008007858080078480800762C0800762CC8
+:104500000800762C0800762C0800762C0800762C03
+:104510000800762C0800762C0800762C0800762CF3
+:104520000800762C0800762C08007848080082D80D
+:1045300008008164080082A008008164080082707D
+:104540000800804C080081640800816408008164D0
+:1045500008008164080081640800816408008164A7
+:104560000800816408008164080081640800816497
+:10457000080081640800818C08008D1008008E6C92
+:0C45800008008E4C080088B408008D284C
+:04458C000A000124FC
+:1045900000000000000000000000000D7470613693
+:1045A0002E322E31610000000602010100000000E1
+:1045B00000000000000000000000000000000000FB
+:1045C00000000000000000000000000000000000EB
+:1045D00000000000000000000000000000000000DB
+:1045E00000000000000000000000000000000000CB
+:1045F00000000000000000000000000000000000BB
+:1046000000000000000000000000000000000000AA
+:10461000000000000000000000000000000000009A
+:1046200010000003000000000000000D0000000D5D
+:104630003C020800244217203C03080024632A108F
+:10464000AC4000000043202B1480FFFD24420004F6
+:104650003C1D080037BD2FFC03A0F0213C100800D2
+:10466000261004903C1C0800279C17200E000262B4
+:10467000000000000000000D2402FF8027BDFFE0C5
+:1046800000821024AFB00010AF420020AFBF00186E
+:10469000AFB10014936500043084007F03441821F7
+:1046A0003C0200080062182130A500200360802130
+:1046B0003C080111277B000814A000022466005C5E
+:1046C00024660058920200049743010492040004F7
+:1046D0003047000F3063FFFF30840040006728231D
+:1046E00010800009000048219202000530420004B9
+:1046F000104000050000000010A0000300000000B2
+:1047000024A5FFFC240900049202000530420004A5
+:10471000104000120000000010A000100000000077
+:104720009602000200A72021010440252442FFFE3A
+:10473000A7421016920300042402FF8000431024B5
+:10474000304200FF104000033C0204000A000174E4
+:10475000010240258CC20000AF4210188F42017840
+:104760000440FFFE2402000AA74201409602000214
+:1047700024040009304200070002102330420007E1
+:10478000A7420142960200022442FFFEA7420144D2
+:10479000A740014697420104A74201488F42010801
+:1047A0003042002050400001240400019202000425
+:1047B00030420010144000023483001000801821A1
+:1047C000A743014A000000000000000000000000B4
+:1047D00000000000AF4810000000000000000000D2
+:1047E00000000000000000008F4210000441FFFEA6
+:1047F0003102FFFF10400007000000009202000499
+:104800003042004014400003000000008F421018A6
+:10481000ACC20000960200063042FFFF24420002B4
+:10482000000210430002104003628821962200001B
+:104830001120000D3044FFFF00A710218F830038A6
+:104840008F45101C000210820002108000431021CE
+:10485000AC45000030A6FFFF0E00058D00052C02C0
+:1048600000402021A6220000920300042402FF80C1
+:1048700000431024304200FF1040001F00000000E1
+:1048800092020005304200021040001B00000000B0
+:104890009742100C2442FFFEA742101600000000B1
+:1048A0003C02040034420030AF421000000000001F
+:1048B0000000000000000000000000008F42100017
+:1048C0000441FFFE000000009742100C8F45101CB1
+:1048D0003042FFFF244200300002108200021080AC
+:1048E000005B1021AC45000030A6FFFF0E00058DD7
+:1048F00000052C02A6220000960400022484000871
+:104900000E0001E93084FFFF974401040E0001F717
+:104910003084FFFF8FBF00188FB100148FB00010DC
+:104920003C02100027BD002003E00008AF420178E0
+:104930003084FFFF308200078F8500241040000282
+:10494000248300073064FFF800A4102130421FFFC9
+:1049500003421821247B4000AF850028AF82002449
+:1049600003E00008AF4200843084FFFF3082000F74
+:104970008F85002C8F860034104000022483000FA6
+:104980003064FFF000A410210046182BAF850030E2
+:104990000046202314600002AF82002CAF84002C5C
+:1049A0008F82002C340480000342182100641821F7
+:1049B000AF83003803E00008AF4200808F8200140C
+:1049C000104000088F8200048F82FFDC1440000535
+:1049D0008F8200043C02FFBF3442FFFF008220248C
+:1049E0008F82000430430006240200021062000F90
+:1049F0003C0201012C620003504000052402000427
+:104A00001060000F3C0200010A00023000000000AC
+:104A100010620005240200061462000C3C02011121
+:104A20000A000229008210253C0200110082102594
+:104A3000AF421000240200010A000230AF82000CD5
+:104A400000821025AF421000AF80000C0000000073
+:104A5000000000000000000003E00008000000006B
+:104A60008F82000C10400004000000008F421000F4
+:104A70000441FFFE0000000003E000080000000009
+:104A80008F8200102443F800000231C224C2FFF0DC
+:104A90002C63030110600003000210420A00025759
+:104AA000AC8200008F85001800C5102B1440000B4D
+:104AB0000000182100C51023244700018F82001C2C
+:104AC00000A210212442FFFF0046102B5440000496
+:104AD0002402FFFF0A000257AC8700002402FFFFF8
+:104AE0000A000260AC8200008C82000000021940C3
+:104AF000006218210003188000621821000318804A
+:104B00003C0208002442175C0062182103E0000800
+:104B10000060102127BDFFD8AFBF0020AFB1001C3F
+:104B2000AFB000183C0460088C8250002403FF7F63
+:104B30003C066000004310243442380CAC82500024
+:104B40008CC24C1C3C1A8000000216023042000F3E
+:104B500010400007AF82001C8CC34C1C3C02001F9D
+:104B60003442FC0000621824000319C2AF8300180D
+:104B70008F420008275B400034420001AF4200082A
+:104B8000AF8000243C02601CAF400080AF40008436
+:104B90008C4500088CC308083402800003422021A1
+:104BA0002402FFF0006218243C0200803C0108004F
+:104BB000AC2204203C025709AF8400381462000480
+:104BC000AF850034240200010A000292AF82001473
+:104BD000AF8000148F4200003842000130420001D3
+:104BE0001440FFFC8F8200141040001600000000EB
+:104BF00097420104104000058F83000014600007F5
+:104C00002462FFFF0A0002A72C62000A2C62001037
+:104C1000504000048F83000024620001AF82000036
+:104C20008F8300002C62000A144000032C620007EE
+:104C30000A0002AEAF80FFDC104000022402000137
+:104C4000AF82FFDC8F4301088F44010030622000F7
+:104C5000AF83000410400008AF8400103C0208003D
+:104C60008C42042C244200013C010800AC22042C9C
+:104C70000A00058A3C0240003065020014A00003CF
+:104C800024020F001482026024020D0097420104E6
+:104C9000104002C83C02400030624000144000ADA9
+:104CA0008F8200388C4400088F4201780440FFFE58
+:104CB00024020800AF42017824020008A742014004
+:104CC000A7400142974201048F8400043051FFFF46
+:104CD0003082000110400007022080212623FFFEC1
+:104CE000240200023070FFFFA74201460A0002DBE7
+:104CF000A7430148A74001463C0208008C42043CFF
+:104D00001440000D8F8300103082002014400002F8
+:104D10002403000924030001006020218F83001078
+:104D2000240209005062000134840004A744014AAF
+:104D30000A0002F60000000024020F0014620005C1
+:104D400030820020144000062403000D0A0002F502
+:104D50002403000514400002240300092403000179
+:104D6000A743014A3C0208008C4204203C0400484E
+:104D70000E00020C004420250E0002350000000049
+:104D80008F82000C1040003E000000008F42100097
+:104D90003C03002000431024104000398F8200049F
+:104DA000304200021040003600000000974210140C
+:104DB0001440003300000000974210088F8800382C
+:104DC0003042FFFF244200060002188200033880B0
+:104DD00000E83021304300018CC400001060000462
+:104DE000304200030000000D0A00033700E81021E4
+:104DF000544000103084FFFF3C05FFFF0085202455
+:104E0000008518260003182B0004102B00431024E3
+:104E10001040000500000000000000000000000D30
+:104E200000000000240002228CC200000A000336A9
+:104E3000004520253883FFFF0003182B0004102BAA
+:104E40000043102410400005000000000000000096
+:104E50000000000D000000002400022B8CC20000A6
+:104E60003444FFFF00E81021AC4400003C0208007D
+:104E70008C420430244200013C010800AC22043082
+:104E80008F6200008F840038AF8200088C8300009E
+:104E90003402FFFF1462000F000010213C050800DF
+:104EA0008CA504543C0408008C84045000B02821D4
+:104EB00000B0302B00822021008620213C01080018
+:104EC000AC2504543C010800AC2404500A000580C1
+:104ED000240400088C820000304201001040000FC2
+:104EE000000010213C0508008CA5044C3C0408007F
+:104EF0008C84044800B0282100B0302B008220218F
+:104F0000008620213C010800AC25044C3C0108002F
+:104F1000AC2404480A000580240400083C0508006D
+:104F20008CA504443C0408008C84044000B0282173
+:104F300000B0302B00822021008620213C01080097
+:104F4000AC2504443C010800AC2404400A00058060
+:104F5000240400088F6200088F620000000216021D
+:104F6000304300F0240200301062000524020040AB
+:104F7000106200E08F8200200A00058824420001B0
+:104F800014A0000500000000000000000000000D5B
+:104F900000000000240002568F4201780440FFFE0A
+:104FA000000000000E00023D27A400101440000580
+:104FB00000408021000000000000000D0000000003
+:104FC0002400025D8E020000104000050000000079
+:104FD000000000000000000D00000000240002603E
+:104FE0008F62000C04430003240200010A00042E17
+:104FF000AE000000AE0200008F8200388C4800082E
+:10500000A20000078F65000C8F64000430A3FFFF2F
+:105010000004240200852023308200FF0043102179
+:1050200024420005000230832CC20081A605000A3C
+:1050300014400005A2040004000000000000000D60
+:1050400000000000240002788F8500380E0005ABB8
+:10505000260400148F6200048F430108A602000892
+:105060003C021000006218241060000800000000DC
+:1050700097420104920300072442FFEC34630002CC
+:105080003045FFFF0A0003C3A20300079742010453
+:105090002442FFF03045FFFF960600082CC20013A3
+:1050A00054400005920300079202000734420001B9
+:1050B000A20200079203000724020001106200050B
+:1050C000240200031062000B8F8200380A0003E004
+:1050D00030C6FFFF8F8200383C04FFFF8C43000C7A
+:1050E0000064182400651825AC43000C0A0003E096
+:1050F00030C6FFFF3C04FFFF8C43001000641824FF
+:1051000000651825AC43001030C6FFFF24C2000222
+:1051100000021083A20200058F830038304200FF96
+:1051200000021080004328218CA800008CA20000FF
+:1051300024030004000217021443001200000000C0
+:10514000974201043C03FFFF010318243042FFFF94
+:10515000004610232442FFFE00624025ACA8000058
+:1051600092030005306200FF000210800050102101
+:10517000904200143042000F004310210A00041531
+:10518000A20200068CA40004974201049603000AC0
+:105190003088FFFF3042FFFF004610232442FFD635
+:1051A0000002140001024025ACA80004920200078E
+:1051B000920400052463002800031883006418216A
+:1051C00034420004A2030006A20200078F820004FA
+:1051D0002403FFFB3442000200431024AF8200048A
+:1051E000920300068F87003800031880007010219A
+:1051F0008C4400203C02FFF63442FFFF0082402432
+:1052000000671821AE04000CAC68000C9205000683
+:105210003C03FF7F8E02000C0005288000B0202197
+:105220003463FFFF010330249488002600A728215F
+:1052300000431024AE02000CAC860020AC88002491
+:10524000ACA8001024020010A74201402402000272
+:10525000A7400142A7400144A742014697420104EA
+:105260003C0400082442FFFEA7420148240200013A
+:105270000E00020CA742014A9603000A92020004A3
+:105280000043102124420002304200070002102394
+:10529000304200070E000235AE0200108F6200009F
+:1052A0003C0308008C63044424040010AF8200080F
+:1052B000974201043042FFFF2442FFFE00403821A4
+:1052C000000237C33C0208008C42044000671821EA
+:1052D0000067282B00461021004510213C010800E2
+:1052E000AC2304443C010800AC2204400A0005152C
+:1052F0000000000014A000050000000000000000F5
+:105300000000000D000000002400030A8F42017815
+:105310000440FFFE000000000E00023D27A4001420
+:105320001440000500408021000000000000000D36
+:1053300000000000240003118E020000544000060B
+:1053400092020007000000000000000D00000000B5
+:105350002400031C920200073042000410400005A4
+:105360008F8200042403FFFB344200020043102418
+:10537000AF8200048F620004044300089202000719
+:10538000920200068E03000CAE00000000021080A6
+:1053900000501021AC43002092020007304200046C
+:1053A000544000099602000A920200053C030001E5
+:1053B00000021080005010218C46001800C33021DC
+:1053C000AC4600189602000A9206000427710008F5
+:1053D0000220202100C2302124C600052605001429
+:1053E0000E0005AB00063082920400068F650004B3
+:1053F0003C027FFF00042080009120218C83000468
+:105400003442FFFF00A2282400651821AC83000469
+:105410009202000792040005920300043042000447
+:105420001040001496070008308400FF000420801C
+:10543000009120218C860004974201049605000A01
+:10544000306300FF3042FFFF004310210045102170
+:1054500030E3FFFF004310232442FFD830C6FFFF94
+:105460000002140000C23025AC8600040A0004C902
+:1054700092030007308500FF0005288000B1282135
+:105480008CA4000097420104306300FF3042FFFF0C
+:1054900000431021004710233C03FFFF008320241A
+:1054A0003042FFFF00822025ACA4000092030007D9
+:1054B0002402000110620006000000002402000324
+:1054C00010620011000000000A0004EC8E030010BE
+:1054D00097420104920300049605000A8E24000CF2
+:1054E00000431021004510212442FFF23C03FFFF3E
+:1054F000008320243042FFFF00822025AE24000CD0
+:105500000A0004EC8E030010974201049203000489
+:105510009605000A8E24001000431021004510213A
+:105520002442FFEE3C03FFFF008320243042FFFFB4
+:1055300000822025AE2400108E0300102402000AF1
+:10554000A7420140A74301429603000A92020004C9
+:105550003C04004000431021A7420144A7400146FB
+:1055600097420104A7420148240200010E00020CE8
+:10557000A742014A0E000235000000008F620000C1
+:105580009203000400002021AF820008974201042A
+:105590009606000A3042FFFF0062182100602821B1
+:1055A0003C0308008C6304443C0208008C42044025
+:1055B00000651821004410210065382B0047102198
+:1055C0003C010800AC2304443C010800AC22044028
+:1055D00092040004008620212484000A3084FFFF06
+:1055E0000E0001E900000000974401043084FFFF31
+:1055F0000E0001F7000000003C021000AF420178ED
+:105600000A0005878F82002014820027306200067E
+:1056100097420104104000673C02400030624000A5
+:105620001040000500000000000000000000000D18
+:1056300000000000240004208F4201780440FFFE97
+:1056400024020800AF42017824020008A74201406A
+:10565000A74001428F8200049743010430420001B9
+:10566000104000073070FFFF2603FFFE24020002F7
+:10567000A7420146A74301480A00053F2402000D46
+:10568000A74001462402000DA742014A8F62000094
+:1056900024040008AF8200080E0001E900000000A9
+:1056A0000A00051902002021104000423C0240007F
+:1056B00093620000304300F02402001010620005E5
+:1056C00024020070106200358F8200200A000588D5
+:1056D000244200018F620000974301043050FFFF15
+:1056E0003071FFFF8F4201780440FFFE3202000755
+:1056F00000021023304200072403000A2604FFFEA4
+:10570000A7430140A7420142A7440144A7400146E4
+:10571000A75101488F420108304200201440000286
+:105720002403000924030001A743014A0E00020CD0
+:105730003C0400400E000235000000003C07080059
+:105740008CE70444021110212442FFFE3C060800AD
+:105750008CC604400040182100E3382100001021CD
+:105760008F65000000E3402B00C2302126040008B2
+:1057700000C830213084FFFFAF8500083C010800DD
+:10578000AC2704443C010800AC2604400E0001E9AB
+:10579000000000000A000519022020210E00013B34
+:1057A000000000008F82002024420001AF82002010
+:1057B0003C024000AF4201380A00029200000000A3
+:1057C0003084FFFF30C6FFFF00052C0000A628250F
+:1057D0003882FFFF004510210045282B004510218D
+:1057E00000021C023042FFFF0043102100021C0295
+:1057F0003042FFFF004310213842FFFF03E0000862
+:105800003042FFFF3084FFFF30A5FFFF000018216A
+:1058100010800007000000003082000110400002EC
+:1058200000042042006518210A0005A10005284057
+:1058300003E000080060102110C0000624C6FFFF2E
+:105840008CA2000024A50004AC8200000A0005AB75
+:105850002484000403E000080000000010A00008F9
+:1058600024A3FFFFAC860000000000000000000041
+:105870002402FFFF2463FFFF1462FFFA2484000464
+:0858800003E000080000000035
+:04588800000000011B
+:04588C000A00002AE4
+:1058900000000000000000000000000D7478703669
+:1058A0002E322E31610000000602010000000000CF
+:1058B000000001360000EA60000000000000000067
+:1058C00000000000000000000000000000000000D8
+:1058D00000000000000000000000000000000000C8
+:1058E00000000000000000000000000000000016A2
+:1058F00000000000000000000000000000000000A8
+:105900000000000000000000000000000000000097
+:105910000000000000000000000000000000000087
+:10592000000000000000138800000000000005DCFB
+:105930000000000000000000100000030000000054
+:105940000000000D0000000D3C02080024423D68EC
+:105950003C0308002463401CAC4000000043202BA3
+:105960001480FFFD244200043C1D080037BD7FFC6D
+:1059700003A0F0213C100800261000A83C1C0800E1
+:10598000279C3D680E00044E000000000000000D42
+:1059900027BDFFB4AFA10000AFA20004AFA3000871
+:1059A000AFA4000CAFA50010AFA60014AFA700185D
+:1059B000AFA8001CAFA90020AFAA0024AFAB0028FD
+:1059C000AFAC002CAFAD0030AFAE0034AFAF00389D
+:1059D000AFB8003CAFB90040AFBC0044AFBF004817
+:1059E0000E000591000000008FBF00488FBC0044EE
+:1059F0008FB900408FB8003C8FAF00388FAE0034B5
+:105A00008FAD00308FAC002C8FAB00288FAA002404
+:105A10008FA900208FA8001C8FA700188FA6001444
+:105A20008FA500108FA4000C8FA300088FA2000484
+:105A30008FA1000027BD004C3C1B60048F7A5030C2
+:105A4000377B502803400008AF7A00008F86003C67
+:105A50003C0390003C0280000086282500A32025FE
+:105A6000AC4400203C0380008C67002004E0FFFE73
+:105A70000000000003E00008000000000A000070C1
+:105A8000240400018F85003C3C0480003483000125
+:105A900000A3102503E00008AC82002003E000080A
+:105AA000000010213084FFFF30A5FFFF10800007A9
+:105AB0000000182130820001104000020004204242
+:105AC000006518211480FFFB0005284003E0000852
+:105AD0000060102110C00007000000008CA2000030
+:105AE00024C6FFFF24A50004AC82000014C0FFFB05
+:105AF0002484000403E000080000000010A0000857
+:105B000024A3FFFFAC86000000000000000000009E
+:105B10002402FFFF2463FFFF1462FFFA24840004C1
+:105B200003E000080000000090AA00318FAB0010D5
+:105B30008CAC00403C0300FF8D680004AD6C00207D
+:105B40008CAD004400E060213462FFFFAD6D0024A5
+:105B50008CA700483C09FF000109C024AD6700285C
+:105B60008CAE004C0182C82403197825AD6F000467
+:105B7000AD6E002C8CAD0038314A00FFAD6D001CBD
+:105B800094A900323128FFFFAD68001090A70030C3
+:105B9000A5600002A1600004A167000090A300328C
+:105BA000306200FF00021982106000052405000128
+:105BB0001065000E0000000003E00008A16A00016B
+:105BC0008CD80028354A0080AD7800188CCF00149E
+:105BD000AD6F00148CCE0030AD6E00088CC4002C6C
+:105BE000A16A000103E00008AD64000C8CCD001C2C
+:105BF000AD6D00188CC90014AD6900148CC8002468
+:105C0000AD6800088CC70020AD67000C8CC2001482
+:105C10008C8300700043C82B132000070000000095
+:105C20008CC20014144CFFE400000000354A0080D0
+:105C300003E00008A16A00018C8200700A0000E6FF
+:105C4000000000009089003027BDFFF88FA8001CDD
+:105C5000A3A900008FA300003C0DFF8035A2FFFF29
+:105C60008CAC002C00625824AFAB0000A1000004F3
+:105C700000C05821A7A000028D06000400A0482102
+:105C80000167C8218FA50000008050213C18FF7FCC
+:105C9000032C20263C0E00FF2C8C0001370FFFFF49
+:105CA00035CDFFFF3C02FF0000AFC82400EDC0244B
+:105CB00000C27824000C1DC00323682501F870255C
+:105CC000AD0D0000AD0E00048D240024AFAD00002A
+:105CD000AD0400088D2C00202404FFFFAD0C000C47
+:105CE0009547003230E6FFFFAD06001091450048B1
+:105CF00030A200FF000219C2506000018D24003460
+:105D0000AD0400148D4700388FAA001827BD000885
+:105D1000AD0B0028AD0A0024AD07001CAD00002C1F
+:105D2000AD00001803E00008AD00002027BDFFE033
+:105D3000AFB20018AFB10014AFB00010AFBF001C7D
+:105D40009098003000C088213C0D00FF330F007F89
+:105D5000A0CF0000908E003135ACFFFF3C0AFF0061
+:105D6000A0CE000194A6001EA22000048CAB00145B
+:105D70008E29000400A08021016C2824012A4024DF
+:105D80000080902101052025A6260002AE240004F3
+:105D900026050020262400080E000092240600029A
+:105DA00092470030260500282624001400071E0014
+:105DB0000003160324060004044000032403FFFF2D
+:105DC000965900323323FFFF0E000092AE230010DD
+:105DD000262400248FBF001C8FB200188FB100143E
+:105DE0008FB0001024050003000030210A00009C41
+:105DF00027BD002027BDFFD8AFB1001CAFB00018F1
+:105E0000AFBF002090A900302402000100E0502123
+:105E10003123003F00A040218FB000400080882146
+:105E200000C04821106200148FA70038240B000521
+:105E300000A0202100C02821106B00130200302197
+:105E40000E000128000000009225007C30A4000212
+:105E50001080000326030030AE000030260300341B
+:105E60008FBF00208FB1001C8FB000180060102180
+:105E700003E0000827BD00280E0000A7AFB0001007
+:105E80000A00016F000000008FA3003C01002021E8
+:105E90000120282101403021AFA300100E0000EEA8
+:105EA000AFB000140A00016F000000003C06800043
+:105EB00034C20E008C4400108F850044ACA4002036
+:105EC0008C43001803E00008ACA300243C068000CB
+:105ED00034C20E008C4400148F850044ACA4002012
+:105EE0008C43001C03E00008ACA300249382000C48
+:105EF0001040001B2483000F2404FFF000643824AA
+:105F000010E00019978B00109784000E9389000D04
+:105F10003C0A601C0A0001AC0164402301037021AB
+:105F2000006428231126000231C2FFFF30A2FFFFC8
+:105F30000047302B50C0000E00E448218D4D000C6E
+:105F400031A3FFFF00036400000C2C0304A1FFF346
+:105F50000000302130637FFF0A0001A42406000105
+:105F600003E00008000000009784000E00E44821D0
+:105F70003123FFFF3168FFFF0068382B54E0FFF842
+:105F8000A783000E938A000D11400005240F000125
+:105F9000006BC023A380000D03E00008A798000E4B
+:105FA000006BC023A38F000D03E00008A798000E2C
+:105FB00003E000080000000027BDFFE8AFB00010BC
+:105FC0003C10800036030140308BFFFF93AA002B6A
+:105FD000AFBF0014A46B000436040E0094880016B2
+:105FE00030C600FF8FA90030A4680006AC65000829
+:105FF000A0660012A46A001AAC6700208FA5002CCE
+:10600000A4690018012020210E000198AC6500143D
+:106010003C021000AE0201788FBF00148FB0001058
+:1060200003E0000827BD00188F85000024840007C6
+:1060300027BDFFF83084FFF83C06800094CB008A2F
+:10604000316AFFFFAFAA00008FA90000012540239D
+:106050002507FFFF30E31FFF0064102B1440FFF7FC
+:1060600000056882000D288034CC400000AC10216F
+:1060700003E0000827BD00088F8200002486000787
+:1060800030C5FFF800A2182130641FFF03E00008AC
+:10609000AF8400008F87003C8F84004427BDFFB091
+:1060A000AFB70044AFB40038AFB1002CAFBF004869
+:1060B000AFB60040AFB5003CAFB30034AFB2003074
+:1060C000AFB000283C0B80008C860024AD670080B8
+:1060D0008C8A002035670E0035690100ACEA00109B
+:1060E0008C8800248D2500040000B821ACE800183D
+:1060F0008CE3001000A688230000A021ACE300146C
+:106100008CE20018ACE2001C122000FE00E0B0217E
+:10611000936C0008118000F400000000976F0010DD
+:1061200031EEFFFF022E682B15A000EF00000000EB
+:10613000977200103250FFFFAED000003C03800089
+:106140008C740000329300081260FFFD0000000014
+:1061500096D800088EC700043305FFFF30B5000154
+:1061600012A000E4000000000000000D30BFA040BD
+:106170002419004013F9011B30B4A000128000DF85
+:106180000000000093730008126000080000000087
+:10619000976D001031ACFFFF00EC202B1080000346
+:1061A00030AE004011C000D500000000A7850040BF
+:1061B000AF8700389363000802202821AFB1002088
+:1061C000146000F527B40020AF60000C978F0040EA
+:1061D00031F1400016200002240300162403000EB3
+:1061E00024054007A363000AAF650014938A0042A8
+:1061F0008F7000143155000100151240020248252D
+:10620000AF690014979F00408F78001433F9001095
+:1062100003194025AF6800149792004032470008E8
+:1062200010E0016E000000008F6700143C121000A7
+:106230003C11800000F27825AF6F001436230E0069
+:10624000946E000A3C0D81002406000E31CCFFFF45
+:10625000018D2025AF640004A36600029373000A39
+:106260003406FFFC266B0004A36B000A97980040DD
+:10627000330820001100015F000000003C05800091
+:1062800034A90E00979900409538000C978700407C
+:10629000001940423312C0003103000300127B0397
+:1062A00030F11000006F68250011720301AE602507
+:1062B000000C20C0A764001297930040936A000A64
+:1062C000001359823175003C02AA10212450003C71
+:1062D000A3700009953F000C33F93FFFA779001028
+:1062E00097700012936900090130F82127E5000238
+:1062F00030B900070019C02333080007A368000B5A
+:106300009371000997720012976F0010322700FFF7
+:106310008F910038978D004000F21821006F702196
+:1063200001C6602131A6004010C000053185FFFF85
+:1063300000B1102B3C128000104000170000982183
+:106340000225A82B56A0013E8FA500203C0480000A
+:10635000348A0E008D5300143C068000AD530010AB
+:106360008D4B001CAD4B0018AD4500008CCD0000DE
+:1063700031AC00081180FFFD34CE0E0095C300083B
+:1063800000A0882100009021A78300408DC6000452
+:1063900024130001AF860038976F001031F5FFFF1E
+:1063A0008E9F000003F1282310A0011FAE8500007E
+:1063B00093620008144000DD000000000E0001E7B9
+:1063C000240400108F900048004028213C02320035
+:1063D000320600FF000654000142F825260900019C
+:1063E000AF890048ACBF000093790009977800128C
+:1063F000936F000A332800FF3303FFFF01033821A6
+:1064000000076C0031EE00FF01AE6025ACAC00046B
+:106410008F840048978B0040316A20001140010AA8
+:10642000ACA4000897640012308BFFFF06400108FF
+:10643000ACAB000C978E004031C5000814A00002E0
+:1064400026280006262800023C1F800037E70E00A1
+:1064500094F900148CE5001C8F6700049378000207
+:106460003324FFFF330300FFAFA300108F6F00142E
+:10647000AFA800180E0001CBAFAF00142404001029
+:106480000E0001FB000000008E9200001640000587
+:10649000000000008F7800142403FFBF0303A02432
+:1064A000AF7400148F67000C00F5C821AF79000CA1
+:1064B0009375000816A00008000000001260000696
+:1064C000000000008F6800143C0AEFFF3549FFFE12
+:1064D0000109F824AF7F0014A37300088FA50020E2
+:1064E0000A00034F02202021AED100000A00022D35
+:1064F0003C03800014E0FF1E30BFA0400E0001905E
+:106500000000A0212E9100010237B02512C0001812
+:106510008FBF00488F87003C24170F0010F700D46E
+:106520003C0680008CD901780720FFFE241F0F0055
+:1065300010FF00F634CA0E008D56001434C7014017
+:1065400024080240ACF600048D49001C3C141000E5
+:10655000ACE90008A0E00012A4E0001AACE00020C2
+:10656000A4E00018ACE80014ACD401788FBF004858
+:106570008FB700448FB600408FB5003C8FB4003811
+:106580008FB300348FB200308FB1002C8FB0002851
+:1065900003E0000827BD00508F9100389788004025
+:1065A0003C1280000220A8213107004014E0FF7C4B
+:1065B00000009821977900108F9200383338FFFF40
+:1065C000131200A8000020210080A021108000F3F9
+:1065D00000A088211620FECE000000000A00031F44
+:1065E0002E9100013C0380008C6201780440FFFE84
+:1065F000240808008F860000AC6801783C03800006
+:10660000946D008A31ACFFFF01865823256AFFFF95
+:1066100031441FFF2C8900081520FFF900000000FD
+:106620008F8F0048347040008F83003C00E0A02131
+:10663000240E0F0025E70001AF87004800D030216D
+:10664000023488233C08800031F500FF106E0005FD
+:106650002407000193980042331300010013924075
+:1066600036470001001524003C0A0100008A482535
+:10667000ACC900008F82004830BF003630B9000836
+:10668000ACC200041320009900FF982535120E00BB
+:106690009650000A8F8700003C0F81003203FFFFF5
+:1066A00024ED000835060140006F60253C0E100007
+:1066B00031AB1FFF269200062405000EACCC002053
+:1066C000026E9825A4C5001AAF8B0000A4D2001852
+:1066D000162000083C1080008F89003C24020F0027
+:1066E0005122000224170001367300400E00018879
+:1066F0003C10800036060E008CCB0014360A014098
+:1067000002402021AD4B00048CC5001CAD450008A3
+:10671000A1550012AD5300140E0001983C15100055
+:10672000AE1501780A00035200000000936F0009C3
+:10673000976E0012936D000B31E500FF00AE202133
+:1067400031AC00FF008C80212602000A3050FFFF90
+:106750000E0001E7020020218F8600483C03410023
+:106760003C05800024CB0001AF8B0048936A0009F0
+:106770009769001230C600FF315F00FF3128FFFF2C
+:1067800003E8382124F900020006C4000319782523
+:1067900001E37025AC4E00008F6D000C34A40E0098
+:1067A000948B001401B26025AC4C00048C85001C55
+:1067B0008F670004936A00023164FFFF314900FFD4
+:1067C000AFA900108F680014AFB100180E0001CB04
+:1067D000AFA800140A0002FD02002021AF600004EF
+:1067E000A360000297980040330820001500FEA324
+:1067F00000003021A760001297840040936B000ACC
+:106800003C10800030931F0000135183014BA821DE
+:1068100026A20028A362000936090E00953F000C4D
+:106820000A000295A77F00108F70001436090040FF
+:106830000E000188AF6900140A0002C900000000C0
+:106840000A00034F000020210641FEFAACA0000C14
+:106850008CAC000C3C0D8000018D90250A0002EAF2
+:10686000ACB2000C000090210A0002C52413000104
+:10687000128000073C028000344B0E009566000831
+:1068800030D3004012600049000000003C06800048
+:106890008CD001780600FFFE34C50E0094B50010C0
+:1068A0003C03050034CC014032B8FFFF03039025C0
+:1068B000AD92000C8CAF0014240D20003C0410009D
+:1068C000AD8F00048CAE001CAD8E0008A1800012BC
+:1068D000A580001AAD800020A5800018AD8D0014A1
+:1068E000ACC401780A0003263C0680008F9F00009C
+:1068F000351801402692000227F9000833281FFFAF
+:10690000A71200180A000391AF8800003C02800023
+:1069100034450140ACA0000C1280001B34530E0023
+:1069200034510E008E370010ACB700048E240018CE
+:106930003C0B8000ACA400083570014024040040EA
+:10694000A20000128FBF0048A600001A8FB70044B3
+:10695000AE0000208FB60040A60000188FB5003CA6
+:10696000AE0400148FB400388FB300348FB20030FF
+:106970008FB1002C8FB000283C02100027BD0050C2
+:1069800003E00008AD6201788E660014ACA6000436
+:106990008E64001C0A00042A3C0B80000E0001904B
+:1069A0002E9100010A0003200237B02500000000EC
+:1069B0000000000D00000000240003690A0004012B
+:1069C0003C06800027BDFFD8AFBF00203C098000F7
+:1069D0003C1F20FFAFB200183C07600035320E00AC
+:1069E0002402001037F9FFFDACE23008AFB3001C01
+:1069F000AFB10014AFB00010AE59000000000000AD
+:106A00000000000000000000000000000000000086
+:106A10003C1800FF3713FFFDAE5300003C0B600431
+:106A20008D7050002411FF7F3C0E0002021178246B
+:106A300035EC380C35CD0109ACED4C18240A0009B1
+:106A4000AD6C50008CE80438AD2A0008AD2000146D
+:106A50008CE54C1C3106FFFF38C42F7100051E0267
+:106A60003062000F2486C0B310400007AF820008D8
+:106A70008CE54C1C3C09001F3528FC0000A818249C
+:106A8000000321C2AF8400048CF108083C0F5709B1
+:106A90002412F0000232702435F0001001D060267C
+:106AA00001CF68262DAA00012D8B0001014B38254E
+:106AB00050E00009A380000C3C1F601C8FF8000808
+:106AC00024190001A399000C33137C00A793001034
+:106AD000A780000EA380000DAF80004814C0000303
+:106AE000AF8000003C066000ACC0442C0E0005B92D
+:106AF0003C1080000E000F1A361101003C120800F5
+:106B000026523DD03C13080026733E508E030000F1
+:106B100038640001308200011440FFFC3C0B800A05
+:106B20008E2600002407FF8024C90240312A007FFE
+:106B3000014B402101272824AE060020AF880044E5
+:106B4000AE0500243C048000AF86003C8C8C0178AC
+:106B50000580FFFE24180800922F0008AC980178E9
+:106B6000A38F0042938E004231CD000111A0000F8F
+:106B700024050D0024DFF8002FF903011320001C69
+:106B8000000629C224A4FFF0000410420002314094
+:106B90000E00020200D2D8213C0240003C068000D8
+:106BA000ACC201380A0004A00000000010C5002398
+:106BB000240D0F0010CD00273C1F800837F90080FE
+:106BC00093380000240E0050330F00FF15EEFFF342
+:106BD0003C0240000E000A36000000003C0240006B
+:106BE0003C068000ACC201380A0004A0000000008E
+:106BF0008F83000400A3402B1500000B8F8B00082F
+:106C0000006B50212547FFFF00E5482B15200006AB
+:106C100000A36023000C19400E0002020073D8216B
+:106C20000A0004C43C0240000000000D0E000202F5
+:106C3000000000000A0004C43C0240003C1B0800A5
+:106C4000277B3F500E000202000000000A0004C42F
+:106C50003C0240003C1B0800277B3F700E000202F4
+:106C6000000000000A0004C43C0240003C0660042E
+:106C70003C09080025290104ACC9502C8CC85000DF
+:106C80003C0580003C02000235070080ACC7500084
+:106C90003C040800248415A43C0308002463155C0C
+:106CA000ACA50008ACA2000C3C010800AC243D607F
+:106CB0003C010800AC233D6403E00008240200010D
+:106CC00000A030213C1C0800279C3D683C0C0400BF
+:106CD0003C0B0002008B3826008C40262CE2000181
+:106CE0000007502B2D050001000A48803C030800D6
+:106CF00024633D60004520250123182110800003F6
+:106D000000001021AC6600002402000103E000082E
+:106D1000000000003C1C0800279C3D683C0B040060
+:106D20003C0A0002008A3026008B38262CC2000163
+:106D30000006482B2CE50001000940803C030800B8
+:106D400024633D60004520250103182110800005C3
+:106D5000000010213C0C0800258C155CAC6C000078
+:106D60002402000103E00008000000003C090002CA
+:106D70003C08040000883026008938262CC3000116
+:106D8000008028212CE40001008310251040000B16
+:106D9000000030213C1C0800279C3D683C0A800014
+:106DA0008D4E00082406000101CA6825AD4D00087B
+:106DB0008D4C000C01855825AD4B000C03E00008FC
+:106DC00000C010213C1C0800279C3D683C05800049
+:106DD0008CA6000C000420272402000100C4182403
+:106DE00003E00008ACA3000C3C0200021082000B80
+:106DF0003C0560003C070400108700030000000011
+:106E000003E00008000000008CA908D0240AFFFD60
+:106E1000012A402403E00008ACA808D08CA408D0C4
+:106E20002406FFFE0086182403E00008ACA308D067
+:106E30003C05601A34A600108CC3008027BDFFF803
+:106E40008CC50084AFA3000093A4000024020001BD
+:106E500010820003AFA5000403E0000827BD00086E
+:106E600093A7000114E0001497AC000297B8000249
+:106E70003C0F8000330EFFFC01CF6821ADA5000060
+:106E8000A3A000003C0660008CC708D02408FFFEC9
+:106E90003C04601A00E82824ACC508D08FA3000485
+:106EA0008FA200003499001027BD0008AF22008097
+:106EB00003E00008AF2300843C0B8000318AFFFC14
+:106EC000014B48218D2800000A00057DAFA8000471
+:106ED00027BDFFE8AFBF00103C1C0800279C3D68A1
+:106EE0003C0580008CA4000C8CA200043C03000232
+:106EF0000044282410A0000A00A318243C06040023
+:106F00003C0400021460000900A610241440000F85
+:106F10003C0404000000000D3C1C0800279C3D6858
+:106F20008FBF001003E0000827BD00183C020800D6
+:106F30008C423D600040F809000000003C1C080045
+:106F4000279C3D680A0005A68FBF00103C02080080
+:106F50008C423D640040F809000000000A0005ACC6
+:106F600000000000000411C003E0000824420240B9
+:106F70003C04080024843FB42405001A0A00009C45
+:106F80000000302127BDFFE0AFB000103C108000B2
+:106F9000AFBF0018AFB100143611010092220009F2
+:106FA0000E0005B63044007F8E3F00008F89003C04
+:106FB0003C0F008003E26021258800400049F82151
+:106FC000240DFF80310E00783198007835F90001EA
+:106FD00035F100020319382501D14825010D30246F
+:106FE00003ED5824018D2824240A00402404008045
+:106FF000240300C0AE0B0024AE000810AE0A081433
+:10700000AE040818AE03081CAE050804AE0708203D
+:10701000AE060808AE090824360909009539000CA7
+:107020003605098033ED007F3338FFFF001889C033
+:10703000AE110800AE0F0828952C000C8FBF001869
+:107040008FB10014318BFFFF000B51C0AE0A002C32
+:107050008CA400508FB000108CA3003C8D2700043E
+:107060008CA8001C8CA600383C0E800A01AE1021B2
+:1070700027BD0020AF820044AF840050AF8300548E
+:10708000AF87004CAF88005C03E00008AF8600606B
+:107090003C09080091293FD924A800023C051100B1
+:1070A00000093C0000E8302500C5182524820008AE
+:1070B000AC83000003E00008AC8000043C098000C1
+:1070C000352309009128010B906A00112402002841
+:1070D00000804821314700FF00A0702100C06821D6
+:1070E0003108004010E20002340C86DD240C080058
+:1070F0003C0A800035420A9A94470000354B0A9CAE
+:1071000035460AA030F9FFFFAD3900008D78000048
+:10711000354B0A8024040001AD3800048CCF0000F8
+:10712000AD2F00089165001930A300031064009092
+:1071300028640002148000AF240500021065009E40
+:10714000240F0003106F00B435450AA4240A080078
+:10715000118A0048000000005100003D3C0B8000F7
+:107160003C048000348309009067001230E200FF85
+:10717000004D7821000FC880272400013C0A8000C0
+:10718000354F090091E50019354C09808D8700289D
+:1071900030A300FF00031500004758250004C40079
+:1071A0003C19600001793025370806FFAD26000044
+:1071B000AD2800048DEA002C25280028AD2A0008FF
+:1071C0008DEC0030AD2C000C8DE50034AD250010A9
+:1071D0008DE400383C05800034AC093CAD2400143B
+:1071E0008DE3001CAD2300188DE70020AD27001CA7
+:1071F0008DE20024AD2200208DF9002834A2010088
+:10720000AD3900248D830000AD0E000434B90900AF
+:10721000AD0300008C47000C25020014AD070008E8
+:10722000932B00123C04080090843FD8AD0000105E
+:10723000317800FF030D302100064F0000047C0070
+:10724000012F702535CDFFFC03E00008AD0D000CCB
+:1072500035780900930600123C05080094A53FC844
+:1072600030C800FF010D5021000A60800A00063C72
+:10727000018520211500005B000000003C0808008B
+:1072800095083FCE3C06080094C63FC80106102171
+:107290003C0B80003579090093380011932A0019BE
+:1072A00035660A80330800FF94CF002A0008608208
+:1072B000314500FF978A0058000C1E00000524008D
+:1072C0003047FFFF006410250047C02501EA302148
+:1072D0003C0B4000030B402500066400AD28000075
+:1072E000AD2C0004932500183C030006252800144B
+:1072F00000053E0000E31025AD2200088F24002C7D
+:107300003C05800034AC093CAD24000C8F38001CD7
+:1073100034A20100254F0001AD3800108D8300001C
+:10732000AD0E000431EB7FFFAD0300008C47000C75
+:1073300034B90900A78B0058AD070008932B001241
+:107340003C04080090843FD825020014317800FFE7
+:10735000030D302100064F0000047C00012F702532
+:1073600035CDFFFCAD00001003E00008AD0D000CB2
+:107370003C02080094423FD23C05080094A53FC857
+:1073800035440AA43C07080094E73FC4948B0000EE
+:107390000045C8210327C023000B1C002706FFF26D
+:1073A00000665025AD2A000CAD200010AD2C001455
+:1073B0000A00063025290018354F0AA495E500007B
+:1073C000956400280005140000043C003459810035
+:1073D00000EC5825AD39000CAD2B00100A0006302A
+:1073E000252900143C0C0800958C3FCE0A0006812C
+:1073F000258200015460FF56240A080035580AA46B
+:107400009706000000061C00006C5025AD2A000CF9
+:107410000A000630252900103C03080094633FD27F
+:107420003C07080094E73FC83C0F080095EF3FC4B5
+:1074300094A400009579002800671021004F58237C
+:1074400000041C00001934002578FFEE00D87825D0
+:10745000346A8100AD2A000CAD2F0010AD2000145D
+:10746000AD2C00180A0006302529001C03E0000896
+:10747000240207D027BDFFE0AFB20018AFB100145F
+:10748000AFB00010AFBF001C0E00007C0080882150
+:107490008F8800548F87004C3C05800834B20080F0
+:1074A000011128213C10800024020080240300C028
+:1074B00000A72023AE0208183C068008AE03081C73
+:1074C00018800004AF850054ACC500048CC90004CA
+:1074D000AF89004C12200009360409800E0006F81E
+:1074E00000000000924C00278E0B007401825004B3
+:1074F000014B3021AE46000C360409808C8E001CF6
+:107500008F8F005C01CF682319A000048FBF001C7F
+:107510008C90001CAF90005C8FBF001C8FB20018D5
+:107520008FB100148FB000100A00007E27BD00202C
+:107530008F8600508F8300548F82004C3C0580085A
+:1075400034A40080AC860050AC83003C03E000080B
+:10755000ACA200043C0308008C63005427BDFFF874
+:10756000308400FF2462000130A500FF3C010800C8
+:10757000AC22005430C600FF3C0780008CE8017844
+:107580000500FFFE3C0C7FFFA3A400038FAA0000B0
+:10759000358BFFFF014B4824000627C001244025FE
+:1075A000AFA8000034E201009043000AA3A000024B
+:1075B0003C1980FFA3A300018FAF000030AE007F15
+:1075C0003738FFFF01F86024000E6E003C0A0020EF
+:1075D00034E50140018D5825354920002406FF80FF
+:1075E0003C04100027BD0008ACAB000CACA9001493
+:1075F000A4A00018A0A6001203E00008ACE40178E3
+:10760000308800FF30A700FF3C0380008C620178C7
+:107610000440FFFE3C0C8000358A0A008D4B0020A0
+:107620003584014035850980AC8B00048D490024E8
+:107630000007302B00061540AC890008A088001018
+:1076400090A3004CA083002D03E00008A480001844
+:1076500027BDFFE8308400FFAFBF00100E00075DBC
+:1076600030A500FF8F8300548FBF00103C068000C0
+:1076700034C50140344700402404FF903C02100010
+:1076800027BD0018ACA3000CA0A40012ACA70014E6
+:1076900003E00008ACC2017827BDFFE03C08800889
+:1076A000AFBF001CAFB20018AFB10014AFB00010F4
+:1076B000351000808E0600183C078000309200FFD5
+:1076C00000C72025AE0400180E00007C30B100FF7A
+:1076D00092030005346200080E00007EA20200053D
+:1076E000024020210E0007710220282102402021A3
+:1076F0008FBF001C8FB200188FB100148FB0001024
+:1077000024050005240600010A00073227BD0020D9
+:107710003C05800034A309809066000830C2000850
+:107720001040000F3C0A01013549080AAC890000ED
+:107730008CA80074AC8800043C07080090E73FD890
+:1077400030E5001050A00008AC8000083C0D800817
+:1077500035AC00808D8B0058AC8B00082484000C65
+:1077600003E00008008010210A0007B52484000C03
+:1077700027BDFFE83C098000AFB00010AFBF001488
+:107780003526098090C800092402000600A058216F
+:10779000310300FF35270900008080212405000403
+:1077A0001062007B2408000294CF005C3C0E0204AF
+:1077B00031EDFFFF01AE6025AE0C000090CA00085D
+:1077C00031440020108000080000000090C2004EEC
+:1077D0003C1F010337F90300305800FF031930251F
+:1077E00024050008AE06000490F9001190E600128E
+:1077F00090E40011333800FF0018708230CF00FF92
+:1078000001CF5021014B6821308900FF31AAFFFFD1
+:1078100039230028000A60801460002C020C4823E1
+:1078200090E400123C198000372F0100308C00FFDB
+:10783000018B1821000310800045F821001F8400EF
+:10784000360706FFAD270004373F090093EC00110F
+:1078500093EE0012372609800005C0828DE4000CEB
+:107860008CC5003431CD00FF01AB10210058182128
+:1078700000A4F8230008840000033F0000F0302536
+:1078800033F9FFFF318F00FC00D97025015820210A
+:1078900001E9682100045080ADAE000C0E00007CB0
+:1078A000012A80213C088008240B00043505008053
+:1078B0000E00007EA0AB0009020010218FBF001453
+:1078C0008FB0001003E0000827BD001890EC0011F5
+:1078D00090E300193C18080097183FCE318200FF52
+:1078E0000002F882307000FF001FCE0000103C0044
+:1078F0000327302500D870253C0F400001CF6825B4
+:107900003C198000AD2D0000373F090093EC0011B9
+:1079100093EE0012372F0100372609800005C08240
+:107920008DE4000C8CC5003431CD00FF01AB10217B
+:107930000058182100A4F8230008840000033F0029
+:1079400000F0302533F9FFFF318F00FC00D970259E
+:107950000158202101E9682100045080ADAE000CDF
+:107960000E00007C012A80213C088008240B0004C2
+:10797000350500800E00007EA0AB0009020010213A
+:107980008FBF00148FB0001003E0000827BD00185F
+:107990000A0007C72408001227BDFFD03C0380005F
+:1079A000AFB60028AFB50024AFB40020AFB10014CB
+:1079B000AFBF002CAFB3001CAFB20018AFB00010C7
+:1079C0003467010090E6000B309400FF30B500FFF3
+:1079D00030C200300000B021104000990000882122
+:1079E000346409809088000800082E0000051E03FA
+:1079F000046000C0240400048F8600543C01080089
+:107A0000A0243FD83C0C8000AD8000483C0480009E
+:107A1000348E010091CD000B31A5002010A000078D
+:107A20003C0780003493098092720008001286009F
+:107A300000107E0305E000C43C1F800834EC010008
+:107A4000918A000B34EB09809169000831440040B1
+:107A50000004402B3123000800C898231460000262
+:107A600024120003000090213C10800036180A8088
+:107A700036040900970E002C9083001190890012A3
+:107A800093050018307F00FF312800FF02481021C5
+:107A90000002C880930D0018033F782101F13021C6
+:107AA00030B100FF00D11821A78E00583C0108001A
+:107AB000A4263FCE3C010800A4233FD015A000021D
+:107AC000000000000000000D920B010B3065FFFF6D
+:107AD0003C010800A4233FD2316A00403C01080069
+:107AE000A4203FC83C010800A4203FC4114000026C
+:107AF00024A4000A24A4000B3091FFFF0E0001E72C
+:107B0000022020219206010B3C0C0800958C3FD2EC
+:107B1000004020210006698231A700010E00060105
+:107B20000187282100402021026028210E00060C38
+:107B3000024030210E0007A10040202116C000693C
+:107B4000004020219212010B3256004012C0000565
+:107B50003C0500FF8C93000034AEFFFF026E8024D2
+:107B6000AC9000000E0001FB022020213C0F080019
+:107B700091EF3FD831F10003122000163C1380082A
+:107B80008F8200543C09800835280080245F000162
+:107B9000AD1F003C3C0580088CB9000403E02021A7
+:107BA000033FC0231B000002AF9F00548CA40004BD
+:107BB0000E0006F8ACA400043C0780008CEB0074B7
+:107BC0003C04800834830080004B5021AC6A000CD8
+:107BD0003C138008367000800280202102A02821FA
+:107BE000A200006B0E00075D3C1480008F920054D1
+:107BF000368C0140AD92000C8F8600483C15100079
+:107C0000344D000624D60001AF9600488FBF002CEB
+:107C1000A18600128FB60028AD8D00148FB3001C12
+:107C2000AE9501788FB200188FB500248FB4002074
+:107C30008FB100148FB0001003E0000827BD0030A2
+:107C400034640980908F0008000F7600000E6E03E8
+:107C500005A00033347F090093F8001B241900109D
+:107C60003C010800A0393FD8331300021260FF67BF
+:107C70008F8600548F8200601446FF653C048000AC
+:107C80000E00007C000000003C0480083485008069
+:107C900090A8000924060016310300FF1066000DAD
+:107CA0000000000090AB00093C07080090E73FD8B7
+:107CB00024090008316400FF34EA00013C01080097
+:107CC000A02A3FD81089002F240C000A108C00280D
+:107CD0002402000C0E00007E000000000A00086074
+:107CE0008F8600540E0007B9024028210A0008AE12
+:107CF000004020213C0B8008356A00808D460054EE
+:107D00008CE9000C1120FF3DAF86005424070014BD
+:107D10003C010800A0273FD80A00085F3C0C800007
+:107D200090910008241200023C010800A0323FD8C4
+:107D3000323000201200000B241600018F86005400
+:107D40000A0008602411000837F800808F0200380C
+:107D5000AFE200048FF90004AF19003C0A00086C80
+:107D60003C0780008F8600540A000860241100043C
+:107D7000A0A200090E00007E000000000A000860BA
+:107D80008F860054240200140A00093AA0A20009B8
+:107D900027BDFFE8AFB000103C108000AFBF00145B
+:107DA00036020100904400090E00075D2405000121
+:107DB0003C0480089099000E34830080909F000F4F
+:107DC000906F00269089000A33F800FF00196E00BA
+:107DD0000018740031EC00FF01AE5025000C5A0071
+:107DE000014B3825312800FF36030140344560003F
+:107DF00000E830252402FF813C041000AC66000C32
+:107E00008FBF0014AC650014A0620012AE040178AC
+:107E10008FB0001003E0000827BD001827BDFFE861
+:107E2000308400FFAFBF00100E00075D30A500FFDB
+:107E30003C05800034A40140344700402406FF92F2
+:107E4000AC870014A08600128F8300548FBF0010EF
+:107E50003C02100027BD0018AC83000C03E00008B2
+:107E6000ACA2017827BDFFD8AFB00010308400FF6E
+:107E700030B000FF3C058000AFB10014AFBF002060
+:107E8000AFB3001CAFB20018000410C234A601004A
+:107E900032030002305100011460000790D2000943
+:107EA0003C098008353300809268000531070008DE
+:107EB00010E0000C308A0010024020210E000783E1
+:107EC00002202821240200018FBF00208FB3001C54
+:107ED0008FB200188FB100148FB0001003E00008BB
+:107EE00027BD00281540003434A50A008CB80024B2
+:107EF0008CAF0008130F004B000038213C0D8008A8
+:107F000035B30080926C006824060002318B00FFBC
+:107F1000116600843C06800034C201009263004C6C
+:107F200090590009307F00FF53F900043213007CA0
+:107F300010E00069000000003213007C5660005C15
+:107F40000240202116200009320D00013C0C800067
+:107F500035840100358B0A008D6500248C86000471
+:107F600014A6FFD900001021320D000111A0000E4F
+:107F7000024020213C188000371001008E0F000CB9
+:107F80008F8E005011EE0008000000000E00084324
+:107F9000022028218E19000C3C1F800837F0008039
+:107FA000AE190050024020210E0007710220282146
+:107FB0000A00098F240200013C0508008CA500641A
+:107FC00024A400013C010800AC2400641600000D4C
+:107FD00000000000022028210E000771024020212D
+:107FE000926E0068240C000231CD00FF11AC00221B
+:107FF000024020210E000941000000000A00098F04
+:10800000240200010E00007024040001926B002580
+:10801000020B30250E00007EA26600250A0009D35F
+:10802000022028218E6200188CDF00048CB9002405
+:1080300000021E0217F9FFB13065007F9268004C04
+:10804000264400013093007F12650040310300FF99
+:108050001464FFAB3C0D80082647000130F1007F1F
+:1080600030E200FF1225000B2407000100409021A0
+:108070000A00099C24110001240500040E000732A7
+:10808000240600010E000941000000000A00098FCB
+:10809000240200012405FF8002452024008590264B
+:1080A000324200FF004090210A00099C2411000187
+:1080B0000E000843022028213207003010E0FFA103
+:1080C00032100082024020210E0007830220282166
+:1080D0000A00098F240200018E6900180240202145
+:1080E00002202821012640250E000964AE680018F0
+:1080F0009264004C24050003240600010E000732A0
+:10810000308400FF0E0000702404000192710025ED
+:10811000021150250E00007EA26A00250A00098F78
+:10812000240200018E6F00183C18800002402021BC
+:1081300001F87025022028210E000771AE6E00188C
+:108140009264004C0A000A1B24050004324A008095
+:10815000394900801469FF6A3C0D80080A0009F45F
+:108160002647000127BDFFC0AFB000183C108000BB
+:10817000AFBF0038AFB70034AFB60030AFB5002C9A
+:10818000AFB40028AFB30024AFB200200E0005BE8C
+:10819000AFB1001C360201009045000B0E000976BD
+:1081A00090440008144000E78FBF00383C08800866
+:1081B00035070080A0E0006B3606098090C50000FE
+:1081C000240300503C17080026F73F9030A400FF1E
+:1081D0003C13080026733FA0108300033C1080006E
+:1081E0000000B82100009821241F00103611010062
+:1081F00036120A00361509808E5800248E34000489
+:108200008EAF00208F8C00543C010800A03F3FD867
+:1082100036190A80972B002C8EF60000932A00183E
+:108220000298702301EC68233C010800AC2E3FB497
+:108230003C010800AC2D3FB83C010800AC2C3FDCF1
+:10824000A78B005802C0F809315400FF30490002E2
+:10825000152000E930420001504000C49227000977
+:1082600092A9000831280008150000022415000317
+:108270000000A8213C0A80003543090035440A006B
+:108280008C8D00249072001190700012907F00116C
+:10829000325900FF321100FF02B110210002C080EC
+:1082A00033EF00FF0319B021028F702102D4602147
+:1082B00025CB00103C010800A4363FCE3C0108004D
+:1082C000AC2D3FE03C010800A42C3FD03C0108004D
+:1082D000A42B3FCC355601003554098035510E0092
+:1082E0008F8700548F89005C8E850020240800064B
+:1082F000012730233C010800AC283FD400A72823E5
+:1083000004C000B50000902104A000B300C5502BAC
+:10831000114000B5000000003C010800AC263FB849
+:108320008E6200000040F8090000000030460002A4
+:1083300014C0007400408021304B000155600011D2
+:108340008E6200043C0D08008DAD3FBC3C0EC000A9
+:108350003C04800001AE6025AE2C00008C9800002B
+:10836000330F000811E0FFFD00000000963F0008F9
+:1083700024120001A79F00408E390004AF990038F5
+:108380008E6200040040F80900000000020280250F
+:1083900032030002146000B3000000003C09080032
+:1083A00095293FC43C06080094C63FD03C0A08000B
+:1083B000954A3FC63C0708008CE73FBC0126702168
+:1083C0003C0308008C633FE03C08080095083FDA56
+:1083D00001CA20218ED9000C00E92821249F000227
+:1083E00000A878210067C02133E4FFFFAF99005057
+:1083F0003C010800AC383FE03C010800A42F3FC816
+:108400003C010800A42E3FD20E0001E7000000004E
+:108410008F8D0048004020213C010800A02D3FD94D
+:108420008E62000825AC0001AF8C00480040F809BE
+:10843000000000008F85005402A030210E00060CC1
+:10844000004020210E0007A1004020218E6B000C6F
+:108450000160F809004020213C0A0800954A3FD2FB
+:108460003C06080094C63FC6014648212528000264
+:108470000E0001FB3104FFFF3C0508008CA53FB452
+:108480003C0708008CE73FBC00A720233C01080004
+:10849000AC243FB414800006000000003C02080039
+:1084A0008C423FD4344B00403C010800AC2B3FD4FD
+:1084B000124000438F8E00448E2D00108F92004496
+:1084C000AE4D00208E2C0018AE4C00243C04080059
+:1084D00094843FC80E0006FA000000008F9F0054ED
+:1084E0008E6700103C010800AC3F3FDC00E0F8095B
+:1084F000000000003C1908008F393FB41720FF79B5
+:108500008F870054979300583C11800E321601005B
+:108510000E000729A633002C16C0004532030010B8
+:108520005460004C8EE50004320800405500001DE8
+:108530008EF000088EE4000C0080F80900000000B6
+:108540008FBF00388FB700348FB600308FB5002C46
+:108550008FB400288FB300248FB200208FB1001C8D
+:108560008FB0001803E0000827BD00408F86003C54
+:1085700036110E0000072E0000A62025AE04008054
+:108580008E4300208E500024AFA30010AE230014B1
+:108590008FB20010AE320010AE30001C0A000A7517
+:1085A000AE3000180200F809000000008EE4000C54
+:1085B0000080F809000000000A000B2E8FBF003871
+:1085C00024180001240F0001A5C00020A5D8002216
+:1085D0000A000B10ADCF00243C010800AC203FB8CE
+:1085E0000A000AA68E6200003C010800AC253FB8D4
+:1085F0000A000AA68E620000922400090E0007718C
+:10860000000028218FBF00388FB700348FB60030AC
+:108610008FB5002C8FB400288FB300248FB20020B8
+:108620008FB1001C8FB0001803E0000827BD004088
+:108630003C14800092950109000028210E00084397
+:1086400032A400FF320300105060FFB8320800402F
+:108650008EE5000400A0F809000000000A000B28C5
+:10866000320800405240FFA8979300588E340014FF
+:108670008F930044AE7400208E35001CAE7500242C
+:108680000A000B1F979300588F820014000421806A
+:1086900003E00008008210213C07800834E20080DB
+:1086A0009043006900804021106000093C040100F3
+:1086B0003C0708008CE73FDC8F83003000E3202379
+:1086C000048000089389001C14E3000301002021AA
+:1086D00003E00008008010213C04010003E00008D2
+:1086E000008010211120000B006738233C0D800012
+:1086F00035AC0980918B007C316A0002114000206A
+:108700002409003400E9702B15C0FFF1010020217D
+:1087100000E938232403FFFC00A3C82400E3C0249D
+:1087200000F9782B15E0FFEA0308202130C400038C
+:108730000004102314C0001430490003000030214D
+:1087400000A9782101E6702100EE682B11A0FFE05E
+:108750003C0401002D3800010006C82B010548210A
+:108760000319382414E0FFDA2524FFFC2402FFFC5F
+:1087700000A218240068202103E0000800801021D6
+:108780000A000B9E240900303C0C800035860980CD
+:1087900090CB007C316A00041540FFE924060004F8
+:1087A0000A000BAD000030213C0308008C63005C24
+:1087B0008F82001827BDFFE0AFBF0018AFB10014D3
+:1087C00010620005AFB00010000329C024A402808D
+:1087D000AF840014AF8300183C10800036020A00FA
+:1087E00094450032361101000E000B7F30A43FFF8C
+:1087F0008E240000241FFF803C1100800082C021D5
+:10880000031F60243309007F000CC9400329402561
+:10881000330E0078362F00033C0D1000010D50255B
+:1088200001CF5825AE0C002836080980AE0C080C84
+:10883000AE0B082CAE0A0830910300693C06800C90
+:108840000126382110600006AF8700348D09003CF6
+:108850008D03006C0123382318E000820000000023
+:108860003C0B8008356A00803C108000A140006904
+:10887000360609808CC200383C06800034C50A00E8
+:1088800090A8003C310C00201180001AAF8200300B
+:10889000240D00013C0E800035D10A00A38D001C80
+:1088A000AF8000248E2400248F850024240D00082E
+:1088B000AF800020AF8000283C010800A42D3FC6F7
+:1088C0003C010800A4203FDA0E000B830000302199
+:1088D0009228003C8FBF00188FB100148FB0001099
+:1088E00000086142AF82002C27BD002003E0000891
+:1088F0003182000190B80032240E0001330F00FFD6
+:10890000000F2182108E004124190002109900648A
+:1089100034C40AC03C03800034640A008C8F0024F5
+:1089200015E0001E34660900909F003024180005F1
+:1089300033F9003F1338004E240300018F860020D6
+:10894000A383001CAF860028AF8600243C0E800065
+:1089500035D10A008E2400248F850024240D0008C0
+:108960003C010800A42D3FC63C010800A4203FDACA
+:108970000E000B83000000009228003C8FBF0018FF
+:108980008FB100148FB0001000086142AF82002C3C
+:1089900027BD002003E00008318200018C8A000816
+:1089A0008C8B00248CD000643C0E800035D10A00F2
+:1089B000014B2823AF900024A380001CAF85002822
+:1089C0008E2400248F8600208F850024240D00082B
+:1089D0003C010800A42D3FC63C010800A4203FDA5A
+:1089E0000E000B83000000009228003C8FBF00188F
+:1089F0008FB100148FB0001000086142AF82002CCC
+:108A000027BD002003E000083182000190A2003061
+:108A10003051003F5224002834C50AC08CB00024D5
+:108A20001600002234CB09008CA600483C0A7FFFC8
+:108A30003545FFFF00C510243C0E8000AF820020AA
+:108A400035C509008F8800208CAD0060010D602BBA
+:108A500015800002010020218CA400600A000C2275
+:108A6000AF8400208D02006C0A000BFC3C068000E5
+:108A70008C8200488F8600203C097FFF3527FFFF4E
+:108A8000004788243C04800824030001AF9100289B
+:108A9000AC80006CA383001C0A000C30AF8600245D
+:108AA0008C9F00140A000C22AF9F00208D6200688A
+:108AB0000A000C6C3C0E800034C409808C89007064
+:108AC0008CA300140123382B10E0000400000000E8
+:108AD0008C8200700A000C6C3C0E80008CA200148A
+:108AE0000A000C6C3C0E80008F85002427BDFFE03F
+:108AF000AFBF0018AFB1001414A00008AFB0001051
+:108B00003C04800034870A0090E60030240200050F
+:108B100030C3003F106200B9348409008F910020F7
+:108B200000A080213C048000348E0A008DCD00041A
+:108B30003C0608008CC63FB831A73FFF00E6602B1B
+:108B40005580000100E03021938F001C11E0007877
+:108B500000D0282B349F098093F9007C3338000221
+:108B6000130000792403003400C3102B144000D9F3
+:108B70000000000000C3302300D0282B3C01080077
+:108B8000A4233FC414A0006E020018213C04080076
+:108B90008C843FB40064402B55000001006020210C
+:108BA0003C05800034A90A00912A003C3C010800E1
+:108BB000AC243FBC31430020146000030000482176
+:108BC00034AB0E008D6900188F88002C0128202BF3
+:108BD0001080005F000000003C0508008CA53FBC31
+:108BE00000A96821010D602B1180005C00B0702B82
+:108BF0000109382300E028213C010800AC273FBCD4
+:108C000012000003240AFFFC10B0008D3224000380
+:108C100000AA18243C010800A4203FDA3C01080007
+:108C2000AC233FBC006028218F840024120400067E
+:108C30003C0B80088D6C006C02002021AF9100205D
+:108C400025900001AD70006C8F8D00280085882371
+:108C5000AF91002401A52023AF8400281220000238
+:108C600024070018240700103C18800837060080ED
+:108C700090CF00683C010800A0273FD824070001DE
+:108C800031EE00FF11C700470000000014800018FB
+:108C9000000028213C06800034D1098034CD010039
+:108CA00091A600098E2C001824C40001000C860235
+:108CB0003205007F308B007F1165007F2407FF8025
+:108CC0003C19800837290080A124004C3C0808008A
+:108CD0008D083FD4241800023C010800A038401938
+:108CE000350F00083C010800AC2F3FD424050010CC
+:108CF0003C02800034440A009083003C307F002016
+:108D000013E0000500A02021240A00013C01080016
+:108D1000AC2A3FBC34A400018FBF00188FB10014EF
+:108D20008FB000100080102103E0000827BD002054
+:108D30003C010800A4203FC410A0FF9402001821A9
+:108D40000A000CC000C018210A000CB72403003030
+:108D50003C0508008CA53FBC00B0702B11C0FFA8DB
+:108D6000000000003C19080097393FC40325C021CA
+:108D70000307782B11E000072CAA00043C036000D5
+:108D80008C625404305F003F17E0FFE3240400428C
+:108D90002CAA00041140FF9A240400420A000D246A
+:108DA0008FBF00181528FFB9000000008CCA0018FA
+:108DB0003C1F800024020002015F1825ACC300188C
+:108DC00037F90A00A0C200689329003C240400047B
+:108DD00000A01021312800203C010800A0244019E7
+:108DE0001100000224050010240200013C010800CB
+:108DF000AC223FB40A000D1A3C0280008F88002884
+:108E00008C8900600109282B14A000020100882130
+:108E10008C9100603C048000348B0E008D6400183F
+:108E2000240A00010220282102203021A38A001CEC
+:108E30000E000B83022080210A000CA6AF82002CBA
+:108E40000004582312200007316400033C0E800008
+:108E500035C7098090ED007C31AC00041580001905
+:108E6000248F00043C010800A4243FDA3C1F0800C2
+:108E700097FF3FDA03E5C82100D9C02B1300FF6B31
+:108E80008F8400242CA6000514C0FFA324040042F4
+:108E900030A200031440000200A2182324A3FFFC08
+:108EA0003C010800AC233FBC3C010800A4203FDA91
+:108EB0000A000CE70060282100C770240A000D0D8D
+:108EC00001C720263C010800A42F3FDA0A000D78D4
+:108ED000000000003C010800AC203FBC0A000D234C
+:108EE000240400428F8300283C05800034AA0A0035
+:108EF0001460000600001021914700302406000590
+:108F000030E400FF108600030000000003E00008CA
+:108F100000000000914B0048316900FF000941C288
+:108F20001500FFFA3C0680083C04080094843FC406
+:108F30003C0308008C633FDC3C1908008F393FBCC0
+:108F40003C0F080095EF3FDA0064C0218CCD00048F
+:108F50000319702101CF602134AB0E00018D28234D
+:108F600018A0001D00000000914F004C8F8C0034B1
+:108F7000956D001031EE00FF8D89000401AE3023A5
+:108F80008D8A000030CEFFFF000E29000125C82188
+:108F900000003821014720210325182B0083C02120
+:108FA000AD990004AD980000918F000A01CF6821AF
+:108FB000A18D000A956500128F8A0034A54500082E
+:108FC000954B003825690001A54900389148000DEE
+:108FD00035070008A147000D03E00008000000006D
+:108FE00027BDFFD8AFB000189388001C8FB00014C5
+:108FF0003C0A80003C197FFF8F8700243738FFFF31
+:10900000AFBF0020AFB1001C355F0A000218182462
+:1090100093EB003C00087FC03C02BFFF006F60255F
+:109020002CF000013449FFFF3C1F08008FFF3FDC9C
+:109030008F9900303C18080097183FD20189782496
+:10904000001047803C07EFFF3C05F0FF01E81825C2
+:109050003C1180003169002034E2FFFF34ADFFFF96
+:10906000362E098027A500102406000203F960238C
+:10907000270B0002354A0E00006218240080802170
+:1090800015200002000040218D48001CA7AB0012F3
+:10909000058000392407000030E800FF00083F0089
+:1090A000006758253C028008AFAB0014344F0080A5
+:1090B00091EA00683C08080091083FD93C09DFFFAD
+:1090C000352CFFFF000AF82B3C02080094423FCCED
+:1090D000A3A80011016CC024001FCF40031918255C
+:1090E0008FA70010AFA300143C0C0800918C3FDB4D
+:1090F000A7A200168FAB001400ED48243C0F01001E
+:109100003C0A0FFF012FC82531980003355FFFFF90
+:10911000016D40243C027000033F382400181E00FB
+:1091200000E2482501037825AFAF0014AFA9001075
+:1091300091CC007C0E000092A3AC0015362D0A00E5
+:1091400091A6003C30C400201080000626020008D2
+:109150003C11080096313FC8262EFFFF3C01080055
+:10916000A42E3FC88FBF00208FB1001C8FB0001805
+:1091700003E0000827BD00288F8B002C010B502B2B
+:109180005540FFC5240700010A000E0430E800FF27
+:109190009383001C3C02800027BDFFD834480A009E
+:1091A00000805021AFBF002034460AC001002821B2
+:1091B0001060000E3444098091070030240B000534
+:1091C0008F89002030EC003F118B000B000038210C
+:1091D000AFA900103C0B80088D69006CAFAA001885
+:1091E0000E00015AAFA90014A380001C8FBF0020FD
+:1091F00003E0000827BD00288D1F00483C18080028
+:109200008F183FBC8F9900283C027FFF8D080044D7
+:109210003443FFFFAFA900103C0B80088D69006C40
+:1092200003E370240319782101CF682301A83821B2
+:10923000AFAA00180E00015AAFA900140A000E5878
+:10924000A380001C3C05800034A60A0090C7003CA7
+:109250003C06080094C63FDA3C0208008C423FD42A
+:1092600030E30020000624001060001E0044382572
+:109270003C0880083505008090A300680000482164
+:109280002408000100002821240400013C0680007D
+:109290008CCD017805A0FFFE34CF0140ADE8000879
+:1092A0003C0208008C423FDCA5E50004A5E4000672
+:1092B000ADE2000C3C04080090843FD93C038008D8
+:1092C00034790080A1E40012ADE70014A5E900188C
+:1092D0009338004C3C0E1000A1F8002D03E000086C
+:1092E000ACCE017834A90E008D28001C3C0C08007F
+:1092F0008D8C3FBC952B0016952A001401864821C1
+:109300003164FFFF0A000E803145FFFF3C048000FE
+:1093100034830A009065003C30A200201040001900
+:1093200034870E0000004021000038210000202179
+:109330003C0680008CC901780520FFFE34CA01403C
+:1093400034CF010091EB0009AD4800083C0E080045
+:109350008DCE3FDC240DFF91240C00403C08100012
+:10936000A5440004A5470006AD4E000CA14D001217
+:10937000AD4C0014A5400018A14B002D03E00008DF
+:10938000ACC801788CE8001894E6001294E4001050
+:1093900030C7FFFF0A000EA93084FFFF3C048000A5
+:1093A00034830A009065003C30A200201040002762
+:1093B00027BDFFF82409000100003821240800011E
+:1093C0003C0680008CCA01780540FFFE3C0280FF0D
+:1093D00034C40100908D00093C0C0800918C4019A8
+:1093E000A3AD00038FAB00003185007F3459FFFF30
+:1093F00001665025AFAA00009083000AA3A00002D6
+:1094000000057E00A3A300018FB8000034CB01400B
+:10941000240C30000319702401CF6825AD6D000CB9
+:1094200027BD0008AD6C0014A5600018AD690008E8
+:10943000A56700042409FF80A56800063C08100009
+:10944000A169001203E00008ACC8017834870E005F
+:109450008CE9001894E6001294E4001030C8FFFF75
+:109460000A000ECD3087FFFF27BDFFE0AFB100142B
+:109470003C118000AFB00010AFBF001836380A00B2
+:10948000970F0032363001000E000B7F31E43FFFB2
+:109490008E0E0000240DFF803C04200001C25821E4
+:1094A000016D6024000C4940316A007F012A40258B
+:1094B000010438253C048008AE270830348600803B
+:1094C00090C500682403000230A200FF104300048E
+:1094D0008F9F00208F990024AC9F0068AC99006496
+:1094E0008FBF00188FB100148FB0001003E0000888
+:1094F00027BD00203C0A0800254A3A803C090800A4
+:1095000025293B103C08080025082F1C3C070800B3
+:1095100024E73BDC3C06080024C639043C0508006F
+:1095200024A536583C0408002484325C3C0308001F
+:10953000246339B83C020800244237543C01080037
+:10954000AC2A3F983C010800AC293F943C0108003C
+:10955000AC283F903C010800AC273F9C3C01080030
+:10956000AC263FAC3C010800AC253FA43C01080000
+:10957000AC243FA03C010800AC233FB03C010800F4
+:0C958000AC223FA803E00008000000003F
+:04958C008000094012
+:109590008000090080080100800800808008000029
+:1095A000800E0000800800808008000080000A8093
+:0C95B00080000A00800009808000090093
 :00000001FF
 /*
  * This file contains firmware data derived from proprietary unpublished
index 15690bb..789b3af 100644 (file)
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
        candidate->first = candidate->last = index;
        candidate->offset_first = from;
        candidate->to_last = to;
+       INIT_LIST_HEAD(&candidate->link);
        candidate->usage = 1;
        candidate->state = AFS_WBACK_PENDING;
        init_waitqueue_head(&candidate->waitq);
index fc557a3..26869cd 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-#define get_ioctx(kioctx) do {                                         \
-       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
-       atomic_inc(&(kioctx)->users);                                   \
-} while (0)
-#define put_ioctx(kioctx) do {                                         \
-       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
-       if (unlikely(atomic_dec_and_test(&(kioctx)->users)))            \
-               __put_ioctx(kioctx);                                    \
-} while (0)
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+       BUG_ON(atomic_read(&kioctx->users) <= 0);
+       atomic_inc(&kioctx->users);
+}
+
+static inline int try_get_ioctx(struct kioctx *kioctx)
+{
+       return atomic_inc_not_zero(&kioctx->users);
+}
+
+static inline void put_ioctx(struct kioctx *kioctx)
+{
+       BUG_ON(atomic_read(&kioctx->users) <= 0);
+       if (unlikely(atomic_dec_and_test(&kioctx->users)))
+               __put_ioctx(kioctx);
+}
 
 /* ioctx_alloc
  *     Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
        rcu_read_lock();
 
        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
-               if (ctx->user_id == ctx_id && !ctx->dead) {
-                       get_ioctx(ctx);
+               /*
+                * RCU protects us against accessing freed memory but
+                * we have to be careful not to get a reference when the
+                * reference count already dropped to 0 (ctx->dead test
+                * is unreliable because of races).
+                */
+               if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
                        ret = ctx;
                        break;
                }
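
The two hunks above replace the get/put macros with inline helpers and make the RCU lookup take its reference with atomic_inc_not_zero(), i.e. "take a reference only if the count has not already hit zero". A minimal userspace sketch of that pattern using C11 atomics follows; obj, obj_tryget and friends are made-up names for illustration, not the kernel API.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative userspace analogue; not the kernel code. */
struct obj {
        atomic_int users;               /* 0 means "already being destroyed" */
};

/* Plain get: only legal while the caller already holds a reference. */
static void obj_get(struct obj *o)
{
        int old = atomic_fetch_add(&o->users, 1);
        assert(old > 0);
}

/* Try-get: safe from a lookup path where the object may be dying; succeeds
 * only if the count was still non-zero (the atomic_inc_not_zero() idea). */
static bool obj_tryget(struct obj *o)
{
        int old = atomic_load(&o->users);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&o->users, &old, old + 1))
                        return true;    /* old is reloaded on CAS failure */
        }
        return false;
}

static void obj_put(struct obj *o)
{
        int old = atomic_fetch_sub(&o->users, 1);
        assert(old > 0);
        if (old == 1)
                free(o);                /* last reference dropped */
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->users, 1);
        if (obj_tryget(o)) {            /* count was 1, so this succeeds */
                obj_get(o);             /* nested get while holding a ref */
                obj_put(o);
                obj_put(o);
        }
        obj_put(o);                     /* count reaches 0, object freed */
        printf("done\n");
        return 0;
}

Once the count has reached zero the try-get refuses to resurrect the object, which is exactly what protects the RCU walker from grabbing a context that __put_ioctx() is already tearing down.
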
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                goto out_put_req;
 
        spin_lock_irq(&ctx->ctx_lock);
+       /*
+        * We could have raced with io_destroy() and are currently holding a
+        * reference to ctx which should be destroyed. We cannot submit IO
+        * since ctx gets freed as soon as io_submit() puts its reference.  The
+        * check here is reliable: io_destroy() sets ctx->dead before waiting
+        * for outstanding IO and the barrier between these two is realized by
+        * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
+        * increment ctx->reqs_active before checking for ctx->dead and the
+        * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+        * don't see ctx->dead set here, io_destroy() waits for our IO to
+        * finish.
+        */
+       if (ctx->dead) {
+               spin_unlock_irq(&ctx->ctx_lock);
+               ret = -EINVAL;
+               goto out_put_req;
+       }
        aio_run_iocb(req);
        if (!list_empty(&ctx->run_list)) {
                /* drain the run list */
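
The comment in this hunk leans on lock acquire/release ordering: the destroy path sets the dead flag before waiting, and the submit path re-checks the flag under the lock after publishing its request, so one side is guaranteed to see the other. A simplified single-lock userspace sketch of that handshake is below; ctx_submit and ctx_destroy are illustrative names only, and the kernel version actually spans two different locks.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative userspace sketch, not the kernel code. */
struct ctx {
        pthread_mutex_t lock;
        bool dead;              /* set by the destroy path before it waits */
        int active;             /* in-flight work the destroy path waits for */
};

/* Submission side: publish the request and re-check the dead flag under the
 * same lock, so either the destroyer sees our work or we see its flag. */
static int ctx_submit(struct ctx *c)
{
        pthread_mutex_lock(&c->lock);
        if (c->dead) {
                pthread_mutex_unlock(&c->lock);
                return -EINVAL; /* raced with destroy: refuse new work */
        }
        c->active++;            /* the destroyer is now obliged to wait */
        pthread_mutex_unlock(&c->lock);
        /* ... do the work, then drop c->active under the lock ... */
        return 0;
}

/* Destroy side: mark the context dead first, then wait for it to drain. */
static void ctx_destroy(struct ctx *c)
{
        pthread_mutex_lock(&c->lock);
        c->dead = true;
        pthread_mutex_unlock(&c->lock);
        /* ... wait until c->active drops back to zero, then free ... */
}

int main(void)
{
        struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("before destroy: %d\n", ctx_submit(&c));  /* 0 */
        ctx_destroy(&c);
        printf("after destroy:  %d\n", ctx_submit(&c));  /* -EINVAL */
        return 0;
}
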
index 333a7bb..8892870 100644 (file)
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
        ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
        if (ret)
                goto out_del;
+       /*
+        * bdev could be deleted beneath us which would implicitly destroy
+        * the holder directory.  Hold on to it.
+        */
+       kobject_get(bdev->bd_part->holder_dir);
 
        list_add(&holder->list, &bdev->bd_holder_disks);
        goto out_unlock;
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
                del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
                del_symlink(bdev->bd_part->holder_dir,
                            &disk_to_dev(disk)->kobj);
+               kobject_put(bdev->bd_part->holder_dir);
                list_del_init(&holder->list);
                kfree(holder);
        }
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
  * flush_disk - invalidates all buffer-cache entries on a disk
  *
  * @bdev:      struct block device to be flushed
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Invalidates all buffer-cache entries on a disk. It should be called
  * when a disk has been changed -- either by a media change or online
  * resize.
  */
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
 {
-       if (__invalidate_device(bdev)) {
+       if (__invalidate_device(bdev, kill_dirty)) {
                char name[BDEVNAME_SIZE] = "";
 
                if (bdev->bd_disk)
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
                       "%s: detected capacity change from %lld to %lld\n",
                       name, bdev_size, disk_size);
                i_size_write(bdev->bd_inode, disk_size);
-               flush_disk(bdev);
+               flush_disk(bdev, false);
        }
 }
 EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
        if (!(events & DISK_EVENT_MEDIA_CHANGE))
                return 0;
 
-       flush_disk(bdev);
+       flush_disk(bdev, true);
        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        return 1;
@@ -1215,12 +1222,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 
        res = __blkdev_get(bdev, mode, 0);
 
-       /* __blkdev_get() may alter read only status, check it afterwards */
-       if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
-               __blkdev_put(bdev, mode, 0);
-               res = -EACCES;
-       }
-
        if (whole) {
                /* finish claiming */
                mutex_lock(&bdev->bd_mutex);
@@ -1298,6 +1299,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
        if (err)
                return ERR_PTR(err);
 
+       if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+               blkdev_put(bdev, mode);
+               return ERR_PTR(-EACCES);
+       }
+
        return bdev;
 }
 EXPORT_SYMBOL(blkdev_get_by_path);
@@ -1601,7 +1607,7 @@ fail:
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 {
        struct super_block *sb = get_super(bdev);
        int res = 0;
@@ -1614,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
                 * hold).
                 */
                shrink_dcache_sb(sb);
-               res = invalidate_inodes(sb);
+               res = invalidate_inodes(sb, kill_dirty);
                drop_super(sb);
        }
        invalidate_bdev(bdev);
index 15b5ca2..9c94934 100644 (file)
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
        char *value = NULL;
        struct posix_acl *acl;
 
+       if (!IS_POSIXACL(inode))
+               return NULL;
+
        acl = get_cached_acl(inode, type);
        if (acl != ACL_NOT_CACHED)
                return acl;
@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
        struct posix_acl *acl;
        int ret = 0;
 
+       if (!IS_POSIXACL(dentry->d_inode))
+               return -EOPNOTSUPP;
+
        acl = btrfs_get_acl(dentry->d_inode, type);
 
        if (IS_ERR(acl))
index f745287..4d2110e 100644 (file)
@@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
-       int ret;
+       int ret = -ENOMEM;
        u32 *sums;
 
        tree = &BTRFS_I(inode)->io_tree;
@@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+       if (!cb)
+               goto out;
+
        atomic_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
@@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
                                 PAGE_CACHE_SIZE;
-       cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+       cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
                                       GFP_NOFS);
+       if (!cb->compressed_pages)
+               goto fail1;
+
        bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
        for (page_index = 0; page_index < nr_pages; page_index++) {
                cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
                                                              __GFP_HIGHMEM);
+               if (!cb->compressed_pages[page_index])
+                       goto fail2;
        }
        cb->nr_pages = nr_pages;
 
@@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        cb->len = uncompressed_len;
 
        comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+       if (!comp_bio)
+               goto fail2;
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        atomic_inc(&cb->pending_bios);
@@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        bio_put(comp_bio);
        return 0;
+
+fail2:
+       for (page_index = 0; page_index < nr_pages; page_index++)
+               free_page((unsigned long)cb->compressed_pages[page_index]);
+
+       kfree(cb->compressed_pages);
+fail1:
+       kfree(cb);
+out:
+       free_extent_map(em);
+       return ret;
 }
 
 static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
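
The error paths added to btrfs_submit_compressed_read() follow the usual C staged-unwind idiom: each goto label releases exactly what had been set up before the point of failure, and the labels are stacked in reverse order of setup. A self-contained userspace sketch of that shape follows; build_buffers is a made-up example, not btrfs code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch of goto-based unwinding, mirroring fail2/fail1/out:
 * allocate a scratch page array, then a joined buffer; on any failure free
 * exactly what was already set up and return NULL. */
static char *build_buffers(size_t npages, size_t pagesz)
{
        char **pages;
        char *joined;
        size_t i, done = 0;

        pages = calloc(npages, sizeof(*pages));
        if (!pages)
                goto out;

        for (i = 0; i < npages; i++) {
                pages[i] = malloc(pagesz);
                if (!pages[i])
                        goto fail_pages;        /* free pages[0..done-1] */
                memset(pages[i], 0, pagesz);
                done++;
        }

        joined = malloc(npages * pagesz);
        if (!joined)
                goto fail_pages;

        for (i = 0; i < npages; i++) {
                memcpy(joined + i * pagesz, pages[i], pagesz);
                free(pages[i]);
        }
        free(pages);
        return joined;                          /* success path */

fail_pages:
        for (i = 0; i < done; i++)
                free(pages[i]);
        free(pages);
out:
        return NULL;                            /* every failure ends here */
}

int main(void)
{
        char *buf = build_buffers(4, 4096);

        printf("%s\n", buf ? "ok" : "allocation failed");
        free(buf);
        return 0;
}
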
@@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
        return ret;
 }
 
-void __exit btrfs_exit_compress(void)
+void btrfs_exit_compress(void)
 {
        free_workspaces();
 }
index 2c98b3a..6f820fa 100644 (file)
@@ -1254,6 +1254,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_SPACE_CACHE                (1 << 12)
 #define BTRFS_MOUNT_CLEAR_CACHE                (1 << 13)
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG        (1 << 15)
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
                                   u64 start, u64 end);
 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root, u64 type);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
index b531c36..e1aa8d6 100644 (file)
@@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 
        tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-       if (page->private == EXTENT_PAGE_PRIVATE)
+       if (page->private == EXTENT_PAGE_PRIVATE) {
+               WARN_ON(1);
                goto out;
-       if (!page->private)
+       }
+       if (!page->private) {
+               WARN_ON(1);
                goto out;
+       }
        len = page->private >> 2;
        WARN_ON(len == 0);
 
@@ -1550,6 +1554,7 @@ static int transaction_kthread(void *arg)
                spin_unlock(&root->fs_info->new_trans_lock);
 
                trans = btrfs_join_transaction(root, 1);
+               BUG_ON(IS_ERR(trans));
                if (transid == trans->transid) {
                        ret = btrfs_commit_transaction(trans, root);
                        BUG_ON(ret);
@@ -2453,10 +2458,14 @@ int btrfs_commit_super(struct btrfs_root *root)
        up_write(&root->fs_info->cleanup_work_sem);
 
        trans = btrfs_join_transaction(root, 1);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);
        BUG_ON(ret);
        /* run commit again to drop the original snapshot */
        trans = btrfs_join_transaction(root, 1);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
@@ -2554,6 +2563,8 @@ int close_ctree(struct btrfs_root *root)
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
        kfree(fs_info->csum_root);
+       kfree(fs_info);
+
        return 0;
 }
 
index 9786963..ff27d7a 100644 (file)
@@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
        int ret;
 
        path = btrfs_alloc_path();
+       if (!path)
+               return ERR_PTR(-ENOMEM);
 
        if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
                key.objectid = root->root_key.objectid;
index b552693..588ff98 100644 (file)
@@ -320,11 +320,6 @@ static int caching_kthread(void *data)
        if (!path)
                return -ENOMEM;
 
-       exclude_super_stripes(extent_root, block_group);
-       spin_lock(&block_group->space_info->lock);
-       block_group->space_info->bytes_readonly += block_group->bytes_super;
-       spin_unlock(&block_group->space_info->lock);
-
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
        /*
@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
-               if (ret == 1)
+               if (ret == 1) {
+                       free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
+               }
        }
 
        if (load_cache_only)
@@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        u64 reserved;
        u64 max_reclaim;
        u64 reclaimed = 0;
+       long time_left;
        int pause = 1;
        int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+       int loops = 0;
 
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;
@@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 
        max_reclaim = min(reserved, to_reclaim);
 
-       while (1) {
+       while (loops < 1024) {
                /* have the flusher threads jump in and do some IO */
                smp_mb();
                nr_pages = min_t(unsigned long, nr_pages,
@@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
 
                spin_lock(&space_info->lock);
-               if (reserved > space_info->bytes_reserved)
+               if (reserved > space_info->bytes_reserved) {
+                       loops = 0;
                        reclaimed += reserved - space_info->bytes_reserved;
+               } else {
+                       loops++;
+               }
                reserved = space_info->bytes_reserved;
                spin_unlock(&space_info->lock);
 
@@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                        return -EAGAIN;
 
                __set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(pause);
+               time_left = schedule_timeout(pause);
+
+               /* We were interrupted, exit */
+               if (time_left)
+                       break;
+
                pause <<= 1;
                if (pause > HZ / 10)
                        pause = HZ / 10;
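
This hunk turns the reclaim loop into a bounded retry with capped exponential backoff: progress resets the give-up counter, an interrupted sleep breaks out early, and the pause doubles up to HZ/10. A userspace sketch of the same loop shape follows; try_reclaim and the byte counts are stand-ins, not btrfs functions or values.

#include <stdio.h>
#include <time.h>

/* Illustrative userspace sketch, not the kernel code. try_reclaim() stands in
 * for "how much reserved space was given back since the last pass". */
static long try_reclaim(int attempt)
{
        return attempt % 3 ? 0 : 4096;  /* pretend progress every 3rd pass */
}

int main(void)
{
        const long to_reclaim = 16384;
        const long cap_ns = 100L * 1000 * 1000; /* pause capped at 100 ms */
        long pause_ns = 1L * 1000 * 1000;       /* start at 1 ms */
        long reclaimed = 0;
        int loops = 0, attempt = 0;

        while (loops < 1024 && reclaimed < to_reclaim) {
                long got = try_reclaim(attempt++);

                if (got > 0) {
                        reclaimed += got;
                        loops = 0;      /* progress resets the give-up counter */
                } else {
                        loops++;        /* 1024 fruitless passes -> give up */
                }

                struct timespec ts = { .tv_sec = 0, .tv_nsec = pause_ns };
                nanosleep(&ts, NULL);   /* back off before retrying */

                pause_ns <<= 1;         /* exponential backoff ... */
                if (pause_ns > cap_ns)
                        pause_ns = cap_ns;      /* ... with an upper cap */
        }
        printf("reclaimed %ld of %ld bytes in %d passes\n",
               reclaimed, to_reclaim, attempt);
        return 0;
}
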
@@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
 
        if (num_bytes > 0) {
                if (dest) {
-                       block_rsv_add_bytes(dest, num_bytes, 0);
-               } else {
+                       spin_lock(&dest->lock);
+                       if (!dest->full) {
+                               u64 bytes_to_add;
+
+                               bytes_to_add = dest->size - dest->reserved;
+                               bytes_to_add = min(num_bytes, bytes_to_add);
+                               dest->reserved += bytes_to_add;
+                               if (dest->reserved >= dest->size)
+                                       dest->full = 1;
+                               num_bytes -= bytes_to_add;
+                       }
+                       spin_unlock(&dest->lock);
+               }
+               if (num_bytes) {
                        spin_lock(&space_info->lock);
                        space_info->bytes_reserved -= num_bytes;
                        spin_unlock(&space_info->lock);
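
With this change the release path tops up the destination reserve only as far as its target size and returns whatever is left to the shared pool, instead of dumping everything into the destination. The arithmetic, pulled out into a toy userspace example (struct rsv, release_bytes and the pool are illustrative names, not the btrfs API):

#include <stdio.h>

/* Illustrative sketch of the top-up-then-return arithmetic above. */
struct rsv {
        unsigned long long size;        /* target size of the reserve */
        unsigned long long reserved;    /* what it currently holds */
        int full;
};

static void release_bytes(struct rsv *dest, unsigned long long *pool,
                          unsigned long long num_bytes)
{
        if (dest && !dest->full) {
                unsigned long long room = dest->size - dest->reserved;
                unsigned long long add = num_bytes < room ? num_bytes : room;

                dest->reserved += add;
                if (dest->reserved >= dest->size)
                        dest->full = 1;
                num_bytes -= add;
        }
        *pool += num_bytes;             /* the remainder goes back to the pool */
}

int main(void)
{
        struct rsv global = { .size = 1024, .reserved = 1000 };
        unsigned long long pool = 0;

        release_bytes(&global, &pool, 100);
        printf("reserved=%llu full=%d pool=%llu\n",
               global.reserved, global.full, pool);     /* 1024 1 76 */
        return 0;
}
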
@@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
        atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+       WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
        spin_lock(&BTRFS_I(inode)->accounting_lock);
        nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -5355,7 +5376,7 @@ again:
                               num_bytes, data, 1);
                goto again;
        }
-       if (ret == -ENOSPC) {
+       if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
                struct btrfs_space_info *sinfo;
 
                sinfo = __find_space_info(root->fs_info, data);
@@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
              struct btrfs_root *root, u32 blocksize)
 {
        struct btrfs_block_rsv *block_rsv;
+       struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        int ret;
 
        block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        if (block_rsv->size == 0) {
                ret = reserve_metadata_bytes(trans, root, block_rsv,
                                             blocksize, 0);
-               if (ret)
+               /*
+                * If we couldn't reserve metadata bytes try and use some from
+                * the global reserve.
+                */
+               if (ret && block_rsv != global_rsv) {
+                       ret = block_rsv_use_bytes(global_rsv, blocksize);
+                       if (!ret)
+                               return global_rsv;
+                       return ERR_PTR(ret);
+               } else if (ret) {
                        return ERR_PTR(ret);
+               }
                return block_rsv;
        }
 
        ret = block_rsv_use_bytes(block_rsv, blocksize);
        if (!ret)
                return block_rsv;
+       if (ret) {
+               WARN_ON(1);
+               ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+                                            0);
+               if (!ret) {
+                       spin_lock(&block_rsv->lock);
+                       block_rsv->size += blocksize;
+                       spin_unlock(&block_rsv->lock);
+                       return block_rsv;
+               } else if (ret && block_rsv != global_rsv) {
+                       ret = block_rsv_use_bytes(global_rsv, blocksize);
+                       if (!ret)
+                               return global_rsv;
+               }
+       }
 
        return ERR_PTR(-ENOSPC);
 }
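
use_block_rsv() now falls back to the global reserve when the transaction's own reserve cannot cover the allocation, rather than failing outright. The fallback chain, reduced to a toy example (pick_reserve and struct pool are made-up names, not the btrfs API):

#include <errno.h>
#include <stdio.h>

/* Illustrative userspace sketch, not the kernel code. */
struct pool {
        const char *name;
        long long avail;
};

static int pool_use(struct pool *p, long long bytes)
{
        if (p->avail < bytes)
                return -ENOSPC;
        p->avail -= bytes;
        return 0;
}

static struct pool *pick_reserve(struct pool *local, struct pool *global,
                                 long long bytes)
{
        if (pool_use(local, bytes) == 0)
                return local;           /* the usual case */
        if (local != global && pool_use(global, bytes) == 0)
                return global;          /* fall back to the global reserve */
        return NULL;                    /* genuinely out of space */
}

int main(void)
{
        struct pool local = { "local", 1024 }, global = { "global", 65536 };
        struct pool *got = pick_reserve(&local, &global, 4096);

        printf("served from: %s\n", got ? got->name : "nowhere");  /* global */
        return 0;
}
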
@@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
        BUG_ON(!wc);
 
        trans = btrfs_start_transaction(tree_root, 0);
+       BUG_ON(IS_ERR(trans));
+
        if (block_rsv)
                trans->block_rsv = block_rsv;
 
@@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 
                        btrfs_end_transaction_throttle(trans, tree_root);
                        trans = btrfs_start_transaction(tree_root, 0);
+                       BUG_ON(IS_ERR(trans));
                        if (block_rsv)
                                trans->block_rsv = block_rsv;
                }
@@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
        int ret = 0;
 
        ra = kzalloc(sizeof(*ra), GFP_NOFS);
+       if (!ra)
+               return -ENOMEM;
 
        mutex_lock(&inode->i_mutex);
        first_index = start >> PAGE_CACHE_SHIFT;
@@ -6531,7 +6583,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
        u64 end = start + extent_key->offset - 1;
 
        em = alloc_extent_map(GFP_NOFS);
-       BUG_ON(!em || IS_ERR(em));
+       BUG_ON(!em);
 
        em->start = start;
        em->len = extent_key->offset;
@@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
                BUG_ON(reloc_root->commit_root != NULL);
                while (1) {
                        trans = btrfs_join_transaction(root, 1);
-                       BUG_ON(!trans);
+                       BUG_ON(IS_ERR(trans));
 
                        mutex_lock(&root->fs_info->drop_mutex);
                        ret = btrfs_drop_snapshot(trans, reloc_root);
@@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
 
        if (found) {
                trans = btrfs_start_transaction(root, 1);
-               BUG_ON(!trans);
+               BUG_ON(IS_ERR(trans));
                ret = btrfs_commit_transaction(trans, root);
                BUG_ON(ret);
        }
@@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
 
 
        trans = btrfs_start_transaction(extent_root, 1);
-       BUG_ON(!trans);
+       BUG_ON(IS_ERR(trans));
 
        if (extent_key->objectid == 0) {
                ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -8013,6 +8065,13 @@ out:
        return ret;
 }
 
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root, u64 type)
+{
+       u64 alloc_flags = get_alloc_profile(root, type);
+       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
 /*
  * helper to account the unused space of all the readonly block group in the
  * list. takes mirrors into account.
@@ -8270,6 +8329,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                if (block_group->cached == BTRFS_CACHE_STARTED)
                        wait_block_group_cache_done(block_group);
 
+               /*
+                * We haven't cached this block group, which means we could
+                * possibly have excluded extents on this block group.
+                */
+               if (block_group->cached == BTRFS_CACHE_NO)
+                       free_excluded_extents(info->extent_root, block_group);
+
                btrfs_remove_free_space_cache(block_group);
                btrfs_put_block_group(block_group);
 
@@ -8384,6 +8450,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;
 
+               /*
+                * We need to exclude the super stripes now so that the space
+                * info has super bytes accounted for, otherwise we'll think
+                * we have more space than we actually do.
+                */
+               exclude_super_stripes(root, cache);
+
                /*
                 * check for two cases, either we are full, and therefore
                 * don't need to bother with the caching work since we won't
@@ -8392,12 +8465,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                 * time, particularly in the full case.
                 */
                if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-                       exclude_super_stripes(root, cache);
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        free_excluded_extents(root, cache);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
-                       exclude_super_stripes(root, cache);
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        add_new_free_space(cache, root->fs_info,
index 2e993cf..fd3f172 100644 (file)
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
-                    unsigned long bits)
+                    unsigned long bits, int contig)
 {
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
+       u64 last = 0;
        int found = 0;
 
        if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
-               if (state->end >= cur_start && (state->state & bits)) {
+               if (contig && found && state->start > last + 1)
+                       break;
+               if (state->end >= cur_start && (state->state & bits) == bits) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
                                *start = state->start;
                                found = 1;
                        }
+                       last = state->end;
+               } else if (contig && found) {
+                       break;
                }
                node = rb_next(node);
                if (!node)
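
The new contig argument makes count_range_bits() stop at the first gap between extent states, and the mask test is tightened to (state->state & bits) == bits so every requested bit has to be set. A rough standalone model of the stop-at-first-gap rule, with made-up byte ranges standing in for extent_state records and the locking and rb-tree walk left out:

/*
 * Simplified model of contiguous vs. non-contiguous counting.
 * The ranges below are hypothetical; they only stand in for the
 * [state->start, state->end] intervals walked by count_range_bits().
 */
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;		/* inclusive, like extent_state */
};

static uint64_t count_bytes(const struct range *r, int n, int contig)
{
	uint64_t total = 0;
	uint64_t last = 0;
	int found = 0;

	for (int i = 0; i < n; i++) {
		/* contiguous mode: stop as soon as a gap shows up */
		if (contig && found && r[i].start > last + 1)
			break;
		total += r[i].end - r[i].start + 1;
		last = r[i].end;
		found = 1;
	}
	return total;
}

int main(void)
{
	struct range r[] = {
		{ 0, 4095 }, { 4096, 8191 }, { 12288, 16383 },
	};

	/*
	 * all ranges: 12288 bytes; contiguous: 8192, since the third
	 * range sits past a gap and is ignored
	 */
	printf("any: %llu contig: %llu\n",
	       (unsigned long long)count_bytes(r, 3, 0),
	       (unsigned long long)count_bytes(r, 3, 1));
	return 0;
}
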
@@ -1865,7 +1871,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
        bio_get(bio);
 
        if (tree->ops && tree->ops->submit_bio_hook)
-               tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+               ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
                                           mirror_num, bio_flags, start);
        else
                submit_bio(rw, bio);
@@ -1920,6 +1926,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                nr = bio_get_nr_vecs(bdev);
 
        bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+       if (!bio)
+               return -ENOMEM;
 
        bio_add_page(bio, page, page_size, offset);
        bio->bi_end_io = end_io_func;
@@ -1944,6 +1952,7 @@ void set_page_extent_mapped(struct page *page)
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
+       WARN_ON(!PagePrivate(page));
        set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
 }
 
@@ -2126,7 +2135,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
        ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
                                      &bio_flags);
        if (bio)
-               submit_one_bio(READ, bio, 0, bio_flags);
+               ret = submit_one_bio(READ, bio, 0, bio_flags);
        return ret;
 }
 
@@ -2819,9 +2828,17 @@ int try_release_extent_state(struct extent_map_tree *map,
                 * at this point we can safely clear everything except the
                 * locked bit and the nodatasum bit
                 */
-               clear_extent_bit(tree, start, end,
+               ret = clear_extent_bit(tree, start, end,
                                 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
                                 0, 0, NULL, mask);
+
+               /* if clear_extent_bit failed with -ENOMEM,
+                * we can't allow the release to continue.
+                */
+               if (ret < 0)
+                       ret = 0;
+               else
+                       ret = 1;
        }
        return ret;
 }
@@ -2901,6 +2918,46 @@ out:
        return sector;
 }
 
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+                                               u64 offset,
+                                               u64 last,
+                                               get_extent_t *get_extent)
+{
+       u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+       struct extent_map *em;
+       u64 len;
+
+       if (offset >= last)
+               return NULL;
+
+       while (1) {
+               len = last - offset;
+               if (len == 0)
+                       break;
+               len = (len + sectorsize - 1) & ~(sectorsize - 1);
+               em = get_extent(inode, NULL, 0, offset, len, 0);
+               if (!em || IS_ERR(em))
+                       return em;
+
+               /* if this isn't a hole return it */
+               if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+                   em->block_start != EXTENT_MAP_HOLE) {
+                       return em;
+               }
+
+               /* this is a hole, advance to the next extent */
+               offset = extent_map_end(em);
+               free_extent_map(em);
+               if (offset >= last)
+                       break;
+       }
+       return NULL;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -2910,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        u32 flags = 0;
        u32 found_type;
        u64 last;
+       u64 last_for_get_extent = 0;
        u64 disko = 0;
+       u64 isize = i_size_read(inode);
        struct btrfs_key found_key;
        struct extent_map *em = NULL;
        struct extent_state *cached_state = NULL;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *item;
        int end = 0;
-       u64 em_start = 0, em_len = 0;
+       u64 em_start = 0;
+       u64 em_len = 0;
+       u64 em_end = 0;
        unsigned long emflags;
-       int hole = 0;
 
        if (len == 0)
                return -EINVAL;
@@ -2929,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -ENOMEM;
        path->leave_spinning = 1;
 
+       /*
+        * lookup the last file extent.  We're not using i_size here
+        * because there might be preallocation past i_size
+        */
        ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
                                       path, inode->i_ino, -1, 0);
        if (ret < 0) {
@@ -2942,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
        found_type = btrfs_key_type(&found_key);
 
-       /* No extents, just return */
+       /* No extents, but there might be delalloc bits */
        if (found_key.objectid != inode->i_ino ||
            found_type != BTRFS_EXTENT_DATA_KEY) {
-               btrfs_free_path(path);
-               return 0;
+               /* have to trust i_size as the end */
+               last = (u64)-1;
+               last_for_get_extent = isize;
+       } else {
+               /*
+                * remember the start of the last extent.  There are a
+                * bunch of different factors that go into the length of the
+                * extent, so it's much less complex to remember where it started
+                */
+               last = found_key.offset;
+               last_for_get_extent = last + 1;
        }
-       last = found_key.offset;
        btrfs_free_path(path);
 
+       /*
+        * we might have some extents allocated but more delalloc past those
+        * extents.  so, we trust isize unless the start of the last extent is
+        * beyond isize
+        */
+       if (last < isize) {
+               last = (u64)-1;
+               last_for_get_extent = isize;
+       }
+
        lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
                         &cached_state, GFP_NOFS);
-       em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+       em = get_extent_skip_holes(inode, off, last_for_get_extent,
+                                  get_extent);
        if (!em)
                goto out;
        if (IS_ERR(em)) {
@@ -2962,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        }
 
        while (!end) {
-               hole = 0;
-               off = em->start + em->len;
+               off = extent_map_end(em);
                if (off >= max)
                        end = 1;
 
-               if (em->block_start == EXTENT_MAP_HOLE) {
-                       hole = 1;
-                       goto next;
-               }
-
                em_start = em->start;
                em_len = em->len;
-
+               em_end = extent_map_end(em);
+               emflags = em->flags;
                disko = 0;
                flags = 0;
 
@@ -2993,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
                        flags |= FIEMAP_EXTENT_ENCODED;
 
-next:
-               emflags = em->flags;
                free_extent_map(em);
                em = NULL;
-               if (!end) {
-                       em = get_extent(inode, NULL, 0, off, max - off, 0);
-                       if (!em)
-                               goto out;
-                       if (IS_ERR(em)) {
-                               ret = PTR_ERR(em);
-                               goto out;
-                       }
-                       emflags = em->flags;
-               }
-
-               if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+               if ((em_start >= last) || em_len == (u64)-1 ||
+                  (last == (u64)-1 && isize <= em_end)) {
                        flags |= FIEMAP_EXTENT_LAST;
                        end = 1;
                }
 
-               if (em_start == last) {
+               /* now scan forward to see if this is really the last extent. */
+               em = get_extent_skip_holes(inode, off, last_for_get_extent,
+                                          get_extent);
+               if (IS_ERR(em)) {
+                       ret = PTR_ERR(em);
+                       goto out;
+               }
+               if (!em) {
                        flags |= FIEMAP_EXTENT_LAST;
                        end = 1;
                }
-
-               if (!hole) {
-                       ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-                                               em_len, flags);
-                       if (ret)
-                               goto out_free;
-               }
+               ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+                                             em_len, flags);
+               if (ret)
+                       goto out_free;
        }
 out_free:
        free_extent_map(em);
@@ -3192,7 +3263,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                }
                if (!PageUptodate(p))
                        uptodate = 0;
-               unlock_page(p);
+
+               /*
+                * see below about how we avoid a nasty race with release page
+                * and why we unlock later
+                */
+               if (i != 0)
+                       unlock_page(p);
        }
        if (uptodate)
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3216,9 +3293,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
        atomic_inc(&eb->refs);
        spin_unlock(&tree->buffer_lock);
        radix_tree_preload_end();
+
+       /*
+        * there is a race where release page may have
+        * tried to find this extent buffer in the radix
+        * but failed.  It will tell the VM it is safe to
+        * reclaim the page, and it will clear the page private bit.
+        * We must make sure to set the page private bit properly
+        * after the extent buffer is in the radix tree so
+        * it doesn't get lost
+        */
+       set_page_extent_mapped(eb->first_page);
+       set_page_extent_head(eb->first_page, eb->len);
+       if (!page0)
+               unlock_page(eb->first_page);
        return eb;
 
 free_eb:
+       if (eb->first_page && !page0)
+               unlock_page(eb->first_page);
+
        if (!atomic_dec_and_test(&eb->refs))
                return exists;
        btrfs_release_extent_buffer(eb);
@@ -3269,10 +3363,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
                        continue;
 
                lock_page(page);
+               WARN_ON(!PagePrivate(page));
+
+               set_page_extent_mapped(page);
                if (i == 0)
                        set_page_extent_head(page, eb->len);
-               else
-                       set_page_private(page, EXTENT_PAGE_PRIVATE);
 
                clear_page_dirty_for_io(page);
                spin_lock_irq(&page->mapping->tree_lock);
@@ -3462,6 +3557,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
+
+               WARN_ON(!PagePrivate(page));
+
+               set_page_extent_mapped(page);
+               if (i == 0)
+                       set_page_extent_head(page, eb->len);
+
                if (inc_all_pages)
                        page_cache_get(page);
                if (!PageUptodate(page)) {
index 7083cfa..9318dfe 100644 (file)
@@ -191,7 +191,7 @@ void extent_io_exit(void);
 
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end,
-                    u64 max_bytes, unsigned long bits);
+                    u64 max_bytes, unsigned long bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
index b0e1fce..2b6c12e 100644 (file)
@@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask)
 {
        struct extent_map *em;
        em = kmem_cache_alloc(extent_map_cache, mask);
-       if (!em || IS_ERR(em))
-               return em;
+       if (!em)
+               return NULL;
        em->in_tree = 0;
        em->flags = 0;
        em->compress_type = BTRFS_COMPRESS_NONE;
index a562a25..4f19a3e 100644 (file)
@@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
        root = root->fs_info->csum_root;
 
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
 
        while (1) {
                key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        if (path->slots[0] == 0)
                                goto out;
                        path->slots[0]--;
+               } else if (ret < 0) {
+                       goto out;
                }
+
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 
index c800d58..7084140 100644 (file)
@@ -186,6 +186,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);
+               BUG_ON(!split || !split2);
 
                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
@@ -793,8 +794,12 @@ again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
-                       err = -ENOMEM;
-                       BUG_ON(1);
+                       int c;
+                       for (c = i - 1; c >= 0; c--) {
+                               unlock_page(pages[c]);
+                               page_cache_release(pages[c]);
+                       }
+                       return -ENOMEM;
                }
                wait_on_page_writeback(pages[i]);
        }
@@ -946,6 +951,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+       if (!pages) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        /* generic_write_checks can change our pos */
        start_pos = pos;
@@ -984,8 +993,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                size_t write_bytes = min(iov_iter_count(&i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
-               size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
-                                       PAGE_CACHE_SHIFT;
+               size_t num_pages = (write_bytes + offset +
+                                   PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
                WARN_ON(num_pages > nrptrs);
                memset(pages, 0, sizeof(struct page *) * nrptrs);
@@ -1015,8 +1024,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 
                copied = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, &i);
-               dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
-                                       PAGE_CACHE_SHIFT;
+               dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
+                               PAGE_CACHE_SHIFT;
 
                if (num_pages > dirty_pages) {
                        if (copied > 0)
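
Both page counts now fold the in-page offset into the rounding: a copy of write_bytes that starts offset bytes into the first page can touch one more page than write_bytes alone would suggest, which matters for the page accounting that follows. A standalone check of the arithmetic, assuming 4KiB pages (the real values come from PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT):

/* Worked example for the num_pages/dirty_pages rounding fix. */
#include <stddef.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE  4096UL
#define PAGE_CACHE_SHIFT 12

int main(void)
{
	size_t offset = 4090;		/* position inside the first page */
	size_t write_bytes = 10;	/* spills into a second page */

	size_t old_count = (write_bytes + PAGE_CACHE_SIZE - 1)
				>> PAGE_CACHE_SHIFT;
	size_t new_count = (write_bytes + offset + PAGE_CACHE_SIZE - 1)
				>> PAGE_CACHE_SHIFT;

	/* prints old=1 new=2: the write really touches two pages */
	printf("old=%zu new=%zu\n", old_count, new_count);
	return 0;
}
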
index 60d6842..a039065 100644 (file)
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
        return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-                             struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+                   struct btrfs_free_space *info)
 {
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+                             struct btrfs_free_space *info)
+{
+       __unlink_free_space(block_group, info);
        block_group->free_space -= info->bytes;
 }
 
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
+       u64 size = block_group->key.offset;
 
        /*
         * The goal is to keep the total amount of memory used per 1gb of space
         * at or below 32k, so we need to adjust how much memory we allow to be
         * used by extent based free space tracking
         */
-       max_bytes = MAX_CACHE_BYTES_PER_GIG *
-               (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+       if (size < 1024 * 1024 * 1024)
+               max_bytes = MAX_CACHE_BYTES_PER_GIG;
+       else
+               max_bytes = MAX_CACHE_BYTES_PER_GIG *
+                       div64_u64(size, 1024 * 1024 * 1024);
 
        /*
         * we want to account for 1 more bitmap than what we have so we can make
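
Below 1GiB the old expression rounds the division down to zero and max_bytes collapses with it; the new branch clamps small block groups to a full MAX_CACHE_BYTES_PER_GIG (32k per 1GiB of space, per the comment above). A quick standalone illustration, with plain division standing in for div64_u64() and the 32k constant assumed:

/* Why sub-1GiB block groups need the clamp; sizes are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CACHE_BYTES_PER_GIG	(32ULL * 1024)
#define ONE_GIG			(1024ULL * 1024 * 1024)

int main(void)
{
	uint64_t size = 512ULL * 1024 * 1024;	/* a 512MiB block group */

	uint64_t old_max = MAX_CACHE_BYTES_PER_GIG * (size / ONE_GIG);
	uint64_t new_max = size < ONE_GIG ?
				MAX_CACHE_BYTES_PER_GIG :
				MAX_CACHE_BYTES_PER_GIG * (size / ONE_GIG);

	/* prints old=0 new=32768: the old formula left no room at all */
	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_max, (unsigned long long)new_max);
	return 0;
}
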
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
        recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+                       struct btrfs_free_space *bitmap_info)
+{
+       unlink_free_space(block_group, bitmap_info);
+       kfree(bitmap_info->bitmap);
+       kfree(bitmap_info);
+       block_group->total_bitmaps--;
+       recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *bitmap_info,
                              u64 *offset, u64 *bytes)
@@ -1195,6 +1216,7 @@ again:
         */
        search_start = *offset;
        search_bytes = *bytes;
+       search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(block_group, bitmap_info, &search_start,
                            &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
@@ -1211,13 +1233,8 @@ again:
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
-               if (!bitmap_info->bytes) {
-                       unlink_free_space(block_group, bitmap_info);
-                       kfree(bitmap_info->bitmap);
-                       kfree(bitmap_info);
-                       block_group->total_bitmaps--;
-                       recalculate_thresholds(block_group);
-               }
+               if (!bitmap_info->bytes)
+                       free_bitmap(block_group, bitmap_info);
 
                /*
                 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@ again:
                        return -EAGAIN;
 
                goto again;
-       } else if (!bitmap_info->bytes) {
-               unlink_free_space(block_group, bitmap_info);
-               kfree(bitmap_info->bitmap);
-               kfree(bitmap_info);
-               block_group->total_bitmaps--;
-               recalculate_thresholds(block_group);
-       }
+       } else if (!bitmap_info->bytes)
+               free_bitmap(block_group, bitmap_info);
 
        return 0;
 }
@@ -1359,22 +1371,14 @@ out:
        return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                        u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+                         struct btrfs_free_space *info, bool update_stat)
 {
-       struct btrfs_free_space *right_info = NULL;
-       struct btrfs_free_space *left_info = NULL;
-       struct btrfs_free_space *info = NULL;
-       int ret = 0;
-
-       info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-       if (!info)
-               return -ENOMEM;
-
-       info->offset = offset;
-       info->bytes = bytes;
-
-       spin_lock(&block_group->tree_lock);
+       struct btrfs_free_space *left_info;
+       struct btrfs_free_space *right_info;
+       bool merged = false;
+       u64 offset = info->offset;
+       u64 bytes = info->bytes;
 
        /*
         * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
        else
                left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-       /*
-        * If there was no extent directly to the left or right of this new
-        * extent then we know we're going to have to allocate a new extent, so
-        * before we do that see if we need to drop this into a bitmap
-        */
-       if ((!left_info || left_info->bitmap) &&
-           (!right_info || right_info->bitmap)) {
-               ret = insert_into_bitmap(block_group, info);
-
-               if (ret < 0) {
-                       goto out;
-               } else if (ret) {
-                       ret = 0;
-                       goto out;
-               }
-       }
-
        if (right_info && !right_info->bitmap) {
-               unlink_free_space(block_group, right_info);
+               if (update_stat)
+                       unlink_free_space(block_group, right_info);
+               else
+                       __unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
+               merged = true;
        }
 
        if (left_info && !left_info->bitmap &&
            left_info->offset + left_info->bytes == offset) {
-               unlink_free_space(block_group, left_info);
+               if (update_stat)
+                       unlink_free_space(block_group, left_info);
+               else
+                       __unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kfree(left_info);
+               merged = true;
        }
 
+       return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                        u64 offset, u64 bytes)
+{
+       struct btrfs_free_space *info;
+       int ret = 0;
+
+       info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+       if (!info)
+               return -ENOMEM;
+
+       info->offset = offset;
+       info->bytes = bytes;
+
+       spin_lock(&block_group->tree_lock);
+
+       if (try_merge_free_space(block_group, info, true))
+               goto link;
+
+       /*
+        * There was no extent directly to the left or right of this new
+        * extent then we know we're going to have to allocate a new extent, so
+        * before we do that see if we need to drop this into a bitmap
+        */
+       ret = insert_into_bitmap(block_group, info);
+       if (ret < 0) {
+               goto out;
+       } else if (ret) {
+               ret = 0;
+               goto out;
+       }
+link:
        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);
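
btrfs_add_free_space() now tries to coalesce the freed range with physically adjacent entries first and only drops it into a bitmap when nothing merged; __btrfs_return_cluster_to_free_space() reuses the helper with update_stat set to false, since cluster bytes are already reflected in block_group->free_space. A much-simplified userspace model of the coalescing step, with a flat array instead of the rb-tree and hypothetical offsets:

/*
 * Toy model of merging a freed range with its neighbours.  Entries and
 * offsets are made up, and none of the btrfs bookkeeping (bitmaps,
 * free_space/free_extents counters, locking) is modelled.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t offset;
	uint64_t bytes;
	bool	 used;
};

static struct entry space[8] = {
	{ 0,    4096, true },	/* left neighbour, ends at 4096 */
	{ 8192, 4096, true },	/* right neighbour, starts at 8192 */
};

static bool try_merge(struct entry *info)
{
	bool merged = false;

	for (int i = 0; i < 8; i++) {
		if (!space[i].used)
			continue;
		/* right neighbour starts exactly where the new range ends */
		if (space[i].offset == info->offset + info->bytes) {
			info->bytes += space[i].bytes;
			space[i].used = false;
			merged = true;
		}
		/* left neighbour ends exactly where the new range starts */
		if (space[i].offset + space[i].bytes == info->offset) {
			info->offset = space[i].offset;
			info->bytes += space[i].bytes;
			space[i].used = false;
			merged = true;
		}
	}
	return merged;
}

int main(void)
{
	struct entry info = { 4096, 4096, true };	/* freeing [4096, 8192) */

	if (try_merge(&info))	/* prints offset=0 bytes=12288 */
		printf("offset=%llu bytes=%llu\n",
		       (unsigned long long)info.offset,
		       (unsigned long long)info.bytes);
	return 0;
}
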
@@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space(
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
                BUG_ON(entry->bitmap);
+               try_merge_free_space(block_group, entry, false);
                tree_insert_offset(&block_group->free_space_offset,
                                   entry->offset, &entry->offset_index, 0);
        }
@@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
        ret = offset;
        if (entry->bitmap) {
                bitmap_clear_bits(block_group, entry, offset, bytes);
-               if (!entry->bytes) {
-                       unlink_free_space(block_group, entry);
-                       kfree(entry->bitmap);
-                       kfree(entry);
-                       block_group->total_bitmaps--;
-                       recalculate_thresholds(block_group);
-               }
+               if (!entry->bytes)
+                       free_bitmap(block_group, entry);
        } else {
                unlink_free_space(block_group, entry);
                entry->offset += bytes;
@@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 
        ret = search_start;
        bitmap_clear_bits(block_group, entry, ret, bytes);
+       if (entry->bytes == 0)
+               free_bitmap(block_group, entry);
 out:
        spin_unlock(&cluster->lock);
        spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                entry->offset += bytes;
                entry->bytes -= bytes;
 
-               if (entry->bytes == 0) {
+               if (entry->bytes == 0)
                        rb_erase(&entry->offset_index, &cluster->root);
-                       kfree(entry);
-               }
                break;
        }
 out:
        spin_unlock(&cluster->lock);
 
+       if (!ret)
+               return 0;
+
+       spin_lock(&block_group->tree_lock);
+
+       block_group->free_space -= bytes;
+       if (entry->bytes == 0) {
+               block_group->free_extents--;
+               kfree(entry);
+       }
+
+       spin_unlock(&block_group->tree_lock);
+
        return ret;
 }
 
index 160b55b..0efdb65 100644 (file)
@@ -416,7 +416,7 @@ again:
        }
        if (start == 0) {
                trans = btrfs_join_transaction(root, 1);
-               BUG_ON(!trans);
+               BUG_ON(IS_ERR(trans));
                btrfs_set_trans_block_group(trans, inode);
                trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -612,6 +612,7 @@ retry:
                            GFP_NOFS);
 
                trans = btrfs_join_transaction(root, 1);
+               BUG_ON(IS_ERR(trans));
                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
@@ -643,6 +644,7 @@ retry:
                                        async_extent->ram_size - 1, 0);
 
                em = alloc_extent_map(GFP_NOFS);
+               BUG_ON(!em);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
@@ -771,7 +773,7 @@ static noinline int cow_file_range(struct inode *inode,
 
        BUG_ON(root == root->fs_info->tree_root);
        trans = btrfs_join_transaction(root, 1);
-       BUG_ON(!trans);
+       BUG_ON(IS_ERR(trans));
        btrfs_set_trans_block_group(trans, inode);
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -819,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode,
                BUG_ON(ret);
 
                em = alloc_extent_map(GFP_NOFS);
+               BUG_ON(!em);
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
@@ -1049,7 +1052,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        } else {
                trans = btrfs_join_transaction(root, 1);
        }
-       BUG_ON(!trans);
+       BUG_ON(IS_ERR(trans));
 
        cow_start = (u64)-1;
        cur_offset = start;
@@ -1168,6 +1171,7 @@ out_check:
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
                        em = alloc_extent_map(GFP_NOFS);
+                       BUG_ON(!em);
                        em->start = cur_offset;
                        em->orig_start = em->start;
                        em->len = num_bytes;
@@ -1557,6 +1561,7 @@ out:
 out_page:
        unlock_page(page);
        page_cache_release(page);
+       kfree(fixup);
 }
 
 /*
@@ -1703,7 +1708,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                                trans = btrfs_join_transaction_nolock(root, 1);
                        else
                                trans = btrfs_join_transaction(root, 1);
-                       BUG_ON(!trans);
+                       BUG_ON(IS_ERR(trans));
                        btrfs_set_trans_block_group(trans, inode);
                        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                        ret = btrfs_update_inode(trans, root, inode);
@@ -1720,6 +1725,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                trans = btrfs_join_transaction_nolock(root, 1);
        else
                trans = btrfs_join_transaction(root, 1);
+       BUG_ON(IS_ERR(trans));
        btrfs_set_trans_block_group(trans, inode);
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -1907,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
 
        private = 0;
        if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
-                            (u64)-1, 1, EXTENT_DIRTY)) {
+                            (u64)-1, 1, EXTENT_DIRTY, 0)) {
                ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
                                        start, &private_failure);
                if (ret == 0) {
@@ -2354,6 +2360,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
                 */
                if (is_bad_inode(inode)) {
                        trans = btrfs_start_transaction(root, 0);
+                       BUG_ON(IS_ERR(trans));
                        btrfs_orphan_del(trans, inode);
                        btrfs_end_transaction(trans, root);
                        iput(inode);
@@ -2381,6 +2388,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
 
        if (root->orphan_block_rsv || root->orphan_item_inserted) {
                trans = btrfs_join_transaction(root, 1);
+               BUG_ON(IS_ERR(trans));
                btrfs_end_transaction(trans, root);
        }
 
@@ -2641,7 +2649,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
-               goto err;
+               goto out;
        }
 
        path->leave_spinning = 1;
@@ -2714,9 +2722,10 @@ static int check_path_shared(struct btrfs_root *root,
        struct extent_buffer *eb;
        int level;
        u64 refs = 1;
-       int uninitialized_var(ret);
 
        for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+               int ret;
+
                if (!path->nodes[level])
                        break;
                eb = path->nodes[level];
@@ -2727,7 +2736,7 @@ static int check_path_shared(struct btrfs_root *root,
                if (refs > 1)
                        return 1;
        }
-       return ret; /* XXX callers? */
+       return 0;
 }
 
 /*
@@ -4134,7 +4143,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
        }
        srcu_read_unlock(&root->fs_info->subvol_srcu, index);
 
-       if (root != sub_root) {
+       if (!IS_ERR(inode) && root != sub_root) {
                down_read(&root->fs_info->cleanup_work_sem);
                if (!(inode->i_sb->s_flags & MS_RDONLY))
                        btrfs_orphan_cleanup(sub_root);
@@ -4347,6 +4356,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                        trans = btrfs_join_transaction_nolock(root, 1);
                else
                        trans = btrfs_join_transaction(root, 1);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
                btrfs_set_trans_block_group(trans, inode);
                if (nolock)
                        ret = btrfs_end_transaction_nolock(trans, root);
@@ -4372,6 +4383,7 @@ void btrfs_dirty_inode(struct inode *inode)
                return;
 
        trans = btrfs_join_transaction(root, 1);
+       BUG_ON(IS_ERR(trans));
        btrfs_set_trans_block_group(trans, inode);
 
        ret = btrfs_update_inode(trans, root, inode);
@@ -5176,6 +5188,8 @@ again:
                                em = NULL;
                                btrfs_release_path(root, path);
                                trans = btrfs_join_transaction(root, 1);
+                               if (IS_ERR(trans))
+                                       return ERR_CAST(trans);
                                goto again;
                        }
                        map = kmap(page);
@@ -5266,6 +5280,128 @@ out:
        return em;
 }
 
+struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
+                                          size_t pg_offset, u64 start, u64 len,
+                                          int create)
+{
+       struct extent_map *em;
+       struct extent_map *hole_em = NULL;
+       u64 range_start = start;
+       u64 end;
+       u64 found;
+       u64 found_end;
+       int err = 0;
+
+       em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+       if (IS_ERR(em))
+               return em;
+       if (em) {
+               /*
+                * if our em maps to a hole, there might
+                * actually be delalloc bytes behind it
+                */
+               if (em->block_start != EXTENT_MAP_HOLE)
+                       return em;
+               else
+                       hole_em = em;
+       }
+
+       /* check to see if we've wrapped (len == -1 or similar) */
+       end = start + len;
+       if (end < start)
+               end = (u64)-1;
+       else
+               end -= 1;
+
+       em = NULL;
+
+       /* ok, we didn't find anything, let's look for delalloc */
+       found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
+                                end, len, EXTENT_DELALLOC, 1);
+       found_end = range_start + found;
+       if (found_end < range_start)
+               found_end = (u64)-1;
+
+       /*
+        * we didn't find anything useful, return
+        * the original results from get_extent()
+        */
+       if (range_start > end || found_end <= start) {
+               em = hole_em;
+               hole_em = NULL;
+               goto out;
+       }
+
+       /* adjust the range_start to make sure it doesn't
+        * go backwards from the start they passed in
+        */
+       range_start = max(start, range_start);
+       found = found_end - range_start;
+
+       if (found > 0) {
+               u64 hole_start = start;
+               u64 hole_len = len;
+
+               em = alloc_extent_map(GFP_NOFS);
+               if (!em) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               /*
+                * when btrfs_get_extent can't find anything it
+                * returns one huge hole
+                *
+                * make sure what it found really fits our range, and
+                * adjust to make sure it is based on the start from
+                * the caller
+                */
+               if (hole_em) {
+                       u64 calc_end = extent_map_end(hole_em);
+
+                       if (calc_end <= start || (hole_em->start > end)) {
+                               free_extent_map(hole_em);
+                               hole_em = NULL;
+                       } else {
+                               hole_start = max(hole_em->start, start);
+                               hole_len = calc_end - hole_start;
+                       }
+               }
+               em->bdev = NULL;
+               if (hole_em && range_start > hole_start) {
+                       /* our hole starts before our delalloc, so we
+                        * have to return just the parts of the hole
+                        * that go until the delalloc starts
+                        */
+                       em->len = min(hole_len,
+                                     range_start - hole_start);
+                       em->start = hole_start;
+                       em->orig_start = hole_start;
+                       /*
+                        * don't adjust block start at all,
+                        * it is fixed at EXTENT_MAP_HOLE
+                        */
+                       em->block_start = hole_em->block_start;
+                       em->block_len = hole_len;
+               } else {
+                       em->start = range_start;
+                       em->len = found;
+                       em->orig_start = range_start;
+                       em->block_start = EXTENT_MAP_DELALLOC;
+                       em->block_len = found;
+               }
+       } else if (hole_em) {
+               return hole_em;
+       }
+out:
+
+       free_extent_map(hole_em);
+       if (err) {
+               free_extent_map(em);
+               return ERR_PTR(err);
+       }
+       return em;
+}
+
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
                                                  u64 start, u64 len)
 {
@@ -5280,8 +5416,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
 
        trans = btrfs_join_transaction(root, 0);
-       if (!trans)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(trans))
+               return ERR_CAST(trans);
 
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -5505,7 +5641,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                 * while we look for nocow cross refs
                 */
                trans = btrfs_join_transaction(root, 0);
-               if (!trans)
+               if (IS_ERR(trans))
                        goto must_cow;
 
                if (can_nocow_odirect(trans, inode, start, len) == 1) {
@@ -5640,7 +5776,7 @@ again:
        BUG_ON(!ordered);
 
        trans = btrfs_join_transaction(root, 1);
-       if (!trans) {
+       if (IS_ERR(trans)) {
                err = -ENOMEM;
                goto out;
        }
@@ -6088,7 +6224,7 @@ out:
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
 {
-       return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
+       return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
 }
 
 int btrfs_readpage(struct file *file, struct page *page)
index a506a22..5fdb2ab 100644 (file)
@@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 
 
        trans = btrfs_join_transaction(root, 1);
-       BUG_ON(!trans);
+       BUG_ON(IS_ERR(trans));
 
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
@@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 
        if (new_size > old_size) {
                trans = btrfs_start_transaction(root, 0);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+                       goto out_unlock;
+               }
                ret = btrfs_grow_device(trans, device, new_size);
                btrfs_commit_transaction(trans, root);
        } else {
@@ -1067,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
        if (copy_from_user(&flags, arg, sizeof(flags)))
                return -EFAULT;
 
-       if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC)
+       if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
                return -EINVAL;
 
        if (flags & ~BTRFS_SUBVOL_RDONLY)
                return -EOPNOTSUPP;
 
+       if (!is_owner_or_cap(inode))
+               return -EACCES;
+
        down_write(&root->fs_info->subvol_sem);
 
        /* nothing to do */
@@ -1093,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
                goto out_reset;
        }
 
-       ret = btrfs_update_root(trans, root,
+       ret = btrfs_update_root(trans, root->fs_info->tree_root,
                                &root->root_key, &root->root_item);
 
        btrfs_commit_transaction(trans, root);
@@ -1898,7 +1905,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.objectid = inode->i_ino;
-                       new_key.offset = key.offset + destoff - off;
+                       if (off <= key.offset)
+                               new_key.offset = key.offset + destoff - off;
+                       else
+                               new_key.offset = destoff;
 
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
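
The extra branch covers a clone whose source range starts inside an extent item: with off greater than key.offset, the old key.offset + destoff - off would wrap around in u64 arithmetic, so the item is pinned to destoff instead. A tiny standalone sketch of just the offset calculation, using made-up offsets:

/* Destination key offset for a cloned extent item (illustrative values). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t off = 8192;		/* start of the cloned source range */
	uint64_t destoff = 0;		/* start of the destination range */
	uint64_t key_offset = 4096;	/* file offset of the source item */
	uint64_t new_offset;

	if (off <= key_offset)
		new_offset = key_offset + destoff - off;
	else
		/* item begins before the cloned range: anchor it at destoff
		 * rather than letting the subtraction wrap */
		new_offset = destoff;

	printf("new_key.offset = %llu\n", (unsigned long long)new_offset);
	return 0;
}
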
@@ -2082,7 +2092,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
 
        ret = -ENOMEM;
        trans = btrfs_start_ioctl_transaction(root, 0);
-       if (!trans)
+       if (IS_ERR(trans))
                goto out_drop;
 
        file->private_data = trans;
@@ -2138,9 +2148,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
        path->leave_spinning = 1;
 
        trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
+       if (IS_ERR(trans)) {
                btrfs_free_path(path);
-               return -ENOMEM;
+               return PTR_ERR(trans);
        }
 
        dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@@ -2201,7 +2211,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
        int num_types = 4;
        int alloc_size;
        int ret = 0;
-       int slot_count = 0;
+       u64 slot_count = 0;
        int i, c;
 
        if (copy_from_user(&space_args,
@@ -2240,7 +2250,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                goto out;
        }
 
-       slot_count = min_t(int, space_args.space_slots, slot_count);
+       slot_count = min_t(u64, space_args.space_slots, slot_count);
 
        alloc_size = sizeof(*dest) * slot_count;
 
@@ -2260,6 +2270,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
        for (i = 0; i < num_types; i++) {
                struct btrfs_space_info *tmp;
 
+               if (!slot_count)
+                       break;
+
                info = NULL;
                rcu_read_lock();
                list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -2281,7 +2294,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                                memcpy(dest, &space, sizeof(space));
                                dest++;
                                space_args.total_spaces++;
+                               slot_count--;
                        }
+                       if (!slot_count)
+                               break;
                }
                up_read(&info->groups_sem);
        }
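
slot_count is widened to u64 because space_args.space_slots arrives from userspace as a 64-bit value; comparing through min_t(int, ...) first truncates it, so a large request could collapse to a tiny or zero slot count. A small standalone demonstration of that narrowing, with hypothetical values (the exact result of the out-of-range cast is whatever a typical build produces):

/* What min_t(int, ...) does to a 64-bit slot request (illustrative). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t space_slots = 1ULL << 32;	/* request from userspace */
	uint64_t avail = 12;			/* slots we could actually fill */

	/* old: both sides squeezed through int before comparing */
	int narrow = (int)space_slots < (int)avail ?
			(int)space_slots : (int)avail;
	/* new: full-width comparison */
	uint64_t wide = space_slots < avail ? space_slots : avail;

	/* typically prints narrow=0 wide=12 */
	printf("narrow=%d wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}
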
@@ -2334,6 +2350,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
        u64 transid;
 
        trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
        transid = trans->transid;
        btrfs_commit_transaction_async(trans, root, 0);
 
index cc9b450..a178f5e 100644 (file)
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
        unsigned long tot_out;
        unsigned long tot_len;
        char *buf;
+       bool may_late_unmap, need_unmap;
 
        data_in = kmap(pages_in[0]);
        tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
 
                tot_in += in_len;
                working_bytes = in_len;
+               may_late_unmap = need_unmap = false;
 
                /* fast path: avoid using the working buffer */
                if (in_page_bytes_left >= in_len) {
                        buf = data_in + in_offset;
                        bytes = in_len;
+                       may_late_unmap = true;
                        goto cont;
                }
 
@@ -329,14 +332,17 @@ cont:
                                if (working_bytes == 0 && tot_in >= tot_len)
                                        break;
 
-                               kunmap(pages_in[page_in_index]);
-                               page_in_index++;
-                               if (page_in_index >= total_pages_in) {
+                               if (page_in_index + 1 >= total_pages_in) {
                                        ret = -1;
-                                       data_in = NULL;
                                        goto done;
                                }
-                               data_in = kmap(pages_in[page_in_index]);
+
+                               if (may_late_unmap)
+                                       need_unmap = true;
+                               else
+                                       kunmap(pages_in[page_in_index]);
+
+                               data_in = kmap(pages_in[++page_in_index]);
 
                                in_page_bytes_left = PAGE_CACHE_SIZE;
                                in_offset = 0;
@@ -346,6 +352,8 @@ cont:
                out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
+               if (need_unmap)
+                       kunmap(pages_in[page_in_index - 1]);
                if (ret != LZO_E_OK) {
                        printk(KERN_WARNING "btrfs decompress failed\n");
                        ret = -1;
@@ -363,8 +371,7 @@ cont:
                        break;
        }
 done:
-       if (data_in)
-               kunmap(pages_in[page_in_index]);
+       kunmap(pages_in[page_in_index]);
        return ret;
 }
 
index 2b61e1d..083a554 100644 (file)
@@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
 {
        struct rb_root *root = &tree->tree;
-       struct rb_node *prev;
+       struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;
 
index 0d126be..fb2605d 100644 (file)
@@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 #else
                        BUG();
 #endif
+                       break;
                case BTRFS_BLOCK_GROUP_ITEM_KEY:
                        bi = btrfs_item_ptr(l, i,
                                            struct btrfs_block_group_item);
index 045c9c2..31ade58 100644 (file)
@@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
        new_node->bytenr = dest->node->start;
        new_node->level = node->level;
        new_node->lowest = node->lowest;
+       new_node->checked = 1;
        new_node->root = dest;
 
        if (!node->lowest) {
@@ -2028,6 +2029,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 
        while (1) {
                trans = btrfs_start_transaction(root, 0);
+               BUG_ON(IS_ERR(trans));
                trans->block_rsv = rc->block_rsv;
 
                ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2147,6 +2149,12 @@ again:
        }
 
        trans = btrfs_join_transaction(rc->extent_root, 1);
+       if (IS_ERR(trans)) {
+               if (!err)
+                       btrfs_block_rsv_release(rc->extent_root,
+                                               rc->block_rsv, num_bytes);
+               return PTR_ERR(trans);
+       }
 
        if (!err) {
                if (num_bytes != rc->merging_rsv_size) {
@@ -3222,6 +3230,7 @@ truncate:
        trans = btrfs_join_transaction(root, 0);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
+               ret = PTR_ERR(trans);
                goto out;
        }
 
@@ -3628,6 +3637,7 @@ int prepare_to_relocate(struct reloc_control *rc)
        set_reloc_control(rc);
 
        trans = btrfs_join_transaction(rc->extent_root, 1);
+       BUG_ON(IS_ERR(trans));
        btrfs_commit_transaction(trans, rc->extent_root);
        return 0;
 }
@@ -3644,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
        u32 item_size;
        int ret;
        int err = 0;
+       int progress = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3656,8 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
        }
 
        while (1) {
+               progress++;
                trans = btrfs_start_transaction(rc->extent_root, 0);
-
+               BUG_ON(IS_ERR(trans));
+restart:
                if (update_backref_cache(trans, &rc->backref_cache)) {
                        btrfs_end_transaction(trans, rc->extent_root);
                        continue;
@@ -3770,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
                        }
                }
        }
+       if (trans && progress && err == -ENOSPC) {
+               ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+                                             rc->block_group->flags);
+               if (ret == 0) {
+                       err = 0;
+                       progress = 0;
+                       goto restart;
+               }
+       }
 
        btrfs_release_path(rc->extent_root, path);
        clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
@@ -3804,7 +3826,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 
        /* get rid of pinned extents */
        trans = btrfs_join_transaction(rc->extent_root, 1);
-       btrfs_commit_transaction(trans, rc->extent_root);
+       if (IS_ERR(trans))
+               err = PTR_ERR(trans);
+       else
+               btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
        btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
        btrfs_free_path(path);
@@ -4022,6 +4047,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
        int ret;
 
        trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+       BUG_ON(IS_ERR(trans));
 
        memset(&root->root_item.drop_progress, 0,
                sizeof(root->root_item.drop_progress));
@@ -4125,6 +4151,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
        set_reloc_control(rc);
 
        trans = btrfs_join_transaction(rc->extent_root, 1);
+       if (IS_ERR(trans)) {
+               unset_reloc_control(rc);
+               err = PTR_ERR(trans);
+               goto out_free;
+       }
 
        rc->merge_reloc_tree = 1;
 
@@ -4154,9 +4185,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
        unset_reloc_control(rc);
 
        trans = btrfs_join_transaction(rc->extent_root, 1);
-       btrfs_commit_transaction(trans, rc->extent_root);
-out:
+       if (IS_ERR(trans))
+               err = PTR_ERR(trans);
+       else
+               btrfs_commit_transaction(trans, rc->extent_root);
+out_free:
        kfree(rc);
+out:
        while (!list_empty(&reloc_roots)) {
                reloc_root = list_entry(reloc_roots.next,
                                        struct btrfs_root, root_list);
index b2130c4..d39a989 100644 (file)
@@ -155,7 +155,8 @@ enum {
        Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
-       Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err,
+       Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
+       Opt_enospc_debug, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -184,6 +185,7 @@ static match_table_t tokens = {
        {Opt_space_cache, "space_cache"},
        {Opt_clear_cache, "clear_cache"},
        {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+       {Opt_enospc_debug, "enospc_debug"},
        {Opt_err, NULL},
 };
 
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_user_subvol_rm_allowed:
                        btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
                        break;
+               case Opt_enospc_debug:
+                       btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+                       break;
                case Opt_err:
                        printk(KERN_INFO "btrfs: unrecognized mount option "
                               "'%s'\n", p);
@@ -383,7 +388,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
                struct btrfs_fs_devices **fs_devices)
 {
        substring_t args[MAX_OPT_ARGS];
-       char *opts, *p;
+       char *opts, *orig, *p;
        int error = 0;
        int intarg;
 
@@ -397,6 +402,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
        opts = kstrdup(options, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;
+       orig = opts;
 
        while ((p = strsep(&opts, ",")) != NULL) {
                int token;
@@ -432,7 +438,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
        }
 
  out_free_opts:
-       kfree(opts);
+       kfree(orig);
  out:
        /*
         * If no subvolume name is specified we use the default one.  Allocate
@@ -623,6 +629,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
        btrfs_wait_ordered_extents(root, 0, 0);
 
        trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);
        return ret;
 }
@@ -761,6 +769,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
                }
 
                btrfs_close_devices(fs_devices);
+               kfree(fs_info);
+               kfree(tree_root);
        } else {
                char b[BDEVNAME_SIZE];
 
index bae5c7b..3d73c8d 100644 (file)
@@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root, 0);
+       if (IS_ERR(ac->newtrans)) {
+               int err = PTR_ERR(ac->newtrans);
+               kfree(ac);
+               return err;
+       }
 
        /* take transaction reference */
        mutex_lock(&root->fs_info->trans_mutex);
index 054744a..a4bbb85 100644 (file)
@@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                }
                dst_copy = kmalloc(item_size, GFP_NOFS);
                src_copy = kmalloc(item_size, GFP_NOFS);
+               if (!dst_copy || !src_copy) {
+                       btrfs_release_path(root, path);
+                       kfree(dst_copy);
+                       kfree(src_copy);
+                       return -ENOMEM;
+               }
 
                read_extent_buffer(eb, src_copy, src_ptr, item_size);
 
@@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        btrfs_dir_item_key_to_cpu(leaf, di, &location);
        name_len = btrfs_dir_name_len(leaf, di);
        name = kmalloc(name_len, GFP_NOFS);
+       if (!name)
+               return -ENOMEM;
+
        read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
        btrfs_release_path(root, path);
 
@@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log,
        int match = 0;
 
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
        ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
        if (ret != 0)
                goto out;
@@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        key.offset = (u64)-1;
 
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
 
        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 
        name_len = btrfs_dir_name_len(eb, di);
        name = kmalloc(name_len, GFP_NOFS);
+       if (!name)
+               return -ENOMEM;
+
        log_type = btrfs_dir_type(eb, di);
        read_extent_buffer(eb, name, (unsigned long)(di + 1),
                   name_len);
@@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                root_owner = btrfs_header_owner(parent);
 
                next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+               if (!next)
+                       return -ENOMEM;
 
                if (*level == 1) {
                        wc->process_func(root, next, wc, ptr_gen);
@@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                wait_log_commit(trans, log_root_tree,
                                log_root_tree->log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
+               ret = 0;
                goto out;
        }
        atomic_set(&log_root_tree->log_commit[index2], 1);
@@ -2096,7 +2116,7 @@ out:
        smp_mb();
        if (waitqueue_active(&root->log_commit_wait[index1]))
                wake_up(&root->log_commit_wait[index1]);
-       return 0;
+       return ret;
 }
 
 static void free_log_tree(struct btrfs_trans_handle *trans,
@@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 
        log = root->log_root;
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
        di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
                                   name, name_len, -1);
        if (IS_ERR(di)) {
@@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 
        ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
                           nr * sizeof(u32), GFP_NOFS);
+       if (!ins_data)
+               return -ENOMEM;
+
        ins_sizes = (u32 *)ins_data;
        ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
 
@@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        log = root->log_root;
 
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
        dst_path = btrfs_alloc_path();
+       if (!dst_path) {
+               btrfs_free_path(path);
+               return -ENOMEM;
+       }
 
        min_key.objectid = inode->i_ino;
        min_key.type = BTRFS_INODE_ITEM_KEY;
@@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
        BUG_ON(!path);
 
        trans = btrfs_start_transaction(fs_info->tree_root, 0);
+       BUG_ON(IS_ERR(trans));
 
        wc.trans = trans;
        wc.pin = 1;
index d158530..dd13eb8 100644 (file)
@@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
                return -ENOMEM;
 
        trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               btrfs_free_path(path);
+               return PTR_ERR(trans);
+       }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
@@ -1334,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 
        ret = btrfs_shrink_device(device, 0);
        if (ret)
-               goto error_brelse;
+               goto error_undo;
 
        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
-               goto error_brelse;
+               goto error_undo;
 
        device->in_fs_metadata = 0;
 
@@ -1412,6 +1416,13 @@ out:
        mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
+error_undo:
+       if (device->writeable) {
+               list_add(&device->dev_alloc_list,
+                        &root->fs_info->fs_devices->alloc_list);
+               root->fs_info->fs_devices->rw_devices++;
+       }
+       goto error_brelse;
 }
 
 /*
@@ -1601,11 +1612,19 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
        ret = find_next_devid(root, &device->devid);
        if (ret) {
+               kfree(device->name);
                kfree(device);
                goto error;
        }
 
        trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               kfree(device->name);
+               kfree(device);
+               ret = PTR_ERR(trans);
+               goto error;
+       }
+
        lock_chunks(root);
 
        device->writeable = 1;
@@ -1621,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;
-       device->mode = 0;
+       device->mode = FMODE_EXCL;
        set_blocksize(device->bdev, 4096);
 
        if (seeding_dev) {
@@ -1873,7 +1892,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
                return ret;
 
        trans = btrfs_start_transaction(root, 0);
-       BUG_ON(!trans);
+       BUG_ON(IS_ERR(trans));
 
        lock_chunks(root);
 
@@ -2047,7 +2066,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
                BUG_ON(ret);
 
                trans = btrfs_start_transaction(dev_root, 0);
-               BUG_ON(!trans);
+               BUG_ON(IS_ERR(trans));
 
                ret = btrfs_grow_device(trans, device, old_size);
                BUG_ON(ret);
@@ -2213,6 +2232,11 @@ again:
 
        /* Shrinking succeeded, else we would be at "done". */
        trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto done;
+       }
+
        lock_chunks(root);
 
        device->disk_total_bytes = new_size;
index 60d27bc..6b61ded 100644 (file)
@@ -1560,9 +1560,10 @@ retry_locked:
                /* NOTE: no side-effects allowed, until we take s_mutex */
 
                revoking = cap->implemented & ~cap->issued;
-               if (revoking)
-                       dout(" mds%d revoking %s\n", cap->mds,
-                            ceph_cap_string(revoking));
+               dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
+                    cap->mds, cap, ceph_cap_string(cap->issued),
+                    ceph_cap_string(cap->implemented),
+                    ceph_cap_string(revoking));
 
                if (cap == ci->i_auth_cap &&
                    (cap->issued & CEPH_CAP_FILE_WR)) {
@@ -1658,6 +1659,8 @@ ack:
 
                if (cap == ci->i_auth_cap && ci->i_dirty_caps)
                        flushing = __mark_caps_flushing(inode, session);
+               else
+                       flushing = 0;
 
                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;
@@ -1940,6 +1943,35 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
        }
 }
 
+static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
+                                    struct ceph_mds_session *session,
+                                    struct inode *inode)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_cap *cap;
+       int delayed = 0;
+
+       spin_lock(&inode->i_lock);
+       cap = ci->i_auth_cap;
+       dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
+            ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
+       __ceph_flush_snaps(ci, &session, 1);
+       if (ci->i_flushing_caps) {
+               delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+                                    __ceph_caps_used(ci),
+                                    __ceph_caps_wanted(ci),
+                                    cap->issued | cap->implemented,
+                                    ci->i_flushing_caps, NULL);
+               if (delayed) {
+                       spin_lock(&inode->i_lock);
+                       __cap_delay_requeue(mdsc, ci);
+                       spin_unlock(&inode->i_lock);
+               }
+       } else {
+               spin_unlock(&inode->i_lock);
+       }
+}
+
 
 /*
  * Take references to capabilities we hold, so that we don't release
@@ -2687,7 +2719,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
        ceph_add_cap(inode, session, cap_id, -1,
                     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
                     NULL /* no caps context */);
-       try_flush_caps(inode, session, NULL);
+       kick_flushing_inode_caps(mdsc, session, inode);
        up_read(&mdsc->snap_rwsem);
 
        /* make sure we re-request max_size, if necessary */
@@ -2785,8 +2817,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        case CEPH_CAP_OP_IMPORT:
                handle_cap_import(mdsc, inode, h, session,
                                  snaptrace, snaptrace_len);
-               ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
-                               session);
+               ceph_check_caps(ceph_inode(inode), 0, session);
                goto done_unlocked;
        }
 
index 0bc68de..ebafa65 100644 (file)
@@ -409,7 +409,7 @@ more:
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
-               ci->i_ceph_flags |= CEPH_I_COMPLETE;
+               /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);
@@ -496,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
 
        /* .snap dir? */
        if (err == -ENOENT &&
+           ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
@@ -992,7 +993,7 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
        struct inode *dir;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        dir = dentry->d_parent->d_inode;
@@ -1029,28 +1030,8 @@ out_touch:
 static void ceph_dentry_release(struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
-       struct inode *parent_inode = NULL;
-       u64 snapid = CEPH_NOSNAP;
 
-       if (!IS_ROOT(dentry)) {
-               parent_inode = dentry->d_parent->d_inode;
-               if (parent_inode)
-                       snapid = ceph_snap(parent_inode);
-       }
-       dout("dentry_release %p parent %p\n", dentry, parent_inode);
-       if (parent_inode && snapid != CEPH_SNAPDIR) {
-               struct ceph_inode_info *ci = ceph_inode(parent_inode);
-
-               spin_lock(&parent_inode->i_lock);
-               if (ci->i_shared_gen == di->lease_shared_gen ||
-                   snapid <= CEPH_MAXSNAP) {
-                       dout(" clearing %p complete (d_release)\n",
-                            parent_inode);
-                       ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
-                       ci->i_release_count++;
-               }
-               spin_unlock(&parent_inode->i_lock);
-       }
+       dout("dentry_release %p\n", dentry);
        if (di) {
                ceph_dentry_lru_del(dentry);
                if (di->lease_session)
index e835eff..193bfa5 100644 (file)
@@ -707,13 +707,9 @@ static int fill_inode(struct inode *inode,
                    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
-                       ci->i_ceph_flags |= CEPH_I_COMPLETE;
+                       /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
                        ci->i_max_offset = 2;
                }
-
-               /* it may be better to set st_size in getattr instead? */
-               if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
-                       inode->i_size = ci->i_rbytes;
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
@@ -1819,7 +1815,11 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
                else
                        stat->dev = 0;
                if (S_ISDIR(inode->i_mode)) {
-                       stat->size = ci->i_rbytes;
+                       if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
+                                               RBYTES))
+                               stat->size = ci->i_rbytes;
+                       else
+                               stat->size = ci->i_files + ci->i_subdirs;
                        stat->blocks = 0;
                        stat->blksize = 65536;
                }
index 1e30d19..a1ee8fa 100644 (file)
@@ -693,9 +693,11 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (%d/%d)\n",
                                     inode, ceph_vinop(inode),
-                                    frag.frag, frag.mds,
+                                    frag.frag, mds,
                                     (int)r, frag.ndist);
-                               return mds;
+                               if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
+                                   CEPH_MDS_STATE_ACTIVE)
+                                       return mds;
                        }
 
                        /* since this file/dir wasn't known to be
@@ -708,7 +710,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (auth)\n",
                                     inode, ceph_vinop(inode), frag.frag, mds);
-                               return mds;
+                               if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
+                                   CEPH_MDS_STATE_ACTIVE)
+                                       return mds;
                        }
                }
        }
index 39c243a..f40b913 100644 (file)
@@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
        if (lastinode)
                iput(lastinode);
 
-       dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
-       list_for_each_entry(child, &realm->children, child_item)
-               queue_realm_cap_snaps(child);
+       list_for_each_entry(child, &realm->children, child_item) {
+               dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
+                    realm, realm->ino, child, child->ino);
+               list_del_init(&child->dirty_item);
+               list_add(&child->dirty_item, &realm->dirty_item);
+       }
 
+       list_del_init(&realm->dirty_item);
        dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
 }
 
@@ -683,7 +687,9 @@ more:
         * queue cap snaps _after_ we've built the new snap contexts,
         * so that i_head_snapc can be set appropriately.
         */
-       list_for_each_entry(realm, &dirty_realms, dirty_item) {
+       while (!list_empty(&dirty_realms)) {
+               realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
+                                        dirty_item);
                queue_realm_cap_snaps(realm);
        }
 
index bf6f0f3..9c50854 100644 (file)
@@ -290,6 +290,8 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
 
         fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
         fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+       fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
+       fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
         fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
         fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
         fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
index 6e12a6b..8c9eba6 100644 (file)
@@ -219,6 +219,7 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
+       int name_len = strlen(name);
        int c;
 
        p = &ci->i_xattrs.index.rb_node;
@@ -226,6 +227,8 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
                parent = *p;
                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, xattr->name_len);
+               if (c == 0 && name_len > xattr->name_len)
+                       c = 1;
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
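
The two added lines in the hunk above guard against a prefix match: strncmp() compares only the first xattr->name_len bytes, so a lookup name that merely begins with a stored name would otherwise compare equal. A minimal standalone sketch of the comparison rule (hypothetical names, not the ceph rb-tree code itself):

    #include <stdio.h>
    #include <string.h>

    /* Compare a lookup name against a stored entry of known length; a longer
     * name that shares the stored prefix must sort after it, not match it. */
    static int xattr_name_cmp(const char *name, const char *stored, size_t stored_len)
    {
            int c = strncmp(name, stored, stored_len);

            if (c == 0 && strlen(name) > stored_len)
                    c = 1;
            return c;
    }

    int main(void)
    {
            printf("%d\n", xattr_name_cmp("user.foo", "user.foo", 8));    /* 0: exact match */
            printf("%d\n", xattr_name_cmp("user.foobar", "user.foo", 8)); /* 1: prefix only */
            return 0;
    }

Without the extra length check, both calls would return 0 and the longer name would wrongly match the shorter stored entry.
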
index ee45648..7cb0f7f 100644 (file)
@@ -3,6 +3,7 @@ config CIFS
        depends on INET
        select NLS
        select CRYPTO
+       select CRYPTO_MD4
        select CRYPTO_MD5
        select CRYPTO_HMAC
        select CRYPTO_ARC4
index 43b19dd..d875584 100644 (file)
@@ -5,7 +5,7 @@ obj-$(CONFIG_CIFS) += cifs.o
 
 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
          link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \
-         md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
+         cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
          readdir.o ioctl.o sess.o export.o
 
 cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
index 46af99a..fe16835 100644 (file)
@@ -452,6 +452,11 @@ A partial list of the supported mount options follows:
                if oplock (caching token) is granted and held. Note that
                direct allows write operations larger than page size
                to be sent to the server.
+  strictcache   Switch on strict cache mode. In this mode the client reads
+               from the cache as long as it holds Oplock Level II; otherwise
+               it reads from the server. All written data are stored in the
+               cache, but if the client does not hold an Exclusive Oplock,
+               it also writes the data to the server.
   acl          Allow setfacl and getfacl to manage posix ACLs if server
                supports them.  (default)
   noacl        Do not allow setfacl and getfacl calls on this mount
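
The strictcache behavior described above is what the cifs_strict_writev/cifs_strict_readv entry points further down in this series implement: with an exclusive oplock the client may use the page cache, otherwise it must go to the server. A minimal standalone sketch of the write-side rule (hypothetical names, not the fs/cifs code):

    #include <stdio.h>

    /* Hypothetical per-file state; only the oplock level matters here. */
    struct strict_file {
            int can_cache_all;  /* non-zero while an exclusive oplock is held */
    };

    /* strictcache rule for writes: use the local cache only when the whole
     * file may be cached; otherwise the data goes straight to the server. */
    static const char *strict_write_target(const struct strict_file *f)
    {
            return f->can_cache_all ? "page cache" : "server";
    }

    int main(void)
    {
            struct strict_file exclusive = { 1 }, level2 = { 0 };

            printf("exclusive oplock -> write via %s\n", strict_write_target(&exclusive));
            printf("level II oplock  -> write via %s\n", strict_write_target(&level2));
            return 0;
    }

A mount such as "mount -t cifs //server/share /mnt -o username=user,strictcache" (hypothetical server and share names) turns the mode on once the option-parsing hunk in fs/cifs/connect.c below recognizes "strictcache".
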
index 7ed3653..0a265ad 100644 (file)
@@ -282,8 +282,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
        cFYI(1, "in %s", __func__);
        BUG_ON(IS_ROOT(mntpt));
 
-       xid = GetXid();
-
        /*
         * The MSDFS spec states that paths in DFS referral requests and
         * responses must be prefixed by a single '\' character instead of
@@ -293,20 +291,21 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
        mnt = ERR_PTR(-ENOMEM);
        full_path = build_path_from_dentry(mntpt);
        if (full_path == NULL)
-               goto free_xid;
+               goto cdda_exit;
 
        cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
-       mnt = ERR_PTR(-EINVAL);
        if (IS_ERR(tlink)) {
                mnt = ERR_CAST(tlink);
                goto free_full_path;
        }
        ses = tlink_tcon(tlink)->ses;
 
+       xid = GetXid();
        rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
                &num_referrals, &referrals,
                cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+       FreeXid(xid);
 
        cifs_put_tlink(tlink);
 
@@ -339,8 +338,7 @@ success:
        free_dfs_info_array(referrals, num_referrals);
 free_full_path:
        kfree(full_path);
-free_xid:
-       FreeXid(xid);
+cdda_exit:
        cFYI(1, "leaving %s" , __func__);
        return mnt;
 }
index 1e7636b..beeebf1 100644 (file)
@@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 
                ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
                                GFP_KERNEL);
+               if (!ppace) {
+                       cERROR(1, "DACL memory allocation error");
+                       return;
+               }
 
                for (i = 0; i < num_aces; ++i) {
                        ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
index 66f3d50..a51585f 100644 (file)
@@ -24,7 +24,6 @@
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifs_debug.h"
-#include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
 #include "ntlmssp.h"
 /* Note that the smb header signature field on input contains the
        sequence number before this function is called */
 
-extern void mdfour(unsigned char *out, unsigned char *in, int n);
-extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-                      unsigned char *p24);
-
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
                                struct TCP_Server_Info *server, char *signature)
 {
@@ -234,6 +228,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 /* first calculate 24 bytes ntlm response and then 16 byte session key */
 int setup_ntlm_response(struct cifsSesInfo *ses)
 {
+       int rc = 0;
        unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
        char temp_key[CIFS_SESS_KEY_SIZE];
 
@@ -247,13 +242,26 @@ int setup_ntlm_response(struct cifsSesInfo *ses)
        }
        ses->auth_key.len = temp_len;
 
-       SMBNTencrypt(ses->password, ses->server->cryptkey,
+       rc = SMBNTencrypt(ses->password, ses->server->cryptkey,
                        ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+       if (rc) {
+               cFYI(1, "%s Can't generate NTLM response, error: %d",
+                       __func__, rc);
+               return rc;
+       }
 
-       E_md4hash(ses->password, temp_key);
-       mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
+       rc = E_md4hash(ses->password, temp_key);
+       if (rc) {
+               cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
+               return rc;
+       }
 
-       return 0;
+       rc = mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
+       if (rc)
+               cFYI(1, "%s Can't generate NTLM session key, error: %d",
+                       __func__, rc);
+
+       return rc;
 }
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -649,9 +657,10 @@ calc_seckey(struct cifsSesInfo *ses)
        get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
 
        tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
-       if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
+       if (IS_ERR(tfm_arc4)) {
+               rc = PTR_ERR(tfm_arc4);
                cERROR(1, "could not allocate crypto API arc4\n");
-               return PTR_ERR(tfm_arc4);
+               return rc;
        }
 
        desc.tfm = tfm_arc4;
@@ -700,14 +709,13 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
        unsigned int size;
 
        server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
-       if (!server->secmech.hmacmd5 ||
-                       IS_ERR(server->secmech.hmacmd5)) {
+       if (IS_ERR(server->secmech.hmacmd5)) {
                cERROR(1, "could not allocate crypto hmacmd5\n");
                return PTR_ERR(server->secmech.hmacmd5);
        }
 
        server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
-       if (!server->secmech.md5 || IS_ERR(server->secmech.md5)) {
+       if (IS_ERR(server->secmech.md5)) {
                cERROR(1, "could not allocate crypto md5\n");
                rc = PTR_ERR(server->secmech.md5);
                goto crypto_allocate_md5_fail;
diff --git a/fs/cifs/cifsencrypt.h b/fs/cifs/cifsencrypt.h
deleted file mode 100644 (file)
index 15d2ec0..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *   fs/cifs/cifsencrypt.h
- *
- *   Copyright (c) International Business Machines  Corp., 2005
- *   Author(s): Steve French (sfrench@us.ibm.com)
- *
- *   Externs for misc. small encryption routines
- *   so we do not have to put them in cifsproto.h
- *
- *   This library is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU Lesser General Public License as published
- *   by the Free Software Foundation; either version 2.1 of the License, or
- *   (at your option) any later version.
- *
- *   This library is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- *   the GNU Lesser General Public License for more details.
- *
- *   You should have received a copy of the GNU Lesser General Public License
- *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* md4.c */
-extern void mdfour(unsigned char *out, unsigned char *in, int n);
-/* smbdes.c */
-extern void E_P16(unsigned char *p14, unsigned char *p16);
-extern void E_P24(unsigned char *p21, const unsigned char *c8,
-                 unsigned char *p24);
-
-
-
index a8323f1..f297013 100644 (file)
@@ -600,10 +600,17 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        ssize_t written;
+       int rc;
 
        written = generic_file_aio_write(iocb, iov, nr_segs, pos);
-       if (!CIFS_I(inode)->clientCanCacheAll)
-               filemap_fdatawrite(inode->i_mapping);
+
+       if (CIFS_I(inode)->clientCanCacheAll)
+               return written;
+
+       rc = filemap_fdatawrite(inode->i_mapping);
+       if (rc)
+               cFYI(1, "cifs_file_aio_write: %d rc on %p inode", rc, inode);
+
        return written;
 }
 
@@ -737,7 +744,7 @@ const struct file_operations cifs_file_strict_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = cifs_strict_readv,
-       .aio_write = cifs_file_aio_write,
+       .aio_write = cifs_strict_writev,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
@@ -793,7 +800,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = cifs_strict_readv,
-       .aio_write = cifs_file_aio_write,
+       .aio_write = cifs_strict_writev,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_strict_fsync,
index f23206d..a9371b6 100644 (file)
@@ -85,7 +85,9 @@ extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
 extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
                                 unsigned long nr_segs, loff_t pos);
 extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
-                        size_t write_size, loff_t *poffset);
+                              size_t write_size, loff_t *poffset);
+extern ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+                                 unsigned long nr_segs, loff_t pos);
 extern int cifs_lock(struct file *, int, struct file_lock *);
 extern int cifs_fsync(struct file *, int);
 extern int cifs_strict_fsync(struct file *, int);
@@ -125,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* EXPERIMENTAL */
 
-#define CIFS_VERSION   "1.69"
+#define CIFS_VERSION   "1.71"
 #endif                         /* _CIFSFS_H */
index 5bfb753..17afb0f 100644 (file)
@@ -166,6 +166,9 @@ struct TCP_Server_Info {
        struct socket *ssocket;
        struct sockaddr_storage dstaddr;
        struct sockaddr_storage srcaddr; /* locally bind to this IP */
+#ifdef CONFIG_NET_NS
+       struct net *net;
+#endif
        wait_queue_head_t response_q;
        wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
        struct list_head pending_mid_q;
@@ -185,6 +188,8 @@ struct TCP_Server_Info {
        /* multiplexed reads or writes */
        unsigned int maxBuf;    /* maxBuf specifies the maximum */
        /* message size the server can send or receive for non-raw SMBs */
+       /* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */
+       /* when socket is setup (and during reconnect) before NegProt sent */
        unsigned int max_rw;    /* maxRw specifies the maximum */
        /* message size the server can send or receive for */
        /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
@@ -216,6 +221,36 @@ struct TCP_Server_Info {
 #endif
 };
 
+/*
+ * Macros to allow the TCP_Server_Info->net field and related code to drop out
+ * when CONFIG_NET_NS isn't set.
+ */
+
+#ifdef CONFIG_NET_NS
+
+static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+{
+       return srv->net;
+}
+
+static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+{
+       srv->net = net;
+}
+
+#else
+
+static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+{
+       return &init_net;
+}
+
+static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+{
+}
+
+#endif
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -619,7 +654,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define   MID_REQUEST_SUBMITTED 2
 #define   MID_RESPONSE_RECEIVED 4
 #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
-#define   MID_NO_RESP_NEEDED 0x10
+#define   MID_RESPONSE_MALFORMED 0x10
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
index 982895f..8096f27 100644 (file)
@@ -85,6 +85,8 @@ extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
 extern bool is_valid_oplock_break(struct smb_hdr *smb,
                                  struct TCP_Server_Info *);
 extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
+extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+                           unsigned int bytes_written);
 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
 extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
 extern unsigned int smbCalcSize(struct smb_hdr *ptr);
@@ -373,7 +375,7 @@ extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
 extern int cifs_verify_signature(struct smb_hdr *,
                                 struct TCP_Server_Info *server,
                                __u32 expected_sequence_number);
-extern void SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
+extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
 extern int setup_ntlm_response(struct cifsSesInfo *);
 extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *);
 extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
@@ -423,4 +425,11 @@ extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
 extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
                const unsigned char *path,
                struct cifs_sb_info *cifs_sb, int xid);
+extern int mdfour(unsigned char *, unsigned char *, int);
+extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
+extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
+                       unsigned char *p24);
+extern void E_P16(unsigned char *p14, unsigned char *p16);
+extern void E_P24(unsigned char *p21, const unsigned char *c8,
+                       unsigned char *p24);
 #endif                 /* _CIFSPROTO_H */
index 3106f5e..904aa47 100644 (file)
@@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
                }
        }
 
-       if (ses->status == CifsExiting)
-               return -EIO;
-
        /*
         * Give demultiplex thread up to 10 seconds to reconnect, should be
         * greater than cifs socket timeout which is 7 seconds
@@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
                 * retrying until process is killed or server comes
                 * back on-line
                 */
-               if (!tcon->retry || ses->status == CifsExiting) {
+               if (!tcon->retry) {
                        cFYI(1, "gave up waiting on reconnect in smb_init");
                        return -EHOSTDOWN;
                }
@@ -4914,7 +4911,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
                   __u16 fid, __u32 pid_of_opener, bool SetAllocation)
 {
        struct smb_com_transaction2_sfi_req *pSMB  = NULL;
-       char *data_offset;
        struct file_end_of_file_info *parm_data;
        int rc = 0;
        __u16 params, param_offset, offset, byte_count, count;
@@ -4938,8 +4934,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
        param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
        offset = param_offset + params;
 
-       data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
-
        count = sizeof(struct file_end_of_file_info);
        pSMB->MaxParameterCount = cpu_to_le16(2);
        /* BB find exact max SMB PDU from sess structure BB */
index 18d3c77..8d6c17a 100644 (file)
@@ -55,9 +55,6 @@
 /* SMB echo "timeout" -- FIXME: tunable? */
 #define SMB_ECHO_INTERVAL (60 * HZ)
 
-extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
-                        unsigned char *p24);
-
 extern mempool_t *cifs_req_poolp;
 
 struct smb_vol {
@@ -87,6 +84,7 @@ struct smb_vol {
        bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
        bool server_ino:1; /* use inode numbers from server ie UniqueId */
        bool direct_io:1;
+       bool strict_io:1; /* strict cache behavior */
        bool remap:1;      /* set to remap seven reserved chars in filenames */
        bool posix_paths:1; /* unset to not ask for posix pathnames. */
        bool no_linux_ext:1;
@@ -339,8 +337,13 @@ cifs_echo_request(struct work_struct *work)
        struct TCP_Server_Info *server = container_of(work,
                                        struct TCP_Server_Info, echo.work);
 
-       /* no need to ping if we got a response recently */
-       if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+       /*
+        * We cannot send an echo until the NEGOTIATE_PROTOCOL request is
+        * done, which is indicated by maxBuf != 0. Also, no need to ping if
+        * we got a response recently
+        */
+       if (server->maxBuf == 0 ||
+           time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
                goto requeue_echo;
 
        rc = CIFSSMBEcho(server);
@@ -580,14 +583,23 @@ incomplete_rcv:
                else if (reconnect == 1)
                        continue;
 
-               length += 4; /* account for rfc1002 hdr */
+               total_read += 4; /* account for rfc1002 hdr */
 
+               dump_smb(smb_buffer, total_read);
 
-               dump_smb(smb_buffer, length);
-               if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
-                       cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
-                       continue;
-               }
+               /*
+                * We know that we received enough to get to the MID as we
+                * checked the pdu_length earlier. Now check to see
+                * if the rest of the header is OK. We borrow the length
+                * var for the rest of the loop to avoid a new stack var.
+                *
+                * 48 bytes is enough to display the header and a little bit
+                * into the payload for debugging purposes.
+                */
+               length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
+               if (length != 0)
+                       cifs_dump_mem("Bad SMB: ", smb_buffer,
+                                       min_t(unsigned int, total_read, 48));
 
                mid_entry = NULL;
                server->lstrp = jiffies;
@@ -599,7 +611,8 @@ incomplete_rcv:
                        if ((mid_entry->mid == smb_buffer->Mid) &&
                            (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
                            (mid_entry->command == smb_buffer->Command)) {
-                               if (check2ndT2(smb_buffer,server->maxBuf) > 0) {
+                               if (length == 0 &&
+                                  check2ndT2(smb_buffer, server->maxBuf) > 0) {
                                        /* We have a multipart transact2 resp */
                                        isMultiRsp = true;
                                        if (mid_entry->resp_buf) {
@@ -634,12 +647,17 @@ incomplete_rcv:
                                mid_entry->resp_buf = smb_buffer;
                                mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
-                               mid_entry->midState = MID_RESPONSE_RECEIVED;
-                               list_del_init(&mid_entry->qhead);
-                               mid_entry->callback(mid_entry);
+                               if (length == 0)
+                                       mid_entry->midState =
+                                                       MID_RESPONSE_RECEIVED;
+                               else
+                                       mid_entry->midState =
+                                                       MID_RESPONSE_MALFORMED;
 #ifdef CONFIG_CIFS_STATS2
                                mid_entry->when_received = jiffies;
 #endif
+                               list_del_init(&mid_entry->qhead);
+                               mid_entry->callback(mid_entry);
                                break;
                        }
                        mid_entry = NULL;
@@ -655,6 +673,9 @@ multi_t2_fnd:
                                else
                                        smallbuf = NULL;
                        }
+               } else if (length != 0) {
+                       /* response sanity checks failed */
+                       continue;
                } else if (!is_valid_oplock_break(smb_buffer, server) &&
                           !isMultiRsp) {
                        cERROR(1, "No task to wake, unknown frame received! "
@@ -1344,6 +1365,8 @@ cifs_parse_mount_options(char *options, const char *devname,
                        vol->direct_io = 1;
                } else if (strnicmp(data, "forcedirectio", 13) == 0) {
                        vol->direct_io = 1;
+               } else if (strnicmp(data, "strictcache", 11) == 0) {
+                       vol->strict_io = 1;
                } else if (strnicmp(data, "noac", 4) == 0) {
                        printk(KERN_WARNING "CIFS: Mount option noac not "
                                "supported. Instead set "
@@ -1568,6 +1591,9 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
 
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+               if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
+                       continue;
+
                if (!match_address(server, addr,
                                   (struct sockaddr *)&vol->srcaddr))
                        continue;
@@ -1598,6 +1624,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
                return;
        }
 
+       put_net(cifs_net_ns(server));
+
        list_del_init(&server->tcp_ses_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
@@ -1672,6 +1700,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
                goto out_err;
        }
 
+       cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
        tcp_ses->hostname = extract_hostname(volume_info->UNC);
        if (IS_ERR(tcp_ses->hostname)) {
                rc = PTR_ERR(tcp_ses->hostname);
@@ -1752,6 +1781,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
 out_err_crypto_release:
        cifs_crypto_shash_release(tcp_ses);
 
+       put_net(cifs_net_ns(tcp_ses));
+
 out_err:
        if (tcp_ses) {
                if (!IS_ERR(tcp_ses->hostname))
@@ -2263,8 +2294,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
        }
 
        if (socket == NULL) {
-               rc = sock_create_kern(sfamily, SOCK_STREAM,
-                                     IPPROTO_TCP, &socket);
+               rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
+                                  IPPROTO_TCP, &socket, 1);
                if (rc < 0) {
                        cERROR(1, "Error %d creating socket", rc);
                        server->ssocket = NULL;
@@ -2576,6 +2607,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
        if (pvolume_info->multiuser)
                cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
                                            CIFS_MOUNT_NO_PERM);
+       if (pvolume_info->strict_io)
+               cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
        if (pvolume_info->direct_io) {
                cFYI(1, "mounting share using direct i/o");
                cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
@@ -2977,7 +3010,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
                                         bcc_ptr);
                else
 #endif /* CIFS_WEAK_PW_HASH */
-               SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);
+               rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
+                                       bcc_ptr);
 
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
                if (ses->capabilities & CAP_UNICODE) {
index d7d65a7..e964b1c 100644 (file)
@@ -346,7 +346,6 @@ int cifs_open(struct inode *inode, struct file *file)
        struct cifsTconInfo *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *pCifsFile = NULL;
-       struct cifsInodeInfo *pCifsInode;
        char *full_path = NULL;
        bool posix_open_ok = false;
        __u16 netfid;
@@ -361,8 +360,6 @@ int cifs_open(struct inode *inode, struct file *file)
        }
        tcon = tlink_tcon(tlink);
 
-       pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-
        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
@@ -848,7 +845,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 }
 
 /* update the file size (if needed) after a write */
-static void
+void
 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
                      unsigned int bytes_written)
 {
@@ -1146,7 +1143,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
        char *write_data;
        int rc = -EFAULT;
        int bytes_written = 0;
-       struct cifs_sb_info *cifs_sb;
        struct inode *inode;
        struct cifsFileInfo *open_file;
 
@@ -1154,7 +1150,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
                return -EFAULT;
 
        inode = page->mapping->host;
-       cifs_sb = CIFS_SB(inode->i_sb);
 
        offset += (loff_t)from;
        write_data = kmap(page);
@@ -1619,13 +1614,215 @@ int cifs_flush(struct file *file, fl_owner_t id)
        return rc;
 }
 
+static int
+cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
+{
+       int rc = 0;
+       unsigned long i;
+
+       for (i = 0; i < num_pages; i++) {
+               pages[i] = alloc_page(__GFP_HIGHMEM);
+               if (!pages[i]) {
+                       /*
+                        * save number of pages we have already allocated and
+                        * return with ENOMEM error
+                        */
+                       num_pages = i;
+                       rc = -ENOMEM;
+                       goto error;
+               }
+       }
+
+       return rc;
+
+error:
+       for (i = 0; i < num_pages; i++)
+               put_page(pages[i]);
+       return rc;
+}
+
+static inline
+size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
+{
+       size_t num_pages;
+       size_t clen;
+
+       clen = min_t(const size_t, len, wsize);
+       num_pages = clen / PAGE_CACHE_SIZE;
+       if (clen % PAGE_CACHE_SIZE)
+               num_pages++;
+
+       if (cur_len)
+               *cur_len = clen;
+
+       return num_pages;
+}
+
+static ssize_t
+cifs_iovec_write(struct file *file, const struct iovec *iov,
+                unsigned long nr_segs, loff_t *poffset)
+{
+       unsigned int written;
+       unsigned long num_pages, npages, i;
+       size_t copied, len, cur_len;
+       ssize_t total_written = 0;
+       struct kvec *to_send;
+       struct page **pages;
+       struct iov_iter it;
+       struct inode *inode;
+       struct cifsFileInfo *open_file;
+       struct cifsTconInfo *pTcon;
+       struct cifs_sb_info *cifs_sb;
+       int xid, rc;
+
+       len = iov_length(iov, nr_segs);
+       if (!len)
+               return 0;
+
+       rc = generic_write_checks(file, poffset, &len, 0);
+       if (rc)
+               return rc;
+
+       cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+       num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+
+       pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
+       if (!to_send) {
+               kfree(pages);
+               return -ENOMEM;
+       }
+
+       rc = cifs_write_allocate_pages(pages, num_pages);
+       if (rc) {
+               kfree(pages);
+               kfree(to_send);
+               return rc;
+       }
+
+       xid = GetXid();
+       open_file = file->private_data;
+       pTcon = tlink_tcon(open_file->tlink);
+       inode = file->f_path.dentry->d_inode;
+
+       iov_iter_init(&it, iov, nr_segs, len, 0);
+       npages = num_pages;
+
+       do {
+               size_t save_len = cur_len;
+               for (i = 0; i < npages; i++) {
+                       copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
+                       copied = iov_iter_copy_from_user(pages[i], &it, 0,
+                                                        copied);
+                       cur_len -= copied;
+                       iov_iter_advance(&it, copied);
+                       to_send[i+1].iov_base = kmap(pages[i]);
+                       to_send[i+1].iov_len = copied;
+               }
+
+               cur_len = save_len - cur_len;
+
+               do {
+                       if (open_file->invalidHandle) {
+                               rc = cifs_reopen_file(open_file, false);
+                               if (rc != 0)
+                                       break;
+                       }
+                       rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
+                                          cur_len, *poffset, &written,
+                                          to_send, npages, 0);
+               } while (rc == -EAGAIN);
+
+               for (i = 0; i < npages; i++)
+                       kunmap(pages[i]);
+
+               if (written) {
+                       len -= written;
+                       total_written += written;
+                       cifs_update_eof(CIFS_I(inode), *poffset, written);
+                       *poffset += written;
+               } else if (rc < 0) {
+                       if (!total_written)
+                               total_written = rc;
+                       break;
+               }
+
+               /* get length and number of kvecs of the next write */
+               npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+       } while (len > 0);
+
+       if (total_written > 0) {
+               spin_lock(&inode->i_lock);
+               if (*poffset > inode->i_size)
+                       i_size_write(inode, *poffset);
+               spin_unlock(&inode->i_lock);
+       }
+
+       cifs_stats_bytes_written(pTcon, total_written);
+       mark_inode_dirty_sync(inode);
+
+       for (i = 0; i < num_pages; i++)
+               put_page(pages[i]);
+       kfree(to_send);
+       kfree(pages);
+       FreeXid(xid);
+       return total_written;
+}
+
+static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
+                               unsigned long nr_segs, loff_t pos)
+{
+       ssize_t written;
+       struct inode *inode;
+
+       inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+       /*
+        * BB - optimize the case when signing is disabled: we could drop this
+        * extra memory-to-memory copy and build the write request directly
+        * from the iovec buffers.
+        */
+
+       written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
+       if (written > 0) {
+               CIFS_I(inode)->invalid_mapping = true;
+               iocb->ki_pos = pos;
+       }
+
+       return written;
+}
+
+ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+                          unsigned long nr_segs, loff_t pos)
+{
+       struct inode *inode;
+
+       inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+       if (CIFS_I(inode)->clientCanCacheAll)
+               return generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+       /*
+        * In strict cache mode we need to write the data to the server exactly
+        * from pos to pos+len-1 rather than flush all affected pages, because
+        * flushing may cause an error with mandatory locks on these pages but
+        * not on the region from pos to pos+len-1.
+        */
+
+       return cifs_user_writev(iocb, iov, nr_segs, pos);
+}
+
 static ssize_t
 cifs_iovec_read(struct file *file, const struct iovec *iov,
                 unsigned long nr_segs, loff_t *poffset)
 {
        int rc;
        int xid;
-       unsigned int total_read, bytes_read = 0;
+       ssize_t total_read;
+       unsigned int bytes_read = 0;
        size_t len, cur_len;
        int iov_offset = 0;
        struct cifs_sb_info *cifs_sb;
index 306769d..e8804d3 100644 (file)
@@ -28,7 +28,6 @@
 #include "cifsproto.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
-#include "md5.h"
 
 #define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
 #define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
        md5_hash[8],  md5_hash[9],  md5_hash[10], md5_hash[11],\
        md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15]
 
+static int
+symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+{
+       int rc;
+       unsigned int size;
+       struct crypto_shash *md5;
+       struct sdesc *sdescmd5;
+
+       md5 = crypto_alloc_shash("md5", 0, 0);
+       if (IS_ERR(md5)) {
+               rc = PTR_ERR(md5);
+               cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
+               return rc;
+       }
+       size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
+       sdescmd5 = kmalloc(size, GFP_KERNEL);
+       if (!sdescmd5) {
+               rc = -ENOMEM;
+               cERROR(1, "%s: Memory allocation failure\n", __func__);
+               goto symlink_hash_err;
+       }
+       sdescmd5->shash.tfm = md5;
+       sdescmd5->shash.flags = 0x0;
+
+       rc = crypto_shash_init(&sdescmd5->shash);
+       if (rc) {
+               cERROR(1, "%s: Could not init md5 shash\n", __func__);
+               goto symlink_hash_err;
+       }
+       crypto_shash_update(&sdescmd5->shash, link_str, link_len);
+       rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
+
+symlink_hash_err:
+       crypto_free_shash(md5);
+       kfree(sdescmd5);
+
+       return rc;
+}
+
 static int
 CIFSParseMFSymlink(const u8 *buf,
                   unsigned int buf_len,
@@ -56,7 +94,6 @@ CIFSParseMFSymlink(const u8 *buf,
        unsigned int link_len;
        const char *md5_str1;
        const char *link_str;
-       struct MD5Context md5_ctx;
        u8 md5_hash[16];
        char md5_str2[34];
 
@@ -70,9 +107,11 @@ CIFSParseMFSymlink(const u8 *buf,
        if (rc != 1)
                return -EINVAL;
 
-       cifs_MD5_init(&md5_ctx);
-       cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
-       cifs_MD5_final(md5_hash, &md5_ctx);
+       rc = symlink_hash(link_len, link_str, md5_hash);
+       if (rc) {
+               cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+               return rc;
+       }
 
        snprintf(md5_str2, sizeof(md5_str2),
                 CIFS_MF_SYMLINK_MD5_FORMAT,
@@ -94,9 +133,9 @@ CIFSParseMFSymlink(const u8 *buf,
 static int
 CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
 {
+       int rc;
        unsigned int link_len;
        unsigned int ofs;
-       struct MD5Context md5_ctx;
        u8 md5_hash[16];
 
        if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
@@ -107,9 +146,11 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
        if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
                return -ENAMETOOLONG;
 
-       cifs_MD5_init(&md5_ctx);
-       cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
-       cifs_MD5_final(md5_hash, &md5_ctx);
+       rc = symlink_hash(link_len, link_str, md5_hash);
+       if (rc) {
+               cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+               return rc;
+       }
 
        snprintf(buf, buf_len,
                 CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
deleted file mode 100644 (file)
index a725c26..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
-   Unix SMB/Netbios implementation.
-   Version 1.9.
-   a implementation of MD4 designed for use in the SMB authentication protocol
-   Copyright (C) Andrew Tridgell 1997-1998.
-   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-#include <linux/module.h>
-#include <linux/fs.h>
-#include "cifsencrypt.h"
-
-/* NOTE: This code makes no attempt to be fast! */
-
-static __u32
-F(__u32 X, __u32 Y, __u32 Z)
-{
-       return (X & Y) | ((~X) & Z);
-}
-
-static __u32
-G(__u32 X, __u32 Y, __u32 Z)
-{
-       return (X & Y) | (X & Z) | (Y & Z);
-}
-
-static __u32
-H(__u32 X, __u32 Y, __u32 Z)
-{
-       return X ^ Y ^ Z;
-}
-
-static __u32
-lshift(__u32 x, int s)
-{
-       x &= 0xFFFFFFFF;
-       return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
-}
-
-#define ROUND1(a,b,c,d,k,s) (*a) = lshift((*a) + F(*b,*c,*d) + X[k], s)
-#define ROUND2(a,b,c,d,k,s) (*a) = lshift((*a) + G(*b,*c,*d) + X[k] + (__u32)0x5A827999,s)
-#define ROUND3(a,b,c,d,k,s) (*a) = lshift((*a) + H(*b,*c,*d) + X[k] + (__u32)0x6ED9EBA1,s)
-
-/* this applies md4 to 64 byte chunks */
-static void
-mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D)
-{
-       int j;
-       __u32 AA, BB, CC, DD;
-       __u32 X[16];
-
-
-       for (j = 0; j < 16; j++)
-               X[j] = M[j];
-
-       AA = *A;
-       BB = *B;
-       CC = *C;
-       DD = *D;
-
-       ROUND1(A, B, C, D, 0, 3);
-       ROUND1(D, A, B, C, 1, 7);
-       ROUND1(C, D, A, B, 2, 11);
-       ROUND1(B, C, D, A, 3, 19);
-       ROUND1(A, B, C, D, 4, 3);
-       ROUND1(D, A, B, C, 5, 7);
-       ROUND1(C, D, A, B, 6, 11);
-       ROUND1(B, C, D, A, 7, 19);
-       ROUND1(A, B, C, D, 8, 3);
-       ROUND1(D, A, B, C, 9, 7);
-       ROUND1(C, D, A, B, 10, 11);
-       ROUND1(B, C, D, A, 11, 19);
-       ROUND1(A, B, C, D, 12, 3);
-       ROUND1(D, A, B, C, 13, 7);
-       ROUND1(C, D, A, B, 14, 11);
-       ROUND1(B, C, D, A, 15, 19);
-
-       ROUND2(A, B, C, D, 0, 3);
-       ROUND2(D, A, B, C, 4, 5);
-       ROUND2(C, D, A, B, 8, 9);
-       ROUND2(B, C, D, A, 12, 13);
-       ROUND2(A, B, C, D, 1, 3);
-       ROUND2(D, A, B, C, 5, 5);
-       ROUND2(C, D, A, B, 9, 9);
-       ROUND2(B, C, D, A, 13, 13);
-       ROUND2(A, B, C, D, 2, 3);
-       ROUND2(D, A, B, C, 6, 5);
-       ROUND2(C, D, A, B, 10, 9);
-       ROUND2(B, C, D, A, 14, 13);
-       ROUND2(A, B, C, D, 3, 3);
-       ROUND2(D, A, B, C, 7, 5);
-       ROUND2(C, D, A, B, 11, 9);
-       ROUND2(B, C, D, A, 15, 13);
-
-       ROUND3(A, B, C, D, 0, 3);
-       ROUND3(D, A, B, C, 8, 9);
-       ROUND3(C, D, A, B, 4, 11);
-       ROUND3(B, C, D, A, 12, 15);
-       ROUND3(A, B, C, D, 2, 3);
-       ROUND3(D, A, B, C, 10, 9);
-       ROUND3(C, D, A, B, 6, 11);
-       ROUND3(B, C, D, A, 14, 15);
-       ROUND3(A, B, C, D, 1, 3);
-       ROUND3(D, A, B, C, 9, 9);
-       ROUND3(C, D, A, B, 5, 11);
-       ROUND3(B, C, D, A, 13, 15);
-       ROUND3(A, B, C, D, 3, 3);
-       ROUND3(D, A, B, C, 11, 9);
-       ROUND3(C, D, A, B, 7, 11);
-       ROUND3(B, C, D, A, 15, 15);
-
-       *A += AA;
-       *B += BB;
-       *C += CC;
-       *D += DD;
-
-       *A &= 0xFFFFFFFF;
-       *B &= 0xFFFFFFFF;
-       *C &= 0xFFFFFFFF;
-       *D &= 0xFFFFFFFF;
-
-       for (j = 0; j < 16; j++)
-               X[j] = 0;
-}
-
-static void
-copy64(__u32 *M, unsigned char *in)
-{
-       int i;
-
-       for (i = 0; i < 16; i++)
-               M[i] = (in[i * 4 + 3] << 24) | (in[i * 4 + 2] << 16) |
-                   (in[i * 4 + 1] << 8) | (in[i * 4 + 0] << 0);
-}
-
-static void
-copy4(unsigned char *out, __u32 x)
-{
-       out[0] = x & 0xFF;
-       out[1] = (x >> 8) & 0xFF;
-       out[2] = (x >> 16) & 0xFF;
-       out[3] = (x >> 24) & 0xFF;
-}
-
-/* produce a md4 message digest from data of length n bytes */
-void
-mdfour(unsigned char *out, unsigned char *in, int n)
-{
-       unsigned char buf[128];
-       __u32 M[16];
-       __u32 b = n * 8;
-       int i;
-       __u32 A = 0x67452301;
-       __u32 B = 0xefcdab89;
-       __u32 C = 0x98badcfe;
-       __u32 D = 0x10325476;
-
-       while (n > 64) {
-               copy64(M, in);
-               mdfour64(M, &A, &B, &C, &D);
-               in += 64;
-               n -= 64;
-       }
-
-       for (i = 0; i < 128; i++)
-               buf[i] = 0;
-       memcpy(buf, in, n);
-       buf[n] = 0x80;
-
-       if (n <= 55) {
-               copy4(buf + 56, b);
-               copy64(M, buf);
-               mdfour64(M, &A, &B, &C, &D);
-       } else {
-               copy4(buf + 120, b);
-               copy64(M, buf);
-               mdfour64(M, &A, &B, &C, &D);
-               copy64(M, buf + 64);
-               mdfour64(M, &A, &B, &C, &D);
-       }
-
-       for (i = 0; i < 128; i++)
-               buf[i] = 0;
-       copy64(M, buf);
-
-       copy4(out, A);
-       copy4(out + 4, B);
-       copy4(out + 8, C);
-       copy4(out + 12, D);
-
-       A = B = C = D = 0;
-}
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
deleted file mode 100644 (file)
index 98b66a5..0000000
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * This code implements the MD5 message-digest algorithm.
- * The algorithm is due to Ron Rivest.  This code was
- * written by Colin Plumb in 1993, no copyright is claimed.
- * This code is in the public domain; do with it what you wish.
- *
- * Equivalent code is available from RSA Data Security, Inc.
- * This code has been tested against that, and is equivalent,
- * except that you don't need to include two pages of legalese
- * with every copy.
- *
- * To compute the message digest of a chunk of bytes, declare an
- * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as
- * needed on buffers full of bytes, and then call cifs_MD5_final, which
- * will fill a supplied 16-byte array with the digest.
- */
-
-/* This code slightly modified to fit into Samba by
-   abartlet@samba.org Jun 2001
-   and to fit the cifs vfs by
-   Steve French sfrench@us.ibm.com */
-
-#include <linux/string.h>
-#include "md5.h"
-
-static void MD5Transform(__u32 buf[4], __u32 const in[16]);
-
-/*
- * Note: this code is harmless on little-endian machines.
- */
-static void
-byteReverse(unsigned char *buf, unsigned longs)
-{
-       __u32 t;
-       do {
-               t = (__u32) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
-                   ((unsigned) buf[1] << 8 | buf[0]);
-               *(__u32 *) buf = t;
-               buf += 4;
-       } while (--longs);
-}
-
-/*
- * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
- * initialization constants.
- */
-void
-cifs_MD5_init(struct MD5Context *ctx)
-{
-       ctx->buf[0] = 0x67452301;
-       ctx->buf[1] = 0xefcdab89;
-       ctx->buf[2] = 0x98badcfe;
-       ctx->buf[3] = 0x10325476;
-
-       ctx->bits[0] = 0;
-       ctx->bits[1] = 0;
-}
-
-/*
- * Update context to reflect the concatenation of another buffer full
- * of bytes.
- */
-void
-cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
-{
-       register __u32 t;
-
-       /* Update bitcount */
-
-       t = ctx->bits[0];
-       if ((ctx->bits[0] = t + ((__u32) len << 3)) < t)
-               ctx->bits[1]++; /* Carry from low to high */
-       ctx->bits[1] += len >> 29;
-
-       t = (t >> 3) & 0x3f;    /* Bytes already in shsInfo->data */
-
-       /* Handle any leading odd-sized chunks */
-
-       if (t) {
-               unsigned char *p = (unsigned char *) ctx->in + t;
-
-               t = 64 - t;
-               if (len < t) {
-                       memmove(p, buf, len);
-                       return;
-               }
-               memmove(p, buf, t);
-               byteReverse(ctx->in, 16);
-               MD5Transform(ctx->buf, (__u32 *) ctx->in);
-               buf += t;
-               len -= t;
-       }
-       /* Process data in 64-byte chunks */
-
-       while (len >= 64) {
-               memmove(ctx->in, buf, 64);
-               byteReverse(ctx->in, 16);
-               MD5Transform(ctx->buf, (__u32 *) ctx->in);
-               buf += 64;
-               len -= 64;
-       }
-
-       /* Handle any remaining bytes of data. */
-
-       memmove(ctx->in, buf, len);
-}
-
-/*
- * Final wrapup - pad to 64-byte boundary with the bit pattern
- * 1 0* (64-bit count of bits processed, MSB-first)
- */
-void
-cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx)
-{
-       unsigned int count;
-       unsigned char *p;
-
-       /* Compute number of bytes mod 64 */
-       count = (ctx->bits[0] >> 3) & 0x3F;
-
-       /* Set the first char of padding to 0x80.  This is safe since there is
-          always at least one byte free */
-       p = ctx->in + count;
-       *p++ = 0x80;
-
-       /* Bytes of padding needed to make 64 bytes */
-       count = 64 - 1 - count;
-
-       /* Pad out to 56 mod 64 */
-       if (count < 8) {
-               /* Two lots of padding:  Pad the first block to 64 bytes */
-               memset(p, 0, count);
-               byteReverse(ctx->in, 16);
-               MD5Transform(ctx->buf, (__u32 *) ctx->in);
-
-               /* Now fill the next block with 56 bytes */
-               memset(ctx->in, 0, 56);
-       } else {
-               /* Pad block to 56 bytes */
-               memset(p, 0, count - 8);
-       }
-       byteReverse(ctx->in, 14);
-
-       /* Append length in bits and transform */
-       ((__u32 *) ctx->in)[14] = ctx->bits[0];
-       ((__u32 *) ctx->in)[15] = ctx->bits[1];
-
-       MD5Transform(ctx->buf, (__u32 *) ctx->in);
-       byteReverse((unsigned char *) ctx->buf, 4);
-       memmove(digest, ctx->buf, 16);
-       memset(ctx, 0, sizeof(*ctx));   /* In case it's sensitive */
-}
-
-/* The four core functions - F1 is optimized somewhat */
-
-/* #define F1(x, y, z) (x & y | ~x & z) */
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-/* This is the central step in the MD5 algorithm. */
-#define MD5STEP(f, w, x, y, z, data, s) \
-       (w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x)
-
-/*
- * The core of the MD5 algorithm, this alters an existing MD5 hash to
- * reflect the addition of 16 longwords of new data.  cifs_MD5_update blocks
- * the data and converts bytes into longwords for this routine.
- */
-static void
-MD5Transform(__u32 buf[4], __u32 const in[16])
-{
-       register __u32 a, b, c, d;
-
-       a = buf[0];
-       b = buf[1];
-       c = buf[2];
-       d = buf[3];
-
-       MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-       MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-       MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-       MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-       MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-       MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-       MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-       MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-       MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-       MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-       MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-       MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-       MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-       MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-       MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-       MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-       MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-       MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-       MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-       MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-       MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-       MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-       MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-       MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-       MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-       MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-       MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-       MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-       MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-       MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-       MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-       MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-       MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-       MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-       MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-       MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-       MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-       MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-       MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-       MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-       MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-       MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-       MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-       MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-       MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-       MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-       MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-       MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-       MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-       MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-       MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-       MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-       MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-       MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-       MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-       MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-       MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-       MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-       MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-       MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-       MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-       MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-       MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-       MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-       buf[0] += a;
-       buf[1] += b;
-       buf[2] += c;
-       buf[3] += d;
-}
-
-#if 0   /* currently unused */
-/***********************************************************************
- the rfc 2104 version of hmac_md5 initialisation.
-***********************************************************************/
-static void
-hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-                     struct HMACMD5Context *ctx)
-{
-       int i;
-
-       /* if key is longer than 64 bytes reset it to key=MD5(key) */
-       if (key_len > 64) {
-               unsigned char tk[16];
-               struct MD5Context tctx;
-
-               cifs_MD5_init(&tctx);
-               cifs_MD5_update(&tctx, key, key_len);
-               cifs_MD5_final(tk, &tctx);
-
-               key = tk;
-               key_len = 16;
-       }
-
-       /* start out by storing key in pads */
-       memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
-       memset(ctx->k_opad, 0, sizeof(ctx->k_opad));
-       memcpy(ctx->k_ipad, key, key_len);
-       memcpy(ctx->k_opad, key, key_len);
-
-       /* XOR key with ipad and opad values */
-       for (i = 0; i < 64; i++) {
-               ctx->k_ipad[i] ^= 0x36;
-               ctx->k_opad[i] ^= 0x5c;
-       }
-
-       cifs_MD5_init(&ctx->ctx);
-       cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
-}
-#endif
-
-/***********************************************************************
- the microsoft version of hmac_md5 initialisation.
-***********************************************************************/
-void
-hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
-                        struct HMACMD5Context *ctx)
-{
-       int i;
-
-       /* if key is longer than 64 bytes truncate it */
-       if (key_len > 64)
-               key_len = 64;
-
-       /* start out by storing key in pads */
-       memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
-       memset(ctx->k_opad, 0, sizeof(ctx->k_opad));
-       memcpy(ctx->k_ipad, key, key_len);
-       memcpy(ctx->k_opad, key, key_len);
-
-       /* XOR key with ipad and opad values */
-       for (i = 0; i < 64; i++) {
-               ctx->k_ipad[i] ^= 0x36;
-               ctx->k_opad[i] ^= 0x5c;
-       }
-
-       cifs_MD5_init(&ctx->ctx);
-       cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
-}
-
-/***********************************************************************
- update hmac_md5 "inner" buffer
-***********************************************************************/
-void
-hmac_md5_update(const unsigned char *text, int text_len,
-               struct HMACMD5Context *ctx)
-{
-       cifs_MD5_update(&ctx->ctx, text, text_len);     /* then text of datagram */
-}
-
-/***********************************************************************
- finish off hmac_md5 "inner" buffer and generate outer one.
-***********************************************************************/
-void
-hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx)
-{
-       struct MD5Context ctx_o;
-
-       cifs_MD5_final(digest, &ctx->ctx);
-
-       cifs_MD5_init(&ctx_o);
-       cifs_MD5_update(&ctx_o, ctx->k_opad, 64);
-       cifs_MD5_update(&ctx_o, digest, 16);
-       cifs_MD5_final(digest, &ctx_o);
-}
-
-/***********************************************************
- single function to calculate an HMAC MD5 digest from data.
- use the microsoft hmacmd5 init method because the key is 16 bytes.
-************************************************************/
-#if 0 /* currently unused */
-static void
-hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-        unsigned char *digest)
-{
-       struct HMACMD5Context ctx;
-       hmac_md5_init_limK_to_64(key, 16, &ctx);
-       if (data_len != 0)
-               hmac_md5_update(data, data_len, &ctx);
-
-       hmac_md5_final(digest, &ctx);
-}
-#endif
diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h
deleted file mode 100644 (file)
index 6fba8cb..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef MD5_H
-#define MD5_H
-#ifndef HEADER_MD5_H
-/* Try to avoid clashes with OpenSSL */
-#define HEADER_MD5_H
-#endif
-
-struct MD5Context {
-       __u32 buf[4];
-       __u32 bits[2];
-       unsigned char in[64];
-};
-#endif                         /* !MD5_H */
-
-#ifndef _HMAC_MD5_H
-struct HMACMD5Context {
-       struct MD5Context ctx;
-       unsigned char k_ipad[65];
-       unsigned char k_opad[65];
-};
-#endif                         /* _HMAC_MD5_H */
-
-void cifs_MD5_init(struct MD5Context *context);
-void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf,
-                       unsigned len);
-void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context);
-
-/* The following definitions come from lib/hmacmd5.c  */
-
-/* void hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-                       struct HMACMD5Context *ctx);*/
-void hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
-                       struct HMACMD5Context *ctx);
-void hmac_md5_update(const unsigned char *text, int text_len,
-                       struct HMACMD5Context *ctx);
-void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx);
-/* void hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-                       unsigned char *digest);*/
index a09e077..2a930a7 100644 (file)
@@ -236,10 +236,7 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
 {
        __u16 mid = 0;
        __u16 last_mid;
-       int   collision;
-
-       if (server == NULL)
-               return mid;
+       bool collision;
 
        spin_lock(&GlobalMid_Lock);
        last_mid = server->CurrentMid; /* we do not want to loop forever */
@@ -252,24 +249,38 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
        (and it would also have to have been a request that
         did not time out) */
        while (server->CurrentMid != last_mid) {
-               struct list_head *tmp;
                struct mid_q_entry *mid_entry;
+               unsigned int num_mids;
 
-               collision = 0;
+               collision = false;
                if (server->CurrentMid == 0)
                        server->CurrentMid++;
 
-               list_for_each(tmp, &server->pending_mid_q) {
-                       mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-
-                       if ((mid_entry->mid == server->CurrentMid) &&
-                           (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
+               num_mids = 0;
+               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+                       ++num_mids;
+                       if (mid_entry->mid == server->CurrentMid &&
+                           mid_entry->midState == MID_REQUEST_SUBMITTED) {
                                /* This mid is in use, try a different one */
-                               collision = 1;
+                               collision = true;
                                break;
                        }
                }
-               if (collision == 0) {
+
+               /*
+                * if we have more than 32k mids in the list, then something
+                * is very wrong. Possibly a local user is trying to DoS the
+                * box by issuing long-running calls and SIGKILL'ing them. If
+                * we get to 2^16 mids then we're in big trouble as this
+                * function could loop forever.
+                *
+                * Go ahead and assign out the mid in this situation, but force
+                * an eventual reconnect to clean out the pending_mid_q.
+                */
+               if (num_mids > 32768)
+                       server->tcpStatus = CifsNeedReconnect;
+
+               if (!collision) {
                        mid = server->CurrentMid;
                        break;
                }
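
The loop above is the standard list_for_each() plus list_entry() to list_for_each_entry() conversion; both spellings walk the same list, the newer macro just folds the container lookup in. Side-by-side sketch using the identifiers from this hunk:

/* before: iterate raw nodes, convert each by hand */
struct list_head *tmp;
struct mid_q_entry *mid_entry;

list_for_each(tmp, &server->pending_mid_q) {
	mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
	/* ... */
}

/* after: the macro performs the container_of() itself */
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
	/* ... */
}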
@@ -381,29 +392,31 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 }
 
 static int
-checkSMBhdr(struct smb_hdr *smb, __u16 mid)
+check_smb_hdr(struct smb_hdr *smb, __u16 mid)
 {
-       /* Make sure that this really is an SMB, that it is a response,
-          and that the message ids match */
-       if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
-               (mid == smb->Mid)) {
-               if (smb->Flags & SMBFLG_RESPONSE)
-                       return 0;
-               else {
-               /* only one valid case where server sends us request */
-                       if (smb->Command == SMB_COM_LOCKING_ANDX)
-                               return 0;
-                       else
-                               cERROR(1, "Received Request not response");
-               }
-       } else { /* bad signature or mid */
-               if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
-                       cERROR(1, "Bad protocol string signature header %x",
-                               *(unsigned int *) smb->Protocol);
-               if (mid != smb->Mid)
-                       cERROR(1, "Mids do not match");
+       /* does it have the right SMB "signature" ? */
+       if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
+               cERROR(1, "Bad protocol string signature header 0x%x",
+                       *(unsigned int *)smb->Protocol);
+               return 1;
+       }
+
+       /* Make sure that message ids match */
+       if (mid != smb->Mid) {
+               cERROR(1, "Mids do not match. received=%u expected=%u",
+                       smb->Mid, mid);
+               return 1;
        }
-       cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
+
+       /* if it's a response then accept */
+       if (smb->Flags & SMBFLG_RESPONSE)
+               return 0;
+
+       /* only one valid case where server sends us request */
+       if (smb->Command == SMB_COM_LOCKING_ANDX)
+               return 0;
+
+       cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
        return 1;
 }
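
The constant 0x424d53ff checked above is just the 4-byte SMB signature 0xFF 'S' 'M' 'B' read as a little-endian 32-bit word; a quick standalone check (not part of the patch):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const unsigned char sig[4] = { 0xff, 'S', 'M', 'B' };
	uint32_t word;

	memcpy(&word, sig, sizeof(word));
	/* on a little-endian host this prints 424d53ff */
	printf("%08x\n", word);
	return 0;
}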
 
@@ -448,7 +461,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
                return 1;
        }
 
-       if (checkSMBhdr(smb, mid))
+       if (check_smb_hdr(smb, mid))
                return 1;
        clc_len = smbCalcSize_LE(smb);
 
@@ -465,25 +478,26 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
                        if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
                                return 0; /* bcc wrapped */
                }
-               cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
+               cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
                                clc_len, 4 + len, smb->Mid);
-               /* Windows XP can return a few bytes too much, presumably
-               an illegal pad, at the end of byte range lock responses
-               so we allow for that three byte pad, as long as actual
-               received length is as long or longer than calculated length */
-               /* We have now had to extend this more, since there is a
-               case in which it needs to be bigger still to handle a
-               malformed response to transact2 findfirst from WinXP when
-               access denied is returned and thus bcc and wct are zero
-               but server says length is 0x21 bytes too long as if the server
-               forget to reset the smb rfc1001 length when it reset the
-               wct and bcc to minimum size and drop the t2 parms and data */
-               if ((4+len > clc_len) && (len <= clc_len + 512))
-                       return 0;
-               else {
-                       cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
+
+               if (4 + len < clc_len) {
+                       cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
                                        len, smb->Mid);
                        return 1;
+               } else if (len > clc_len + 512) {
+                       /*
+                        * Some servers (Windows XP in particular) send more
+                        * data than the lengths in the SMB packet would
+                        * indicate on certain calls (byte range locks and
+                        * trans2 find first calls in particular). While the
+                        * client can handle such a frame by ignoring the
+                        * trailing data, we choose to limit the amount of extra
+                        * data to 512 bytes.
+                        */
+                       cERROR(1, "RFC1001 size %u more than 512 bytes larger "
+                                 "than SMB for mid=%u", len, smb->Mid);
+                       return 1;
                }
        }
        return 0;
index 8d9189f..79f641e 100644 (file)
@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
 {
        int rc, alen, slen;
        const char *pct;
-       char *endp, scope_id[13];
+       char scope_id[13];
        struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
        struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
 
@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
                memcpy(scope_id, pct + 1, slen);
                scope_id[slen] = '\0';
 
-               s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
-               if (endp != scope_id + slen)
-                       return 0;
+               rc = strict_strtoul(scope_id, 0,
+                                       (unsigned long *)&s6->sin6_scope_id);
+               rc = (rc == 0) ? 1 : 0;
        }
 
        return rc;
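
strict_strtoul() returns 0 on success and a negative errno when the string is empty or carries trailing junk, so it subsumes the hand-rolled endp comparison that was removed (which, note, parsed from pct while checking against scope_id). The intent, roughly (sketch, not the patch itself):

unsigned long id;

if (strict_strtoul(scope_id, 0, &id))	/* non-zero rc: not a clean number */
	return 0;			/* caller treats 0 as "no address parsed" */
s6->sin6_scope_id = (u32)id;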
index 7f25cc3..f8e4cd2 100644 (file)
@@ -764,7 +764,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
 {
        int rc = 0;
        int xid, i;
-       struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *cifsFile = NULL;
        char *current_entry;
@@ -775,8 +774,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
 
        xid = GetXid();
 
-       cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
        /*
         * Ensure FindFirst doesn't fail before doing filldir() for '.' and
         * '..'. Otherwise we won't be able to notify VFS in case of failure.
index 1adc962..1676570 100644 (file)
@@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate:
 
        if (type == LANMAN) {
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-               char lnm_session_key[CIFS_SESS_KEY_SIZE];
+               char lnm_session_key[CIFS_AUTH_RESP_SIZE];
 
                pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
 
                /* no capabilities flags in old lanman negotiation */
 
-               pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+               pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 
                /* Calculate hash with password and copy into bcc_ptr.
                 * Encryption Key (stored as in cryptkey) gets used if the
@@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate:
                                        true : false, lnm_session_key);
 
                ses->flags |= CIFS_SES_LANMAN;
-               memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
-               bcc_ptr += CIFS_SESS_KEY_SIZE;
+               memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+               bcc_ptr += CIFS_AUTH_RESP_SIZE;
 
                /* can not sign if LANMAN negotiated so no need
                to calculate signing key? but what if server
index b6b6dcb..0472148 100644 (file)
@@ -45,7 +45,6 @@
    up with a different answer to the one above)
 */
 #include <linux/slab.h>
-#include "cifsencrypt.h"
 #define uchar unsigned char
 
 static uchar perm1[56] = { 57, 49, 41, 33, 25, 17, 9,
index 192ea51..b5041c8 100644 (file)
@@ -32,9 +32,8 @@
 #include "cifs_unicode.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
-#include "md5.h"
 #include "cifs_debug.h"
-#include "cifsencrypt.h"
+#include "cifsproto.h"
 
 #ifndef false
 #define false 0
 #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
 #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
 
-/*The following definitions come from  libsmb/smbencrypt.c  */
+/* produce a md4 message digest from data of length n bytes */
+int
+mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
+{
+       int rc;
+       unsigned int size;
+       struct crypto_shash *md4;
+       struct sdesc *sdescmd4;
+
+       md4 = crypto_alloc_shash("md4", 0, 0);
+       if (IS_ERR(md4)) {
+               rc = PTR_ERR(md4);
+               cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
+               return rc;
+       }
+       size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
+       sdescmd4 = kmalloc(size, GFP_KERNEL);
+       if (!sdescmd4) {
+               rc = -ENOMEM;
+               cERROR(1, "%s: Memory allocation failure\n", __func__);
+               goto mdfour_err;
+       }
+       sdescmd4->shash.tfm = md4;
+       sdescmd4->shash.flags = 0x0;
+
+       rc = crypto_shash_init(&sdescmd4->shash);
+       if (rc) {
+               cERROR(1, "%s: Could not init md4 shash\n", __func__);
+               goto mdfour_err;
+       }
+       crypto_shash_update(&sdescmd4->shash, link_str, link_len);
+       rc = crypto_shash_final(&sdescmd4->shash, md4_hash);
 
-void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-               unsigned char *p24);
-void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-static void SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
-                  unsigned char p24[24]);
-void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+mdfour_err:
+       crypto_free_shash(md4);
+       kfree(sdescmd4);
+
+       return rc;
+}
+
+/* Does the des encryption from the NT or LM MD4 hash. */
+static void
+SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
+             unsigned char p24[24])
+{
+       unsigned char p21[21];
+
+       memset(p21, '\0', 21);
+
+       memcpy(p21, passwd, 16);
+       E_P24(p21, c8, p24);
+}
 
 /*
    This implements the X/Open SMB password encryption
@@ -118,9 +161,10 @@ _my_mbstowcs(__u16 *dst, const unsigned char *src, int len)
  * Creates the MD4 Hash of the users password in NT UNICODE.
  */
 
-void
+int
 E_md4hash(const unsigned char *passwd, unsigned char *p16)
 {
+       int rc;
        int len;
        __u16 wpwd[129];
 
@@ -139,8 +183,10 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
        /* Calculate length in bytes */
        len = _my_wcslen(wpwd) * sizeof(__u16);
 
-       mdfour(p16, (unsigned char *) wpwd, len);
+       rc = mdfour(p16, (unsigned char *) wpwd, len);
        memset(wpwd, 0, 129 * 2);
+
+       return rc;
 }
 
 #if 0 /* currently unused */
@@ -212,19 +258,6 @@ ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
 }
 #endif
 
-/* Does the des encryption from the NT or LM MD4 hash. */
-static void
-SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
-             unsigned char p24[24])
-{
-       unsigned char p21[21];
-
-       memset(p21, '\0', 21);
-
-       memcpy(p21, passwd, 16);
-       E_P24(p21, c8, p24);
-}
-
 /* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
 #if 0 /* currently unused */
 static void
@@ -242,16 +275,21 @@ NTLMSSPOWFencrypt(unsigned char passwd[8],
 #endif
 
 /* Does the NT MD4 hash then des encryption. */
-
-void
+int
 SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
 {
+       int rc;
        unsigned char p21[21];
 
        memset(p21, '\0', 21);
 
-       E_md4hash(passwd, p21);
+       rc = E_md4hash(passwd, p21);
+       if (rc) {
+               cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
+               return rc;
+       }
        SMBOWFencrypt(p21, c8, p24);
+       return rc;
 }
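
Since E_md4hash() and SMBNTencrypt() now report crypto-API failures instead of returning void, callers are expected to check the result rather than sending a response built from an uninitialized hash. A hypothetical caller (password and server_challenge are placeholders):

unsigned char nt_response[24];
int rc;

rc = SMBNTencrypt(password, server_challenge, nt_response);
if (rc) {
	cERROR(1, "Error %d building NT response", rc);
	return rc;			/* do not send a half-built blob */
}
/* ... copy nt_response into the session setup request ... */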
 
 
index c1ccca1..46d8756 100644 (file)
@@ -236,9 +236,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
                server->tcpStatus = CifsNeedReconnect;
        }
 
-       if (rc < 0) {
+       if (rc < 0 && rc != -EINTR)
                cERROR(1, "Error %d sending data on socket to server", rc);
-       else
+       else
                rc = 0;
 
        /* Don't want to modify the buffer as a
@@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
        if (rc)
                return rc;
 
+       /* enable signing if server requires it */
+       if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+               in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
        mutex_lock(&server->srv_mutex);
        mid = AllocMidQEntry(in_buf, server);
        if (mid == NULL) {
@@ -453,6 +457,9 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
+       case MID_RESPONSE_MALFORMED:
+               rc = -EIO;
+               break;
        default:
                cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
                        mid->mid, mid->midState);
@@ -570,17 +577,33 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 #endif
 
        mutex_unlock(&ses->server->srv_mutex);
-       cifs_small_buf_release(in_buf);
 
-       if (rc < 0)
+       if (rc < 0) {
+               cifs_small_buf_release(in_buf);
                goto out;
+       }
 
-       if (long_op == CIFS_ASYNC_OP)
+       if (long_op == CIFS_ASYNC_OP) {
+               cifs_small_buf_release(in_buf);
                goto out;
+       }
 
        rc = wait_for_response(ses->server, midQ);
-       if (rc != 0)
-               goto out;
+       if (rc != 0) {
+               send_nt_cancel(ses->server, in_buf, midQ);
+               spin_lock(&GlobalMid_Lock);
+               if (midQ->midState == MID_REQUEST_SUBMITTED) {
+                       midQ->callback = DeleteMidQEntry;
+                       spin_unlock(&GlobalMid_Lock);
+                       cifs_small_buf_release(in_buf);
+                       atomic_dec(&ses->server->inFlight);
+                       wake_up(&ses->server->request_q);
+                       return rc;
+               }
+               spin_unlock(&GlobalMid_Lock);
+       }
+
+       cifs_small_buf_release(in_buf);
 
        rc = sync_mid_result(midQ, ses->server);
        if (rc != 0) {
@@ -724,8 +747,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                goto out;
 
        rc = wait_for_response(ses->server, midQ);
-       if (rc != 0)
-               goto out;
+       if (rc != 0) {
+               send_nt_cancel(ses->server, in_buf, midQ);
+               spin_lock(&GlobalMid_Lock);
+               if (midQ->midState == MID_REQUEST_SUBMITTED) {
+                       /* no longer considered to be "in-flight" */
+                       midQ->callback = DeleteMidQEntry;
+                       spin_unlock(&GlobalMid_Lock);
+                       atomic_dec(&ses->server->inFlight);
+                       wake_up(&ses->server->request_q);
+                       return rc;
+               }
+               spin_unlock(&GlobalMid_Lock);
+       }
 
        rc = sync_mid_result(midQ, ses->server);
        if (rc != 0) {
@@ -922,10 +956,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
                        }
                }
 
-               if (wait_for_response(ses->server, midQ) == 0) {
-                       /* We got the response - restart system call. */
-                       rstart = 1;
+               rc = wait_for_response(ses->server, midQ);
+               if (rc) {
+                       send_nt_cancel(ses->server, in_buf, midQ);
+                       spin_lock(&GlobalMid_Lock);
+                       if (midQ->midState == MID_REQUEST_SUBMITTED) {
+                               /* no longer considered to be "in-flight" */
+                               midQ->callback = DeleteMidQEntry;
+                               spin_unlock(&GlobalMid_Lock);
+                               return rc;
+                       }
+                       spin_unlock(&GlobalMid_Lock);
                }
+
+               /* We got the response - restart system call. */
+               rstart = 1;
        }
 
        rc = sync_mid_result(midQ, ses->server);
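
All three wait_for_response() call sites above now share the same recovery path when the wait is interrupted: issue an NT cancel for the outstanding request and, if the server still has not answered, hand the mid entry to the demultiplex thread for deletion instead of freeing it while a response might still land. Condensed from the hunks above (names as in the patch):

rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
	send_nt_cancel(ses->server, in_buf, midQ);
	spin_lock(&GlobalMid_Lock);
	if (midQ->midState == MID_REQUEST_SUBMITTED) {
		/* still in flight: let the reader thread free it later */
		midQ->callback = DeleteMidQEntry;
		spin_unlock(&GlobalMid_Lock);
		return rc;
	}
	spin_unlock(&GlobalMid_Lock);
	/* a response raced in; fall through and pick it up */
}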
index 9f493ee..611ffe9 100644 (file)
@@ -176,6 +176,7 @@ static void d_free(struct dentry *dentry)
 
 /**
  * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * @dentry: the target dentry
  * After this call, in-progress rcu-walk path lookup will fail. This
  * should be called after unhashing, and after changing d_inode (if
  * the dentry has not already been unhashed).
@@ -281,6 +282,7 @@ static void dentry_lru_move_tail(struct dentry *dentry)
 /**
  * d_kill - kill dentry and return parent
  * @dentry: dentry to kill
+ * @parent: parent dentry
  *
  * The dentry must already be unhashed and removed from the LRU.
  *
@@ -1521,6 +1523,28 @@ struct dentry * d_alloc_root(struct inode * root_inode)
 }
 EXPORT_SYMBOL(d_alloc_root);
 
+static struct dentry * __d_find_any_alias(struct inode *inode)
+{
+       struct dentry *alias;
+
+       if (list_empty(&inode->i_dentry))
+               return NULL;
+       alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
+       __dget(alias);
+       return alias;
+}
+
+static struct dentry * d_find_any_alias(struct inode *inode)
+{
+       struct dentry *de;
+
+       spin_lock(&inode->i_lock);
+       de = __d_find_any_alias(inode);
+       spin_unlock(&inode->i_lock);
+       return de;
+}
+
+
 /**
  * d_obtain_alias - find or allocate a dentry for a given inode
  * @inode: inode to allocate the dentry for
@@ -1550,7 +1574,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
        if (IS_ERR(inode))
                return ERR_CAST(inode);
 
-       res = d_find_alias(inode);
+       res = d_find_any_alias(inode);
        if (res)
                goto out_iput;
 
@@ -1563,7 +1587,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
 
 
        spin_lock(&inode->i_lock);
-       res = __d_find_alias(inode, 0);
+       res = __d_find_any_alias(inode);
        if (res) {
                spin_unlock(&inode->i_lock);
                dput(tmp);
@@ -1973,7 +1997,7 @@ out:
 /**
  * d_validate - verify dentry provided from insecure source (deprecated)
  * @dentry: The dentry alleged to be valid child of @dparent
- * @parent: The parent dentry (known to be valid)
+ * @dparent: The parent dentry (known to be valid)
  *
  * An insecure source has sent us a dentry, here we verify it and dget() it.
  * This is used by ncpfs in its readdir implementation.
index 9c64ae9..2d8c87b 100644 (file)
@@ -1468,15 +1468,13 @@ static void work_stop(void)
 
 static int work_start(void)
 {
-       recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM |
-                                        WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+       recv_workqueue = create_singlethread_workqueue("dlm_recv");
        if (!recv_workqueue) {
                log_print("can't start dlm_recv");
                return -ENOMEM;
        }
 
-       send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM |
-                                        WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+       send_workqueue = create_singlethread_workqueue("dlm_send");
        if (!send_workqueue) {
                log_print("can't start dlm_send");
                destroy_workqueue(recv_workqueue);
index 6fc4f31..534c1d4 100644 (file)
@@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
        struct dentry *lower_dentry;
        struct vfsmount *lower_mnt;
-       struct dentry *dentry_save;
-       struct vfsmount *vfsmount_save;
+       struct dentry *dentry_save = NULL;
+       struct vfsmount *vfsmount_save = NULL;
        int rc = 1;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
        if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
                goto out;
-       dentry_save = nd->path.dentry;
-       vfsmount_save = nd->path.mnt;
-       nd->path.dentry = lower_dentry;
-       nd->path.mnt = lower_mnt;
+       if (nd) {
+               dentry_save = nd->path.dentry;
+               vfsmount_save = nd->path.mnt;
+               nd->path.dentry = lower_dentry;
+               nd->path.mnt = lower_mnt;
+       }
        rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
-       nd->path.dentry = dentry_save;
-       nd->path.mnt = vfsmount_save;
+       if (nd) {
+               nd->path.dentry = dentry_save;
+               nd->path.mnt = vfsmount_save;
+       }
        if (dentry->d_inode) {
                struct inode *lower_inode =
                        ecryptfs_inode_to_lower(dentry->d_inode);
index dbc84ed..e007534 100644 (file)
@@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry,
                       u32 flags);
 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
                                        struct dentry *lower_dentry,
-                                       struct inode *ecryptfs_dir_inode,
-                                       struct nameidata *ecryptfs_nd);
+                                       struct inode *ecryptfs_dir_inode);
 int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
                                         size_t *decrypted_name_size,
                                         struct dentry *ecryptfs_dentry,
index 81e10e6..7d1050e 100644 (file)
@@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 const struct file_operations ecryptfs_dir_fops = {
        .readdir = ecryptfs_readdir,
+       .read = generic_read_dir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
index bd33f87..b592938 100644 (file)
@@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
        unsigned int flags_save;
        int rc;
 
-       dentry_save = nd->path.dentry;
-       vfsmount_save = nd->path.mnt;
-       flags_save = nd->flags;
-       nd->path.dentry = lower_dentry;
-       nd->path.mnt = lower_mnt;
-       nd->flags &= ~LOOKUP_OPEN;
+       if (nd) {
+               dentry_save = nd->path.dentry;
+               vfsmount_save = nd->path.mnt;
+               flags_save = nd->flags;
+               nd->path.dentry = lower_dentry;
+               nd->path.mnt = lower_mnt;
+               nd->flags &= ~LOOKUP_OPEN;
+       }
        rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
-       nd->path.dentry = dentry_save;
-       nd->path.mnt = vfsmount_save;
-       nd->flags = flags_save;
+       if (nd) {
+               nd->path.dentry = dentry_save;
+               nd->path.mnt = vfsmount_save;
+               nd->flags = flags_save;
+       }
        return rc;
 }
 
@@ -241,8 +245,7 @@ out:
  */
 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
                                        struct dentry *lower_dentry,
-                                       struct inode *ecryptfs_dir_inode,
-                                       struct nameidata *ecryptfs_nd)
+                                       struct inode *ecryptfs_dir_inode)
 {
        struct dentry *lower_dir_dentry;
        struct vfsmount *lower_mnt;
@@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
                goto out;
        if (special_file(lower_inode->i_mode))
                goto out;
-       if (!ecryptfs_nd)
-               goto out;
        /* Released in this function */
        page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
        if (!page_virt) {
@@ -348,75 +349,6 @@ out:
        return rc;
 }
 
-/**
- * ecryptfs_new_lower_dentry
- * @name: The name of the new dentry.
- * @lower_dir_dentry: Parent directory of the new dentry.
- * @nd: nameidata from last lookup.
- *
- * Create a new dentry or get it from lower parent dir.
- */
-static struct dentry *
-ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
-                         struct nameidata *nd)
-{
-       struct dentry *new_dentry;
-       struct dentry *tmp;
-       struct inode *lower_dir_inode;
-
-       lower_dir_inode = lower_dir_dentry->d_inode;
-
-       tmp = d_alloc(lower_dir_dentry, name);
-       if (!tmp)
-               return ERR_PTR(-ENOMEM);
-
-       mutex_lock(&lower_dir_inode->i_mutex);
-       new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd);
-       mutex_unlock(&lower_dir_inode->i_mutex);
-
-       if (!new_dentry)
-               new_dentry = tmp;
-       else
-               dput(tmp);
-
-       return new_dentry;
-}
-
-
-/**
- * ecryptfs_lookup_one_lower
- * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
- * @lower_dir_dentry: lower parent directory
- * @name: lower file name
- *
- * Get the lower dentry from vfs. If lower dentry does not exist yet,
- * create it.
- */
-static struct dentry *
-ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
-                         struct dentry *lower_dir_dentry, struct qstr *name)
-{
-       struct nameidata nd;
-       struct vfsmount *lower_mnt;
-       int err;
-
-       lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
-                                   ecryptfs_dentry->d_parent));
-       err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
-       mntput(lower_mnt);
-
-       if (!err) {
-               /* we dont need the mount */
-               mntput(nd.path.mnt);
-               return nd.path.dentry;
-       }
-       if (err != -ENOENT)
-               return ERR_PTR(err);
-
-       /* create a new lower dentry */
-       return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd);
-}
-
 /**
  * ecryptfs_lookup
  * @ecryptfs_dir_inode: The eCryptfs directory inode
@@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
        size_t encrypted_and_encoded_name_size;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
        struct dentry *lower_dir_dentry, *lower_dentry;
-       struct qstr lower_name;
        int rc = 0;
 
        if ((ecryptfs_dentry->d_name.len == 1
@@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
                goto out_d_drop;
        }
        lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-       lower_name.name = ecryptfs_dentry->d_name.name;
-       lower_name.len = ecryptfs_dentry->d_name.len;
-       lower_name.hash = ecryptfs_dentry->d_name.hash;
-       if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
-               rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
-                               lower_dir_dentry->d_inode, &lower_name);
-               if (rc < 0)
-                       goto out_d_drop;
-       }
-       lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-                                                lower_dir_dentry, &lower_name);
+       mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+       lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
+                                     lower_dir_dentry,
+                                     ecryptfs_dentry->d_name.len);
+       mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
        if (IS_ERR(lower_dentry)) {
                rc = PTR_ERR(lower_dentry);
-               ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
+               ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
                                "[%d] on lower_dentry = [%s]\n", __func__, rc,
                                encrypted_and_encoded_name);
                goto out_d_drop;
@@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
                       "filename; rc = [%d]\n", __func__, rc);
                goto out_d_drop;
        }
-       lower_name.name = encrypted_and_encoded_name;
-       lower_name.len = encrypted_and_encoded_name_size;
-       lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
-       if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
-               rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
-                               lower_dir_dentry->d_inode, &lower_name);
-               if (rc < 0)
-                       goto out_d_drop;
-       }
-       lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-                                                lower_dir_dentry, &lower_name);
+       mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+       lower_dentry = lookup_one_len(encrypted_and_encoded_name,
+                                     lower_dir_dentry,
+                                     encrypted_and_encoded_name_size);
+       mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
        if (IS_ERR(lower_dentry)) {
                rc = PTR_ERR(lower_dentry);
-               ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
+               ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
                                "[%d] on lower_dentry = [%s]\n", __func__, rc,
                                encrypted_and_encoded_name);
                goto out_d_drop;
        }
 lookup_and_interpose:
        rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry,
-                                                ecryptfs_dir_inode,
-                                                ecryptfs_nd);
+                                                ecryptfs_dir_inode);
        goto out;
 out_d_drop:
        d_drop(ecryptfs_dentry);
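
lookup_one_len() expects the parent directory's i_mutex to be held and takes care of the qstr setup, the parent's d_hash() hook and the lower lookup itself, which is what lets the hand-rolled ecryptfs_lookup_one_lower()/ecryptfs_new_lower_dentry() helpers go away. The idiom in isolation (generic sketch; parent and name are placeholders):

struct dentry *child;

mutex_lock(&parent->d_inode->i_mutex);
child = lookup_one_len(name, parent, strlen(name));
mutex_unlock(&parent->d_inode->i_mutex);
if (IS_ERR(child))
	return PTR_ERR(child);
/* child is referenced; it is a negative dentry if the name does not exist */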
@@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
                         ecryptfs_dentry_to_lower(dentry), &lower_stat);
        if (!rc) {
+               fsstack_copy_attr_all(dentry->d_inode,
+                                     ecryptfs_inode_to_lower(dentry->d_inode));
                generic_fillattr(dentry->d_inode, stat);
                stat->blocks = lower_stat.blocks;
        }
index e0194b3..d9a5917 100644 (file)
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get);
  * @ctx: [in] Pointer to eventfd context.
  *
  * The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget()).
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
  */
 void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
@@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
  * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
  * @ctx: [in] Pointer to eventfd context.
  * @wait: [in] Wait queue to be removed.
- * @cnt: [out] Pointer to the 64bit conter value.
+ * @cnt: [out] Pointer to the 64-bit counter value.
  *
- * Returns zero if successful, or the following error codes:
+ * Returns %0 if successful, or the following error codes:
  *
  * -EAGAIN      : The operation would have blocked.
  *
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
  * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
  * @ctx: [in] Pointer to eventfd context.
  * @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64bit conter value.
+ * @cnt: [out] Pointer to the 64-bit counter value.
  *
- * Returns zero if successful, or the following error codes:
+ * Returns %0 if successful, or the following error codes:
  *
- * -EAGAIN      : The operation would have blocked but @no_wait was nonzero.
+ * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
  * -ERESTARTSYS : A signal interrupted the wait operation.
  *
  * If @no_wait is zero, the function might sleep until the eventfd internal
index cc8a9b7..4a09af9 100644 (file)
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
  * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * going to.
  * It is possible to drop the "ep->mtx" and to use the global
  * mutex "epmutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
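
The cycle described here is easy to request from user space; the point of the new check is that the second EPOLL_CTL_ADD below fails cleanly instead of setting up a loop (illustrative user-space program, not part of the patch; the exact errno, ELOOP, is an assumption):

#include <stdio.h>
#include <sys/epoll.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int a = epoll_create1(0);
	int b = epoll_create1(0);

	ev.data.fd = b;
	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);		/* B inside A: fine */

	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0)	/* A inside B: a cycle */
		perror("epoll_ctl");			/* expected to fail once the check is in */
	return 0;
}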
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
  */
 static DEFINE_MUTEX(epmutex);
 
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
 /* Used for safe wake up implementation */
 static struct nested_calls poll_safewake_ncalls;
 
@@ -1114,6 +1124,17 @@ static int ep_send_events(struct eventpoll *ep,
        return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
 }
 
+static inline struct timespec ep_set_mstimeout(long ms)
+{
+       struct timespec now, ts = {
+               .tv_sec = ms / MSEC_PER_SEC,
+               .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
+       };
+
+       ktime_get_ts(&now);
+       return timespec_add_safe(now, ts);
+}
+
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                   int maxevents, long timeout)
 {
@@ -1121,12 +1142,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
        unsigned long flags;
        long slack;
        wait_queue_t wait;
-       struct timespec end_time;
        ktime_t expires, *to = NULL;
 
        if (timeout > 0) {
-               ktime_get_ts(&end_time);
-               timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+               struct timespec end_time = ep_set_mstimeout(timeout);
+
                slack = select_estimate_accuracy(&end_time);
                to = &expires;
                *to = timespec_to_ktime(end_time);
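
ep_set_mstimeout() splits the millisecond timeout into whole seconds plus leftover nanoseconds before adding it to the current time, and the addition goes through timespec_add_safe() so an absurdly large timeout saturates rather than misbehaving. A userspace sketch of the same conversion (clock_gettime() standing in for ktime_get_ts(); the saturation step is omitted here):

#include <stdio.h>
#include <time.h>

#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L
#define NSEC_PER_SEC	1000000000L

/* Userspace analogue of ep_set_mstimeout(): relative ms -> absolute deadline. */
static struct timespec ms_to_deadline(long ms)
{
	struct timespec now, ts = {
		.tv_sec = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};

	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= NSEC_PER_SEC) {	/* normalize the nanosecond carry */
		now.tv_sec++;
		now.tv_nsec -= NSEC_PER_SEC;
	}
	return now;
}

int main(void)
{
	struct timespec t = ms_to_deadline(2500);

	printf("deadline: %ld s + %ld ns from clock start\n",
	       (long)t.tv_sec, t.tv_nsec);
	return 0;
}
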
@@ -1188,6 +1208,62 @@ retry:
        return res;
 }
 
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ *                      API, to verify that adding an epoll file inside another
+ *                      epoll structure does not violate the constraints in
+ *                      terms of closed loops or too deep chains (which can
+ *                      result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file to be currently checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ *          data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Zero if adding the epoll @file inside the current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+       int error = 0;
+       struct file *file = priv;
+       struct eventpoll *ep = file->private_data;
+       struct rb_node *rbp;
+       struct epitem *epi;
+
+       mutex_lock(&ep->mtx);
+       for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+               epi = rb_entry(rbp, struct epitem, rbn);
+               if (unlikely(is_file_epoll(epi->ffd.file))) {
+                       error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+                                              ep_loop_check_proc, epi->ffd.file,
+                                              epi->ffd.file->private_data, current);
+                       if (error != 0)
+                               break;
+               }
+       }
+       mutex_unlock(&ep->mtx);
+
+       return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ *                 inside another epoll file (represented by @ep) does not create
+ *                 closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Zero if adding the epoll @file inside the current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+       return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+                             ep_loop_check_proc, file, ep, current);
+}
+
 /*
  * Open an eventpoll file descriptor.
  */
@@ -1236,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                struct epoll_event __user *, event)
 {
        int error;
+       int did_lock_epmutex = 0;
        struct file *file, *tfile;
        struct eventpoll *ep;
        struct epitem *epi;
@@ -1277,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         */
        ep = file->private_data;
 
+       /*
+	 * When we insert an epoll file descriptor inside another epoll file
+	 * descriptor, there is the chance of creating closed loops, which are
+	 * better handled here than in more critical paths.
+        *
+        * We hold epmutex across the loop check and the insert in this case, in
+        * order to prevent two separate inserts from racing and each doing the
+        * insert "at the same time" such that ep_loop_check passes on both
+        * before either one does the insert, thereby creating a cycle.
+        */
+       if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+               mutex_lock(&epmutex);
+               did_lock_epmutex = 1;
+               error = -ELOOP;
+               if (ep_loop_check(ep, tfile) != 0)
+                       goto error_tgt_fput;
+       }
+
+
        mutex_lock(&ep->mtx);
 
        /*
@@ -1312,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
        mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
+       if (unlikely(did_lock_epmutex))
+               mutex_unlock(&epmutex);
+
        fput(tfile);
 error_fput:
        fput(file);
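
With the loop check above in place, wiring two epoll descriptors into each other makes the second EPOLL_CTL_ADD fail with ELOOP rather than quietly creating a cycle. A standalone userspace sketch of the rejected case:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int a = epoll_create1(0);
	int b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	if (a < 0 || b < 0)
		return 1;

	/* A watches B: fine. */
	ev.data.fd = b;
	if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) != 0)
		perror("add b to a");

	/* B watching A would close the loop: rejected with ELOOP. */
	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) != 0 && errno == ELOOP)
		printf("cycle rejected: %s\n", strerror(errno));

	close(a);
	close(b);
	return 0;
}
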
@@ -1431,6 +1530,12 @@ static int __init eventpoll_init(void)
                EP_ITEM_COST;
        BUG_ON(max_user_watches < 0);
 
+       /*
+        * Initialize the structure used to perform epoll file descriptor
+        * inclusion loops checks.
+        */
+       ep_nested_calls_init(&poll_loop_ncalls);
+
        /* Initialize the structure used to perform safe poll wait head wake ups */
        ep_nested_calls_init(&poll_safewake_ncalls);
 
index c62efcb..52a447d 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -120,7 +120,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
                goto out;
 
        file = do_filp_open(AT_FDCWD, tmp,
-                               O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+                               O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
                                MAY_READ | MAY_EXEC | MAY_OPEN);
        putname(tmp);
        error = PTR_ERR(file);
@@ -723,7 +723,7 @@ struct file *open_exec(const char *name)
        int err;
 
        file = do_filp_open(AT_FDCWD, name,
-                               O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+                               O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
                                MAY_EXEC | MAY_OPEN);
        if (IS_ERR(file))
                goto out;
index 4268542..a755523 100644 (file)
@@ -1030,7 +1030,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
                memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
        }
 
-       inode->i_mapping->backing_dev_info = sb->s_bdi;
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &exofs_file_inode_operations;
                inode->i_fop = &exofs_file_operations;
@@ -1131,7 +1130,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
 
        sbi = sb->s_fs_info;
 
-       inode->i_mapping->backing_dev_info = sb->s_bdi;
        sb->s_dirt = 1;
        inode_init_owner(inode, dir, mode);
        inode->i_ino = sbi->s_nextid++;
index 264e95d..4d70db1 100644 (file)
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                err = exofs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (dir_de)
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= EXOFS_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = exofs_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_inode->i_ctime = CURRENT_TIME;
 
        exofs_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
index 2e1d834..adb9185 100644 (file)
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                        if (new_dir->i_nlink >= EXT2_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = ext2_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
-        * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME_SEC;
+       mark_inode_dirty(old_inode);
 
        ext2_delete_entry (old_de, old_page);
-       inode_dec_link_count(old_inode);
 
        if (dir_de) {
                if (old_dir != new_dir)
index 0c8d97b..3aa0b72 100644 (file)
@@ -848,6 +848,7 @@ struct ext4_inode_info {
        atomic_t i_ioend_count; /* Number of outstanding io_end structs */
        /* current io_end structure for async DIO write*/
        ext4_io_end_t *cur_aio_dio;
+       atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */
 
        spinlock_t i_block_reservation_lock;
 
@@ -2119,6 +2120,15 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
 
 #define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/* For ioend & aio unwritten conversion wait queues */
+#define EXT4_WQ_HASH_SZ                37
+#define ext4_ioend_wq(v)   (&ext4__ioend_wq[((unsigned long)(v)) %\
+                                           EXT4_WQ_HASH_SZ])
+#define ext4_aio_mutex(v)  (&ext4__aio_mutex[((unsigned long)(v)) %\
+                                            EXT4_WQ_HASH_SZ])
+extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 #endif /* __KERNEL__ */
 
 #endif /* _EXT4_H */
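
The ext4_ioend_wq()/ext4_aio_mutex() macros hash the inode pointer into one of EXT4_WQ_HASH_SZ shared buckets, so unrelated inodes may share a wait queue or mutex but no per-inode allocation is needed. A standalone sketch of the same bucketing idea (the bucket type here is a placeholder, not an ext4 structure):

#include <stdio.h>
#include <stdint.h>

#define WQ_HASH_SZ 37

struct bucket { int dummy; };	/* stands in for wait_queue_head_t / struct mutex */

static struct bucket table[WQ_HASH_SZ];

/* Same idea as ext4_ioend_wq(): reduce an object's address to a bucket index. */
static struct bucket *bucket_for(const void *obj)
{
	return &table[(uintptr_t)obj % WQ_HASH_SZ];
}

int main(void)
{
	int a, b;

	printf("a -> bucket %ld\n", (long)(bucket_for(&a) - table));
	printf("b -> bucket %ld\n", (long)(bucket_for(&b) - table));
	return 0;
}
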
index 63a7581..ccce8a7 100644 (file)
@@ -3174,9 +3174,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                 * that this IO needs to convertion to written when IO is
                 * completed
                 */
-               if (io)
+               if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
                        io->flag = EXT4_IO_END_UNWRITTEN;
-               else
+                       atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+               } else
                        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
                if (ext4_should_dioread_nolock(inode))
                        map->m_flags |= EXT4_MAP_UNINIT;
@@ -3463,9 +3464,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                 * that we need to perform convertion when IO is done.
                 */
                if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
-                       if (io)
+                       if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
                                io->flag = EXT4_IO_END_UNWRITTEN;
-                       else
+                               atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+                       } else
                                ext4_set_inode_state(inode,
                                                     EXT4_STATE_DIO_UNWRITTEN);
                }
index 2e8322c..7b80d54 100644 (file)
@@ -55,11 +55,47 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static void ext4_aiodio_wait(struct inode *inode)
+{
+       wait_queue_head_t *wq = ext4_ioend_wq(inode);
+
+       wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
+}
+
+/*
+ * This tests whether the IO in question is block-aligned or not.
+ * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
+ * are converted to written only after the IO is complete.  Until they are
+ * mapped, these blocks appear as holes, so dio_zero_block() will assume that
+ * it needs to zero out portions of the start and/or end block.  If 2 AIO
+ * threads are at work on the same unwritten block, they must be synchronized
+ * or one thread will zero the other's data, causing corruption.
+ */
+static int
+ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
+                  unsigned long nr_segs, loff_t pos)
+{
+       struct super_block *sb = inode->i_sb;
+       int blockmask = sb->s_blocksize - 1;
+       size_t count = iov_length(iov, nr_segs);
+       loff_t final_size = pos + count;
+
+       if (pos >= inode->i_size)
+               return 0;
+
+       if ((pos & blockmask) || (final_size & blockmask))
+               return 1;
+
+       return 0;
+}
+
 static ssize_t
 ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       int unaligned_aio = 0;
+       int ret;
 
        /*
         * If we have encountered a bitmap-format file, the size limit
@@ -78,9 +114,31 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                        nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                              sbi->s_bitmap_maxbytes - pos);
                }
+       } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
+                  !is_sync_kiocb(iocb))) {
+               unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
        }
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       /* Unaligned direct AIO must be serialized; see comment above */
+       if (unaligned_aio) {
+               static unsigned long unaligned_warn_time;
+
+               /* Warn about this once per day */
+               if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
+                       ext4_msg(inode->i_sb, KERN_WARNING,
+                                "Unaligned AIO/DIO on inode %ld by %s; "
+                                "performance will be poor.",
+                                inode->i_ino, current->comm);
+               mutex_lock(ext4_aio_mutex(inode));
+               ext4_aiodio_wait(inode);
+       }
+
+       ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+       if (unaligned_aio)
+               mutex_unlock(ext4_aio_mutex(inode));
+
+       return ret;
 }
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
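
The serialization above only triggers when the direct AIO write is misaligned at either end; a block-aligned write never touches a partially mapped block and proceeds concurrently. A standalone sketch of the same alignment test with example values (4096-byte blocks assumed; the i_size short-circuit of the real check is omitted):

#include <stdio.h>
#include <stddef.h>

/* Mirrors ext4_unaligned_aio()'s test: unaligned if either end of the
 * write falls inside a filesystem block. */
static int is_unaligned(long long pos, size_t count, unsigned int blocksize)
{
	unsigned int blockmask = blocksize - 1;
	long long final_size = pos + (long long)count;

	return ((pos & blockmask) || (final_size & blockmask)) ? 1 : 0;
}

int main(void)
{
	printf("pos=4096 len=4096 -> %s\n", is_unaligned(4096, 4096, 4096) ? "unaligned" : "aligned");
	printf("pos=4096 len=100  -> %s\n", is_unaligned(4096,  100, 4096) ? "unaligned" : "aligned");
	printf("pos=512  len=4096 -> %s\n", is_unaligned(512,  4096, 4096) ? "unaligned" : "aligned");
	return 0;
}
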
index 851f49b..d1fe09a 100644 (file)
@@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep;
 /* We create slab caches for groupinfo data structures based on the
  * superblock block size.  There will be one per mounted filesystem for
  * each unique s_blocksize_bits */
-#define NR_GRPINFO_CACHES      \
-       (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+#define NR_GRPINFO_CACHES 8
 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 
+static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+       "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
+       "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
+       "ext4_groupinfo_64k", "ext4_groupinfo_128k"
+};
+
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
@@ -2414,6 +2419,55 @@ err_freesgi:
        return -ENOMEM;
 }
 
+static void ext4_groupinfo_destroy_slabs(void)
+{
+       int i;
+
+       for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+               if (ext4_groupinfo_caches[i])
+                       kmem_cache_destroy(ext4_groupinfo_caches[i]);
+               ext4_groupinfo_caches[i] = NULL;
+       }
+}
+
+static int ext4_groupinfo_create_slab(size_t size)
+{
+       static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
+       int slab_size;
+       int blocksize_bits = order_base_2(size);
+       int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+       struct kmem_cache *cachep;
+
+       if (cache_index >= NR_GRPINFO_CACHES)
+               return -EINVAL;
+
+       if (unlikely(cache_index < 0))
+               cache_index = 0;
+
+       mutex_lock(&ext4_grpinfo_slab_create_mutex);
+       if (ext4_groupinfo_caches[cache_index]) {
+               mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+               return 0;       /* Already created */
+       }
+
+       slab_size = offsetof(struct ext4_group_info,
+                               bb_counters[blocksize_bits + 2]);
+
+       cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
+                                       slab_size, 0, SLAB_RECLAIM_ACCOUNT,
+                                       NULL);
+
+       mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+       if (!cachep) {
+               printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+               return -ENOMEM;
+       }
+
+       ext4_groupinfo_caches[cache_index] = cachep;
+
+       return 0;
+}
+
 int ext4_mb_init(struct super_block *sb, int needs_recovery)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
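
ext4_groupinfo_create_slab() picks one of the eight statically named caches by taking log2 of the block size and subtracting EXT4_MIN_BLOCK_LOG_SIZE (10), so block sizes from 1k to 128k map to indices 0 through 7. A quick standalone check of that mapping (the log2 helper is written out by hand instead of order_base_2()):

#include <stdio.h>

#define EXT4_MIN_BLOCK_LOG_SIZE	10
#define NR_GRPINFO_CACHES	8

/* Userspace stand-in for order_base_2() on an exact power of two. */
static int log2_u32(unsigned int v)
{
	int bits = 0;

	while (v > 1) {
		v >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned int size;

	for (size = 1024; size <= 131072; size <<= 1) {
		int idx = log2_u32(size) - EXT4_MIN_BLOCK_LOG_SIZE;

		printf("blocksize %6u -> cache index %d%s\n", size, idx,
		       idx >= NR_GRPINFO_CACHES ? " (rejected)" : "");
	}
	return 0;
}
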
@@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        unsigned offset;
        unsigned max;
        int ret;
-       int cache_index;
-       struct kmem_cache *cachep;
-       char *namep = NULL;
 
        i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
 
@@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
                goto out;
        }
 
-       cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
-       cachep = ext4_groupinfo_caches[cache_index];
-       if (!cachep) {
-               char name[32];
-               int len = offsetof(struct ext4_group_info,
-                                       bb_counters[sb->s_blocksize_bits + 2]);
-
-               sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
-               namep = kstrdup(name, GFP_KERNEL);
-               if (!namep) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               /* Need to free the kmem_cache_name() when we
-                * destroy the slab */
-               cachep = kmem_cache_create(namep, len, 0,
-                                            SLAB_RECLAIM_ACCOUNT, NULL);
-               if (!cachep) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               ext4_groupinfo_caches[cache_index] = cachep;
-       }
+       ret = ext4_groupinfo_create_slab(sb->s_blocksize);
+       if (ret < 0)
+               goto out;
 
        /* order 0 is regular bitmap */
        sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
@@ -2520,7 +2550,6 @@ out:
        if (ret) {
                kfree(sbi->s_mb_offsets);
                kfree(sbi->s_mb_maxs);
-               kfree(namep);
        }
        return ret;
 }
@@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void)
 
 void ext4_exit_mballoc(void)
 {
-       int i;
        /*
         * Wait for completion of call_rcu()'s on ext4_pspace_cachep
         * before destroying the slab cache.
@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void)
        kmem_cache_destroy(ext4_pspace_cachep);
        kmem_cache_destroy(ext4_ac_cachep);
        kmem_cache_destroy(ext4_free_ext_cachep);
-
-       for (i = 0; i < NR_GRPINFO_CACHES; i++) {
-               struct kmem_cache *cachep = ext4_groupinfo_caches[i];
-               if (cachep) {
-                       char *name = (char *)kmem_cache_name(cachep);
-                       kmem_cache_destroy(cachep);
-                       kfree(name);
-               }
-       }
+       ext4_groupinfo_destroy_slabs();
        ext4_remove_debugfs_entry();
 }
 
index 7270dcf..955cc30 100644 (file)
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
-#define WQ_HASH_SZ             37
-#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
-static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
-
 int __init ext4_init_pageio(void)
 {
-       int i;
-
        io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
        if (io_page_cachep == NULL)
                return -ENOMEM;
@@ -48,9 +42,6 @@ int __init ext4_init_pageio(void)
                kmem_cache_destroy(io_page_cachep);
                return -ENOMEM;
        }
-       for (i = 0; i < WQ_HASH_SZ; i++)
-               init_waitqueue_head(&ioend_wq[i]);
-
        return 0;
 }
 
@@ -62,7 +53,7 @@ void ext4_exit_pageio(void)
 
 void ext4_ioend_wait(struct inode *inode)
 {
-       wait_queue_head_t *wq = to_ioend_wq(inode);
+       wait_queue_head_t *wq = ext4_ioend_wq(inode);
 
        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
 }
@@ -87,7 +78,7 @@ void ext4_free_io_end(ext4_io_end_t *io)
        for (i = 0; i < io->num_io_pages; i++)
                put_io_page(io->pages[i]);
        io->num_io_pages = 0;
-       wq = to_ioend_wq(io->inode);
+       wq = ext4_ioend_wq(io->inode);
        if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
            waitqueue_active(wq))
                wake_up_all(wq);
@@ -102,6 +93,7 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
+       wait_queue_head_t *wq;
        int ret = 0;
 
        ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
@@ -126,7 +118,16 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
        if (io->iocb)
                aio_complete(io->iocb, io->result, 0);
        /* clear the DIO AIO unwritten flag */
-       io->flag &= ~EXT4_IO_END_UNWRITTEN;
+       if (io->flag & EXT4_IO_END_UNWRITTEN) {
+               io->flag &= ~EXT4_IO_END_UNWRITTEN;
+               /* Wake up anyone waiting on unwritten extent conversion */
+               wq = ext4_ioend_wq(io->inode);
+               if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) &&
+                   waitqueue_active(wq)) {
+                       wake_up_all(wq);
+               }
+       }
+
        return ret;
 }
 
@@ -190,6 +191,7 @@ static void ext4_end_bio(struct bio *bio, int error)
        struct inode *inode;
        unsigned long flags;
        int i;
+       sector_t bi_sector = bio->bi_sector;
 
        BUG_ON(!io_end);
        bio->bi_private = NULL;
@@ -207,9 +209,7 @@ static void ext4_end_bio(struct bio *bio, int error)
                if (error)
                        SetPageError(page);
                BUG_ON(!head);
-               if (head->b_size == PAGE_CACHE_SIZE)
-                       clear_buffer_dirty(head);
-               else {
+               if (head->b_size != PAGE_CACHE_SIZE) {
                        loff_t offset;
                        loff_t io_end_offset = io_end->offset + io_end->size;
 
@@ -221,7 +221,6 @@ static void ext4_end_bio(struct bio *bio, int error)
                                        if (error)
                                                buffer_io_error(bh);
 
-                                       clear_buffer_dirty(bh);
                                }
                                if (buffer_delay(bh))
                                        partial_write = 1;
@@ -257,7 +256,7 @@ static void ext4_end_bio(struct bio *bio, int error)
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
-                            bio->bi_sector >> (inode->i_blkbits - 9));
+                            bi_sector >> (inode->i_blkbits - 9));
        }
 
        /* Add the io_end to per-inode completed io list*/
@@ -380,6 +379,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
        blocksize = 1 << inode->i_blkbits;
 
+       BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        ClearPageError(page);
@@ -397,12 +397,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
+
                block_end = block_start + blocksize;
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
+               clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
index 48ce561..f6a318f 100644 (file)
@@ -77,6 +77,7 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data);
 static void ext4_destroy_lazyinit_thread(void);
 static void ext4_unregister_li_request(struct super_block *sb);
+static void ext4_clear_request_list(void);
 
 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
@@ -832,6 +833,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_ioend_count, 0);
+       atomic_set(&ei->i_aiodio_unwritten, 0);
 
        return &ei->vfs_inode;
 }
@@ -2716,6 +2718,8 @@ static void ext4_unregister_li_request(struct super_block *sb)
        mutex_unlock(&ext4_li_info->li_list_mtx);
 }
 
+static struct task_struct *ext4_lazyinit_task;
+
 /*
  * This is the function where ext4lazyinit thread lives. It walks
  * through the request list searching for next scheduled filesystem.
@@ -2784,6 +2788,10 @@ cont_thread:
                if (time_before(jiffies, next_wakeup))
                        schedule();
                finish_wait(&eli->li_wait_daemon, &wait);
+               if (kthread_should_stop()) {
+                       ext4_clear_request_list();
+                       goto exit_thread;
+               }
        }
 
 exit_thread:
@@ -2808,6 +2816,7 @@ exit_thread:
        wake_up(&eli->li_wait_task);
 
        kfree(ext4_li_info);
+       ext4_lazyinit_task = NULL;
        ext4_li_info = NULL;
        mutex_unlock(&ext4_li_mtx);
 
@@ -2830,11 +2839,10 @@ static void ext4_clear_request_list(void)
 
 static int ext4_run_lazyinit_thread(void)
 {
-       struct task_struct *t;
-
-       t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit");
-       if (IS_ERR(t)) {
-               int err = PTR_ERR(t);
+       ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
+                                        ext4_li_info, "ext4lazyinit");
+       if (IS_ERR(ext4_lazyinit_task)) {
+               int err = PTR_ERR(ext4_lazyinit_task);
                ext4_clear_request_list();
                del_timer_sync(&ext4_li_info->li_timer);
                kfree(ext4_li_info);
@@ -2985,16 +2993,10 @@ static void ext4_destroy_lazyinit_thread(void)
         * If thread exited earlier
         * there's nothing to be done.
         */
-       if (!ext4_li_info)
+       if (!ext4_li_info || !ext4_lazyinit_task)
                return;
 
-       ext4_clear_request_list();
-
-       while (ext4_li_info->li_task) {
-               wake_up(&ext4_li_info->li_wait_daemon);
-               wait_event(ext4_li_info->li_wait_task,
-                          ext4_li_info->li_task == NULL);
-       }
+       kthread_stop(ext4_lazyinit_task);
 }
 
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
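
The lazyinit rework above replaces the hand-rolled li_wait_daemon/li_wait_task handshake with the standard kthread lifecycle: keep the task_struct returned by kthread_run(), have the thread poll kthread_should_stop(), and let kthread_stop() do the wake-and-wait on teardown. A minimal sketch of that pattern, with hypothetical names rather than ext4 code:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *example_task;

static int example_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* do one unit of background work, then sleep for a while */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;	/* return value is collected by kthread_stop() */
}

static int example_start(void)
{
	example_task = kthread_run(example_thread, NULL, "examplethread");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void example_stop(void)
{
	if (example_task)
		kthread_stop(example_task);	/* wakes the thread, waits for exit */
}
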
@@ -4768,7 +4770,7 @@ static struct file_system_type ext4_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-int __init ext4_init_feat_adverts(void)
+static int __init ext4_init_feat_adverts(void)
 {
        struct ext4_features *ef;
        int ret = -ENOMEM;
@@ -4792,23 +4794,44 @@ out:
        return ret;
 }
 
+static void ext4_exit_feat_adverts(void)
+{
+       kobject_put(&ext4_feat->f_kobj);
+       wait_for_completion(&ext4_feat->f_kobj_unregister);
+       kfree(ext4_feat);
+}
+
+/* Shared across all ext4 file systems */
+wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 static int __init ext4_init_fs(void)
 {
-       int err;
+       int i, err;
 
        ext4_check_flag_values();
+
+       for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
+               mutex_init(&ext4__aio_mutex[i]);
+               init_waitqueue_head(&ext4__ioend_wq[i]);
+       }
+
        err = ext4_init_pageio();
        if (err)
                return err;
        err = ext4_init_system_zone();
        if (err)
-               goto out5;
+               goto out7;
        ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
        if (!ext4_kset)
-               goto out4;
+               goto out6;
        ext4_proc_root = proc_mkdir("fs/ext4", NULL);
+       if (!ext4_proc_root)
+               goto out5;
 
        err = ext4_init_feat_adverts();
+       if (err)
+               goto out4;
 
        err = ext4_init_mballoc();
        if (err)
@@ -4838,12 +4861,14 @@ out1:
 out2:
        ext4_exit_mballoc();
 out3:
-       kfree(ext4_feat);
+       ext4_exit_feat_adverts();
+out4:
        remove_proc_entry("fs/ext4", NULL);
+out5:
        kset_unregister(ext4_kset);
-out4:
+out6:
        ext4_exit_system_zone();
-out5:
+out7:
        ext4_exit_pageio();
        return err;
 }
@@ -4857,6 +4882,7 @@ static void __exit ext4_exit_fs(void)
        destroy_inodecache();
        ext4_exit_xattr();
        ext4_exit_mballoc();
+       ext4_exit_feat_adverts();
        remove_proc_entry("fs/ext4", NULL);
        kset_unregister(ext4_kset);
        ext4_exit_system_zone();
index f88f752..adae3fb 100644 (file)
@@ -43,7 +43,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
 
 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        /* This is not negative dentry. Always valid. */
@@ -54,7 +54,7 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
 
 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        /*
index ecc8b39..cb10261 100644 (file)
@@ -815,7 +815,7 @@ static int __init fcntl_init(void)
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
-               FMODE_EXEC
+               __FMODE_EXEC
                ));
 
        fasync_cache = kmem_cache_create("fasync_cache",
index c3e89ad..eb36b6b 100644 (file)
@@ -125,13 +125,13 @@ struct file *get_empty_filp(void)
                goto fail;
 
        percpu_counter_inc(&nr_files);
+       f->f_cred = get_cred(cred);
        if (security_file_alloc(f))
                goto fail_sec;
 
        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
-       f->f_cred = get_cred(cred);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
index bfed844..8bd0ef9 100644 (file)
@@ -158,7 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
 {
        struct inode *inode;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        inode = entry->d_inode;
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
        if (err)
                return err;
 
-       if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
-               return 0;
+       if (attr->ia_valid & ATTR_OPEN) {
+               if (fc->atomic_o_trunc)
+                       return 0;
+               file = NULL;
+       }
 
        if (attr->ia_valid & ATTR_SIZE)
                is_truncate = true;
index 95da1bc..9e0832d 100644 (file)
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
        return ff;
 }
 
+static void fuse_release_async(struct work_struct *work)
+{
+       struct fuse_req *req;
+       struct fuse_conn *fc;
+       struct path path;
+
+       req = container_of(work, struct fuse_req, misc.release.work);
+       path = req->misc.release.path;
+       fc = get_fuse_conn(path.dentry->d_inode);
+
+       fuse_put_request(fc, req);
+       path_put(&path);
+}
+
 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-       path_put(&req->misc.release.path);
+       if (fc->destroy_req) {
+               /*
+                * If this is a fuseblk mount, then it's possible that
+                * releasing the path will result in releasing the
+                * super block and sending the DESTROY request.  If
+                * the server is single threaded, this would hang.
+                * For this reason do the path_put() in a separate
+                * thread.
+                */
+               atomic_inc(&req->count);
+               INIT_WORK(&req->misc.release.work, fuse_release_async);
+               schedule_work(&req->misc.release.work);
+       } else {
+               path_put(&req->misc.release.path);
+       }
 }
 
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_file_put(struct fuse_file *ff, bool sync)
 {
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
 
-               req->end = fuse_release_end;
-               fuse_request_send_background(ff->fc, req);
+               if (sync) {
+                       fuse_request_send(ff->fc, req);
+                       path_put(&req->misc.release.path);
+                       fuse_put_request(ff->fc, req);
+               } else {
+                       req->end = fuse_release_end;
+                       fuse_request_send_background(ff->fc, req);
+               }
                kfree(ff);
        }
 }
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
         * Normally this will send the RELEASE request, however if
         * some asynchronous READ or WRITE requests are outstanding,
         * the sending will be delayed.
+        *
+	 * Make the release synchronous if this is a fuseblk mount;
+        * synchronous RELEASE is allowed (and desirable) in this case
+        * because the server can be trusted not to screw up.
         */
-       fuse_file_put(ff);
+       fuse_file_put(ff, ff->fc->destroy_req != NULL);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
                page_cache_release(page);
        }
        if (req->ff)
-               fuse_file_put(req->ff);
+               fuse_file_put(req->ff, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
        __free_page(req->pages[0]);
-       fuse_file_put(req->ff);
+       fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
index ae5744a..d428694 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/rwsem.h>
 #include <linux/rbtree.h>
 #include <linux/poll.h>
+#include <linux/workqueue.h>
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@ struct fuse_req {
        /** Data for asynchronous requests */
        union {
                struct {
-                       struct fuse_release_in in;
+                       union {
+                               struct fuse_release_in in;
+                               struct work_struct work;
+                       };
                        struct path path;
                } release;
                struct fuse_init_in init_in;
index 4a45633..0da8da2 100644 (file)
@@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
        int error;
        int had_lock = 0;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        parent = dget_parent(dentry);
index 08a8beb..7cd9a5a 100644 (file)
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void)
 #endif
 
        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-                                         WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+                                         WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (IS_ERR(glock_workqueue))
                return PTR_ERR(glock_workqueue);
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-                                               WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+                                               WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (IS_ERR(gfs2_delete_workqueue)) {
                destroy_workqueue(glock_workqueue);
index ebef7ab..72c31a3 100644 (file)
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
        struct address_space *mapping = (struct address_space *)(gl + 1);
 
        gfs2_init_glock_once(gl);
-       memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-       spin_lock_init(&mapping->tree_lock);
-       spin_lock_init(&mapping->i_mmap_lock);
-       INIT_LIST_HEAD(&mapping->private_list);
-       spin_lock_init(&mapping->private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       address_space_init_once(mapping);
 }
 
 /**
@@ -144,7 +137,7 @@ static int __init init_gfs2_fs(void)
 
        error = -ENOMEM;
        gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-                                         WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+                                         WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
        if (!gfs_recovery_wq)
                goto fail_wq;
 
index afa66aa..b4d70b1 100644 (file)
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 }
 
 /*
- * hfs_unlink()
+ * hfs_remove()
  *
- * This is the unlink() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * file, given the inode for the parent directory and the name
- * (and its length) of the existing file.
- */
-static int hfs_unlink(struct inode *dir, struct dentry *dentry)
-{
-       struct inode *inode;
-       int res;
-
-       inode = dentry->d_inode;
-       res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
-       if (res)
-               return res;
-
-       drop_nlink(inode);
-       hfs_delete_inode(inode);
-       inode->i_ctime = CURRENT_TIME_SEC;
-       mark_inode_dirty(inode);
-
-       return res;
-}
-
-/*
- * hfs_rmdir()
+ * This serves as both unlink() and rmdir() in the inode_operations
+ * structure for regular HFS directories.  The purpose is to delete
+ * an existing child, given the inode for the parent directory and
+ * the name (and its length) of the existing child.
  *
- * This is the rmdir() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * directory, given the inode for the parent directory and the name
- * (and its length) of the existing directory.
+ * HFS does not have hardlinks, so both rmdir and unlink set the
+ * link count to 0.  The only difference is the emptiness check.
  */
-static int hfs_rmdir(struct inode *dir, struct dentry *dentry)
+static int hfs_remove(struct inode *dir, struct dentry *dentry)
 {
-       struct inode *inode;
+       struct inode *inode = dentry->d_inode;
        int res;
 
-       inode = dentry->d_inode;
-       if (inode->i_size != 2)
+       if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
                return -ENOTEMPTY;
        res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
        if (res)
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               res = hfs_unlink(new_dir, new_dentry);
+               res = hfs_remove(new_dir, new_dentry);
                if (res)
                        return res;
        }
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = {
 const struct inode_operations hfs_dir_inode_operations = {
        .create         = hfs_create,
        .lookup         = hfs_lookup,
-       .unlink         = hfs_unlink,
+       .unlink         = hfs_remove,
        .mkdir          = hfs_mkdir,
-       .rmdir          = hfs_rmdir,
+       .rmdir          = hfs_remove,
        .rename         = hfs_rename,
        .setattr        = hfs_inode_setattr,
 };
index 52a0bca..b1991a2 100644 (file)
@@ -397,8 +397,8 @@ int hfsplus_file_extend(struct inode *inode)
        u32 start, len, goal;
        int res;
 
-       if (sbi->total_blocks - sbi->free_blocks + 8 >
-                       sbi->alloc_file->i_size * 8) {
+       if (sbi->alloc_file->i_size * 8 <
+           sbi->total_blocks - sbi->free_blocks + 8) {
                /* extend alloc file */
                printk(KERN_ERR "hfs: extend alloc file! "
                                "(%llu,%u,%u)\n",
index d66ad11..40ad88c 100644 (file)
@@ -134,7 +134,7 @@ int hfs_part_find(struct super_block *sb,
        res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
                                 data, READ);
        if (res)
-               return res;
+               goto out;
 
        switch (be16_to_cpu(*((__be16 *)data))) {
        case HFS_OLD_PMAP_MAGIC:
@@ -147,7 +147,7 @@ int hfs_part_find(struct super_block *sb,
                res = -ENOENT;
                break;
        }
-
+out:
        kfree(data);
        return res;
 }
index 9a3b479..b49b555 100644 (file)
@@ -338,20 +338,22 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *root, *inode;
        struct qstr str;
        struct nls_table *nls = NULL;
-       int err = -EINVAL;
+       int err;
 
+       err = -EINVAL;
        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
-               return -ENOMEM;
+               goto out;
 
        sb->s_fs_info = sbi;
        mutex_init(&sbi->alloc_mutex);
        mutex_init(&sbi->vh_mutex);
        hfsplus_fill_defaults(sbi);
+
+       err = -EINVAL;
        if (!hfsplus_parse_options(data, sbi)) {
                printk(KERN_ERR "hfs: unable to parse mount options\n");
-               err = -EINVAL;
-               goto cleanup;
+               goto out_unload_nls;
        }
 
        /* temporarily use utf8 to correctly find the hidden dir below */
@@ -359,16 +361,14 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        sbi->nls = load_nls("utf8");
        if (!sbi->nls) {
                printk(KERN_ERR "hfs: unable to load nls for utf8\n");
-               err = -EINVAL;
-               goto cleanup;
+               goto out_unload_nls;
        }
 
        /* Grab the volume header */
        if (hfsplus_read_wrapper(sb)) {
                if (!silent)
                        printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n");
-               err = -EINVAL;
-               goto cleanup;
+               goto out_unload_nls;
        }
        vhdr = sbi->s_vhdr;
 
@@ -377,7 +377,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
            be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
                printk(KERN_ERR "hfs: wrong filesystem version\n");
-               goto cleanup;
+               goto out_free_vhdr;
        }
        sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
        sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
@@ -421,19 +421,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
        if (!sbi->ext_tree) {
                printk(KERN_ERR "hfs: failed to load extents file\n");
-               goto cleanup;
+               goto out_free_vhdr;
        }
        sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
        if (!sbi->cat_tree) {
                printk(KERN_ERR "hfs: failed to load catalog file\n");
-               goto cleanup;
+               goto out_close_ext_tree;
        }
 
        inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
        if (IS_ERR(inode)) {
                printk(KERN_ERR "hfs: failed to load allocation file\n");
                err = PTR_ERR(inode);
-               goto cleanup;
+               goto out_close_cat_tree;
        }
        sbi->alloc_file = inode;
 
@@ -442,14 +442,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        if (IS_ERR(root)) {
                printk(KERN_ERR "hfs: failed to load root directory\n");
                err = PTR_ERR(root);
-               goto cleanup;
-       }
-       sb->s_d_op = &hfsplus_dentry_operations;
-       sb->s_root = d_alloc_root(root);
-       if (!sb->s_root) {
-               iput(root);
-               err = -ENOMEM;
-               goto cleanup;
+               goto out_put_alloc_file;
        }
 
        str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
@@ -459,46 +452,69 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
                hfs_find_exit(&fd);
                if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
-                       goto cleanup;
+                       goto out_put_root;
                inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
-                       goto cleanup;
+                       goto out_put_root;
                }
                sbi->hidden_dir = inode;
        } else
                hfs_find_exit(&fd);
 
-       if (sb->s_flags & MS_RDONLY)
-               goto out;
+       if (!(sb->s_flags & MS_RDONLY)) {
+               /*
+                * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
+                * all three are registered with Apple for our use
+                */
+               vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
+               vhdr->modify_date = hfsp_now2mt();
+               be32_add_cpu(&vhdr->write_count, 1);
+               vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
+               vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+               hfsplus_sync_fs(sb, 1);
 
-       /* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
-        * all three are registered with Apple for our use
-        */
-       vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
-       vhdr->modify_date = hfsp_now2mt();
-       be32_add_cpu(&vhdr->write_count, 1);
-       vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
-       vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
-       hfsplus_sync_fs(sb, 1);
-
-       if (!sbi->hidden_dir) {
-               mutex_lock(&sbi->vh_mutex);
-               sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
-               hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
-                                  &str, sbi->hidden_dir);
-               mutex_unlock(&sbi->vh_mutex);
-
-               hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY);
+               if (!sbi->hidden_dir) {
+                       mutex_lock(&sbi->vh_mutex);
+                       sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+                       hfsplus_create_cat(sbi->hidden_dir->i_ino, root, &str,
+                                          sbi->hidden_dir);
+                       mutex_unlock(&sbi->vh_mutex);
+
+                       hfsplus_mark_inode_dirty(sbi->hidden_dir,
+                                                HFSPLUS_I_CAT_DIRTY);
+               }
        }
-out:
+
+       sb->s_d_op = &hfsplus_dentry_operations;
+       sb->s_root = d_alloc_root(root);
+       if (!sb->s_root) {
+               err = -ENOMEM;
+               goto out_put_hidden_dir;
+       }
+
        unload_nls(sbi->nls);
        sbi->nls = nls;
        return 0;
 
-cleanup:
-       hfsplus_put_super(sb);
+out_put_hidden_dir:
+       iput(sbi->hidden_dir);
+out_put_root:
+	iput(root);
+out_put_alloc_file:
+       iput(sbi->alloc_file);
+out_close_cat_tree:
+       hfs_btree_close(sbi->cat_tree);
+out_close_ext_tree:
+       hfs_btree_close(sbi->ext_tree);
+out_free_vhdr:
+       kfree(sbi->s_vhdr);
+       kfree(sbi->s_backup_vhdr);
+out_unload_nls:
+       unload_nls(sbi->nls);
        unload_nls(nls);
+       kfree(sbi);
+out:
        return err;
 }
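
The reworked hfsplus_fill_super() error handling is the classic goto ladder: every label undoes exactly one completed setup step, and a failure jumps to the label matching how far setup got, so teardown always runs in reverse order of acquisition. A generic standalone sketch of the idiom (resource names made up for illustration):

#include <stdio.h>
#include <stdlib.h>

static int setup_everything(void)
{
	int err = -1;
	char *first, *second;
	FILE *log;

	first = malloc(64);
	if (!first)
		goto out;

	second = malloc(64);
	if (!second)
		goto out_free_first;

	log = fopen("/tmp/example.log", "w");
	if (!log)
		goto out_free_second;

	/* Fully set up: use the resources, then release them normally. */
	fprintf(log, "ok\n");
	fclose(log);
	free(second);
	free(first);
	return 0;

	/* Error unwinding runs in reverse order of acquisition. */
out_free_second:
	free(second);
out_free_first:
	free(first);
out:
	return err;
}

int main(void)
{
	return setup_everything() ? 1 : 0;
}
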
 
index 1962317..3031d81 100644 (file)
@@ -167,7 +167,7 @@ reread:
                break;
        case cpu_to_be16(HFSP_WRAP_MAGIC):
                if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
-                       goto out;
+                       goto out_free_backup_vhdr;
                wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
                part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
                part_size = wd.embed_count * wd.ablk_size;
@@ -179,7 +179,7 @@ reread:
                 * (should do this only for cdrom/loop though)
                 */
                if (hfs_part_find(sb, &part_start, &part_size))
-                       goto out;
+                       goto out_free_backup_vhdr;
                goto reread;
        }
 
index da85e56..0647d80 100644 (file)
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
                call_rcu(&inode->i_rcu, i_callback);
 }
 
+void address_space_init_once(struct address_space *mapping)
+{
+       memset(mapping, 0, sizeof(*mapping));
+       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+       spin_lock_init(&mapping->tree_lock);
+       spin_lock_init(&mapping->i_mmap_lock);
+       INIT_LIST_HEAD(&mapping->private_list);
+       spin_lock_init(&mapping->private_lock);
+       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+       mutex_init(&mapping->unmap_mutex);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
 /*
  * These are initializations that only need to be done
  * once, because the fields are idempotent across use
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
-       INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-       spin_lock_init(&inode->i_data.tree_lock);
-       spin_lock_init(&inode->i_data.i_mmap_lock);
-       INIT_LIST_HEAD(&inode->i_data.private_list);
-       spin_lock_init(&inode->i_data.private_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
-       INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+       address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
 #ifdef CONFIG_FSNOTIFY
        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb)
 /**
  * invalidate_inodes   - attempt to free all inodes on a superblock
  * @sb:                superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Attempts to free all inodes for a given superblock.  If there were any
  * busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too; otherwise treat
+ * them as busy.
  */
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 {
        int busy = 0;
        struct inode *inode, *next;
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
                        continue;
+               if (inode->i_state & I_DIRTY && !kill_dirty) {
+                       busy = 1;
+                       continue;
+               }
                if (atomic_read(&inode->i_count)) {
                        busy = 1;
                        continue;
index 0663568..9b976b5 100644 (file)
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *);
  */
 extern int get_nr_dirty_inodes(void);
 extern void evict_inodes(struct super_block *);
-extern int invalidate_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *, bool);
index a59635e..1eebeb7 100644 (file)
@@ -273,6 +273,13 @@ int __generic_block_fiemap(struct inode *inode,
                len = isize;
        }
 
+       /*
+        * Some filesystems can't deal with being asked to map less than
+        * blocksize, so make sure our len is at least block length.
+        */
+       if (logical_to_blk(inode, len) == 0)
+               len = blk_to_logical(inode, 1);
+
        start_blk = logical_to_blk(inode, start);
        last_blk = logical_to_blk(inode, start + len - 1);
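
The clamp matters because logical_to_blk() is essentially a right shift by the block-size bits, so a request shorter than one block rounds down to zero blocks and some filesystems refuse to map a zero-length range. A quick arithmetic sketch (4096-byte blocks assumed; the helpers here take the shift directly instead of an inode):

#include <stdio.h>

static unsigned long long logical_to_blk(unsigned long long bytes, unsigned int blkbits)
{
	return bytes >> blkbits;	/* byte offset or length -> block count */
}

static unsigned long long blk_to_logical(unsigned long long blocks, unsigned int blkbits)
{
	return blocks << blkbits;	/* block count -> bytes */
}

int main(void)
{
	unsigned int blkbits = 12;	/* 4096-byte blocks */
	unsigned long long len = 100;	/* caller asked to map only 100 bytes */

	if (logical_to_blk(len, blkbits) == 0)
		len = blk_to_logical(1, blkbits);	/* round up to one full block */

	printf("effective mapping length: %llu bytes\n", len);
	return 0;
}
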
 
index 9e46869..97e7346 100644 (file)
@@ -473,7 +473,8 @@ int __jbd2_log_space_left(journal_t *journal)
 }
 
 /*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
+ * Called with j_state_lock locked for writing.
+ * Returns true if a transaction commit was started.
  */
 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
@@ -520,11 +521,13 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
 {
        transaction_t *transaction = NULL;
        tid_t tid;
+       int need_to_start = 0;
 
        read_lock(&journal->j_state_lock);
        if (journal->j_running_transaction && !current->journal_info) {
                transaction = journal->j_running_transaction;
-               __jbd2_log_start_commit(journal, transaction->t_tid);
+               if (!tid_geq(journal->j_commit_request, transaction->t_tid))
+                       need_to_start = 1;
        } else if (journal->j_committing_transaction)
                transaction = journal->j_committing_transaction;
 
@@ -535,6 +538,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
 
        tid = transaction->t_tid;
        read_unlock(&journal->j_state_lock);
+       if (need_to_start)
+               jbd2_log_start_commit(journal, tid);
        jbd2_log_wait_commit(journal, tid);
        return 1;
 }
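
The need_to_start pattern above depends on tid_geq(), which compares transaction IDs modulo wraparound by looking at the sign of their difference; the commit is only kicked off, outside the read lock, when j_commit_request has not already reached the target tid. A standalone sketch of that wrap-safe comparison (tid_t treated as a 32-bit unsigned counter, as in jbd2):

#include <stdio.h>

typedef unsigned int tid_t;

/* Wrap-safe "x >= y" for monotonically increasing 32-bit transaction IDs. */
static int tid_geq(tid_t x, tid_t y)
{
	int difference = (int)(x - y);

	return difference >= 0;
}

int main(void)
{
	printf("%d\n", tid_geq(5, 3));				/* 1: commit already requested */
	printf("%d\n", tid_geq(3, 5));				/* 0: need to start the commit */
	printf("%d\n", tid_geq(0x00000002, 0xfffffffe));	/* 1: still correct across wraparound */
	return 0;
}
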
index faad2bd..1d11910 100644 (file)
@@ -117,10 +117,10 @@ static inline void update_t_max_wait(transaction_t *transaction)
 static int start_this_handle(journal_t *journal, handle_t *handle,
                             int gfp_mask)
 {
-       transaction_t *transaction;
-       int needed;
-       int nblocks = handle->h_buffer_credits;
-       transaction_t *new_transaction = NULL;
+       transaction_t   *transaction, *new_transaction = NULL;
+       tid_t           tid;
+       int             needed, need_to_start;
+       int             nblocks = handle->h_buffer_credits;
 
        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
@@ -222,8 +222,11 @@ repeat:
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
-               __jbd2_log_start_commit(journal, transaction->t_tid);
+               tid = transaction->t_tid;
+               need_to_start = !tid_geq(journal->j_commit_request, tid);
                read_unlock(&journal->j_state_lock);
+               if (need_to_start)
+                       jbd2_log_start_commit(journal, tid);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
@@ -442,7 +445,8 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
-       int ret;
+       tid_t           tid;
+       int             need_to_start, ret;
 
        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
@@ -465,8 +469,11 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
        spin_unlock(&transaction->t_handle_lock);
 
        jbd_debug(2, "restarting handle %p\n", handle);
-       __jbd2_log_start_commit(journal, transaction->t_tid);
+       tid = transaction->t_tid;
+       need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
+       if (need_to_start)
+               jbd2_log_start_commit(journal, tid);
 
        lock_map_release(&handle->h_lockdep_map);
        handle->h_buffer_credits = nblocks;
index 81ead85..5a2b269 100644
@@ -1600,7 +1600,7 @@ out:
 
 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
        /*
         * This is not negative dentry. Always valid.
index 5f1bcb2..b7c99bf 100644
@@ -520,7 +520,7 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
                                        struct nsm_handle *nsm,
                                        const struct nlm_reboot *info)
 {
-       struct nlm_host *host = NULL;
+       struct nlm_host *host;
        struct hlist_head *chain;
        struct hlist_node *pos;
 
@@ -532,12 +532,13 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
                        host->h_state++;
 
                        nlm_get_host(host);
-                       goto out;
+                       mutex_unlock(&nlm_host_mutex);
+                       return host;
                }
        }
-out:
+
        mutex_unlock(&nlm_host_mutex);
-       return host;
+       return NULL;
 }
 
 /**
index ce7337d..6e6777f 100644
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
                new_de = minix_find_entry(new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                minix_set_link(new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= info->s_link_max)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = minix_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
 
        minix_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                minix_set_link(dir_de, dir_page, new_dir);
index 7d77f24..a4689eb 100644
@@ -455,14 +455,6 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
        struct fs_struct *fs = current->fs;
        struct dentry *parent = nd->path.dentry;
 
-       /*
-        * It can be possible to revalidate the dentry that we started
-        * the path walk with. force_reval_path may also revalidate the
-        * dentry already committed to the nameidata.
-        */
-       if (unlikely(parent == dentry))
-               return nameidata_drop_rcu(nd);
-
        BUG_ON(!(nd->flags & LOOKUP_RCU));
        if (nd->root.mnt) {
                spin_lock(&fs->lock);
@@ -561,39 +553,25 @@ static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd)
  */
 void release_open_intent(struct nameidata *nd)
 {
-       if (nd->intent.open.file->f_path.dentry == NULL)
-               put_filp(nd->intent.open.file);
-       else
-               fput(nd->intent.open.file);
-}
-
-/*
- * Call d_revalidate and handle filesystems that request rcu-walk
- * to be dropped. This may be called and return in rcu-walk mode,
- * regardless of success or error. If -ECHILD is returned, the caller
- * must return -ECHILD back up the path walk stack so path walk may
- * be restarted in ref-walk mode.
- */
-static int d_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-       int status;
+       struct file *file = nd->intent.open.file;
 
-       status = dentry->d_op->d_revalidate(dentry, nd);
-       if (status == -ECHILD) {
-               if (nameidata_dentry_drop_rcu(nd, dentry))
-                       return status;
-               status = dentry->d_op->d_revalidate(dentry, nd);
+       if (file && !IS_ERR(file)) {
+               if (file->f_path.dentry == NULL)
+                       put_filp(file);
+               else
+                       fput(file);
        }
+}
 
-       return status;
+static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+       return dentry->d_op->d_revalidate(dentry, nd);
 }
 
-static inline struct dentry *
+static struct dentry *
 do_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       int status;
-
-       status = d_revalidate(dentry, nd);
+       int status = d_revalidate(dentry, nd);
        if (unlikely(status <= 0)) {
                /*
                 * The dentry failed validation.
@@ -602,24 +580,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
                 * to return a fail status.
                 */
                if (status < 0) {
-                       /* If we're in rcu-walk, we don't have a ref */
-                       if (!(nd->flags & LOOKUP_RCU))
-                               dput(dentry);
+                       dput(dentry);
                        dentry = ERR_PTR(status);
-
-               } else {
-                       /* Don't d_invalidate in rcu-walk mode */
-                       if (nameidata_dentry_drop_rcu_maybe(nd, dentry))
-                               return ERR_PTR(-ECHILD);
-                       if (!d_invalidate(dentry)) {
-                               dput(dentry);
-                               dentry = NULL;
-                       }
+               } else if (!d_invalidate(dentry)) {
+                       dput(dentry);
+                       dentry = NULL;
                }
        }
        return dentry;
 }
 
+static inline struct dentry *
+do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd)
+{
+       int status = d_revalidate(dentry, nd);
+       if (likely(status > 0))
+               return dentry;
+       if (status == -ECHILD) {
+               if (nameidata_dentry_drop_rcu(nd, dentry))
+                       return ERR_PTR(-ECHILD);
+               return do_revalidate(dentry, nd);
+       }
+       if (status < 0)
+               return ERR_PTR(status);
+       /* Don't d_invalidate in rcu-walk mode */
+       if (nameidata_dentry_drop_rcu(nd, dentry))
+               return ERR_PTR(-ECHILD);
+       if (!d_invalidate(dentry)) {
+               dput(dentry);
+               dentry = NULL;
+       }
+       return dentry;
+}
+
 static inline int need_reval_dot(struct dentry *dentry)
 {
        if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
@@ -664,9 +657,6 @@ force_reval_path(struct path *path, struct nameidata *nd)
                return 0;
 
        if (!status) {
-               /* Don't d_invalidate in rcu-walk mode */
-               if (nameidata_drop_rcu(nd))
-                       return -ECHILD;
                d_invalidate(dentry);
                status = -ESTALE;
        }
@@ -773,6 +763,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p)
        int error;
        struct dentry *dentry = link->dentry;
 
+       BUG_ON(nd->flags & LOOKUP_RCU);
+
        touch_atime(link->mnt, dentry);
        nd_set_link(nd, NULL);
 
@@ -803,10 +795,16 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p)
  * Without that kind of total limit, nasty chains of consecutive
  * symlinks can cause almost arbitrarily long lookups. 
  */
-static inline int do_follow_link(struct path *path, struct nameidata *nd)
+static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd)
 {
        void *cookie;
        int err = -ELOOP;
+
+       /* We drop rcu-walk here */
+       if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
+               return -ECHILD;
+       BUG_ON(inode != path->dentry->d_inode);
+
        if (current->link_count >= MAX_NESTED_LINKS)
                goto loop;
        if (current->total_link_count >= 40)
@@ -1251,9 +1249,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
                        return -ECHILD;
 
                nd->seq = seq;
-               if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-                       goto need_revalidate;
-done2:
+               if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+                       dentry = do_revalidate_rcu(dentry, nd);
+                       if (!dentry)
+                               goto need_lookup;
+                       if (IS_ERR(dentry))
+                               goto fail;
+                       if (!(nd->flags & LOOKUP_RCU))
+                               goto done;
+               }
                path->mnt = mnt;
                path->dentry = dentry;
                if (likely(__follow_mount_rcu(nd, path, inode, false)))
@@ -1266,8 +1270,13 @@ done2:
        if (!dentry)
                goto need_lookup;
 found:
-       if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-               goto need_revalidate;
+       if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+               dentry = do_revalidate(dentry, nd);
+               if (!dentry)
+                       goto need_lookup;
+               if (IS_ERR(dentry))
+                       goto fail;
+       }
 done:
        path->mnt = mnt;
        path->dentry = dentry;
@@ -1309,16 +1318,6 @@ need_lookup:
        mutex_unlock(&dir->i_mutex);
        goto found;
 
-need_revalidate:
-       dentry = do_revalidate(dentry, nd);
-       if (!dentry)
-               goto need_lookup;
-       if (IS_ERR(dentry))
-               goto fail;
-       if (nd->flags & LOOKUP_RCU)
-               goto done2;
-       goto done;
-
 fail:
        return PTR_ERR(dentry);
 }
@@ -1415,11 +1414,7 @@ exec_again:
                        goto out_dput;
 
                if (inode->i_op->follow_link) {
-                       /* We commonly drop rcu-walk here */
-                       if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
-                               return -ECHILD;
-                       BUG_ON(inode != next.dentry->d_inode);
-                       err = do_follow_link(&next, nd);
+                       err = do_follow_link(inode, &next, nd);
                        if (err)
                                goto return_err;
                        nd->inode = nd->path.dentry->d_inode;
@@ -1463,10 +1458,7 @@ last_component:
                        break;
                if (inode && unlikely(inode->i_op->follow_link) &&
                    (lookup_flags & LOOKUP_FOLLOW)) {
-                       if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
-                               return -ECHILD;
-                       BUG_ON(inode != next.dentry->d_inode);
-                       err = do_follow_link(&next, nd);
+                       err = do_follow_link(inode, &next, nd);
                        if (err)
                                goto return_err;
                        nd->inode = nd->path.dentry->d_inode;
@@ -1500,12 +1492,15 @@ return_reval:
                 * We may need to check the cached dentry for staleness.
                 */
                if (need_reval_dot(nd->path.dentry)) {
+                       if (nameidata_drop_rcu_last_maybe(nd))
+                               return -ECHILD;
                        /* Note: we do not d_invalidate() */
                        err = d_revalidate(nd->path.dentry, nd);
                        if (!err)
                                err = -ESTALE;
                        if (err < 0)
                                break;
+                       return 0;
                }
 return_base:
                if (nameidata_drop_rcu_last_maybe(nd))
@@ -1551,6 +1546,7 @@ static int path_walk(const char *name, struct nameidata *nd)
                /* nd->path had been dropped */
                current->total_link_count = 0;
                nd->path = save;
+               nd->inode = save.dentry->d_inode;
                path_get(&nd->path);
                nd->flags |= LOOKUP_REVAL;
                result = link_path_walk(name, nd);
@@ -2265,8 +2261,6 @@ static struct file *finish_open(struct nameidata *nd,
        return filp;
 
 exit:
-       if (!IS_ERR(nd->intent.open.file))
-               release_open_intent(nd);
        path_put(&nd->path);
        return ERR_PTR(error);
 }
@@ -2389,8 +2383,6 @@ exit_mutex_unlock:
 exit_dput:
        path_put_conditional(path, nd);
 exit:
-       if (!IS_ERR(nd->intent.open.file))
-               release_open_intent(nd);
        path_put(&nd->path);
        return ERR_PTR(error);
 }
@@ -2464,21 +2456,29 @@ struct file *do_filp_open(int dfd, const char *pathname,
        /* !O_CREAT, simple open */
        error = do_path_lookup(dfd, pathname, flags, &nd);
        if (unlikely(error))
-               goto out_filp;
+               goto out_filp2;
        error = -ELOOP;
        if (!(nd.flags & LOOKUP_FOLLOW)) {
                if (nd.inode->i_op->follow_link)
-                       goto out_path;
+                       goto out_path2;
        }
        error = -ENOTDIR;
        if (nd.flags & LOOKUP_DIRECTORY) {
                if (!nd.inode->i_op->lookup)
-                       goto out_path;
+                       goto out_path2;
        }
        audit_inode(pathname, nd.path.dentry);
        filp = finish_open(&nd, open_flag, acc_mode);
+out2:
+       release_open_intent(&nd);
        return filp;
 
+out_path2:
+       path_put(&nd.path);
+out_filp2:
+       filp = ERR_PTR(error);
+       goto out2;
+
 creat:
        /* OK, have to create the file. Find the parent. */
        error = path_init_rcu(dfd, pathname,
@@ -2553,6 +2553,7 @@ out:
                path_put(&nd.root);
        if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL))
                goto reval;
+       release_open_intent(&nd);
        return filp;
 
 exit_dput:
@@ -2560,8 +2561,6 @@ exit_dput:
 out_path:
        path_put(&nd.path);
 out_filp:
-       if (!IS_ERR(nd.intent.open.file))
-               release_open_intent(&nd);
        filp = ERR_PTR(error);
        goto out;
 }
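
For readers not steeped in rcu-walk: the do_revalidate_rcu()/do_revalidate() split above follows the usual "try the lockless path, fall back on -ECHILD" pattern. A loose userspace analogy follows; the revalidate helpers and the notion of a "stable" entry are invented for the sketch, and only the -ECHILD convention comes from the kernel.

/* Model of "try the lockless path, fall back on -ECHILD"; not kernel code. */
#include <errno.h>
#include <stdio.h>

/* Pretend fast path: only succeeds while the entry is "stable". */
static int revalidate_rcu(int stable)
{
	if (!stable)
		return -ECHILD;	/* cannot be done locklessly, ask for fallback */
	return 1;		/* valid */
}

/* Pretend slow path: allowed to sleep, take references, and so on. */
static int revalidate_refwalk(void)
{
	return 1;
}

static int lookup_step(int stable)
{
	int status = revalidate_rcu(stable);

	if (status == -ECHILD) {
		/* Drop out of "rcu mode" and redo the work the slow way. */
		printf("falling back to ref-walk\n");
		status = revalidate_refwalk();
	}
	return status;
}

int main(void)
{
	printf("stable entry:   %d\n", lookup_step(1));
	printf("unstable entry: %d\n", lookup_step(0));
	return 0;
}
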
index 7b0b953..d1edf26 100644
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
                 */
                br_write_lock(vfsmount_lock);
                if (mnt_get_count(mnt) != 2) {
-                       br_write_lock(vfsmount_lock);
+                       br_write_unlock(vfsmount_lock);
                        return -EBUSY;
                }
                br_write_unlock(vfsmount_lock);
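
The fs/namespace.c hunk above is a one-character class of fix for an unbalanced lock: the early-return path took br_write_lock a second time instead of releasing it. A minimal reproduction of the corrected pattern, with a plain pthread mutex standing in for vfsmount_lock:

/* The early-return path must unlock what it locked; not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_umount_check(int refcount)
{
	pthread_mutex_lock(&lock);
	if (refcount != 2) {
		pthread_mutex_unlock(&lock);	/* the fixed line */
		return -EBUSY;
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("busy mount:  %d\n", do_umount_check(3));
	printf("clean mount: %d\n", do_umount_check(2));
	return 0;
}
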
index 1990165..e3d2942 100644
@@ -134,33 +134,6 @@ out_err:
 }
 
 #if defined(CONFIG_NFS_V4_1)
-/*
- *  * CB_SEQUENCE operations will fail until the callback sessionid is set.
- *   */
-int nfs4_set_callback_sessionid(struct nfs_client *clp)
-{
-       struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv;
-       struct nfs4_sessionid *bc_sid;
-
-       if (!serv->sv_bc_xprt)
-               return -EINVAL;
-
-       /* on success freed in xprt_free */
-       bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL);
-       if (!bc_sid)
-               return -ENOMEM;
-       memcpy(bc_sid->data, &clp->cl_session->sess_id.data,
-               NFS4_MAX_SESSIONID_LEN);
-       spin_lock_bh(&serv->sv_cb_lock);
-       serv->sv_bc_xprt->xpt_bc_sid = bc_sid;
-       spin_unlock_bh(&serv->sv_cb_lock);
-       dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__,
-               ((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1],
-               ((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3],
-               serv->sv_bc_xprt);
-       return 0;
-}
-
 /*
  * The callback service for NFSv4.1 callbacks
  */
@@ -266,10 +239,6 @@ static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
                struct nfs_callback_data *cb_info)
 {
 }
-int nfs4_set_callback_sessionid(struct nfs_client *clp)
-{
-       return 0;
-}
 #endif /* CONFIG_NFS_V4_1 */
 
 /*
@@ -359,78 +328,58 @@ void nfs_callback_down(int minorversion)
        mutex_unlock(&nfs_callback_mutex);
 }
 
-static int check_gss_callback_principal(struct nfs_client *clp,
-                                       struct svc_rqst *rqstp)
+/* Boolean check of RPC_AUTH_GSS principal */
+int
+check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
 {
        struct rpc_clnt *r = clp->cl_rpcclient;
        char *p = svc_gss_principal(rqstp);
 
+       if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
+               return 1;
+
        /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
        if (clp->cl_minorversion != 0)
-               return SVC_DROP;
+               return 0;
        /*
         * It might just be a normal user principal, in which case
         * userspace won't bother to tell us the name at all.
         */
        if (p == NULL)
-               return SVC_DENIED;
+               return 0;
 
        /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */
 
        if (memcmp(p, "nfs@", 4) != 0)
-               return SVC_DENIED;
+               return 0;
        p += 4;
        if (strcmp(p, r->cl_server) != 0)
-               return SVC_DENIED;
-       return SVC_OK;
+               return 0;
+       return 1;
 }
 
-/* pg_authenticate method helper */
-static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp)
-{
-       struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp);
-       int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0;
-
-       dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc);
-       if (svc_is_backchannel(rqstp))
-               /* Sessionid (usually) set after CB_NULL ping */
-               return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid,
-                                                 is_cb_compound);
-       else
-               /* No callback identifier in pg_authenticate */
-               return nfs4_find_client_no_ident(svc_addr(rqstp));
-}
-
-/* pg_authenticate method for nfsv4 callback threads. */
+/*
+ * pg_authenticate method for nfsv4 callback threads.
+ *
+ * The authflavor has been negotiated, so an incorrect flavor is a server
+ * bug. Drop packets with incorrect authflavor.
+ *
+ * All other checking is done after NFS decoding, where the nfs_client can
+ * be found in nfs4_callback_compound.
+ */
 static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 {
-       struct nfs_client *clp;
-       RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-       int ret = SVC_OK;
-
-       /* Don't talk to strangers */
-       clp = nfs_cb_find_client(rqstp);
-       if (clp == NULL)
-               return SVC_DROP;
-
-       dprintk("%s: %s NFSv4 callback!\n", __func__,
-                       svc_print_addr(rqstp, buf, sizeof(buf)));
-
        switch (rqstp->rq_authop->flavour) {
-               case RPC_AUTH_NULL:
-                       if (rqstp->rq_proc != CB_NULL)
-                               ret = SVC_DENIED;
-                       break;
-               case RPC_AUTH_UNIX:
-                       break;
-               case RPC_AUTH_GSS:
-                       ret = check_gss_callback_principal(clp, rqstp);
-                       break;
-               default:
-                       ret = SVC_DENIED;
+       case RPC_AUTH_NULL:
+               if (rqstp->rq_proc != CB_NULL)
+                       return SVC_DROP;
+               break;
+       case RPC_AUTH_GSS:
+               /* No RPC_AUTH_GSS support yet in NFSv4.1 */
+                if (svc_is_backchannel(rqstp))
+                       return SVC_DROP;
        }
-       nfs_put_client(clp);
-       return ret;
+       return SVC_OK;
 }
 
 /*
index d3b44f9..46d93ce 100644
@@ -7,6 +7,7 @@
  */
 #ifndef __LINUX_FS_NFS_CALLBACK_H
 #define __LINUX_FS_NFS_CALLBACK_H
+#include <linux/sunrpc/svc.h>
 
 #define NFS4_CALLBACK 0x40000000
 #define NFS4_CALLBACK_XDRSIZE 2048
@@ -37,7 +38,6 @@ enum nfs4_callback_opnum {
 struct cb_process_state {
        __be32                  drc_status;
        struct nfs_client       *clp;
-       struct nfs4_sessionid   *svc_sid; /* v4.1 callback service sessionid */
 };
 
 struct cb_compound_hdr_arg {
@@ -168,7 +168,7 @@ extern unsigned nfs4_callback_layoutrecall(
 extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
 extern void nfs4_cb_take_slot(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4_1 */
-
+extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
                                    struct cb_getattrres *res,
                                    struct cb_process_state *cps);
index 4bb91cb..8958757 100644
@@ -373,17 +373,11 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 {
        struct nfs_client *clp;
        int i;
-       __be32 status;
+       __be32 status = htonl(NFS4ERR_BADSESSION);
 
        cps->clp = NULL;
 
-       status = htonl(NFS4ERR_BADSESSION);
-       /* Incoming session must match the callback session */
-       if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN))
-               goto out;
-
-       clp = nfs4_find_client_sessionid(args->csa_addr,
-                                        &args->csa_sessionid, 1);
+       clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid);
        if (clp == NULL)
                goto out;
 
@@ -414,9 +408,9 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
        res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
        res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
        nfs4_cb_take_slot(clp);
-       cps->clp = clp; /* put in nfs4_callback_compound */
 
 out:
+       cps->clp = clp; /* put in nfs4_callback_compound */
        for (i = 0; i < args->csa_nrclists; i++)
                kfree(args->csa_rclists[i].rcl_refcalls);
        kfree(args->csa_rclists);
index 23112c2..14e0f93 100644
@@ -794,10 +794,9 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 
        if (hdr_arg.minorversion == 0) {
                cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
-               if (!cps.clp)
+               if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
                        return rpc_drop_reply;
-       } else
-               cps.svc_sid = bc_xprt_sid(rqstp);
+       }
 
        hdr_res.taglen = hdr_arg.taglen;
        hdr_res.tag = hdr_arg.tag;
index 192f2f8..bd3ca32 100644
@@ -1206,16 +1206,11 @@ nfs4_find_client_ident(int cb_ident)
  * For CB_COMPOUND calls, find a client by IP address, protocol version,
  * minorversion, and sessionID
  *
- * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service
- * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL
- * can arrive before the callback sessionid is set. For CB_NULL calls,
- * find a client by IP address protocol version, and minorversion.
- *
  * Returns NULL if no such client
  */
 struct nfs_client *
 nfs4_find_client_sessionid(const struct sockaddr *addr,
-                          struct nfs4_sessionid *sid, int is_cb_compound)
+                          struct nfs4_sessionid *sid)
 {
        struct nfs_client *clp;
 
@@ -1227,9 +1222,9 @@ nfs4_find_client_sessionid(const struct sockaddr *addr,
                if (!nfs4_has_session(clp))
                        continue;
 
-               /* Match sessionid unless cb_null call*/
-               if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data,
-                   sid->data, NFS4_MAX_SESSIONID_LEN) != 0))
+               /* Match sessionid */
+               if (memcmp(clp->cl_session->sess_id.data,
+                   sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
                        continue;
 
                atomic_inc(&clp->cl_count);
@@ -1244,7 +1239,7 @@ nfs4_find_client_sessionid(const struct sockaddr *addr,
 
 struct nfs_client *
 nfs4_find_client_sessionid(const struct sockaddr *addr,
-                          struct nfs4_sessionid *sid, int is_cb_compound)
+                          struct nfs4_sessionid *sid)
 {
        return NULL;
 }
index 364e432..bbbc6bf 100644
@@ -23,8 +23,6 @@
 
 static void nfs_do_free_delegation(struct nfs_delegation *delegation)
 {
-       if (delegation->cred)
-               put_rpccred(delegation->cred);
        kfree(delegation);
 }
 
@@ -37,6 +35,10 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
 
 static void nfs_free_delegation(struct nfs_delegation *delegation)
 {
+       if (delegation->cred) {
+               put_rpccred(delegation->cred);
+               delegation->cred = NULL;
+       }
        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 }
 
index e6ace0d..9943a75 100644
@@ -407,15 +407,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                pos += vec->iov_len;
        }
 
+       /*
+        * If no bytes were started, return the error, and let the
+        * generic layer handle the completion.
+        */
+       if (requested_bytes == 0) {
+               nfs_direct_req_release(dreq);
+               return result < 0 ? result : -EIO;
+       }
+
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
-
-       if (requested_bytes != 0)
-               return 0;
-
-       if (result < 0)
-               return result;
-       return -EIO;
+       return 0;
 }
 
 static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
@@ -841,15 +844,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                pos += vec->iov_len;
        }
 
+       /*
+        * If no bytes were started, return the error, and let the
+        * generic layer handle the completion.
+        */
+       if (requested_bytes == 0) {
+               nfs_direct_req_release(dreq);
+               return result < 0 ? result : -EIO;
+       }
+
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
-
-       if (requested_bytes != 0)
-               return 0;
-
-       if (result < 0)
-               return result;
-       return -EIO;
+       return 0;
 }
 
 static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
index d851242..1cc600e 100644
@@ -881,9 +881,10 @@ out:
        return ret;
 }
 
-static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
+       unsigned long ret = 0;
 
        if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
                        && (fattr->valid & NFS_ATTR_FATTR_CHANGE)
@@ -891,25 +892,32 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                nfsi->change_attr = fattr->change_attr;
                if (S_ISDIR(inode->i_mode))
                        nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+               ret |= NFS_INO_INVALID_ATTR;
        }
        /* If we have atomic WCC data, we may update some attributes */
        if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
                        && (fattr->valid & NFS_ATTR_FATTR_CTIME)
-                       && timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
-                       memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+                       && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
+               memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+               ret |= NFS_INO_INVALID_ATTR;
+       }
 
        if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
                        && (fattr->valid & NFS_ATTR_FATTR_MTIME)
                        && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
-                       memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
-                       if (S_ISDIR(inode->i_mode))
-                               nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+               memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+               if (S_ISDIR(inode->i_mode))
+                       nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+               ret |= NFS_INO_INVALID_ATTR;
        }
        if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
                        && (fattr->valid & NFS_ATTR_FATTR_SIZE)
                        && i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
-                       && nfsi->npages == 0)
-                       i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+                       && nfsi->npages == 0) {
+               i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+               ret |= NFS_INO_INVALID_ATTR;
+       }
+       return ret;
 }
 
 /**
@@ -1223,7 +1231,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        | NFS_INO_REVAL_PAGECACHE);
 
        /* Do atomic weak cache consistency updates */
-       nfs_wcc_update_inode(inode, fattr);
+       invalid |= nfs_wcc_update_inode(inode, fattr);
 
        /* More cache consistency checks */
        if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
index 4644f04..cf9fdbd 100644
@@ -133,8 +133,7 @@ extern void nfs_put_client(struct nfs_client *);
 extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
 extern struct nfs_client *nfs4_find_client_ident(int);
 extern struct nfs_client *
-nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *,
-                          int);
+nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *);
 extern struct nfs_server *nfs_create_server(
                                        const struct nfs_parsed_mount_data *,
                                        struct nfs_fh *);
index 9f88c5f..2743427 100644
@@ -311,8 +311,8 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
        if (!nfs_server_capable(inode, NFS_CAP_ACLS))
                goto out;
 
-       /* We are doing this here, because XDR marshalling can only
-          return -ENOMEM. */
+       /* We are doing this here because XDR marshalling does not
+        * return any results; it BUGs instead. */
        status = -ENOSPC;
        if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES)
                goto out;
index 01c5e8b..183c6b1 100644
@@ -1328,10 +1328,13 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
 
        encode_nfs_fh3(xdr, NFS_FH(args->inode));
        encode_uint32(xdr, args->mask);
+
+       base = req->rq_slen;
        if (args->npages != 0)
                xdr_write_pages(xdr, args->pages, 0, args->len);
+       else
+               xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
 
-       base = req->rq_slen;
        error = nfsacl_encode(xdr->buf, base, args->inode,
                            (args->mask & NFS_ACL) ?
                            args->acl_access : NULL, 1, 0);
index 51fe64a..f5c9b12 100644
@@ -214,7 +214,7 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
 
        /* ipv6 length plus port is legal */
        if (rlen > INET6_ADDRSTRLEN + 8) {
-               dprintk("%s Invalid address, length %d\n", __func__,
+               dprintk("%s: Invalid address, length %d\n", __func__,
                        rlen);
                goto out_err;
        }
@@ -225,6 +225,11 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
        /* replace the port dots with dashes for the in4_pton() delimiter*/
        for (i = 0; i < 2; i++) {
                char *res = strrchr(buf, '.');
+               if (!res) {
+                       dprintk("%s: Failed finding expected dots in port\n",
+                               __func__);
+                       goto out_free;
+               }
                *res = '-';
        }
 
@@ -240,7 +245,7 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
        port = htons((tmp[0] << 8) | (tmp[1]));
 
        ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
-       dprintk("%s Decoded address and port %s\n", __func__, buf);
+       dprintk("%s: Decoded address and port %s\n", __func__, buf);
 out_free:
        kfree(buf);
 out_err:
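
The added check guards the loop that rewrites the last two dots of an "a.b.c.d.p1.p2" style address into dashes before parsing; without it, a short or malformed string would dereference a NULL strrchr() result. A standalone version of just that string step (the helper name and the sample addresses are illustrative):

/* Replace the last two dots of "a.b.c.d.p1.p2" with dashes; not kernel code. */
#include <stdio.h>
#include <string.h>

static int fixup_port_delimiters(char *buf)
{
	for (int i = 0; i < 2; i++) {
		char *res = strrchr(buf, '.');

		if (!res) {
			fprintf(stderr, "malformed address: %s\n", buf);
			return -1;	/* the added bail-out */
		}
		*res = '-';
	}
	return 0;
}

int main(void)
{
	char good[] = "192.168.0.1.8.1";	/* port bytes 8.1 == 2049 */
	char bad[]  = "192";

	if (fixup_port_delimiters(good) == 0)
		printf("rewritten: %s\n", good);	/* 192.168.0.1-8-1 */
	fixup_port_delimiters(bad);
	return 0;
}
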
index 9d992b0..1ff76ac 100644
@@ -50,6 +50,8 @@
 #include <linux/module.h>
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
+#include <linux/utsname.h>
+#include <linux/mm.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -3251,6 +3253,35 @@ static void buf_to_pages(const void *buf, size_t buflen,
        }
 }
 
+static int buf_to_pages_noslab(const void *buf, size_t buflen,
+               struct page **pages, unsigned int *pgbase)
+{
+       struct page *newpage, **spages;
+       int rc = 0;
+       size_t len;
+       spages = pages;
+
+       do {
+               len = min(PAGE_CACHE_SIZE, buflen);
+               newpage = alloc_page(GFP_KERNEL);
+
+               if (newpage == NULL)
+                       goto unwind;
+               memcpy(page_address(newpage), buf, len);
+                buf += len;
+                buflen -= len;
+               *pages++ = newpage;
+               rc++;
+       } while (buflen != 0);
+
+       return rc;
+
+unwind:
+       for(; rc > 0; rc--)
+               __free_page(spages[rc-1]);
+       return -ENOMEM;
+}
+
 struct nfs4_cached_acl {
        int cached;
        size_t len;
@@ -3419,13 +3450,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
                .rpc_argp       = &arg,
                .rpc_resp       = &res,
        };
-       int ret;
+       int ret, i;
 
        if (!nfs4_server_supports_acls(server))
                return -EOPNOTSUPP;
+       i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
+       if (i < 0)
+               return i;
        nfs_inode_return_delegation(inode);
-       buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
        ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
+
+       /*
+        * Free each page after tx, so the only ref left is
+        * held by the network stack
+        */
+       for (; i > 0; i--)
+               put_page(pages[i-1]);
+
        /*
         * Acl update can result in inode attribute update.
         * so mark the attribute cache invalid.
@@ -4572,27 +4613,16 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
        *p = htonl((u32)clp->cl_boot_time.tv_nsec);
        args.verifier = &verifier;
 
-       while (1) {
-               args.id_len = scnprintf(args.id, sizeof(args.id),
-                                       "%s/%s %u",
-                                       clp->cl_ipaddr,
-                                       rpc_peeraddr2str(clp->cl_rpcclient,
-                                                        RPC_DISPLAY_ADDR),
-                                       clp->cl_id_uniquifier);
-
-               status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
-
-               if (status != -NFS4ERR_CLID_INUSE)
-                       break;
-
-               if (signalled())
-                       break;
-
-               if (++clp->cl_id_uniquifier == 0)
-                       break;
-       }
+       args.id_len = scnprintf(args.id, sizeof(args.id),
+                               "%s/%s.%s/%u",
+                               clp->cl_ipaddr,
+                               init_utsname()->nodename,
+                               init_utsname()->domainname,
+                               clp->cl_rpcclient->cl_auth->au_flavor);
 
-       status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+       status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+       if (!status)
+               status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
        dprintk("<-- %s status= %d\n", __func__, status);
        return status;
 }
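
The retry loop and the uniquifier are gone; the EXCHANGE_ID client owner is now derived once from the client IP, the node and domain names and the auth flavour. A rough userspace approximation of that string construction using uname(2) follows; the sample IP, the flavour value and the buffer size are made up for the example.

/* Approximation of the new EXCHANGE_ID client identifier string. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname uts;
	char id[128];
	unsigned int auth_flavor = 1;		/* AUTH_UNIX, for example */
	const char *client_ip = "192.0.2.10";	/* stands in for clp->cl_ipaddr */
	int len;

	if (uname(&uts) != 0) {
		perror("uname");
		return 1;
	}

	/* Same "%s/%s.%s/%u" shape as the patched kernel code. */
	len = snprintf(id, sizeof(id), "%s/%s.%s/%u",
		       client_ip, uts.nodename, uts.domainname, auth_flavor);
	printf("client owner (%d bytes): %s\n", len, id);
	return 0;
}
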
index 2336d53..e6742b5 100644
@@ -232,12 +232,6 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
        status = nfs4_proc_create_session(clp);
        if (status != 0)
                goto out;
-       status = nfs4_set_callback_sessionid(clp);
-       if (status != 0) {
-               printk(KERN_WARNING "Sessionid not set. No callback service\n");
-               nfs_callback_down(1);
-               status = 0;
-       }
        nfs41_setup_state_renewal(clp);
        nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
index 2ab8e5c..4e2c168 100644
@@ -6086,11 +6086,11 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
        __be32 *p = xdr_inline_decode(xdr, 4);
        if (unlikely(!p))
                goto out_overflow;
-       if (!ntohl(*p++)) {
+       if (*p == xdr_zero) {
                p = xdr_inline_decode(xdr, 4);
                if (unlikely(!p))
                        goto out_overflow;
-               if (!ntohl(*p++))
+               if (*p == xdr_zero)
                        return -EAGAIN;
                entry->eof = 1;
                return -EBADCOOKIE;
@@ -6101,7 +6101,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
                goto out_overflow;
        entry->prev_cookie = entry->cookie;
        p = xdr_decode_hyper(p, &entry->cookie);
-       entry->len = ntohl(*p++);
+       entry->len = be32_to_cpup(p);
 
        p = xdr_inline_decode(xdr, entry->len);
        if (unlikely(!p))
@@ -6132,9 +6132,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
        if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
                entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
 
-       if (verify_attr_len(xdr, p, len) < 0)
-               goto out_overflow;
-
        return 0;
 
 out_overflow:
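
The readdir decode change compares raw on-the-wire words directly against big-endian constants (xdr_zero) and only byte-swaps values that are actually used numerically (be32_to_cpup). A small endianness demo of the same idea, with htonl()/ntohl() standing in for the kernel's cpu_to_be32()/be32_to_cpup():

/* Comparing raw XDR words vs. converting them; not kernel code. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t xdr_zero = htonl(0);
	const uint32_t xdr_one  = htonl(1);
	uint32_t wire_words[2] = { htonl(0), htonl(37) };	/* as received */

	/* A zero/one flag can be tested without any byte swapping. */
	if (wire_words[0] == xdr_zero)
		printf("first word is the 'false' flag\n");
	if (wire_words[0] != xdr_one)
		printf("first word is not the 'true' flag\n");

	/* A real numeric field still has to be converted to host order. */
	printf("length field = %u\n", (unsigned int)ntohl(wire_words[1]));
	return 0;
}
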
index bc40897..1b1bc1a 100644
@@ -951,7 +951,7 @@ pnfs_put_deviceid_cache(struct nfs_client *clp)
 {
        struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
 
-       dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
+       dprintk("--> %s ({%d})\n", __func__, atomic_read(&local->dc_ref));
        if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
                int i;
                /* Verify cache is empty */
index 10d648e..c8278f4 100644
@@ -932,7 +932,7 @@ out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
-               nfs_writedata_release(data);
+               nfs_writedata_free(data);
        }
        nfs_redirty_request(req);
        return -ENOMEM;
index fc1c525..84c27d6 100644
@@ -42,6 +42,11 @@ struct nfsacl_encode_desc {
        gid_t gid;
 };
 
+struct nfsacl_simple_acl {
+       struct posix_acl acl;
+       struct posix_acl_entry ace[4];
+};
+
 static int
 xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem)
 {
@@ -72,9 +77,20 @@ xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem)
        return 0;
 }
 
-unsigned int
-nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
-             struct posix_acl *acl, int encode_entries, int typeflag)
+/**
+ * nfsacl_encode - Encode an NFSv3 ACL
+ *
+ * @buf: destination xdr_buf to contain XDR encoded ACL
+ * @base: byte offset in xdr_buf where XDR'd ACL begins
+ * @inode: inode of file whose ACL this is
+ * @acl: posix_acl to encode
+ * @encode_entries: whether to encode ACEs as well
+ * @typeflag: ACL type: NFS_ACL_DEFAULT or zero
+ *
+ * Returns size of encoded ACL in bytes or a negative errno value.
+ */
+int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
+                 struct posix_acl *acl, int encode_entries, int typeflag)
 {
        int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;
        struct nfsacl_encode_desc nfsacl_desc = {
@@ -88,17 +104,22 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
                .uid = inode->i_uid,
                .gid = inode->i_gid,
        };
+       struct nfsacl_simple_acl aclbuf;
        int err;
-       struct posix_acl *acl2 = NULL;
 
        if (entries > NFS_ACL_MAX_ENTRIES ||
            xdr_encode_word(buf, base, entries))
                return -EINVAL;
        if (encode_entries && acl && acl->a_count == 3) {
-               /* Fake up an ACL_MASK entry. */
-               acl2 = posix_acl_alloc(4, GFP_KERNEL);
-               if (!acl2)
-                       return -ENOMEM;
+               struct posix_acl *acl2 = &aclbuf.acl;
+
+               /* Avoid the use of posix_acl_alloc().  nfsacl_encode() is
+                * invoked in contexts where a memory allocation failure is
+                * fatal.  Fortunately this fake ACL is small enough to
+                * construct on the stack. */
+               memset(acl2, 0, sizeof(acl2));
+               posix_acl_init(acl2, 4);
+
                /* Insert entries in canonical order: other orders seem
                 to confuse Solaris VxFS. */
                acl2->a_entries[0] = acl->a_entries[0];  /* ACL_USER_OBJ */
@@ -109,8 +130,6 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
                nfsacl_desc.acl = acl2;
        }
        err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc);
-       if (acl2)
-               posix_acl_release(acl2);
        if (!err)
                err = 8 + nfsacl_desc.desc.elem_size *
                          nfsacl_desc.desc.array_len;
@@ -224,9 +243,18 @@ posix_acl_from_nfsacl(struct posix_acl *acl)
        return 0;
 }
 
-unsigned int
-nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
-             struct posix_acl **pacl)
+/**
+ * nfsacl_decode - Decode an NFSv3 ACL
+ *
+ * @buf: xdr_buf containing XDR'd ACL data to decode
+ * @base: byte offset in xdr_buf where XDR'd ACL begins
+ * @aclcnt: count of ACEs in decoded posix_acl
+ * @pacl: buffer in which to place decoded posix_acl
+ *
+ * Returns the length of the decoded ACL in bytes, or a negative errno value.
+ */
+int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
+                 struct posix_acl **pacl)
 {
        struct nfsacl_decode_desc nfsacl_desc = {
                .desc = {
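
The nfsacl_simple_acl wrapper above exists so the encoder can build its four-entry fake ACL on the stack rather than calling posix_acl_alloc() where an allocation failure would be fatal. The same struct-embedding trick in isolation (the types, tags and permission values below are simplified stand-ins; the kernel version backs a flexible a_entries[] array with the trailing ace[4]):

/* Embedding fixed-size entry storage after a header struct; not kernel code. */
#include <stdio.h>
#include <string.h>

struct acl_header {
	unsigned int count;
};

struct acl_entry {
	short tag;
	unsigned short perm;
};

/* Header followed by room for exactly four entries, all on the stack. */
struct simple_acl {
	struct acl_header hdr;
	struct acl_entry  ace[4];
};

int main(void)
{
	struct simple_acl aclbuf;
	struct acl_header *acl = &aclbuf.hdr;

	memset(&aclbuf, 0, sizeof(aclbuf));	/* zero header and entries */
	acl->count = 4;

	aclbuf.ace[0] = (struct acl_entry){ .tag = 1, .perm = 6 };	/* USER_OBJ  */
	aclbuf.ace[1] = (struct acl_entry){ .tag = 2, .perm = 4 };	/* GROUP_OBJ */
	aclbuf.ace[2] = (struct acl_entry){ .tag = 3, .perm = 4 };	/* MASK      */
	aclbuf.ace[3] = (struct acl_entry){ .tag = 4, .perm = 4 };	/* OTHER     */

	printf("built %u-entry ACL without any allocation\n", acl->count);
	return 0;
}
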
index 3be975e..02eb4ed 100644
@@ -432,7 +432,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
         * If the server returns different values for sessionID, slotID or
         * sequence number, the server is looney tunes.
         */
-       p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4);
+       p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
        if (unlikely(p == NULL))
                goto out_overflow;
        memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
@@ -484,7 +484,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr,
 out:
        return status;
 out_default:
-       return nfs_cb_stat_to_errno(status);
+       return nfs_cb_stat_to_errno(nfserr);
 }
 
 /*
@@ -564,11 +564,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
        if (unlikely(status))
                goto out;
        if (unlikely(nfserr != NFS4_OK))
-               goto out_default;
+               status = nfs_cb_stat_to_errno(nfserr);
 out:
        return status;
-out_default:
-       return nfs_cb_stat_to_errno(status);
 }
 
 /*
index d98d021..7b566ec 100644
@@ -230,9 +230,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
        dp->dl_client = clp;
        get_nfs4_file(fp);
        dp->dl_file = fp;
-       dp->dl_vfs_file = find_readable_file(fp);
-       get_file(dp->dl_vfs_file);
-       dp->dl_flock = NULL;
        dp->dl_type = type;
        dp->dl_stateid.si_boot = boot_time;
        dp->dl_stateid.si_stateownerid = current_delegid++;
@@ -241,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
        fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
        dp->dl_time = 0;
        atomic_set(&dp->dl_count, 1);
-       list_add(&dp->dl_perfile, &fp->fi_delegations);
-       list_add(&dp->dl_perclnt, &clp->cl_delegations);
        INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
        return dp;
 }
@@ -253,36 +248,30 @@ nfs4_put_delegation(struct nfs4_delegation *dp)
        if (atomic_dec_and_test(&dp->dl_count)) {
                dprintk("NFSD: freeing dp %p\n",dp);
                put_nfs4_file(dp->dl_file);
-               fput(dp->dl_vfs_file);
                kmem_cache_free(deleg_slab, dp);
                num_delegations--;
        }
 }
 
-/* Remove the associated file_lock first, then remove the delegation.
- * lease_modify() is called to remove the FS_LEASE file_lock from
- * the i_flock list, eventually calling nfsd's lock_manager
- * fl_release_callback.
- */
-static void
-nfs4_close_delegation(struct nfs4_delegation *dp)
+static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 {
-       dprintk("NFSD: close_delegation dp %p\n",dp);
-       /* XXX: do we even need this check?: */
-       if (dp->dl_flock)
-               vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock);
+       if (atomic_dec_and_test(&fp->fi_delegees)) {
+               vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
+               fp->fi_lease = NULL;
+               fp->fi_deleg_file = NULL;
+       }
 }
 
 /* Called under the state lock. */
 static void
 unhash_delegation(struct nfs4_delegation *dp)
 {
-       list_del_init(&dp->dl_perfile);
        list_del_init(&dp->dl_perclnt);
        spin_lock(&recall_lock);
+       list_del_init(&dp->dl_perfile);
        list_del_init(&dp->dl_recall_lru);
        spin_unlock(&recall_lock);
-       nfs4_close_delegation(dp);
+       nfs4_put_deleg_lease(dp->dl_file);
        nfs4_put_delegation(dp);
 }
 
@@ -958,8 +947,6 @@ expire_client(struct nfs4_client *clp)
        spin_lock(&recall_lock);
        while (!list_empty(&clp->cl_delegations)) {
                dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-               dprintk("NFSD: expire client. dp %p, fp %p\n", dp,
-                               dp->dl_flock);
                list_del_init(&dp->dl_perclnt);
                list_move(&dp->dl_recall_lru, &reaplist);
        }
@@ -2078,6 +2065,7 @@ alloc_init_file(struct inode *ino)
                fp->fi_inode = igrab(ino);
                fp->fi_id = current_fileid++;
                fp->fi_had_conflict = false;
+               fp->fi_lease = NULL;
                memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
                memset(fp->fi_access, 0, sizeof(fp->fi_access));
                spin_lock(&recall_lock);
@@ -2329,23 +2317,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access)
                nfs4_file_put_access(fp, O_RDONLY);
 }
 
-/*
- * Spawn a thread to perform a recall on the delegation represented
- * by the lease (file_lock)
- *
- * Called from break_lease() with lock_flocks() held.
- * Note: we assume break_lease will only call this *once* for any given
- * lease.
- */
-static
-void nfsd_break_deleg_cb(struct file_lock *fl)
+static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
 {
-       struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
-
-       dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
-       if (!dp)
-               return;
-
        /* We're assuming the state code never drops its reference
         * without first removing the lease.  Since we're in this lease
         * callback (and since the lease code is serialized by the kernel
@@ -2353,22 +2326,35 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
         * it's safe to take a reference: */
        atomic_inc(&dp->dl_count);
 
-       spin_lock(&recall_lock);
        list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
-       spin_unlock(&recall_lock);
 
        /* only place dl_time is set. protected by lock_flocks*/
        dp->dl_time = get_seconds();
 
+       nfsd4_cb_recall(dp);
+}
+
+/* Called from break_lease() with lock_flocks() held. */
+static void nfsd_break_deleg_cb(struct file_lock *fl)
+{
+       struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
+       struct nfs4_delegation *dp;
+
+       BUG_ON(!fp);
+       /* We assume break_lease is only called once per lease: */
+       BUG_ON(fp->fi_had_conflict);
        /*
         * We don't want the locks code to timeout the lease for us;
-        * we'll remove it ourself if the delegation isn't returned
-        * in time.
+        * we'll remove it ourself if a delegation isn't returned
+        * in time:
         */
        fl->fl_break_time = 0;
 
-       dp->dl_file->fi_had_conflict = true;
-       nfsd4_cb_recall(dp);
+       spin_lock(&recall_lock);
+       fp->fi_had_conflict = true;
+       list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+               nfsd_break_one_deleg(dp);
+       spin_unlock(&recall_lock);
 }
 
 static
@@ -2461,10 +2447,13 @@ find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
 {
        struct nfs4_delegation *dp;
 
-       list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) {
-               if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid)
+       spin_lock(&recall_lock);
+       list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+               if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
+                       spin_unlock(&recall_lock);
                        return dp;
-       }
+               }
+       spin_unlock(&recall_lock);
        return NULL;
 }
 
@@ -2641,6 +2630,66 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
        return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
 }
 
+static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
+{
+       struct file_lock *fl;
+
+       fl = locks_alloc_lock();
+       if (!fl)
+               return NULL;
+       locks_init_lock(fl);
+       fl->fl_lmops = &nfsd_lease_mng_ops;
+       fl->fl_flags = FL_LEASE;
+       fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+       fl->fl_end = OFFSET_MAX;
+       fl->fl_owner = (fl_owner_t)(dp->dl_file);
+       fl->fl_pid = current->tgid;
+       return fl;
+}
+
+static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
+{
+       struct nfs4_file *fp = dp->dl_file;
+       struct file_lock *fl;
+       int status;
+
+       fl = nfs4_alloc_init_lease(dp, flag);
+       if (!fl)
+               return -ENOMEM;
+       fl->fl_file = find_readable_file(fp);
+       list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+       status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
+       if (status) {
+               list_del_init(&dp->dl_perclnt);
+               locks_free_lock(fl);
+               return -ENOMEM;
+       }
+       fp->fi_lease = fl;
+       fp->fi_deleg_file = fl->fl_file;
+       get_file(fp->fi_deleg_file);
+       atomic_set(&fp->fi_delegees, 1);
+       list_add(&dp->dl_perfile, &fp->fi_delegations);
+       return 0;
+}
+
+static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
+{
+       struct nfs4_file *fp = dp->dl_file;
+
+       if (!fp->fi_lease)
+               return nfs4_setlease(dp, flag);
+       spin_lock(&recall_lock);
+       if (fp->fi_had_conflict) {
+               spin_unlock(&recall_lock);
+               return -EAGAIN;
+       }
+       atomic_inc(&fp->fi_delegees);
+       list_add(&dp->dl_perfile, &fp->fi_delegations);
+       spin_unlock(&recall_lock);
+       list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+       return 0;
+}
+
 /*
  * Attempt to hand out a delegation.
  */
@@ -2650,7 +2699,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
        struct nfs4_delegation *dp;
        struct nfs4_stateowner *sop = stp->st_stateowner;
        int cb_up;
-       struct file_lock *fl;
        int status, flag = 0;
 
        cb_up = nfsd4_cb_channel_good(sop->so_client);
@@ -2681,36 +2729,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
        }
 
        dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
-       if (dp == NULL) {
-               flag = NFS4_OPEN_DELEGATE_NONE;
-               goto out;
-       }
-       status = -ENOMEM;
-       fl = locks_alloc_lock();
-       if (!fl)
-               goto out;
-       locks_init_lock(fl);
-       fl->fl_lmops = &nfsd_lease_mng_ops;
-       fl->fl_flags = FL_LEASE;
-       fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-       fl->fl_end = OFFSET_MAX;
-       fl->fl_owner =  (fl_owner_t)dp;
-       fl->fl_file = find_readable_file(stp->st_file);
-       BUG_ON(!fl->fl_file);
-       fl->fl_pid = current->tgid;
-       dp->dl_flock = fl;
-
-       /* vfs_setlease checks to see if delegation should be handed out.
-        * the lock_manager callback fl_change is used
-        */
-       if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
-               dprintk("NFSD: setlease failed [%d], no delegation\n", status);
-               dp->dl_flock = NULL;
-               locks_free_lock(fl);
-               unhash_delegation(dp);
-               flag = NFS4_OPEN_DELEGATE_NONE;
-               goto out;
-       }
+       if (dp == NULL)
+               goto out_no_deleg;
+       status = nfs4_set_delegation(dp, flag);
+       if (status)
+               goto out_free;
 
        memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
 
@@ -2722,6 +2745,12 @@ out:
                        && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
                dprintk("NFSD: WARNING: refusing delegation reclaim\n");
        open->op_delegate_type = flag;
+       return;
+out_free:
+       nfs4_put_delegation(dp);
+out_no_deleg:
+       flag = NFS4_OPEN_DELEGATE_NONE;
+       goto out;
 }
 
 /*
@@ -2916,8 +2945,6 @@ nfs4_laundromat(void)
                                test_val = u;
                        break;
                }
-               dprintk("NFSD: purging unused delegation dp %p, fp %p\n",
-                                   dp, dp->dl_flock);
                list_move(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&recall_lock);
@@ -3128,7 +3155,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
                        goto out;
                renew_client(dp->dl_client);
                if (filpp) {
-                       *filpp = find_readable_file(dp->dl_file);
+                       *filpp = dp->dl_file->fi_deleg_file;
                        BUG_ON(!*filpp);
                }
        } else { /* open or lock stateid */
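
The nfsd4 refactor above moves lease ownership from each delegation to the nfs4_file: the first delegation installs one lease and sets fi_delegees, later ones only bump the count, and nfs4_put_deleg_lease() removes the lease on the last put. A compact userspace model of that ownership scheme, with malloc/free standing in for vfs_setlease() and all locking omitted; it only illustrates the refcount lifecycle, not the kernel API.

/* One shared lease per file, refcounted by its delegations; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct lease { int dummy; };

struct file_state {
	struct lease *lease;	/* NULL until the first delegation */
	int delegees;		/* how many delegations share it */
};

static int add_delegation(struct file_state *fp)
{
	if (!fp->lease) {
		fp->lease = malloc(sizeof(*fp->lease));	/* "vfs_setlease" */
		if (!fp->lease)
			return -1;
		fp->delegees = 1;
		printf("installed lease\n");
		return 0;
	}
	fp->delegees++;		/* reuse the existing lease */
	return 0;
}

static void put_delegation(struct file_state *fp)
{
	if (--fp->delegees == 0) {
		free(fp->lease);	/* "vfs_setlease(F_UNLCK)" */
		fp->lease = NULL;
		printf("removed lease\n");
	}
}

int main(void)
{
	struct file_state fp = { 0 };

	add_delegation(&fp);	/* installs the lease */
	add_delegation(&fp);	/* shares it */
	put_delegation(&fp);	/* still one user left */
	put_delegation(&fp);	/* last put removes the lease */
	return 0;
}
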
index 956629b..615f0a9 100644 (file)
@@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                READ_BUF(dummy32);
                len += (XDR_QUADLEN(dummy32) << 2);
                READMEM(buf, dummy32);
-               if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
-                       goto out_nfserr;
+               if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+                       return status;
                iattr->ia_valid |= ATTR_UID;
        }
        if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
@@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                READ_BUF(dummy32);
                len += (XDR_QUADLEN(dummy32) << 2);
                READMEM(buf, dummy32);
-               if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
-                       goto out_nfserr;
+               if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+                       return status;
                iattr->ia_valid |= ATTR_GID;
        }
        if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
 
        u32 dummy;
        char *machine_name;
-       int i;
+       int i, j;
        int nr_secflavs;
 
        READ_BUF(16);
@@ -1215,7 +1215,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
                        READ_BUF(4);
                        READ32(dummy);
                        READ_BUF(dummy * 4);
-                       for (i = 0; i < dummy; ++i)
+                       for (j = 0; j < dummy; ++j)
                                READ32(dummy);
                        break;
                case RPC_AUTH_GSS:
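The create_session decode fix above introduces a second index, j, because the inner loop over the per-flavor word list reused i and so clobbered the outer loop's position over the security flavors. The bug class is easy to show in miniature (hypothetical example, not the nfsd decoder itself):

#include <stdio.h>

int main(void)
{
	int flavors[3] = { 3, 1, 2 };   /* pretend per-flavor item counts */
	int i, j, items = 0;

	for (i = 0; i < 3; i++) {
		/* the removed code reused "i" here, resetting the outer index */
		for (j = 0; j < flavors[i]; ++j)
			items++;        /* consume one 32-bit word per item */
	}
	printf("processed %d items over %d flavors\n", items, i);
	return 0;
}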
index 3074656..2d31224 100644 (file)
@@ -83,8 +83,6 @@ struct nfs4_delegation {
        atomic_t                dl_count;       /* ref count */
        struct nfs4_client      *dl_client;
        struct nfs4_file        *dl_file;
-       struct file             *dl_vfs_file;
-       struct file_lock        *dl_flock;
        u32                     dl_type;
        time_t                  dl_time;
 /* For recall: */
@@ -379,6 +377,9 @@ struct nfs4_file {
         */
        atomic_t                fi_readers;
        atomic_t                fi_writers;
+       struct file             *fi_deleg_file;
+       struct file_lock        *fi_lease;
+       atomic_t                fi_delegees;
        struct inode            *fi_inode;
        u32                     fi_id;      /* used with stateowner->so_id 
                                             * for stateid_hashtbl hash */
index 641117f..da1d970 100644 (file)
@@ -808,7 +808,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino)
                if (ra->p_count == 0)
                        frap = rap;
        }
-       depth = nfsdstats.ra_size*11/10;
+       depth = nfsdstats.ra_size;
        if (!frap) {    
                spin_unlock(&rab->pb_lock);
                return NULL;
@@ -1742,6 +1742,13 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
                goto out_dput_new;
 
        host_err = nfsd_break_lease(odentry->d_inode);
+       if (host_err)
+               goto out_drop_write;
+       if (ndentry->d_inode) {
+               host_err = nfsd_break_lease(ndentry->d_inode);
+               if (host_err)
+                       goto out_drop_write;
+       }
        if (host_err)
                goto out_drop_write;
        host_err = vfs_rename(fdir, odentry, tdir, ndentry);
@@ -1812,22 +1819,22 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 
        host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
        if (host_err)
-               goto out_nfserr;
+               goto out_put;
 
        host_err = nfsd_break_lease(rdentry->d_inode);
        if (host_err)
-               goto out_put;
+               goto out_drop_write;
        if (type != S_IFDIR)
                host_err = vfs_unlink(dirp, rdentry);
        else
                host_err = vfs_rmdir(dirp, rdentry);
-out_put:
-       dput(rdentry);
-
        if (!host_err)
                host_err = commit_metadata(fhp);
-
+out_drop_write:
        mnt_drop_write(fhp->fh_export->ex_path.mnt);
+out_put:
+       dput(rdentry);
+
 out_nfserr:
        err = nfserrno(host_err);
 out:
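The nfsd_unlink() label rework above restores the usual rule that cleanup runs in the reverse order of acquisition: the dentry reference is taken first and dropped last, and write access on the mount is dropped before it. A small standalone sketch of that goto-unwind shape (the helper names are placeholders, not kernel APIs):

#include <stdio.h>

static int take_ref(void)    { return 0; }   /* stands in for dget()           */
static int start_write(void) { return 0; }   /* stands in for mnt_want_write() */
static int do_unlink(void)   { return -1; }  /* pretend the operation fails    */
static void end_write(void)  { puts("drop write access"); }
static void put_ref(void)    { puts("drop dentry reference"); }

int main(void)
{
	int err;

	err = take_ref();
	if (err)
		goto out;
	err = start_write();
	if (err)
		goto out_put;           /* only the reference was taken */
	err = do_unlink();
	/* write access is dropped whether or not the operation failed */
	end_write();
out_put:
	put_ref();
out:
	return err ? 1 : 0;
}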
index 388e9e8..85f7baa 100644 (file)
 #include "btnode.h"
 
 
-void nilfs_btnode_cache_init_once(struct address_space *btnc)
-{
-       nilfs_mapping_init_once(btnc);
-}
-
 static const struct address_space_operations def_btnode_aops = {
        .sync_page              = block_sync_page,
 };
index 7903749..1b8ebd8 100644 (file)
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init_once(struct address_space *);
 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
index 6a0e2a1..a0babd2 100644 (file)
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
        struct backing_dev_info *bdi = inode->i_sb->s_bdi;
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
-       nilfs_mapping_init_once(&shadow->frozen_data);
+       address_space_init_once(&shadow->frozen_data);
        nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
-       nilfs_mapping_init_once(&shadow->frozen_btnodes);
+       address_space_init_once(&shadow->frozen_btnodes);
        nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
        mi->mi_shadow = shadow;
        return 0;
index 9803427..161791d 100644 (file)
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inc_nlink(old_inode);
                nilfs_set_link(new_dir, new_de, new_page, old_inode);
                nilfs_mark_inode_dirty(new_dir);
                new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= NILFS_LINK_MAX)
                                goto out_dir;
                }
-               inc_nlink(old_inode);
                err = nilfs_add_link(new_dentry, old_inode);
-               if (err) {
-                       drop_nlink(old_inode);
-                       nilfs_mark_inode_dirty(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de) {
                        inc_nlink(new_dir);
                        nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_inode->i_ctime = CURRENT_TIME;
 
        nilfs_delete_entry(old_de, old_page);
-       drop_nlink(old_inode);
 
        if (dir_de) {
                nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
index 0c43241..a585b35 100644 (file)
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init_once(struct address_space *mapping)
-{
-       memset(mapping, 0, sizeof(*mapping));
-       INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-       spin_lock_init(&mapping->tree_lock);
-       INIT_LIST_HEAD(&mapping->private_list);
-       spin_lock_init(&mapping->private_lock);
-
-       spin_lock_init(&mapping->i_mmap_lock);
-       INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-       INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-}
-
 void nilfs_mapping_init(struct address_space *mapping,
                        struct backing_dev_info *bdi,
                        const struct address_space_operations *aops)
index 622df27..2a00953 100644 (file)
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init_once(struct address_space *mapping);
 void nilfs_mapping_init(struct address_space *mapping,
                        struct backing_dev_info *bdi,
                        const struct address_space_operations *aops);
index 55ebae5..2de9f63 100644 (file)
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 
-       if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
+       if (NILFS_I(inode)->i_root &&
+           !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
 }
index 0994f6a..1673b3d 100644 (file)
@@ -704,7 +704,8 @@ skip_mount_setup:
        sbp[0]->s_state =
                cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
        /* synchronize sbp[1] with sbp[0] */
-       memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+       if (sbp[1])
+               memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
        return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
 }
 
@@ -1278,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
        init_rwsem(&ii->xattr_sem);
 #endif
-       nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+       address_space_init_once(&ii->i_btnode_cache);
        ii->i_bmap = &ii->i_bmap_data;
        inode_init_once(&ii->vfs_inode);
 }
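The nilfs hunks drop the private nilfs_mapping_init_once()/nilfs_btnode_cache_init_once() helpers in favour of the generic address_space_init_once(), called from the inode cache's constructor so each slab object is set up exactly once, when it is first created. A kernel-context sketch of that constructor wiring (it will not build outside a kernel tree; the demo_* names are placeholders):

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>

struct demo_inode {
	struct address_space	i_btnode_cache;
	struct inode		vfs_inode;
};

static void demo_inode_init_once(void *obj)
{
	struct demo_inode *di = obj;

	address_space_init_once(&di->i_btnode_cache);
	inode_init_once(&di->vfs_inode);
}

static struct kmem_cache *demo_inode_cachep;

static int __init demo_cache_create(void)
{
	/* the constructor runs once per object, not once per allocation */
	demo_inode_cachep = kmem_cache_create("demo_inode_cache",
					      sizeof(struct demo_inode), 0,
					      SLAB_RECLAIM_ACCOUNT,
					      demo_inode_init_once);
	return demo_inode_cachep ? 0 : -ENOMEM;
}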
index b572b67..326e747 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -2576,6 +2576,8 @@ mft_rec_already_initialized:
        flush_dcache_page(page);
        SetPageUptodate(page);
        if (base_ni) {
+               MFT_RECORD *m_tmp;
+
                /*
                 * Setup the base mft record in the extent mft record.  This
                 * completes initialization of the allocated extent mft record
@@ -2588,11 +2590,11 @@ mft_rec_already_initialized:
                 * attach it to the base inode @base_ni and map, pin, and lock
                 * its, i.e. the allocated, mft record.
                 */
-               m = map_extent_mft_record(base_ni, bit, &ni);
-               if (IS_ERR(m)) {
+               m_tmp = map_extent_mft_record(base_ni, bit, &ni);
+               if (IS_ERR(m_tmp)) {
                        ntfs_error(vol->sb, "Failed to map allocated extent "
                                        "mft record 0x%llx.", (long long)bit);
-                       err = PTR_ERR(m);
+                       err = PTR_ERR(m_tmp);
                        /* Set the mft record itself not in use. */
                        m->flags &= cpu_to_le16(
                                        ~le16_to_cpu(MFT_RECORD_IN_USE));
@@ -2603,6 +2605,7 @@ mft_rec_already_initialized:
                        ntfs_unmap_page(page);
                        goto undo_mftbmp_alloc;
                }
+               BUG_ON(m != m_tmp);
                /*
                 * Make sure the allocated mft record is written out to disk.
                 * No need to set the inode dirty because the caller is going
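The mft.c hunk stores the result of map_extent_mft_record() in a new m_tmp rather than reusing m, because m is still dereferenced in the error path to clear MFT_RECORD_IN_USE; overwriting it with an error pointer would have broken that rollback. A small standalone illustration of the same pattern (map_record() and record_clear_in_use() are invented stand-ins, using NULL where the kernel uses IS_ERR()):

#include <stdio.h>

struct record { int in_use; };

static struct record *map_record(int fail)
{
	static struct record extent = { 1 };
	return fail ? NULL : &extent;           /* NULL plays the role of IS_ERR() */
}

static void record_clear_in_use(struct record *r) { r->in_use = 0; }

int main(void)
{
	struct record base = { 1 };
	struct record *m = &base;               /* still needed on failure */
	struct record *m_tmp;

	m_tmp = map_record(1 /* simulate failure */);
	if (!m_tmp) {
		/* had we written "m = map_record(...)", m would now be garbage */
		record_clear_in_use(m);
		fprintf(stderr, "map failed, rolled back base record\n");
		return 1;
	}
	return 0;
}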
index 6d80ecc..7eb9040 100644 (file)
@@ -56,7 +56,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
        int ret = 0;    /* if all else fails, just return false */
        struct ocfs2_super *osb;
 
-       if (nd->flags & LOOKUP_RCU)
+       if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;
 
        inode = dentry->d_inode;
index 43e56b9..6180da1 100644 (file)
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
               ocfs2_quota_trans_credits(sb);
 }
 
-/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
- * bitmap block for the new bit) dx_root update for free list */
-#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
+/* data block for new dir/symlink, allocation of directory block, dx_root
+ * update for free list */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
 
 static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
 {
index b5f9160..19ebc5a 100644 (file)
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
                                        u32 num_clusters, unsigned int e_flags)
 {
        int ret, delete, index, credits =  0;
-       u32 new_bit, new_len;
+       u32 new_bit, new_len, orig_num_clusters;
        unsigned int set_len;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        handle_t *handle;
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
                goto out;
        }
 
+       orig_num_clusters = num_clusters;
+
        while (num_clusters) {
                ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
                                             p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
         * in write-back mode.
         */
        if (context->get_clusters == ocfs2_di_get_clusters) {
-               ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+               ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+                                              orig_num_clusters);
                if (ret)
                        mlog_errno(ret);
        }
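ocfs2_make_clusters_writable() consumes num_clusters as it loops, so the new orig_num_clusters snapshot is what the final ocfs2_cow_sync_writeback() call must cover; before the fix the writeback was asked to sync whatever was left of the counter, which is zero. The bug class in miniature:

#include <stdio.h>

int main(void)
{
	unsigned int num_clusters = 8, orig_num_clusters, done = 0;

	orig_num_clusters = num_clusters;       /* snapshot before the loop eats it */
	while (num_clusters) {
		unsigned int chunk = num_clusters > 3 ? 3 : num_clusters;

		done += chunk;
		num_clusters -= chunk;
	}
	/* post-loop work must use the snapshot, not the (now zero) counter */
	printf("sync writeback over %u clusters (leftover counter: %u)\n",
	       orig_num_clusters, num_clusters);
	return 0;
}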
index 38f986d..36c423f 100644 (file)
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
                               struct mount_options *mopt,
                               int is_remount)
 {
-       int status;
+       int status, user_stack = 0;
        char *p;
        u32 tmp;
 
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
                        memcpy(mopt->cluster_stack, args[0].from,
                               OCFS2_STACK_LABEL_LEN);
                        mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+                       /*
+                        * Open code the memcmp here as we don't have
+                        * an osb to pass to
+                        * ocfs2_userspace_stack().
+                        */
+                       if (memcmp(mopt->cluster_stack,
+                                  OCFS2_CLASSIC_CLUSTER_STACK,
+                                  OCFS2_STACK_LABEL_LEN))
+                               user_stack = 1;
                        break;
                case Opt_inode64:
                        mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
                }
        }
 
-       /* Ensure only one heartbeat mode */
-       tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
-                                OCFS2_MOUNT_HB_NONE);
-       if (hweight32(tmp) != 1) {
-               mlog(ML_ERROR, "Invalid heartbeat mount options\n");
-               status = 0;
-               goto bail;
+       if (user_stack == 0) {
+               /* Ensure only one heartbeat mode */
+               tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+                                        OCFS2_MOUNT_HB_GLOBAL |
+                                        OCFS2_MOUNT_HB_NONE);
+               if (hweight32(tmp) != 1) {
+                       mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+                       status = 0;
+                       goto bail;
+               }
        }
 
        status = 1;
index e52389e..b47aab3 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -233,6 +233,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
+
+       /* It's not possible to punch a hole in an append-only file */
+       if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
+               return -EPERM;
+
+       if (IS_IMMUTABLE(inode))
+               return -EPERM;
+
        /*
         * Revalidate the write permissions, in case security policy has
         * changed since the files were opened.
@@ -790,6 +798,8 @@ struct file *nameidata_to_filp(struct nameidata *nd)
 
        /* Pick up the filp from the open intent */
        filp = nd->intent.open.file;
+       nd->intent.open.file = NULL;
+
        /* Has the filesystem initialised the file for us? */
        if (filp->f_path.dentry == NULL) {
                path_get(&nd->path);
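The do_fallocate() hunk adds two cheap up-front checks: hole punching is refused on append-only files, and any fallocate is refused on immutable files, before the more expensive permission revalidation runs. A small standalone sketch of that early-rejection shape (the flag names below are local to the example, not kernel constants):

#include <stdio.h>

#define FL_PUNCH_HOLE	0x02
#define F_APPEND_ONLY	0x01
#define F_IMMUTABLE	0x02

static int check_fallocate(int mode, int file_flags)
{
	if ((mode & FL_PUNCH_HOLE) && (file_flags & F_APPEND_ONLY))
		return -1;	/* cannot punch holes in an append-only file */
	if (file_flags & F_IMMUTABLE)
		return -1;	/* immutable files cannot be modified at all  */
	return 0;		/* fall through to the real allocation work   */
}

int main(void)
{
	printf("punch hole, append-only: %d\n",
	       check_fallocate(FL_PUNCH_HOLE, F_APPEND_ONLY));
	printf("plain fallocate, normal file: %d\n",
	       check_fallocate(0, 0));
	return 0;
}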
index 789c625..b10e354 100644 (file)
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
        }
 
        vm->vblk_size     = get_unaligned_be32(data + 0x08);
+       if (vm->vblk_size == 0) {
+               ldm_error ("Illegal VBLK size");
+               return false;
+       }
+
        vm->vblk_offset   = get_unaligned_be32(data + 0x0C);
        vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
 
index 68d6a21..11f688b 100644 (file)
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len)
 
 int mac_partition(struct parsed_partitions *state)
 {
-       int slot = 1;
        Sector sect;
        unsigned char *data;
-       int blk, blocks_in_map;
+       int slot, blocks_in_map;
        unsigned secsize;
 #ifdef CONFIG_PPC_PMAC
        int found_root = 0;
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state)
                put_dev_sector(sect);
                return 0;               /* not a MacOS disk */
        }
-       strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
        blocks_in_map = be32_to_cpu(part->map_count);
-       for (blk = 1; blk <= blocks_in_map; ++blk) {
-               int pos = blk * secsize;
+       if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
+               put_dev_sector(sect);
+               return 0;
+       }
+       strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
+       for (slot = 1; slot <= blocks_in_map; ++slot) {
+               int pos = slot * secsize;
                put_dev_sector(sect);
                data = read_part_sector(state, pos/512, &sect);
                if (!data)
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state)
                        }
 
                        if (goodness > found_root_goodness) {
-                               found_root = blk;
+                               found_root = slot;
                                found_root_goodness = goodness;
                        }
                }
 #endif /* CONFIG_PPC_PMAC */
-
-               ++slot;
        }
 #ifdef CONFIG_PPC_PMAC
        if (found_root_goodness)
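Both partition-parser fixes above validate on-disk fields before using them: the LDM parser rejects a zero vblk_size, and the Mac parser bounds blocks_in_map before looping over the map. A standalone sketch of that defensive-parsing shape (the limit below belongs to the example, not to the kernel):

#include <stdint.h>
#include <stdio.h>

#define DISK_MAX_PARTS 256

static int parse_map(uint32_t vblk_size, int32_t blocks_in_map)
{
	if (vblk_size == 0)
		return -1;                      /* would otherwise drive a zero stride */
	if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS)
		return -1;                      /* corrupt or hostile map count */

	for (int slot = 1; slot <= blocks_in_map; ++slot)
		;                               /* read and parse entry at slot * size */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       parse_map(0, 4),                 /* rejected: zero block size  */
	       parse_map(512, 1 << 30),         /* rejected: absurd map count */
	       parse_map(512, 4));              /* accepted                   */
	return 0;
}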
index 39df95a..b1cf6bf 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/errno.h>
 
+EXPORT_SYMBOL(posix_acl_init);
 EXPORT_SYMBOL(posix_acl_alloc);
 EXPORT_SYMBOL(posix_acl_clone);
 EXPORT_SYMBOL(posix_acl_valid);
@@ -31,6 +32,16 @@ EXPORT_SYMBOL(posix_acl_create_masq);
 EXPORT_SYMBOL(posix_acl_chmod_masq);
 EXPORT_SYMBOL(posix_acl_permission);
 
+/*
+ * Init a fresh posix_acl
+ */
+void
+posix_acl_init(struct posix_acl *acl, int count)
+{
+       atomic_set(&acl->a_refcount, 1);
+       acl->a_count = count;
+}
+
 /*
  * Allocate a new ACL with the specified number of entries.
  */
@@ -40,10 +51,8 @@ posix_acl_alloc(int count, gfp_t flags)
        const size_t size = sizeof(struct posix_acl) +
                            count * sizeof(struct posix_acl_entry);
        struct posix_acl *acl = kmalloc(size, flags);
-       if (acl) {
-               atomic_set(&acl->a_refcount, 1);
-               acl->a_count = count;
-       }
+       if (acl)
+               posix_acl_init(acl, count);
        return acl;
 }
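Splitting posix_acl_init() out of posix_acl_alloc() lets a caller that obtains the memory some other way (its own slab cache, part of a larger allocation) still get the reference count and entry count initialised consistently. A kernel-context sketch, assuming a matching declaration in <linux/posix_acl.h> and a cache whose objects are sized for `count` entries; demo_acl_from_slab() is not a kernel function:

#include <linux/posix_acl.h>
#include <linux/slab.h>

static struct posix_acl *demo_acl_from_slab(struct kmem_cache *cachep,
					    int count, gfp_t gfp)
{
	struct posix_acl *acl = kmem_cache_alloc(cachep, gfp);

	if (acl)
		posix_acl_init(acl, count);	/* refcount = 1, a_count = count */
	return acl;
}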
 
index df2b703..7c99c1c 100644 (file)
@@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
        task_cap(m, task);
        task_cpus_allowed(m, task);
        cpuset_task_status_allowed(m, task);
-#if defined(CONFIG_S390)
-       task_show_regs(m, task);
-#endif
        task_context_switch_counts(m, task);
        return 0;
 }
index 9d096e8..d49c4b5 100644 (file)
@@ -2620,35 +2620,6 @@ static const struct pid_entry proc_base_stuff[] = {
                &proc_self_inode_operations, NULL, {}),
 };
 
-/*
- *     Exceptional case: normally we are not allowed to unhash a busy
- * directory. In this case, however, we can do it - no aliasing problems
- * due to the way we treat inodes.
- */
-static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-       struct inode *inode;
-       struct task_struct *task;
-
-       if (nd->flags & LOOKUP_RCU)
-               return -ECHILD;
-
-       inode = dentry->d_inode;
-       task = get_proc_task(inode);
-       if (task) {
-               put_task_struct(task);
-               return 1;
-       }
-       d_drop(dentry);
-       return 0;
-}
-
-static const struct dentry_operations proc_base_dentry_operations =
-{
-       .d_revalidate   = proc_base_revalidate,
-       .d_delete       = pid_delete_dentry,
-};
-
 static struct dentry *proc_base_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
@@ -2685,7 +2656,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
        if (p->fop)
                inode->i_fop = p->fop;
        ei->op = p->op;
-       d_set_d_op(dentry, &proc_base_dentry_operations);
        d_add(dentry, inode);
        error = NULL;
 out:
index eafc22a..b701eaa 100644 (file)
@@ -67,7 +67,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
        struct console *con;
        loff_t off = 0;
 
-       acquire_console_sem();
+       console_lock();
        for_each_console(con)
                if (off++ == *pos)
                        break;
@@ -84,7 +84,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void c_stop(struct seq_file *m, void *v)
 {
-       release_console_sem();
+       console_unlock();
 }
 
 static const struct seq_operations consoles_op = {
index 176ce4c..d6a7ca1 100644 (file)
@@ -27,6 +27,7 @@
 static void proc_evict_inode(struct inode *inode)
 {
        struct proc_dir_entry *de;
+       struct ctl_table_header *head;
 
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
@@ -38,8 +39,11 @@ static void proc_evict_inode(struct inode *inode)
        de = PROC_I(inode)->pde;
        if (de)
                pde_put(de);
-       if (PROC_I(inode)->sysctl)
-               sysctl_head_put(PROC_I(inode)->sysctl);
+       head = PROC_I(inode)->sysctl;
+       if (head) {
+               rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
+               sysctl_head_put(head);
+       }
 }
 
 struct vfsmount *proc_mnt;
index d9396a4..927cbd1 100644 (file)
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
                return;
        root = of_find_node_by_path("/");
        if (root == NULL) {
-               printk(KERN_ERR "/proc/device-tree: can't find root\n");
+               pr_debug("/proc/device-tree: can't find root\n");
                return;
        }
        proc_device_tree_add_node(root, proc_device_tree);
index 09a1f92..8eb2522 100644 (file)
@@ -408,15 +408,18 @@ static int proc_sys_compare(const struct dentry *parent,
                const struct dentry *dentry, const struct inode *inode,
                unsigned int len, const char *str, const struct qstr *name)
 {
+       struct ctl_table_header *head;
        /* Although proc doesn't have negative dentries, rcu-walk means
         * that inode here can be NULL */
+       /* AV: can it, indeed? */
        if (!inode)
-               return 0;
+               return 1;
        if (name->len != len)
                return 1;
        if (memcmp(name->name, str, len))
                return 1;
-       return !sysctl_is_seen(PROC_I(inode)->sysctl);
+       head = rcu_dereference(PROC_I(inode)->sysctl);
+       return !head || !sysctl_is_seen(head);
 }
 
 static const struct dentry_operations proc_sys_dentry_operations = {
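proc_sys_compare() now fetches the sysctl header with rcu_dereference() and treats a missing inode or header as "no match" (returning 1), which pairs with proc_evict_inode() clearing the pointer before dropping its reference. A kernel-context sketch of that clear-then-release / read-and-check pairing (not standalone; the demo_* names and demo_head_put() are hypothetical):

#include <linux/rcupdate.h>

struct demo_head;
extern void demo_head_put(struct demo_head *head);	/* hypothetical release, cf. sysctl_head_put() */

struct demo_obj {
	struct demo_head *head;
};

static void demo_evict(struct demo_obj *obj)
{
	struct demo_head *head = obj->head;

	if (head) {
		rcu_assign_pointer(obj->head, NULL);	/* readers now observe NULL */
		demo_head_put(head);
	}
}

static int demo_reader_sees(struct demo_obj *obj)
{
	struct demo_head *head;
	int alive;

	rcu_read_lock();
	head = rcu_dereference(obj->head);
	alive = head != NULL;		/* mirror proc_sys_compare(): NULL => "no match" */
	rcu_read_unlock();
	return alive;
}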
index ba5f51e..68fdf45 100644 (file)
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                                        EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
                                        dentry, inode, &security);
        if (retval) {
-               dir->i_nlink--;
+               DEC_DIR_INODE_NLINK(dir)
                goto out_failed;
        }
 
index 3cfb2e9..5c11ca8 100644 (file)
@@ -978,8 +978,6 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
 
 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       if (nd->flags & LOOKUP_RCU)
-               return -ECHILD;
        return -EPERM;
 }
 
index 2fb2882..8ab48bc 100644 (file)
@@ -63,6 +63,14 @@ static struct buffer_head *get_block_length(struct super_block *sb,
                *length = (unsigned char) bh->b_data[*offset] |
                        (unsigned char) bh->b_data[*offset + 1] << 8;
                *offset += 2;
+
+               if (*offset == msblk->devblksize) {
+                       put_bh(bh);
+                       bh = sb_bread(sb, ++(*cur_index));
+                       if (bh == NULL)
+                               return NULL;
+                       *offset = 0;
+               }
        }
 
        return bh;
index 856756c..c4eb400 100644 (file)
@@ -95,12 +95,6 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
                        if (!buffer_uptodate(bh[k]))
                                goto release_mutex;
 
-                       if (avail == 0) {
-                               offset = 0;
-                               put_bh(bh[k++]);
-                               continue;
-                       }
-
                        stream->buf.in = bh[k]->b_data + offset;
                        stream->buf.in_size = avail;
                        stream->buf.in_pos = 0;
index 818a5e0..4661ae2 100644 (file)
@@ -82,12 +82,6 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
                        if (!buffer_uptodate(bh[k]))
                                goto release_mutex;
 
-                       if (avail == 0) {
-                               offset = 0;
-                               put_bh(bh[k++]);
-                               continue;
-                       }
-
                        stream->next_in = bh[k]->b_data + offset;
                        stream->avail_in = avail;
                        offset = 0;
index 74e149e..7e9dd4c 100644 (file)
@@ -177,6 +177,11 @@ void deactivate_locked_super(struct super_block *s)
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                fs->kill_sb(s);
+               /*
+                * We need to call rcu_barrier so all the delayed rcu free
+                * inodes are flushed before we release the fs module.
+                */
+               rcu_barrier();
                put_filesystem(fs);
                put_super(s);
        } else {
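The rcu_barrier() added to deactivate_locked_super() exists because inodes are commonly freed through call_rcu(): after kill_sb() those callbacks may still be queued, and the module that provides the callback function must not be unloaded until they have all run. A kernel-context sketch of that pattern (not standalone; the demo_* names are invented):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_inode {
	struct rcu_head	rcu;
	/* ... */
};

static void demo_inode_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_inode, rcu));
}

static void demo_destroy_inode(struct demo_inode *di)
{
	call_rcu(&di->rcu, demo_inode_free_rcu);	/* deferred free */
}

static void demo_kill_fs(void)
{
	/* tear down superblocks; this queues demo_inode_free_rcu() calls */

	/*
	 * Wait for every queued callback to finish before the module that
	 * contains demo_inode_free_rcu() can safely go away.
	 */
	rcu_barrier();
}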
index b427b12..e474fbc 100644 (file)
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
                new_de = sysv_find_entry(new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                sysv_set_link(new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
                        if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = sysv_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
 
        sysv_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                sysv_set_link(dir_de, dir_page, new_dir);
index 2be0f9e..b7c338d 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
 
+enum { UDF_MAX_LINKS = 0xffff };
+
 static inline int udf_match(int len1, const unsigned char *name1, int len2,
                            const unsigned char *name2)
 {
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        struct udf_inode_info *iinfo;
 
        err = -EMLINK;
-       if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
+       if (dir->i_nlink >= UDF_MAX_LINKS)
                goto out;
 
        err = -EIO;
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
        struct fileIdentDesc cfi, *fi;
        int err;
 
-       if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
+       if (inode->i_nlink >= UDF_MAX_LINKS)
                return -EMLINK;
-       }
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
        if (!fi) {
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto end_rename;
 
                retval = -EMLINK;
-               if (!new_inode &&
-                       new_dir->i_nlink >=
-                               (256 << sizeof(new_dir->i_nlink)) - 1)
+               if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
                        goto end_rename;
        }
        if (!nfi) {
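The UDF change replaces the open-coded `(256 << sizeof(i_nlink)) - 1` bound with a named UDF_MAX_LINKS of 0xffff, matching the 16-bit on-disk link count field. A quick arithmetic check (assuming a 4-byte in-core link count, which is typical for kernels of this era) shows how far off the old expression was:

#include <stdio.h>

int main(void)
{
	size_t nlink_bytes = sizeof(unsigned int);	/* in-core i_nlink, typically 4 bytes */
	unsigned long old_limit = (256UL << nlink_bytes) - 1;
	unsigned long new_limit = 0xffff;		/* UDF_MAX_LINKS: 16-bit on-disk count */

	printf("old expression: (256 << %zu) - 1 = %lu\n", nlink_bytes, old_limit);
	printf("UDF_MAX_LINKS:                    %lu\n", new_limit);
	return 0;
}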
index 12f39b9..d6f6815 100644 (file)
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                ufs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= UFS_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = ufs_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
-        * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME_SEC;
 
        ufs_delete_entry(old_dir, old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                ufs_set_link(old_inode, dir_de, dir_page, new_dir);
index 05201ae..d61611c 100644 (file)
@@ -152,6 +152,8 @@ xfs_ioc_trim(
 
        if (!capable(CAP_SYS_ADMIN))
                return -XFS_ERROR(EPERM);
+       if (!blk_queue_discard(q))
+               return -XFS_ERROR(EOPNOTSUPP);
        if (copy_from_user(&range, urange, sizeof(range)))
                return -XFS_ERROR(EFAULT);
 
index b06ede1..0ca0e3c 100644 (file)
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
        xfs_mount_t             *mp,
        void                    __user *arg)
 {
-       xfs_fsop_geom_v1_t      fsgeo;
+       xfs_fsop_geom_t         fsgeo;
        int                     error;
 
-       error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
+       error = xfs_fs_geometry(mp, &fsgeo, 3);
        if (error)
                return -error;
 
-       if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+       /*
+        * Caller should have passed an argument of type
+        * xfs_fsop_geom_v1_t.  This is a proper subset of the
+        * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
+        */
+       if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
                return -XFS_ERROR(EFAULT);
        return 0;
 }
@@ -985,10 +990,22 @@ xfs_ioctl_setattr(
 
                /*
                 * Extent size must be a multiple of the appropriate block
-                * size, if set at all.
+                * size, if set at all. It must also be smaller than the
+                * maximum extent size supported by the filesystem.
+                *
+                * Also, for non-realtime files, limit the extent size hint to
+                * half the size of the AGs in the filesystem so alignment
+                * doesn't result in extents larger than an AG.
                 */
                if (fa->fsx_extsize != 0) {
-                       xfs_extlen_t    size;
+                       xfs_extlen_t    size;
+                       xfs_fsblock_t   extsize_fsb;
+
+                       extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
+                       if (extsize_fsb > MAXEXTLEN) {
+                               code = XFS_ERROR(EINVAL);
+                               goto error_return;
+                       }
 
                        if (XFS_IS_REALTIME_INODE(ip) ||
                            ((mask & FSX_XFLAGS) &&
@@ -997,6 +1014,10 @@ xfs_ioctl_setattr(
                                       mp->m_sb.sb_blocklog;
                        } else {
                                size = mp->m_sb.sb_blocksize;
+                               if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
+                                       code = XFS_ERROR(EINVAL);
+                                       goto error_return;
+                               }
                        }
 
                        if (fa->fsx_extsize % size) {
index f8e854b..206a281 100644 (file)
@@ -1863,12 +1863,14 @@ xfs_qm_dqreclaim_one(void)
        xfs_dquot_t     *dqpout;
        xfs_dquot_t     *dqp;
        int             restarts;
+       int             startagain;
 
        restarts = 0;
        dqpout = NULL;
 
        /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
-startagain:
+again:
+       startagain = 0;
        mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
        list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
@@ -1885,13 +1887,10 @@ startagain:
                        ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
 
                        trace_xfs_dqreclaim_want(dqp);
-
-                       xfs_dqunlock(dqp);
-                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-                       if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return NULL;
                        XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-                       goto startagain;
+                       restarts++;
+                       startagain = 1;
+                       goto dqunlock;
                }
 
                /*
@@ -1906,23 +1905,20 @@ startagain:
                        ASSERT(list_empty(&dqp->q_mplist));
                        list_del_init(&dqp->q_freelist);
                        xfs_Gqm->qm_dqfrlist_cnt--;
-                       xfs_dqunlock(dqp);
                        dqpout = dqp;
                        XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-                       break;
+                       goto dqunlock;
                }
 
                ASSERT(dqp->q_hash);
                ASSERT(!list_empty(&dqp->q_mplist));
 
                /*
-                * Try to grab the flush lock. If this dquot is in the process of
-                * getting flushed to disk, we don't want to reclaim it.
+                * Try to grab the flush lock. If this dquot is in the process
+                * of getting flushed to disk, we don't want to reclaim it.
                 */
-               if (!xfs_dqflock_nowait(dqp)) {
-                       xfs_dqunlock(dqp);
-                       continue;
-               }
+               if (!xfs_dqflock_nowait(dqp))
+                       goto dqunlock;
 
                /*
                 * We have the flush lock so we know that this is not in the
@@ -1944,8 +1940,7 @@ startagain:
                                xfs_fs_cmn_err(CE_WARN, mp,
                        "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
                        }
-                       xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-                       continue;
+                       goto dqunlock;
                }
 
                /*
@@ -1967,13 +1962,8 @@ startagain:
                 */
                if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
                        restarts++;
-                       mutex_unlock(&dqp->q_hash->qh_lock);
-                       xfs_dqfunlock(dqp);
-                       xfs_dqunlock(dqp);
-                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-                       if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return NULL;
-                       goto startagain;
+                       startagain = 1;
+                       goto qhunlock;
                }
 
                ASSERT(dqp->q_nrefs == 0);
@@ -1986,14 +1976,20 @@ startagain:
                xfs_Gqm->qm_dqfrlist_cnt--;
                dqpout = dqp;
                mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+qhunlock:
                mutex_unlock(&dqp->q_hash->qh_lock);
 dqfunlock:
                xfs_dqfunlock(dqp);
+dqunlock:
                xfs_dqunlock(dqp);
                if (dqpout)
                        break;
                if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                       return NULL;
+                       break;
+               if (startagain) {
+                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+                       goto again;
+               }
        }
        mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
        return dqpout;
index 0ab56b3..d0b3bc7 100644 (file)
@@ -74,6 +74,22 @@ typedef unsigned int xfs_alloctype_t;
  */
 #define XFS_ALLOC_SET_ASIDE(mp)  (4 + ((mp)->m_sb.sb_agcount * 4))
 
+/*
+ * When deciding how much space to allocate out of an AG, we limit the
+ * allocation maximum size to the size the AG. However, we cannot use all the
+ * blocks in the AG - some are permanently used by metadata. These
+ * blocks are generally:
+ *     - the AG superblock, AGF, AGI and AGFL
+ *     - the AGF (bno and cnt) and AGI btree root blocks
+ *     - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
+ *
+ * The AG headers are sector sized, so the amount of space they take up is
+ * dependent on filesystem geometry. The others are all single blocks.
+ */
+#define XFS_ALLOC_AG_MAX_USABLE(mp)    \
+       ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
+
+
 /*
  * Argument structure for xfs_alloc routines.
  * This is turned into a structure to avoid having 20 arguments passed
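The new XFS_ALLOC_AG_MAX_USABLE() macro subtracts the four sector-sized AG headers (rounded up to filesystem blocks) plus seven more single blocks (the three btree roots and the four AGFL blocks set aside by XFS_ALLOC_SET_ASIDE(), per the comment above) from the AG size. A quick standalone calculation for one plausible geometry; the numbers are illustrative, not taken from any real filesystem:

#include <stdio.h>

int main(void)
{
	unsigned long sectsize = 512, blocksize = 4096;
	unsigned long agblocks = 245760;	/* example AG size in 4 KiB blocks */

	/* 4 sector-sized headers (SB, AGF, AGI, AGFL), rounded up to fs blocks */
	unsigned long header_blocks = (4 * sectsize + blocksize - 1) / blocksize;
	/* 3 btree root blocks + 4 AGFL blocks reserved by XFS_ALLOC_SET_ASIDE() */
	unsigned long reserved_blocks = 7;

	printf("max usable per AG: %lu of %lu blocks\n",
	       agblocks - header_blocks - reserved_blocks, agblocks);
	return 0;
}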
index 4111cd3..dc3afd7 100644 (file)
@@ -1038,17 +1038,34 @@ xfs_bmap_add_extent_delay_real(
                 * Filling in the middle part of a previous delayed allocation.
                 * Contiguity is impossible here.
                 * This case is avoided almost all the time.
+                *
+                * We start with a delayed allocation:
+                *
+                * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
+                *  PREV @ idx
+                *
+                * and we are allocating:
+                *                     +rrrrrrrrrrrrrrrrr+
+                *                            new
+                *
+                * and we set it up for insertion as:
+                * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
+                *                            new
+                *  PREV @ idx          LEFT              RIGHT
+                *                      inserted at idx + 1
                 */
                temp = new->br_startoff - PREV.br_startoff;
-               trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
-               xfs_bmbt_set_blockcount(ep, temp);
-               r[0] = *new;
-               r[1].br_state = PREV.br_state;
-               r[1].br_startblock = 0;
-               r[1].br_startoff = new_endoff;
                temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
-               r[1].br_blockcount = temp2;
-               xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
+               trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
+               xfs_bmbt_set_blockcount(ep, temp);      /* truncate PREV */
+               LEFT = *new;
+               RIGHT.br_state = PREV.br_state;
+               RIGHT.br_startblock = nullstartblock(
+                               (int)xfs_bmap_worst_indlen(ip, temp2));
+               RIGHT.br_startoff = new_endoff;
+               RIGHT.br_blockcount = temp2;
+               /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
+               xfs_iext_insert(ip, idx + 1, 2, &LEFT, state);
                ip->i_df.if_lastex = idx + 1;
                ip->i_d.di_nextents++;
                if (cur == NULL)
@@ -2430,7 +2447,7 @@ xfs_bmap_btalloc_nullfb(
                startag = ag = 0;
 
        pag = xfs_perag_get(mp, ag);
-       while (*blen < ap->alen) {
+       while (*blen < args->maxlen) {
                if (!pag->pagf_init) {
                        error = xfs_alloc_pagf_init(mp, args->tp, ag,
                                                    XFS_ALLOC_FLAG_TRYLOCK);
@@ -2452,7 +2469,7 @@ xfs_bmap_btalloc_nullfb(
                        notinit = 1;
 
                if (xfs_inode_is_filestream(ap->ip)) {
-                       if (*blen >= ap->alen)
+                       if (*blen >= args->maxlen)
                                break;
 
                        if (ap->userdata) {
@@ -2498,14 +2515,14 @@ xfs_bmap_btalloc_nullfb(
         * If the best seen length is less than the request
         * length, use the best as the minimum.
         */
-       else if (*blen < ap->alen)
+       else if (*blen < args->maxlen)
                args->minlen = *blen;
        /*
-        * Otherwise we've seen an extent as big as alen,
+        * Otherwise we've seen an extent as big as maxlen,
         * use that as the minimum.
         */
        else
-               args->minlen = ap->alen;
+               args->minlen = args->maxlen;
 
        /*
         * set the failure fallback case to look in the selected
@@ -2573,7 +2590,9 @@ xfs_bmap_btalloc(
        args.tp = ap->tp;
        args.mp = mp;
        args.fsbno = ap->rval;
-       args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
+
+       /* Trim the allocation back to the maximum an AG can fit. */
+       args.maxlen = MIN(ap->alen, XFS_ALLOC_AG_MAX_USABLE(mp));
        args.firstblock = ap->firstblock;
        blen = 0;
        if (nullfb) {
@@ -2621,7 +2640,7 @@ xfs_bmap_btalloc(
                        /*
                         * Adjust for alignment
                         */
-                       if (blen > args.alignment && blen <= ap->alen)
+                       if (blen > args.alignment && blen <= args.maxlen)
                                args.minlen = blen - args.alignment;
                        args.minalignslop = 0;
                } else {
@@ -2640,7 +2659,7 @@ xfs_bmap_btalloc(
                         * of minlen+alignment+slop doesn't go up
                         * between the calls.
                         */
-                       if (blen > mp->m_dalign && blen <= ap->alen)
+                       if (blen > mp->m_dalign && blen <= args.maxlen)
                                nextminlen = blen - mp->m_dalign;
                        else
                                nextminlen = args.minlen;
@@ -4485,6 +4504,16 @@ xfs_bmapi(
                                /* Figure out the extent size, adjust alen */
                                extsz = xfs_get_extsz_hint(ip);
                                if (extsz) {
+                                       /*
+                                        * make sure we don't exceed a single
+                                        * extent length when we align the
+                                        * extent by reducing length we are
+                                        * going to allocate by the maximum
+                                        * amount extent size alignment may
+                                        * require.
+                                        */
+                                       alen = XFS_FILBLKS_MIN(len,
+                                                  MAXEXTLEN - (2 * extsz - 1));
                                        error = xfs_bmap_extsize_align(mp,
                                                        &got, &prev, extsz,
                                                        rt, eof,
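The clamp added in xfs_bmapi() trims the requested length to MAXEXTLEN - (2 * extsz - 1) before extent-size alignment, because aligning can move the start down by up to extsz - 1 blocks and push the end up by up to extsz - 1 more. A small worked example, taking MAXEXTLEN as the 21-bit on-disk extent length limit:

#include <stdio.h>

#define MAXEXTLEN	((1UL << 21) - 1)	/* 21-bit extent length field */

static unsigned long align_down(unsigned long x, unsigned long a) { return x - x % a; }
static unsigned long align_up(unsigned long x, unsigned long a)   { return align_down(x + a - 1, a); }

int main(void)
{
	unsigned long extsz = 1024, off = 1023;		 /* worst-case placement */
	unsigned long len = MAXEXTLEN - (2 * extsz - 1); /* clamped request      */

	unsigned long start = align_down(off, extsz);
	unsigned long end = align_up(off + len, extsz);

	printf("aligned length %lu <= MAXEXTLEN %lu\n", end - start, MAXEXTLEN);
	return 0;
}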
index 98c6f73..6f8c21c 100644 (file)
@@ -427,13 +427,15 @@ xfs_buf_item_unpin(
 
                if (remove) {
                        /*
-                        * We have to remove the log item from the transaction
-                        * as we are about to release our reference to the
-                        * buffer.  If we don't, the unlock that occurs later
-                        * in xfs_trans_uncommit() will ry to reference the
+                        * If we are in a transaction context, we have to
+                        * remove the log item from the transaction as we are
+                        * about to release our reference to the buffer.  If we
+                        * don't, the unlock that occurs later in
+                        * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
-                       xfs_trans_del_item(lip);
+                       if (lip->li_desc)
+                               xfs_trans_del_item(lip);
 
                        /*
                         * Since the transaction no longer refers to the buffer,
index 75f2ef6..d22e626 100644 (file)
@@ -138,7 +138,8 @@ xfs_efi_item_unpin(
 
        if (remove) {
                ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
-               xfs_trans_del_item(lip);
+               if (lip->li_desc)
+                       xfs_trans_del_item(lip);
                xfs_efi_item_free(efip);
                return;
        }
index cec89dd..85668ef 100644 (file)
@@ -53,6 +53,9 @@ xfs_fs_geometry(
        xfs_fsop_geom_t         *geo,
        int                     new_version)
 {
+
+       memset(geo, 0, sizeof(*geo));
+
        geo->blocksize = mp->m_sb.sb_blocksize;
        geo->rtextsize = mp->m_sb.sb_rextsize;
        geo->agblocks = mp->m_sb.sb_agblocks;
index 55582bd..8a0f044 100644 (file)
@@ -337,7 +337,12 @@ xfs_iomap_prealloc_size(
                int shift = 0;
                int64_t freesp;
 
-               alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
+               /*
+                * rounddown_pow_of_two() returns an undefined result
+                * if we pass in alloc_blocks = 0. Hence the "+ 1" to
+                * ensure we always pass in a non-zero value.
+                */
+               alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1;
                alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
                                        rounddown_pow_of_two(alloc_blocks));
 
index 916eb7d..3bd3291 100644 (file)
@@ -191,7 +191,7 @@ void          xfs_log_ticket_put(struct xlog_ticket *ticket);
 
 xlog_tid_t xfs_log_get_trans_ident(struct xfs_trans *tp);
 
-int    xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
+void   xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
                                struct xfs_log_vec *log_vector,
                                xfs_lsn_t *commit_lsn, int flags);
 bool   xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
index 9dc8125..9ca59be 100644 (file)
@@ -543,7 +543,7 @@ xlog_cil_push(
 
        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
        if (error)
-               goto out_abort;
+               goto out_abort_free_ticket;
 
        /*
         * now that we've written the checkpoint into the log, strictly
@@ -569,8 +569,9 @@ restart:
        }
        spin_unlock(&cil->xc_cil_lock);
 
+       /* xfs_log_done always frees the ticket on error. */
        commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
-       if (error || commit_lsn == -1)
+       if (commit_lsn == -1)
                goto out_abort;
 
        /* attach all the transactions w/ busy extents to iclog */
@@ -600,6 +601,8 @@ out_free_ticket:
        kmem_free(new_ctx);
        return 0;
 
+out_abort_free_ticket:
+       xfs_log_ticket_put(tic);
 out_abort:
        xlog_cil_committed(ctx, XFS_LI_ABORTED);
        return XFS_ERROR(EIO);
@@ -622,7 +625,7 @@ out_abort:
  * background commit, returns without it held once background commits are
  * allowed again.
  */
-int
+void
 xfs_log_commit_cil(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
@@ -637,11 +640,6 @@ xfs_log_commit_cil(
        if (flags & XFS_TRANS_RELEASE_LOG_RES)
                log_flags = XFS_LOG_REL_PERM_RESERV;
 
-       if (XLOG_FORCED_SHUTDOWN(log)) {
-               xlog_cil_free_logvec(log_vector);
-               return XFS_ERROR(EIO);
-       }
-
        /*
         * do all the hard work of formatting items (including memory
         * allocation) outside the CIL context lock. This prevents stalling CIL
@@ -701,7 +699,6 @@ xfs_log_commit_cil(
         */
        if (push)
                xlog_cil_push(log, 0);
-       return 0;
 }
 
 /*
index 33dbc4e..7692279 100644 (file)
@@ -1446,6 +1446,14 @@ xfs_log_item_batch_insert(
  * Bulk operation version of xfs_trans_committed that takes a log vector of
  * items to insert into the AIL. This uses bulk AIL insertion techniques to
  * minimise lock traffic.
+ *
+ * If we are called with the aborted flag set, it is because a log write during
+ * a CIL checkpoint commit has failed. In this case, all the items in the
+ * checkpoint have already gone through IOP_COMMITTED and IOP_UNLOCK, which
+ * means that checkpoint commit abort handling is treated exactly the same
+ * as an iclog write error even though we haven't started any IO yet. Hence in
+ * this case all we need to do is IOP_COMMITTED processing, followed by an
+ * IOP_UNPIN(aborted) call.
  */
 void
 xfs_trans_committed_bulk(
@@ -1472,6 +1480,16 @@ xfs_trans_committed_bulk(
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;
 
+               /*
+                * if we are aborting the operation, no point in inserting the
+                * object into the AIL as we are in a shutdown situation.
+                */
+               if (aborted) {
+                       ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
+                       IOP_UNPIN(lip, 1);
+                       continue;
+               }
+
                if (item_lsn != commit_lsn) {
 
                        /*
@@ -1503,20 +1521,24 @@ xfs_trans_committed_bulk(
 }
 
 /*
- * Called from the trans_commit code when we notice that
- * the filesystem is in the middle of a forced shutdown.
+ * Called from the trans_commit code when we notice that the filesystem is in
+ * the middle of a forced shutdown.
+ *
+ * When we are called here, we have already pinned all the items in the
+ * transaction. However, neither IOP_COMMITTING or IOP_UNLOCK has been called
+ * so we can simply walk the items in the transaction, unpin them with an abort
+ * flag and then free the items. Note that unpinning the items can result in
+ * them being freed immediately, so we need to use a safe list traversal method
+ * here.
  */
 STATIC void
 xfs_trans_uncommit(
        struct xfs_trans        *tp,
        uint                    flags)
 {
-       struct xfs_log_item_desc *lidp;
+       struct xfs_log_item_desc *lidp, *n;
 
-       list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-               /*
-                * Unpin all but those that aren't dirty.
-                */
+       list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) {
                if (lidp->lid_flags & XFS_LID_DIRTY)
                        IOP_UNPIN(lidp->lid_item, 1);
        }
@@ -1733,7 +1755,6 @@ xfs_trans_commit_cil(
        int                     flags)
 {
        struct xfs_log_vec      *log_vector;
-       int                     error;
 
        /*
         * Get each log item to allocate a vector structure for
@@ -1744,9 +1765,7 @@ xfs_trans_commit_cil(
        if (!log_vector)
                return ENOMEM;
 
-       error = xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
-       if (error)
-               return error;
+       xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
 
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
        xfs_trans_free(tp);
index 31b6188..b4bfe33 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 
+#include <linux/mm_types.h>
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
index 6864933..fe77e33 100644 (file)
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS()        VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
+#define FTRACE_EVENTS()        . = ALIGN(8);                                   \
+                       VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
                        *(_ftrace_events)                               \
                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 #else
 #endif
 
 #ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;        \
+#define TRACE_SYSCALLS() . = ALIGN(8);                                 \
+                        VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
                         *(__syscalls_metadata)                         \
                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
        CPU_KEEP(exit.data)                                             \
        MEM_KEEP(init.data)                                             \
        MEM_KEEP(exit.data)                                             \
-       . = ALIGN(32);                                                  \
-       VMLINUX_SYMBOL(__start___tracepoints) = .;                      \
+       STRUCT_ALIGN();                                                 \
        *(__tracepoints)                                                \
-       VMLINUX_SYMBOL(__stop___tracepoints) = .;                       \
        /* implement dynamic printk debug */                            \
        . = ALIGN(8);                                                   \
        VMLINUX_SYMBOL(__start___verbose) = .;                          \
        VMLINUX_SYMBOL(__stop___verbose) = .;                           \
        LIKELY_PROFILE()                                                \
        BRANCH_PROFILE()                                                \
-       TRACE_PRINTKS()                                                 \
-                                                                       \
-       STRUCT_ALIGN();                                                 \
-       FTRACE_EVENTS()                                                 \
-                                                                       \
-       STRUCT_ALIGN();                                                 \
-       TRACE_SYSCALLS()
+       TRACE_PRINTKS()
 
 /*
  * Data section helpers
                VMLINUX_SYMBOL(__start_rodata) = .;                     \
                *(.rodata) *(.rodata.*)                                 \
                *(__vermagic)           /* Kernel version magic */      \
+               . = ALIGN(8);                                           \
+               VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
+               *(__tracepoints_ptrs)   /* Tracepoints: pointer array */\
+               VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;          \
                *(__markers_strings)    /* Markers: strings */          \
                *(__tracepoints_strings)/* Tracepoints: strings */      \
        }                                                               \
                VMLINUX_SYMBOL(__start___param) = .;                    \
                *(__param)                                              \
                VMLINUX_SYMBOL(__stop___param) = .;                     \
+       }                                                               \
+                                                                       \
+       /* Built-in module versions. */                                 \
+       __modver : AT(ADDR(__modver) - LOAD_OFFSET) {                   \
+               VMLINUX_SYMBOL(__start___modver) = .;                   \
+               *(__modver)                                             \
+               VMLINUX_SYMBOL(__stop___modver) = .;                    \
                . = ALIGN((align));                                     \
                VMLINUX_SYMBOL(__end_rodata) = .;                       \
        }                                                               \
        KERNEL_CTORS()                                                  \
        *(.init.rodata)                                                 \
        MCOUNT_REC()                                                    \
+       FTRACE_EVENTS()                                                 \
+       TRACE_SYSCALLS()                                                \
        DEV_DISCARD(init.rodata)                                        \
        CPU_DISCARD(init.rodata)                                        \
        MEM_DISCARD(init.rodata)                                        \
index a4694c6..348843b 100644 (file)
@@ -1101,7 +1101,7 @@ struct drm_device {
        struct platform_device *platformdev; /**< Platform device structure */
 
        struct drm_sg_mem *sg;  /**< Scatter gather memory */
-       int num_crtcs;                  /**< Number of CRTCs on this device */
+       unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
        void *dev_private;              /**< device private data */
        void *mm_private;
        struct address_space *dev_mapping;
@@ -1367,7 +1367,7 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                     struct timeval *vblanktime);
-extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
index acd7fad..801be59 100644 (file)
@@ -275,6 +275,7 @@ struct drm_pending_vblank_event;
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
  * @dpms: control display power levels
  * @save: save CRTC state
  * @restore: restore CRTC state
@@ -302,6 +303,8 @@ struct drm_crtc_funcs {
        void (*save)(struct drm_crtc *crtc); /* suspend? */
        /* Restore CRTC state */
        void (*restore)(struct drm_crtc *crtc); /* resume? */
+       /* Reset CRTC state */
+       void (*reset)(struct drm_crtc *crtc);
 
        /* cursor controls */
        int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -379,6 +382,7 @@ struct drm_crtc {
  * @dpms: set power state (see drm_crtc_funcs above)
  * @save: save connector state
  * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
  * @mode_valid: is this mode valid on the given connector?
  * @mode_fixup: try to fixup proposed mode for this connector
  * @mode_set: set this mode
@@ -396,6 +400,7 @@ struct drm_connector_funcs {
        void (*dpms)(struct drm_connector *connector, int mode);
        void (*save)(struct drm_connector *connector);
        void (*restore)(struct drm_connector *connector);
+       void (*reset)(struct drm_connector *connector);
 
        /* Check to see if anything is attached to the connector.
         * @force is set to false whilst polling, true when checking the
@@ -413,6 +418,7 @@ struct drm_connector_funcs {
 };
 
 struct drm_encoder_funcs {
+       void (*reset)(struct drm_encoder *encoder);
        void (*destroy)(struct drm_encoder *encoder);
 };
 
@@ -656,6 +662,7 @@ extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
                                                   struct drm_display_mode *mode);
 extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
index fe29ae3..5ff1194 100644 (file)
@@ -28,7 +28,6 @@
        {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
        {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
        {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
-       {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
        {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
index e95a86b..e5c607a 100644 (file)
@@ -907,6 +907,7 @@ struct drm_radeon_cs {
 #define RADEON_INFO_TILING_CONFIG      0x06
 #define RADEON_INFO_WANT_HYPERZ                0x07
 #define RADEON_INFO_WANT_CMASK         0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
 
 struct drm_radeon_info {
        uint32_t                request;
index 5cb86c3..fc48754 100644 (file)
@@ -99,7 +99,6 @@ struct rxrpc_key_token {
  * structure of raw payloads passed to add_key() or instantiate key
  */
 struct rxrpc_key_data_v1 {
-       u32             kif_version;            /* 1 */
        u16             security_index;
        u16             ticket_length;
        u32             expiry;                 /* time_t */
index 2296d8b..b0ada6f 100644 (file)
@@ -1,5 +1,6 @@
 header-y += byteorder/
 header-y += can/
+header-y += caif/
 header-y += dvb/
 header-y += hdlc/
 header-y += isdn/
index 359df04..9d339eb 100644 (file)
 #define AUDIT_BPRM_FCAPS       1321    /* Information about fcaps increasing perms */
 #define AUDIT_CAPSET           1322    /* Record showing argument to sys_capset */
 #define AUDIT_MMAP             1323    /* Record showing descriptor and flags in mmap */
+#define AUDIT_NETFILTER_PKT    1324    /* Packets traversing netfilter chains */
+#define AUDIT_NETFILTER_CFG    1325    /* Netfilter chain modifications */
 
 #define AUDIT_AVC              1400    /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR      1401    /* Internal SE Linux Errors */
index 4d18ff3..d5063e1 100644 (file)
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
 extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
index 3395cf7..b22fb0d 100644 (file)
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
-extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
diff --git a/include/linux/caif/Kbuild b/include/linux/caif/Kbuild
new file mode 100644 (file)
index 0000000..a9cf250
--- /dev/null
@@ -0,0 +1,2 @@
+header-y += caif_socket.h
+header-y += if_caif.h
index c3011be..31d91a6 100644 (file)
@@ -123,6 +123,7 @@ struct ceph_msg_pos {
 #define SOCK_CLOSED    11 /* socket state changed to closed */
 #define OPENING         13 /* open connection w/ (possibly new) peer */
 #define DEAD            14 /* dead, about to kfree */
+#define BACKOFF         15
 
 /*
  * A single connection with another host.
@@ -160,7 +161,6 @@ struct ceph_connection {
        struct list_head out_queue;
        struct list_head out_sent;   /* sending or sent but unacked */
        u64 out_seq;                 /* last message queued for send */
-       bool out_keepalive_pending;
 
        u64 in_seq, in_seq_acked;  /* last message received, acked */
 
index 9774fe6..7453cfd 100644 (file)
@@ -139,9 +139,9 @@ extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_n
 extern void register_console(struct console *);
 extern int unregister_console(struct console *);
 extern struct console *console_drivers;
-extern void acquire_console_sem(void);
-extern int try_acquire_console_sem(void);
-extern void release_console_sem(void);
+extern void console_lock(void);
+extern int console_trylock(void);
+extern void console_unlock(void);
 extern void console_conditional_schedule(void);
 extern void console_unblank(void);
 extern struct tty_driver *console_device(int *);
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644 (file)
index 0000000..473771a
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * cpu_rmap.h: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ *      based on affinity masks
+ */
+struct cpu_rmap {
+       u16             size, used;
+       void            **obj;
+       struct {
+               u16     index;
+               u16     dist;
+       }               near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+
+/**
+ * free_cpu_rmap - free CPU affinity reverse-map
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ */
+static inline void free_cpu_rmap(struct cpu_rmap *rmap)
+{
+       kfree(rmap);
+}
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+                          const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+       return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+       return rmap->obj[rmap->near[cpu].index];
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+       return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif
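
Editorial note: a minimal, hypothetical driver sketch of how the new <linux/cpu_rmap.h> API above might be used (requires CONFIG_GENERIC_HARDIRQS). The names my_dev, rx_irq[] and nr_rx_queues are illustrative only and not part of this patch; the resulting map would typically be published through the net_device rx_cpu_rmap field added later in this diff.

#include <linux/cpu_rmap.h>
#include <linux/errno.h>

struct my_dev {                         /* illustrative only */
        struct cpu_rmap *rx_cpu_rmap;
        int rx_irq[8];
};

static int my_dev_setup_rmap(struct my_dev *priv, unsigned int nr_rx_queues)
{
        unsigned int i;
        int rc;

        priv->rx_cpu_rmap = alloc_irq_cpu_rmap(nr_rx_queues);
        if (!priv->rx_cpu_rmap)
                return -ENOMEM;

        for (i = 0; i < nr_rx_queues; i++) {
                /* tie each RX IRQ's affinity mask into the reverse map */
                rc = irq_cpu_rmap_add(priv->rx_cpu_rmap, priv->rx_irq[i]);
                if (rc) {
                        free_irq_cpu_rmap(priv->rx_cpu_rmap);
                        priv->rx_cpu_rmap = NULL;
                        return rc;
                }
        }
        return 0;
}
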
index 68cd248..c522800 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, Intel Corporation.
+ * Copyright (c) 2008-2011, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 /* IEEE 802.1Qaz std supported values */
 #define IEEE_8021QAZ_MAX_TCS   8
 
+#define IEEE_8021QAZ_TSA_STRICT                0
+#define IEEE_8021QAZ_TSA_CB_SHAPER     1
+#define IEEE_8021QAZ_TSA_ETS           2
+#define IEEE_8021QAZ_TSA_VENDOR                255
+
 /* This structure contains the IEEE 802.1Qaz ETS managed object
  *
- * @willing: willing bit in ETS configuratin TLV
+ * @willing: willing bit in ETS configuration TLV
  * @ets_cap: indicates supported capacity of ets feature
  * @cbs: credit based shaper ets algorithm supported
  * @tc_tx_bw: tc tx bandwidth indexed by traffic class
@@ -82,6 +87,50 @@ struct ieee_pfc {
        __u64   indications[IEEE_8021QAZ_MAX_TCS];
 };
 
+/* CEE DCBX std supported values */
+#define CEE_DCBX_MAX_PGS       8
+#define CEE_DCBX_MAX_PRIO      8
+
+/**
+ * struct cee_pg - CEE Priority-Group managed object
+ *
+ * @willing: willing bit in the PG tlv
+ * @error: error bit in the PG tlv
+ * @pg_en: enable bit of the PG feature
+ * @tcs_supported: number of traffic classes supported
+ * @pg_bw: bandwidth percentage for each priority group
+ * @prio_pg: priority to PG mapping indexed by priority
+ */
+struct cee_pg {
+       __u8    willing;
+       __u8    error;
+       __u8    pg_en;
+       __u8    tcs_supported;
+       __u8    pg_bw[CEE_DCBX_MAX_PGS];
+       __u8    prio_pg[CEE_DCBX_MAX_PGS];
+};
+
+/**
+ * struct cee_pfc - CEE PFC managed object
+ *
+ * @willing: willing bit in the PFC tlv
+ * @error: error bit in the PFC tlv
+ * @pfc_en: bitmap indicating pfc enabled traffic classes
+ * @tcs_supported: number of traffic classes supported
+ */
+struct cee_pfc {
+       __u8    willing;
+       __u8    error;
+       __u8    pfc_en;
+       __u8    tcs_supported;
+};
+
+/* IEEE 802.1Qaz std supported values */
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#define IEEE_8021QAZ_APP_SEL_STREAM    2
+#define IEEE_8021QAZ_APP_SEL_DGRAM     3
+#define IEEE_8021QAZ_APP_SEL_ANY       4
+
 /* This structure contains the IEEE 802.1Qaz APP managed object. This
  * object is also used for the CEE std as well. There is no difference
  * between the objects.
@@ -101,8 +150,22 @@ struct ieee_pfc {
  */
 struct dcb_app {
        __u8    selector;
-       __u32   protocol;
        __u8    priority;
+       __u16   protocol;
+};
+
+/**
+ * struct dcb_peer_app_info - APP feature information sent by the peer
+ *
+ * @willing: willing bit in the peer APP tlv
+ * @error: error bit in the peer APP tlv
+ *
+ * In addition to this information the full peer APP tlv also contains
+ * a table of 'app_count' APP objects defined above.
+ */
+struct dcb_peer_app_info {
+       __u8    willing;
+       __u8    error;
 };
 
 struct dcbmsg {
@@ -139,6 +202,7 @@ struct dcbmsg {
  * @DCB_CMD_SDCBX: set DCBX engine configuration
  * @DCB_CMD_GFEATCFG: get DCBX features flags
  * @DCB_CMD_SFEATCFG: set DCBX features negotiation flags
+ * @DCB_CMD_CEE_GET: get CEE aggregated configuration
  */
 enum dcbnl_commands {
        DCB_CMD_UNDEFINED,
@@ -181,6 +245,8 @@ enum dcbnl_commands {
        DCB_CMD_GFEATCFG,
        DCB_CMD_SFEATCFG,
 
+       DCB_CMD_CEE_GET,
+
        __DCB_CMD_ENUM_MAX,
        DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
 };
@@ -203,6 +269,7 @@ enum dcbnl_commands {
  * @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED)
  * @DCB_ATTR_DCBX: DCBX engine configuration in the device (NLA_U8)
  * @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED)
+ * @DCB_ATTR_CEE: CEE std supported attributes (NLA_NESTED)
  */
 enum dcbnl_attrs {
        DCB_ATTR_UNDEFINED,
@@ -226,15 +293,32 @@ enum dcbnl_attrs {
        DCB_ATTR_DCBX,
        DCB_ATTR_FEATCFG,
 
+       /* CEE nested attributes */
+       DCB_ATTR_CEE,
+
        __DCB_ATTR_ENUM_MAX,
        DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
 };
 
+/**
+ * enum ieee_attrs - IEEE 802.1Qaz get/set attributes
+ *
+ * @DCB_ATTR_IEEE_UNSPEC: unspecified
+ * @DCB_ATTR_IEEE_ETS: negotiated ETS configuration
+ * @DCB_ATTR_IEEE_PFC: negotiated PFC configuration
+ * @DCB_ATTR_IEEE_APP_TABLE: negotiated APP configuration
+ * @DCB_ATTR_IEEE_PEER_ETS: peer ETS configuration - get only
+ * @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
+ * @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
+ */
 enum ieee_attrs {
        DCB_ATTR_IEEE_UNSPEC,
        DCB_ATTR_IEEE_ETS,
        DCB_ATTR_IEEE_PFC,
        DCB_ATTR_IEEE_APP_TABLE,
+       DCB_ATTR_IEEE_PEER_ETS,
+       DCB_ATTR_IEEE_PEER_PFC,
+       DCB_ATTR_IEEE_PEER_APP,
        __DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
@@ -246,6 +330,31 @@ enum ieee_attrs_app {
 };
 #define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1)
 
+/**
+ * enum cee_attrs - CEE DCBX get attributes
+ *
+ * @DCB_ATTR_CEE_UNSPEC: unspecified
+ * @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only
+ * @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only
+ * @DCB_ATTR_CEE_PEER_APP: peer APP tlv - get only
+ */
+enum cee_attrs {
+       DCB_ATTR_CEE_UNSPEC,
+       DCB_ATTR_CEE_PEER_PG,
+       DCB_ATTR_CEE_PEER_PFC,
+       DCB_ATTR_CEE_PEER_APP_TABLE,
+       __DCB_ATTR_CEE_MAX
+};
+#define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1)
+
+enum peer_app_attr {
+       DCB_ATTR_CEE_PEER_APP_UNSPEC,
+       DCB_ATTR_CEE_PEER_APP_INFO,
+       DCB_ATTR_CEE_PEER_APP,
+       __DCB_ATTR_CEE_PEER_APP_MAX
+};
+#define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1)
+
 /**
  * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
  *
index 010e2d8..d638e85 100644 (file)
@@ -279,8 +279,6 @@ enum dccp_state {
        DCCP_MAX_STATES
 };
 
-#define DCCP_STATE_MASK 0x1f
-
 enum {
        DCCPF_OPEN            = TCPF_ESTABLISHED,
        DCCPF_REQUESTING      = TCPF_SYN_SENT,
index 1908929..aac3e2e 100644 (file)
@@ -251,6 +251,7 @@ enum ethtool_stringset {
        ETH_SS_STATS,
        ETH_SS_PRIV_FLAGS,
        ETH_SS_NTUPLE_FILTERS,
+       ETH_SS_FEATURES,
 };
 
 /* for passing string sets for data tagging */
@@ -523,6 +524,92 @@ struct ethtool_flash {
        char    data[ETHTOOL_FLASH_MAX_FILENAME];
 };
 
+/* for returning and changing feature sets */
+
+/**
+ * struct ethtool_get_features_block - block with state of 32 features
+ * @available: mask of changeable features
+ * @requested: mask of features requested to be enabled if possible
+ * @active: mask of currently enabled features
+ * @never_changed: mask of features not changeable for any device
+ */
+struct ethtool_get_features_block {
+       __u32   available;
+       __u32   requested;
+       __u32   active;
+       __u32   never_changed;
+};
+
+/**
+ * struct ethtool_gfeatures - command to get state of device's features
+ * @cmd: command number = %ETHTOOL_GFEATURES
+ * @size: in: number of elements in the features[] array;
+ *       out: number of elements in features[] needed to hold all features
+ * @features: state of features
+ */
+struct ethtool_gfeatures {
+       __u32   cmd;
+       __u32   size;
+       struct ethtool_get_features_block features[0];
+};
+
+/**
+ * struct ethtool_set_features_block - block with request for 32 features
+ * @valid: mask of features to be changed
+ * @requested: values of features to be changed
+ */
+struct ethtool_set_features_block {
+       __u32   valid;
+       __u32   requested;
+};
+
+/**
+ * struct ethtool_sfeatures - command to request change in device's features
+ * @cmd: command number = %ETHTOOL_SFEATURES
+ * @size: array size of the features[] array
+ * @features: feature change masks
+ */
+struct ethtool_sfeatures {
+       __u32   cmd;
+       __u32   size;
+       struct ethtool_set_features_block features[0];
+};
+
+/*
+ * %ETHTOOL_SFEATURES changes features present in features[].valid to the
+ * values of corresponding bits in features[].requested. Bits in .requested
+ * not set in .valid or not changeable are ignored.
+ *
+ * Returns %EINVAL when .valid contains undefined or never-changeable bits
+ * or size is not equal to required number of features words (32-bit blocks).
+ * Returns >= 0 if request was completed; bits set in the value mean:
+ *   %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not
+ *     changeable (not present in %ETHTOOL_GFEATURES' features[].available);
+ *     those bits were ignored.
+ *   %ETHTOOL_F_WISH - some or all changes requested were recorded but the
+ *      resulting state of bits masked by .valid is not equal to .requested.
+ *      Probably there are other device-specific constraints on some features
+ *      in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered
+ *      here as though ignored bits were cleared.
+ *   %ETHTOOL_F_COMPAT - some or all changes requested were made by calling
+ *      compatibility functions. Requested offload state cannot be properly
+ *      managed by kernel.
+ *
+ * Meaning of bits in the masks are obtained by %ETHTOOL_GSSET_INFO (number of
+ * bits in the arrays - always multiple of 32) and %ETHTOOL_GSTRINGS commands
+ * for ETH_SS_FEATURES string set. First entry in the table corresponds to least
+ * significant bit in features[0] fields. Empty strings mark undefined features.
+ */
+enum ethtool_sfeatures_retval_bits {
+       ETHTOOL_F_UNSUPPORTED__BIT,
+       ETHTOOL_F_WISH__BIT,
+       ETHTOOL_F_COMPAT__BIT,
+};
+
+#define ETHTOOL_F_UNSUPPORTED   (1 << ETHTOOL_F_UNSUPPORTED__BIT)
+#define ETHTOOL_F_WISH          (1 << ETHTOOL_F_WISH__BIT)
+#define ETHTOOL_F_COMPAT        (1 << ETHTOOL_F_COMPAT__BIT)
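
Editorial sketch (not from the patch): a userspace tool could read the first 32 feature bits with the new ETHTOOL_GFEATURES command roughly as below. The interface name "eth0" and the single-block assumption are placeholders; a real implementation would first query ETHTOOL_GSSET_INFO for the ETH_SS_FEATURES count and size the block array accordingly.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct {
                struct ethtool_gfeatures hdr;
                struct ethtool_get_features_block block[1];
        } req;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.hdr.cmd = ETHTOOL_GFEATURES;
        req.hdr.size = 1;                       /* one 32-bit block only */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&req;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("active: %08x requested: %08x\n",
                       req.block[0].active, req.block[0].requested);
        close(fd);
        return 0;
}
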
+
 #ifdef __KERNEL__
 
 #include <linux/rculist.h>
@@ -543,7 +630,6 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_rx_csum(struct net_device *dev);
 u32 ethtool_op_get_tx_csum(struct net_device *dev);
 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
@@ -744,6 +830,9 @@ struct ethtool_ops {
 #define ETHTOOL_GRXFHINDIR     0x00000038 /* Get RX flow hash indir'n table */
 #define ETHTOOL_SRXFHINDIR     0x00000039 /* Set RX flow hash indir'n table */
 
+#define ETHTOOL_GFEATURES      0x0000003a /* Get device offload settings */
+#define ETHTOOL_SFEATURES      0x0000003b /* Change device offload settings */
+
 /* compatibility with older code */
 #define SPARC_ETH_GSET         ETHTOOL_GSET
 #define SPARC_ETH_SSET         ETHTOOL_SSET
index da7e52b..1effc8b 100644 (file)
@@ -109,7 +109,7 @@ static inline void freezer_count(void)
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
index 32b38cd..e38b50a 100644 (file)
@@ -649,6 +649,7 @@ struct address_space {
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        struct address_space    *assoc_mapping; /* ditto */
+       struct mutex            unmap_mutex;    /* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
        /*
         * On most architectures that alignment is already the case; but
@@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk,
                                   struct block_device *bdev);
 extern int revalidate_disk(struct gendisk *);
 extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
@@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
 extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
@@ -2555,9 +2557,12 @@ int proc_nr_inodes(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos);
 int __init get_filesystem_list(char *buf);
 
+#define __FMODE_EXEC           ((__force int) FMODE_EXEC)
+#define __FMODE_NONOTIFY       ((__force int) FMODE_NONOTIFY)
+
 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
-                                           (flag & FMODE_NONOTIFY)))
+                                           (flag & __FMODE_NONOTIFY)))
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_FS_H */
index a3b148a..dca3176 100644 (file)
@@ -249,7 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
                                         ((1 << ZONES_SHIFT) - 1);
 
        if (__builtin_constant_p(bit))
-               MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+               BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
        else {
 #ifdef CONFIG_DEBUG_VM
                BUG_ON((GFP_ZONE_BAD >> bit) & 1);
@@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
        return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-                       struct vm_area_struct *vma, unsigned long addr);
+                       struct vm_area_struct *vma, unsigned long addr,
+                       int node);
 #else
 #define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr)    \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)      \
        alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr)    \
-       alloc_pages_vma(gfp_mask, 0, vma, addr)
+#define alloc_page_vma(gfp_mask, vma, addr)                    \
+       alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+#define alloc_page_vma_node(gfp_mask, vma, addr, node)         \
+       alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
index 8e6c8c4..df29c8f 100644 (file)
@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
          (transparent_hugepage_flags &                                 \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&                   \
           ((__vma)->vm_flags & VM_HUGEPAGE))) &&                       \
-        !((__vma)->vm_flags & VM_NOHUGEPAGE))
+        !((__vma)->vm_flags & VM_NOHUGEPAGE) &&                        \
+        !is_vma_temporary_stack(__vma))
 #define transparent_hugepage_defrag(__vma)                             \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||                     \
index 4c4c74e..ba45e6b 100644 (file)
@@ -183,10 +183,10 @@ extern void                               icmpv6_cleanup(void);
 extern void                            icmpv6_param_prob(struct sk_buff *skb,
                                                          u8 code, int pos);
 
-struct flowi;
+struct flowi6;
 struct in6_addr;
 extern void                            icmpv6_flow_init(struct sock *sk,
-                                                        struct flowi *fl,
+                                                        struct flowi6 *fl6,
                                                         u8 type,
                                                         const struct in6_addr *saddr,
                                                         const struct in6_addr *daddr,
index 294169e..2d1c611 100644 (file)
@@ -1325,6 +1325,9 @@ enum {
 /* Although the spec says 8 I'm seeing 6 in practice */
 #define IEEE80211_COUNTRY_IE_MIN_LEN   6
 
+/* The Country String field of the element shall be 3 octets in length */
+#define IEEE80211_COUNTRY_STRING_LEN   3
+
 /*
  * For regulatory extension stuff see IEEE 802.11-2007
  * Annex I (page 1141) and Annex J (page 1147). Also
index 1239599..3bc63e6 100644 (file)
                                         * release skb->dst
                                         */
 #define IFF_DONT_BRIDGE 0x800          /* disallow bridging this ether dev */
-#define IFF_IN_NETPOLL 0x1000          /* whether we are processing netpoll */
-#define IFF_DISABLE_NETPOLL    0x2000  /* disable netpoll at run-time */
-#define IFF_MACVLAN_PORT       0x4000  /* device used as macvlan port */
-#define IFF_BRIDGE_PORT        0x8000          /* device used as bridge port */
-#define IFF_OVS_DATAPATH       0x10000 /* device used as Open vSwitch
+#define IFF_DISABLE_NETPOLL    0x1000  /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT       0x2000  /* device used as macvlan port */
+#define IFF_BRIDGE_PORT        0x4000          /* device used as bridge port */
+#define IFF_OVS_DATAPATH       0x8000  /* device used as Open vSwitch
                                         * datapath port */
 
 #define IF_GET_IFACE   0x0001          /* for querying only */
index 6485d2a..f4a2e6b 100644 (file)
@@ -135,6 +135,7 @@ enum {
        IFLA_VF_PORTS,
        IFLA_PORT_SELF,
        IFLA_AF_SPEC,
+       IFLA_GROUP,             /* Group the device belongs to */
        __IFLA_MAX
 };
 
index 74cfcff..82de336 100644 (file)
@@ -217,7 +217,7 @@ struct ip_mc_list {
 #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
 #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
 
-extern int ip_check_mc(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
+extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
 extern int igmp_rcv(struct sk_buff *);
 extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
 extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
index ae8fdc5..5f81466 100644 (file)
@@ -144,6 +144,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_ARP_NOTIFY(in_dev)      IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
 
 struct in_ifaddr {
+       struct hlist_node       hash;
        struct in_ifaddr        *ifa_next;
        struct in_device        *ifa_dev;
        struct rcu_head         rcu_head;
index e470d38..05e0328 100644 (file)
@@ -12,8 +12,6 @@
  * @cs_en:     pointer to the cs enable function
  * @cs_dis:    pointer to the cs disable function
  * @irq_read_val:    pointer to read the pen irq value function
- * @x_max_res: xmax resolution
- * @y_max_res: ymax resolution
  * @touch_x_max: touch x max
  * @touch_y_max: touch y max
  * @cs_pin: chip select pin
@@ -29,8 +27,6 @@ struct bu21013_platform_device {
        int (*cs_en)(int reset_pin);
        int (*cs_dis)(int reset_pin);
        int (*irq_read_val)(void);
-       int x_max_res;
-       int y_max_res;
        int touch_x_max;
        int touch_y_max;
        unsigned int cs_pin;
index 6974746..fe7c4b9 100644 (file)
@@ -4,8 +4,8 @@
 #include <linux/types.h>
 #include <linux/input.h>
 
-#define MATRIX_MAX_ROWS                16
-#define MATRIX_MAX_COLS                16
+#define MATRIX_MAX_ROWS                32
+#define MATRIX_MAX_COLS                32
 
 #define KEY(row, col, val)     ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
                                 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
index 5f43a3b..4deb383 100644 (file)
 #define IP_VS_CONN_F_TEMPLATE  0x1000          /* template, not connection */
 #define IP_VS_CONN_F_ONE_PACKET        0x2000          /* forward only one packet */
 
+#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
+                                 IP_VS_CONN_F_NOOUTPUT | \
+                                 IP_VS_CONN_F_INACTIVE | \
+                                 IP_VS_CONN_F_SEQ_MASK | \
+                                 IP_VS_CONN_F_NO_CPORT | \
+                                 IP_VS_CONN_F_TEMPLATE \
+                                )
+
 /* Flags that are not sent to backup server start from bit 16 */
 #define IP_VS_CONN_F_NFCT      (1 << 16)       /* use netfilter conntrack */
 
index abde252..80fcb53 100644 (file)
@@ -74,7 +74,8 @@ typedef       void (*irq_flow_handler_t)(unsigned int irq,
 
 #define IRQF_MODIFY_MASK       \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
-        IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+        IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+        IRQ_PER_CPU)
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
index d07d805..2fe6e84 100644 (file)
@@ -575,12 +575,6 @@ struct sysinfo {
        char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
 };
 
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-
-/* Force a compilation error if condition is constant and true */
-#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
-
 /* Force a compilation error if a constant expression is not a power of 2 */
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)                 \
        BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
@@ -592,6 +586,32 @@ struct sysinfo {
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
 
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+ * The implementation uses gcc's reluctance to create a negative array, but
+ * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
+ * to inline functions).  So as a fallback we use the optimizer; if it can't
+ * prove the condition is false, it will cause a link error on the undefined
+ * "__build_bug_on_failed".  This error message can be harder to track down
+ * though, hence the two different methods.
+ */
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int __build_bug_on_failed;
+#define BUILD_BUG_ON(condition)                                        \
+       do {                                                    \
+               ((void)sizeof(char[1 - 2*!!(condition)]));      \
+               if (condition) __build_bug_on_failed = 1;       \
+       } while(0)
+#endif
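
For illustration only (editorial example, not part of the patch): the rewritten macro is typically used to pin down compile-time invariants such as the size of a fixed wire format. 'struct my_hdr' is a made-up example.

#include <linux/kernel.h>
#include <linux/types.h>

struct my_hdr {
        __u8    type;
        __u8    flags;
        __u16   len;
} __attribute__((packed));

static inline void my_hdr_sanity_check(void)
{
        /* expands to a negative-sized array when the condition is true,
         * or to the __build_bug_on_failed link error under the
         * optimizer fallback documented above */
        BUILD_BUG_ON(sizeof(struct my_hdr) != 4);
}
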
+
 /* Trap pasters of __FUNCTION__ at compile-time */
 #define __FUNCTION__ (__func__)
 
index e91a4e5..a370ce5 100644 (file)
@@ -22,7 +22,7 @@ struct klist {
        struct list_head        k_list;
        void                    (*get)(struct klist_node *);
        void                    (*put)(struct klist_node *);
-} __attribute__ ((aligned (4)));
+} __attribute__ ((aligned (sizeof(void *))));
 
 #define KLIST_INIT(_name, _get, _put)                                  \
        { .k_lock       = __SPIN_LOCK_UNLOCKED(_name.k_lock),           \
index 08d7dc4..39f8453 100644 (file)
@@ -76,7 +76,7 @@ bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
                                                                        \
                _n = (long) &((ptr)->name##_end)                        \
                        - (long) &((ptr)->name##_begin);                \
-               MAYBE_BUILD_BUG_ON(_n < 0);                             \
+               BUILD_BUG_ON(_n < 0);                                   \
                                                                        \
                kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
        } while (0)
index 9a5f8a7..3a54266 100644 (file)
@@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
  * in an undefined state.
  */
 #ifndef CONFIG_DEBUG_LIST
+static inline void __list_del_entry(struct list_head *entry)
+{
+       __list_del(entry->prev, entry->next);
+}
+
 static inline void list_del(struct list_head *entry)
 {
        __list_del(entry->prev, entry->next);
@@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry)
        entry->prev = LIST_POISON2;
 }
 #else
+extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
@@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old,
  */
 static inline void list_del_init(struct list_head *entry)
 {
-       __list_del(entry->prev, entry->next);
+       __list_del_entry(entry);
        INIT_LIST_HEAD(entry);
 }
 
@@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry)
  */
 static inline void list_move(struct list_head *list, struct list_head *head)
 {
-       __list_del(list->prev, list->next);
+       __list_del_entry(list);
        list_add(list, head);
 }
 
@@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head)
 static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
 {
-       __list_del(list->prev, list->next);
+       __list_del_entry(list);
        list_add_tail(list, head);
 }
 
index 3fd3684..ef4f0b6 100644 (file)
@@ -71,6 +71,7 @@ struct wm8994 {
        u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
 
        /* Used over suspend/resume */
+       bool suspended;
        u16 ldo_regs[WM8994_NUM_LDO_REGS];
        u16 gpio_regs[WM8994_NUM_GPIO_REGS];
 
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
new file mode 100644 (file)
index 0000000..dd8da34
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _MICREL_PHY_H
+#define _MICREL_PHY_H
+
+#define MICREL_PHY_ID_MASK     0x00fffff0
+
+#define PHY_ID_KSZ9021         0x00221611
+#define PHY_ID_KS8737          0x00221720
+#define PHY_ID_KS8041          0x00221510
+#define PHY_ID_KS8051          0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
+#define PHY_ID_KS8001          0x0022161A
+
+/* struct phy_device dev_flags definitions */
+#define MICREL_PHY_50MHZ_CLK   0x00000001
+
+#endif /* _MICREL_PHY_H */
index bf17350..38d3930 100644 (file)
@@ -94,12 +94,12 @@ struct sh_mmcif_plat_data {
 
 static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
 {
-       return readl(addr + reg);
+       return __raw_readl(addr + reg);
 }
 
 static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
 {
-       writel(val, addr + reg);
+       __raw_writel(val, addr + reg);
 }
 
 #define SH_MMCIF_BBS 512 /* boot block size */
index 8b17fd8..5de4204 100644 (file)
@@ -58,6 +58,12 @@ struct module_attribute {
        void (*free)(struct module *);
 };
 
+struct module_version_attribute {
+       struct module_attribute mattr;
+       const char *module_name;
+       const char *version;
+} __attribute__ ((__aligned__(sizeof(void *))));
+
 struct module_kobject
 {
        struct kobject kobj;
@@ -161,7 +167,28 @@ extern struct module __this_module;
   Using this automatically adds a checksum of the .c files and the
   local headers in "srcversion".
 */
+
+#if defined(MODULE) || !defined(CONFIG_SYSFS)
 #define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#else
+#define MODULE_VERSION(_version)                                       \
+       extern ssize_t __modver_version_show(struct module_attribute *, \
+                                            struct module *, char *);  \
+       static struct module_version_attribute __modver_version_attr    \
+       __used                                                          \
+    __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
+       = {                                                             \
+               .mattr  = {                                             \
+                       .attr   = {                                     \
+                               .name   = "version",                    \
+                               .mode   = S_IRUGO,                      \
+                       },                                              \
+                       .show   = __modver_version_show,                \
+               },                                                      \
+               .module_name    = KBUILD_MODNAME,                       \
+               .version        = _version,                             \
+       }
+#endif
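
Editorial usage sketch (module name and version string are placeholders): with this change the version string of code built into the kernel also lands in the new __modver section collected by the vmlinux.lds.h hunk earlier in this diff, and is exposed under /sys/module/<name>/version when sysfs is enabled.

#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MODULE_VERSION demo");
MODULE_VERSION("1.0");          /* readable at /sys/module/demo/version */
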
 
 /* Optional firmware file (or files) needed by the module
  * format is simply firmware file name.  Multiple firmware
@@ -350,7 +377,7 @@ struct module
           keeping pointers to this stuff */
        char *args;
 #ifdef CONFIG_TRACEPOINTS
-       struct tracepoint *tracepoints;
+       struct tracepoint * const *tracepoints_ptrs;
        unsigned int num_tracepoints;
 #endif
 #ifdef HAVE_JUMP_LABEL
@@ -362,7 +389,7 @@ struct module
        unsigned int num_trace_bprintk_fmt;
 #endif
 #ifdef CONFIG_EVENT_TRACING
-       struct ftrace_event_call *trace_events;
+       struct ftrace_event_call **trace_events;
        unsigned int num_trace_events;
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
index 112adf8..07b4195 100644 (file)
 /* Chosen so that structs with an unsigned long line up. */
 #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
 
-#ifdef MODULE
 #define ___module_cat(a,b) __mod_ ## a ## b
 #define __module_cat(a,b) ___module_cat(a,b)
+#ifdef MODULE
 #define __MODULE_INFO(tag, name, info)                                   \
 static const char __module_cat(name,__LINE__)[]                                  \
   __used __attribute__((section(".modinfo"), unused, aligned(1)))        \
   = __stringify(tag) "=" info
 #else  /* !MODULE */
-#define __MODULE_INFO(tag, name, info)
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO(tag, name, info)                                   \
+  struct __module_cat(name,__LINE__) {}
 #endif
 #define __MODULE_PARM_TYPE(name, _type)                                          \
   __MODULE_INFO(parmtype, name##type, #name ":" _type)
index 0fa7a3a..b21d567 100644 (file)
@@ -150,6 +150,7 @@ static inline int ip_mroute_opt(int opt)
 extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
 extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
 extern int ip_mr_init(void);
 #else
 static inline
index 6091ab7..9d2deb2 100644 (file)
@@ -136,6 +136,7 @@ extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int
 extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ip6_mr_input(struct sk_buff *skb);
 extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
 extern int ip6_mr_init(void);
 extern void ip6_mr_cleanup(void);
 #else
index 16faa13..94de83c 100644 (file)
@@ -118,6 +118,7 @@ enum sock_shutdown_cmd {
 };
 
 struct socket_wq {
+       /* Note: wait MUST be first field of socket_wq */
        wait_queue_head_t       wait;
        struct fasync_struct    *fasync_list;
        struct rcu_head         rcu;
@@ -142,7 +143,7 @@ struct socket {
 
        unsigned long           flags;
 
-       struct socket_wq        *wq;
+       struct socket_wq __rcu  *wq;
 
        struct file             *file;
        struct sock             *sk;
index d971346..604dbf5 100644 (file)
@@ -138,6 +138,9 @@ static inline bool dev_xmit_complete(int rc)
 
 #define MAX_ADDR_LEN   32              /* Largest hardware address length */
 
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP      0
+
 #ifdef  __KERNEL__
 /*
  *     Compute the worst case header length according to the protocols
@@ -551,14 +554,16 @@ struct rps_map {
 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
 
 /*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
  */
 struct rps_dev_flow {
        u16 cpu;
-       u16 fill;
+       u16 filter;
        unsigned int last_qtail;
 };
+#define RPS_NO_FILTER 0xffff
 
 /*
  * The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +613,11 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+                               u32 flow_id, u16 filter_id);
+#endif
+
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
        struct rps_map __rcu            *rps_map;
@@ -643,6 +653,14 @@ struct xps_dev_maps {
     (nr_cpu_ids * sizeof(struct xps_map *)))
 #endif /* CONFIG_XPS */
 
+#define TC_MAX_QUEUE   16
+#define TC_BITMASK     15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+       u16 count;
+       u16 offset;
+};
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -753,6 +771,74 @@ struct xps_dev_maps {
  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  *                       struct nlattr *port[]);
  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ *     Called to set up 'tc' number of traffic classes in the net device. This
+ *     is always called from the stack with the rtnl lock held and netif tx
+ *     queues stopped. This allows the netdevice to perform queue management
+ *     safely.
+ *
+ *     Fiber Channel over Ethernet (FCoE) offload functions.
+ * int (*ndo_fcoe_enable)(struct net_device *dev);
+ *     Called when the FCoE protocol stack wants to start using LLD for FCoE
+ *     so the underlying device can perform whatever needed configuration or
+ *     initialization to support acceleration of FCoE traffic.
+ *
+ * int (*ndo_fcoe_disable)(struct net_device *dev);
+ *     Called when the FCoE protocol stack wants to stop using LLD for FCoE
+ *     so the underlying device can perform whatever needed clean-ups to
+ *     stop supporting acceleration of FCoE traffic.
+ *
+ * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
+ *                          struct scatterlist *sgl, unsigned int sgc);
+ *     Called when the FCoE Initiator wants to initialize an I/O that
+ *     is a possible candidate for Direct Data Placement (DDP). The LLD can
+ *     perform necessary setup and returns 1 to indicate the device is set up
+ *     successfully to perform DDP on this I/O, otherwise this returns 0.
+ *
+ * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
+ *     Called when the FCoE Initiator/Target is done with the DDPed I/O as
+ *     indicated by the FC exchange id 'xid', so the underlying device can
+ *     clean up and reuse resources for later DDP requests.
+ *
+ * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
+ *                           struct scatterlist *sgl, unsigned int sgc);
+ *     Called when the FCoE Target wants to initialize an I/O that
+ *     is a possible candidate for Direct Data Placement (DDP). The LLD can
+ *     perform necessary setup and returns 1 to indicate the device is set up
+ *     successfully to perform DDP on this I/O, otherwise this returns 0.
+ *
+ * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
+ *     Called when the underlying device wants to override default World Wide
+ *     Name (WWN) generation mechanism in FCoE protocol stack to pass its own
+ *     World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
+ *     protocol stack to use.
+ *
+ *     RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ *                         u16 rxq_index, u32 flow_id);
+ *     Set hardware filter for RFS.  rxq_index is the target queue index;
+ *     flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ *     Return the filter ID on success, or a negative error code.
+ *
+ *     Slave management functions (for bridge, bonding, etc). User should
+ *     call netdev_set_master() to set dev->master properly.
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *     Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *     Called to release previously enslaved netdev.
+ *
+ *      Feature/offload setting functions.
+ * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ *     Adjusts the requested feature flags according to device-specific
+ *     constraints, and returns the resulting flags. Must not modify
+ *     the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ *     Called to update device configuration to new features. Passed
+ *     feature set might be less than what was returned by ndo_fix_features().
+ *     Must return >0 or -errno if it changed dev->features itself.
+ *
  */
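
A hedged driver-side sketch of the two new feature callbacks documented above (all my_* identifiers are invented for illustration; a real net_device_ops would of course also fill in ndo_open, ndo_start_xmit and friends).

#include <linux/netdevice.h>

static void my_hw_set_rx_csum(struct net_device *dev, bool enable)
{
        /* program the (imaginary) hardware RX checksum engine here */
}

static u32 my_fix_features(struct net_device *dev, u32 features)
{
        /* example constraint: no RX checksumming with jumbo MTUs;
         * adjust the requested flags, do not touch dev state */
        if (dev->mtu > 1500)
                features &= ~NETIF_F_RXCSUM;
        return features;
}

static int my_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if (changed & NETIF_F_RXCSUM)
                my_hw_set_rx_csum(dev, features & NETIF_F_RXCSUM);
        return 0;       /* the core updates dev->features for us */
}

static const struct net_device_ops my_netdev_ops = {
        .ndo_fix_features       = my_fix_features,
        .ndo_set_features       = my_set_features,
};
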
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
@@ -811,6 +897,7 @@ struct net_device_ops {
                                                   struct nlattr *port[]);
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
+       int                     (*ndo_setup_tc)(struct net_device *dev, u8 tc);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
@@ -820,11 +907,29 @@ struct net_device_ops {
                                                      unsigned int sgc);
        int                     (*ndo_fcoe_ddp_done)(struct net_device *dev,
                                                     u16 xid);
+       int                     (*ndo_fcoe_ddp_target)(struct net_device *dev,
+                                                      u16 xid,
+                                                      struct scatterlist *sgl,
+                                                      unsigned int sgc);
 #define NETDEV_FCOE_WWNN 0
 #define NETDEV_FCOE_WWPN 1
        int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
                                                    u64 *wwn, int type);
 #endif
+#ifdef CONFIG_RFS_ACCEL
+       int                     (*ndo_rx_flow_steer)(struct net_device *dev,
+                                                    const struct sk_buff *skb,
+                                                    u16 rxq_index,
+                                                    u32 flow_id);
+#endif
+       int                     (*ndo_add_slave)(struct net_device *dev,
+                                                struct net_device *slave_dev);
+       int                     (*ndo_del_slave)(struct net_device *dev,
+                                                struct net_device *slave_dev);
+       u32                     (*ndo_fix_features)(struct net_device *dev,
+                                                   u32 features);
+       int                     (*ndo_set_features)(struct net_device *dev,
+                                                   u32 features);
 };
 
 /*
@@ -876,8 +981,18 @@ struct net_device {
        struct list_head        napi_list;
        struct list_head        unreg_list;
 
-       /* Net device features */
-       unsigned long           features;
+       /* currently active device features */
+       u32                     features;
+       /* user-changeable features */
+       u32                     hw_features;
+       /* user-requested features */
+       u32                     wanted_features;
+       /* VLAN feature mask */
+       u32                     vlan_features;
+
+       /* Net device feature bits; if you change something,
+        * also update netdev_features_strings[] in ethtool.c */
+
 #define NETIF_F_SG             1       /* Scatter/gather IO. */
 #define NETIF_F_IP_CSUM                2       /* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM                4       /* Does not require checksum. F.e. loopack. */
@@ -902,6 +1017,7 @@ struct net_device {
 #define NETIF_F_FCOE_MTU       (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE         (1 << 27) /* N-tuple filters supported */
 #define NETIF_F_RXHASH         (1 << 28) /* Receive hashing offload */
+#define NETIF_F_RXCSUM         (1 << 29) /* Receive checksumming offload */
 
        /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT      16
@@ -913,6 +1029,12 @@ struct net_device {
 #define NETIF_F_TSO6           (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_FSO            (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
+       /* Features valid for ethtool to change */
+       /* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE   (NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
+                                 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_ETHTOOL_BITS   (0x3f3fffff & ~NETIF_F_NEVER_CHANGE)
+
        /* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
                                 NETIF_F_TSO6 | NETIF_F_UFO)
@@ -923,6 +1045,12 @@ struct net_device {
 #define NETIF_F_V6_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 #define NETIF_F_ALL_CSUM       (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
+#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_TX_OFFLOADS        (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                                NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+
        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
@@ -931,6 +1059,9 @@ struct net_device {
                                 NETIF_F_SG | NETIF_F_HIGHDMA |         \
                                 NETIF_F_FRAGLIST)
 
+       /* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
+
        /* Interface index. Unique device identifier    */
        int                     ifindex;
        int                     iflink;
@@ -1039,6 +1170,13 @@ struct net_device {
 
        /* Number of RX queues currently active in device */
        unsigned int            real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+       /* CPU reverse-mapping for RX completion interrupts, indexed
+        * by RX queue number.  Assigned by driver.  This must only be
+        * set if the ndo_rx_flow_steer operation is defined. */
+       struct cpu_rmap         *rx_cpu_rmap;
+#endif
 #endif
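
/*
 * Illustrative sketch: how a multiqueue driver might fill in rx_cpu_rmap so
 * accelerated RFS can steer flows to the CPU servicing each queue's IRQ.
 * alloc_irq_cpu_rmap()/irq_cpu_rmap_add() come from <linux/cpu_rmap.h>; the
 * vector count and the queue_irq[] array are invented for this example.
 */
static int example_setup_rfs_rmap(struct net_device *dev, int nvec,
				  const int *queue_irq)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(nvec);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < nvec; i++)
		irq_cpu_rmap_add(dev->rx_cpu_rmap, queue_irq[i]);
#endif
	return 0;
}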
 
        rx_handler_func_t __rcu *rx_handler;
@@ -1132,9 +1270,6 @@ struct net_device {
        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;
 
-       /* VLAN feature mask */
-       unsigned long vlan_features;
-
        /* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE           65536
        unsigned int            gso_max_size;
@@ -1143,6 +1278,9 @@ struct net_device {
        /* Data Center Bridging netlink ops */
        const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
+       u8 num_tc;
+       struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+       u8 prio_tc_map[TC_BITMASK + 1];
 
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        /* max exchange id for FCoE LRO by ddp */
@@ -1153,11 +1291,65 @@ struct net_device {
 
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
+
+       /* group the device belongs to */
+       int group;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define        NETDEV_ALIGN            32
 
+static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+       return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+       if (tc >= dev->num_tc)
+               return -EINVAL;
+
+       dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+       return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+       dev->num_tc = 0;
+       memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+       memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+       if (tc >= dev->num_tc)
+               return -EINVAL;
+
+       dev->tc_to_txq[tc].count = count;
+       dev->tc_to_txq[tc].offset = offset;
+       return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+       if (num_tc > TC_MAX_QUEUE)
+               return -EINVAL;
+
+       dev->num_tc = num_tc;
+       return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+       return dev->num_tc;
+}
+
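/*
 * Illustrative sketch: carving a 16-queue device into two traffic classes
 * (8 TX queues each) and mapping priorities to them with the helpers above.
 * The queue layout and the 4/4 priority split are invented for the example.
 */
static void example_setup_tc(struct net_device *dev)
{
	int prio;

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 8, 0);	/* TC0: queues 0-7  */
	netdev_set_tc_queue(dev, 1, 8, 8);	/* TC1: queues 8-15 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
}
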
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
@@ -1300,7 +1492,7 @@ struct packet_type {
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
-                                               int features);
+                                               u32 features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
@@ -1345,7 +1537,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev)
        struct net *net;
 
        net = dev_net(dev);
-       lh = rcu_dereference(dev->dev_list.next);
+       lh = rcu_dereference(list_next_rcu(&dev->dev_list));
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
@@ -1355,6 +1547,13 @@ static inline struct net_device *first_net_device(struct net *net)
                net_device_entry(net->dev_base_head.next);
 }
 
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+       struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+       return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
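
/*
 * Illustrative sketch: a lockless walk of the per-namespace device list
 * using the RCU accessors above.  The loop body is only a placeholder.
 */
static inline void example_walk_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for (dev = first_net_device_rcu(net); dev;
	     dev = next_net_device_rcu(dev))
		pr_debug("saw %s\n", dev->name);
	rcu_read_unlock();
}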
+
 extern int                     netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long           netdev_boot_base(const char *prefix, int unit);
 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
@@ -1606,8 +1805,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
        if (WARN_ON(!dev_queue)) {
-               printk(KERN_INFO "netif_stop_queue() cannot be called before "
-                      "register_netdev()");
+               pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                return;
        }
        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
@@ -1844,6 +2042,7 @@ extern int                dev_set_alias(struct net_device *, const char *, size_t);
 extern int             dev_change_net_namespace(struct net_device *,
                                                 struct net *, const char *);
 extern int             dev_set_mtu(struct net_device *, int);
+extern void            dev_set_group(struct net_device *, int);
 extern int             dev_set_mac_address(struct net_device *,
                                            struct sockaddr *);
 extern int             dev_hard_start_xmit(struct sk_buff *skb,
@@ -2267,8 +2466,10 @@ extern int               netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
 extern int             netdev_set_master(struct net_device *dev, struct net_device *master);
+extern int netdev_set_bond_master(struct net_device *dev,
+                                 struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2295,22 +2496,26 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
 
 extern void linkwatch_run_queue(void);
 
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-                                       unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+static inline u32 netdev_get_wanted_features(struct net_device *dev)
+{
+       return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
+void netdev_update_features(struct net_device *dev);
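
/*
 * Illustrative sketch: a driver advertises its toggleable bits in
 * hw_features (typically at probe time) and asks the core to recompute the
 * active set whenever its constraints change.  The chosen feature bits are
 * only an example.
 */
static void example_init_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;
	/* ... later, whenever driver-side constraints change ... */
	netdev_update_features(dev);
}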
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
 {
        int feature = gso_type << NETIF_F_GSO_SHIFT;
        return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
 {
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
@@ -2328,15 +2533,9 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
        dev->gso_max_size = size;
 }
 
-extern int __skb_bond_should_drop(struct sk_buff *skb,
-                                 struct net_device *master);
-
-static inline int skb_bond_should_drop(struct sk_buff *skb,
-                                      struct net_device *master)
+static inline int netif_is_bond_slave(struct net_device *dev)
 {
-       if (master)
-               return __skb_bond_should_drop(skb, master);
-       return 0;
+       return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 }
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -2351,6 +2550,8 @@ static inline int dev_ethtool_get_settings(struct net_device *dev,
 
 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
 {
+       if (dev->hw_features & NETIF_F_RXCSUM)
+               return !!(dev->features & NETIF_F_RXCSUM);
        if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
                return 0;
        return dev->ethtool_ops->get_rx_csum(dev);
@@ -2392,6 +2593,9 @@ extern int netdev_notice(const struct net_device *dev, const char *format, ...)
 extern int netdev_info(const struct net_device *dev, const char *format, ...)
        __attribute__ ((format (printf, 2, 3)));
 
+#define MODULE_ALIAS_NETDEV(device) \
+       MODULE_ALIAS("netdev-" device)
+
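/*
 * Usage sketch: a driver that creates a fixed-name device can request
 * autoloading when that name is first referenced, e.g.
 * MODULE_ALIAS_NETDEV("mydev0") -- the device name here is hypothetical.
 */
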
 #if defined(DEBUG)
 #define netdev_dbg(__dev, format, args...)                     \
        netdev_printk(KERN_DEBUG, __dev, format, ##args)
index 1893837..eeec00a 100644
 #define NF_MAX_VERDICT NF_STOP
 
 /* we overload the higher bits for encoding auxiliary data such as the queue
- * number. Not nice, but better than additional function arguments. */
-#define NF_VERDICT_MASK 0x0000ffff
-#define NF_VERDICT_BITS 16
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
+
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS   0x00008000
 
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
 #define NF_VERDICT_QMASK 0xffff0000
 #define NF_VERDICT_QBITS 16
 
-#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
 
-#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
 
 /* only for userspace compatibility */
 #ifndef __KERNEL__
@@ -41,6 +45,9 @@
    <= 0x2000 is used for protocol-flags. */
 #define NFC_UNKNOWN 0x4000
 #define NFC_ALTERED 0x8000
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
 #endif
 
 enum nf_inet_hooks {
@@ -72,6 +79,10 @@ union nf_inet_addr {
 
 #ifdef __KERNEL__
 #ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+       return -(verdict >> NF_VERDICT_QBITS);
+}
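
/*
 * Worked example (illustration only): NF_QUEUE_NR(3) puts queue number 3 in
 * the upper 16 bits and NF_QUEUE in the low byte, while
 * NF_DROP_ERR(-EHOSTUNREACH) packs the positive errno into the same upper
 * bits so that NF_DROP_GETERR() recovers -EHOSTUNREACH from the verdict.
 */
static inline unsigned int example_reject_verdict(void)
{
	return NF_DROP_ERR(-EHOSTUNREACH);
}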
 
 static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
@@ -267,7 +278,7 @@ struct nf_afinfo {
        int             route_key_size;
 };
 
-extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
 static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
 {
        return rcu_dereference(nf_afinfo[family]);
@@ -357,9 +368,9 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 #endif /*CONFIG_NETFILTER*/
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
 extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
index 9d40eff..a1b410c 100644
@@ -1,3 +1,5 @@
+header-y += ipset/
+
 header-y += nf_conntrack_common.h
 header-y += nf_conntrack_ftp.h
 header-y += nf_conntrack_sctp.h
@@ -9,6 +11,7 @@ header-y += nfnetlink_conntrack.h
 header-y += nfnetlink_log.h
 header-y += nfnetlink_queue.h
 header-y += x_tables.h
+header-y += xt_AUDIT.h
 header-y += xt_CHECKSUM.h
 header-y += xt_CLASSIFY.h
 header-y += xt_CONNMARK.h
@@ -26,6 +29,7 @@ header-y += xt_TCPMSS.h
 header-y += xt_TCPOPTSTRIP.h
 header-y += xt_TEE.h
 header-y += xt_TPROXY.h
+header-y += xt_addrtype.h
 header-y += xt_cluster.h
 header-y += xt_comment.h
 header-y += xt_connbytes.h
@@ -34,6 +38,7 @@ header-y += xt_connmark.h
 header-y += xt_conntrack.h
 header-y += xt_cpu.h
 header-y += xt_dccp.h
+header-y += xt_devgroup.h
 header-y += xt_dscp.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
@@ -54,7 +59,9 @@ header-y += xt_quota.h
 header-y += xt_rateest.h
 header-y += xt_realm.h
 header-y += xt_recent.h
+header-y += xt_set.h
 header-y += xt_sctp.h
+header-y += xt_socket.h
 header-y += xt_state.h
 header-y += xt_statistic.h
 header-y += xt_string.h
diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild
new file mode 100644
index 0000000..601fe71
--- /dev/null
@@ -0,0 +1,4 @@
+header-y += ip_set.h
+header-y += ip_set_bitmap.h
+header-y += ip_set_hash.h
+header-y += ip_set_list.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644
index 0000000..ec333d8
--- /dev/null
@@ -0,0 +1,452 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                         Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The protocol version */
+#define IPSET_PROTOCOL         6
+
+/* The max length of strings including NUL: set and type identifiers */
+#define IPSET_MAXNAMELEN       32
+
+/* Message types and commands */
+enum ipset_cmd {
+       IPSET_CMD_NONE,
+       IPSET_CMD_PROTOCOL,     /* 1: Return protocol version */
+       IPSET_CMD_CREATE,       /* 2: Create a new (empty) set */
+       IPSET_CMD_DESTROY,      /* 3: Destroy an (empty) set */
+       IPSET_CMD_FLUSH,        /* 4: Remove all elements from a set */
+       IPSET_CMD_RENAME,       /* 5: Rename a set */
+       IPSET_CMD_SWAP,         /* 6: Swap two sets */
+       IPSET_CMD_LIST,         /* 7: List sets */
+       IPSET_CMD_SAVE,         /* 8: Save sets */
+       IPSET_CMD_ADD,          /* 9: Add an element to a set */
+       IPSET_CMD_DEL,          /* 10: Delete an element from a set */
+       IPSET_CMD_TEST,         /* 11: Test an element in a set */
+       IPSET_CMD_HEADER,       /* 12: Get set header data only */
+       IPSET_CMD_TYPE,         /* 13: Get set type */
+       IPSET_MSG_MAX,          /* Netlink message commands */
+
+       /* Commands in userspace: */
+       IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
+       IPSET_CMD_HELP,         /* 15: Get help */
+       IPSET_CMD_VERSION,      /* 16: Get program version */
+       IPSET_CMD_QUIT,         /* 17: Quit from interactive mode */
+
+       IPSET_CMD_MAX,
+
+       IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+};
+
+/* Attributes at command level */
+enum {
+       IPSET_ATTR_UNSPEC,
+       IPSET_ATTR_PROTOCOL,    /* 1: Protocol version */
+       IPSET_ATTR_SETNAME,     /* 2: Name of the set */
+       IPSET_ATTR_TYPENAME,    /* 3: Typename */
+       IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */
+       IPSET_ATTR_REVISION,    /* 4: Settype revision */
+       IPSET_ATTR_FAMILY,      /* 5: Settype family */
+       IPSET_ATTR_FLAGS,       /* 6: Flags at command level */
+       IPSET_ATTR_DATA,        /* 7: Nested attributes */
+       IPSET_ATTR_ADT,         /* 8: Multiple data containers */
+       IPSET_ATTR_LINENO,      /* 9: Restore lineno */
+       IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
+       IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+       __IPSET_ATTR_CMD_MAX,
+};
+#define IPSET_ATTR_CMD_MAX     (__IPSET_ATTR_CMD_MAX - 1)
+
+/* CADT specific attributes */
+enum {
+       IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1,
+       IPSET_ATTR_IP_FROM = IPSET_ATTR_IP,
+       IPSET_ATTR_IP_TO,       /* 2 */
+       IPSET_ATTR_CIDR,        /* 3 */
+       IPSET_ATTR_PORT,        /* 4 */
+       IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT,
+       IPSET_ATTR_PORT_TO,     /* 5 */
+       IPSET_ATTR_TIMEOUT,     /* 6 */
+       IPSET_ATTR_PROTO,       /* 7 */
+       IPSET_ATTR_CADT_FLAGS,  /* 8 */
+       IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO,     /* 9 */
+       /* Reserve empty slots */
+       IPSET_ATTR_CADT_MAX = 16,
+       /* Create-only specific attributes */
+       IPSET_ATTR_GC,
+       IPSET_ATTR_HASHSIZE,
+       IPSET_ATTR_MAXELEM,
+       IPSET_ATTR_NETMASK,
+       IPSET_ATTR_PROBES,
+       IPSET_ATTR_RESIZE,
+       IPSET_ATTR_SIZE,
+       /* Kernel-only */
+       IPSET_ATTR_ELEMENTS,
+       IPSET_ATTR_REFERENCES,
+       IPSET_ATTR_MEMSIZE,
+
+       __IPSET_ATTR_CREATE_MAX,
+};
+#define IPSET_ATTR_CREATE_MAX  (__IPSET_ATTR_CREATE_MAX - 1)
+
+/* ADT specific attributes */
+enum {
+       IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1,
+       IPSET_ATTR_NAME,
+       IPSET_ATTR_NAMEREF,
+       IPSET_ATTR_IP2,
+       IPSET_ATTR_CIDR2,
+       __IPSET_ATTR_ADT_MAX,
+};
+#define IPSET_ATTR_ADT_MAX     (__IPSET_ATTR_ADT_MAX - 1)
+
+/* IP specific attributes */
+enum {
+       IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1,
+       IPSET_ATTR_IPADDR_IPV6,
+       __IPSET_ATTR_IPADDR_MAX,
+};
+#define IPSET_ATTR_IPADDR_MAX  (__IPSET_ATTR_IPADDR_MAX - 1)
+
+/* Error codes */
+enum ipset_errno {
+       IPSET_ERR_PRIVATE = 4096,
+       IPSET_ERR_PROTOCOL,
+       IPSET_ERR_FIND_TYPE,
+       IPSET_ERR_MAX_SETS,
+       IPSET_ERR_BUSY,
+       IPSET_ERR_EXIST_SETNAME2,
+       IPSET_ERR_TYPE_MISMATCH,
+       IPSET_ERR_EXIST,
+       IPSET_ERR_INVALID_CIDR,
+       IPSET_ERR_INVALID_NETMASK,
+       IPSET_ERR_INVALID_FAMILY,
+       IPSET_ERR_TIMEOUT,
+       IPSET_ERR_REFERENCED,
+       IPSET_ERR_IPADDR_IPV4,
+       IPSET_ERR_IPADDR_IPV6,
+
+       /* Type specific error codes */
+       IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+/* Flags at command level */
+enum ipset_cmd_flags {
+       IPSET_FLAG_BIT_EXIST    = 0,
+       IPSET_FLAG_EXIST        = (1 << IPSET_FLAG_BIT_EXIST),
+};
+
+/* Flags at CADT attribute level */
+enum ipset_cadt_flags {
+       IPSET_FLAG_BIT_BEFORE   = 0,
+       IPSET_FLAG_BEFORE       = (1 << IPSET_FLAG_BIT_BEFORE),
+};
+
+/* Commands with settype-specific attributes */
+enum ipset_adt {
+       IPSET_ADD,
+       IPSET_DEL,
+       IPSET_TEST,
+       IPSET_ADT_MAX,
+       IPSET_CREATE = IPSET_ADT_MAX,
+       IPSET_CADT_MAX,
+};
+
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
+/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
+ * and IPSET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef u16 ip_set_id_t;
+
+#define IPSET_INVALID_ID               65535
+
+enum ip_set_dim {
+       IPSET_DIM_ZERO = 0,
+       IPSET_DIM_ONE,
+       IPSET_DIM_TWO,
+       IPSET_DIM_THREE,
+       /* Max dimension in elements.
+        * If changed, new revision of iptables match/target is required.
+        */
+       IPSET_DIM_MAX = 6,
+};
+
+/* Option flags for kernel operations */
+enum ip_set_kopt {
+       IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO),
+       IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
+       IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
+       IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+};
+
+/* Set features */
+enum ip_set_feature {
+       IPSET_TYPE_IP_FLAG = 0,
+       IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+       IPSET_TYPE_PORT_FLAG = 1,
+       IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+       IPSET_TYPE_MAC_FLAG = 2,
+       IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+       IPSET_TYPE_IP2_FLAG = 3,
+       IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+       IPSET_TYPE_NAME_FLAG = 4,
+       IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+       /* Strictly speaking not a feature, but a flag for dumping:
+        * this settype must be dumped last */
+       IPSET_DUMP_LAST_FLAG = 7,
+       IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+struct ip_set;
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+       /* Kernelspace: test/add/del entries
+        *              returns negative error code,
+        *                      zero for no match/success to add/delete
+        *                      positive for matching element */
+       int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+                   enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+
+       /* Userspace: test/add/del entries
+        *              returns negative error code,
+        *                      zero for no match/success to add/delete
+        *                      positive for matching element */
+       int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+                   enum ipset_adt adt, u32 *lineno, u32 flags);
+
+       /* Low level add/del/test functions */
+       ipset_adtfn adt[IPSET_ADT_MAX];
+
+       /* When adding entries and set is full, try to resize the set */
+       int (*resize)(struct ip_set *set, bool retried);
+       /* Destroy the set */
+       void (*destroy)(struct ip_set *set);
+       /* Flush the elements */
+       void (*flush)(struct ip_set *set);
+       /* Expire entries before listing */
+       void (*expire)(struct ip_set *set);
+       /* List set header data */
+       int (*head)(struct ip_set *set, struct sk_buff *skb);
+       /* List elements */
+       int (*list)(const struct ip_set *set, struct sk_buff *skb,
+                   struct netlink_callback *cb);
+
+       /* Return true if set "b" is the same as set "a",
+        * according to the set creation parameters */
+       bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+       struct list_head list;
+
+       /* Typename */
+       char name[IPSET_MAXNAMELEN];
+       /* Protocol version */
+       u8 protocol;
+       /* Set features to control swapping */
+       u8 features;
+       /* Set type dimension */
+       u8 dimension;
+       /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+       u8 family;
+       /* Type revision */
+       u8 revision;
+
+       /* Create set */
+       int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+
+       /* Attribute policies */
+       const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+       const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+       struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
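
/*
 * Illustrative sketch of a set-type registration.  Every name and value
 * below is invented; a real type also provides the nla policies and the
 * create/kadt/uadt callbacks declared in struct ip_set_type above.
 */
static struct ip_set_type hash_example_type = {
	.name		= "hash:example",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP,
	.dimension	= IPSET_DIM_ONE,
	.family		= AF_INET,
	.revision	= 0,
	.create		= hash_example_create,	/* hypothetical callback */
	.me		= THIS_MODULE,
};

static int __init hash_example_init(void)
{
	return ip_set_type_register(&hash_example_type);
}

static void __exit hash_example_fini(void)
{
	ip_set_type_unregister(&hash_example_type);
}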
+
+/* A generic IP set */
+struct ip_set {
+       /* The name of the set */
+       char name[IPSET_MAXNAMELEN];
+       /* Lock protecting the set data */
+       rwlock_t lock;
+       /* References to the set */
+       atomic_t ref;
+       /* The core set type */
+       struct ip_set_type *type;
+       /* The type variant doing the real job */
+       const struct ip_set_type_variant *variant;
+       /* The actual INET family of the set */
+       u8 family;
+       /* The type specific data */
+       void *data;
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(ip_set_id_t index);
+extern const char * ip_set_name_byindex(ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
+extern void ip_set_nfnl_put(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+                     u8 family, u8 dim, u8 flags);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+                     u8 family, u8 dim, u8 flags);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+                      u8 family, u8 dim, u8 flags);
+
+/* Utility functions */
+extern void * ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+       __be32 ip;
+       int ret = ip_set_get_ipaddr4(nla, &ip);
+
+       if (ret)
+               return ret;
+       *ipaddr = ntohl(ip);
+       return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+       return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+       return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+       return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+       return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+       return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
+#define ipset_nest_end(skb, start)  nla_nest_end(skb, start)
+
+#define NLA_PUT_IPADDR4(skb, type, ipaddr)                     \
+do {                                                           \
+       struct nlattr *__nested = ipset_nest_start(skb, type);  \
+                                                               \
+       if (!__nested)                                          \
+               goto nla_put_failure;                           \
+       NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);     \
+       ipset_nest_end(skb, __nested);                          \
+} while (0)
+
+#define NLA_PUT_IPADDR6(skb, type, ipaddrptr)                  \
+do {                                                           \
+       struct nlattr *__nested = ipset_nest_start(skb, type);  \
+                                                               \
+       if (!__nested)                                          \
+               goto nla_put_failure;                           \
+       NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6,                    \
+               sizeof(struct in6_addr), ipaddrptr);            \
+       ipset_nest_end(skb, __nested);                          \
+} while (0)
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+       return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+       *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+       memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+              sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+       return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
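
/* Worked example (illustration): bitmap_bytes(0, 31) = 4 and
 * bitmap_bytes(0, 63) = 8, i.e. one bit per element, rounded up to whole
 * bytes and then to a multiple of 4. */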
+
+/* Interface to iptables/ip6tables */
+
+#define SO_IP_SET              83
+
+union ip_set_name_index {
+       char name[IPSET_MAXNAMELEN];
+       ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME   0x00000006      /* Get set index by name */
+struct ip_set_req_get_set {
+       unsigned op;
+       unsigned version;
+       union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX  0x00000007      /* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION      0x00000100      /* Ask kernel version */
+struct ip_set_req_version {
+       unsigned op;
+       unsigned version;
+};
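
/*
 * Illustrative userspace sketch (an assumption, not a spec): the legacy
 * iptables interface above is exercised through getsockopt() on a raw IPv4
 * socket, e.g.
 *
 *	struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };
 *	socklen_t size = sizeof(req);
 *
 *	getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size);
 *	// req.version now holds the kernel's supported protocol version
 *
 * Error handling is omitted; "sockfd" is a raw socket opened by the caller.
 */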
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
new file mode 100644
index 0000000..ec9d9be
--- /dev/null
@@ -0,0 +1,1074 @@
+#ifndef _IP_SET_AHASH_H
+#define _IP_SET_AHASH_H
+
+#include <linux/rcupdate.h>
+#include <linux/jhash.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+/* Hashing which uses arrays to resolve clashes. The hash table is resized
+ * (doubled) when searching becomes too long.
+ * Internally jhash is used with the assumption that the size of the
+ * stored data is a multiple of sizeof(u32). If storage supports timeout,
+ * the timeout field must be the last one in the data structure - that field
+ * is ignored when computing the hash key.
+ *
+ * Readers and resizing
+ *
+ * Resizing can be triggered by userspace command only, and those
+ * are serialized by the nfnl mutex. During resizing the set is
+ * read-locked, so the only possible concurrent operations are
+ * the kernel-side readers. Those must be protected by proper RCU locking.
+ */
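
/*
 * Minimal reader pattern implied by the note above (illustration only):
 * kernel-side lookups dereference the current table under RCU-bh, exactly
 * as the add/test routines below do.
 *
 *	rcu_read_lock_bh();
 *	t = rcu_dereference_bh(h->table);
 *	... search the buckets of t ...
 *	rcu_read_unlock_bh();
 */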
+
+/* Number of elements to store in an initial array block */
+#define AHASH_INIT_SIZE                        4
+/* Max number of elements to store in an array block */
+#define AHASH_MAX_SIZE                 (3*4)
+
+/* A hash bucket */
+struct hbucket {
+       void *value;            /* the array of the values */
+       u8 size;                /* size of the array */
+       u8 pos;                 /* position of the first free entry */
+};
+
+/* The hash table: the table size stored here in order to make resizing easy */
+struct htable {
+       u8 htable_bits;         /* size of hash table == 2^htable_bits */
+       struct hbucket bucket[0]; /* hashtable buckets */
+};
+
+#define hbucket(h, i)          &((h)->bucket[i])
+
+/* Book-keeping of the prefixes added to the set */
+struct ip_set_hash_nets {
+       u8 cidr;                /* the different cidr values in the set */
+       u32 nets;               /* number of elements per cidr */
+};
+
+/* The generic ip_set hash structure */
+struct ip_set_hash {
+       struct htable *table;   /* the hash table */
+       u32 maxelem;            /* max elements in the hash */
+       u32 elements;           /* current number of elements (vs timeout) */
+       u32 initval;            /* random jhash init value */
+       u32 timeout;            /* timeout value, if enabled */
+       struct timer_list gc;   /* garbage collection when timeout enabled */
+#ifdef IP_SET_HASH_WITH_NETMASK
+       u8 netmask;             /* netmask value for subnets to store */
+#endif
+#ifdef IP_SET_HASH_WITH_NETS
+       struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
+#endif
+};
+
+/* Compute htable_bits from the user input parameter hashsize */
+static u8
+htable_bits(u32 hashsize)
+{
+       /* Assume that hashsize == 2^htable_bits */
+       u8 bits = fls(hashsize - 1);
+       if (jhash_size(bits) != hashsize)
+               /* Round up to the first 2^n value */
+               bits = fls(hashsize);
+
+       return bits;
+}
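
/* Worked example (illustration): hashsize 1024 gives fls(1023) = 10 and
 * jhash_size(10) == 1024, so 10 bits are used; hashsize 1500 is rounded
 * up to fls(1500) = 11, i.e. 2048 hash buckets. */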
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+#define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
+
+/* Network cidr size bookkeeping when the hash stores
+ * different-sized networks */
+static void
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+       u8 i;
+
+       ++h->nets[cidr-1].nets;
+
+       pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+       if (h->nets[cidr-1].nets > 1)
+               return;
+
+       /* New cidr size */
+       for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
+               /* Add in increasing prefix order, so larger cidr first */
+               if (h->nets[i].cidr < cidr)
+                       swap(h->nets[i].cidr, cidr);
+       }
+       if (i < host_mask)
+               h->nets[i].cidr = cidr;
+}
+
+static void
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+       u8 i;
+
+       --h->nets[cidr-1].nets;
+
+       pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+       if (h->nets[cidr-1].nets != 0)
+               return;
+
+       /* All entries with this cidr size deleted, so clean up h->nets[] */
+       for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
+               if (h->nets[i].cidr == cidr)
+                       h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+       }
+       h->nets[i - 1].cidr = 0;
+}
+#endif
+
+/* Destroy the hashtable part of the set */
+static void
+ahash_destroy(struct htable *t)
+{
+       struct hbucket *n;
+       u32 i;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               if (n->size)
+                       /* FIXME: use slab cache */
+                       kfree(n->value);
+       }
+
+       ip_set_free(t);
+}
+
+/* Calculate the actual memory size of the set data */
+static size_t
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+{
+       u32 i;
+       struct htable *t = h->table;
+       size_t memsize = sizeof(*h)
+                        + sizeof(*t)
+#ifdef IP_SET_HASH_WITH_NETS
+                        + sizeof(struct ip_set_hash_nets) * host_mask
+#endif
+                        + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++)
+                       memsize += t->bucket[i].size * dsize;
+
+       return memsize;
+}
+
+/* Flush a hash type of set: destroy all elements */
+static void
+ip_set_hash_flush(struct ip_set *set)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct hbucket *n;
+       u32 i;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               if (n->size) {
+                       n->size = n->pos = 0;
+                       /* FIXME: use slab cache */
+                       kfree(n->value);
+               }
+       }
+#ifdef IP_SET_HASH_WITH_NETS
+       memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
+                          * SET_HOST_MASK(set->family));
+#endif
+       h->elements = 0;
+}
+
+/* Destroy a hash type of set */
+static void
+ip_set_hash_destroy(struct ip_set *set)
+{
+       struct ip_set_hash *h = set->data;
+
+       if (with_timeout(h->timeout))
+               del_timer_sync(&h->gc);
+
+       ahash_destroy(h->table);
+       kfree(h);
+
+       set->data = NULL;
+}
+
+#define HKEY(data, initval, htable_bits)                                \
+(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
+       & jhash_mask(htable_bits))
+
+#endif /* _IP_SET_AHASH_H */
+
+#define CONCAT(a, b, c)                a##b##c
+#define TOKEN(a, b, c)         CONCAT(a, b, c)
+
+/* Type/family dependent function prototypes */
+
+#define type_pf_data_equal     TOKEN(TYPE, PF, _data_equal)
+#define type_pf_data_isnull    TOKEN(TYPE, PF, _data_isnull)
+#define type_pf_data_copy      TOKEN(TYPE, PF, _data_copy)
+#define type_pf_data_zero_out  TOKEN(TYPE, PF, _data_zero_out)
+#define type_pf_data_netmask   TOKEN(TYPE, PF, _data_netmask)
+#define type_pf_data_list      TOKEN(TYPE, PF, _data_list)
+#define type_pf_data_tlist     TOKEN(TYPE, PF, _data_tlist)
+
+#define type_pf_elem           TOKEN(TYPE, PF, _elem)
+#define type_pf_telem          TOKEN(TYPE, PF, _telem)
+#define type_pf_data_timeout   TOKEN(TYPE, PF, _data_timeout)
+#define type_pf_data_expired   TOKEN(TYPE, PF, _data_expired)
+#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
+
+#define type_pf_elem_add       TOKEN(TYPE, PF, _elem_add)
+#define type_pf_add            TOKEN(TYPE, PF, _add)
+#define type_pf_del            TOKEN(TYPE, PF, _del)
+#define type_pf_test_cidrs     TOKEN(TYPE, PF, _test_cidrs)
+#define type_pf_test           TOKEN(TYPE, PF, _test)
+
+#define type_pf_elem_tadd      TOKEN(TYPE, PF, _elem_tadd)
+#define type_pf_del_telem      TOKEN(TYPE, PF, _ahash_del_telem)
+#define type_pf_expire         TOKEN(TYPE, PF, _expire)
+#define type_pf_tadd           TOKEN(TYPE, PF, _tadd)
+#define type_pf_tdel           TOKEN(TYPE, PF, _tdel)
+#define type_pf_ttest_cidrs    TOKEN(TYPE, PF, _ahash_ttest_cidrs)
+#define type_pf_ttest          TOKEN(TYPE, PF, _ahash_ttest)
+
+#define type_pf_resize         TOKEN(TYPE, PF, _resize)
+#define type_pf_tresize                TOKEN(TYPE, PF, _tresize)
+#define type_pf_flush          ip_set_hash_flush
+#define type_pf_destroy                ip_set_hash_destroy
+#define type_pf_head           TOKEN(TYPE, PF, _head)
+#define type_pf_list           TOKEN(TYPE, PF, _list)
+#define type_pf_tlist          TOKEN(TYPE, PF, _tlist)
+#define type_pf_same_set       TOKEN(TYPE, PF, _same_set)
+#define type_pf_kadt           TOKEN(TYPE, PF, _kadt)
+#define type_pf_uadt           TOKEN(TYPE, PF, _uadt)
+#define type_pf_gc             TOKEN(TYPE, PF, _gc)
+#define type_pf_gc_init                TOKEN(TYPE, PF, _gc_init)
+#define type_pf_variant                TOKEN(TYPE, PF, _variant)
+#define type_pf_tvariant       TOKEN(TYPE, PF, _tvariant)
+
+/* Flavour without timeout */
+
+/* Get the ith element from the array block n */
+#define ahash_data(n, i)       \
+       ((struct type_pf_elem *)((n)->value) + (i))
+
+/* Add an element to the hash table when resizing the set:
+ * we skip maintaining the internal counters. */
+static int
+type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value)
+{
+       if (n->pos >= n->size) {
+               void *tmp;
+
+               if (n->size >= AHASH_MAX_SIZE)
+                       /* Trigger rehashing */
+                       return -EAGAIN;
+
+               tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+                             * sizeof(struct type_pf_elem),
+                             GFP_ATOMIC);
+               if (!tmp)
+                       return -ENOMEM;
+               if (n->size) {
+                       memcpy(tmp, n->value,
+                              sizeof(struct type_pf_elem) * n->size);
+                       kfree(n->value);
+               }
+               n->value = tmp;
+               n->size += AHASH_INIT_SIZE;
+       }
+       type_pf_data_copy(ahash_data(n, n->pos++), value);
+       return 0;
+}
+
+/* Resize a hash: create a new hash table with double the hashsize and
+ * insert the elements into it. Repeat until we succeed or
+ * fail due to memory pressure. */
+static int
+type_pf_resize(struct ip_set *set, bool retried)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t, *orig = h->table;
+       u8 htable_bits = orig->htable_bits;
+       const struct type_pf_elem *data;
+       struct hbucket *n, *m;
+       u32 i, j;
+       int ret;
+
+retry:
+       ret = 0;
+       htable_bits++;
+       pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+                set->name, orig->htable_bits, htable_bits, orig);
+       if (!htable_bits)
+               /* In case we have plenty of memory :-) */
+               return -IPSET_ERR_HASH_FULL;
+       t = ip_set_alloc(sizeof(*t)
+                        + jhash_size(htable_bits) * sizeof(struct hbucket));
+       if (!t)
+               return -ENOMEM;
+       t->htable_bits = htable_bits;
+
+       read_lock_bh(&set->lock);
+       for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+               n = hbucket(orig, i);
+               for (j = 0; j < n->pos; j++) {
+                       data = ahash_data(n, j);
+                       m = hbucket(t, HKEY(data, h->initval, htable_bits));
+                       ret = type_pf_elem_add(m, data);
+                       if (ret < 0) {
+                               read_unlock_bh(&set->lock);
+                               ahash_destroy(t);
+                               if (ret == -EAGAIN)
+                                       goto retry;
+                               return ret;
+                       }
+               }
+       }
+
+       rcu_assign_pointer(h->table, t);
+       read_unlock_bh(&set->lock);
+
+       /* Give time to other readers of the set */
+       synchronize_rcu_bh();
+
+       pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
+                orig->htable_bits, orig, t->htable_bits, t);
+       ahash_destroy(orig);
+
+       return 0;
+}
+
+/* Add an element to a hash and update the internal counters when it
+ * succeeds; otherwise report the proper error code. */
+static int
+type_pf_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       int i, ret = 0;
+       u32 key;
+
+       if (h->elements >= h->maxelem)
+               return -IPSET_ERR_HASH_FULL;
+
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
+       key = HKEY(value, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++)
+               if (type_pf_data_equal(ahash_data(n, i), d)) {
+                       ret = -IPSET_ERR_EXIST;
+                       goto out;
+               }
+
+       ret = type_pf_elem_add(n, value);
+       if (ret != 0)
+               goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       add_cidr(h, d->cidr, HOST_MASK);
+#endif
+       h->elements++;
+out:
+       rcu_read_unlock_bh();
+       return ret;
+}
+
+/* Delete an element from the hash: swap it with the last element
+ * and free up space if possible.
+ */
+static int
+type_pf_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       int i;
+       struct type_pf_elem *data;
+       u32 key;
+
+       key = HKEY(value, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_data(n, i);
+               if (!type_pf_data_equal(data, d))
+                       continue;
+               if (i != n->pos - 1)
+                       /* Not last one */
+                       type_pf_data_copy(data, ahash_data(n, n->pos - 1));
+
+               n->pos--;
+               h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+               del_cidr(h, d->cidr, HOST_MASK);
+#endif
+               if (n->pos + AHASH_INIT_SIZE < n->size) {
+                       void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+                                           * sizeof(struct type_pf_elem),
+                                           GFP_ATOMIC);
+                       if (!tmp)
+                               return 0;
+                       n->size -= AHASH_INIT_SIZE;
+                       memcpy(tmp, n->value,
+                              n->size * sizeof(struct type_pf_elem));
+                       kfree(n->value);
+                       n->value = tmp;
+               }
+               return 0;
+       }
+
+       return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+/* Special test function which takes into account the different network
+ * sizes added to the set */
+static int
+type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct hbucket *n;
+       const struct type_pf_elem *data;
+       int i, j = 0;
+       u32 key;
+       u8 host_mask = SET_HOST_MASK(set->family);
+
+       pr_debug("test by nets\n");
+       for (; j < host_mask && h->nets[j].cidr; j++) {
+               type_pf_data_netmask(d, h->nets[j].cidr);
+               key = HKEY(d, h->initval, t->htable_bits);
+               n = hbucket(t, key);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_data(n, i);
+                       if (type_pf_data_equal(data, d))
+                               return 1;
+               }
+       }
+       return 0;
+}
+#endif
+
+/* Test whether the element is added to the set */
+static int
+type_pf_test(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct type_pf_elem *d = value;
+       struct hbucket *n;
+       const struct type_pf_elem *data;
+       int i;
+       u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       /* If we test an IP address and not a network address,
+        * try all possible network sizes */
+       if (d->cidr == SET_HOST_MASK(set->family))
+               return type_pf_test_cidrs(set, d, timeout);
+#endif
+
+       key = HKEY(d, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_data(n, i);
+               if (type_pf_data_equal(data, d))
+                       return 1;
+       }
+       return 0;
+}
+
+/* Reply to a HEADER request: fill out the header part of the set */
+static int
+type_pf_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct ip_set_hash *h = set->data;
+       struct nlattr *nested;
+       size_t memsize;
+
+       read_lock_bh(&set->lock);
+       memsize = ahash_memsize(h, with_timeout(h->timeout)
+                                       ? sizeof(struct type_pf_telem)
+                                       : sizeof(struct type_pf_elem),
+                               set->family == AF_INET ? 32 : 128);
+       read_unlock_bh(&set->lock);
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
+                     htonl(jhash_size(h->table->htable_bits)));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+#ifdef IP_SET_HASH_WITH_NETMASK
+       if (h->netmask != HOST_MASK)
+               NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+#endif
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
+       if (with_timeout(h->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* Reply to a LIST/SAVE request: dump the elements of the specified set */
+static int
+type_pf_list(const struct ip_set *set,
+            struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct ip_set_hash *h = set->data;
+       const struct htable *t = h->table;
+       struct nlattr *atd, *nested;
+       const struct hbucket *n;
+       const struct type_pf_elem *data;
+       u32 first = cb->args[2];
+       /* We assume that one hash bucket fits into one page */
+       void *incomplete;
+       int i;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       pr_debug("list hash set %s\n", set->name);
+       for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+               incomplete = skb_tail_pointer(skb);
+               n = hbucket(t, cb->args[2]);
+               pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_data(n, i);
+                       pr_debug("list hash %lu hbucket %p i %u, data %p\n",
+                                cb->args[2], n, i, data);
+                       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+                       if (!nested) {
+                               if (cb->args[2] == first) {
+                                       nla_nest_cancel(skb, atd);
+                                       return -EMSGSIZE;
+                               } else
+                                       goto nla_put_failure;
+                       }
+                       if (type_pf_data_list(skb, data))
+                               goto nla_put_failure;
+                       ipset_nest_end(skb, nested);
+               }
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, incomplete);
+       ipset_nest_end(skb, atd);
+       if (unlikely(first == cb->args[2])) {
+               pr_warning("Can't list set %s: one bucket does not fit into "
+                          "a message. Please report it!\n", set->name);
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static int
+type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
+            enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+static int
+type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
+            enum ipset_adt adt, u32 *lineno, u32 flags);
+
+static const struct ip_set_type_variant type_pf_variant = {
+       .kadt   = type_pf_kadt,
+       .uadt   = type_pf_uadt,
+       .adt    = {
+               [IPSET_ADD] = type_pf_add,
+               [IPSET_DEL] = type_pf_del,
+               [IPSET_TEST] = type_pf_test,
+       },
+       .destroy = type_pf_destroy,
+       .flush  = type_pf_flush,
+       .head   = type_pf_head,
+       .list   = type_pf_list,
+       .resize = type_pf_resize,
+       .same_set = type_pf_same_set,
+};
+
+/* Flavour with timeout support */
+
+#define ahash_tdata(n, i) \
+       (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
+
+static inline u32
+type_pf_data_timeout(const struct type_pf_elem *data)
+{
+       const struct type_pf_telem *tdata =
+               (const struct type_pf_telem *) data;
+
+       return tdata->timeout;
+}
+
+static inline bool
+type_pf_data_expired(const struct type_pf_elem *data)
+{
+       const struct type_pf_telem *tdata =
+               (const struct type_pf_telem *) data;
+
+       return ip_set_timeout_expired(tdata->timeout);
+}
+
+static inline void
+type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
+{
+       struct type_pf_telem *tdata = (struct type_pf_telem *) data;
+
+       tdata->timeout = ip_set_timeout_set(timeout);
+}
+
+static int
+type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
+                 u32 timeout)
+{
+       struct type_pf_elem *data;
+
+       if (n->pos >= n->size) {
+               void *tmp;
+
+               if (n->size >= AHASH_MAX_SIZE)
+                       /* Trigger rehashing */
+                       return -EAGAIN;
+
+               tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+                             * sizeof(struct type_pf_telem),
+                             GFP_ATOMIC);
+               if (!tmp)
+                       return -ENOMEM;
+               if (n->size) {
+                       memcpy(tmp, n->value,
+                              sizeof(struct type_pf_telem) * n->size);
+                       kfree(n->value);
+               }
+               n->value = tmp;
+               n->size += AHASH_INIT_SIZE;
+       }
+       data = ahash_tdata(n, n->pos++);
+       type_pf_data_copy(data, value);
+       type_pf_data_timeout_set(data, timeout);
+       return 0;
+}
+
+/* Delete expired elements from the hashtable */
+static void
+type_pf_expire(struct ip_set_hash *h)
+{
+       struct htable *t = h->table;
+       struct hbucket *n;
+       struct type_pf_elem *data;
+       u32 i;
+       int j;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               for (j = 0; j < n->pos; j++) {
+                       data = ahash_tdata(n, j);
+                       if (type_pf_data_expired(data)) {
+                               pr_debug("expired %u/%u\n", i, j);
+#ifdef IP_SET_HASH_WITH_NETS
+                               del_cidr(h, data->cidr, HOST_MASK);
+#endif
+                               if (j != n->pos - 1)
+                                       /* Not last one */
+                                       type_pf_data_copy(data,
+                                               ahash_tdata(n, n->pos - 1));
+                               n->pos--;
+                               h->elements--;
+                       }
+               }
+               if (n->pos + AHASH_INIT_SIZE < n->size) {
+                       void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+                                           * sizeof(struct type_pf_telem),
+                                           GFP_ATOMIC);
+                       if (!tmp)
+                               /* Still try to delete expired elements */
+                               continue;
+                       n->size -= AHASH_INIT_SIZE;
+                       memcpy(tmp, n->value,
+                              n->size * sizeof(struct type_pf_telem));
+                       kfree(n->value);
+                       n->value = tmp;
+               }
+       }
+}
+
+static int
+type_pf_tresize(struct ip_set *set, bool retried)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t, *orig = h->table;
+       u8 htable_bits = orig->htable_bits;
+       const struct type_pf_elem *data;
+       struct hbucket *n, *m;
+       u32 i, j;
+       int ret;
+
+       /* Try to cleanup once */
+       if (!retried) {
+               i = h->elements;
+               write_lock_bh(&set->lock);
+               type_pf_expire(set->data);
+               write_unlock_bh(&set->lock);
+               if (h->elements < i)
+                       return 0;
+       }
+
+retry:
+       ret = 0;
+       htable_bits++;
+       if (!htable_bits)
+               /* In case we have plenty of memory :-) */
+               return -IPSET_ERR_HASH_FULL;
+       t = ip_set_alloc(sizeof(*t)
+                        + jhash_size(htable_bits) * sizeof(struct hbucket));
+       if (!t)
+               return -ENOMEM;
+       t->htable_bits = htable_bits;
+
+       read_lock_bh(&set->lock);
+       for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+               n = hbucket(orig, i);
+               for (j = 0; j < n->pos; j++) {
+                       data = ahash_tdata(n, j);
+                       m = hbucket(t, HKEY(data, h->initval, htable_bits));
+                       ret = type_pf_elem_tadd(m, data,
+                                               type_pf_data_timeout(data));
+                       if (ret < 0) {
+                               read_unlock_bh(&set->lock);
+                               ahash_destroy(t);
+                               if (ret == -EAGAIN)
+                                       goto retry;
+                               return ret;
+                       }
+               }
+       }
+
+       rcu_assign_pointer(h->table, t);
+       read_unlock_bh(&set->lock);
+
+       /* Give time to other readers of the set */
+       synchronize_rcu_bh();
+
+       ahash_destroy(orig);
+
+       return 0;
+}
+
+static int
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       struct type_pf_elem *data;
+       int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+       u32 key;
+
+       if (h->elements >= h->maxelem)
+               /* FIXME: when set is full, we slow down here */
+               type_pf_expire(h);
+       if (h->elements >= h->maxelem)
+               return -IPSET_ERR_HASH_FULL;
+
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
+       key = HKEY(d, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_tdata(n, i);
+               if (type_pf_data_equal(data, d)) {
+                       if (type_pf_data_expired(data))
+                               j = i;
+                       else {
+                               ret = -IPSET_ERR_EXIST;
+                               goto out;
+                       }
+               } else if (j == AHASH_MAX_SIZE + 1 &&
+                          type_pf_data_expired(data))
+                       j = i;
+       }
+       if (j != AHASH_MAX_SIZE + 1) {
+               data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+               del_cidr(h, data->cidr, HOST_MASK);
+               add_cidr(h, d->cidr, HOST_MASK);
+#endif
+               type_pf_data_copy(data, d);
+               type_pf_data_timeout_set(data, timeout);
+               goto out;
+       }
+       ret = type_pf_elem_tadd(n, d, timeout);
+       if (ret != 0)
+               goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       add_cidr(h, d->cidr, HOST_MASK);
+#endif
+       h->elements++;
+out:
+       rcu_read_unlock_bh();
+       return ret;
+}
+
+static int
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       int i, ret = 0;
+       struct type_pf_elem *data;
+       u32 key;
+
+       key = HKEY(value, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_tdata(n, i);
+               if (!type_pf_data_equal(data, d))
+                       continue;
+               if (type_pf_data_expired(data))
+                       ret = -IPSET_ERR_EXIST;
+               if (i != n->pos - 1)
+                       /* Not last one */
+                       type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
+
+               n->pos--;
+               h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+               del_cidr(h, d->cidr, HOST_MASK);
+#endif
+               if (n->pos + AHASH_INIT_SIZE < n->size) {
+                       void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+                                           * sizeof(struct type_pf_telem),
+                                           GFP_ATOMIC);
+                       if (!tmp)
+                               return 0;
+                       n->size -= AHASH_INIT_SIZE;
+                       memcpy(tmp, n->value,
+                              n->size * sizeof(struct type_pf_telem));
+                       kfree(n->value);
+                       n->value = tmp;
+               }
+               return 0;
+       }
+
+       return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+static int
+type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct type_pf_elem *data;
+       struct hbucket *n;
+       int i, j = 0;
+       u32 key;
+       u8 host_mask = SET_HOST_MASK(set->family);
+
+       for (; j < host_mask && h->nets[j].cidr; j++) {
+               type_pf_data_netmask(d, h->nets[j].cidr);
+               key = HKEY(d, h->initval, t->htable_bits);
+               n = hbucket(t, key);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_tdata(n, i);
+                       if (type_pf_data_equal(data, d))
+                               return !type_pf_data_expired(data);
+               }
+       }
+       return 0;
+}
+#endif
+
+static int
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct type_pf_elem *data, *d = value;
+       struct hbucket *n;
+       int i;
+       u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       if (d->cidr == SET_HOST_MASK(set->family))
+               return type_pf_ttest_cidrs(set, d, timeout);
+#endif
+       key = HKEY(d, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_tdata(n, i);
+               if (type_pf_data_equal(data, d))
+                       return !type_pf_data_expired(data);
+       }
+       return 0;
+}
+
+static int
+type_pf_tlist(const struct ip_set *set,
+             struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct ip_set_hash *h = set->data;
+       const struct htable *t = h->table;
+       struct nlattr *atd, *nested;
+       const struct hbucket *n;
+       const struct type_pf_elem *data;
+       u32 first = cb->args[2];
+       /* We assume that one hash bucket fits into one page */
+       void *incomplete;
+       int i;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+               incomplete = skb_tail_pointer(skb);
+               n = hbucket(t, cb->args[2]);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_tdata(n, i);
+                       pr_debug("list %p %u\n", n, i);
+                       if (type_pf_data_expired(data))
+                               continue;
+                       pr_debug("do list %p %u\n", n, i);
+                       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+                       if (!nested) {
+                               if (cb->args[2] == first) {
+                                       nla_nest_cancel(skb, atd);
+                                       return -EMSGSIZE;
+                               } else
+                                       goto nla_put_failure;
+                       }
+                       if (type_pf_data_tlist(skb, data))
+                               goto nla_put_failure;
+                       ipset_nest_end(skb, nested);
+               }
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, incomplete);
+       ipset_nest_end(skb, atd);
+       if (unlikely(first == cb->args[2])) {
+               pr_warning("Can't list set %s: one bucket does not fit into "
+                          "a message. Please report it!\n", set->name);
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static const struct ip_set_type_variant type_pf_tvariant = {
+       .kadt   = type_pf_kadt,
+       .uadt   = type_pf_uadt,
+       .adt    = {
+               [IPSET_ADD] = type_pf_tadd,
+               [IPSET_DEL] = type_pf_tdel,
+               [IPSET_TEST] = type_pf_ttest,
+       },
+       .destroy = type_pf_destroy,
+       .flush  = type_pf_flush,
+       .head   = type_pf_head,
+       .list   = type_pf_tlist,
+       .resize = type_pf_tresize,
+       .same_set = type_pf_same_set,
+};
+
+static void
+type_pf_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct ip_set_hash *h = set->data;
+
+       pr_debug("called\n");
+       write_lock_bh(&set->lock);
+       type_pf_expire(h);
+       write_unlock_bh(&set->lock);
+
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       add_timer(&h->gc);
+}
+
+static void
+type_pf_gc_init(struct ip_set *set)
+{
+       struct ip_set_hash *h = set->data;
+
+       init_timer(&h->gc);
+       h->gc.data = (unsigned long) set;
+       h->gc.function = type_pf_gc;
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       add_timer(&h->gc);
+       pr_debug("gc initialized, run in every %u\n",
+                IPSET_GC_PERIOD(h->timeout));
+}
+
+#undef type_pf_data_equal
+#undef type_pf_data_isnull
+#undef type_pf_data_copy
+#undef type_pf_data_zero_out
+#undef type_pf_data_list
+#undef type_pf_data_tlist
+
+#undef type_pf_elem
+#undef type_pf_telem
+#undef type_pf_data_timeout
+#undef type_pf_data_expired
+#undef type_pf_data_netmask
+#undef type_pf_data_timeout_set
+
+#undef type_pf_elem_add
+#undef type_pf_add
+#undef type_pf_del
+#undef type_pf_test_cidrs
+#undef type_pf_test
+
+#undef type_pf_elem_tadd
+#undef type_pf_expire
+#undef type_pf_tadd
+#undef type_pf_tdel
+#undef type_pf_ttest_cidrs
+#undef type_pf_ttest
+
+#undef type_pf_resize
+#undef type_pf_tresize
+#undef type_pf_flush
+#undef type_pf_destroy
+#undef type_pf_head
+#undef type_pf_list
+#undef type_pf_tlist
+#undef type_pf_same_set
+#undef type_pf_kadt
+#undef type_pf_uadt
+#undef type_pf_gc
+#undef type_pf_gc_init
+#undef type_pf_variant
+#undef type_pf_tvariant
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644 (file)
index 0000000..61a9e87
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+/* Bitmap type specific error codes */
+enum {
+       /* The element is out of the range of the set */
+       IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC,
+       /* The range exceeds the size limit of the set type */
+       IPSET_ERR_BITMAP_RANGE_SIZE,
+};
+
+#ifdef __KERNEL__
+#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
+
+/* Common functions */
+
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+       u32 mask = 0xFFFFFFFE;
+
+       *bits = 32;
+       while (--(*bits) > 0 && mask && (to & mask) != from)
+               mask <<= 1;
+
+       return mask;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_BITMAP_H */
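
The range_to_mask() helper above derives the prefix mask (and its length) that covers a from/to address pair, stopping as soon as (to & mask) folds back onto from. A minimal user-space sketch of the same arithmetic, not part of the patch, with made-up demo addresses:

/* Illustrative user-space copy of range_to_mask(); not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
{
        uint32_t mask = 0xFFFFFFFE;

        *bits = 32;
        while (--(*bits) > 0 && mask && (to & mask) != from)
                mask <<= 1;

        return mask;
}

int main(void)
{
        uint8_t bits;
        /* 192.168.0.0 - 192.168.0.255 in host byte order */
        uint32_t mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);

        /* prints mask=0xffffff00 bits=24, i.e. 192.168.0.0/24 */
        printf("mask=0x%08x bits=%u\n", mask, bits);
        return 0;
}
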
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644 (file)
index 0000000..3882a81
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+                               __be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+                               __be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+                                      __be16 *port, u8 *proto)
+{
+       return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+                               __be16 *port);
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644 (file)
index 0000000..b86f15c
--- /dev/null
@@ -0,0 +1,26 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+/* Hash type specific error codes */
+enum {
+       /* Hash is full */
+       IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC,
+       /* Null-valued element */
+       IPSET_ERR_HASH_ELEM,
+       /* Invalid protocol */
+       IPSET_ERR_INVALID_PROTO,
+       /* Protocol missing but must be specified */
+       IPSET_ERR_MISSING_PROTO,
+};
+
+#ifdef __KERNEL__
+
+#define IPSET_DEFAULT_HASHSIZE         1024
+#define IPSET_MIMINAL_HASHSIZE         64
+#define IPSET_DEFAULT_MAXELEM          65536
+#define IPSET_DEFAULT_PROBES           4
+#define IPSET_DEFAULT_RESIZE           100
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_HASH_H */
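
For scale: jhash_size(bits), used by the resize path earlier in this patch, expands to (u32)1 << bits in <linux/jhash.h>, so the default hashsize of 1024 corresponds to roughly 10 table bits and every resize retry doubles the bucket count. A stand-alone sketch of that relationship (jhash_size is re-declared here for illustration, not copied from the kernel header):

/* Illustrative sketch only; jhash_size() is re-declared, not copied. */
#include <stdint.h>
#include <stdio.h>

#define IPSET_DEFAULT_HASHSIZE  1024

static uint32_t jhash_size(uint8_t bits)
{
        return (uint32_t)1 << bits;     /* number of hash buckets */
}

int main(void)
{
        uint8_t bits = 0;

        /* smallest table covering the default hashsize */
        while (jhash_size(bits) < IPSET_DEFAULT_HASHSIZE)
                bits++;

        /* prints: htable_bits=10 buckets=1024 after-resize=2048 */
        printf("htable_bits=%u buckets=%u after-resize=%u\n",
               bits, jhash_size(bits), jhash_size(bits + 1));
        return 0;
}
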
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644 (file)
index 0000000..40a63f3
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+/* List type specific error codes */
+enum {
+       /* Set name to be added/deleted/tested does not exist. */
+       IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC,
+       /* list:set type is not permitted to add */
+       IPSET_ERR_LOOP,
+       /* Missing reference set */
+       IPSET_ERR_BEFORE,
+       /* Reference set does not exist */
+       IPSET_ERR_NAMEREF,
+       /* Set is full */
+       IPSET_ERR_LIST_FULL,
+       /* Reference set is not added to the set */
+       IPSET_ERR_REF_EXIST,
+};
+
+#ifdef __KERNEL__
+
+#define IP_SET_LIST_DEFAULT_SIZE       8
+#define IP_SET_LIST_MIN_SIZE           4
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644 (file)
index 0000000..9f30c5f
--- /dev/null
@@ -0,0 +1,127 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME                  (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+       ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
+
+/* Set is defined without timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT       UINT_MAX
+
+#define with_timeout(timeout)  ((timeout) != IPSET_NO_TIMEOUT)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+       unsigned int timeout = ip_set_get_h32(tb);
+
+       /* Userspace supplied TIMEOUT parameter: adjust crazy size */
+       return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+#ifdef IP_SET_BITMAP_TIMEOUT
+
+/* Bitmap specific timeout constants and macros for the entries */
+
+/* Bitmap entry is unset */
+#define IPSET_ELEM_UNSET       0
+/* Bitmap entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT   (UINT_MAX/2)
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+       return timeout != IPSET_ELEM_UNSET &&
+              (timeout == IPSET_ELEM_PERMANENT ||
+               time_after(timeout, jiffies));
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+       return timeout != IPSET_ELEM_UNSET &&
+              timeout != IPSET_ELEM_PERMANENT &&
+              time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+       unsigned long t;
+
+       if (!timeout)
+               return IPSET_ELEM_PERMANENT;
+
+       t = timeout * HZ + jiffies;
+       if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
+               /* Bingo! */
+               t++;
+
+       return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+       return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+
+#else
+
+/* Hash specific timeout constants and macros for the entries */
+
+/* Hash entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT   0
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+       return timeout == IPSET_ELEM_PERMANENT ||
+              time_after(timeout, jiffies);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+       return timeout != IPSET_ELEM_PERMANENT &&
+              time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+       unsigned long t;
+
+       if (!timeout)
+               return IPSET_ELEM_PERMANENT;
+
+       t = timeout * HZ + jiffies;
+       if (t == IPSET_ELEM_PERMANENT)
+               /* Bingo! :-) */
+               t++;
+
+       return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+       return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+#endif /* ! IP_SET_BITMAP_TIMEOUT */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_SET_TIMEOUT_H */
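
In the hash branch above IPSET_ELEM_PERMANENT is 0, so a zero user timeout means the entry never expires, while any other value is stored as an absolute deadline in jiffies. A user-space sketch of the round trip, with jiffies and HZ faked purely for illustration:

/* Illustrative user-space model of the hash-variant timeout helpers;
 * jiffies and HZ are faked, the logic mirrors the header above. */
#include <stdbool.h>
#include <stdio.h>

#define HZ                      100     /* fake clock rate */
#define IPSET_ELEM_PERMANENT    0

static unsigned long jiffies = 1000;    /* fake current time */

static bool time_before(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

static unsigned long ip_set_timeout_set(unsigned int timeout)
{
        unsigned long t;

        if (!timeout)
                return IPSET_ELEM_PERMANENT;

        t = timeout * HZ + jiffies;
        if (t == IPSET_ELEM_PERMANENT)
                t++;                    /* avoid colliding with "permanent" */
        return t;
}

static bool ip_set_timeout_expired(unsigned long timeout)
{
        return timeout != IPSET_ELEM_PERMANENT && time_before(timeout, jiffies);
}

int main(void)
{
        unsigned long t = ip_set_timeout_set(10);       /* 10 second timeout */

        printf("expired now?   %d\n", ip_set_timeout_expired(t));  /* 0 */
        jiffies += 11 * HZ;                             /* advance 11 seconds */
        printf("expired later? %d\n", ip_set_timeout_expired(t));  /* 1 */
        return 0;
}
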
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644 (file)
index 0000000..0e1fb50
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h> 
+
+/* Prefixlen maps, by Jan Engelhardt  */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+       return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+       return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+       return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+       return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+#endif /*_PFXLEN_H */
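
ip_set_netmask_map[] and ip_set_hostmask_map[] are precomputed per-prefix-length tables defined outside this hunk, so the inline helpers reduce to a table lookup. For IPv4 an entry is equivalent to the usual shift computation; a stand-alone sketch of that equivalence (illustrative only, not the kernel table):

/* Illustrative sketch of what an IPv4 entry of ip_set_netmask_map holds. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* network-order netmask for a prefix length 0..32 */
static uint32_t netmask_for(uint8_t pfxlen)
{
        return pfxlen ? htonl(~(uint32_t)0 << (32 - pfxlen)) : 0;
}

int main(void)
{
        /* ip_set_netmask(24) would return this value: 255.255.255.0 */
        struct in_addr a = { .s_addr = netmask_for(24) };

        printf("pfxlen 24 -> %s\n", inet_ntoa(a));
        return 0;
}
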
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644 (file)
index 0000000..064bc63
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+                               unsigned int protoff,
+                               struct nf_conn *ct,
+                               enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
index 361d6b5..2b11fc1 100644 (file)
@@ -47,7 +47,8 @@ struct nfgenmsg {
 #define NFNL_SUBSYS_QUEUE              3
 #define NFNL_SUBSYS_ULOG               4
 #define NFNL_SUBSYS_OSF                        5
-#define NFNL_SUBSYS_COUNT              6
+#define NFNL_SUBSYS_IPSET              6
+#define NFNL_SUBSYS_COUNT              7
 
 #ifdef __KERNEL__
 
index 19711e3..debf1ae 100644 (file)
@@ -42,6 +42,7 @@ enum ctattr_type {
        CTA_SECMARK,            /* obsolete */
        CTA_ZONE,
        CTA_SECCTX,
+       CTA_TIMESTAMP,
        __CTA_MAX
 };
 #define CTA_MAX (__CTA_MAX - 1)
@@ -127,6 +128,14 @@ enum ctattr_counters {
 };
 #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
 
+enum ctattr_tstamp {
+       CTA_TIMESTAMP_UNSPEC,
+       CTA_TIMESTAMP_START,
+       CTA_TIMESTAMP_STOP,
+       __CTA_TIMESTAMP_MAX
+};
+#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
+
 enum ctattr_nat {
        CTA_NAT_UNSPEC,
        CTA_NAT_MINIP,
index 6712e71..3721952 100644 (file)
@@ -611,8 +611,9 @@ struct _compat_xt_align {
 extern void xt_compat_lock(u_int8_t af);
 extern void xt_compat_unlock(u_int8_t af);
 
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
 extern void xt_compat_flush_offsets(u_int8_t af);
+extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
 extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
 extern int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h
new file mode 100644 (file)
index 0000000..38751d2
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Header file for iptables xt_AUDIT target
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _XT_AUDIT_TARGET_H
+#define _XT_AUDIT_TARGET_H
+
+#include <linux/types.h>
+
+enum {
+       XT_AUDIT_TYPE_ACCEPT = 0,
+       XT_AUDIT_TYPE_DROP,
+       XT_AUDIT_TYPE_REJECT,
+       __XT_AUDIT_TYPE_MAX,
+};
+
+#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)
+
+struct xt_audit_info {
+       __u8 type; /* XT_AUDIT_TYPE_* */
+};
+
+#endif /* _XT_AUDIT_TARGET_H */
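
The AUDIT target carries a single byte selecting which audit record type to emit. A stand-alone sketch of filling it, with the enum and struct copied from the header above and the main() purely illustrative:

/* Illustrative sketch of the xt_AUDIT target data; user-space copy. */
#include <stdint.h>
#include <stdio.h>

enum {
        XT_AUDIT_TYPE_ACCEPT = 0,
        XT_AUDIT_TYPE_DROP,
        XT_AUDIT_TYPE_REJECT,
        __XT_AUDIT_TYPE_MAX,
};
#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)

struct xt_audit_info {
        uint8_t type;           /* XT_AUDIT_TYPE_* */
};

int main(void)
{
        /* roughly what an "-j AUDIT" rule set to drop would hand over */
        struct xt_audit_info info = { .type = XT_AUDIT_TYPE_DROP };

        printf("audit type %u (max %u)\n", info.type, XT_AUDIT_TYPE_MAX);
        return 0;
}
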
index 1b56410..b56e768 100644 (file)
@@ -1,14 +1,16 @@
 #ifndef _XT_CT_H
 #define _XT_CT_H
 
+#include <linux/types.h>
+
 #define XT_CT_NOTRACK  0x1
 
 struct xt_ct_target_info {
-       u_int16_t       flags;
-       u_int16_t       zone;
-       u_int32_t       ct_events;
-       u_int32_t       exp_events;
-       char            helper[16];
+       __u16 flags;
+       __u16 zone;
+       __u32 ct_events;
+       __u32 exp_events;
+       char helper[16];
 
        /* Used internally by the kernel */
        struct nf_conn  *ct __attribute__((aligned(8)));
index 2584f4a..9eafdbb 100644 (file)
@@ -20,4 +20,10 @@ struct xt_NFQ_info_v1 {
        __u16 queues_total;
 };
 
+struct xt_NFQ_info_v2 {
+       __u16 queuenum;
+       __u16 queues_total;
+       __u16 bypass;
+};
+
 #endif /* _XT_NFQ_TARGET_H */
index 2db5432..7157318 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef _XT_TCPOPTSTRIP_H
 #define _XT_TCPOPTSTRIP_H
 
+#include <linux/types.h>
+
 #define tcpoptstrip_set_bit(bmap, idx) \
        (bmap[(idx) >> 5] |= 1U << (idx & 31))
 #define tcpoptstrip_test_bit(bmap, idx) \
        (((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0)
 
 struct xt_tcpoptstrip_target_info {
-       u_int32_t strip_bmap[8];
+       __u32 strip_bmap[8];
 };
 
 #endif /* _XT_TCPOPTSTRIP_H */
index 3f3d693..902043c 100644 (file)
@@ -1,19 +1,21 @@
 #ifndef _XT_TPROXY_H
 #define _XT_TPROXY_H
 
+#include <linux/types.h>
+
 /* TPROXY target is capable of marking the packet to perform
  * redirection. We can get rid of that whenever we get support for
  * multiple targets in the same rule. */
 struct xt_tproxy_target_info {
-       u_int32_t mark_mask;
-       u_int32_t mark_value;
+       __u32 mark_mask;
+       __u32 mark_value;
        __be32 laddr;
        __be16 lport;
 };
 
 struct xt_tproxy_target_info_v1 {
-       u_int32_t mark_mask;
-       u_int32_t mark_value;
+       __u32 mark_mask;
+       __u32 mark_value;
        union nf_inet_addr laddr;
        __be16 lport;
 };
diff --git a/include/linux/netfilter/xt_addrtype.h b/include/linux/netfilter/xt_addrtype.h
new file mode 100644 (file)
index 0000000..b156baa
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef _XT_ADDRTYPE_H
+#define _XT_ADDRTYPE_H
+
+#include <linux/types.h>
+
+enum {
+       XT_ADDRTYPE_INVERT_SOURCE       = 0x0001,
+       XT_ADDRTYPE_INVERT_DEST         = 0x0002,
+       XT_ADDRTYPE_LIMIT_IFACE_IN      = 0x0004,
+       XT_ADDRTYPE_LIMIT_IFACE_OUT     = 0x0008,
+};
+
+
+/* rtn_type enum values from rtnetlink.h, but shifted */
+enum {
+       XT_ADDRTYPE_UNSPEC = 1 << 0,
+       XT_ADDRTYPE_UNICAST = 1 << 1,   /* 1 << RTN_UNICAST */
+       XT_ADDRTYPE_LOCAL  = 1 << 2,    /* 1 << RTN_LOCAL, etc */
+       XT_ADDRTYPE_BROADCAST = 1 << 3,
+       XT_ADDRTYPE_ANYCAST = 1 << 4,
+       XT_ADDRTYPE_MULTICAST = 1 << 5,
+       XT_ADDRTYPE_BLACKHOLE = 1 << 6,
+       XT_ADDRTYPE_UNREACHABLE = 1 << 7,
+       XT_ADDRTYPE_PROHIBIT = 1 << 8,
+       XT_ADDRTYPE_THROW = 1 << 9,
+       XT_ADDRTYPE_NAT = 1 << 10,
+       XT_ADDRTYPE_XRESOLVE = 1 << 11,
+};
+
+struct xt_addrtype_info_v1 {
+       __u16   source;         /* source-type mask */
+       __u16   dest;           /* dest-type mask */
+       __u32   flags;
+};
+
+/* revision 0 */
+struct xt_addrtype_info {
+       __u16   source;         /* source-type mask */
+       __u16   dest;           /* dest-type mask */
+       __u32   invert_source;
+       __u32   invert_dest;
+};
+
+#endif
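
Revision 1 of the addrtype match replaces the two invert words of revision 0 with one flags field carrying the invert and interface-limit bits. A user-space sketch, with the constants and struct copied from the header above and the example rule hypothetical:

/* Illustrative sketch of revision 1 addrtype match data; user-space copy. */
#include <stdint.h>
#include <stdio.h>

enum {
        XT_ADDRTYPE_INVERT_SOURCE       = 0x0001,
        XT_ADDRTYPE_INVERT_DEST         = 0x0002,
        XT_ADDRTYPE_LIMIT_IFACE_IN      = 0x0004,
        XT_ADDRTYPE_LIMIT_IFACE_OUT     = 0x0008,
};

enum {
        XT_ADDRTYPE_UNICAST   = 1 << 1, /* 1 << RTN_UNICAST */
        XT_ADDRTYPE_LOCAL     = 1 << 2, /* 1 << RTN_LOCAL */
        XT_ADDRTYPE_BROADCAST = 1 << 3,
};

struct xt_addrtype_info_v1 {
        uint16_t source;        /* source-type mask */
        uint16_t dest;          /* dest-type mask */
        uint32_t flags;
};

int main(void)
{
        /* match packets whose destination is NOT a local address,
         * evaluated against the incoming interface */
        struct xt_addrtype_info_v1 info = {
                .dest  = XT_ADDRTYPE_LOCAL,
                .flags = XT_ADDRTYPE_INVERT_DEST | XT_ADDRTYPE_LIMIT_IFACE_IN,
        };

        printf("dest mask 0x%04x flags 0x%08x\n", info.dest, info.flags);
        return 0;
}
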
index 8866826..9b883c8 100644 (file)
@@ -1,15 +1,17 @@
 #ifndef _XT_CLUSTER_MATCH_H
 #define _XT_CLUSTER_MATCH_H
 
+#include <linux/types.h>
+
 enum xt_cluster_flags {
        XT_CLUSTER_F_INV        = (1 << 0)
 };
 
 struct xt_cluster_match_info {
-       u_int32_t               total_nodes;
-       u_int32_t               node_mask;
-       u_int32_t               hash_seed;
-       u_int32_t               flags;
+       __u32 total_nodes;
+       __u32 node_mask;
+       __u32 hash_seed;
+       __u32 flags;
 };
 
 #define XT_CLUSTER_NODES_MAX   32
index eacfedc..0ea5e79 100644 (file)
@@ -4,7 +4,7 @@
 #define XT_MAX_COMMENT_LEN 256
 
 struct xt_comment_info {
-       unsigned char comment[XT_MAX_COMMENT_LEN];
+       char comment[XT_MAX_COMMENT_LEN];
 };
 
 #endif /* XT_COMMENT_H */
index 7e3284b..0ca66e9 100644 (file)
@@ -1,8 +1,15 @@
 #ifndef _XT_CONNLIMIT_H
 #define _XT_CONNLIMIT_H
 
+#include <linux/types.h>
+
 struct xt_connlimit_data;
 
+enum {
+       XT_CONNLIMIT_INVERT = 1 << 0,
+       XT_CONNLIMIT_DADDR  = 1 << 1,
+};
+
 struct xt_connlimit_info {
        union {
                union nf_inet_addr mask;
@@ -13,7 +20,14 @@ struct xt_connlimit_info {
                };
 #endif
        };
-       unsigned int limit, inverse;
+       unsigned int limit;
+       union {
+               /* revision 0 */
+               unsigned int inverse;
+
+               /* revision 1 */
+               __u32 flags;
+       };
 
        /* Used internally by the kernel */
        struct xt_connlimit_data *data __attribute__((aligned(8)));
index 54f47a2..74b904d 100644 (file)
@@ -58,4 +58,19 @@ struct xt_conntrack_mtinfo2 {
        __u16 state_mask, status_mask;
 };
 
+struct xt_conntrack_mtinfo3 {
+       union nf_inet_addr origsrc_addr, origsrc_mask;
+       union nf_inet_addr origdst_addr, origdst_mask;
+       union nf_inet_addr replsrc_addr, replsrc_mask;
+       union nf_inet_addr repldst_addr, repldst_mask;
+       __u32 expires_min, expires_max;
+       __u16 l4proto;
+       __u16 origsrc_port, origdst_port;
+       __u16 replsrc_port, repldst_port;
+       __u16 match_flags, invert_flags;
+       __u16 state_mask, status_mask;
+       __u16 origsrc_port_high, origdst_port_high;
+       __u16 replsrc_port_high, repldst_port_high;
+};
+
 #endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h
new file mode 100644 (file)
index 0000000..1babde0
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _XT_DEVGROUP_H
+#define _XT_DEVGROUP_H
+
+#include <linux/types.h>
+
+enum xt_devgroup_flags {
+       XT_DEVGROUP_MATCH_SRC   = 0x1,
+       XT_DEVGROUP_INVERT_SRC  = 0x2,
+       XT_DEVGROUP_MATCH_DST   = 0x4,
+       XT_DEVGROUP_INVERT_DST  = 0x8,
+};
+
+struct xt_devgroup_info {
+       __u32   flags;
+       __u32   src_group;
+       __u32   src_mask;
+       __u32   dst_group;
+       __u32   dst_mask;
+};
+
+#endif /* _XT_DEVGROUP_H */
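
The devgroup match compares the numeric group of the input and/or output device against a masked value. A stand-alone sketch, with the flags and struct copied from the header above and the group numbers made up:

/* Illustrative sketch of the devgroup match data; user-space copy. */
#include <stdint.h>
#include <stdio.h>

enum xt_devgroup_flags {
        XT_DEVGROUP_MATCH_SRC   = 0x1,
        XT_DEVGROUP_INVERT_SRC  = 0x2,
        XT_DEVGROUP_MATCH_DST   = 0x4,
        XT_DEVGROUP_INVERT_DST  = 0x8,
};

struct xt_devgroup_info {
        uint32_t flags;
        uint32_t src_group;
        uint32_t src_mask;
        uint32_t dst_group;
        uint32_t dst_mask;
};

int main(void)
{
        /* match packets arriving on any device in group 0x10 */
        struct xt_devgroup_info info = {
                .flags     = XT_DEVGROUP_MATCH_SRC,
                .src_group = 0x10,
                .src_mask  = 0xffffffff,
        };

        printf("flags 0x%x src group 0x%x/0x%x\n",
               info.flags, info.src_group, info.src_mask);
        return 0;
}
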
index b0d28c6..ca6e03e 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_QUOTA_H
 #define _XT_QUOTA_H
 
+#include <linux/types.h>
+
 enum xt_quota_flags {
        XT_QUOTA_INVERT         = 0x1,
 };
@@ -9,9 +11,9 @@ enum xt_quota_flags {
 struct xt_quota_priv;
 
 struct xt_quota_info {
-       u_int32_t               flags;
-       u_int32_t               pad;
-       aligned_u64             quota;
+       __u32 flags;
+       __u32 pad;
+       aligned_u64 quota;
 
        /* Used internally by the kernel */
        struct xt_quota_priv    *master;
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
new file mode 100644 (file)
index 0000000..081f1de
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef _XT_SET_H
+#define _XT_SET_H
+
+#include <linux/types.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+/*
+ * Option flags for kernel operations (xt_set_info_v0)
+ */
+#define IPSET_SRC              0x01    /* Source match/add */
+#define IPSET_DST              0x02    /* Destination match/add */
+#define IPSET_MATCH_INV                0x04    /* Inverse matching */
+
+struct xt_set_info_v0 {
+       ip_set_id_t index;
+       union {
+               __u32 flags[IPSET_DIM_MAX + 1];
+               struct {
+                       __u32 __flags[IPSET_DIM_MAX];
+                       __u8 dim;
+                       __u8 flags;
+               } compat;
+       } u;
+};
+
+/* match and target infos */
+struct xt_set_info_match_v0 {
+       struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+       struct xt_set_info_v0 add_set;
+       struct xt_set_info_v0 del_set;
+};
+
+/* Revision 1: current interface to netfilter/iptables */
+
+struct xt_set_info {
+       ip_set_id_t index;
+       __u8 dim;
+       __u8 flags;
+};
+
+/* match and target infos */
+struct xt_set_info_match {
+       struct xt_set_info match_set;
+};
+
+struct xt_set_info_target {
+       struct xt_set_info add_set;
+       struct xt_set_info del_set;
+};
+
+#endif /*_XT_SET_H*/
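
In revision 0 the dim/flags pair is overlaid on the last word of the flags[] array through the anonymous compat struct, which is what keeps the layout usable by older iptables binaries. A quick layout check; IPSET_DIM_MAX = 3 and the 16-bit ip_set_id_t are assumptions taken from ip_set.h, which is not part of this hunk:

/* Illustrative layout check for xt_set_info_v0; user-space copy.
 * IPSET_DIM_MAX = 3 and the 16-bit ip_set_id_t are assumptions. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IPSET_DIM_MAX 3
typedef uint16_t ip_set_id_t;

struct xt_set_info_v0 {
        ip_set_id_t index;
        union {
                uint32_t flags[IPSET_DIM_MAX + 1];
                struct {
                        uint32_t __flags[IPSET_DIM_MAX];
                        uint8_t dim;
                        uint8_t flags;
                } compat;
        } u;
};

int main(void)
{
        /* dim/flags occupy the first bytes of u.flags[IPSET_DIM_MAX] */
        printf("sizeof=%zu off(flags[3])=%zu off(compat.dim)=%zu\n",
               sizeof(struct xt_set_info_v0),
               offsetof(struct xt_set_info_v0, u.flags[IPSET_DIM_MAX]),
               offsetof(struct xt_set_info_v0, u.compat.dim));
        return 0;
}
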
index 6f475b8..26d7217 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_SOCKET_H
 #define _XT_SOCKET_H
 
+#include <linux/types.h>
+
 enum {
        XT_SOCKET_TRANSPARENT = 1 << 0,
 };
index 14b6df4..7c37fac 100644 (file)
@@ -1,14 +1,16 @@
 #ifndef _XT_TIME_H
 #define _XT_TIME_H 1
 
+#include <linux/types.h>
+
 struct xt_time_info {
-       u_int32_t date_start;
-       u_int32_t date_stop;
-       u_int32_t daytime_start;
-       u_int32_t daytime_stop;
-       u_int32_t monthdays_match;
-       u_int8_t weekdays_match;
-       u_int8_t flags;
+       __u32 date_start;
+       __u32 date_stop;
+       __u32 daytime_start;
+       __u32 daytime_stop;
+       __u32 monthdays_match;
+       __u8 weekdays_match;
+       __u8 flags;
 };
 
 enum {
index 9947f56..04d1bfe 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_U32_H
 #define _XT_U32_H 1
 
+#include <linux/types.h>
+
 enum xt_u32_ops {
        XT_U32_AND,
        XT_U32_LEFTSH,
@@ -9,13 +11,13 @@ enum xt_u32_ops {
 };
 
 struct xt_u32_location_element {
-       u_int32_t number;
-       u_int8_t nextop;
+       __u32 number;
+       __u8 nextop;
 };
 
 struct xt_u32_value_element {
-       u_int32_t min;
-       u_int32_t max;
+       __u32 min;
+       __u32 max;
 };
 
 /*
@@ -27,14 +29,14 @@ struct xt_u32_value_element {
 struct xt_u32_test {
        struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
        struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
-       u_int8_t nnums;
-       u_int8_t nvalues;
+       __u8 nnums;
+       __u8 nvalues;
 };
 
 struct xt_u32 {
        struct xt_u32_test tests[XT_U32_MAXSIZE+1];
-       u_int8_t ntests;
-       u_int8_t invert;
+       __u8 ntests;
+       __u8 invert;
 };
 
 #endif /* _XT_U32_H */
index c73ef0b..be5be15 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_802_3_H
 #define __LINUX_BRIDGE_EBT_802_3_H
 
+#include <linux/types.h>
+
 #define EBT_802_3_SAP 0x01
 #define EBT_802_3_TYPE 0x02
 
 
 /* ui has one byte ctrl, ni has two */
 struct hdr_ui {
-       uint8_t dsap;
-       uint8_t ssap;
-       uint8_t ctrl;
-       uint8_t orig[3];
+       __u8 dsap;
+       __u8 ssap;
+       __u8 ctrl;
+       __u8 orig[3];
        __be16 type;
 };
 
 struct hdr_ni {
-       uint8_t dsap;
-       uint8_t ssap;
+       __u8 dsap;
+       __u8 ssap;
        __be16 ctrl;
-       uint8_t  orig[3];
+       __u8  orig[3];
        __be16 type;
 };
 
 struct ebt_802_3_hdr {
-       uint8_t  daddr[6];
-       uint8_t  saddr[6];
+       __u8  daddr[6];
+       __u8  saddr[6];
        __be16 len;
        union {
                struct hdr_ui ui;
@@ -59,10 +61,10 @@ static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
 #endif
 
 struct ebt_802_3_info {
-       uint8_t  sap;
+       __u8  sap;
        __be16 type;
-       uint8_t  bitmask;
-       uint8_t  invflags;
+       __u8  bitmask;
+       __u8  invflags;
 };
 
 #endif
index 0009558..bd4e3ad 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_AMONG_H
 #define __LINUX_BRIDGE_EBT_AMONG_H
 
+#include <linux/types.h>
+
 #define EBT_AMONG_DST 0x01
 #define EBT_AMONG_SRC 0x02
 
@@ -30,7 +32,7 @@
  */
 
 struct ebt_mac_wormhash_tuple {
-       uint32_t cmp[2];
+       __u32 cmp[2];
        __be32 ip;
 };
 
index cbf4843..522f3e4 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_ARP_H
 #define __LINUX_BRIDGE_EBT_ARP_H
 
+#include <linux/types.h>
+
 #define EBT_ARP_OPCODE 0x01
 #define EBT_ARP_HTYPE 0x02
 #define EBT_ARP_PTYPE 0x04
@@ -27,8 +29,8 @@ struct ebt_arp_info
        unsigned char smmsk[ETH_ALEN];
        unsigned char dmaddr[ETH_ALEN];
        unsigned char dmmsk[ETH_ALEN];
-       uint8_t  bitmask;
-       uint8_t  invflags;
+       __u8  bitmask;
+       __u8  invflags;
 };
 
 #endif
index 6a708fb..c4bbc41 100644 (file)
@@ -15,6 +15,8 @@
 #ifndef __LINUX_BRIDGE_EBT_IP_H
 #define __LINUX_BRIDGE_EBT_IP_H
 
+#include <linux/types.h>
+
 #define EBT_IP_SOURCE 0x01
 #define EBT_IP_DEST 0x02
 #define EBT_IP_TOS 0x04
@@ -31,12 +33,12 @@ struct ebt_ip_info {
        __be32 daddr;
        __be32 smsk;
        __be32 dmsk;
-       uint8_t  tos;
-       uint8_t  protocol;
-       uint8_t  bitmask;
-       uint8_t  invflags;
-       uint16_t sport[2];
-       uint16_t dport[2];
+       __u8  tos;
+       __u8  protocol;
+       __u8  bitmask;
+       __u8  invflags;
+       __u16 sport[2];
+       __u16 dport[2];
 };
 
 #endif
index e5de987..42b8896 100644 (file)
 #ifndef __LINUX_BRIDGE_EBT_IP6_H
 #define __LINUX_BRIDGE_EBT_IP6_H
 
+#include <linux/types.h>
+
 #define EBT_IP6_SOURCE 0x01
 #define EBT_IP6_DEST 0x02
 #define EBT_IP6_TCLASS 0x04
 #define EBT_IP6_PROTO 0x08
 #define EBT_IP6_SPORT 0x10
 #define EBT_IP6_DPORT 0x20
+#define EBT_IP6_ICMP6 0x40
+
 #define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
-                     EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+                     EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
+                     EBT_IP6_ICMP6)
 #define EBT_IP6_MATCH "ip6"
 
 /* the same values are used for the invflags */
@@ -28,12 +33,18 @@ struct ebt_ip6_info {
        struct in6_addr daddr;
        struct in6_addr smsk;
        struct in6_addr dmsk;
-       uint8_t  tclass;
-       uint8_t  protocol;
-       uint8_t  bitmask;
-       uint8_t  invflags;
-       uint16_t sport[2];
-       uint16_t dport[2];
+       __u8  tclass;
+       __u8  protocol;
+       __u8  bitmask;
+       __u8  invflags;
+       union {
+               __u16 sport[2];
+               __u8 icmpv6_type[2];
+       };
+       union {
+               __u16 dport[2];
+               __u8 icmpv6_code[2];
+       };
 };
 
 #endif
index 4bf76b7..66d80b3 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_LIMIT_H
 #define __LINUX_BRIDGE_EBT_LIMIT_H
 
+#include <linux/types.h>
+
 #define EBT_LIMIT_MATCH "limit"
 
 /* timings are in milliseconds. */
    seconds, or one every 59 hours. */
 
 struct ebt_limit_info {
-       u_int32_t avg;    /* Average secs between packets * scale */
-       u_int32_t burst;  /* Period multiplier for upper limit. */
+       __u32 avg;    /* Average secs between packets * scale */
+       __u32 burst;  /* Period multiplier for upper limit. */
 
        /* Used internally by the kernel */
        unsigned long prev;
-       u_int32_t credit;
-       u_int32_t credit_cap, cost;
+       __u32 credit;
+       __u32 credit_cap, cost;
 };
 
 #endif
index cc2cdfb..7e7f1d1 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_LOG_H
 #define __LINUX_BRIDGE_EBT_LOG_H
 
+#include <linux/types.h>
+
 #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
 #define EBT_LOG_ARP 0x02
 #define EBT_LOG_NFLOG 0x04
@@ -10,9 +12,9 @@
 #define EBT_LOG_WATCHER "log"
 
 struct ebt_log_info {
-       uint8_t loglevel;
-       uint8_t prefix[EBT_LOG_PREFIX_SIZE];
-       uint32_t bitmask;
+       __u8 loglevel;
+       __u8 prefix[EBT_LOG_PREFIX_SIZE];
+       __u32 bitmask;
 };
 
 #endif
index 9ceb10e..410f9e5 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef __LINUX_BRIDGE_EBT_MARK_M_H
 #define __LINUX_BRIDGE_EBT_MARK_M_H
 
+#include <linux/types.h>
+
 #define EBT_MARK_AND 0x01
 #define EBT_MARK_OR 0x02
 #define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
 struct ebt_mark_m_info {
        unsigned long mark, mask;
-       uint8_t invert;
-       uint8_t bitmask;
+       __u8 invert;
+       __u8 bitmask;
 };
 #define EBT_MARK_MATCH "mark_m"
 
index 0528178..df829fc 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_NFLOG_H
 #define __LINUX_BRIDGE_EBT_NFLOG_H
 
+#include <linux/types.h>
+
 #define EBT_NFLOG_MASK 0x0
 
 #define EBT_NFLOG_PREFIX_SIZE 64
 #define EBT_NFLOG_DEFAULT_THRESHOLD    1
 
 struct ebt_nflog_info {
-       u_int32_t len;
-       u_int16_t group;
-       u_int16_t threshold;
-       u_int16_t flags;
-       u_int16_t pad;
+       __u32 len;
+       __u16 group;
+       __u16 threshold;
+       __u16 flags;
+       __u16 pad;
        char prefix[EBT_NFLOG_PREFIX_SIZE];
 };
 
index 51a7998..c241bad 100644 (file)
@@ -1,9 +1,11 @@
 #ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
 #define __LINUX_BRIDGE_EBT_PKTTYPE_H
 
+#include <linux/types.h>
+
 struct ebt_pkttype_info {
-       uint8_t pkt_type;
-       uint8_t invert;
+       __u8 pkt_type;
+       __u8 invert;
 };
 #define EBT_PKTTYPE_MATCH "pkttype"
 
index e503a0a..1025b9f 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_STP_H
 #define __LINUX_BRIDGE_EBT_STP_H
 
+#include <linux/types.h>
+
 #define EBT_STP_TYPE           0x0001
 
 #define EBT_STP_FLAGS          0x0002
 #define EBT_STP_MATCH "stp"
 
 struct ebt_stp_config_info {
-       uint8_t flags;
-       uint16_t root_priol, root_priou;
+       __u8 flags;
+       __u16 root_priol, root_priou;
        char root_addr[6], root_addrmsk[6];
-       uint32_t root_costl, root_costu;
-       uint16_t sender_priol, sender_priou;
+       __u32 root_costl, root_costu;
+       __u16 sender_priol, sender_priou;
        char sender_addr[6], sender_addrmsk[6];
-       uint16_t portl, portu;
-       uint16_t msg_agel, msg_ageu;
-       uint16_t max_agel, max_ageu;
-       uint16_t hello_timel, hello_timeu;
-       uint16_t forward_delayl, forward_delayu;
+       __u16 portl, portu;
+       __u16 msg_agel, msg_ageu;
+       __u16 max_agel, max_ageu;
+       __u16 hello_timel, hello_timeu;
+       __u16 forward_delayl, forward_delayu;
 };
 
 struct ebt_stp_info {
-       uint8_t type;
+       __u8 type;
        struct ebt_stp_config_info config;
-       uint16_t bitmask;
-       uint16_t invflags;
+       __u16 bitmask;
+       __u16 invflags;
 };
 
 #endif
index b677e26..89a6bec 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _EBT_ULOG_H
 #define _EBT_ULOG_H
 
+#include <linux/types.h>
+
 #define EBT_ULOG_DEFAULT_NLGROUP 0
 #define EBT_ULOG_DEFAULT_QTHRESHOLD 1
 #define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
@@ -10,7 +12,7 @@
 #define EBT_ULOG_VERSION 1
 
 struct ebt_ulog_info {
-       uint32_t nlgroup;
+       __u32 nlgroup;
        unsigned int cprange;
        unsigned int qthreshold;
        char prefix[EBT_ULOG_PREFIX_LEN];
index 1d98be4..967d1d5 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_VLAN_H
 #define __LINUX_BRIDGE_EBT_VLAN_H
 
+#include <linux/types.h>
+
 #define EBT_VLAN_ID    0x01
 #define EBT_VLAN_PRIO  0x02
 #define EBT_VLAN_ENCAP 0x04
 #define EBT_VLAN_MATCH "vlan"
 
 struct ebt_vlan_info {
-       uint16_t id;            /* VLAN ID {1-4095} */
-       uint8_t prio;           /* VLAN User Priority {0-7} */
+       __u16 id;               /* VLAN ID {1-4095} */
+       __u8 prio;              /* VLAN User Priority {0-7} */
        __be16 encap;           /* VLAN Encapsulated frame code {0-65535} */
-       uint8_t bitmask;                /* Args bitmask bit 1=1 - ID arg,
+       __u8 bitmask;           /* Args bitmask bit 1=1 - ID arg,
                                   bit 2=1 User-Priority arg, bit 3=1 encap*/
-       uint8_t invflags;               /* Inverse bitmask  bit 1=1 - inversed ID arg, 
+       __u8 invflags;          /* Inverse bitmask  bit 1=1 - inverted ID arg,
                                   bit 2=1 - inverted Priority arg */
 };
 
index e5a3687..c6a204c 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IPT_CLUSTERIP_H_target
 #define _IPT_CLUSTERIP_H_target
 
+#include <linux/types.h>
+
 enum clusterip_hashmode {
     CLUSTERIP_HASHMODE_SIP = 0,
     CLUSTERIP_HASHMODE_SIP_SPT,
@@ -17,15 +19,15 @@ struct clusterip_config;
 
 struct ipt_clusterip_tgt_info {
 
-       u_int32_t flags;
+       __u32 flags;
 
        /* only relevant for new ones */
-       u_int8_t clustermac[6];
-       u_int16_t num_total_nodes;
-       u_int16_t num_local_nodes;
-       u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
-       u_int32_t hash_mode;
-       u_int32_t hash_initval;
+       __u8 clustermac[6];
+       __u16 num_total_nodes;
+       __u16 num_local_nodes;
+       __u16 local_nodes[CLUSTERIP_MAX_NODES];
+       __u32 hash_mode;
+       __u32 hash_initval;
 
        /* Used internally by the kernel */
        struct clusterip_config *config;
index 7ca4591..bb88d53 100644 (file)
@@ -8,6 +8,8 @@
 */
 #ifndef _IPT_ECN_TARGET_H
 #define _IPT_ECN_TARGET_H
+
+#include <linux/types.h>
 #include <linux/netfilter/xt_DSCP.h>
 
 #define IPT_ECN_IP_MASK        (~XT_DSCP_MASK)
 #define IPT_ECN_OP_MASK                0xce
 
 struct ipt_ECN_info {
-       u_int8_t operation;     /* bitset of operations */
-       u_int8_t ip_ect;        /* ECT codepoint of IPv4 header, pre-shifted */
+       __u8 operation; /* bitset of operations */
+       __u8 ip_ect;    /* ECT codepoint of IPv4 header, pre-shifted */
        union {
                struct {
-                       u_int8_t ece:1, cwr:1; /* TCP ECT bits */
+                       __u8 ece:1, cwr:1; /* TCP ECT bits */
                } tcp;
        } proto;
 };
index 2529660..5bca782 100644 (file)
@@ -1,15 +1,17 @@
 #ifndef _IPT_SAME_H
 #define _IPT_SAME_H
 
+#include <linux/types.h>
+
 #define IPT_SAME_MAX_RANGE     10
 
 #define IPT_SAME_NODST         0x01
 
 struct ipt_same_info {
        unsigned char info;
-       u_int32_t rangesize;
-       u_int32_t ipnum;
-       u_int32_t *iparray;
+       __u32 rangesize;
+       __u32 ipnum;
+       __u32 *iparray;
 
        /* hangs off end. */
        struct nf_nat_range range[IPT_SAME_MAX_RANGE];
index ee6611e..f6ac169 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _IPT_TTL_H
 #define _IPT_TTL_H
 
+#include <linux/types.h>
+
 enum {
        IPT_TTL_SET = 0,
        IPT_TTL_INC,
@@ -13,8 +15,8 @@ enum {
 #define IPT_TTL_MAXMODE        IPT_TTL_DEC
 
 struct ipt_TTL_info {
-       u_int8_t        mode;
-       u_int8_t        ttl;
+       __u8    mode;
+       __u8    ttl;
 };
 
 
index 446de6a..0da4223 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IPT_ADDRTYPE_H
 #define _IPT_ADDRTYPE_H
 
+#include <linux/types.h>
+
 enum {
        IPT_ADDRTYPE_INVERT_SOURCE      = 0x0001,
        IPT_ADDRTYPE_INVERT_DEST        = 0x0002,
@@ -9,17 +11,17 @@ enum {
 };
 
 struct ipt_addrtype_info_v1 {
-       u_int16_t       source;         /* source-type mask */
-       u_int16_t       dest;           /* dest-type mask */
-       u_int32_t       flags;
+       __u16   source;         /* source-type mask */
+       __u16   dest;           /* dest-type mask */
+       __u32   flags;
 };
 
 /* revision 0 */
 struct ipt_addrtype_info {
-       u_int16_t       source;         /* source-type mask */
-       u_int16_t       dest;           /* dest-type mask */
-       u_int32_t       invert_source;
-       u_int32_t       invert_dest;
+       __u16   source;         /* source-type mask */
+       __u16   dest;           /* dest-type mask */
+       __u32   invert_source;
+       __u32   invert_dest;
 };
 
 #endif
index 2e555b4..4e02bb0 100644 (file)
@@ -1,9 +1,11 @@
 #ifndef _IPT_AH_H
 #define _IPT_AH_H
 
+#include <linux/types.h>
+
 struct ipt_ah {
-       u_int32_t spis[2];                      /* Security Parameter Index */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 spis[2];                  /* Security Parameter Index */
+       __u8  invflags;                 /* Inverse flags */
 };
 
 
index 9945baa..eabf95f 100644 (file)
@@ -8,6 +8,8 @@
 */
 #ifndef _IPT_ECN_H
 #define _IPT_ECN_H
+
+#include <linux/types.h>
 #include <linux/netfilter/xt_dscp.h>
 
 #define IPT_ECN_IP_MASK        (~XT_DSCP_MASK)
 
 /* match info */
 struct ipt_ecn_info {
-       u_int8_t operation;
-       u_int8_t invert;
-       u_int8_t ip_ect;
+       __u8 operation;
+       __u8 invert;
+       __u8 ip_ect;
        union {
                struct {
-                       u_int8_t ect;
+                       __u8 ect;
                } tcp;
        } proto;
 };
index ee24fd8..37bee44 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _IPT_TTL_H
 #define _IPT_TTL_H
 
+#include <linux/types.h>
+
 enum {
        IPT_TTL_EQ = 0,         /* equals */
        IPT_TTL_NE,             /* not equals */
@@ -13,8 +15,8 @@ enum {
 
 
 struct ipt_ttl_info {
-       u_int8_t        mode;
-       u_int8_t        ttl;
+       __u8    mode;
+       __u8    ttl;
 };
 
 
index afb7813..ebd8ead 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef _IP6T_HL_H
 #define _IP6T_HL_H
 
+#include <linux/types.h>
+
 enum {
        IP6T_HL_SET = 0,
        IP6T_HL_INC,
@@ -14,8 +16,8 @@ enum {
 #define IP6T_HL_MAXMODE        IP6T_HL_DEC
 
 struct ip6t_HL_info {
-       u_int8_t        mode;
-       u_int8_t        hop_limit;
+       __u8    mode;
+       __u8    hop_limit;
 };
 
 
index 6be6504..205ed62 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IP6T_REJECT_H
 #define _IP6T_REJECT_H
 
+#include <linux/types.h>
+
 enum ip6t_reject_with {
        IP6T_ICMP6_NO_ROUTE,
        IP6T_ICMP6_ADM_PROHIBITED,
@@ -12,7 +14,7 @@ enum ip6t_reject_with {
 };
 
 struct ip6t_reject_info {
-       u_int32_t       with;   /* reject type */
+       __u32   with;   /* reject type */
 };
 
 #endif /*_IP6T_REJECT_H*/
index 17a745c..5da2b65 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _IP6T_AH_H
 #define _IP6T_AH_H
 
+#include <linux/types.h>
+
 struct ip6t_ah {
-       u_int32_t spis[2];                      /* Security Parameter Index */
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t  hdrres;                       /* Test of the Reserved Filed */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 spis[2];                  /* Security Parameter Index */
+       __u32 hdrlen;                   /* Header Length */
+       __u8  hdrres;                   /* Test of the Reserved Field */
+       __u8  invflags;                 /* Inverse flags */
 };
 
 #define IP6T_AH_SPI 0x01
index 3724d08..b47f61b 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _IP6T_FRAG_H
 #define _IP6T_FRAG_H
 
+#include <linux/types.h>
+
 struct ip6t_frag {
-       u_int32_t ids[2];                       /* Security Parameter Index */
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t  flags;                        /*  */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 ids[2];                   /* Security Parameter Index */
+       __u32 hdrlen;                   /* Header Length */
+       __u8  flags;                    /*  */
+       __u8  invflags;                 /* Inverse flags */
 };
 
 #define IP6T_FRAG_IDS          0x01
index 5ef91b8..6e76dbc 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef _IP6T_HL_H
 #define _IP6T_HL_H
 
+#include <linux/types.h>
+
 enum {
        IP6T_HL_EQ = 0,         /* equals */
        IP6T_HL_NE,             /* not equals */
@@ -14,8 +16,8 @@ enum {
 
 
 struct ip6t_hl_info {
-       u_int8_t        mode;
-       u_int8_t        hop_limit;
+       __u8    mode;
+       __u8    hop_limit;
 };
 
 
index 01dfd44..efae3a2 100644 (file)
@@ -8,10 +8,12 @@ on whether they contain certain headers */
 #ifndef __IPV6HEADER_H
 #define __IPV6HEADER_H
 
+#include <linux/types.h>
+
 struct ip6t_ipv6header_info {
-       u_int8_t matchflags;
-       u_int8_t invflags;
-       u_int8_t modeflag;
+       __u8 matchflags;
+       __u8 invflags;
+       __u8 modeflag;
 };
 
 #define MASK_HOPOPTS    128
index 18549bc..a7729a5 100644 (file)
@@ -1,10 +1,12 @@
 #ifndef _IP6T_MH_H
 #define _IP6T_MH_H
 
+#include <linux/types.h>
+
 /* MH matching stuff */
 struct ip6t_mh {
-       u_int8_t types[2];      /* MH type range */
-       u_int8_t invflags;      /* Inverse flags */
+       __u8 types[2];  /* MH type range */
+       __u8 invflags;  /* Inverse flags */
 };
 
 /* Values for "invflags" field in struct ip6t_mh. */
index 62d89bc..17d419a 100644 (file)
@@ -1,14 +1,16 @@
 #ifndef _IP6T_OPTS_H
 #define _IP6T_OPTS_H
 
+#include <linux/types.h>
+
 #define IP6T_OPTS_OPTSNR 16
 
 struct ip6t_opts {
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t flags;                         /*  */
-       u_int8_t invflags;                      /* Inverse flags */
-       u_int16_t opts[IP6T_OPTS_OPTSNR];       /* opts */
-       u_int8_t optsnr;                        /* Nr of OPts */
+       __u32 hdrlen;                   /* Header Length */
+       __u8 flags;                             /*  */
+       __u8 invflags;                  /* Inverse flags */
+       __u16 opts[IP6T_OPTS_OPTSNR];   /* opts */
+       __u8 optsnr;                    /* Nr of OPts */
 };
 
 #define IP6T_OPTS_LEN          0x01
index ab91bfd..7605a5f 100644 (file)
@@ -1,18 +1,19 @@
 #ifndef _IP6T_RT_H
 #define _IP6T_RT_H
 
+#include <linux/types.h>
 /*#include <linux/in6.h>*/
 
 #define IP6T_RT_HOPS 16
 
 struct ip6t_rt {
-       u_int32_t rt_type;                      /* Routing Type */
-       u_int32_t segsleft[2];                  /* Segments Left */
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t  flags;                        /*  */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 rt_type;                  /* Routing Type */
+       __u32 segsleft[2];                      /* Segments Left */
+       __u32 hdrlen;                   /* Header Length */
+       __u8  flags;                    /*  */
+       __u8  invflags;                 /* Inverse flags */
        struct in6_addr addrs[IP6T_RT_HOPS];    /* Hops */
-       u_int8_t addrnr;                        /* Nr of Addresses */
+       __u8 addrnr;                    /* Nr of Addresses */
 };
 
 #define IP6T_RT_TYP            0x01
index e2b9e63..4c4ac3f 100644 (file)
@@ -160,10 +160,6 @@ struct netlink_skb_parms {
        struct ucred            creds;          /* Skb credentials      */
        __u32                   pid;
        __u32                   dst_group;
-       kernel_cap_t            eff_cap;
-       __u32                   loginuid;       /* Login (audit) uid */
-       __u32                   sessionid;      /* Session id (audit) */
-       __u32                   sid;            /* SELinux security id */
 };
 
 #define NETLINK_CB(skb)                (*(struct netlink_skb_parms*)&((skb)->cb))
index f321b57..fabcb1e 100644 (file)
@@ -51,10 +51,10 @@ nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default)
        return w;
 }
 
-extern unsigned int
+extern int
 nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
              struct posix_acl *acl, int encode_entries, int typeflag);
-extern unsigned int
+extern int
 nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
              struct posix_acl **pacl);
 
index 821ffb9..3002218 100644 (file)
@@ -1243,6 +1243,8 @@ enum nl80211_rate_info {
  * @NL80211_STA_INFO_LLID: the station's mesh LLID
  * @NL80211_STA_INFO_PLID: the station's mesh PLID
  * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested
+ *     attribute, like NL80211_STA_INFO_TX_BITRATE.
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -1261,6 +1263,7 @@ enum nl80211_sta_info {
        NL80211_STA_INFO_TX_RETRIES,
        NL80211_STA_INFO_TX_FAILED,
        NL80211_STA_INFO_SIGNAL_AVG,
+       NL80211_STA_INFO_RX_BITRATE,
 
        /* keep last */
        __NL80211_STA_INFO_AFTER_LAST,
index 32fb812..1ca6411 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
 #include <asm/atomic.h>
  
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -186,10 +188,17 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_HW_PERF_EVENTS
 int __init oprofile_perf_init(struct oprofile_operations *ops);
 void oprofile_perf_exit(void);
 char *op_name_from_perf_id(void);
-#endif /* CONFIG_PERF_EVENTS */
+#else
+static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+       pr_info("oprofile: hardware counters not available\n");
+       return -ENODEV;
+}
+static inline void oprofile_perf_exit(void) { }
+#endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* OPROFILE_H */
index 559d028..ff5bccb 100644 (file)
@@ -1479,6 +1479,7 @@ void pci_request_acs(void);
 #define PCI_VPD_RO_KEYWORD_PARTNO      "PN"
 #define PCI_VPD_RO_KEYWORD_MFR_ID      "MN"
 #define PCI_VPD_RO_KEYWORD_VENDOR0     "V0"
+#define PCI_VPD_RO_KEYWORD_CHKSUM      "RV"
 
 /**
  * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
index 26c8df7..6fb1384 100644 (file)
@@ -36,9 +36,7 @@
 /* Socket options for SOL_PNPIPE level */
 #define PNPIPE_ENCAP           1
 #define PNPIPE_IFINDEX         2
-#define PNPIPE_PIPE_HANDLE     3
-#define PNPIPE_ENABLE           4
-/* unused slot */
+#define PNPIPE_HANDLE          3
 
 #define PNADDR_ANY             0
 #define PNADDR_BROADCAST       0xFC
index 2cfa4bc..b1032a3 100644 (file)
@@ -247,6 +247,35 @@ struct tc_gred_sopt {
        __u16           pad1;
 };
 
+/* CHOKe section */
+
+enum {
+       TCA_CHOKE_UNSPEC,
+       TCA_CHOKE_PARMS,
+       TCA_CHOKE_STAB,
+       __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+       __u32           limit;          /* Hard queue length (packets)  */
+       __u32           qth_min;        /* Min average threshold (packets) */
+       __u32           qth_max;        /* Max average threshold (packets) */
+       unsigned char   Wlog;           /* log(W)               */
+       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
+       unsigned char   Scell_log;      /* cell size for idle damping */
+       unsigned char   flags;          /* see RED flags */
+};
+
+struct tc_choke_xstats {
+       __u32           early;          /* Early drops */
+       __u32           pdrop;          /* Drops due to queue limits */
+       __u32           other;          /* Drops due to drop() calls */
+       __u32           marked;         /* Marked packets */
+       __u32           matched;        /* Drops due to flow match */
+};
+
 /* HTB section */
 #define TC_HTB_NUMPRIO         8
 #define TC_HTB_MAXDEPTH                8
@@ -435,6 +464,7 @@ enum {
        TCA_NETEM_DELAY_DIST,
        TCA_NETEM_REORDER,
        TCA_NETEM_CORRUPT,
+       TCA_NETEM_LOSS,
        __TCA_NETEM_MAX,
 };
 
@@ -465,7 +495,33 @@ struct tc_netem_corrupt {
        __u32   correlation;
 };
 
+enum {
+       NETEM_LOSS_UNSPEC,
+       NETEM_LOSS_GI,          /* General Intuitive - 4 state model */
+       NETEM_LOSS_GE,          /* Gilbert Elliot models */
+       __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probabilities for 4 state model */
+struct tc_netem_gimodel {
+       __u32   p13;
+       __u32   p31;
+       __u32   p32;
+       __u32   p14;
+       __u32   p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+       __u32 p;
+       __u32 r;
+       __u32 h;
+       __u32 k1;
+};
+
 #define NETEM_DIST_SCALE       8192
+#define NETEM_DIST_MAX         16384
 
 /* DRR */
 
@@ -481,4 +537,55 @@ struct tc_drr_stats {
        __u32   deficit;
 };
 
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+struct tc_mqprio_qopt {
+       __u8    num_tc;
+       __u8    prio_tc_map[TC_QOPT_BITMASK + 1];
+       __u8    hw;
+       __u16   count[TC_QOPT_MAX_QUEUE];
+       __u16   offset[TC_QOPT_MAX_QUEUE];
+};
+
+/* SFB */
+
+enum {
+       TCA_SFB_UNSPEC,
+       TCA_SFB_PARMS,
+       __TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+       __u32 rehash_interval;  /* delay between hash move, in ms */
+       __u32 warmup_time;      /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+       __u32 max;              /* max len of qlen_min */
+       __u32 bin_size;         /* maximum queue length per bin */
+       __u32 increment;        /* probability increment, (d1 in Blue) */
+       __u32 decrement;        /* probability decrement, (d2 in Blue) */
+       __u32 limit;            /* max SFB queue length */
+       __u32 penalty_rate;     /* inelastic flows are rate limited to 'rate' pps */
+       __u32 penalty_burst;
+};
+
+struct tc_sfb_xstats {
+       __u32 earlydrop;
+       __u32 penaltydrop;
+       __u32 bucketdrop;
+       __u32 queuedrop;
+       __u32 childdrop; /* drops in child qdisc */
+       __u32 marked;
+       __u32 maxqlen;
+       __u32 maxprob;
+       __u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
+
 #endif
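
Among the qdisc additions above, tc_mqprio_qopt is the configuration the MQPRIO scheduler takes: a priority-to-traffic-class map plus a queue count and offset per class. A stand-alone sketch of a two-class split, with the struct copied from the hunk above and the queue layout hypothetical:

/* Illustrative sketch of a tc_mqprio_qopt configuration; user-space copy. */
#include <stdint.h>
#include <stdio.h>

#define TC_QOPT_BITMASK   15
#define TC_QOPT_MAX_QUEUE 16

struct tc_mqprio_qopt {
        uint8_t  num_tc;
        uint8_t  prio_tc_map[TC_QOPT_BITMASK + 1];
        uint8_t  hw;
        uint16_t count[TC_QOPT_MAX_QUEUE];
        uint16_t offset[TC_QOPT_MAX_QUEUE];
};

int main(void)
{
        struct tc_mqprio_qopt q = {
                .num_tc = 2,
                /* priorities 0-7 -> tc 0, 8-15 -> tc 1 */
                .prio_tc_map = { 0, 0, 0, 0, 0, 0, 0, 0,
                                 1, 1, 1, 1, 1, 1, 1, 1 },
                .hw = 0,                        /* software mapping */
                .count  = { 4, 4 },             /* 4 tx queues per class */
                .offset = { 0, 4 },             /* class 1 starts at queue 4 */
        };

        for (int tc = 0; tc < q.num_tc; tc++)
                printf("tc %d: queues %u-%u\n", tc, q.offset[tc],
                       q.offset[tc] + q.count[tc] - 1);
        return 0;
}
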
index dd9c7ab..21415cc 100644 (file)
@@ -431,6 +431,8 @@ struct dev_pm_info {
        struct list_head        entry;
        struct completion       completion;
        struct wakeup_source    *wakeup;
+#else
+       unsigned int            should_wakeup:1;
 #endif
 #ifdef CONFIG_PM_RUNTIME
        struct timer_list       suspend_timer;
index 9cff00d..03a67db 100644 (file)
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev)
        return dev->power.can_wakeup;
 }
 
-static inline bool device_may_wakeup(struct device *dev)
-{
-       return false;
-}
-
 static inline struct wakeup_source *wakeup_source_create(const char *name)
 {
        return NULL;
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
 
 static inline int device_wakeup_enable(struct device *dev)
 {
-       return -EINVAL;
+       dev->power.should_wakeup = true;
+       return 0;
 }
 
 static inline int device_wakeup_disable(struct device *dev)
 {
+       dev->power.should_wakeup = false;
        return 0;
 }
 
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
 {
-       dev->power.can_wakeup = val;
-       return val ? -EINVAL : 0;
+       dev->power.should_wakeup = enable;
+       return 0;
 }
 
+static inline int device_init_wakeup(struct device *dev, bool val)
+{
+       device_set_wakeup_capable(dev, val);
+       device_set_wakeup_enable(dev, val);
+       return 0;
+}
 
-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+static inline bool device_may_wakeup(struct device *dev)
 {
-       return -EINVAL;
+       return dev->power.can_wakeup && dev->power.should_wakeup;
 }
 
 static inline void __pm_stay_awake(struct wakeup_source *ws) {}
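
For context, a minimal sketch of the driver pattern these helpers serve (driver name and wake IRQ are hypothetical): the device is declared wakeup-capable at probe time and the wake IRQ is armed at suspend only if wakeup is still enabled.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* hypothetical wake IRQ, recorded at probe time */
static int mydrv_wake_irq;

static void mydrv_setup_wakeup(struct device *dev, int irq)
{
	mydrv_wake_irq = irq;
	/* mark the device wakeup-capable and enable wakeup by default */
	device_init_wakeup(dev, true);
}

static int mydrv_suspend(struct device *dev)
{
	/* arm the wake IRQ only if wakeup was left enabled (e.g. via sysfs) */
	if (device_may_wakeup(dev))
		enable_irq_wake(mydrv_wake_irq);
	return 0;
}
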
index d68283a..54211c1 100644 (file)
@@ -71,6 +71,7 @@ posix_acl_release(struct posix_acl *acl)
 
 /* posix_acl.c */
 
+extern void posix_acl_init(struct posix_acl *, int);
 extern struct posix_acl *posix_acl_alloc(int, gfp_t);
 extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t);
 extern int posix_acl_valid(const struct posix_acl *);
index 092a04f..a1147e5 100644 (file)
 
 extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
-extern int ptrace_traceme(void);
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
-extern int ptrace_attach(struct task_struct *tsk);
-extern int ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
 extern int ptrace_request(struct task_struct *child, long request,
index fcb9884..a5930cb 100644 (file)
@@ -182,6 +182,26 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
        return ret;
 }
 
+/**
+ * res_counter_check_margin - check if the counter allows charging
+ * @cnt: the resource counter to check
+ * @bytes: the number of bytes to check the remaining space against
+ *
+ * Returns true if the counter has at least @bytes of headroom left
+ * below its limit, i.e. charging @bytes more would not exceed it.
+ */
+static inline bool res_counter_check_margin(struct res_counter *cnt,
+                                           unsigned long bytes)
+{
+       bool ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cnt->lock, flags);
+       ret = cnt->limit - cnt->usage >= bytes;
+       spin_unlock_irqrestore(&cnt->lock, flags);
+       return ret;
+}
+
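
A hypothetical caller, purely for illustration (the charge path itself is not part of this hunk):

#include <linux/errno.h>
#include <linux/res_counter.h>

/* sketch: refuse up front if @bytes cannot fit under the limit */
static int try_charge(struct res_counter *cnt, unsigned long bytes)
{
	if (!res_counter_check_margin(cnt, bytes))
		return -ENOMEM;
	/* ... otherwise proceed with the usual res_counter_charge() ... */
	return 0;
}
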
 static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
 {
        bool ret;
index d63dcba..9026b30 100644 (file)
 #define LINUX_RIO_REGS_H
 
 /*
- * In RapidIO, each device has a 2MB configuration space that is
+ * In RapidIO, each device has a 16MB configuration space that is
  * accessed via maintenance transactions.  Portions of configuration
  * space are standardized and/or reserved.
  */
+#define RIO_MAINT_SPACE_SZ     0x1000000 /* 16MB of RapidIO maintenance space */
+
 #define RIO_DEV_ID_CAR         0x00    /* [I] Device Identity CAR */
 #define RIO_DEV_INFO_CAR       0x04    /* [I] Device Information CAR */
 #define RIO_ASM_ID_CAR         0x08    /* [I] Assembly Identity CAR */
index 3c995b4..89c3e51 100644 (file)
@@ -203,6 +203,18 @@ struct rtc_device
        struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
        int pie_enabled;
        struct work_struct irqwork;
+
+
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+       struct work_struct uie_task;
+       struct timer_list uie_timer;
+       /* Those fields are protected by rtc->irq_lock */
+       unsigned int oldsecs;
+       unsigned int uie_irq_active:1;
+       unsigned int stop_uie_polling:1;
+       unsigned int uie_task_active:1;
+       unsigned int uie_timer_active:1;
+#endif
 };
 #define to_rtc_device(d) container_of(d, struct rtc_device, dev)
 
@@ -238,6 +250,7 @@ extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
 extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
                                                unsigned int enabled);
 
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
 void rtc_aie_update_irq(void *private);
 void rtc_uie_update_irq(void *private);
 enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
@@ -246,8 +259,6 @@ int rtc_register(rtc_task_t *task);
 int rtc_unregister(rtc_task_t *task);
 int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
 
-void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
-void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
 void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
 int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
                        ktime_t expires, ktime_t period);
index d747f94..777d8a5 100644 (file)
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MEMPOLICY   0x10000000      /* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER        0x20000000      /* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP        0x40000000      /* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP        0x40000000      /* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000    /* Freezer won't send signals to it */
 
 /*
index c642bb8..9b5f184 100644 (file)
@@ -1623,7 +1623,7 @@ struct security_operations {
        int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
        int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
                                          struct xfrm_policy *xp,
-                                         struct flowi *fl);
+                                         const struct flowi *fl);
        int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
 #endif /* CONFIG_SECURITY_NETWORK_XFRM */
 
@@ -1662,7 +1662,7 @@ int security_capset(struct cred *new, const struct cred *old,
                    const kernel_cap_t *effective,
                    const kernel_cap_t *inheritable,
                    const kernel_cap_t *permitted);
-int security_capable(int cap);
+int security_capable(const struct cred *cred, int cap);
 int security_real_capable(struct task_struct *tsk, int cap);
 int security_real_capable_noaudit(struct task_struct *tsk, int cap);
 int security_sysctl(struct ctl_table *table, int op);
@@ -1856,9 +1856,9 @@ static inline int security_capset(struct cred *new,
        return cap_capset(new, old, effective, inheritable, permitted);
 }
 
-static inline int security_capable(int cap)
+static inline int security_capable(const struct cred *cred, int cap)
 {
-       return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT);
+       return cap_capable(current, cred, cap, SECURITY_CAP_AUDIT);
 }
 
 static inline int security_real_capable(struct task_struct *tsk, int cap)
@@ -2761,7 +2761,8 @@ int security_xfrm_state_delete(struct xfrm_state *x);
 void security_xfrm_state_free(struct xfrm_state *x);
 int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
 int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                                      struct xfrm_policy *xp, struct flowi *fl);
+                                      struct xfrm_policy *xp,
+                                      const struct flowi *fl);
 int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
 
@@ -2813,7 +2814,7 @@ static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_s
 }
 
 static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                       struct xfrm_policy *xp, struct flowi *fl)
+                       struct xfrm_policy *xp, const struct flowi *fl)
 {
        return 1;
 }
index bf221d6..31f02d0 100644 (file)
@@ -1801,6 +1801,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
                     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));   \
                     skb = skb->prev)
 
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)                           \
+               for (skb = (queue)->prev, tmp = skb->prev;                      \
+                    skb != (struct sk_buff *)(queue);                          \
+                    skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)                      \
+               for (tmp = skb->prev;                                           \
+                    skb != (struct sk_buff *)(queue);                          \
+                    skb = tmp, tmp = skb->prev)
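
A minimal sketch of how the new _safe reverse-walk helpers are meant to be used; the drop-short-packets policy is invented for the example, and the caller is assumed to hold the queue lock that __skb_unlink() requires.

#include <linux/skbuff.h>

static void drop_short_packets(struct sk_buff_head *queue, unsigned int min_len)
{
	struct sk_buff *skb, *tmp;

	/* safe variant: skb may be unlinked and freed inside the loop body */
	skb_queue_reverse_walk_safe(queue, skb, tmp) {
		if (skb->len < min_len) {
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
}
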
 
 static inline bool skb_has_frag_list(const struct sk_buff *skb)
 {
@@ -1868,7 +1877,7 @@ extern void              skb_split(struct sk_buff *skb,
 extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                                 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
index 241f179..7997a50 100644 (file)
@@ -22,7 +22,7 @@
 
 /* Linux-specific socket ioctls */
 #define SIOCINQ                FIONREAD
-#define SIOCOUTQ       TIOCOUTQ
+#define SIOCOUTQ       TIOCOUTQ        /* output queue size (not sent + not acked) */
 
 /* Routing table calls. */
 #define SIOCADDRT      0x890B          /* add routing table entry      */
@@ -83,6 +83,8 @@
 
 #define SIOCWANDEV     0x894A          /* get/set netdev parameters    */
 
+#define SIOCOUTQNSD    0x894B          /* output queue size (not sent only) */
+
 /* ARP cache control calls. */
                    /*  0x8950 - 0x8952  * obsolete calls, don't re-use */
 #define SIOCDARP       0x8953          /* delete ARP table entry       */
index 489f7b6..402955a 100644 (file)
@@ -85,6 +85,8 @@
 #define  SSB_IMSTATE_AP_RSV    0x00000030 /* Reserved */
 #define  SSB_IMSTATE_IBE       0x00020000 /* In Band Error */
 #define  SSB_IMSTATE_TO                0x00040000 /* Timeout */
+#define  SSB_IMSTATE_BUSY      0x01800000 /* Busy (Backplane rev >= 2.3 only) */
+#define  SSB_IMSTATE_REJECT    0x02000000 /* Reject (Backplane rev >= 2.3 only) */
 #define SSB_INTVEC             0x0F94     /* SB Interrupt Mask */
 #define  SSB_INTVEC_PCI                0x00000001 /* Enable interrupts for PCI */
 #define  SSB_INTVEC_ENET0      0x00000002 /* Enable interrupts for enet 0 */
@@ -97,7 +99,6 @@
 #define  SSB_TMSLOW_RESET      0x00000001 /* Reset */
 #define  SSB_TMSLOW_REJECT_22  0x00000002 /* Reject (Backplane rev 2.2) */
 #define  SSB_TMSLOW_REJECT_23  0x00000004 /* Reject (Backplane rev 2.3) */
-#define  SSB_TMSLOW_PHYCLK     0x00000010 /* MAC PHY Clock Control Enable */
 #define  SSB_TMSLOW_CLOCK      0x00010000 /* Clock Enable */
 #define  SSB_TMSLOW_FGC                0x00020000 /* Force Gated Clocks On */
 #define  SSB_TMSLOW_PE         0x40000000 /* Power Management Enable */
 /* SPROM Revision 4 */
 #define SSB_SPROM4_BFLLO               0x0044  /* Boardflags (low 16 bits) */
 #define SSB_SPROM4_BFLHI               0x0046  /* Board Flags Hi */
+#define SSB_SPROM4_BFL2LO              0x0048  /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM4_BFL2HI              0x004A  /* Board flags 2 Hi */
 #define SSB_SPROM4_IL0MAC              0x004C  /* 6 byte MAC address for a/b/g/n */
 #define SSB_SPROM4_CCODE               0x0052  /* Country Code (2 bytes) */
 #define SSB_SPROM4_GPIOA               0x0056  /* Gen. Purpose IO # 0 and 1 */
 #define SSB_SPROM5_CCODE               0x0044  /* Country Code (2 bytes) */
 #define SSB_SPROM5_BFLLO               0x004A  /* Boardflags (low 16 bits) */
 #define SSB_SPROM5_BFLHI               0x004C  /* Board Flags Hi */
+#define SSB_SPROM5_BFL2LO              0x004E  /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM5_BFL2HI              0x0050  /* Board flags 2 Hi */
 #define SSB_SPROM5_IL0MAC              0x0052  /* 6 byte MAC address for a/b/g/n */
 #define SSB_SPROM5_GPIOA               0x0076  /* Gen. Purpose IO # 0 and 1 */
 #define  SSB_SPROM5_GPIOA_P0           0x00FF  /* Pin 0 */
index c50b458..0828842 100644 (file)
@@ -47,14 +47,6 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
                return 1;
        return 0;
 }
-static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
-{
-       if (svc_is_backchannel(rqstp))
-               return (struct nfs4_sessionid *)
-                       rqstp->rq_server->sv_bc_xprt->xpt_bc_sid;
-       return NULL;
-}
-
 #else /* CONFIG_NFS_V4_1 */
 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
                                         unsigned int min_reqs)
@@ -67,11 +59,6 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
        return 0;
 }
 
-static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
-{
-       return NULL;
-}
-
 static inline void xprt_free_bc_request(struct rpc_rqst *req)
 {
 }
index 059877b..7ad9751 100644 (file)
@@ -77,7 +77,6 @@ struct svc_xprt {
        size_t                  xpt_remotelen;  /* length of address */
        struct rpc_wait_queue   xpt_bc_pending; /* backchannel wait queue */
        struct list_head        xpt_users;      /* callbacks on free */
-       void                    *xpt_bc_sid;    /* back channel session ID */
 
        struct net              *xpt_net;
        struct rpc_xprt         *xpt_bc_xprt;   /* NFSv4.1 backchannel */
index 18cd068..98664db 100644 (file)
@@ -125,39 +125,37 @@ extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)                               \
-       static struct syscall_metadata                                  \
-       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
+       static struct syscall_metadata __syscall_meta_##sname;          \
        static struct ftrace_event_call __used                          \
-         __attribute__((__aligned__(4)))                               \
-         __attribute__((section("_ftrace_events")))                    \
          event_enter_##sname = {                                       \
                .name                   = "sys_enter"#sname,            \
                .class                  = &event_class_syscall_enter,   \
                .event.funcs            = &enter_syscall_print_funcs,   \
                .data                   = (void *)&__syscall_meta_##sname,\
        };                                                              \
+       static struct ftrace_event_call __used                          \
+         __attribute__((section("_ftrace_events")))                    \
+        *__event_enter_##sname = &event_enter_##sname;                 \
        __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
-       static struct syscall_metadata                                  \
-       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
+       static struct syscall_metadata __syscall_meta_##sname;          \
        static struct ftrace_event_call __used                          \
-         __attribute__((__aligned__(4)))                               \
-         __attribute__((section("_ftrace_events")))                    \
          event_exit_##sname = {                                        \
                .name                   = "sys_exit"#sname,             \
                .class                  = &event_class_syscall_exit,    \
                .event.funcs            = &exit_syscall_print_funcs,    \
                .data                   = (void *)&__syscall_meta_##sname,\
        };                                                              \
+       static struct ftrace_event_call __used                          \
+         __attribute__((section("_ftrace_events")))                    \
+       *__event_exit_##sname = &event_exit_##sname;                    \
        __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
 
 #define SYSCALL_METADATA(sname, nb)                            \
        SYSCALL_TRACE_ENTER_EVENT(sname);                       \
        SYSCALL_TRACE_EXIT_EVENT(sname);                        \
        static struct syscall_metadata __used                   \
-         __attribute__((__aligned__(4)))                       \
-         __attribute__((section("__syscalls_metadata")))       \
          __syscall_meta_##sname = {                            \
                .name           = "sys"#sname,                  \
                .nb_args        = nb,                           \
@@ -166,14 +164,15 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .enter_event    = &event_enter_##sname,         \
                .exit_event     = &event_exit_##sname,          \
                .enter_fields   = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-       };
+       };                                                      \
+       static struct syscall_metadata __used                   \
+         __attribute__((section("__syscalls_metadata")))       \
+        *__p_syscall_meta_##sname = &__syscall_meta_##sname;
 
 #define SYSCALL_DEFINE0(sname)                                 \
        SYSCALL_TRACE_ENTER_EVENT(_##sname);                    \
        SYSCALL_TRACE_EXIT_EVENT(_##sname);                     \
        static struct syscall_metadata __used                   \
-         __attribute__((__aligned__(4)))                       \
-         __attribute__((section("__syscalls_metadata")))       \
          __syscall_meta__##sname = {                           \
                .name           = "sys_"#sname,                 \
                .nb_args        = 0,                            \
@@ -181,6 +180,9 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .exit_event     = &event_exit__##sname,         \
                .enter_fields   = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
        };                                                      \
+       static struct syscall_metadata __used                   \
+         __attribute__((section("__syscalls_metadata")))       \
+        *__p_syscall_meta_##sname = &__syscall_meta__##sname;  \
        asmlinkage long sys_##sname(void)
 #else
 #define SYSCALL_DEFINE0(name)     asmlinkage long sys_##name(void)
index 7bb5cb6..11684d9 100644 (file)
@@ -930,6 +930,7 @@ enum
 
 #ifdef __KERNEL__
 #include <linux/list.h>
+#include <linux/rcupdate.h>
 
 /* For the /proc/sys support */
 struct ctl_table;
@@ -1037,10 +1038,15 @@ struct ctl_table_root {
    struct ctl_table trees. */
 struct ctl_table_header
 {
-       struct ctl_table *ctl_table;
-       struct list_head ctl_entry;
-       int used;
-       int count;
+       union {
+               struct {
+                       struct ctl_table *ctl_table;
+                       struct list_head ctl_entry;
+                       int used;
+                       int count;
+               };
+               struct rcu_head rcu;
+       };
        struct completion *unregistering;
        struct ctl_table *ctl_table_arg;
        struct ctl_table_root *root;
index 387fa7d..7faf933 100644 (file)
@@ -17,6 +17,9 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
+/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
+#define SYSRQ_DEFAULT_ENABLE   1
+
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
 #define SYSRQ_ENABLE_LOG       0x0002
index 8651556..d3ec89f 100644 (file)
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *);
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
                const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+
+#ifdef CONFIG_NET
 extern int generate_netlink_event(u32 orig, enum events event);
+#else
+static inline int generate_netlink_event(u32 orig, enum events event)
+{
+       return 0;
+}
+#endif
 
 #endif /* __THERMAL_H__ */
index 1eefa3f..a5b994a 100644 (file)
@@ -2,7 +2,7 @@
  * include/linux/tipc.h: Include file for TIPC socket interface
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -130,12 +130,6 @@ static inline unsigned int tipc_node(__u32 addr)
 #define TIPC_SUB_PORTS         0x01    /* filter for port availability */
 #define TIPC_SUB_SERVICE       0x02    /* filter for service availability */
 #define TIPC_SUB_CANCEL                0x04    /* cancel a subscription */
-#if 0
-/* The following filter options are not currently implemented */
-#define TIPC_SUB_NO_BIND_EVTS  0x04    /* filter out "publish" events */
-#define TIPC_SUB_NO_UNBIND_EVTS        0x08    /* filter out "withdraw" events */
-#define TIPC_SUB_SINGLE_EVT    0x10    /* expire after first event */
-#endif
 
 #define TIPC_WAIT_FOREVER      (~0)    /* timeout for permanent subscription */
 
index 7d42460..0db2395 100644 (file)
@@ -2,7 +2,7 @@
  * include/linux/tipc_config.h: Include file for TIPC configuration interface
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define  TIPC_CMD_SHOW_LINK_STATS   0x000B    /* tx link_name, rx ultra_string */
 #define  TIPC_CMD_SHOW_STATS        0x000F    /* tx unsigned, rx ultra_string */
 
-#if 0
-#define  TIPC_CMD_SHOW_PORT_STATS   0x0008    /* tx port_ref, rx ultra_string */
-#define  TIPC_CMD_RESET_PORT_STATS  0x0009    /* tx port_ref, rx none */
-#define  TIPC_CMD_GET_ROUTES        0x000A    /* tx ?, rx ? */
-#define  TIPC_CMD_GET_LINK_PEER     0x000D    /* tx link_name, rx ? */
-#endif
-
 /*
  * Protected commands:
  * May only be issued by "network administration capable" process.
@@ -96,7 +89,7 @@
 #define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
 #define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* obsoleted */
-#define  TIPC_CMD_GET_MAX_NODES     0x4009    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_NODES     0x4009    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_SLAVES    0x400A    /* obsoleted */
 #define  TIPC_CMD_GET_NETID         0x400B    /* tx none, rx unsigned */
 
 #define  TIPC_CMD_DUMP_LOG          0x410B    /* tx none, rx ultra_string */
 #define  TIPC_CMD_RESET_LINK_STATS  0x410C    /* tx link_name, rx none */
 
-#if 0
-#define  TIPC_CMD_CREATE_LINK       0x4103    /* tx link_create, rx none */
-#define  TIPC_CMD_REMOVE_LINK       0x4104    /* tx link_name, rx none */
-#define  TIPC_CMD_BLOCK_LINK        0x4105    /* tx link_name, rx none */
-#define  TIPC_CMD_UNBLOCK_LINK      0x4106    /* tx link_name, rx none */
-#endif
-
 /*
  * Private commands:
  * May only be issued by "network administration capable" process.
  */
 
 #define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
-#if 0
-#define  TIPC_CMD_SET_ZONE_MASTER   0x8002    /* tx none, rx none */
-#endif
 #define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* obsoleted */
-#define  TIPC_CMD_SET_MAX_NODES     0x8009    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_NODES     0x8009    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_SLAVES    0x800A    /* obsoleted */
 #define  TIPC_CMD_SET_NETID         0x800B    /* tx unsigned, rx none */
 
 #define TIPC_DEF_LINK_TOL 1500
 #define TIPC_MAX_LINK_TOL 30000
 
+#if (TIPC_MIN_LINK_TOL < 16)
+#error "TIPC_MIN_LINK_TOL is too small (abort limit may be NaN)"
+#endif
+
 /*
  * Link window limits (min, default, max), in packets
  */
@@ -215,7 +202,7 @@ struct tipc_link_info {
 
 struct tipc_bearer_config {
        __be32 priority;                /* Range [1,31]. Override per link  */
-       __be32 detect_scope;
+       __be32 disc_domain;             /* <Z.C.N> describing desired nodes */
        char name[TIPC_MAX_BEARER_NAME];
 };
 
@@ -247,15 +234,6 @@ struct tipc_name_table_query {
 #define TIPC_CFG_NOT_SUPPORTED  "\x84" /* request is not supported by TIPC */
 #define TIPC_CFG_INVALID_VALUE  "\x85"  /* request has invalid argument value */
 
-#if 0
-/* prototypes TLV structures for proposed commands */
-struct tipc_link_create {
-       __u32   domain;
-       struct tipc_media_addr peer_addr;
-       char bearer_name[TIPC_MAX_BEARER_NAME];
-};
-#endif
-
 /*
  * A TLV consists of a descriptor, followed by the TLV value.
  * TLV descriptor fields are stored in network byte order;
index c681461..97c84a5 100644 (file)
@@ -33,12 +33,7 @@ struct tracepoint {
        void (*regfunc)(void);
        void (*unregfunc)(void);
        struct tracepoint_func __rcu *funcs;
-} __attribute__((aligned(32)));                /*
-                                        * Aligned on 32 bytes because it is
-                                        * globally visible and gcc happily
-                                        * align these on the structure size.
-                                        * Keep in sync with vmlinux.lds.h.
-                                        */
+};
 
 /*
  * Connect a probe to a tracepoint.
@@ -61,15 +56,15 @@ extern void tracepoint_probe_update_all(void);
 
 struct tracepoint_iter {
        struct module *module;
-       struct tracepoint *tracepoint;
+       struct tracepoint * const *tracepoint;
 };
 
 extern void tracepoint_iter_start(struct tracepoint_iter *iter);
 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-       struct tracepoint *begin, struct tracepoint *end);
+extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+       struct tracepoint * const *begin, struct tracepoint * const *end);
 
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -84,11 +79,13 @@ static inline void tracepoint_synchronize_unregister(void)
 #define PARAMS(args...) args
 
 #ifdef CONFIG_TRACEPOINTS
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-       struct tracepoint *end);
+extern
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+       struct tracepoint * const *end);
 #else
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-       struct tracepoint *end)
+static inline
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+       struct tracepoint * const *end)
 { }
 #endif /* CONFIG_TRACEPOINTS */
 
@@ -174,12 +171,20 @@ do_trace:                                                         \
        {                                                               \
        }
 
+/*
+ * We have no guarantee that gcc and the linker won't up-align the tracepoint
+ * structures, so we create an array of pointers that will be used for iteration
+ * on the tracepoints.
+ */
 #define DEFINE_TRACE_FN(name, reg, unreg)                              \
        static const char __tpstrtab_##name[]                           \
        __attribute__((section("__tracepoints_strings"))) = #name;      \
        struct tracepoint __tracepoint_##name                           \
-       __attribute__((section("__tracepoints"), aligned(32))) =        \
-               { __tpstrtab_##name, 0, reg, unreg, NULL }
+       __attribute__((section("__tracepoints"))) =                     \
+               { __tpstrtab_##name, 0, reg, unreg, NULL };             \
+       static struct tracepoint * const __tracepoint_ptr_##name __used \
+       __attribute__((section("__tracepoints_ptrs"))) =                \
+               &__tracepoint_##name;
 
 #define DEFINE_TRACE(name)                                             \
        DEFINE_TRACE_FN(name, NULL, NULL);
index 5e86dc7..81a9279 100644 (file)
@@ -89,7 +89,7 @@ struct usb_cdc_acm_descriptor {
 
 #define USB_CDC_COMM_FEATURE   0x01
 #define USB_CDC_CAP_LINE       0x02
-#define USB_CDC_CAP_BRK        0x04
+#define USB_CDC_CAP_BRK                0x04
 #define USB_CDC_CAP_NOTIFY     0x08
 
 /* "Union Functional Descriptor" from CDC spec 5.2.3.8 */
@@ -271,6 +271,11 @@ struct usb_cdc_notification {
        __le16  wLength;
 } __attribute__ ((packed));
 
+struct usb_cdc_speed_change {
+       __le32  DLBitRRate;     /* contains the downlink bit rate (IN pipe) */
+       __le32  ULBitRate;      /* contains the uplink bit rate (OUT pipe) */
+} __attribute__ ((packed));
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -292,7 +297,7 @@ struct usb_cdc_ncm_ntb_parameters {
        __le16  wNdpOutDivisor;
        __le16  wNdpOutPayloadRemainder;
        __le16  wNdpOutAlignment;
-       __le16  wPadding2;
+       __le16  wNtbOutMaxDatagrams;
 } __attribute__ ((packed));
 
 /*
@@ -307,7 +312,7 @@ struct usb_cdc_ncm_nth16 {
        __le16  wHeaderLength;
        __le16  wSequence;
        __le16  wBlockLength;
-       __le16  wFpIndex;
+       __le16  wNdpIndex;
 } __attribute__ ((packed));
 
 struct usb_cdc_ncm_nth32 {
@@ -315,7 +320,7 @@ struct usb_cdc_ncm_nth32 {
        __le16  wHeaderLength;
        __le16  wSequence;
        __le32  dwBlockLength;
-       __le32  dwFpIndex;
+       __le32  dwNdpIndex;
 } __attribute__ ((packed));
 
 /*
@@ -337,7 +342,7 @@ struct usb_cdc_ncm_dpe16 {
 struct usb_cdc_ncm_ndp16 {
        __le32  dwSignature;
        __le16  wLength;
-       __le16  wNextFpIndex;
+       __le16  wNextNdpIndex;
        struct  usb_cdc_ncm_dpe16 dpe16[0];
 } __attribute__ ((packed));
 
@@ -375,6 +380,7 @@ struct usb_cdc_ncm_ndp32 {
 #define USB_CDC_NCM_NCAP_ENCAP_COMMAND                 (1 << 2)
 #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE             (1 << 3)
 #define USB_CDC_NCM_NCAP_CRC_MODE                      (1 << 4)
+#define        USB_CDC_NCM_NCAP_NTB_INPUT_SIZE                 (1 << 5)
 
 /* CDC NCM subclass Table 6-3: NTB Parameter Structure */
 #define USB_CDC_NCM_NTB16_SUPPORTED                    (1 << 0)
@@ -392,6 +398,13 @@ struct usb_cdc_ncm_ndp32 {
 #define USB_CDC_NCM_NTB_MIN_IN_SIZE                    2048
 #define USB_CDC_NCM_NTB_MIN_OUT_SIZE                   2048
 
+/* NTB Input Size Structure */
+struct usb_cdc_ncm_ndp_input_size {
+       __le32  dwNtbInMaxSize;
+       __le16  wNtbInMaxDatagrams;
+       __le16  wReserved;
+} __attribute__ ((packed));
+
 /* CDC NCM subclass 6.2.11 SetCrcMode */
 #define USB_CDC_NCM_CRC_NOT_APPENDED                   0x00
 #define USB_CDC_NCM_CRC_APPENDED                       0x01
index dd6ee49..a854fe8 100644 (file)
@@ -112,6 +112,7 @@ struct usb_hcd {
        /* Flags that get set only during HCD registration or removal. */
        unsigned                rh_registered:1;/* is root hub registered? */
        unsigned                rh_pollable:1;  /* may we poll the root hub? */
+       unsigned                msix_enabled:1; /* driver has MSI-X enabled? */
 
        /* The next flag is a stopgap, to be removed when all the HCDs
         * support the new root-hub polling mechanism. */
index b92e173..7d1babb 100644 (file)
 #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
 #define __LINUX_USB_GADGET_MSM72K_UDC_H__
 
-#ifdef CONFIG_ARCH_MSM7X00A
-#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
-#else
 #define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
 #define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
-#endif
 #define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
 
 #define USB_USBCMD           (MSM_USB_BASE + 0x0140)
index 16d682f..c904913 100644 (file)
@@ -347,6 +347,9 @@ extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
 extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
                                        unsigned int ch);
 extern int usb_serial_handle_break(struct usb_serial_port *port);
+extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+                                        struct tty_struct *tty,
+                                        unsigned int status);
 
 
 extern int usb_serial_bus_register(struct usb_serial_driver *device);
index 0093dd7..800617b 100644 (file)
@@ -109,7 +109,10 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
                                      unsigned int fbit)
 {
        /* Did you forget to fix assumptions on max features? */
-       MAYBE_BUILD_BUG_ON(fbit >= 32);
+       if (__builtin_constant_p(fbit))
+               BUILD_BUG_ON(fbit >= 32);
+       else
+               BUG_ON(fbit >= 32);
 
        if (fbit < VIRTIO_TRANSPORT_F_START)
                virtio_check_driver_offered_feature(vdev, fbit);
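
The split matters because most callers pass a compile-time constant feature bit, so an out-of-range value now fails the build instead of crashing at runtime; a small sketch (VIRTIO_NET_F_MAC is just a familiar example, not tied to this hunk):

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

static bool device_has_mac(struct virtio_device *vdev)
{
	/* constant fbit: range-checked by BUILD_BUG_ON at compile time */
	return virtio_has_feature(vdev, VIRTIO_NET_F_MAC);
}
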
index a85064d..e4d3335 100644 (file)
@@ -7,7 +7,8 @@
  * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
  * anyone can use the definitions to implement compatible drivers/servers.
  *
- * Copyright (C) Red Hat, Inc., 2009, 2010
+ * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
+ * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
  */
 
 /* Feature bits */
index 1ac1158..f7998a3 100644 (file)
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
        WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
-       WQ_FREEZEABLE           = 1 << 2, /* freeze during suspend */
+       WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags)
 
 #define create_workqueue(name)                                 \
        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)                      \
-       alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)                       \
+       alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)                    \
        alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
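
Usage is unchanged apart from the spelling; a sketch with a hypothetical driver name:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;

static int mydrv_start(void)
{
	/* work queued here is frozen across suspend and thawed on resume */
	mydrv_wq = create_freezable_workqueue("mydrv");
	if (!mydrv_wq)
		return -ENOMEM;
	return 0;
}
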
 
index 930fdd2..22e61fd 100644 (file)
@@ -84,6 +84,16 @@ struct xfrm_replay_state {
        __u32   bitmap;
 };
 
+struct xfrm_replay_state_esn {
+       unsigned int    bmp_len;
+       __u32           oseq;
+       __u32           seq;
+       __u32           oseq_hi;
+       __u32           seq_hi;
+       __u32           replay_window;
+       __u32           bmp[0];
+};
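
Because bmp[] is a variable-length replay bitmap, the object is sized from bmp_len; a user-space sketch of the arithmetic (the helper name is invented):

#include <linux/xfrm.h>
#include <stdlib.h>

static struct xfrm_replay_state_esn *alloc_replay_esn(unsigned int bmp_len)
{
	struct xfrm_replay_state_esn *esn;

	/* fixed header plus bmp_len 32-bit words of bitmap */
	esn = calloc(1, sizeof(*esn) + bmp_len * sizeof(__u32));
	if (esn)
		esn->bmp_len = bmp_len;
	return esn;
}
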
+
 struct xfrm_algo {
        char            alg_name[64];
        unsigned int    alg_key_len;    /* in bits */
@@ -284,6 +294,7 @@ enum xfrm_attr_type_t {
        XFRMA_ALG_AUTH_TRUNC,   /* struct xfrm_algo_auth */
        XFRMA_MARK,             /* struct xfrm_mark */
        XFRMA_TFCPAD,           /* __u32 */
+       XFRMA_REPLAY_ESN_VAL,   /* struct xfrm_replay_esn */
        __XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -350,6 +361,8 @@ struct xfrm_usersa_info {
 #define XFRM_STATE_WILDRECV    8
 #define XFRM_STATE_ICMP                16
 #define XFRM_STATE_AF_UNSPEC   32
+#define XFRM_STATE_ALIGN4      64
+#define XFRM_STATE_ESN         128
 };
 
 struct xfrm_usersa_id {
index 0c5e725..4375043 100644 (file)
@@ -64,6 +64,11 @@ struct bt_security {
 
 #define BT_DEFER_SETUP 7
 
+#define BT_FLUSHABLE   8
+
+#define BT_FLUSHABLE_OFF       0
+#define BT_FLUSHABLE_ON                1
+
 #define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
 #define BT_ERR(fmt, arg...)  printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
 #define BT_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n" , __func__ , ## arg)
@@ -200,4 +205,32 @@ extern void bt_sysfs_cleanup(void);
 
 extern struct dentry *bt_debugfs;
 
+#ifdef CONFIG_BT_L2CAP
+int l2cap_init(void);
+void l2cap_exit(void);
+#else
+static inline int l2cap_init(void)
+{
+       return 0;
+}
+
+static inline void l2cap_exit(void)
+{
+}
+#endif
+
+#ifdef CONFIG_BT_SCO
+int sco_init(void);
+void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+       return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
 #endif /* __BLUETOOTH_H */
index 29a7a8c..ec6acf2 100644 (file)
@@ -76,6 +76,14 @@ enum {
        HCI_INQUIRY,
 
        HCI_RAW,
+
+       HCI_SETUP,
+       HCI_AUTO_OFF,
+       HCI_MGMT,
+       HCI_PAIRABLE,
+       HCI_SERVICE_CACHE,
+       HCI_LINK_KEYS,
+       HCI_DEBUG_KEYS,
 };
 
 /* HCI ioctl defines */
@@ -111,6 +119,7 @@ enum {
 #define HCI_PAIRING_TIMEOUT    (60000) /* 60 seconds */
 #define HCI_IDLE_TIMEOUT       (6000)  /* 6 seconds */
 #define HCI_INIT_TIMEOUT       (10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT                (1000)  /* 1 second */
 
 /* HCI data types */
 #define HCI_COMMAND_PKT                0x01
@@ -150,6 +159,7 @@ enum {
 #define EDR_ESCO_MASK  (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
 
 /* ACL flags */
+#define ACL_START_NO_FLUSH     0x00
 #define ACL_CONT               0x01
 #define ACL_START              0x02
 #define ACL_ACTIVE_BCAST       0x04
@@ -159,6 +169,8 @@ enum {
 #define SCO_LINK       0x00
 #define ACL_LINK       0x01
 #define ESCO_LINK      0x02
+/* Low Energy links do not have a defined link type. Use an invented one */
+#define LE_LINK                0x80
 
 /* LMP features */
 #define LMP_3SLOT      0x01
@@ -183,17 +195,25 @@ enum {
 #define LMP_PSCHEME    0x02
 #define LMP_PCONTROL   0x04
 
+#define LMP_RSSI_INQ   0x40
 #define LMP_ESCO       0x80
 
 #define LMP_EV4                0x01
 #define LMP_EV5                0x02
+#define LMP_LE         0x40
 
 #define LMP_SNIFF_SUBR 0x02
+#define LMP_PAUSE_ENC  0x04
 #define LMP_EDR_ESCO_2M        0x20
 #define LMP_EDR_ESCO_3M        0x40
 #define LMP_EDR_3S_ESCO        0x80
 
+#define LMP_EXT_INQ    0x01
 #define LMP_SIMPLE_PAIR        0x08
+#define LMP_NO_FLUSH   0x40
+
+#define LMP_LSTO       0x01
+#define LMP_INQ_TX_PWR 0x02
 
 /* Connection modes */
 #define HCI_CM_ACTIVE  0x0000
@@ -225,6 +245,8 @@ enum {
 #define HCI_AT_GENERAL_BONDING_MITM    0x05
 
 /* -----  HCI Commands ---- */
+#define HCI_OP_NOP                     0x0000
+
 #define HCI_OP_INQUIRY                 0x0401
 struct hci_cp_inquiry {
        __u8     lap[3];
@@ -292,11 +314,19 @@ struct hci_cp_pin_code_reply {
        __u8     pin_len;
        __u8     pin_code[16];
 } __packed;
+struct hci_rp_pin_code_reply {
+       __u8     status;
+       bdaddr_t bdaddr;
+} __packed;
 
 #define HCI_OP_PIN_CODE_NEG_REPLY      0x040e
 struct hci_cp_pin_code_neg_reply {
        bdaddr_t bdaddr;
 } __packed;
+struct hci_rp_pin_code_neg_reply {
+       __u8     status;
+       bdaddr_t bdaddr;
+} __packed;
 
 #define HCI_OP_CHANGE_CONN_PTYPE       0x040f
 struct hci_cp_change_conn_ptype {
@@ -377,6 +407,31 @@ struct hci_cp_reject_sync_conn_req {
        __u8     reason;
 } __packed;
 
+#define HCI_OP_IO_CAPABILITY_REPLY     0x042b
+struct hci_cp_io_capability_reply {
+       bdaddr_t bdaddr;
+       __u8     capability;
+       __u8     oob_data;
+       __u8     authentication;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_REPLY              0x042c
+struct hci_cp_user_confirm_reply {
+       bdaddr_t bdaddr;
+} __packed;
+struct hci_rp_user_confirm_reply {
+       __u8     status;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_USER_CONFIRM_NEG_REPLY  0x042d
+
+#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
+struct hci_cp_io_capability_neg_reply {
+       bdaddr_t bdaddr;
+       __u8     reason;
+} __packed;
+
 #define HCI_OP_SNIFF_MODE              0x0803
 struct hci_cp_sniff_mode {
        __le16   handle;
@@ -474,6 +529,12 @@ struct hci_cp_set_event_flt {
 #define HCI_CONN_SETUP_AUTO_OFF        0x01
 #define HCI_CONN_SETUP_AUTO_ON 0x02
 
+#define HCI_OP_DELETE_STORED_LINK_KEY  0x0c12
+struct hci_cp_delete_stored_link_key {
+       bdaddr_t bdaddr;
+       __u8     delete_all;
+} __packed;
+
 #define HCI_OP_WRITE_LOCAL_NAME                0x0c13
 struct hci_cp_write_local_name {
        __u8     name[248];
@@ -537,6 +598,8 @@ struct hci_cp_host_buffer_size {
        __le16   sco_max_pkt;
 } __packed;
 
+#define HCI_OP_WRITE_INQUIRY_MODE      0x0c45
+
 #define HCI_OP_READ_SSP_MODE           0x0c55
 struct hci_rp_read_ssp_mode {
        __u8     status;
@@ -548,6 +611,8 @@ struct hci_cp_write_ssp_mode {
        __u8     mode;
 } __packed;
 
+#define HCI_OP_READ_INQ_RSP_TX_POWER   0x0c58
+
 #define HCI_OP_READ_LOCAL_VERSION      0x1001
 struct hci_rp_read_local_version {
        __u8     status;
@@ -593,6 +658,47 @@ struct hci_rp_read_bd_addr {
        bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_OP_LE_SET_EVENT_MASK       0x2001
+struct hci_cp_le_set_event_mask {
+       __u8     mask[8];
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE     0x2002
+struct hci_rp_le_read_buffer_size {
+       __u8     status;
+       __le16   le_mtu;
+       __u8     le_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN          0x200d
+struct hci_cp_le_create_conn {
+       __le16   scan_interval;
+       __le16   scan_window;
+       __u8     filter_policy;
+       __u8     peer_addr_type;
+       bdaddr_t peer_addr;
+       __u8     own_address_type;
+       __le16   conn_interval_min;
+       __le16   conn_interval_max;
+       __le16   conn_latency;
+       __le16   supervision_timeout;
+       __le16   min_ce_len;
+       __le16   max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN_CANCEL   0x200e
+
+#define HCI_OP_LE_CONN_UPDATE          0x2013
+struct hci_cp_le_conn_update {
+       __le16   handle;
+       __le16   conn_interval_min;
+       __le16   conn_interval_max;
+       __le16   conn_latency;
+       __le16   supervision_timeout;
+       __le16   min_ce_len;
+       __le16   max_ce_len;
+} __packed;
+
 /* ---- HCI Events ---- */
 #define HCI_EV_INQUIRY_COMPLETE                0x01
 
@@ -833,6 +939,20 @@ struct hci_ev_io_capa_request {
        bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_EV_IO_CAPA_REPLY           0x32
+struct hci_ev_io_capa_reply {
+       bdaddr_t bdaddr;
+       __u8     capability;
+       __u8     oob_data;
+       __u8     authentication;
+} __packed;
+
+#define HCI_EV_USER_CONFIRM_REQUEST    0x33
+struct hci_ev_user_confirm_req {
+       bdaddr_t        bdaddr;
+       __le32          passkey;
+} __packed;
+
 #define HCI_EV_SIMPLE_PAIR_COMPLETE    0x36
 struct hci_ev_simple_pair_complete {
        __u8     status;
@@ -845,6 +965,25 @@ struct hci_ev_remote_host_features {
        __u8     features[8];
 } __packed;
 
+#define HCI_EV_LE_META                 0x3e
+struct hci_ev_le_meta {
+       __u8     subevent;
+} __packed;
+
+/* Low energy meta events */
+#define HCI_EV_LE_CONN_COMPLETE                0x01
+struct hci_ev_le_conn_complete {
+       __u8     status;
+       __le16   handle;
+       __u8     role;
+       __u8     bdaddr_type;
+       bdaddr_t bdaddr;
+       __le16   interval;
+       __le16   latency;
+       __le16   supervision_timeout;
+       __u8     clk_accurancy;
+} __packed;
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL  0xfd
 struct hci_ev_stack_internal {
index a29feb0..441dadb 100644 (file)
@@ -60,12 +60,28 @@ struct hci_conn_hash {
        spinlock_t       lock;
        unsigned int     acl_num;
        unsigned int     sco_num;
+       unsigned int     le_num;
 };
 
 struct bdaddr_list {
        struct list_head list;
        bdaddr_t bdaddr;
 };
+
+struct bt_uuid {
+       struct list_head list;
+       u8 uuid[16];
+       u8 svc_hint;
+};
+
+struct link_key {
+       struct list_head list;
+       bdaddr_t bdaddr;
+       u8 type;
+       u8 val[16];
+       u8 pin_len;
+};
+
 #define NUM_REASSEMBLY 4
 struct hci_dev {
        struct list_head list;
@@ -80,13 +96,18 @@ struct hci_dev {
        bdaddr_t        bdaddr;
        __u8            dev_name[248];
        __u8            dev_class[3];
+       __u8            major_class;
+       __u8            minor_class;
        __u8            features[8];
        __u8            commands[64];
        __u8            ssp_mode;
        __u8            hci_ver;
        __u16           hci_rev;
+       __u8            lmp_ver;
        __u16           manufacturer;
+       __le16          lmp_subver;
        __u16           voice_setting;
+       __u8            io_capability;
 
        __u16           pkt_type;
        __u16           esco_type;
@@ -102,18 +123,26 @@ struct hci_dev {
        atomic_t        cmd_cnt;
        unsigned int    acl_cnt;
        unsigned int    sco_cnt;
+       unsigned int    le_cnt;
 
        unsigned int    acl_mtu;
        unsigned int    sco_mtu;
+       unsigned int    le_mtu;
        unsigned int    acl_pkts;
        unsigned int    sco_pkts;
+       unsigned int    le_pkts;
 
-       unsigned long   cmd_last_tx;
        unsigned long   acl_last_tx;
        unsigned long   sco_last_tx;
+       unsigned long   le_last_tx;
 
        struct workqueue_struct *workqueue;
 
+       struct work_struct      power_on;
+       struct work_struct      power_off;
+       struct timer_list       off_timer;
+
+       struct timer_list       cmd_timer;
        struct tasklet_struct   cmd_task;
        struct tasklet_struct   rx_task;
        struct tasklet_struct   tx_task;
@@ -129,12 +158,17 @@ struct hci_dev {
        wait_queue_head_t       req_wait_q;
        __u32                   req_status;
        __u32                   req_result;
-       __u16                   req_last_cmd;
+
+       __u16                   init_last_cmd;
 
        struct inquiry_cache    inq_cache;
        struct hci_conn_hash    conn_hash;
        struct list_head        blacklist;
 
+       struct list_head        uuids;
+
+       struct list_head        link_keys;
+
        struct hci_dev_stats    stat;
 
        struct sk_buff_head     driver_init;
@@ -165,30 +199,37 @@ struct hci_dev {
 struct hci_conn {
        struct list_head list;
 
-       atomic_t         refcnt;
-       spinlock_t       lock;
-
-       bdaddr_t         dst;
-       __u16            handle;
-       __u16            state;
-       __u8             mode;
-       __u8             type;
-       __u8             out;
-       __u8             attempt;
-       __u8             dev_class[3];
-       __u8             features[8];
-       __u8             ssp_mode;
-       __u16            interval;
-       __u16            pkt_type;
-       __u16            link_policy;
-       __u32            link_mode;
-       __u8             auth_type;
-       __u8             sec_level;
-       __u8             power_save;
-       __u16            disc_timeout;
-       unsigned long    pend;
-
-       unsigned int     sent;
+       atomic_t        refcnt;
+       spinlock_t      lock;
+
+       bdaddr_t        dst;
+       __u16           handle;
+       __u16           state;
+       __u8            mode;
+       __u8            type;
+       __u8            out;
+       __u8            attempt;
+       __u8            dev_class[3];
+       __u8            features[8];
+       __u8            ssp_mode;
+       __u16           interval;
+       __u16           pkt_type;
+       __u16           link_policy;
+       __u32           link_mode;
+       __u8            auth_type;
+       __u8            sec_level;
+       __u8            pending_sec_level;
+       __u8            pin_length;
+       __u8            io_capability;
+       __u8            power_save;
+       __u16           disc_timeout;
+       unsigned long   pend;
+
+       __u8            remote_cap;
+       __u8            remote_oob;
+       __u8            remote_auth;
+
+       unsigned int    sent;
 
        struct sk_buff_head data_q;
 
@@ -207,6 +248,10 @@ struct hci_conn {
        void            *priv;
 
        struct hci_conn *link;
+
+       void (*connect_cfm_cb)  (struct hci_conn *conn, u8 status);
+       void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
+       void (*disconn_cfm_cb)  (struct hci_conn *conn, u8 reason);
 };
 
 extern struct hci_proto *hci_proto[];
@@ -273,24 +318,40 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_add(&c->list, &h->list);
-       if (c->type == ACL_LINK)
+       switch (c->type) {
+       case ACL_LINK:
                h->acl_num++;
-       else
+               break;
+       case LE_LINK:
+               h->le_num++;
+               break;
+       case SCO_LINK:
+       case ESCO_LINK:
                h->sco_num++;
+               break;
+       }
 }
 
 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_del(&c->list);
-       if (c->type == ACL_LINK)
+       switch (c->type) {
+       case ACL_LINK:
                h->acl_num--;
-       else
+               break;
+       case LE_LINK:
+               h->le_num--;
+               break;
+       case SCO_LINK:
+       case ESCO_LINK:
                h->sco_num--;
+               break;
+       }
 }
 
 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
-                                       __u16 handle)
+                                                               __u16 handle)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
@@ -305,7 +366,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
 }
 
 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
-                                       __u8 type, bdaddr_t *ba)
+                                                       __u8 type, bdaddr_t *ba)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
@@ -320,7 +381,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
 }
 
 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
-                                       __u8 type, __u16 state)
+                                                       __u8 type, __u16 state)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
@@ -436,6 +497,16 @@ int hci_inquiry(void __user *arg);
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int hci_blacklist_clear(struct hci_dev *hdev);
 
+int hci_uuids_clear(struct hci_dev *hdev);
+
+int hci_link_keys_clear(struct hci_dev *hdev);
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+                                               u8 *key, u8 type, u8 pin_len);
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+
+void hci_del_off_timer(struct hci_dev *hdev);
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct sk_buff *skb);
@@ -457,6 +528,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
 #define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
 #define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
+#define lmp_le_capable(dev)        ((dev)->features[4] & LMP_LE)
 
 /* ----- HCI protocols ----- */
 struct hci_proto {
@@ -502,6 +575,9 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);
+
+       if (conn->connect_cfm_cb)
+               conn->connect_cfm_cb(conn, status);
 }
 
 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
@@ -531,6 +607,9 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->disconn_cfm)
                hp->disconn_cfm(conn, reason);
+
+       if (conn->disconn_cfm_cb)
+               conn->disconn_cfm_cb(conn, reason);
 }
 
 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
@@ -550,6 +629,9 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);
+
+       if (conn->security_cfm_cb)
+               conn->security_cfm_cb(conn, status);
 }
 
 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
@@ -563,6 +645,9 @@ static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u
        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);
+
+       if (conn->security_cfm_cb)
+               conn->security_cfm_cb(conn, status);
 }
 
 int hci_register_proto(struct hci_proto *hproto);
@@ -659,12 +744,29 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
 
 /* ----- HCI Sockets ----- */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+                                                       struct sock *skip_sk);
 
 /* Management interface */
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
 int mgmt_index_added(u16 index);
 int mgmt_index_removed(u16 index);
+int mgmt_powered(u16 index, u8 powered);
+int mgmt_discoverable(u16 index, u8 discoverable);
+int mgmt_connectable(u16 index, u8 connectable);
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type);
+int mgmt_connected(u16 index, bdaddr_t *bdaddr);
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
+int mgmt_disconnect_failed(u16 index);
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr);
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value);
+int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
+                                                               u8 status);
+int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -696,4 +798,6 @@ struct hci_sec_filter {
 
 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);
 
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+                                       u16 latency, u16 to_multiplier);
 #endif /* __HCI_CORE_H */
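
The hci_le_conn_update() prototype added above takes the raw LE connection parameters. As a rough illustration (a standalone userspace sketch with invented names, not kernel code): the interval values are in units of 1.25 ms, the latency is a count of connection events the slave may skip, and the supervision timeout multiplier is in units of 10 ms.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustration only: print the raw hci_le_conn_update() units as real
     * time (interval unit 1.25 ms, supervision timeout unit 10 ms).
     */
    static void le_conn_params_print(uint16_t min, uint16_t max,
                                     uint16_t latency, uint16_t to_multiplier)
    {
        printf("interval: %.2f ms - %.2f ms\n", min * 1.25, max * 1.25);
        printf("slave latency: %d events\n", latency);
        printf("supervision timeout: %d ms\n", to_multiplier * 10);
    }

    int main(void)
    {
        /* e.g. 50-70 ms interval, no latency, 420 ms supervision timeout */
        le_conn_params_print(40, 56, 0, 42);
        return 0;
    }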
index 7ad25ca..4f4bff1 100644 (file)
@@ -38,6 +38,7 @@
 #define L2CAP_DEFAULT_MAX_PDU_SIZE     1009    /* Sized for 3-DH5 packet */
 #define L2CAP_DEFAULT_ACK_TO           200
 #define L2CAP_LOCAL_BUSY_TRIES         12
+#define L2CAP_LE_DEFAULT_MTU           23
 
 #define L2CAP_CONN_TIMEOUT     (40000) /* 40 seconds */
 #define L2CAP_INFO_TIMEOUT     (4000)  /*  4 seconds */
@@ -88,6 +89,8 @@ struct l2cap_conninfo {
 #define L2CAP_ECHO_RSP         0x09
 #define L2CAP_INFO_REQ         0x0a
 #define L2CAP_INFO_RSP         0x0b
+#define L2CAP_CONN_PARAM_UPDATE_REQ    0x12
+#define L2CAP_CONN_PARAM_UPDATE_RSP    0x13
 
 /* L2CAP feature mask */
 #define L2CAP_FEAT_FLOWCTL     0x00000001
@@ -160,6 +163,9 @@ struct l2cap_conn_rsp {
 /* channel identifier */
 #define L2CAP_CID_SIGNALING    0x0001
 #define L2CAP_CID_CONN_LESS    0x0002
+#define L2CAP_CID_LE_DATA      0x0004
+#define L2CAP_CID_LE_SIGNALING 0x0005
+#define L2CAP_CID_SMP          0x0006
 #define L2CAP_CID_DYN_START    0x0040
 #define L2CAP_CID_DYN_END      0xffff
 
@@ -255,6 +261,21 @@ struct l2cap_info_rsp {
 #define L2CAP_IR_SUCCESS    0x0000
 #define L2CAP_IR_NOTSUPP    0x0001
 
+struct l2cap_conn_param_update_req {
+       __le16      min;
+       __le16      max;
+       __le16      latency;
+       __le16      to_multiplier;
+} __packed;
+
+struct l2cap_conn_param_update_rsp {
+       __le16      result;
+} __packed;
+
+/* Connection Parameters result */
+#define L2CAP_CONN_PARAM_ACCEPTED      0x0000
+#define L2CAP_CONN_PARAM_REJECTED      0x0001
+
 /* ----- L2CAP connections ----- */
 struct l2cap_chan_list {
        struct sock     *head;
@@ -327,6 +348,7 @@ struct l2cap_pinfo {
        __u8            sec_level;
        __u8            role_switch;
        __u8            force_reliable;
+       __u8            flushable;
 
        __u8            conf_req[64];
        __u8            conf_len;
@@ -423,6 +445,35 @@ static inline int l2cap_tx_window_full(struct sock *sk)
 #define __is_sframe(ctrl)      ((ctrl) & L2CAP_CTRL_FRAME_TYPE)
 #define __is_sar_start(ctrl)   (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START)
 
-void l2cap_load(void);
+extern int disable_ertm;
+extern const struct proto_ops l2cap_sock_ops;
+extern struct bt_sock_list l2cap_sk_list;
+
+int l2cap_init_sockets(void);
+void l2cap_cleanup_sockets(void);
+
+u8 l2cap_get_ident(struct l2cap_conn *conn);
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
+int l2cap_build_conf_req(struct sock *sk, void *data);
+int __l2cap_wait_ack(struct sock *sk);
+
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len);
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len);
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen);
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len);
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb);
+void l2cap_streaming_send(struct sock *sk);
+int l2cap_ertm_send(struct sock *sk);
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout);
+void l2cap_sock_clear_timer(struct sock *sk);
+void __l2cap_sock_close(struct sock *sk, int reason);
+void l2cap_sock_kill(struct sock *sk);
+void l2cap_sock_init(struct sock *sk, struct sock *parent);
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+                                                       int proto, gfp_t prio);
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err);
+void l2cap_chan_del(struct sock *sk, int err);
+int l2cap_do_connect(struct sock *sk);
 
 #endif /* __L2CAP_H */
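
To make the new signalling PDU concrete, here is a userspace sketch (buffer layout helpers and names are illustrative, not kernel API) that serialises a Connection Parameter Update Request: the usual code/ident/length command header followed by the four little-endian 16-bit fields of l2cap_conn_param_update_req, carried on the new LE signalling channel (CID 0x0005).

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative serialiser for the wire format described above. */
    static void put_le16(uint8_t *p, uint16_t v)
    {
        p[0] = v & 0xff;
        p[1] = v >> 8;
    }

    /* Code 0x12 (L2CAP_CONN_PARAM_UPDATE_REQ): 4-byte command header plus
     * min/max interval, latency and timeout multiplier, little endian.
     */
    static size_t build_conn_param_update_req(uint8_t *buf, uint8_t ident,
                                              uint16_t min, uint16_t max,
                                              uint16_t latency,
                                              uint16_t to_mult)
    {
        buf[0] = 0x12;              /* L2CAP_CONN_PARAM_UPDATE_REQ */
        buf[1] = ident;             /* echoed back in the response */
        put_le16(buf + 2, 8);       /* payload length */
        put_le16(buf + 4, min);
        put_le16(buf + 6, max);
        put_le16(buf + 8, latency);
        put_le16(buf + 10, to_mult);
        return 12;
    }

    int main(void)
    {
        uint8_t pdu[16];
        size_t len = build_conn_param_update_req(pdu, 1, 40, 56, 0, 42);

        for (size_t i = 0; i < len; i++)
            printf("%02x ", pdu[i]);
        printf("\n");
        return 0;
    }

A peer answers with l2cap_conn_param_update_rsp carrying L2CAP_CONN_PARAM_ACCEPTED or L2CAP_CONN_PARAM_REJECTED.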
index ca29c13..5fabfa8 100644 (file)
    SOFTWARE IS DISCLAIMED.
 */
 
+#define MGMT_INDEX_NONE                        0xFFFF
+
 struct mgmt_hdr {
        __le16 opcode;
+       __le16 index;
        __le16 len;
 } __packed;
-#define MGMT_HDR_SIZE                  4
 
 #define MGMT_OP_READ_VERSION           0x0001
 struct mgmt_rp_read_version {
@@ -40,13 +42,10 @@ struct mgmt_rp_read_index_list {
 } __packed;
 
 #define MGMT_OP_READ_INFO              0x0004
-struct mgmt_cp_read_info {
-       __le16 index;
-} __packed;
 struct mgmt_rp_read_info {
-       __le16 index;
        __u8 type;
        __u8 powered;
+       __u8 connectable;
        __u8 discoverable;
        __u8 pairable;
        __u8 sec_mode;
@@ -58,6 +57,116 @@ struct mgmt_rp_read_info {
        __u16 hci_rev;
 } __packed;
 
+struct mgmt_mode {
+       __u8 val;
+} __packed;
+
+#define MGMT_OP_SET_POWERED            0x0005
+
+#define MGMT_OP_SET_DISCOVERABLE       0x0006
+
+#define MGMT_OP_SET_CONNECTABLE                0x0007
+
+#define MGMT_OP_SET_PAIRABLE           0x0008
+
+#define MGMT_OP_ADD_UUID               0x0009
+struct mgmt_cp_add_uuid {
+       __u8 uuid[16];
+       __u8 svc_hint;
+} __packed;
+
+#define MGMT_OP_REMOVE_UUID            0x000A
+struct mgmt_cp_remove_uuid {
+       __u8 uuid[16];
+} __packed;
+
+#define MGMT_OP_SET_DEV_CLASS          0x000B
+struct mgmt_cp_set_dev_class {
+       __u8 major;
+       __u8 minor;
+} __packed;
+
+#define MGMT_OP_SET_SERVICE_CACHE      0x000C
+struct mgmt_cp_set_service_cache {
+       __u8 enable;
+} __packed;
+
+struct mgmt_key_info {
+       bdaddr_t bdaddr;
+       u8 type;
+       u8 val[16];
+       u8 pin_len;
+} __packed;
+
+#define MGMT_OP_LOAD_KEYS              0x000D
+struct mgmt_cp_load_keys {
+       __u8 debug_keys;
+       __le16 key_count;
+       struct mgmt_key_info keys[0];
+} __packed;
+
+#define MGMT_OP_REMOVE_KEY             0x000E
+struct mgmt_cp_remove_key {
+       bdaddr_t bdaddr;
+       __u8 disconnect;
+} __packed;
+
+#define MGMT_OP_DISCONNECT             0x000F
+struct mgmt_cp_disconnect {
+       bdaddr_t bdaddr;
+} __packed;
+struct mgmt_rp_disconnect {
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS                0x0010
+struct mgmt_rp_get_connections {
+       __le16 conn_count;
+       bdaddr_t conn[0];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_REPLY         0x0011
+struct mgmt_cp_pin_code_reply {
+       bdaddr_t bdaddr;
+       __u8 pin_len;
+       __u8 pin_code[16];
+} __packed;
+struct mgmt_rp_pin_code_reply {
+       bdaddr_t bdaddr;
+       __u8 status;
+} __packed;
+
+#define MGMT_OP_PIN_CODE_NEG_REPLY     0x0012
+struct mgmt_cp_pin_code_neg_reply {
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_SET_IO_CAPABILITY      0x0013
+struct mgmt_cp_set_io_capability {
+       __u8 io_capability;
+} __packed;
+
+#define MGMT_OP_PAIR_DEVICE            0x0014
+struct mgmt_cp_pair_device {
+       bdaddr_t bdaddr;
+       __u8 io_cap;
+} __packed;
+struct mgmt_rp_pair_device {
+       bdaddr_t bdaddr;
+       __u8 status;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_REPLY     0x0015
+struct mgmt_cp_user_confirm_reply {
+       bdaddr_t bdaddr;
+} __packed;
+struct mgmt_rp_user_confirm_reply {
+       bdaddr_t bdaddr;
+       __u8 status;
+} __packed;
+
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16 opcode;
@@ -72,16 +181,56 @@ struct mgmt_ev_cmd_status {
 
 #define MGMT_EV_CONTROLLER_ERROR       0x0003
 struct mgmt_ev_controller_error {
-       __le16 index;
        __u8 error_code;
 } __packed;
 
 #define MGMT_EV_INDEX_ADDED            0x0004
-struct mgmt_ev_index_added {
-       __le16 index;
-} __packed;
 
 #define MGMT_EV_INDEX_REMOVED          0x0005
-struct mgmt_ev_index_removed {
-       __le16 index;
+
+#define MGMT_EV_POWERED                        0x0006
+
+#define MGMT_EV_DISCOVERABLE           0x0007
+
+#define MGMT_EV_CONNECTABLE            0x0008
+
+#define MGMT_EV_PAIRABLE               0x0009
+
+#define MGMT_EV_NEW_KEY                        0x000A
+struct mgmt_ev_new_key {
+       struct mgmt_key_info key;
+       __u8 old_key_type;
+} __packed;
+
+#define MGMT_EV_CONNECTED              0x000B
+struct mgmt_ev_connected {
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_DISCONNECTED           0x000C
+struct mgmt_ev_disconnected {
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_CONNECT_FAILED         0x000D
+struct mgmt_ev_connect_failed {
+       bdaddr_t bdaddr;
+       __u8 status;
+} __packed;
+
+#define MGMT_EV_PIN_CODE_REQUEST       0x000E
+struct mgmt_ev_pin_code_request {
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_USER_CONFIRM_REQUEST   0x000F
+struct mgmt_ev_user_confirm_request {
+       bdaddr_t bdaddr;
+       __le32 value;
+} __packed;
+
+#define MGMT_EV_AUTH_FAILED            0x0010
+struct mgmt_ev_auth_failed {
+       bdaddr_t bdaddr;
+       __u8 status;
 } __packed;
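
With the controller index folded into mgmt_hdr, every management command and event names its controller up front (MGMT_INDEX_NONE when there is none) and the fixed header grows from 4 to 6 bytes. A minimal userspace sketch of the resulting wire format, assuming the little-endian layout declared above (the packing helper is invented for the example):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void put_le16(uint8_t *p, uint16_t v)
    {
        p[0] = v & 0xff;
        p[1] = v >> 8;
    }

    /* Assumed layout: <opcode le16><index le16><len le16><params...> */
    static size_t mgmt_pack(uint8_t *buf, uint16_t opcode, uint16_t index,
                            const void *param, uint16_t param_len)
    {
        put_le16(buf + 0, opcode);
        put_le16(buf + 2, index);
        put_le16(buf + 4, param_len);
        if (param_len)
            memcpy(buf + 6, param, param_len);
        return 6 + param_len;
    }

    int main(void)
    {
        uint8_t buf[32];
        uint8_t powered = 1;    /* struct mgmt_mode { __u8 val; } */
        size_t len = mgmt_pack(buf, 0x0005 /* MGMT_OP_SET_POWERED */,
                               0 /* hci0 */, &powered, sizeof(powered));

        printf("%zu byte command\n", len);
        return 0;
    }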
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
new file mode 100644 (file)
index 0000000..8f2edbf
--- /dev/null
@@ -0,0 +1,76 @@
+#ifndef __SMP_H
+#define __SMP_H
+
+struct smp_command_hdr {
+       __u8    code;
+} __packed;
+
+#define SMP_CMD_PAIRING_REQ    0x01
+#define SMP_CMD_PAIRING_RSP    0x02
+struct smp_cmd_pairing {
+       __u8    io_capability;
+       __u8    oob_flag;
+       __u8    auth_req;
+       __u8    max_key_size;
+       __u8    init_key_dist;
+       __u8    resp_key_dist;
+} __packed;
+
+#define SMP_CMD_PAIRING_CONFIRM        0x03
+struct smp_cmd_pairing_confirm {
+       __u8    confirm_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_RANDOM 0x04
+struct smp_cmd_pairing_random {
+       __u8    rand_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_FAIL   0x05
+struct smp_cmd_pairing_fail {
+       __u8    reason;
+} __packed;
+
+#define SMP_CMD_ENCRYPT_INFO   0x06
+struct smp_cmd_encrypt_info {
+       __u8    ltk[16];
+} __packed;
+
+#define SMP_CMD_MASTER_IDENT   0x07
+struct smp_cmd_master_ident {
+       __u16   ediv;
+       __u8    rand[8];
+} __packed;
+
+#define SMP_CMD_IDENT_INFO     0x08
+struct smp_cmd_ident_info {
+       __u8    irk[16];
+} __packed;
+
+#define SMP_CMD_IDENT_ADDR_INFO        0x09
+struct smp_cmd_ident_addr_info {
+       __u8    addr_type;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define SMP_CMD_SIGN_INFO      0x0a
+struct smp_cmd_sign_info {
+       __u8    csrk[16];
+} __packed;
+
+#define SMP_CMD_SECURITY_REQ   0x0b
+struct smp_cmd_security_req {
+       __u8    auth_req;
+} __packed;
+
+#define SMP_PASSKEY_ENTRY_FAILED       0x01
+#define SMP_OOB_NOT_AVAIL              0x02
+#define SMP_AUTH_REQUIREMENTS          0x03
+#define SMP_CONFIRM_FAILED             0x04
+#define SMP_PAIRING_NOTSUPP            0x05
+#define SMP_ENC_KEY_SIZE               0x06
+#define SMP_CMD_NOTSUPP                0x07
+#define SMP_UNSPECIFIED                0x08
+#define SMP_REPEATED_ATTEMPTS          0x09
+
+#endif /* __SMP_H */
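
Each SMP PDU above is a one-byte opcode followed by a fixed-size payload. The sketch below mirrors two of the structures locally (GCC-style packing instead of the kernel's __packed, local type names) simply to check the wire sizes they imply.

    #include <stdint.h>
    #include <stdio.h>

    struct smp_hdr {                /* ~ struct smp_command_hdr */
        uint8_t code;
    } __attribute__((packed));

    struct smp_pairing {            /* ~ struct smp_cmd_pairing */
        uint8_t io_capability;
        uint8_t oob_flag;
        uint8_t auth_req;
        uint8_t max_key_size;
        uint8_t init_key_dist;
        uint8_t resp_key_dist;
    } __attribute__((packed));

    int main(void)
    {
        /* Pairing Request = code 0x01 (1 byte) + 6 bytes of parameters */
        printf("hdr %zu bytes, pairing payload %zu bytes\n",
               sizeof(struct smp_hdr), sizeof(struct smp_pairing));
        return 0;
    }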
index 1322695..60f7876 100644 (file)
@@ -413,7 +413,7 @@ struct station_parameters {
  * @STATION_INFO_PLID: @plid filled
  * @STATION_INFO_PLINK_STATE: @plink_state filled
  * @STATION_INFO_SIGNAL: @signal filled
- * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled
+ * @STATION_INFO_TX_BITRATE: @txrate fields are filled
  *  (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
  * @STATION_INFO_RX_PACKETS: @rx_packets filled
  * @STATION_INFO_TX_PACKETS: @tx_packets filled
@@ -421,6 +421,7 @@ struct station_parameters {
  * @STATION_INFO_TX_FAILED: @tx_failed filled
  * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
  * @STATION_INFO_SIGNAL_AVG: @signal_avg filled
+ * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
  */
 enum station_info_flags {
        STATION_INFO_INACTIVE_TIME      = 1<<0,
@@ -437,6 +438,7 @@ enum station_info_flags {
        STATION_INFO_TX_FAILED          = 1<<11,
        STATION_INFO_RX_DROP_MISC       = 1<<12,
        STATION_INFO_SIGNAL_AVG         = 1<<13,
+       STATION_INFO_RX_BITRATE         = 1<<14,
 };
 
 /**
@@ -506,6 +508,7 @@ struct station_info {
        s8 signal;
        s8 signal_avg;
        struct rate_info txrate;
+       struct rate_info rxrate;
        u32 rx_packets;
        u32 tx_packets;
        u32 tx_retries;
@@ -1194,6 +1197,10 @@ struct cfg80211_pmksa {
  *     (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
  *
  * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy);
@@ -1361,6 +1368,10 @@ struct cfg80211_ops {
 
        int     (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
        int     (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+
+       int     (*set_ringparam)(struct wiphy *wiphy, u32 tx, u32 rx);
+       void    (*get_ringparam)(struct wiphy *wiphy,
+                                u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
 };
 
 /*
@@ -1790,8 +1801,9 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
 /**
  * ieee80211_channel_to_frequency - convert channel number to frequency
  * @chan: channel number
+ * @band: band, necessary due to channel number overlap
  */
-extern int ieee80211_channel_to_frequency(int chan);
+extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
 
 /**
  * ieee80211_frequency_to_channel - convert frequency to channel number
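
The band argument added to ieee80211_channel_to_frequency() is needed because channel numbers are reused across bands. A simplified userspace restatement of the mapping, covering only 2.4 GHz and 5 GHz (the in-kernel helper takes enum ieee80211_band and handles the full set of cases):

    #include <stdio.h>

    /* Simplified sketch; not the kernel implementation. */
    enum band { BAND_2GHZ, BAND_5GHZ };

    static int chan_to_freq(int chan, enum band band)
    {
        if (band == BAND_2GHZ) {
            if (chan == 14)
                return 2484;        /* the one irregular 2.4 GHz channel */
            return 2407 + chan * 5;
        }
        return 5000 + chan * 5;     /* 5 GHz */
    }

    int main(void)
    {
        printf("chan 6 in 2.4 GHz -> %d MHz\n", chan_to_freq(6, BAND_2GHZ));
        printf("chan 36 in 5 GHz  -> %d MHz\n", chan_to_freq(36, BAND_5GHZ));
        return 0;
    }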
index a8e7852..e5983c9 100644 (file)
@@ -43,6 +43,8 @@ struct dcbnl_rtnl_ops {
        int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_getapp) (struct net_device *, struct dcb_app *);
        int (*ieee_setapp) (struct net_device *, struct dcb_app *);
+       int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *);
+       int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *);
 
        /* CEE std */
        u8   (*getstate)(struct net_device *);
@@ -77,7 +79,14 @@ struct dcbnl_rtnl_ops {
        u8   (*getdcbx)(struct net_device *);
        u8   (*setdcbx)(struct net_device *, u8);
 
+       /* peer apps */
+       int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *,
+                              u16 *);
+       int (*peer_getapptable)(struct net_device *, struct dcb_app *);
 
+       /* CEE peer */
+       int (*cee_peer_getpg) (struct net_device *, struct cee_pg *);
+       int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *);
 };
 
 #endif /* __NET_DCBNL_H__ */
index a514a3c..298521e 100644 (file)
@@ -192,10 +192,10 @@ static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
        ethaddr[5] = (__u8)(a >> 8);
 }
 
-static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
+static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
 {
-       fl->uli_u.dnports.sport = scp->addrloc;
-       fl->uli_u.dnports.dport = scp->addrrem;
+       fld->fld_sport = scp->addrloc;
+       fld->fld_dport = scp->addrrem;
 }
 
 extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
index bbcde32..782ef7c 100644 (file)
@@ -98,7 +98,7 @@ struct dn_fib_table {
        int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
                        struct dn_kern_rta *rta, struct nlmsghdr *n,
                        struct netlink_skb_parms *req);
-       int (*lookup)(struct dn_fib_table *t, const struct flowi *fl,
+       int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
                        struct dn_fib_res *res);
        int (*flush)(struct dn_fib_table *t);
        int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
@@ -119,12 +119,12 @@ extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
                                struct dn_kern_rta *rta, 
                                const struct nlmsghdr *nlh, int *errp);
 extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, 
-                       const struct flowi *fl,
+                       const struct flowidn *fld,
                        struct dn_fib_res *res);
 extern void dn_fib_release_info(struct dn_fib_info *fi);
 extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
 extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct flowi *fl,
+extern void dn_fib_select_multipath(const struct flowidn *fld,
                                        struct dn_fib_res *res);
 
 /*
@@ -141,7 +141,7 @@ extern void dn_fib_table_cleanup(void);
 extern void dn_fib_rules_init(void);
 extern void dn_fib_rules_cleanup(void);
 extern unsigned dnet_addr_type(__le16 addr);
-extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res);
+extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
 
 extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
index 9b185df..81712cf 100644 (file)
@@ -16,7 +16,7 @@
 *******************************************************************************/
 
 extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
+extern int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *, struct sock *sk, int flags);
 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
 extern void dn_rt_cache_flush(int delay);
 
@@ -67,7 +67,7 @@ extern void dn_rt_cache_flush(int delay);
 struct dn_route {
        struct dst_entry dst;
 
-       struct flowi fl;
+       struct flowidn fld;
 
        __le16 rt_saddr;
        __le16 rt_daddr;
@@ -82,12 +82,12 @@ struct dn_route {
 
 static inline bool dn_is_input_route(struct dn_route *rt)
 {
-       return rt->fl.iif != 0;
+       return rt->fld.flowidn_iif != 0;
 }
 
 static inline bool dn_is_output_route(struct dn_route *rt)
 {
-       return rt->fl.iif == 0;
+       return rt->fld.flowidn_iif == 0;
 }
 
 extern void dn_route_init(void);
index 93b0310..2a46cba 100644 (file)
@@ -40,24 +40,10 @@ struct dst_entry {
        struct rcu_head         rcu_head;
        struct dst_entry        *child;
        struct net_device       *dev;
-       short                   error;
-       short                   obsolete;
-       int                     flags;
-#define DST_HOST               0x0001
-#define DST_NOXFRM             0x0002
-#define DST_NOPOLICY           0x0004
-#define DST_NOHASH             0x0008
-#define DST_NOCACHE            0x0010
+       struct  dst_ops         *ops;
+       unsigned long           _metrics;
        unsigned long           expires;
-
-       unsigned short          header_len;     /* more space at head required */
-       unsigned short          trailer_len;    /* space to reserve at tail */
-
-       unsigned int            rate_tokens;
-       unsigned long           rate_last;      /* rate limiting for ICMP */
-
        struct dst_entry        *path;
-
        struct neighbour        *neighbour;
        struct hh_cache         *hh;
 #ifdef CONFIG_XFRM
@@ -68,17 +54,16 @@ struct dst_entry {
        int                     (*input)(struct sk_buff*);
        int                     (*output)(struct sk_buff*);
 
-       struct  dst_ops         *ops;
-
-       u32                     _metrics[RTAX_MAX];
-
-#ifdef CONFIG_NET_CLS_ROUTE
+       short                   error;
+       short                   obsolete;
+       unsigned short          header_len;     /* more space at head required */
+       unsigned short          trailer_len;    /* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   tclassid;
 #else
        __u32                   __pad2;
 #endif
 
-
        /*
         * Align __refcnt to a 64 bytes alignment
         * (L1_CACHE_SIZE would be too much)
@@ -93,6 +78,12 @@ struct dst_entry {
        atomic_t                __refcnt;       /* client references    */
        int                     __use;
        unsigned long           lastuse;
+       int                     flags;
+#define DST_HOST               0x0001
+#define DST_NOXFRM             0x0002
+#define DST_NOPOLICY           0x0004
+#define DST_NOHASH             0x0008
+#define DST_NOCACHE            0x0010
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
@@ -103,10 +94,70 @@ struct dst_entry {
 
 #ifdef __KERNEL__
 
+extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[RTAX_MAX];
+
+#define DST_METRICS_READ_ONLY  0x1UL
+#define __DST_METRICS_PTR(Y)   \
+       ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+#define DST_METRICS_PTR(X)     __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+       return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+       unsigned long val = dst->_metrics;
+       if (!(val & DST_METRICS_READ_ONLY))
+               __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+       unsigned long p = dst->_metrics;
+
+       if (p & DST_METRICS_READ_ONLY)
+               return dst->ops->cow_metrics(dst, p);
+       return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+                                   const u32 *src_metrics,
+                                   bool read_only)
+{
+       dst->_metrics = ((unsigned long) src_metrics) |
+               (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+       u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+       if (dst_metrics) {
+               u32 *src_metrics = DST_METRICS_PTR(src);
+
+               memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+       }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+       return DST_METRICS_PTR(dst);
+}
+
 static inline u32
 dst_metric_raw(const struct dst_entry *dst, const int metric)
 {
-       return dst->_metrics[metric-1];
+       u32 *p = DST_METRICS_PTR(dst);
+
+       return p[metric-1];
 }
 
 static inline u32
@@ -131,22 +182,10 @@ dst_metric_advmss(const struct dst_entry *dst)
 
 static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
 {
-       dst->_metrics[metric-1] = val;
-}
+       u32 *p = dst_metrics_write_ptr(dst);
 
-static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
-{
-       memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
-}
-
-static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
-{
-       dst_import_metrics(dest, src->_metrics);
-}
-
-static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
-{
-       return dst->_metrics;
+       if (p)
+               p[metric-1] = val;
 }
 
 static inline u32
@@ -181,8 +220,6 @@ static inline u32
 dst_allfrag(const struct dst_entry *dst)
 {
        int ret = dst_feature(dst,  RTAX_FEATURE_ALLFRAG);
-       /* Yes, _exactly_. This is paranoia. */
-       barrier();
        return ret;
 }
 
@@ -315,7 +352,7 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
 }
 
 extern int dst_discard(struct sk_buff *skb);
-extern void * dst_alloc(struct dst_ops * ops);
+extern void *dst_alloc(struct dst_ops * ops, int initial_ref);
 extern void __dst_free(struct dst_entry * dst);
 extern struct dst_entry *dst_destroy(struct dst_entry * dst);
 
@@ -384,27 +421,22 @@ extern void               dst_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
-       XFRM_LOOKUP_WAIT = 1 << 0,
-       XFRM_LOOKUP_ICMP = 1 << 1,
+       XFRM_LOOKUP_ICMP = 1 << 0,
 };
 
 struct flowi;
 #ifndef CONFIG_XFRM
-static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                             struct flowi *fl, struct sock *sk, int flags)
+static inline struct dst_entry *xfrm_lookup(struct net *net,
+                                           struct dst_entry *dst_orig,
+                                           const struct flowi *fl, struct sock *sk,
+                                           int flags)
 {
-       return 0;
+       return dst_orig;
 } 
-static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                               struct flowi *fl, struct sock *sk, int flags)
-{
-       return 0;
-}
 #else
-extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                      struct flowi *fl, struct sock *sk, int flags);
-extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                        struct flowi *fl, struct sock *sk, int flags);
+extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                                    const struct flowi *fl, struct sock *sk,
+                                    int flags);
 #endif
 #endif
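
The reworked dst_entry drops the inline metrics array in favour of a single word holding "array pointer | read-only bit": many routes can share one default array, and only a writer going through cow_metrics() pays for a private copy. Below is a self-contained userspace sketch of that tagging scheme (invented type and helper names, plain malloc instead of the kernel allocator, error handling kept minimal).

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define N_METRICS         16
    #define METRICS_READ_ONLY 0x1UL

    static const uint32_t default_metrics[N_METRICS];  /* shared, all zero */

    struct fake_dst {                   /* stand-in for dst_entry */
        unsigned long _metrics;         /* pointer | read-only bit */
    };

    static uint32_t *metrics_ptr(struct fake_dst *d)
    {
        return (uint32_t *)(d->_metrics & ~METRICS_READ_ONLY);
    }

    /* First write clones the shared array into a private, writable one. */
    static uint32_t *metrics_write_ptr(struct fake_dst *d)
    {
        if (d->_metrics & METRICS_READ_ONLY) {
            uint32_t *p = malloc(N_METRICS * sizeof(*p));

            if (!p)
                return NULL;
            memcpy(p, metrics_ptr(d), N_METRICS * sizeof(*p));
            d->_metrics = (unsigned long)p;
        }
        return metrics_ptr(d);
    }

    int main(void)
    {
        struct fake_dst d = {
            ._metrics = (unsigned long)default_metrics | METRICS_READ_ONLY,
        };
        uint32_t *w;

        printf("metric[0] before write: %u\n", metrics_ptr(&d)[0]);
        w = metrics_write_ptr(&d);      /* triggers the copy */
        if (w)
            w[0] = 1460;
        printf("metric[0] after write: %u, shared default still %u\n",
               metrics_ptr(&d)[0], default_metrics[0]);
        if (!(d._metrics & METRICS_READ_ONLY))
            free(metrics_ptr(&d));
        return 0;
    }

The new dst_metric_set() above follows the same shape: fetch the write pointer and store only if the copy-on-write succeeded.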
 
index 21a320b..dc07463 100644 (file)
@@ -18,6 +18,7 @@ struct dst_ops {
        struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
        unsigned int            (*default_advmss)(const struct dst_entry *);
        unsigned int            (*default_mtu)(const struct dst_entry *);
+       u32 *                   (*cow_metrics)(struct dst_entry *, unsigned long);
        void                    (*destroy)(struct dst_entry *);
        void                    (*ifdown)(struct dst_entry *,
                                          struct net_device *dev, int how);
index 240b7f3..7fe5a0f 100644 (file)
 #include <linux/in6.h>
 #include <asm/atomic.h>
 
-struct flowi {
-       int     oif;
-       int     iif;
-       __u32   mark;
+struct flowi_common {
+       int     flowic_oif;
+       int     flowic_iif;
+       __u32   flowic_mark;
+       __u8    flowic_tos;
+       __u8    flowic_scope;
+       __u8    flowic_proto;
+       __u8    flowic_flags;
+#define FLOWI_FLAG_ANYSRC              0x01
+#define FLOWI_FLAG_PRECOW_METRICS      0x02
+#define FLOWI_FLAG_CAN_SLEEP           0x04
+       __u32   flowic_secid;
+};
 
+union flowi_uli {
+       struct {
+               __be16  sport;
+               __be16  dport;
+       } ports;
+
+       struct {
+               __u8    type;
+               __u8    code;
+       } icmpt;
+
+       struct {
+               __le16  sport;
+               __le16  dport;
+       } dnports;
+
+       __be32          spi;
+       __be32          gre_key;
+
+       struct {
+               __u8    type;
+       } mht;
+};
+
+struct flowi4 {
+       struct flowi_common     __fl_common;
+#define flowi4_oif             __fl_common.flowic_oif
+#define flowi4_iif             __fl_common.flowic_iif
+#define flowi4_mark            __fl_common.flowic_mark
+#define flowi4_tos             __fl_common.flowic_tos
+#define flowi4_scope           __fl_common.flowic_scope
+#define flowi4_proto           __fl_common.flowic_proto
+#define flowi4_flags           __fl_common.flowic_flags
+#define flowi4_secid           __fl_common.flowic_secid
+       __be32                  daddr;
+       __be32                  saddr;
+       union flowi_uli         uli;
+#define fl4_sport              uli.ports.sport
+#define fl4_dport              uli.ports.dport
+#define fl4_icmp_type          uli.icmpt.type
+#define fl4_icmp_code          uli.icmpt.code
+#define fl4_ipsec_spi          uli.spi
+#define fl4_mh_type            uli.mht.type
+#define fl4_gre_key            uli.gre_key
+};
+
+struct flowi6 {
+       struct flowi_common     __fl_common;
+#define flowi6_oif             __fl_common.flowic_oif
+#define flowi6_iif             __fl_common.flowic_iif
+#define flowi6_mark            __fl_common.flowic_mark
+#define flowi6_tos             __fl_common.flowic_tos
+#define flowi6_scope           __fl_common.flowic_scope
+#define flowi6_proto           __fl_common.flowic_proto
+#define flowi6_flags           __fl_common.flowic_flags
+#define flowi6_secid           __fl_common.flowic_secid
+       struct in6_addr         daddr;
+       struct in6_addr         saddr;
+       __be32                  flowlabel;
+       union flowi_uli         uli;
+#define fl6_sport              uli.ports.sport
+#define fl6_dport              uli.ports.dport
+#define fl6_icmp_type          uli.icmpt.type
+#define fl6_icmp_code          uli.icmpt.code
+#define fl6_ipsec_spi          uli.spi
+#define fl6_mh_type            uli.mht.type
+#define fl6_gre_key            uli.gre_key
+};
+
+struct flowidn {
+       struct flowi_common     __fl_common;
+#define flowidn_oif            __fl_common.flowic_oif
+#define flowidn_iif            __fl_common.flowic_iif
+#define flowidn_mark           __fl_common.flowic_mark
+#define flowidn_scope          __fl_common.flowic_scope
+#define flowidn_proto          __fl_common.flowic_proto
+#define flowidn_flags          __fl_common.flowic_flags
+       __le16                  daddr;
+       __le16                  saddr;
+       union flowi_uli         uli;
+#define fld_sport              uli.ports.sport
+#define fld_dport              uli.ports.dport
+};
+
+struct flowi {
        union {
-               struct {
-                       __be32                  daddr;
-                       __be32                  saddr;
-                       __u8                    tos;
-                       __u8                    scope;
-               } ip4_u;
-               
-               struct {
-                       struct in6_addr         daddr;
-                       struct in6_addr         saddr;
-                       __be32                  flowlabel;
-               } ip6_u;
-
-               struct {
-                       __le16                  daddr;
-                       __le16                  saddr;
-                       __u8                    scope;
-               } dn_u;
-       } nl_u;
-#define fld_dst                nl_u.dn_u.daddr
-#define fld_src                nl_u.dn_u.saddr
-#define fld_scope      nl_u.dn_u.scope
-#define fl6_dst                nl_u.ip6_u.daddr
-#define fl6_src                nl_u.ip6_u.saddr
-#define fl6_flowlabel  nl_u.ip6_u.flowlabel
-#define fl4_dst                nl_u.ip4_u.daddr
-#define fl4_src                nl_u.ip4_u.saddr
-#define fl4_tos                nl_u.ip4_u.tos
-#define fl4_scope      nl_u.ip4_u.scope
-
-       __u8    proto;
-       __u8    flags;
-#define FLOWI_FLAG_ANYSRC 0x01
-       union {
-               struct {
-                       __be16  sport;
-                       __be16  dport;
-               } ports;
-
-               struct {
-                       __u8    type;
-                       __u8    code;
-               } icmpt;
-
-               struct {
-                       __le16  sport;
-                       __le16  dport;
-               } dnports;
-
-               __be32          spi;
-               __be32          gre_key;
-
-               struct {
-                       __u8    type;
-               } mht;
-       } uli_u;
-#define fl_ip_sport    uli_u.ports.sport
-#define fl_ip_dport    uli_u.ports.dport
-#define fl_icmp_type   uli_u.icmpt.type
-#define fl_icmp_code   uli_u.icmpt.code
-#define fl_ipsec_spi   uli_u.spi
-#define fl_mh_type     uli_u.mht.type
-#define fl_gre_key     uli_u.gre_key
-       __u32           secid;  /* used by xfrm; see secid.txt */
+               struct flowi_common     __fl_common;
+               struct flowi4           ip4;
+               struct flowi6           ip6;
+               struct flowidn          dn;
+       } u;
+#define flowi_oif      u.__fl_common.flowic_oif
+#define flowi_iif      u.__fl_common.flowic_iif
+#define flowi_mark     u.__fl_common.flowic_mark
+#define flowi_tos      u.__fl_common.flowic_tos
+#define flowi_scope    u.__fl_common.flowic_scope
+#define flowi_proto    u.__fl_common.flowic_proto
+#define flowi_flags    u.__fl_common.flowic_flags
+#define flowi_secid    u.__fl_common.flowic_secid
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
+static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
+{
+       return container_of(fl4, struct flowi, u.ip4);
+}
+
+static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
+{
+       return container_of(fl6, struct flowi, u.ip6);
+}
+
+static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+{
+       return container_of(fldn, struct flowi, u.dn);
+}
+
 #define FLOW_DIR_IN    0
 #define FLOW_DIR_OUT   1
 #define FLOW_DIR_FWD   2
@@ -101,20 +159,14 @@ struct flow_cache_ops {
 };
 
 typedef struct flow_cache_object *(*flow_resolve_t)(
-               struct net *net, struct flowi *key, u16 family,
+               struct net *net, const struct flowi *key, u16 family,
                u8 dir, struct flow_cache_object *oldobj, void *ctx);
 
 extern struct flow_cache_object *flow_cache_lookup(
-               struct net *net, struct flowi *key, u16 family,
+               struct net *net, const struct flowi *key, u16 family,
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
 extern atomic_t flow_cache_genid;
 
-static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
-{
-       return (fl1->proto == fl2->proto &&
-               !memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
-}
-
 #endif
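
The new flowi layout is the familiar "common header first, union of per-family keys" arrangement: generic code touches only flowi_common, and flowi4_to_flowi() and friends recover the enclosing union with container_of. A compact userspace sketch of the same pattern, with stand-in types (the real structs carry far more fields):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct flowc {                  /* ~ struct flowi_common */
        int oif;
        uint8_t proto;
    };

    struct flow4 {                  /* ~ struct flowi4 */
        struct flowc c;             /* must stay the first member */
        uint32_t daddr, saddr;
    };

    struct flow {                   /* ~ struct flowi */
        union {
            struct flowc c;
            struct flow4 ip4;
        } u;
    };

    static struct flow *flow4_to_flow(struct flow4 *fl4)
    {
        return container_of(fl4, struct flow, u.ip4);
    }

    int main(void)
    {
        struct flow fl = { .u.ip4 = { .c = { .oif = 2, .proto = 6 },
                                      .daddr = 0x0100007f } };
        struct flow4 *fl4 = &fl.u.ip4;

        /* family-agnostic code only needs the common part */
        printf("oif %d proto %d\n", flow4_to_flow(fl4)->u.c.oif,
               flow4_to_flow(fl4)->u.c.proto);
        return 0;
    }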
index 8a64b81..b4c7c1c 100644 (file)
@@ -195,7 +195,8 @@ static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
  */
 static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
 {
-       nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+       if (hdr)
+               nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
 }
 
 /**
index 6e991e0..f0698b9 100644 (file)
@@ -45,7 +45,4 @@ extern int    icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int     icmp_init(void);
 extern void    icmp_out_count(struct net *net, unsigned char type);
 
-/* Move into dst.h ? */
-extern int     xrlim_allow(struct dst_entry *dst, int timeout);
-
 #endif /* _ICMP_H */
index af49f8a..b0be5fb 100644 (file)
@@ -178,6 +178,11 @@ struct ieee80211_radiotap_header {
  *
  *     Number of unicast retries a transmitted frame used.
  *
+ * IEEE80211_RADIOTAP_MCS      u8, u8, u8              unitless
+ *
+ *     Contains a bitmap of known fields/flags, the flags, and
+ *     the MCS index.
+ *
  */
 enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_TSFT = 0,
@@ -199,6 +204,8 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_RTS_RETRIES = 16,
        IEEE80211_RADIOTAP_DATA_RETRIES = 17,
 
+       IEEE80211_RADIOTAP_MCS = 19,
+
        /* valid in every it_present bitmap, even vendor namespaces */
        IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
        IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
@@ -245,6 +252,24 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_F_TX_CTS    0x0002  /* used cts 'protection' */
 #define IEEE80211_RADIOTAP_F_TX_RTS    0x0004  /* used rts/cts handshake */
 
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW         0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS                0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI         0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT                0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC                0x10
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK         0x03
+#define                IEEE80211_RADIOTAP_MCS_BW_20    0
+#define                IEEE80211_RADIOTAP_MCS_BW_40    1
+#define                IEEE80211_RADIOTAP_MCS_BW_20L   2
+#define                IEEE80211_RADIOTAP_MCS_BW_20U   3
+#define IEEE80211_RADIOTAP_MCS_SGI             0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
+
+
 /* Ugly macro to convert literal channel numbers into their MHz equivalents
  * There are certainly some conditions that will break this (like feeding it '30')
  * but they shouldn't arise since nothing talks on channel 30. */
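
The new IEEE80211_RADIOTAP_MCS field is three bytes: a "known" bitmap, the flags, and the MCS index. A small userspace decoder for such a field, using the bit values defined above (the sample bytes are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define MCS_HAVE_BW   0x01
    #define MCS_HAVE_MCS  0x02
    #define MCS_HAVE_GI   0x04
    #define MCS_BW_MASK   0x03
    #define MCS_SGI       0x04

    static void print_mcs(const uint8_t mcs[3])
    {
        uint8_t known = mcs[0], flags = mcs[1], index = mcs[2];

        if (known & MCS_HAVE_MCS)
            printf("MCS index %d\n", index);
        if (known & MCS_HAVE_BW)
            printf("bandwidth code %d (0=20, 1=40, 2=20L, 3=20U)\n",
                   flags & MCS_BW_MASK);
        if (known & MCS_HAVE_GI)
            printf("guard interval: %s\n",
                   (flags & MCS_SGI) ? "short" : "long");
    }

    int main(void)
    {
        const uint8_t example[3] = {    /* made-up capture bytes */
            MCS_HAVE_BW | MCS_HAVE_MCS | MCS_HAVE_GI,
            0x05,   /* 40 MHz + short GI */
            7,      /* MCS 7 */
        };

        print_mcs(example);
        return 0;
    }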
index 8181498..7a37369 100644 (file)
@@ -86,6 +86,19 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
        return (struct inet_request_sock *)sk;
 }
 
+struct inet_cork {
+       unsigned int            flags;
+       unsigned int            fragsize;
+       struct ip_options       *opt;
+       struct dst_entry        *dst;
+       int                     length; /* Total length of all frames */
+       __be32                  addr;
+       struct flowi            fl;
+       struct page             *page;
+       u32                     off;
+       u8                      tx_flags;
+};
+
 struct ip_mc_socklist;
 struct ipv6_pinfo;
 struct rtable;
@@ -143,15 +156,7 @@ struct inet_sock {
        int                     mc_index;
        __be32                  mc_addr;
        struct ip_mc_socklist __rcu     *mc_list;
-       struct {
-               unsigned int            flags;
-               unsigned int            fragsize;
-               struct ip_options       *opt;
-               struct dst_entry        *dst;
-               int                     length; /* Total length of all frames */
-               __be32                  addr;
-               struct flowi            fl;
-       } cork;
+       struct inet_cork        cork;
 };
 
 #define IPCORK_OPT     1       /* ip-options has been held in ipcork.opt */
@@ -219,7 +224,13 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops
 
 static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
-       return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+       __u8 flags = 0;
+
+       if (inet_sk(sk)->transparent)
+               flags |= FLOWI_FLAG_ANYSRC;
+       if (sk->sk_protocol == IPPROTO_TCP)
+               flags |= FLOWI_FLAG_PRECOW_METRICS;
+       return flags;
 }
 
 #endif /* _INET_SOCK_H */
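
inet_sk_flowi_flags() now accumulates flags instead of returning a ternary: transparent sockets still request ANYSRC, and TCP sockets additionally ask for pre-copied (writable) route metrics. A trivial restatement of that composition outside the kernel, with stand-in parameters and constants mirroring the flow.h values:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_ANYSRC         0x01
    #define FLAG_PRECOW_METRICS 0x02

    /* Stand-in for the kernel helper; socket fields become plain flags. */
    static uint8_t flowi_flags(int transparent, int is_tcp)
    {
        uint8_t flags = 0;

        if (transparent)
            flags |= FLAG_ANYSRC;
        if (is_tcp)
            flags |= FLAG_PRECOW_METRICS;
        return flags;
    }

    int main(void)
    {
        printf("transparent TCP socket -> 0x%02x\n", flowi_flags(1, 1));
        return 0;
    }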
index 599d96e..e6dd8da 100644 (file)
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
 #include <net/ipv6.h>
 #include <asm/atomic.h>
 
-struct inetpeer_addr {
+struct inetpeer_addr_base {
        union {
-               __be32          a4;
-               __be32          a6[4];
+               __be32                  a4;
+               __be32                  a6[4];
        };
-       __u16   family;
+};
+
+struct inetpeer_addr {
+       struct inetpeer_addr_base       addr;
+       __u16                           family;
 };
 
 struct inet_peer {
@@ -33,15 +38,22 @@ struct inet_peer {
        atomic_t                refcnt;
        /*
         * Once inet_peer is queued for deletion (refcnt == -1), following fields
-        * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
-        * We can share memory with rcu_head to keep inet_peer small
+        * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+        * We can share memory with rcu_head to help keep inet_peer small.
         */
        union {
                struct {
-                       atomic_t        rid;            /* Frag reception counter */
-                       atomic_t        ip_id_count;    /* IP ID for the next packet */
-                       __u32           tcp_ts;
-                       __u32           tcp_ts_stamp;
+                       atomic_t                        rid;            /* Frag reception counter */
+                       atomic_t                        ip_id_count;    /* IP ID for the next packet */
+                       __u32                           tcp_ts;
+                       __u32                           tcp_ts_stamp;
+                       u32                             metrics[RTAX_MAX];
+                       u32                             rate_tokens;    /* rate limiting for ICMP */
+                       unsigned long                   rate_last;
+                       unsigned long                   pmtu_expires;
+                       u32                             pmtu_orig;
+                       u32                             pmtu_learned;
+                       struct inetpeer_addr_base       redirect_learned;
                };
                struct rcu_head         rcu;
        };
@@ -49,6 +61,13 @@ struct inet_peer {
 
 void                   inet_initpeers(void) __init;
 
+#define INETPEER_METRICS_NEW   (~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+       return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
+
 /* can be called with or without local BH being disabled */
 struct inet_peer       *inet_getpeer(struct inetpeer_addr *daddr, int create);
 
@@ -56,7 +75,7 @@ static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
 {
        struct inetpeer_addr daddr;
 
-       daddr.a4 = v4daddr;
+       daddr.addr.a4 = v4daddr;
        daddr.family = AF_INET;
        return inet_getpeer(&daddr, create);
 }
@@ -65,13 +84,14 @@ static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int cr
 {
        struct inetpeer_addr daddr;
 
-       ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+       ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
        daddr.family = AF_INET6;
        return inet_getpeer(&daddr, create);
 }
 
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
+extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
 /*
 * temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
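
Moving the ICMP rate-limit state (rate_tokens, rate_last) from dst_entry into inet_peer goes hand in hand with the new inet_peer_xrlim_allow() helper. The sketch below restates the token-bucket check in userspace, using whole seconds instead of jiffies and assuming the historical burst factor of 6; it is a simplification, not the kernel function.

    #include <stdio.h>
    #include <time.h>

    struct fake_peer {                  /* stand-in for inet_peer */
        unsigned long rate_tokens;
        unsigned long rate_last;
    };

    static int peer_xrlim_allow(struct fake_peer *p, unsigned long timeout)
    {
        unsigned long now = (unsigned long)time(NULL);
        unsigned long token = p->rate_tokens + (now - p->rate_last);
        int allow = 0;

        p->rate_last = now;
        if (token > 6 * timeout)        /* cap the burst (factor 6 assumed) */
            token = 6 * timeout;
        if (token >= timeout) {         /* spend one quantum per allowed pkt */
            token -= timeout;
            allow = 1;
        }
        p->rate_tokens = token;
        return allow;
    }

    int main(void)
    {
        struct fake_peer peer = {
            .rate_tokens = 0,
            .rate_last = (unsigned long)time(NULL),
        };
        int first = peer_xrlim_allow(&peer, 1);
        int second = peer_xrlim_allow(&peer, 1);

        /* with an empty bucket, back-to-back calls in the same second
         * are refused until enough time has accrued */
        printf("first: %d, second: %d\n", first, second);
        return 0;
    }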
index 67fac78..a4f6311 100644 (file)
@@ -116,8 +116,24 @@ extern int         ip_append_data(struct sock *sk,
 extern int             ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
 extern ssize_t         ip_append_page(struct sock *sk, struct page *page,
                                int offset, size_t size, int flags);
+extern struct sk_buff  *__ip_make_skb(struct sock *sk,
+                                     struct sk_buff_head *queue,
+                                     struct inet_cork *cork);
+extern int             ip_send_skb(struct sk_buff *skb);
 extern int             ip_push_pending_frames(struct sock *sk);
 extern void            ip_flush_pending_frames(struct sock *sk);
+extern struct sk_buff  *ip_make_skb(struct sock *sk,
+                                   int getfrag(void *from, char *to, int offset, int len,
+                                               int odd, struct sk_buff *skb),
+                                   void *from, int length, int transhdrlen,
+                                   struct ipcm_cookie *ipc,
+                                   struct rtable **rtp,
+                                   unsigned int flags);
+
+static inline struct sk_buff *ip_finish_skb(struct sock *sk)
+{
+       return __ip_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
+}
 
 /* datagram.c */
 extern int             ip4_datagram_connect(struct sock *sk, 
index 708ff7c..bc3cde0 100644 (file)
@@ -108,6 +108,7 @@ struct rt6_info {
        u32                             rt6i_flags;
        struct rt6key                   rt6i_src;
        u32                             rt6i_metric;
+       u32                             rt6i_peer_genid;
 
        struct inet6_dev                *rt6i_idev;
        struct inet_peer                *rt6i_peer;
@@ -182,7 +183,7 @@ struct fib6_table {
 
 typedef struct rt6_info *(*pol_lookup_t)(struct net *,
                                         struct fib6_table *,
-                                        struct flowi *, int);
+                                        struct flowi6 *, int);
 
 /*
  *     exported functions
@@ -191,7 +192,7 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,
 extern struct fib6_table        *fib6_get_table(struct net *net, u32 id);
 extern struct fib6_table        *fib6_new_table(struct net *net, u32 id);
 extern struct dst_entry         *fib6_rule_lookup(struct net *net,
-                                                 struct flowi *fl, int flags,
+                                                 struct flowi6 *fl6, int flags,
                                                  pol_lookup_t lookup);
 
 extern struct fib6_node                *fib6_lookup(struct fib6_node *root,
index 8552f0a..642a80b 100644 (file)
@@ -71,7 +71,7 @@ extern void                   ip6_route_input(struct sk_buff *skb);
 
 extern struct dst_entry *      ip6_route_output(struct net *net,
                                                 struct sock *sk,
-                                                struct flowi *fl);
+                                                struct flowi6 *fl6);
 
 extern int                     ip6_route_init(void);
 extern void                    ip6_route_cleanup(void);
index 07bdb5e..a1a8580 100644 (file)
@@ -51,15 +51,17 @@ struct fib_nh {
        struct fib_info         *nh_parent;
        unsigned                nh_flags;
        unsigned char           nh_scope;
+       unsigned char           nh_cfg_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        int                     nh_weight;
        int                     nh_power;
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   nh_tclassid;
 #endif
        int                     nh_oif;
        __be32                  nh_gw;
+       __be32                  nh_saddr;
 };
 
 /*
@@ -77,7 +79,7 @@ struct fib_info {
        int                     fib_protocol;
        __be32                  fib_prefsrc;
        u32                     fib_priority;
-       u32                     fib_metrics[RTAX_MAX];
+       u32                     *fib_metrics;
 #define fib_mtu fib_metrics[RTAX_MTU-1]
 #define fib_window fib_metrics[RTAX_WINDOW-1]
 #define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,12 +98,15 @@ struct fib_info {
 struct fib_rule;
 #endif
 
+struct fib_table;
 struct fib_result {
        unsigned char   prefixlen;
        unsigned char   nh_sel;
        unsigned char   type;
        unsigned char   scope;
        struct fib_info *fi;
+       struct fib_table *table;
+       struct list_head *fa_head;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        struct fib_rule *r;
 #endif
@@ -136,11 +141,13 @@ struct fib_result_nl {
 
 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
-#define FIB_RES_PREFSRC(res)           ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res))
+#define FIB_RES_SADDR(res)             (FIB_RES_NH(res).nh_saddr)
 #define FIB_RES_GW(res)                        (FIB_RES_NH(res).nh_gw)
 #define FIB_RES_DEV(res)               (FIB_RES_NH(res).nh_dev)
 #define FIB_RES_OIF(res)               (FIB_RES_NH(res).nh_oif)
 
+#define FIB_RES_PREFSRC(res)           ((res).fi->fib_prefsrc ? : FIB_RES_SADDR(res))
+
 struct fib_table {
        struct hlist_node tb_hlist;
        u32             tb_id;
@@ -148,16 +155,13 @@ struct fib_table {
        unsigned char   tb_data[0];
 };
 
-extern int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
+extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                            struct fib_result *res, int fib_flags);
 extern int fib_table_insert(struct fib_table *, struct fib_config *);
 extern int fib_table_delete(struct fib_table *, struct fib_config *);
 extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
                          struct netlink_callback *cb);
 extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
-                                    const struct flowi *flp,
-                                    struct fib_result *res);
 extern void fib_free_table(struct fib_table *tb);
 
 
@@ -182,7 +186,7 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
        return fib_get_table(net, id);
 }
 
-static inline int fib_lookup(struct net *net, const struct flowi *flp,
+static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
                             struct fib_result *res)
 {
        struct fib_table *table;
@@ -201,11 +205,11 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
 extern int __net_init fib4_rules_init(struct net *net);
 extern void __net_exit fib4_rules_exit(struct net *net);
 
-#ifdef CONFIG_NET_CLS_ROUTE
-extern u32 fib_rules_tclass(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+extern u32 fib_rules_tclass(const struct fib_result *res);
 #endif
 
-extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res);
+extern int fib_lookup(struct net *n, struct flowi4 *flp, struct fib_result *res);
 
 extern struct fib_table *fib_new_table(struct net *net, u32 id);
 extern struct fib_table *fib_get_table(struct net *net, u32 id);
@@ -218,24 +222,23 @@ extern void               ip_fib_init(void);
 extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                               struct net_device *dev, __be32 *spec_dst,
                               u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
-                              struct fib_result *res);
+extern void fib_select_default(struct fib_result *res);
 
 /* Exported by fib_semantics.c */
 extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
 extern int fib_sync_down_dev(struct net_device *dev, int force);
 extern int fib_sync_down_addr(struct net *net, __be32 local);
+extern void fib_update_nh_saddrs(struct net_device *dev);
 extern int fib_sync_up(struct net_device *dev);
-extern __be32  __fib_res_prefsrc(struct fib_result *res);
-extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
+extern void fib_select_multipath(struct fib_result *res);
 
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
+/* Exported by fib_trie.c */
+extern void fib_trie_init(void);
+extern struct fib_table *fib_trie_table(u32 id);
 
-static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        u32 rtag;
 #endif
index b7bbd6c..272f593 100644 (file)
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
+#include <net/net_namespace.h>         /* Network namespace */
+
+/*
+ * Generic access of ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+       return net->ipvs;
+}
+/*
+ * Get net ptr from skb in traffic cases
+ * use skb_sknet when call is from userland (ioctl or netlink)
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+       /*
+        * This is used for debug only.
+        * Start with the most likely hit
+        * End with BUG
+        */
+       if (likely(skb->dev && skb->dev->nd_net))
+               return dev_net(skb->dev);
+       if (skb_dst(skb)->dev)
+               return dev_net(skb_dst(skb)->dev);
+       WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+                     __func__, __LINE__);
+       if (likely(skb->sk && skb->sk->sk_net))
+               return sock_net(skb->sk);
+       pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+               __func__, __LINE__);
+       BUG();
+#else
+       return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+       return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+       /* Start with the most likely hit */
+       if (likely(skb->sk && skb->sk->sk_net))
+               return sock_net(skb->sk);
+       WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+                      __func__, __LINE__);
+       if (likely(skb->dev && skb->dev->nd_net))
+               return dev_net(skb->dev);
+       pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+               __func__, __LINE__);
+       BUG();
+#else
+       return sock_net(skb->sk);
+#endif
+#else
+       return &init_net;
+#endif
+}
+/*
+ * This one is needed by single_open_net() since net is stored directly in
+ * seq->private, not as a struct, i.e. seq_file_net() cannot be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+       return (struct net *)seq->private;
+#else
+       return &init_net;
+#endif
+}
 
 /* Connections' size value needed by ip_vs_ctl.c */
 extern int ip_vs_conn_tab_size;
@@ -258,6 +332,23 @@ struct ip_vs_seq {
                                                   before last resized pkt */
 };
 
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+       __u32           conns;          /* connections scheduled */
+       __u32           inpkts;         /* incoming packets */
+       __u32           outpkts;        /* outgoing packets */
+       __u64           inbytes;        /* incoming bytes */
+       __u64           outbytes;       /* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+       struct ip_vs_counters   ustats;
+       struct u64_stats_sync   syncp;
+};
 
 /*
  *     IPVS statistics objects
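
The per-cpu ip_vs_cpu_stats pairs its counters with a u64_stats_sync so that the 64-bit byte counters can be read consistently on 32-bit machines without taking a lock. A single-threaded userspace sketch of the underlying sequence-counter idea follows (no atomics or memory barriers, so it only illustrates the shape):

    #include <stdint.h>
    #include <stdio.h>

    struct cpu_stats {
        unsigned int seq;               /* odd while an update is in flight */
        uint64_t inbytes;
        uint64_t outbytes;
    };

    static void stats_add(struct cpu_stats *s, uint64_t in, uint64_t out)
    {
        s->seq++;                       /* begin update */
        s->inbytes += in;
        s->outbytes += out;
        s->seq++;                       /* end update */
    }

    static void stats_read(const struct cpu_stats *s,
                           uint64_t *in, uint64_t *out)
    {
        unsigned int start;

        do {                            /* retry if a writer was active */
            start = s->seq;
            *in = s->inbytes;
            *out = s->outbytes;
        } while ((start & 1) || start != s->seq);
    }

    int main(void)
    {
        struct cpu_stats s = { 0, 0, 0 };
        uint64_t in, out;

        stats_add(&s, 1500, 40);
        stats_read(&s, &in, &out);
        printf("in %llu out %llu\n",
               (unsigned long long)in, (unsigned long long)out);
        return 0;
    }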
@@ -279,10 +370,11 @@ struct ip_vs_estimator {
 };
 
 struct ip_vs_stats {
-       struct ip_vs_stats_user ustats;         /* statistics */
+       struct ip_vs_stats_user ustats;         /* statistics */
        struct ip_vs_estimator  est;            /* estimator */
-
-       spinlock_t              lock;           /* spin lock */
+       struct ip_vs_cpu_stats  *cpustats;      /* per cpu counters */
+       spinlock_t              lock;           /* spin lock */
+       struct ip_vs_stats_user ustats0;        /* reset values */
 };
 
 struct dst_entry;
@@ -290,6 +382,7 @@ struct iphdr;
 struct ip_vs_conn;
 struct ip_vs_app;
 struct sk_buff;
+struct ip_vs_proto_data;
 
 struct ip_vs_protocol {
        struct ip_vs_protocol   *next;
@@ -297,21 +390,22 @@ struct ip_vs_protocol {
        u16                     protocol;
        u16                     num_states;
        int                     dont_defrag;
-       atomic_t                appcnt;         /* counter of proto app incs */
-       int                     *timeout_table; /* protocol timeout table */
 
        void (*init)(struct ip_vs_protocol *pp);
 
        void (*exit)(struct ip_vs_protocol *pp);
 
+       void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+       void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
        int (*conn_schedule)(int af, struct sk_buff *skb,
-                            struct ip_vs_protocol *pp,
+                            struct ip_vs_proto_data *pd,
                             int *verdict, struct ip_vs_conn **cpp);
 
        struct ip_vs_conn *
        (*conn_in_get)(int af,
                       const struct sk_buff *skb,
-                      struct ip_vs_protocol *pp,
                       const struct ip_vs_iphdr *iph,
                       unsigned int proto_off,
                       int inverse);
@@ -319,7 +413,6 @@ struct ip_vs_protocol {
        struct ip_vs_conn *
        (*conn_out_get)(int af,
                        const struct sk_buff *skb,
-                       struct ip_vs_protocol *pp,
                        const struct ip_vs_iphdr *iph,
                        unsigned int proto_off,
                        int inverse);
@@ -337,11 +430,11 @@ struct ip_vs_protocol {
 
        int (*state_transition)(struct ip_vs_conn *cp, int direction,
                                const struct sk_buff *skb,
-                               struct ip_vs_protocol *pp);
+                               struct ip_vs_proto_data *pd);
 
-       int (*register_app)(struct ip_vs_app *inc);
+       int (*register_app)(struct net *net, struct ip_vs_app *inc);
 
-       void (*unregister_app)(struct ip_vs_app *inc);
+       void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
 
        int (*app_conn_bind)(struct ip_vs_conn *cp);
 
@@ -350,14 +443,26 @@ struct ip_vs_protocol {
                             int offset,
                             const char *msg);
 
-       void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+       void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
 
-       int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+       struct ip_vs_proto_data *next;
+       struct ip_vs_protocol   *pp;
+       int                     *timeout_table; /* protocol timeout table */
+       atomic_t                appcnt;         /* counter of proto app incs. */
+       struct tcp_states_t     *tcp_state_table;
 };
 
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+                                                    unsigned short proto);
 
 struct ip_vs_conn_param {
+       struct net                      *net;
        const union nf_inet_addr        *caddr;
        const union nf_inet_addr        *vaddr;
        __be16                          cport;
@@ -374,17 +479,20 @@ struct ip_vs_conn_param {
  *     IP_VS structure allocated for each dynamically scheduled connection
  */
 struct ip_vs_conn {
-       struct list_head        c_list;         /* hashed list heads */
-
+       struct hlist_node       c_list;         /* hashed list heads */
+#ifdef CONFIG_NET_NS
+       struct net              *net;           /* Name space */
+#endif
        /* Protocol, addresses and port numbers */
-       u16                      af;            /* address family */
-       union nf_inet_addr       caddr;          /* client address */
-       union nf_inet_addr       vaddr;          /* virtual address */
-       union nf_inet_addr       daddr;          /* destination address */
-       volatile __u32           flags;          /* status flags */
-       __be16                   cport;
-       __be16                   vport;
-       __be16                   dport;
+       u16                     af;             /* address family */
+       __be16                  cport;
+       __be16                  vport;
+       __be16                  dport;
+       __u32                   fwmark;         /* Firewall mark from skb */
+       union nf_inet_addr      caddr;          /* client address */
+       union nf_inet_addr      vaddr;          /* virtual address */
+       union nf_inet_addr      daddr;          /* destination address */
+       volatile __u32          flags;          /* status flags */
        __u16                   protocol;       /* Which protocol (TCP/UDP) */
 
        /* counter and timer */
@@ -422,10 +530,38 @@ struct ip_vs_conn {
        struct ip_vs_seq        in_seq;         /* incoming seq. struct */
        struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 
+       const struct ip_vs_pe   *pe;
        char                    *pe_data;
        __u8                    pe_data_len;
 };
 
+/*
+ *  To save some memory in the conn table when network namespaces are disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+       return cp->net;
+#else
+       return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+       cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+                                   struct net *net)
+{
+#ifdef CONFIG_NET_NS
+       return cp->net == net;
+#else
+       return 1;
+#endif
+}
 
 /*
  *     Extended internal versions of struct ip_vs_service_user and
@@ -485,6 +621,7 @@ struct ip_vs_service {
        unsigned                flags;    /* service status flags */
        unsigned                timeout;  /* persistent timeout in ticks */
        __be32                  netmask;  /* grouping granularity */
+       struct net              *net;
 
        struct list_head        destinations;  /* real server d-linked list */
        __u32                   num_dests;     /* number of servers */
@@ -510,8 +647,8 @@ struct ip_vs_dest {
        struct list_head        d_list;   /* for table with all the dests */
 
        u16                     af;             /* address family */
-       union nf_inet_addr      addr;           /* IP address of the server */
        __be16                  port;           /* port number of the server */
+       union nf_inet_addr      addr;           /* IP address of the server */
        volatile unsigned       flags;          /* dest status flags */
        atomic_t                conn_flags;     /* flags to copy to conn */
        atomic_t                weight;         /* server weight */
@@ -538,8 +675,8 @@ struct ip_vs_dest {
        /* for virtual service */
        struct ip_vs_service    *svc;           /* service it belongs to */
        __u16                   protocol;       /* which protocol (TCP/UDP) */
-       union nf_inet_addr      vaddr;          /* virtual IP address */
        __be16                  vport;          /* virtual port number */
+       union nf_inet_addr      vaddr;          /* virtual IP address */
        __u32                   vfwmark;        /* firewall mark of service */
 };
 
@@ -651,6 +788,171 @@ struct ip_vs_app {
        void (*timeout_change)(struct ip_vs_app *app, int flags);
 };
 
+/* IPVS in network namespace */
+struct netns_ipvs {
+       int                     gen;            /* Generation */
+       /*
+        *      Hash table: for real service lookups
+        */
+       #define IP_VS_RTAB_BITS 4
+       #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+       #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+       struct list_head        rs_table[IP_VS_RTAB_SIZE];
+       /* ip_vs_app */
+       struct list_head        app_list;
+       struct mutex            app_mutex;
+       struct lock_class_key   app_key;        /* mutex debugging */
+
+       /* ip_vs_proto */
+       #define IP_VS_PROTO_TAB_SIZE    32      /* must be power of 2 */
+       struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+       /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+       #define TCP_APP_TAB_BITS        4
+       #define TCP_APP_TAB_SIZE        (1 << TCP_APP_TAB_BITS)
+       #define TCP_APP_TAB_MASK        (TCP_APP_TAB_SIZE - 1)
+       struct list_head        tcp_apps[TCP_APP_TAB_SIZE];
+       spinlock_t              tcp_app_lock;
+#endif
+       /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+       #define UDP_APP_TAB_BITS        4
+       #define UDP_APP_TAB_SIZE        (1 << UDP_APP_TAB_BITS)
+       #define UDP_APP_TAB_MASK        (UDP_APP_TAB_SIZE - 1)
+       struct list_head        udp_apps[UDP_APP_TAB_SIZE];
+       spinlock_t              udp_app_lock;
+#endif
+       /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+       #define SCTP_APP_TAB_BITS       4
+       #define SCTP_APP_TAB_SIZE       (1 << SCTP_APP_TAB_BITS)
+       #define SCTP_APP_TAB_MASK       (SCTP_APP_TAB_SIZE - 1)
+       /* Hash table for SCTP application incarnations  */
+       struct list_head        sctp_apps[SCTP_APP_TAB_SIZE];
+       spinlock_t              sctp_app_lock;
+#endif
+       /* ip_vs_conn */
+       atomic_t                conn_count;      /*  connection counter */
+
+       /* ip_vs_ctl */
+       struct ip_vs_stats              tot_stats;  /* Statistics & est. */
+
+       int                     num_services;    /* no of virtual services */
+
+       rwlock_t                rs_lock;         /* real services table */
+       /* semaphore for IPVS sockopts; [gs]etsockopt may sleep */
+       struct lock_class_key   ctl_key;        /* ctl_mutex debugging */
+       /* Trash for destinations */
+       struct list_head        dest_trash;
+       /* Service counters */
+       atomic_t                ftpsvc_counter;
+       atomic_t                nullsvc_counter;
+
+#ifdef CONFIG_SYSCTL
+       /* 1/rate drop and drop-entry variables */
+       struct delayed_work     defense_work;   /* Work handler */
+       int                     drop_rate;
+       int                     drop_counter;
+       atomic_t                dropentry;
+       /* locks in ctl.c */
+       spinlock_t              dropentry_lock;  /* drop entry handling */
+       spinlock_t              droppacket_lock; /* drop packet handling */
+       spinlock_t              securetcp_lock;  /* state and timeout tables */
+
+       /* sys-ctl struct */
+       struct ctl_table_header *sysctl_hdr;
+       struct ctl_table        *sysctl_tbl;
+#endif
+
+       /* sysctl variables */
+       int                     sysctl_amemthresh;
+       int                     sysctl_am_droprate;
+       int                     sysctl_drop_entry;
+       int                     sysctl_drop_packet;
+       int                     sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+       int                     sysctl_conntrack;
+#endif
+       int                     sysctl_snat_reroute;
+       int                     sysctl_sync_ver;
+       int                     sysctl_cache_bypass;
+       int                     sysctl_expire_nodest_conn;
+       int                     sysctl_expire_quiescent_template;
+       int                     sysctl_sync_threshold[2];
+       int                     sysctl_nat_icmp_send;
+
+       /* ip_vs_lblc */
+       int                     sysctl_lblc_expiration;
+       struct ctl_table_header *lblc_ctl_header;
+       struct ctl_table        *lblc_ctl_table;
+       /* ip_vs_lblcr */
+       int                     sysctl_lblcr_expiration;
+       struct ctl_table_header *lblcr_ctl_header;
+       struct ctl_table        *lblcr_ctl_table;
+       /* ip_vs_est */
+       struct list_head        est_list;       /* estimator list */
+       spinlock_t              est_lock;
+       struct timer_list       est_timer;      /* Estimation timer */
+       /* ip_vs_sync */
+       struct list_head        sync_queue;
+       spinlock_t              sync_lock;
+       struct ip_vs_sync_buff  *sync_buff;
+       spinlock_t              sync_buff_lock;
+       struct sockaddr_in      sync_mcast_addr;
+       struct task_struct      *master_thread;
+       struct task_struct      *backup_thread;
+       int                     send_mesg_maxlen;
+       int                     recv_mesg_maxlen;
+       volatile int            sync_state;
+       volatile int            master_syncid;
+       volatile int            backup_syncid;
+       /* multicast interface name */
+       char                    master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+       char                    backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+       /* net name space ptr */
+       struct net              *net;            /* Needed by timer routines */
+};
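
A hedged sketch of the access pattern this per-netns container enables; it assumes an accessor that simply returns the net->ipvs pointer added to struct net later in this patch (the helper names below are illustrative, not the patch's own):

#include <net/net_namespace.h>
#include <net/ip_vs.h>

/* illustrative accessor -- the real one is expected to live in ip_vs.h */
static inline struct netns_ipvs *sample_net_ipvs(struct net *net)
{
        return net->ipvs;
}

/* former globals become per-netns state, e.g. the connection counter */
static void sample_note_new_conn(struct net *net)
{
        atomic_inc(&sample_net_ipvs(net)->conn_count);
}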
+
+#define DEFAULT_SYNC_THRESHOLD 3
+#define DEFAULT_SYNC_PERIOD    50
+#define DEFAULT_SYNC_VER       1
+
+#ifdef CONFIG_SYSCTL
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_sync_threshold[0];
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_sync_threshold[1];
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_sync_ver;
+}
+
+#else
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+       return DEFAULT_SYNC_THRESHOLD;
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+       return DEFAULT_SYNC_PERIOD;
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+       return DEFAULT_SYNC_VER;
+}
+
+#endif
 
 /*
  *      IPVS core functions
@@ -674,13 +976,14 @@ enum {
        IP_VS_DIR_LAST,
 };
 
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
                                         const union nf_inet_addr *caddr,
                                         __be16 cport,
                                         const union nf_inet_addr *vaddr,
                                         __be16 vport,
                                         struct ip_vs_conn_param *p)
 {
+       p->net = net;
        p->af = af;
        p->protocol = protocol;
        p->caddr = caddr;
@@ -695,7 +998,6 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-                                           struct ip_vs_protocol *pp,
                                            const struct ip_vs_iphdr *iph,
                                            unsigned int proto_off,
                                            int inverse);
@@ -703,7 +1005,6 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-                                            struct ip_vs_protocol *pp,
                                             const struct ip_vs_iphdr *iph,
                                             unsigned int proto_off,
                                             int inverse);
@@ -719,14 +1020,14 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
                                  const union nf_inet_addr *daddr,
                                  __be16 dport, unsigned flags,
-                                 struct ip_vs_dest *dest);
+                                 struct ip_vs_dest *dest, __u32 fwmark);
 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
 extern const char * ip_vs_state_name(__u16 proto, int state);
 
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
 extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
+extern void ip_vs_random_dropentry(struct net *net);
 extern int ip_vs_conn_init(void);
 extern void ip_vs_conn_cleanup(void);
 
@@ -796,12 +1097,12 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
+                                 __u16 proto, __u16 port);
 extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
 extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
 
@@ -814,15 +1115,27 @@ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
 void ip_vs_unbind_pe(struct ip_vs_service *svc);
 int register_ip_vs_pe(struct ip_vs_pe *pe);
 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
+{
+       if (pe && pe->module)
+               __module_get(pe->module);
+}
+
+static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
+{
+       if (pe && pe->module)
+               module_put(pe->module);
+}
 
 /*
  *     IPVS protocol functions (from ip_vs_proto.c)
  */
 extern int ip_vs_protocol_init(void);
 extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
+extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
 extern int *ip_vs_create_timeout_table(int *table, int size);
 extern int
 ip_vs_set_state_timeout(int *table, int num, const char *const *names,
@@ -852,26 +1165,23 @@ extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
 extern struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-              struct ip_vs_protocol *pp, int *ignored);
+              struct ip_vs_proto_data *pd, int *ignored);
 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-                       struct ip_vs_protocol *pp);
+                       struct ip_vs_proto_data *pd);
+
+extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 
 
 /*
  *      IPVS control data and functions (from ip_vs_ctl.c)
  */
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
 extern struct ip_vs_stats ip_vs_stats;
 extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
 
+extern void ip_vs_sync_switch_mode(struct net *net, int mode);
 extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
                  const union nf_inet_addr *vaddr, __be16 vport);
 
 static inline void ip_vs_service_put(struct ip_vs_service *svc)
@@ -880,7 +1190,7 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
 }
 
 extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
                          const union nf_inet_addr *daddr, __be16 dport);
 
 extern int ip_vs_use_count_inc(void);
@@ -888,8 +1198,9 @@ extern void ip_vs_use_count_dec(void);
 extern int ip_vs_control_init(void);
 extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
-               const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+               __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+               __u16 protocol, __u32 fwmark);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
@@ -897,14 +1208,12 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
  *      IPVS sync daemon data and function prototypes
  *      (from ip_vs_sync.c)
  */
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
+                            __u8 syncid);
+extern int stop_sync_thread(struct net *net, int state);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern int ip_vs_sync_init(void);
+extern void ip_vs_sync_cleanup(void);
 
 
 /*
@@ -912,9 +1221,11 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
  */
 extern int ip_vs_estimator_init(void);
 extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+                                struct ip_vs_stats *stats);
 
 /*
  *     Various IPVS packet transmitters (from ip_vs_xmit.c)
@@ -947,21 +1258,25 @@ extern int ip_vs_icmp_xmit_v6
  int offset);
 #endif
 
+#ifdef CONFIG_SYSCTL
 /*
  *     This is a simple mechanism to ignore packets when
  *     we are loaded. Just set ip_vs_drop_rate to 'n' and
  *     we start to drop 1/rate of the packets
  */
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
 
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
 {
-       if (!ip_vs_drop_rate) return 0;
-       if (--ip_vs_drop_counter > 0) return 0;
-       ip_vs_drop_counter = ip_vs_drop_rate;
+       if (!ipvs->drop_rate)
+               return 0;
+       if (--ipvs->drop_counter > 0)
+               return 0;
+       ipvs->drop_counter = ipvs->drop_rate;
        return 1;
 }
+#else
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
+#endif
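
A brief, hypothetical usage sketch: inside a netfilter hook, returning NF_DROP is sufficient because the hook caller frees the skb. Only ip_vs_todrop() comes from the header above.

#include <linux/netfilter.h>
#include <net/ip_vs.h>

/* hypothetical early check in a per-packet hook */
static unsigned int sample_defense_check(struct netns_ipvs *ipvs)
{
        if (ip_vs_todrop(ipvs))
                return NF_DROP;         /* shed 1/drop_rate of the packets */
        return NF_ACCEPT;
}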
 
 /*
  *      ip_vs_fwd_tag returns the forwarding tag of the connection
@@ -1031,7 +1346,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
        if (!ct || !nf_ct_is_untracked(ct)) {
                nf_reset(skb);
@@ -1047,9 +1362,13 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
  *      Netfilter connection tracking
  *      (from ip_vs_nfct.c)
  */
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 {
-       return sysctl_ip_vs_conntrack;
+#ifdef CONFIG_SYSCTL
+       return ipvs->sysctl_conntrack;
+#else
+       return 0;
+#endif
 }
 
 extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1062,7 +1381,7 @@ extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
 
 #else
 
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 {
        return 0;
 }
@@ -1084,6 +1403,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
+static inline unsigned int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+       /*
+        * We think the overhead of processing active connections is 256
+        * times higher than that of inactive connections on average. (This
+        * factor of 256 may not be accurate; we will tune it later.) We
+        * use the following formula to estimate the overhead now:
+        *                dest->activeconns*256 + dest->inactconns
+        */
+       return (atomic_read(&dest->activeconns) << 8) +
+               atomic_read(&dest->inactconns);
+}
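
As a worked illustration of the formula above (numbers invented): a destination with 10 active and 500 inactive connections scores (10 << 8) + 500 = 3060, while one with 5 active and 2000 inactive scores 1280 + 2000 = 3280, so a least-load scheduler comparing overheads would still prefer the first.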
+
 #endif /* __KERNEL__ */
 
 #endif /* _NET_IP_VS_H */
index 4a3cd2c..34200f9 100644 (file)
 #define IPV6_ADDR_SCOPE_ORGLOCAL       0x08
 #define IPV6_ADDR_SCOPE_GLOBAL         0x0e
 
+/*
+ *     Addr flags
+ */
+#ifdef __KERNEL__
+#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
+       ((a)->s6_addr[1] & 0x10)
+#define IPV6_ADDR_MC_FLAG_PREFIX(a)    \
+       ((a)->s6_addr[1] & 0x20)
+#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a)        \
+       ((a)->s6_addr[1] & 0x40)
+#endif
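
As a worked illustration (address invented): for the multicast group ff32::1, s6_addr[1] is 0x32, so IPV6_ADDR_MC_FLAG_TRANSIENT() and IPV6_ADDR_MC_FLAG_PREFIX() both test non-zero (the T and P flags of RFC 3306), while IPV6_ADDR_MC_FLAG_RENDEZVOUS() is 0; the low nibble, 0x2, is the link-local scope.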
+
 /*
  *     fragmentation header
  */
@@ -480,7 +492,7 @@ extern int                  ip6_rcv_finish(struct sk_buff *skb);
  */
 extern int                     ip6_xmit(struct sock *sk,
                                         struct sk_buff *skb,
-                                        struct flowi *fl,
+                                        struct flowi6 *fl6,
                                         struct ipv6_txoptions *opt);
 
 extern int                     ip6_nd_hdr(struct sock *sk,
@@ -500,7 +512,7 @@ extern int                  ip6_append_data(struct sock *sk,
                                                int hlimit,
                                                int tclass,
                                                struct ipv6_txoptions *opt,
-                                               struct flowi *fl,
+                                               struct flowi6 *fl6,
                                                struct rt6_info *rt,
                                                unsigned int flags,
                                                int dontfrag);
@@ -511,13 +523,17 @@ extern void                       ip6_flush_pending_frames(struct sock *sk);
 
 extern int                     ip6_dst_lookup(struct sock *sk,
                                               struct dst_entry **dst,
-                                              struct flowi *fl);
-extern int                     ip6_dst_blackhole(struct sock *sk,
-                                                 struct dst_entry **dst,
-                                                 struct flowi *fl);
-extern int                     ip6_sk_dst_lookup(struct sock *sk,
-                                                 struct dst_entry **dst,
-                                                 struct flowi *fl);
+                                              struct flowi6 *fl6);
+extern struct dst_entry *      ip6_dst_lookup_flow(struct sock *sk,
+                                                   struct flowi6 *fl6,
+                                                   const struct in6_addr *final_dst,
+                                                   bool can_sleep);
+extern struct dst_entry *      ip6_sk_dst_lookup_flow(struct sock *sk,
+                                                      struct flowi6 *fl6,
+                                                      const struct in6_addr *final_dst,
+                                                      bool can_sleep);
+extern struct dst_entry *      ip6_blackhole_route(struct net *net,
+                                                   struct dst_entry *orig_dst);
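
A hedged sketch of the calling convention the new lookup helpers imply: errors now come back as ERR_PTR values instead of an int return. The wrapper below is hypothetical; only ip6_dst_lookup_flow() is taken from the declarations above.

#include <linux/err.h>
#include <net/dst.h>
#include <net/ipv6.h>

/* hypothetical caller resolving a route for an already-filled flowi6 */
static int sample_route_output(struct sock *sk, struct flowi6 *fl6,
                               struct dst_entry **dstp)
{
        struct dst_entry *dst;

        dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
        if (IS_ERR(dst))
                return PTR_ERR(dst);    /* no separate int error path any more */

        *dstp = dst;
        return 0;
}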
 
 /*
  *     skb processing functions
@@ -550,7 +566,7 @@ extern int                  ipv6_ext_hdr(u8 nexthdr);
 
 extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 
-extern struct in6_addr *fl6_update_dst(struct flowi *fl,
+extern struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                       const struct ipv6_txoptions *opt,
                                       struct in6_addr *orig);
 
@@ -584,8 +600,8 @@ extern int                  ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
 extern int                     ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
 extern void                    ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                                                u32 info, u8 *payload);
-extern void                    ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info);
-extern void                    ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu);
+extern void                    ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+extern void                    ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
 
 extern int inet6_release(struct socket *sock);
 extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr, 
index 62c0ce2..8650e7b 100644 (file)
@@ -341,6 +341,9 @@ struct ieee80211_bss_conf {
  *     the off-channel channel when a remain-on-channel offload is done
  *     in hardware -- normal packets still flow and are expected to be
  *     handled properly by the device.
+ * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
+ *     testing. It will be sent out with an incorrect Michael MIC key to allow
+ *     TKIP countermeasures to be tested.
  *
  * Note: If you have to add new flags to the enumeration, then don't
  *      forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -370,6 +373,7 @@ enum mac80211_tx_control_flags {
        IEEE80211_TX_CTL_LDPC                   = BIT(22),
        IEEE80211_TX_CTL_STBC                   = BIT(23) | BIT(24),
        IEEE80211_TX_CTL_TX_OFFCHAN             = BIT(25),
+       IEEE80211_TX_INTFL_TKIP_MIC_FAILURE     = BIT(26),
 };
 
 #define IEEE80211_TX_CTL_STBC_SHIFT            23
@@ -595,9 +599,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  *     the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
  *     the frame.
- * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
- *     is valid. This is useful in monitor mode and necessary for beacon frames
- *     to enable IBSS merging.
+ * @RX_FLAG_MACTIME_MPDU: The timestamp passed in the RX status (@mactime
+ *     field) is valid and contains the time the first symbol of the MPDU
+ *     was received. This is useful in monitor mode and for proper IBSS
+ *     merging.
  * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
  * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
  * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
@@ -610,7 +615,7 @@ enum mac80211_rx_flags {
        RX_FLAG_IV_STRIPPED     = 1<<4,
        RX_FLAG_FAILED_FCS_CRC  = 1<<5,
        RX_FLAG_FAILED_PLCP_CRC = 1<<6,
-       RX_FLAG_TSFT            = 1<<7,
+       RX_FLAG_MACTIME_MPDU    = 1<<7,
        RX_FLAG_SHORTPRE        = 1<<8,
        RX_FLAG_HT              = 1<<9,
        RX_FLAG_40MHZ           = 1<<10,
@@ -1069,6 +1074,13 @@ enum ieee80211_tkip_key_type {
  *     to decrypt group addressed frames, then IBSS RSN support is still
  *     possible but software crypto will be used. Advertise the wiphy flag
  *     only in that case.
+ *
+ * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
+ *     autonomously manages the PS status of connected stations. When
+ *     this flag is set mac80211 will not trigger PS mode for connected
+ *     stations based on the PM bit of incoming frames.
+ *     Use ieee80211_sta_ps_transition() to manually configure
+ *     the PS mode of connected stations.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1093,6 +1105,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_CONNECTION_MONITOR                 = 1<<19,
        IEEE80211_HW_SUPPORTS_CQM_RSSI                  = 1<<20,
        IEEE80211_HW_SUPPORTS_PER_STA_GTK               = 1<<21,
+       IEEE80211_HW_AP_LINK_PS                         = 1<<22,
 };
 
 /**
@@ -1147,6 +1160,17 @@ enum ieee80211_hw_flags {
  * @napi_weight: weight used for NAPI polling.  You must specify an
  *     appropriate value here if a napi_poll operation is provided
  *     by your driver.
+ *
+ * @max_rx_aggregation_subframes: maximum buffer size (number of
+ *     sub-frames) to be used for A-MPDU block ack receiver
+ *     aggregation.
+ *     This is only relevant if the device has restrictions on the
+ *     number of subframes; if it relies on mac80211 to do reordering
+ *     it shouldn't be set.
+ *
+ * @max_tx_aggregation_subframes: maximum number of subframes in an
+ *     aggregate an HT driver will transmit, used by the peer as a
+ *     hint to size its reorder buffer.
  */
 struct ieee80211_hw {
        struct ieee80211_conf conf;
@@ -1165,6 +1189,8 @@ struct ieee80211_hw {
        u8 max_rates;
        u8 max_report_rates;
        u8 max_rate_tries;
+       u8 max_rx_aggregation_subframes;
+       u8 max_tx_aggregation_subframes;
 };
 
 /**
@@ -1688,7 +1714,9 @@ enum ieee80211_ampdu_mlme_action {
  *     station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
  *
  * @sta_notify: Notifies low level driver about power state transition of an
- *     associated station, AP,  IBSS/WDS/mesh peer etc. Must be atomic.
+ *     associated station, AP,  IBSS/WDS/mesh peer etc. For a VIF operating
+ *     in AP mode, this callback will not be called when the flag
+ *     %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
  *
  * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
  *     bursting) for a hardware TX queue.
@@ -1723,6 +1751,10 @@ enum ieee80211_ampdu_mlme_action {
  *     ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
  *     is the first frame we expect to perform the action on. Notice
  *     that TX/RX_STOP can pass NULL for this parameter.
+ *     The @buf_size parameter is only valid when the action is set to
+ *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
+ *     buffer size (number of subframes) for this session -- aggregates
+ *     containing more subframes than this may not be transmitted to the peer.
  *     Returns a negative error code on failure.
  *     The callback can sleep.
  *
@@ -1767,9 +1799,18 @@ enum ieee80211_ampdu_mlme_action {
  *     ieee80211_remain_on_channel_expired(). This callback may sleep.
  * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
  *     aborted before it expires. This callback may sleep.
+ * @offchannel_tx: Transmit frame on another channel, wait for a response
+ *     and return. Reliable TX status must be reported for the frame. If the
+ *     return value is 1, then the @remain_on_channel will be used with a
+ *     regular transmission (if supported).
+ * @offchannel_tx_cancel_wait: cancel wait associated with offchannel TX
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
  */
 struct ieee80211_ops {
-       int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
        int (*start)(struct ieee80211_hw *hw);
        void (*stop)(struct ieee80211_hw *hw);
        int (*add_interface)(struct ieee80211_hw *hw,
@@ -1825,7 +1866,8 @@ struct ieee80211_ops {
        int (*ampdu_action)(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size);
        int (*get_survey)(struct ieee80211_hw *hw, int idx,
                struct survey_info *survey);
        void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -1845,6 +1887,14 @@ struct ieee80211_ops {
                                 enum nl80211_channel_type channel_type,
                                 int duration);
        int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
+       int (*offchannel_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
+                            struct ieee80211_channel *chan,
+                            enum nl80211_channel_type channel_type,
+                            unsigned int wait);
+       int (*offchannel_tx_cancel_wait)(struct ieee80211_hw *hw);
+       int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
+       void (*get_ringparam)(struct ieee80211_hw *hw,
+                             u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
 };
 
 /**
@@ -2113,6 +2163,48 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
        local_bh_enable();
 }
 
+/**
+ * ieee80211_sta_ps_transition - PS transition for connected sta
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
+ * flag set, use this function to inform mac80211 about a connected station
+ * entering/leaving PS mode.
+ *
+ * This function may not be called in IRQ context or with softirqs enabled.
+ *
+ * Calls to this function for a single hardware must be synchronized against
+ * each other.
+ *
+ * The function returns -EINVAL when the requested PS mode is already set.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);
+
+/**
+ * ieee80211_sta_ps_transition_ni - PS transition for connected sta
+ *                                  (in process context)
+ *
+ * Like ieee80211_sta_ps_transition() but can be called in process context
+ * (internally disables bottom halves). Concurrent call restriction still
+ * applies.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
+                                                 bool start)
+{
+       int ret;
+
+       local_bh_disable();
+       ret = ieee80211_sta_ps_transition(sta, start);
+       local_bh_enable();
+
+       return ret;
+}
+
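
A hedged sketch of how a driver advertising %IEEE80211_HW_AP_LINK_PS might report a station's power-save change; the trigger and function name are invented, and only ieee80211_sta_ps_transition() comes from the declarations above.

#include <linux/kernel.h>
#include <net/mac80211.h>

/* hypothetical: device firmware reported that a station toggled PS */
static void sample_report_sta_ps(struct ieee80211_sta *sta, bool asleep)
{
        /* not called from IRQ context; calls for one hw must be serialized */
        if (ieee80211_sta_ps_transition(sta, asleep) == -EINVAL)
                pr_debug("station already in the requested PS state\n");
}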
 /*
  * The TX headroom reserved by mac80211 for its own tx_status functions.
  * This is enough for the radiotap header.
index 1bf812b..3ae4919 100644 (file)
@@ -27,6 +27,7 @@ struct sock;
 struct ctl_table_header;
 struct net_generic;
 struct sock;
+struct netns_ipvs;
 
 
 #define NETDEV_HASHBITS    8
@@ -94,6 +95,7 @@ struct net {
 #ifdef CONFIG_XFRM
        struct netns_xfrm       xfrm;
 #endif
+       struct netns_ipvs       *ipvs;
 };
 
 
index e82b7ba..22b239c 100644 (file)
@@ -21,7 +21,6 @@ struct netevent_redirect {
 
 enum netevent_notif_type {
        NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
-       NETEVENT_PMTU_UPDATE,      /* arg is struct dst_entry ptr */
        NETEVENT_REDIRECT,         /* arg is struct netevent_redirect ptr */
 };
 
index d85cff1..d0d1337 100644 (file)
@@ -50,11 +50,24 @@ union nf_conntrack_expect_proto {
 /* per conntrack: application helper private data */
 union nf_conntrack_help {
        /* insert conntrack helper private data (master) here */
+#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
        struct nf_ct_ftp_master ct_ftp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
+    defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
        struct nf_ct_pptp_master ct_pptp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_H323) || \
+    defined(CONFIG_NF_CONNTRACK_H323_MODULE)
        struct nf_ct_h323_master ct_h323_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SANE) || \
+    defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
        struct nf_ct_sane_master ct_sane_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
        struct nf_ct_sip_master ct_sip_info;
+#endif
 };
 
 #include <linux/types.h>
@@ -116,14 +129,14 @@ struct nf_conn {
        u_int32_t secmark;
 #endif
 
-       /* Storage reserved for other modules: */
-       union nf_conntrack_proto proto;
-
        /* Extensions */
        struct nf_ct_ext *ext;
 #ifdef CONFIG_NET_NS
        struct net *ct_net;
 #endif
+
+       /* Storage reserved for other modules, must be the last member */
+       union nf_conntrack_proto proto;
 };
 
 static inline struct nf_conn *
@@ -189,9 +202,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto);
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
 
 extern struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
index 96ba5f7..4283508 100644 (file)
@@ -23,12 +23,17 @@ struct nf_conntrack_ecache {
 static inline struct nf_conntrack_ecache *
 nf_ct_ecache_find(const struct nf_conn *ct)
 {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
        return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+       return NULL;
+#endif
 }
 
 static inline struct nf_conntrack_ecache *
 nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
 {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
@@ -45,6 +50,9 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
                e->expmask = expmask;
        }
        return e;
+#else
+       return NULL;
+#endif
 };
 
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -59,7 +67,7 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
+extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
 extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
 extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
 
@@ -77,9 +85,6 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
        if (e == NULL)
                return;
 
-       if (!(e->ctmask & (1 << event)))
-               return;
-
        set_bit(event, &e->cache);
 }
 
@@ -159,7 +164,7 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
+extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
 extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
 extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
 
index 0772d29..2dcf317 100644 (file)
@@ -7,10 +7,19 @@
 
 enum nf_ct_ext_id {
        NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
        NF_CT_EXT_NAT,
+#endif
        NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
        NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
        NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       NF_CT_EXT_TSTAMP,
+#endif
        NF_CT_EXT_NUM,
 };
 
@@ -19,6 +28,7 @@ enum nf_ct_ext_id {
 #define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
 #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
 #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
 
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
index 32c305d..f1c1311 100644 (file)
@@ -63,4 +63,10 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
 extern int nf_conntrack_helper_init(void);
 extern void nf_conntrack_helper_fini(void);
 
+extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
+                                      unsigned int protoff,
+                                      struct nf_conn *ct,
+                                      enum ip_conntrack_info ctinfo,
+                                      unsigned int timeout);
+
 #endif /*_NF_CONNTRACK_HELPER_H*/
index a754761..e8010f4 100644 (file)
@@ -73,7 +73,7 @@ struct nf_conntrack_l3proto {
        struct module *me;
 };
 
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
 
 /* Protocol registration. */
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644 (file)
index 0000000..fc9c82b
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+       u_int64_t start;
+       u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+       return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       struct net *net = nf_ct_net(ct);
+
+       if (!net->ct.sysctl_tstamp)
+               return NULL;
+
+       return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+       return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+       return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+       net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+extern int nf_conntrack_tstamp_init(struct net *net);
+extern void nf_conntrack_tstamp_fini(struct net *net);
+#else
+static inline int nf_conntrack_tstamp_init(struct net *net)
+{
+       return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(struct net *net)
+{
+       return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
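
A hedged sketch of a consumer of the new extension, for instance when a flow is destroyed; the reporting function is invented, and the units of start/stop are whatever the conntrack core stores in them.

#include <linux/kernel.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_timestamp.h>

/* hypothetical: report how long a conntrack entry lived */
static void sample_report_flow_lifetime(const struct nf_conn *ct)
{
        struct nf_conn_tstamp *tstamp = nf_conn_tstamp_find(ct);

        if (tstamp && tstamp->stop >= tstamp->start)
                pr_debug("flow lifetime: %llu\n",
                         (unsigned long long)(tstamp->stop - tstamp->start));
}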
index f5f09f0..aff80b1 100644 (file)
@@ -56,7 +56,9 @@ struct nf_nat_multi_range_compat {
 /* per conntrack: nat application helper private data */
 union nf_conntrack_nat_help {
        /* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
        struct nf_nat_pptp nat_pptp_info;
+#endif
 };
 
 struct nf_conn;
@@ -84,7 +86,11 @@ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
 
 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
 {
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
        return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+       return NULL;
+#endif
 }
 
 #else  /* !__KERNEL__: iptables wants this to compile. */
index 33602ab..3dc7b98 100644 (file)
@@ -21,9 +21,9 @@ static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
 {
        if (manip == IP_NAT_MANIP_SRC)
-               return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+               return ct->status & IPS_SRC_NAT_DONE;
        else
-               return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+               return ct->status & IPS_DST_NAT_DONE;
 }
 
 struct nlattr;
index cd85b3b..e505358 100644 (file)
@@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
 }
 #endif
 
-static inline void
-nf_tproxy_put_sock(struct sock *sk)
-{
-       /* TIME_WAIT inet sockets have to be handled differently */
-       if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
-               inet_twsk_put(inet_twsk(sk));
-       else
-               sock_put(sk);
-}
-
 /* assign a socket to the skb -- consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
 
 #endif
index 373f1a9..8a3906a 100644 (file)
@@ -856,18 +856,27 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
 #define NLA_PUT_BE16(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, __be16, attrtype, value)
 
+#define NLA_PUT_NET16(skb, attrtype, value) \
+       NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_U32(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, u32, attrtype, value)
 
 #define NLA_PUT_BE32(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, __be32, attrtype, value)
 
+#define NLA_PUT_NET32(skb, attrtype, value) \
+       NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_U64(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, u64, attrtype, value)
 
 #define NLA_PUT_BE64(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, __be64, attrtype, value)
 
+#define NLA_PUT_NET64(skb, attrtype, value) \
+       NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_STRING(skb, attrtype, value) \
        NLA_PUT(skb, attrtype, strlen(value) + 1, value)
 
index d4958d4..341eb08 100644 (file)
@@ -21,15 +21,15 @@ struct netns_ct {
        int                     sysctl_events;
        unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_acct;
+       int                     sysctl_tstamp;
        int                     sysctl_checksum;
        unsigned int            sysctl_log_invalid; /* Log invalid packets */
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
        struct ctl_table_header *acct_sysctl_header;
+       struct ctl_table_header *tstamp_sysctl_header;
        struct ctl_table_header *event_sysctl_header;
 #endif
-       int                     hash_vmalloc;
-       int                     expect_vmalloc;
        char                    *slabname;
 };
 #endif
index d68c3f1..e2e2ef5 100644 (file)
@@ -43,7 +43,6 @@ struct netns_ipv4 {
        struct xt_table         *nat_table;
        struct hlist_head       *nat_bysource;
        unsigned int            nat_htable_size;
-       int                     nat_vmalloced;
 #endif
 
        int sysctl_icmp_echo_ignore_all;
index b60b28c..b669fe6 100644 (file)
@@ -28,7 +28,6 @@ struct pep_sock {
 
        /* XXX: union-ify listening vs connected stuff ? */
        /* Listening socket stuff: */
-       struct hlist_head       ackq;
        struct hlist_head       hlist;
 
        /* Connected socket stuff: */
@@ -45,10 +44,6 @@ struct pep_sock {
        u8                      tx_fc;  /* TX flow control */
        u8                      init_enable;    /* auto-enable at creation */
        u8                      aligned;
-#ifdef CONFIG_PHONET_PIPECTRLR
-       u8                      pipe_state;
-       struct sockaddr_pn      remote_pep;
-#endif
 };
 
 static inline struct pep_sock *pep_sk(struct sock *sk)
@@ -158,6 +153,7 @@ enum {
        PN_LEGACY_FLOW_CONTROL,
        PN_ONE_CREDIT_FLOW_CONTROL,
        PN_MULTI_CREDIT_FLOW_CONTROL,
+       PN_MAX_FLOW_CONTROL,
 };
 
 #define pn_flow_safe(fc) ((fc) >> 1)
@@ -169,21 +165,4 @@ enum {
        PEP_IND_READY,
 };
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-#define PNS_PEP_CONNECT_UTID           0x02
-#define PNS_PIPE_CREATED_IND_UTID      0x04
-#define PNS_PIPE_ENABLE_UTID           0x0A
-#define PNS_PIPE_ENABLED_IND_UTID      0x0C
-#define PNS_PIPE_DISABLE_UTID          0x0F
-#define PNS_PIPE_DISABLED_IND_UTID     0x11
-#define PNS_PEP_DISCONNECT_UTID        0x06
-
-/* Used for tracking state of a pipe */
-enum {
-       PIPE_IDLE,
-       PIPE_DISABLED,
-       PIPE_ENABLED,
-};
-#endif /* CONFIG_PHONET_PIPECTRLR */
-
 #endif
index 5395e09..68e5097 100644 (file)
@@ -36,6 +36,7 @@
 struct pn_sock {
        struct sock     sk;
        u16             sobject;
+       u16             dobject;
        u8              resource;
 };
 
index dc07495..6f7eb80 100644 (file)
@@ -38,7 +38,7 @@ struct net_protocol {
        void                    (*err_handler)(struct sk_buff *skb, u32 info);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff         *(*gso_segment)(struct sk_buff *skb,
-                                              int features);
+                                              u32 features);
        struct sk_buff        **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
 
        int     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-                                      int features);
+                                      u32 features);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int     (*gro_complete)(struct sk_buff *skb);
index 93e10c4..30d6cae 100644 (file)
 
 struct fib_nh;
 struct inet_peer;
+struct fib_info;
 struct rtable {
        struct dst_entry        dst;
 
-       /* Cache lookup keys */
-       struct flowi            fl;
+       /* Lookup key. */
+       __be32                  rt_key_dst;
+       __be32                  rt_key_src;
 
        int                     rt_genid;
        unsigned                rt_flags;
        __u16                   rt_type;
+       __u8                    rt_tos;
 
        __be32                  rt_dst; /* Path destination     */
        __be32                  rt_src; /* Path source          */
        int                     rt_iif;
+       int                     rt_oif;
+       __u32                   rt_mark;
 
        /* Info on neighbour */
        __be32                  rt_gateway;
 
        /* Miscellaneous cached information */
        __be32                  rt_spec_dst; /* RFC1122 specific destination */
+       u32                     rt_peer_genid;
        struct inet_peer        *peer; /* long-living peer info */
+       struct fib_info         *fi; /* for client ref to shared metrics */
 };
 
 static inline bool rt_is_input_route(struct rtable *rt)
 {
-       return rt->fl.iif != 0;
+       return rt->rt_iif != 0;
 }
 
 static inline bool rt_is_output_route(struct rtable *rt)
 {
-       return rt->fl.iif == 0;
+       return rt->rt_iif == 0;
 }
 
 struct ip_rt_acct {
@@ -115,9 +122,63 @@ extern void                ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw,
                                       __be32 src, struct net_device *dev);
 extern void            rt_cache_flush(struct net *net, int how);
 extern void            rt_cache_flush_batch(struct net *net);
-extern int             __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp);
-extern int             ip_route_output_key(struct net *, struct rtable **, struct flowi *flp);
-extern int             ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
+extern struct rtable *__ip_route_output_key(struct net *, const struct flowi4 *flp);
+extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+                                          struct sock *sk);
+extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
+
+static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
+{
+       return ip_route_output_flow(net, flp, NULL);
+}
+
+static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
+                                            __be32 saddr, u8 tos, int oif)
+{
+       struct flowi4 fl4 = {
+               .flowi4_oif = oif,
+               .daddr = daddr,
+               .saddr = saddr,
+               .flowi4_tos = tos,
+       };
+       return ip_route_output_key(net, &fl4);
+}
+
+static inline struct rtable *ip_route_output_ports(struct net *net, struct sock *sk,
+                                                  __be32 daddr, __be32 saddr,
+                                                  __be16 dport, __be16 sport,
+                                                  __u8 proto, __u8 tos, int oif)
+{
+       struct flowi4 fl4 = {
+               .flowi4_oif = oif,
+               .flowi4_flags = sk ? inet_sk_flowi_flags(sk) : 0,
+               .flowi4_mark = sk ? sk->sk_mark : 0,
+               .daddr = daddr,
+               .saddr = saddr,
+               .flowi4_tos = tos,
+               .flowi4_proto = proto,
+               .fl4_dport = dport,
+               .fl4_sport = sport,
+       };
+       if (sk)
+               security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+       return ip_route_output_flow(net, &fl4, sk);
+}
+
+static inline struct rtable *ip_route_output_gre(struct net *net,
+                                                __be32 daddr, __be32 saddr,
+                                                __be32 gre_key, __u8 tos, int oif)
+{
+       struct flowi4 fl4 = {
+               .flowi4_oif = oif,
+               .daddr = daddr,
+               .saddr = saddr,
+               .flowi4_tos = tos,
+               .flowi4_proto = IPPROTO_GRE,
+               .fl4_gre_key = gre_key,
+       };
+       return ip_route_output_key(net, &fl4);
+}
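
A hedged sketch of the new IPv4 lookup convention: the helpers above hand back a struct rtable directly, with failures encoded as ERR_PTR values. The wrapper below is hypothetical; only ip_route_output() and ip_rt_put() are assumed from route.h.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/route.h>

/* hypothetical: find the output interface index toward daddr */
static int sample_route_oif(struct net *net, __be32 daddr, int *oif)
{
        struct rtable *rt;

        rt = ip_route_output(net, daddr, 0, 0, 0);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        *oif = rt->dst.dev->ifindex;
        ip_rt_put(rt);                  /* drop the route reference */
        return 0;
}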
 
 extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
                                 u8 tos, struct net_device *devin, bool noref);
@@ -162,57 +223,68 @@ static inline char rt_tos2priority(u8 tos)
        return ip_tos2prio[IPTOS_TOS(tos)>>1];
 }
 
-static inline int ip_route_connect(struct rtable **rp, __be32 dst,
-                                  __be32 src, u32 tos, int oif, u8 protocol,
-                                  __be16 sport, __be16 dport, struct sock *sk,
-                                  int flags)
+static inline struct rtable *ip_route_connect(__be32 dst, __be32 src, u32 tos,
+                                             int oif, u8 protocol,
+                                             __be16 sport, __be16 dport,
+                                             struct sock *sk, bool can_sleep)
 {
-       struct flowi fl = { .oif = oif,
-                           .mark = sk->sk_mark,
-                           .fl4_dst = dst,
-                           .fl4_src = src,
-                           .fl4_tos = tos,
-                           .proto = protocol,
-                           .fl_ip_sport = sport,
-                           .fl_ip_dport = dport };
-       int err;
+       struct flowi4 fl4 = {
+               .flowi4_oif = oif,
+               .flowi4_mark = sk->sk_mark,
+               .daddr = dst,
+               .saddr = src,
+               .flowi4_tos = tos,
+               .flowi4_proto = protocol,
+               .fl4_sport = sport,
+               .fl4_dport = dport,
+       };
        struct net *net = sock_net(sk);
+       struct rtable *rt;
 
        if (inet_sk(sk)->transparent)
-               fl.flags |= FLOWI_FLAG_ANYSRC;
+               fl4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+       if (protocol == IPPROTO_TCP)
+               fl4.flowi4_flags |= FLOWI_FLAG_PRECOW_METRICS;
+       if (can_sleep)
+               fl4.flowi4_flags |= FLOWI_FLAG_CAN_SLEEP;
 
        if (!dst || !src) {
-               err = __ip_route_output_key(net, rp, &fl);
-               if (err)
-                       return err;
-               fl.fl4_dst = (*rp)->rt_dst;
-               fl.fl4_src = (*rp)->rt_src;
-               ip_rt_put(*rp);
-               *rp = NULL;
+               rt = __ip_route_output_key(net, &fl4);
+               if (IS_ERR(rt))
+                       return rt;
+               fl4.daddr = rt->rt_dst;
+               fl4.saddr = rt->rt_src;
+               ip_rt_put(rt);
        }
-       security_sk_classify_flow(sk, &fl);
-       return ip_route_output_flow(net, rp, &fl, sk, flags);
+       security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+       return ip_route_output_flow(net, &fl4, sk);
 }
 
-static inline int ip_route_newports(struct rtable **rp, u8 protocol,
-                                   __be16 sport, __be16 dport, struct sock *sk)
+static inline struct rtable *ip_route_newports(struct rtable *rt,
+                                              u8 protocol, __be16 orig_sport,
+                                              __be16 orig_dport, __be16 sport,
+                                              __be16 dport, struct sock *sk)
 {
-       if (sport != (*rp)->fl.fl_ip_sport ||
-           dport != (*rp)->fl.fl_ip_dport) {
-               struct flowi fl;
-
-               memcpy(&fl, &(*rp)->fl, sizeof(fl));
-               fl.fl_ip_sport = sport;
-               fl.fl_ip_dport = dport;
-               fl.proto = protocol;
+       if (sport != orig_sport || dport != orig_dport) {
+               struct flowi4 fl4 = {
+                       .flowi4_oif = rt->rt_oif,
+                       .flowi4_mark = rt->rt_mark,
+                       .daddr = rt->rt_key_dst,
+                       .saddr = rt->rt_key_src,
+                       .flowi4_tos = rt->rt_tos,
+                       .flowi4_proto = protocol,
+                       .fl4_sport = sport,
+                       .fl4_dport = dport
+               };
                if (inet_sk(sk)->transparent)
-                       fl.flags |= FLOWI_FLAG_ANYSRC;
-               ip_rt_put(*rp);
-               *rp = NULL;
-               security_sk_classify_flow(sk, &fl);
-               return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0);
+                       fl4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+               if (protocol == IPPROTO_TCP)
+                       fl4.flowi4_flags |= FLOWI_FLAG_PRECOW_METRICS;
+               ip_rt_put(rt);
+               security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+               return ip_route_output_flow(sock_net(sk), &fl4, sk);
        }
-       return 0;
+       return rt;
 }
 
 extern void rt_bind_peer(struct rtable *rt, int create);
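A minimal caller-side sketch (not part of the patch) of the reworked IPv4 output-route API above: lookups now hand back a struct rtable * or an ERR_PTR() instead of filling in a struct rtable ** and returning an int. The wrapper name example_udp_route() is illustrative.

#include <linux/err.h>
#include <net/route.h>

static int example_udp_route(struct net *net, struct sock *sk,
			     __be32 daddr, __be32 saddr,
			     __be16 dport, __be16 sport)
{
	struct rtable *rt;

	/* proto/tos/oif ordered as in the helper's signature above */
	rt = ip_route_output_ports(net, sk, daddr, saddr, dport, sport,
				   IPPROTO_UDP, 0, 0);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* the error now travels in the pointer */

	/* ... transmit via rt ... */

	ip_rt_put(rt);			/* callers still drop their reference */
	return 0;
}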
index e9eee99..a9505b6 100644 (file)
@@ -31,10 +31,12 @@ enum qdisc_state_t {
  * following bits are only changed while qdisc lock is held
  */
 enum qdisc___state_t {
-       __QDISC___STATE_RUNNING,
+       __QDISC___STATE_RUNNING = 1,
+       __QDISC___STATE_THROTTLED = 2,
 };
 
 struct qdisc_size_table {
+       struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
@@ -46,14 +48,13 @@ struct Qdisc {
        struct sk_buff *        (*dequeue)(struct Qdisc *dev);
        unsigned                flags;
 #define TCQ_F_BUILTIN          1
-#define TCQ_F_THROTTLED                2
-#define TCQ_F_INGRESS          4
-#define TCQ_F_CAN_BYPASS       8
-#define TCQ_F_MQROOT           16
+#define TCQ_F_INGRESS          2
+#define TCQ_F_CAN_BYPASS       4
+#define TCQ_F_MQROOT           8
 #define TCQ_F_WARN_NONWC       (1 << 16)
        int                     padded;
        struct Qdisc_ops        *ops;
-       struct qdisc_size_table *stab;
+       struct qdisc_size_table __rcu *stab;
        struct list_head        list;
        u32                     handle;
        u32                     parent;
@@ -78,25 +79,44 @@ struct Qdisc {
        unsigned long           state;
        struct sk_buff_head     q;
        struct gnet_stats_basic_packed bstats;
-       unsigned long           __state;
+       unsigned int            __state;
        struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        spinlock_t              busylock;
+       u32                     limit;
 };
 
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-       return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+       return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
 }
 
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
-       return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+       if (qdisc_is_running(qdisc))
+               return false;
+       qdisc->__state |= __QDISC___STATE_RUNNING;
+       return true;
 }
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-       __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+       qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+       return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+       qdisc->__state |= __QDISC___STATE_THROTTLED;
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+       qdisc->__state &= ~__QDISC___STATE_THROTTLED;
 }
 
 struct Qdisc_class_ops {
@@ -199,7 +219,7 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       char                    data[];
+       long                    data[];
 };
 
 static inline int qdisc_qlen(struct Qdisc *q)
@@ -331,8 +351,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
-                                  struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+                                     const struct qdisc_size_table *stab);
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto **fl);
 
@@ -411,12 +431,20 @@ enum net_xmit_qdisc_t {
 #define net_xmit_drop_count(e) (1)
 #endif
 
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+                                          const struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
-       if (sch->stab)
-               qdisc_calculate_pkt_len(skb, sch->stab);
+       struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+       if (stab)
+               __qdisc_calculate_pkt_len(skb, stab);
 #endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+       qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
 }
 
@@ -445,7 +473,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 {
        __skb_queue_tail(list, skb);
        sch->qstats.backlog += qdisc_pkt_len(skb);
-       qdisc_bstats_update(sch, skb);
 
        return NET_XMIT_SUCCESS;
 }
@@ -460,8 +487,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 {
        struct sk_buff *skb = __skb_dequeue(list);
 
-       if (likely(skb != NULL))
+       if (likely(skb != NULL)) {
                sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_bstats_update(sch, skb);
+       }
 
        return skb;
 }
@@ -474,10 +503,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                              struct sk_buff_head *list)
 {
-       struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+       struct sk_buff *skb = __skb_dequeue(list);
 
        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);
+               sch->qstats.backlog -= len;
                kfree_skb(skb);
                return len;
        }
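A hedged sketch of how a rate-limiting qdisc might use the new throttle helpers above in place of the removed TCQ_F_THROTTLED flag; the qdisc itself (example_dequeue) and its timing check are hypothetical.

#include <net/sch_generic.h>

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	bool next_packet_due = true;	/* stand-in for the qdisc's own timer check */

	if (qdisc_is_throttled(sch))
		return NULL;		/* watchdog has not fired yet */

	if (!next_packet_due) {
		qdisc_throttled(sch);	/* was: sch->flags |= TCQ_F_THROTTLED */
		return NULL;
	}

	qdisc_unthrottled(sch);		/* was: sch->flags &= ~TCQ_F_THROTTLED */
	return qdisc_dequeue_head(sch);
}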
index d884d26..da0534d 100644 (file)
@@ -281,7 +281,7 @@ struct sock {
        int                     sk_rcvbuf;
 
        struct sk_filter __rcu  *sk_filter;
-       struct socket_wq        *sk_wq;
+       struct socket_wq __rcu  *sk_wq;
 
 #ifdef CONFIG_NET_DMA
        struct sk_buff_head     sk_async_wait_queue;
@@ -753,6 +753,8 @@ struct proto {
                                        int level,
                                        int optname, char __user *optval,
                                        int __user *option);
+       int                     (*compat_ioctl)(struct sock *sk,
+                                       unsigned int cmd, unsigned long arg);
 #endif
        int                     (*sendmsg)(struct kiocb *iocb, struct sock *sk,
                                           struct msghdr *msg, size_t len);
@@ -1189,7 +1191,7 @@ extern void sk_filter_release_rcu(struct rcu_head *rcu);
 static inline void sk_filter_release(struct sk_filter *fp)
 {
        if (atomic_dec_and_test(&fp->refcnt))
-               call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
+               call_rcu(&fp->rcu, sk_filter_release_rcu);
 }
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
@@ -1264,7 +1266,8 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
-       return &sk->sk_wq->wait;
+       BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+       return &rcu_dereference_raw(sk->sk_wq)->wait;
 }
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1285,7 +1288,7 @@ static inline void sock_orphan(struct sock *sk)
 static inline void sock_graft(struct sock *sk, struct socket *parent)
 {
        write_lock_bh(&sk->sk_callback_lock);
-       rcu_assign_pointer(sk->sk_wq, parent->wq);
+       sk->sk_wq = parent->wq;
        parent->sk = sk;
        sk_set_socket(sk, parent);
        security_sock_graft(sk, parent);
index 38509f0..cda30ea 100644 (file)
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* TCP thin-stream limits */
 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 
+/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+#define TCP_INIT_CWND          10
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -799,15 +802,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)        WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
-       return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
@@ -1074,8 +1068,6 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
        return 1;
 }
 
-#define TCP_CHECK_TIMER(sk) do { } while (0)
-
 static inline void tcp_mib_init(struct net *net)
 {
        /* See RFC 2012 */
@@ -1404,7 +1396,7 @@ extern struct request_sock_ops tcp6_request_sock_ops;
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
index 42a0eb6..eeb077d 100644 (file)
@@ -14,7 +14,7 @@ extern struct proto udpv6_prot;
 extern struct proto udplitev6_prot;
 extern struct proto tcpv6_prot;
 
-struct flowi;
+struct flowi6;
 
 /* extension headers */
 extern int                             ipv6_exthdrs_init(void);
@@ -42,7 +42,7 @@ extern int                    datagram_recv_ctl(struct sock *sk,
 
 extern int                     datagram_send_ctl(struct net *net,
                                                  struct msghdr *msg,
-                                                 struct flowi *fl,
+                                                 struct flowi6 *fl6,
                                                  struct ipv6_txoptions *opt,
                                                  int *hlimit, int *tclass,
                                                  int *dontfrag);
index bb967dd..67ea6fc 100644 (file)
@@ -144,6 +144,17 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
        return csum;
 }
 
+static inline __wsum udp_csum(struct sk_buff *skb)
+{
+       __wsum csum = csum_partial(skb_transport_header(skb),
+                                  sizeof(struct udphdr), skb->csum);
+
+       for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+               csum = csum_add(csum, skb->csum);
+       }
+       return csum;
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline void udp_lib_hash(struct sock *sk)
 {
@@ -245,5 +256,5 @@ extern void udp4_proc_exit(void);
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
 #endif /* _UDP_H */
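A hedged sketch of how the new udp_csum() helper above might be used when finalizing a locally built UDP packet; the function name and the explicit len parameter (UDP length including the header) are illustrative, not part of the patch.

#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/udp.h>

static void example_udp_finish_csum(struct sk_buff *skb, __be32 saddr,
				    __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = udp_csum(skb);	/* head skb plus any frag_list skbs */

	uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* all-zero means "no checksum" for UDP */
}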
index afdffe6..673a024 100644 (file)
@@ -115,6 +115,18 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
        return csum;
 }
 
+static inline __wsum udplite_csum(struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+       const int off = skb_transport_offset(skb);
+       const int len = skb->len - off;
+
+       skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
+
+       return skb_checksum(skb, off, min(cscov, len), 0);
+}
+
 extern void    udplite4_register(void);
 extern int     udplite_get_port(struct sock *sk, unsigned short snum,
                        int (*scmp)(const struct sock *, const struct sock *));
index b9f385d..42a8c32 100644 (file)
@@ -36,6 +36,7 @@
 #define XFRM_PROTO_ROUTING     IPPROTO_ROUTING
 #define XFRM_PROTO_DSTOPTS     IPPROTO_DSTOPTS
 
+#define XFRM_ALIGN4(len)       (((len) + 3) & ~3)
 #define XFRM_ALIGN8(len)       (((len) + 7) & ~7)
 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
        MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
@@ -185,9 +186,14 @@ struct xfrm_state {
 
        /* State for replay detection */
        struct xfrm_replay_state replay;
+       struct xfrm_replay_state_esn *replay_esn;
 
        /* Replay detection state at the time we sent the last notification */
        struct xfrm_replay_state preplay;
+       struct xfrm_replay_state_esn *preplay_esn;
+
+       /* The functions for replay detection. */
+       struct xfrm_replay      *repl;
 
        /* internal flag that only holds state for delayed aevent at the
         * moment
@@ -258,6 +264,15 @@ struct km_event {
        struct net *net;
 };
 
+struct xfrm_replay {
+       void    (*advance)(struct xfrm_state *x, __be32 net_seq);
+       int     (*check)(struct xfrm_state *x,
+                        struct sk_buff *skb,
+                        __be32 net_seq);
+       void    (*notify)(struct xfrm_state *x, int event);
+       int     (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+};
+
 struct net_device;
 struct xfrm_type;
 struct xfrm_dst;
@@ -266,25 +281,26 @@ struct xfrm_policy_afinfo {
        struct dst_ops          *dst_ops;
        void                    (*garbage_collect)(struct net *net);
        struct dst_entry        *(*dst_lookup)(struct net *net, int tos,
-                                              xfrm_address_t *saddr,
-                                              xfrm_address_t *daddr);
+                                              const xfrm_address_t *saddr,
+                                              const xfrm_address_t *daddr);
        int                     (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
        void                    (*decode_session)(struct sk_buff *skb,
                                                  struct flowi *fl,
                                                  int reverse);
-       int                     (*get_tos)(struct flowi *fl);
+       int                     (*get_tos)(const struct flowi *fl);
        int                     (*init_path)(struct xfrm_dst *path,
                                             struct dst_entry *dst,
                                             int nfheader_len);
        int                     (*fill_dst)(struct xfrm_dst *xdst,
                                            struct net_device *dev,
-                                           struct flowi *fl);
+                                           const struct flowi *fl);
+       struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
 };
 
 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
@@ -299,9 +315,12 @@ struct xfrm_state_afinfo {
        const struct xfrm_type  *type_map[IPPROTO_MAX];
        struct xfrm_mode        *mode_map[XFRM_MODE_MAX];
        int                     (*init_flags)(struct xfrm_state *x);
-       void                    (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
-       void                    (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
-                                               xfrm_address_t *daddr, xfrm_address_t *saddr);
+       void                    (*init_tempsel)(struct xfrm_selector *sel,
+                                               const struct flowi *fl);
+       void                    (*init_temprop)(struct xfrm_state *x,
+                                               const struct xfrm_tmpl *tmpl,
+                                               const xfrm_address_t *daddr,
+                                               const xfrm_address_t *saddr);
        int                     (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
        int                     (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
        int                     (*output)(struct sk_buff *skb);
@@ -332,7 +351,8 @@ struct xfrm_type {
        void                    (*destructor)(struct xfrm_state *);
        int                     (*input)(struct xfrm_state *, struct sk_buff *skb);
        int                     (*output)(struct xfrm_state *, struct sk_buff *pskb);
-       int                     (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
+       int                     (*reject)(struct xfrm_state *, struct sk_buff *,
+                                         const struct flowi *);
        int                     (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
        /* Estimate maximal size of result of transformation of a dgram */
        u32                     (*get_mtu)(struct xfrm_state *, int size);
@@ -501,7 +521,7 @@ struct xfrm_policy {
        struct xfrm_tmpl        xfrm_vec[XFRM_MAX_DEPTH];
 };
 
-static inline struct net *xp_net(struct xfrm_policy *xp)
+static inline struct net *xp_net(const struct xfrm_policy *xp)
 {
        return read_pnet(&xp->xp_net);
 }
@@ -545,13 +565,17 @@ struct xfrm_migrate {
 struct xfrm_mgr {
        struct list_head        list;
        char                    *id;
-       int                     (*notify)(struct xfrm_state *x, struct km_event *c);
+       int                     (*notify)(struct xfrm_state *x, const struct km_event *c);
        int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
        struct xfrm_policy      *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
        int                     (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-       int                     (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
+       int                     (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
        int                     (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
-       int                     (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
+       int                     (*migrate)(const struct xfrm_selector *sel,
+                                          u8 dir, u8 type,
+                                          const struct xfrm_migrate *m,
+                                          int num_bundles,
+                                          const struct xfrm_kmaddress *k);
 };
 
 extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -570,8 +594,14 @@ struct xfrm_skb_cb {
 
         /* Sequence number for replay protection. */
        union {
-               u64 output;
-               __be32 input;
+               struct {
+                       __u32 low;
+                       __u32 hi;
+               } output;
+               struct {
+                       __be32 low;
+                       __be32 hi;
+               } input;
        } seq;
 };
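A brief sketch of how a 64-bit output sequence number, presumably for the extended-sequence-number (replay_esn) support added above, would be split across the new seq.output fields; esn_store_seq() and seq64 are illustrative names only.

#include <net/xfrm.h>

static inline void esn_store_seq(struct sk_buff *skb, u64 seq64)
{
	XFRM_SKB_CB(skb)->seq.output.low = (u32)seq64;
	XFRM_SKB_CB(skb)->seq.output.hi  = (u32)(seq64 >> 32);
}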
 
@@ -675,6 +705,8 @@ extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
                                    u32 auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
                                             struct sk_buff *skb);
+extern void xfrm_audit_state_replay(struct xfrm_state *x,
+                                   struct sk_buff *skb, __be32 net_seq);
 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
                                      __be32 net_spi, __be32 net_seq);
@@ -707,6 +739,11 @@ static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
 {
 }
 
+static inline void xfrm_audit_state_replay(struct xfrm_state *x,
+                                          struct sk_buff *skb, __be32 net_seq)
+{
+}
+
 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
                                      u16 family)
 {
@@ -762,10 +799,11 @@ static inline void xfrm_state_hold(struct xfrm_state *x)
        atomic_inc(&x->refcnt);
 }
 
-static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
+static inline bool addr_match(const void *token1, const void *token2,
+                             int prefixlen)
 {
-       __be32 *a1 = token1;
-       __be32 *a2 = token2;
+       const __be32 *a1 = token1;
+       const __be32 *a2 = token2;
        int pdw;
        int pbi;
 
@@ -774,7 +812,7 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
 
        if (pdw)
                if (memcmp(a1, a2, pdw << 2))
-                       return 0;
+                       return false;
 
        if (pbi) {
                __be32 mask;
@@ -782,32 +820,32 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
                mask = htonl((0xffffffff) << (32 - pbi));
 
                if ((a1[pdw] ^ a2[pdw]) & mask)
-                       return 0;
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
 static __inline__
-__be16 xfrm_flowi_sport(struct flowi *fl)
+__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
 {
        __be16 port;
-       switch(fl->proto) {
+       switch(fl->flowi_proto) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_SCTP:
-               port = fl->fl_ip_sport;
+               port = uli->ports.sport;
                break;
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
-               port = htons(fl->fl_icmp_type);
+               port = htons(uli->icmpt.type);
                break;
        case IPPROTO_MH:
-               port = htons(fl->fl_mh_type);
+               port = htons(uli->mht.type);
                break;
        case IPPROTO_GRE:
-               port = htons(ntohl(fl->fl_gre_key) >> 16);
+               port = htons(ntohl(uli->gre_key) >> 16);
                break;
        default:
                port = 0;       /*XXX*/
@@ -816,22 +854,22 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
 }
 
 static __inline__
-__be16 xfrm_flowi_dport(struct flowi *fl)
+__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
 {
        __be16 port;
-       switch(fl->proto) {
+       switch(fl->flowi_proto) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_SCTP:
-               port = fl->fl_ip_dport;
+               port = uli->ports.dport;
                break;
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
-               port = htons(fl->fl_icmp_code);
+               port = htons(uli->icmpt.code);
                break;
        case IPPROTO_GRE:
-               port = htons(ntohl(fl->fl_gre_key) & 0xffff);
+               port = htons(ntohl(uli->gre_key) & 0xffff);
                break;
        default:
                port = 0;       /*XXX*/
@@ -839,7 +877,8 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
        return port;
 }
 
-extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+extern int xfrm_selector_match(const struct xfrm_selector *sel,
+                              const struct flowi *fl,
                               unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -947,7 +986,7 @@ secpath_reset(struct sk_buff *skb)
 }
 
 static inline int
-xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
+xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -959,21 +998,21 @@ xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
 }
 
 static inline int
-__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
 {
        return  (tmpl->saddr.a4 &&
                 tmpl->saddr.a4 != x->props.saddr.a4);
 }
 
 static inline int
-__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
 {
        return  (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
                 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
 }
 
 static inline int
-xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
+xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -1126,49 +1165,49 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
 #endif
 
 static __inline__
-xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
 {
        switch (family){
        case AF_INET:
-               return (xfrm_address_t *)&fl->fl4_dst;
+               return (xfrm_address_t *)&fl->u.ip4.daddr;
        case AF_INET6:
-               return (xfrm_address_t *)&fl->fl6_dst;
+               return (xfrm_address_t *)&fl->u.ip6.daddr;
        }
        return NULL;
 }
 
 static __inline__
-xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
 {
        switch (family){
        case AF_INET:
-               return (xfrm_address_t *)&fl->fl4_src;
+               return (xfrm_address_t *)&fl->u.ip4.saddr;
        case AF_INET6:
-               return (xfrm_address_t *)&fl->fl6_src;
+               return (xfrm_address_t *)&fl->u.ip6.saddr;
        }
        return NULL;
 }
 
 static __inline__
-void xfrm_flowi_addr_get(struct flowi *fl,
+void xfrm_flowi_addr_get(const struct flowi *fl,
                         xfrm_address_t *saddr, xfrm_address_t *daddr,
                         unsigned short family)
 {
        switch(family) {
        case AF_INET:
-               memcpy(&saddr->a4, &fl->fl4_src, sizeof(saddr->a4));
-               memcpy(&daddr->a4, &fl->fl4_dst, sizeof(daddr->a4));
+               memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
+               memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
                break;
        case AF_INET6:
-               ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->fl6_src);
-               ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->fl6_dst);
+               ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr);
+               ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr);
                break;
        }
 }
 
 static __inline__ int
-__xfrm4_state_addr_check(struct xfrm_state *x,
-                        xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_state_addr_check(const struct xfrm_state *x,
+                        const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        if (daddr->a4 == x->id.daddr.a4 &&
            (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
@@ -1177,8 +1216,8 @@ __xfrm4_state_addr_check(struct xfrm_state *x,
 }
 
 static __inline__ int
-__xfrm6_state_addr_check(struct xfrm_state *x,
-                        xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_state_addr_check(const struct xfrm_state *x,
+                        const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
            (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)|| 
@@ -1189,8 +1228,8 @@ __xfrm6_state_addr_check(struct xfrm_state *x,
 }
 
 static __inline__ int
-xfrm_state_addr_check(struct xfrm_state *x,
-                     xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_state_addr_check(const struct xfrm_state *x,
+                     const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                      unsigned short family)
 {
        switch (family) {
@@ -1203,23 +1242,23 @@ xfrm_state_addr_check(struct xfrm_state *x,
 }
 
 static __inline__ int
-xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
+xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
                           unsigned short family)
 {
        switch (family) {
        case AF_INET:
                return __xfrm4_state_addr_check(x,
-                                               (xfrm_address_t *)&fl->fl4_dst,
-                                               (xfrm_address_t *)&fl->fl4_src);
+                                               (const xfrm_address_t *)&fl->u.ip4.daddr,
+                                               (const xfrm_address_t *)&fl->u.ip4.saddr);
        case AF_INET6:
                return __xfrm6_state_addr_check(x,
-                                               (xfrm_address_t *)&fl->fl6_dst,
-                                               (xfrm_address_t *)&fl->fl6_src);
+                                               (const xfrm_address_t *)&fl->u.ip6.daddr,
+                                               (const xfrm_address_t *)&fl->u.ip6.saddr);
        }
        return 0;
 }
 
-static inline int xfrm_state_kern(struct xfrm_state *x)
+static inline int xfrm_state_kern(const struct xfrm_state *x)
 {
        return atomic_read(&x->tunnel_users);
 }
@@ -1323,8 +1362,10 @@ extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                           int (*func)(struct xfrm_state *, int, void*), void *);
 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
 extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 
-                                         struct flowi *fl, struct xfrm_tmpl *tmpl,
+extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+                                         const xfrm_address_t *saddr,
+                                         const struct flowi *fl,
+                                         struct xfrm_tmpl *tmpl,
                                          struct xfrm_policy *pol, int *err,
                                          unsigned short family);
 extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
@@ -1337,11 +1378,11 @@ extern void xfrm_state_insert(struct xfrm_state *x);
 extern int xfrm_state_add(struct xfrm_state *x);
 extern int xfrm_state_update(struct xfrm_state *x);
 extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
-                                           xfrm_address_t *daddr, __be32 spi,
+                                           const xfrm_address_t *daddr, __be32 spi,
                                            u8 proto, unsigned short family);
 extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-                                                  xfrm_address_t *daddr,
-                                                  xfrm_address_t *saddr,
+                                                  const xfrm_address_t *daddr,
+                                                  const xfrm_address_t *saddr,
                                                   u8 proto,
                                                   unsigned short family);
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1386,10 +1427,8 @@ extern int xfrm_state_delete(struct xfrm_state *x);
 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
 extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern int xfrm_replay_check(struct xfrm_state *x,
-                            struct sk_buff *skb, __be32 seq);
-extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
-extern void xfrm_replay_notify(struct xfrm_state *x, int event);
+extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+extern int xfrm_init_replay(struct xfrm_state *x);
 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
 extern int xfrm_init_state(struct xfrm_state *x);
 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
@@ -1468,19 +1507,19 @@ u32 xfrm_get_acqseq(void);
 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
 struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
                                 u8 mode, u32 reqid, u8 proto,
-                                xfrm_address_t *daddr,
-                                xfrm_address_t *saddr, int create,
+                                const xfrm_address_t *daddr,
+                                const xfrm_address_t *saddr, int create,
                                 unsigned short family);
 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 
 #ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                     struct xfrm_migrate *m, int num_bundles,
-                     struct xfrm_kmaddress *k);
+extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                     const struct xfrm_migrate *m, int num_bundles,
+                     const struct xfrm_kmaddress *k);
 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
                                              struct xfrm_migrate *m);
-extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                        struct xfrm_migrate *m, int num_bundles,
                        struct xfrm_kmaddress *k);
 #endif
@@ -1500,10 +1539,10 @@ extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
+extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
                                                   int probe);
 
 struct hash_desc;
@@ -1511,7 +1550,8 @@ struct scatterlist;
 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
                              unsigned int);
 
-static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
+static inline int xfrm_addr_cmp(const xfrm_address_t *a,
+                               const xfrm_address_t *b,
                                int family)
 {
        switch (family) {
@@ -1544,16 +1584,21 @@ static inline int xfrm_aevent_is_on(struct net *net)
 }
 #endif
 
-static inline int xfrm_alg_len(struct xfrm_algo *alg)
+static inline int xfrm_alg_len(const struct xfrm_algo *alg)
 {
        return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
 }
 
-static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
+static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
 {
        return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
 }
 
+static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
+{
+       return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
+}
+
 #ifdef CONFIG_XFRM_MIGRATE
 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
 {
@@ -1597,7 +1642,7 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
        return m->v & m->m;
 }
 
-static inline int xfrm_mark_put(struct sk_buff *skb, struct xfrm_mark *m)
+static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
 {
        if (m->m | m->v)
                NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
index 8479b66..3fd5064 100644 (file)
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev);
 #define CONF_ENABLE_ESR         0x0008
 #define CONF_ENABLE_IOCARD     0x0010 /* auto-enabled if IO resources or IRQ
                                        * (CONF_ENABLE_IRQ) in use */
+#define CONF_ENABLE_ZVCARD     0x0020
 
 /* flags used by pcmcia_loop_config() autoconfiguration */
 #define CONF_AUTO_CHECK_VCC    0x0100 /* check for matching Vcc? */
index 648d233..b76d400 100644 (file)
@@ -9,6 +9,7 @@
 #define _SCSI_SCSI_H
 
 #include <linux/types.h>
+#include <linux/scatterlist.h>
 
 struct scsi_cmnd;
 
index b4a0db2..1eeebd5 100644 (file)
 /*
  * R6 (0x06) - Mic Bias Control 0
  */
-#define WM8903_MICDET_HYST_ENA                  0x0080  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_MASK             0x0080  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_SHIFT                 7  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_WIDTH                 1  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_THR_MASK                  0x0070  /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_SHIFT                      4  /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_WIDTH                      3  /* MICDET_THR - [6:4] */
+#define WM8903_MICDET_THR_MASK                  0x0030  /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_SHIFT                      4  /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_WIDTH                      2  /* MICDET_THR - [5:4] */
 #define WM8903_MICSHORT_THR_MASK                0x000C  /* MICSHORT_THR - [3:2] */
 #define WM8903_MICSHORT_THR_SHIFT                    2  /* MICSHORT_THR - [3:2] */
 #define WM8903_MICSHORT_THR_WIDTH                    2  /* MICSHORT_THR - [3:2] */
index 07fdfb6..0828b6c 100644 (file)
@@ -8,7 +8,6 @@
 #include <scsi/scsi_cmnd.h>
 #include <net/sock.h>
 #include <net/tcp.h>
-#include "target_core_mib.h"
 
 #define TARGET_CORE_MOD_VERSION                "v4.0.0-rc6"
 #define SHUTDOWN_SIGS  (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
@@ -195,6 +194,21 @@ typedef enum {
        SAM_TASK_ATTR_EMULATED
 } t10_task_attr_index_t;
 
+/*
+ * Used for target SCSI statistics
+ */
+typedef enum {
+       SCSI_INST_INDEX,
+       SCSI_DEVICE_INDEX,
+       SCSI_AUTH_INTR_INDEX,
+       SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+       spinlock_t      lock;
+       u32             scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
 struct se_cmd;
 
 struct t10_alua {
@@ -578,8 +592,6 @@ struct se_node_acl {
        spinlock_t              stats_lock;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                acl_pr_ref_count;
-       /* Used for MIB access */
-       atomic_t                mib_ref_count;
        struct se_dev_entry     *device_list;
        struct se_session       *nacl_sess;
        struct se_portal_group *se_tpg;
@@ -595,8 +607,6 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       /* Used for MIB access */
-       atomic_t                mib_ref_count;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -806,7 +816,6 @@ struct se_hba {
        /* Virtual iSCSI devices attached. */
        u32                     dev_count;
        u32                     hba_index;
-       atomic_t                dev_mib_access_count;
        atomic_t                load_balance_queue;
        atomic_t                left_queue_depth;
        /* Maximum queue depth the HBA can handle. */
@@ -845,6 +854,12 @@ struct se_lun {
 
 #define SE_LUN(c)              ((struct se_lun *)(c)->se_lun)
 
+struct scsi_port_stats {
+       u64     cmd_pdus;
+       u64     tx_data_octets;
+       u64     rx_data_octets;
+} ____cacheline_aligned;
+
 struct se_port {
        /* RELATIVE TARGET PORT IDENTIFIER */
        u16             sep_rtpi;
@@ -867,6 +882,7 @@ struct se_port {
 } ____cacheline_aligned;
 
 struct se_tpg_np {
+       struct se_portal_group *tpg_np_parent;
        struct config_group     tpg_np_group;
 } ____cacheline_aligned;
 
index 66f44e5..2469405 100644 (file)
@@ -111,6 +111,8 @@ struct se_subsystem_api;
 
 extern int init_se_global(void);
 extern void release_se_global(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
 extern void transport_init_queue_obj(struct se_queue_obj *);
 extern int transport_subsystem_check_init(void);
 extern int transport_subsystem_register(struct se_subsystem_api *);
index aba421d..78f18ad 100644 (file)
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
                                        0 : blk_rq_sectors(rq);
                __entry->errors    = rq->errors;
 
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
        ),
 
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
                __entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        blk_rq_bytes(rq) : 0;
 
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
index e16610c..3e68366 100644 (file)
@@ -446,14 +446,16 @@ static inline notrace int ftrace_get_offsets_##call(                      \
  *     .reg                    = ftrace_event_reg,
  * };
  *
- * static struct ftrace_event_call __used
- * __attribute__((__aligned__(4)))
- * __attribute__((section("_ftrace_events"))) event_<call> = {
+ * static struct ftrace_event_call event_<call> = {
  *     .name                   = "<call>",
  *     .class                  = event_class_<template>,
  *     .event                  = &ftrace_event_type_<call>,
  *     .print_fmt              = print_fmt_<call>,
  * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct ftrace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
  *
  */
 
@@ -579,28 +581,28 @@ static struct ftrace_event_class __used event_class_##call = {            \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)                      \
                                                                        \
-static struct ftrace_event_call __used                                 \
-__attribute__((__aligned__(4)))                                                \
-__attribute__((section("_ftrace_events"))) event_##call = {            \
+static struct ftrace_event_call __used event_##call = {                        \
        .name                   = #call,                                \
        .class                  = &event_class_##template,              \
        .event.funcs            = &ftrace_event_type_funcs_##template,  \
        .print_fmt              = print_fmt_##template,                 \
-};
+};                                                                     \
+static struct ftrace_event_call __used                                 \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)         \
                                                                        \
 static const char print_fmt_##call[] = print;                          \
                                                                        \
-static struct ftrace_event_call __used                                 \
-__attribute__((__aligned__(4)))                                                \
-__attribute__((section("_ftrace_events"))) event_##call = {            \
+static struct ftrace_event_call __used event_##call = {                        \
        .name                   = #call,                                \
        .class                  = &event_class_##template,              \
        .event.funcs            = &ftrace_event_type_funcs_##call,      \
        .print_fmt              = print_fmt_##call,                     \
-}
+};                                                                     \
+static struct ftrace_event_call __used                                 \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
index 6eb48e5..24fe022 100644 (file)
@@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
                pre_start = 0;
                read_current_timer(&start);
                start_jiffies = jiffies;
-               while (jiffies <= (start_jiffies + 1)) {
+               while (time_before_eq(jiffies, start_jiffies + 1)) {
                        pre_start = start;
                        read_current_timer(&start);
                }
@@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
 
                pre_end = 0;
                end = post_start;
-               while (jiffies <=
-                      (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
+               while (time_before_eq(jiffies, start_jiffies + 1 +
+                                              DELAY_CALIBRATION_TICKS)) {
                        pre_end = end;
                        read_current_timer(&end);
                }
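For context, a simplified sketch of why time_before_eq() is safer than a plain "<=": jiffies is an unsigned counter that wraps, so comparisons must be made on the signed difference. The real macros, with type checking, live in <linux/jiffies.h>; the example_ names below are illustrative.

/* simplified versions of the <linux/jiffies.h> helpers, for illustration */
#define example_time_after_eq(a, b)	((long)((a) - (b)) >= 0)
#define example_time_before_eq(a, b)	example_time_after_eq(b, a)
/*
 * With wraparound, "jiffies <= start_jiffies + 1" can stay false for a very
 * long stretch after the counter wraps; the signed-difference form remains
 * correct as long as the two timestamps are less than LONG_MAX ticks apart.
 */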
index e495624..9395003 100644 (file)
@@ -74,6 +74,8 @@ static int    audit_initialized;
 int            audit_enabled;
 int            audit_ever_enabled;
 
+EXPORT_SYMBOL_GPL(audit_enabled);
+
 /* Default state when kernel boots without any parameters. */
 static int     audit_default;
 
@@ -671,9 +673,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        pid  = NETLINK_CREDS(skb)->pid;
        uid  = NETLINK_CREDS(skb)->uid;
-       loginuid = NETLINK_CB(skb).loginuid;
-       sessionid = NETLINK_CB(skb).sessionid;
-       sid  = NETLINK_CB(skb).sid;
+       loginuid = audit_get_loginuid(current);
+       sessionid = audit_get_sessionid(current);
+       security_task_getsecid(current, &sid);
        seq  = nlh->nlmsg_seq;
        data = NLMSG_DATA(nlh);
 
index add2819..f8277c8 100644 (file)
@@ -1238,6 +1238,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
        for (i = 0; i < rule->field_count; i++) {
                struct audit_field *f = &rule->fields[i];
                int result = 0;
+               u32 sid;
 
                switch (f->type) {
                case AUDIT_PID:
@@ -1250,19 +1251,22 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
                        result = audit_comparator(cb->creds.gid, f->op, f->val);
                        break;
                case AUDIT_LOGINUID:
-                       result = audit_comparator(cb->loginuid, f->op, f->val);
+                       result = audit_comparator(audit_get_loginuid(current),
+                                                 f->op, f->val);
                        break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
                case AUDIT_SUBJ_SEN:
                case AUDIT_SUBJ_CLR:
-                       if (f->lsm_rule)
-                               result = security_audit_rule_match(cb->sid,
+                       if (f->lsm_rule) {
+                               security_task_getsecid(current, &sid);
+                               result = security_audit_rule_match(sid,
                                                                   f->type,
                                                                   f->op,
                                                                   f->lsm_rule,
                                                                   NULL);
+                       }
                        break;
                }
 
index 2f05303..9e9385f 100644 (file)
@@ -306,7 +306,7 @@ int capable(int cap)
                BUG();
        }
 
-       if (security_capable(cap) == 0) {
+       if (security_capable(current_cred(), cap) == 0) {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
index 4349935..e92e981 100644 (file)
@@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
                return -ENODEV;
 
        trialcs = alloc_trial_cpuset(cs);
-       if (!trialcs)
-               return -ENOMEM;
+       if (!trialcs) {
+               retval = -ENOMEM;
+               goto out;
+       }
 
        switch (cft->private) {
        case FILE_CPULIST:
@@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
        }
 
        free_trial_cpuset(trialcs);
+out:
        cgroup_unlock();
        return retval;
 }
index 6a1aa00..3a9d6dd 100644 (file)
@@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void)
 #endif
 
        atomic_set(&new->usage, 1);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+       new->magic = CRED_MAGIC;
+#endif
 
        if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
                goto error;
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-       new->magic = CRED_MAGIC;
-#endif
        return new;
 
 error:
@@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        validate_creds(old);
 
        *new = *old;
+       atomic_set(&new->usage, 1);
+       set_cred_subscribers(new, 0);
        get_uid(new->user);
        get_group_info(new->group_info);
 
@@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
                goto error;
 
-       atomic_set(&new->usage, 1);
-       set_cred_subscribers(new, 0);
        put_cred(old);
        validate_creds(new);
        return new;
@@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred)
        if (cred->magic != CRED_MAGIC)
                return true;
 #ifdef CONFIG_SECURITY_SELINUX
-       if (selinux_is_enabled()) {
+       /*
+        * cred->security == NULL if security_cred_alloc_blank() or
+        * security_prepare_creds() returned an error.
+        */
+       if (selinux_is_enabled() && cred->security) {
                if ((unsigned long) cred->security < PAGE_SIZE)
                        return true;
                if ((*(u32 *)cred->security & 0xffffff00) ==
index 4571ae7..99c3bc8 100644 (file)
@@ -3,6 +3,12 @@
  */
 #include <linux/irqdesc.h>
 
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS       (NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS       NR_IRQS
+#endif
+
 extern int noirqdebug;
 
 #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
index 282f202..2039bea 100644 (file)
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
@@ -217,6 +217,15 @@ int __init early_irq_init(void)
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
 
+       if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+               nr_irqs = IRQ_BITMAP_BITS;
+
+       if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+               initcnt = IRQ_BITMAP_BITS;
+
+       if (initcnt > nr_irqs)
+               nr_irqs = initcnt;
+
        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
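
The two hunks above size the allocation bitmap at IRQ_BITMAP_BITS and clamp the architecture-supplied counts to it before marking the preallocated descriptors. A small stand-alone sketch of that clamp-then-mark pattern, with an arbitrary bitmap size and hypothetical names (early_init, allocated):

#include <limits.h>
#include <stdio.h>

#define BITMAP_BITS   1024
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long allocated[(BITMAP_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void mark_bit(unsigned int nr)
{
        allocated[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Clamp externally supplied counts to the size of the backing bitmap,
 * warning when a clamp happens, then mark the preallocated entries. */
static int early_init(int nr_requested, int initcnt)
{
        int i;

        if (nr_requested > BITMAP_BITS) {
                fprintf(stderr, "clamping nr_requested %d -> %d\n",
                        nr_requested, BITMAP_BITS);
                nr_requested = BITMAP_BITS;
        }
        if (initcnt > BITMAP_BITS) {
                fprintf(stderr, "clamping initcnt %d -> %d\n",
                        initcnt, BITMAP_BITS);
                initcnt = BITMAP_BITS;
        }
        if (initcnt > nr_requested)
                nr_requested = initcnt;

        for (i = 0; i < initcnt; i++)
                mark_bit(i);
        return nr_requested;
}

int main(void)
{
        printf("nr = %d\n", early_init(2000, 16));
        return 0;
}
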
index 538fce2..b0c9005 100644 (file)
@@ -1182,7 +1182,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        if (retval)
                kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
index 1d25419..441fd62 100644 (file)
@@ -56,6 +56,7 @@ void move_masked_irq(int irq)
 void move_native_irq(int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
+       bool masked;
 
        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;
@@ -63,8 +64,15 @@ void move_native_irq(int irq)
        if (unlikely(desc->status & IRQ_DISABLED))
                return;
 
-       desc->irq_data.chip->irq_mask(&desc->irq_data);
+       /*
+        * Be careful vs. already masked interrupts. If this is a
+        * threaded interrupt with ONESHOT set, we can end up with an
+        * interrupt storm.
+        */
+       masked = desc->status & IRQ_MASKED;
+       if (!masked)
+               desc->irq_data.chip->irq_mask(&desc->irq_data);
        move_masked_irq(irq);
-       desc->irq_data.chip->irq_unmask(&desc->irq_data);
+       if (!masked)
+               desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
-
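
The move_native_irq() change records whether the line was already masked and only masks/unmasks when it was not, so an interrupt left masked by a oneshot threaded handler stays masked and cannot storm. A toy user-space illustration of the same only-undo-what-you-did rule (struct line and move_line are invented names):

#include <stdbool.h>
#include <stdio.h>

struct line {
        bool masked;
};

static void mask(struct line *l)   { l->masked = true;  puts("mask");   }
static void unmask(struct line *l) { l->masked = false; puts("unmask"); }

/* Only undo the masking we performed ourselves: if the line was already
 * masked by someone else, leave it masked on the way out. */
static void move_line(struct line *l)
{
        bool was_masked = l->masked;

        if (!was_masked)
                mask(l);

        /* ... retarget the line while it cannot fire ... */

        if (!was_masked)
                unmask(l);
}

int main(void)
{
        struct line l = { .masked = true };

        move_line(&l);          /* prints nothing: stays masked */
        l.masked = false;
        move_line(&l);          /* prints mask / unmask */
        return 0;
}
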
index 891115a..dc49358 100644 (file)
@@ -23,7 +23,7 @@
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 
 /*
  * Run software resends of IRQ's
index 34e00b7..efa290e 100644 (file)
@@ -2460,9 +2460,9 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
-       mod->tracepoints = section_objs(info, "__tracepoints",
-                                       sizeof(*mod->tracepoints),
-                                       &mod->num_tracepoints);
+       mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
+                                            sizeof(*mod->tracepoints_ptrs),
+                                            &mod->num_tracepoints);
 #endif
 #ifdef HAVE_JUMP_LABEL
        mod->jump_entries = section_objs(info, "__jump_table",
@@ -3393,7 +3393,7 @@ void module_layout(struct module *mod,
                   struct modversion_info *ver,
                   struct kernel_param *kp,
                   struct kernel_symbol *ks,
-                  struct tracepoint *tp)
+                  struct tracepoint * const *tp)
 {
 }
 EXPORT_SYMBOL(module_layout);
@@ -3407,8 +3407,8 @@ void module_update_tracepoints(void)
        mutex_lock(&module_mutex);
        list_for_each_entry(mod, &modules, list)
                if (!mod->taints)
-                       tracepoint_update_probe_range(mod->tracepoints,
-                               mod->tracepoints + mod->num_tracepoints);
+                       tracepoint_update_probe_range(mod->tracepoints_ptrs,
+                               mod->tracepoints_ptrs + mod->num_tracepoints);
        mutex_unlock(&module_mutex);
 }
 
@@ -3432,8 +3432,8 @@ int module_get_iter_tracepoints(struct tracepoint_iter *iter)
                        else if (iter_mod > iter->module)
                                iter->tracepoint = NULL;
                        found = tracepoint_get_iter_range(&iter->tracepoint,
-                               iter_mod->tracepoints,
-                               iter_mod->tracepoints
+                               iter_mod->tracepoints_ptrs,
+                               iter_mod->tracepoints_ptrs
                                        + iter_mod->num_tracepoints);
                        if (found) {
                                iter->module = iter_mod;
index 08107d1..0da1411 100644 (file)
@@ -719,9 +719,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
                        params[i].ops->free(params[i].arg);
 }
 
-static void __init kernel_add_sysfs_param(const char *name,
-                                         struct kernel_param *kparam,
-                                         unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
 {
        struct module_kobject *mk;
        struct kobject *kobj;
@@ -729,10 +727,7 @@ static void __init kernel_add_sysfs_param(const char *name,
 
        kobj = kset_find_obj(module_kset, name);
        if (kobj) {
-               /* We already have one.  Remove params so we can add more. */
                mk = to_module_kobject(kobj);
-               /* We need to remove it before adding parameters. */
-               sysfs_remove_group(&mk->kobj, &mk->mp->grp);
        } else {
                mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
                BUG_ON(!mk);
@@ -743,15 +738,36 @@ static void __init kernel_add_sysfs_param(const char *name,
                                           "%s", name);
                if (err) {
                        kobject_put(&mk->kobj);
-                       printk(KERN_ERR "Module '%s' failed add to sysfs, "
-                              "error number %d\n", name, err);
-                       printk(KERN_ERR "The system will be unstable now.\n");
-                       return;
+                       printk(KERN_ERR
+                               "Module '%s' failed add to sysfs, error number %d\n",
+                               name, err);
+                       printk(KERN_ERR
+                               "The system will be unstable now.\n");
+                       return NULL;
                }
-               /* So that exit path is even. */
+
+               /* So that we hold reference in both cases. */
                kobject_get(&mk->kobj);
        }
 
+       return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+                                         struct kernel_param *kparam,
+                                         unsigned int name_skip)
+{
+       struct module_kobject *mk;
+       int err;
+
+       mk = locate_module_kobject(name);
+       if (!mk)
+               return;
+
+       /* We need to remove old parameters before adding more. */
+       if (mk->mp)
+               sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
        /* These should not fail at boot. */
        err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
        BUG_ON(err);
@@ -796,6 +812,32 @@ static void __init param_sysfs_builtin(void)
        }
 }
 
+ssize_t __modver_version_show(struct module_attribute *mattr,
+                             struct module *mod, char *buf)
+{
+       struct module_version_attribute *vattr =
+               container_of(mattr, struct module_version_attribute, mattr);
+
+       return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+       const struct module_version_attribute *vattr;
+       struct module_kobject *mk;
+       int err;
+
+       for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+               mk = locate_module_kobject(vattr->module_name);
+               if (mk) {
+                       err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+                       kobject_uevent(&mk->kobj, KOBJ_ADD);
+                       kobject_put(&mk->kobj);
+               }
+       }
+}
 
 /* module-related sysfs stuff */
 
@@ -875,6 +917,7 @@ static int __init param_sysfs_init(void)
        }
        module_sysfs_initialized = 1;
 
+       version_sysfs_builtin();
        param_sysfs_builtin();
 
        return 0;
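
The params.c hunks factor the kobject lookup into locate_module_kobject() so both the parameter code and the new version attributes can find-or-create the same per-module object. A user-space sketch of that find-or-create registry, using a plain linked list and hypothetical names (locate_entry, registry) rather than the kobject API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
        char name[32];
        struct entry *next;
};

static struct entry *registry;

/* Look up an entry by name, creating it on first use, so several
 * callers (parameters, version attributes, ...) share one object. */
static struct entry *locate_entry(const char *name)
{
        struct entry *e;

        for (e = registry; e; e = e->next)
                if (!strcmp(e->name, name))
                        return e;

        e = calloc(1, sizeof(*e));
        if (!e)
                return NULL;
        snprintf(e->name, sizeof(e->name), "%s", name);
        e->next = registry;
        registry = e;
        return e;
}

int main(void)
{
        struct entry *a = locate_entry("printk");
        struct entry *b = locate_entry("printk");

        printf("same object: %s\n", a == b ? "yes" : "no");
        return 0;
}
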
index 84522c7..656222f 100644 (file)
@@ -782,6 +782,10 @@ retry:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
        event->state = PERF_EVENT_STATE_ACTIVE;
        event->oncpu = smp_processor_id();
+
+       /*
+        * Unthrottle events, since we scheduled we might have missed several
+        * ticks already, also for a heavily scheduling task there is little
+        * guarantee it'll get a tick in a timely manner.
+        */
+       if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+               perf_log_throttle(event, 1);
+               event->hw.interrupts = 0;
+       }
+
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
        }
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
        u64 frequency = event->attr.sample_freq;
@@ -1901,11 +1912,12 @@ static void __perf_event_read(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       update_context_time(ctx);
+       if (ctx->is_active)
+               update_context_time(ctx);
        update_event_times(event);
+       if (event->state == PERF_EVENT_STATE_ACTIVE)
+               event->pmu->read(event);
        raw_spin_unlock(&ctx->lock);
-
-       event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1999,8 +2011,7 @@ static int alloc_callchain_buffers(void)
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
-       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-               num_possible_cpus();
+       size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
@@ -2201,13 +2212,6 @@ find_lively_task_by_vpid(pid_t vpid)
        if (!task)
                return ERR_PTR(-ESRCH);
 
-       /*
-        * Can't attach events to a dying task.
-        */
-       err = -ESRCH;
-       if (task->flags & PF_EXITING)
-               goto errout;
-
        /* Reuse ptrace permission checks for now. */
        err = -EACCES;
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2268,14 +2272,27 @@ retry:
 
                get_ctx(ctx);
 
-               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-                       /*
-                        * We raced with some other task; use
-                        * the context they set.
-                        */
+               err = 0;
+               mutex_lock(&task->perf_event_mutex);
+               /*
+                * If it has already passed perf_event_exit_task().
+                * we must see PF_EXITING, it takes this mutex too.
+                */
+               if (task->flags & PF_EXITING)
+                       err = -ESRCH;
+               else if (task->perf_event_ctxp[ctxn])
+                       err = -EAGAIN;
+               else
+                       rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+               mutex_unlock(&task->perf_event_mutex);
+
+               if (unlikely(err)) {
                        put_task_struct(task);
                        kfree(ctx);
-                       goto retry;
+
+                       if (err == -EAGAIN)
+                               goto retry;
+                       goto errout;
                }
        }
 
@@ -5374,6 +5391,8 @@ free_dev:
        goto out;
 }
 
+static struct lock_class_key cpuctx_mutex;
+
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
        int cpu, ret;
@@ -5422,6 +5441,7 @@ skip_type:
 
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                __perf_event_init_context(&cpuctx->ctx);
+               lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
                cpuctx->ctx.type = cpu_context;
                cpuctx->ctx.pmu = pmu;
                cpuctx->jiffies_interval = 1;
@@ -6127,7 +6147,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp[ctxn];
+       child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
        task_ctx_sched_out(child_ctx, EVENT_ALL);
 
        /*
@@ -6440,11 +6460,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        unsigned long flags;
        int ret = 0;
 
-       child->perf_event_ctxp[ctxn] = NULL;
-
-       mutex_init(&child->perf_event_mutex);
-       INIT_LIST_HEAD(&child->perf_event_list);
-
        if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
 
@@ -6533,6 +6548,10 @@ int perf_event_init_task(struct task_struct *child)
 {
        int ctxn, ret;
 
+       memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+       mutex_init(&child->perf_event_mutex);
+       INIT_LIST_HEAD(&child->perf_event_list);
+
        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn);
                if (ret)
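
The find_get_context() change drops the lock-free cmpxchg install and instead publishes the new context under task->perf_event_mutex, failing with -ESRCH for an exiting task and retrying on -EAGAIN when another installer won the race. A simplified pthread sketch of that publish-under-mutex-with-retry shape (struct task and get_context here are illustrative, not the perf API):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
        pthread_mutex_t lock;
        bool exiting;
        void *ctx;              /* published context, NULL until installed */
};

/* Install a freshly allocated context, unless the task is exiting
 * (-ESRCH) or another installer beat us to it (-EAGAIN -> retry). */
static void *get_context(struct task *t)
{
        void *ctx;
        int err;

retry:
        pthread_mutex_lock(&t->lock);
        ctx = t->ctx;
        pthread_mutex_unlock(&t->lock);
        if (ctx)
                return ctx;

        ctx = malloc(64);
        if (!ctx)
                return NULL;

        err = 0;
        pthread_mutex_lock(&t->lock);
        if (t->exiting)
                err = -ESRCH;
        else if (t->ctx)
                err = -EAGAIN;
        else
                t->ctx = ctx;
        pthread_mutex_unlock(&t->lock);

        if (err) {
                free(ctx);
                if (err == -EAGAIN)
                        goto retry;
                return NULL;
        }
        return ctx;
}

int main(void)
{
        struct task t = { PTHREAD_MUTEX_INITIALIZER, false, NULL };

        printf("%p\n", get_context(&t));
        return 0;
}
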
index 7b5db6a..7018530 100644 (file)
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
 
 static int __init pm_start_workqueue(void)
 {
-       pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+       pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
        return pm_wq ? 0 : -ENOMEM;
 }
index d6d2a10..0cf3a27 100644 (file)
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT        (20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
        if ((p == current) ||
            (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
                todo = 0;
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
-                       if (frozen(p) || !freezeable(p))
+                       if (frozen(p) || !freezable(p))
                                continue;
 
                        if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
 
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
-               if (!freezeable(p))
+               if (!freezable(p))
                        continue;
 
                if (nosig_only && should_send_signal(p))
index 0dac75e..64db648 100644 (file)
@@ -1519,11 +1519,8 @@ static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
                unsigned int nr_pages, unsigned int nr_highmem)
 {
-       int error = 0;
-
        if (nr_highmem > 0) {
-               error = get_highmem_buffer(PG_ANY);
-               if (error)
+               if (get_highmem_buffer(PG_ANY))
                        goto err_out;
                if (nr_highmem > alloc_highmem) {
                        nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 
  err_out:
        swsusp_free();
-       return error;
+       return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
index 53d9a9e..3623152 100644 (file)
@@ -97,7 +97,7 @@ static int console_locked, console_suspended;
 /*
  * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
  * It is also used in interesting ways to provide interlocking in
- * release_console_sem().
+ * console_unlock().
  */
 static DEFINE_SPINLOCK(logbuf_lock);
 
@@ -262,25 +262,47 @@ int dmesg_restrict = 1;
 int dmesg_restrict;
 #endif
 
+static int syslog_action_restricted(int type)
+{
+       if (dmesg_restrict)
+               return 1;
+       /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
+       return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+       /*
+        * If this is from /proc/kmsg and we've already opened it, then we've
+        * already done the capabilities checks at open time.
+        */
+       if (from_file && type != SYSLOG_ACTION_OPEN)
+               return 0;
+
+       if (syslog_action_restricted(type)) {
+               if (capable(CAP_SYSLOG))
+                       return 0;
+               /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+               if (capable(CAP_SYS_ADMIN)) {
+                       WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+                                "but no CAP_SYSLOG (deprecated).\n");
+                       return 0;
+               }
+               return -EPERM;
+       }
+       return 0;
+}
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
        unsigned i, j, limit, count;
        int do_clear = 0;
        char c;
-       int error = 0;
+       int error;
 
-       /*
-        * If this is from /proc/kmsg we only do the capabilities checks
-        * at open time.
-        */
-       if (type == SYSLOG_ACTION_OPEN || !from_file) {
-               if (dmesg_restrict && !capable(CAP_SYSLOG))
-                       goto warn; /* switch to return -EPERM after 2.6.39 */
-               if ((type != SYSLOG_ACTION_READ_ALL &&
-                    type != SYSLOG_ACTION_SIZE_BUFFER) &&
-                   !capable(CAP_SYSLOG))
-                       goto warn; /* switch to return -EPERM after 2.6.39 */
-       }
+       error = check_syslog_permissions(type, from_file);
+       if (error)
+               goto out;
 
        error = security_syslog(type);
        if (error)
@@ -423,12 +445,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
        }
 out:
        return error;
-warn:
-       /* remove after 2.6.39 */
-       if (capable(CAP_SYS_ADMIN))
-               WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-                 "but no CAP_SYSLOG (deprecated and denied).\n");
-       return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
@@ -501,7 +517,7 @@ static void _call_console_drivers(unsigned start,
 /*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
- * The console_sem must be held.
+ * The console_lock must be held.
  */
 static void call_console_drivers(unsigned start, unsigned end)
 {
@@ -604,11 +620,11 @@ static int have_callable_console(void)
  *
  * This is printk().  It can be called from any context.  We want it to work.
  *
- * We try to grab the console_sem.  If we succeed, it's easy - we log the output and
+ * We try to grab the console_lock.  If we succeed, it's easy - we log the output and
  * call the console drivers.  If we fail to get the semaphore we place the output
  * into the log buffer and return.  The current holder of the console_sem will
- * notice the new output in release_console_sem() and will send it to the
- * consoles before releasing the semaphore.
+ * notice the new output in console_unlock() and will send it to the
+ * consoles before releasing the lock.
  *
  * One effect of this deferred printing is that code which calls printk() and
  * then changes console_loglevel may break. This is because console_loglevel
@@ -659,19 +675,19 @@ static inline int can_use_console(unsigned int cpu)
 /*
  * Try to get console ownership to actually show the kernel
  * messages from a 'printk'. Return true (and with the
- * console_semaphore held, and 'console_locked' set) if it
+ * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  *
  * This gets called with the 'logbuf_lock' spinlock held and
  * interrupts disabled. It should return with 'lockbuf_lock'
  * released but interrupts still disabled.
  */
-static int acquire_console_semaphore_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu)
        __releases(&logbuf_lock)
 {
        int retval = 0;
 
-       if (!try_acquire_console_sem()) {
+       if (console_trylock()) {
                retval = 1;
 
                /*
@@ -827,12 +843,12 @@ asmlinkage int vprintk(const char *fmt, va_list args)
         * actual magic (print out buffers, wake up klogd,
         * etc). 
         *
-        * The acquire_console_semaphore_for_printk() function
+        * The console_trylock_for_printk() function
         * will release 'logbuf_lock' regardless of whether it
         * actually gets the semaphore or not.
         */
-       if (acquire_console_semaphore_for_printk(this_cpu))
-               release_console_sem();
+       if (console_trylock_for_printk(this_cpu))
+               console_unlock();
 
        lockdep_on();
 out_restore_irqs:
@@ -993,7 +1009,7 @@ void suspend_console(void)
        if (!console_suspend_enabled)
                return;
        printk("Suspending console(s) (use no_console_suspend to debug)\n");
-       acquire_console_sem();
+       console_lock();
        console_suspended = 1;
        up(&console_sem);
 }
@@ -1004,7 +1020,7 @@ void resume_console(void)
                return;
        down(&console_sem);
        console_suspended = 0;
-       release_console_sem();
+       console_unlock();
 }
 
 /**
@@ -1027,21 +1043,21 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
        case CPU_DYING:
        case CPU_DOWN_FAILED:
        case CPU_UP_CANCELED:
-               acquire_console_sem();
-               release_console_sem();
+               console_lock();
+               console_unlock();
        }
        return NOTIFY_OK;
 }
 
 /**
- * acquire_console_sem - lock the console system for exclusive use.
+ * console_lock - lock the console system for exclusive use.
  *
- * Acquires a semaphore which guarantees that the caller has
+ * Acquires a lock which guarantees that the caller has
  * exclusive access to the console system and the console_drivers list.
  *
  * Can sleep, returns nothing.
  */
-void acquire_console_sem(void)
+void console_lock(void)
 {
        BUG_ON(in_interrupt());
        down(&console_sem);
@@ -1050,21 +1066,29 @@ void acquire_console_sem(void)
        console_locked = 1;
        console_may_schedule = 1;
 }
-EXPORT_SYMBOL(acquire_console_sem);
+EXPORT_SYMBOL(console_lock);
 
-int try_acquire_console_sem(void)
+/**
+ * console_trylock - try to lock the console system for exclusive use.
+ *
+ * Tries to acquire a lock which guarantees that the caller has
+ * exclusive access to the console system and the console_drivers list.
+ *
+ * returns 1 on success, and 0 on failure to acquire the lock.
+ */
+int console_trylock(void)
 {
        if (down_trylock(&console_sem))
-               return -1;
+               return 0;
        if (console_suspended) {
                up(&console_sem);
-               return -1;
+               return 0;
        }
        console_locked = 1;
        console_may_schedule = 0;
-       return 0;
+       return 1;
 }
-EXPORT_SYMBOL(try_acquire_console_sem);
+EXPORT_SYMBOL(console_trylock);
 
 int is_console_locked(void)
 {
@@ -1095,20 +1119,20 @@ void wake_up_klogd(void)
 }
 
 /**
- * release_console_sem - unlock the console system
+ * console_unlock - unlock the console system
  *
- * Releases the semaphore which the caller holds on the console system
+ * Releases the console_lock which the caller holds on the console system
  * and the console driver list.
  *
- * While the semaphore was held, console output may have been buffered
- * by printk().  If this is the case, release_console_sem() emits
- * the output prior to releasing the semaphore.
+ * While the console_lock was held, console output may have been buffered
+ * by printk().  If this is the case, console_unlock() emits
+ * the output prior to releasing the lock.
  *
  * If there is output waiting for klogd, we wake it up.
  *
- * release_console_sem() may be called from any context.
+ * console_unlock() may be called from any context.
  */
-void release_console_sem(void)
+void console_unlock(void)
 {
        unsigned long flags;
        unsigned _con_start, _log_end;
@@ -1141,7 +1165,7 @@ void release_console_sem(void)
        if (wake_klogd)
                wake_up_klogd();
 }
-EXPORT_SYMBOL(release_console_sem);
+EXPORT_SYMBOL(console_unlock);
 
 /**
  * console_conditional_schedule - yield the CPU if required
@@ -1150,7 +1174,7 @@ EXPORT_SYMBOL(release_console_sem);
  * if this CPU should yield the CPU to another task, do
  * so here.
  *
- * Must be called within acquire_console_sem().
+ * Must be called within console_lock().
  */
 void __sched console_conditional_schedule(void)
 {
@@ -1171,14 +1195,14 @@ void console_unblank(void)
                if (down_trylock(&console_sem) != 0)
                        return;
        } else
-               acquire_console_sem();
+               console_lock();
 
        console_locked = 1;
        console_may_schedule = 0;
        for_each_console(c)
                if ((c->flags & CON_ENABLED) && c->unblank)
                        c->unblank();
-       release_console_sem();
+       console_unlock();
 }
 
 /*
@@ -1189,7 +1213,7 @@ struct tty_driver *console_device(int *index)
        struct console *c;
        struct tty_driver *driver = NULL;
 
-       acquire_console_sem();
+       console_lock();
        for_each_console(c) {
                if (!c->device)
                        continue;
@@ -1197,7 +1221,7 @@ struct tty_driver *console_device(int *index)
                if (driver)
                        break;
        }
-       release_console_sem();
+       console_unlock();
        return driver;
 }
 
@@ -1208,17 +1232,17 @@ struct tty_driver *console_device(int *index)
  */
 void console_stop(struct console *console)
 {
-       acquire_console_sem();
+       console_lock();
        console->flags &= ~CON_ENABLED;
-       release_console_sem();
+       console_unlock();
 }
 EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
-       acquire_console_sem();
+       console_lock();
        console->flags |= CON_ENABLED;
-       release_console_sem();
+       console_unlock();
 }
 EXPORT_SYMBOL(console_start);
 
@@ -1340,7 +1364,7 @@ void register_console(struct console *newcon)
         *      Put this console in the list - keep the
         *      preferred driver at the head of the list.
         */
-       acquire_console_sem();
+       console_lock();
        if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
                newcon->next = console_drivers;
                console_drivers = newcon;
@@ -1352,14 +1376,14 @@ void register_console(struct console *newcon)
        }
        if (newcon->flags & CON_PRINTBUFFER) {
                /*
-                * release_console_sem() will print out the buffered messages
+                * console_unlock() will print out the buffered messages
                 * for us.
                 */
                spin_lock_irqsave(&logbuf_lock, flags);
                con_start = log_start;
                spin_unlock_irqrestore(&logbuf_lock, flags);
        }
-       release_console_sem();
+       console_unlock();
        console_sysfs_notify();
 
        /*
@@ -1396,7 +1420,7 @@ int unregister_console(struct console *console)
                return braille_unregister_console(console);
 #endif
 
-       acquire_console_sem();
+       console_lock();
        if (console_drivers == console) {
                console_drivers=console->next;
                res = 0;
@@ -1418,7 +1442,7 @@ int unregister_console(struct console *console)
        if (console_drivers != NULL && console->flags & CON_CONSDEV)
                console_drivers->flags |= CON_CONSDEV;
 
-       release_console_sem();
+       console_unlock();
        console_sysfs_notify();
        return res;
 }
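
check_syslog_permissions() above splits the policy into "is this action restricted" and "does the caller hold CAP_SYSLOG, or legacy CAP_SYS_ADMIN with a warning", and skips the check for reads through an already-open /proc/kmsg. A compact user-space model of just that decision logic, with stubbed capability checks and invented names (check_permissions, ACT_*):

#include <stdbool.h>
#include <stdio.h>

enum action { ACT_OPEN, ACT_READ_ALL, ACT_SIZE_BUFFER, ACT_CLEAR };

/* Stand-ins for capability checks; always false in this sketch. */
static bool capable_syslog(void)    { return false; }
static bool capable_sys_admin(void) { return false; }

static bool restricted;             /* mirrors a dmesg_restrict-style knob */

static bool action_restricted(enum action type)
{
        if (restricted)
                return true;
        /* Unless restricted, "read all" and "get buffer size" are open. */
        return type != ACT_READ_ALL && type != ACT_SIZE_BUFFER;
}

/* 0 = allowed, -1 = permission denied. Requests arriving through an
 * already-open descriptor skip the check (from_file && !ACT_OPEN). */
static int check_permissions(enum action type, bool from_file)
{
        if (from_file && type != ACT_OPEN)
                return 0;
        if (!action_restricted(type))
                return 0;
        if (capable_syslog())
                return 0;
        if (capable_sys_admin()) {
                fprintf(stderr, "warning: legacy capability used\n");
                return 0;
        }
        return -1;
}

int main(void)
{
        restricted = true;
        printf("clear allowed: %d\n", check_permissions(ACT_CLEAR, false));
        printf("read via fd:   %d\n", check_permissions(ACT_READ_ALL, true));
        return 0;
}
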
index 99bbaa3..e2302e4 100644 (file)
@@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
        return !err;
 }
 
-int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task)
 {
        int retval;
 
@@ -219,7 +219,7 @@ out:
  * Performs checks and sets PT_PTRACED.
  * Should be used by all ptrace implementations for PTRACE_TRACEME.
  */
-int ptrace_traceme(void)
+static int ptrace_traceme(void)
 {
        int ret = -EPERM;
 
@@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
        return false;
 }
 
-int ptrace_detach(struct task_struct *child, unsigned int data)
+static int ptrace_detach(struct task_struct *child, unsigned int data)
 {
        bool dead = false;
 
@@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
                if (!child->exit_state)
-                       wake_up_process(child);
+                       wake_up_state(child, TASK_TRACED | TASK_STOPPED);
        }
        write_unlock_irq(&tasklist_lock);
 
index 77e9166..0c26e2d 100644 (file)
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
                                            int global_update)
 {
@@ -721,10 +722,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
        u64 now, delta;
        unsigned long load = cfs_rq->load.weight;
 
-       if (!cfs_rq)
+       if (cfs_rq->tg == &root_task_group)
                return;
 
-       now = rq_of(cfs_rq)->clock;
+       now = rq_of(cfs_rq)->clock_task;
        delta = now - cfs_rq->load_stamp;
 
        /* truncate load history at 4 idle periods */
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+                               long weight_delta)
+{
+       long load_weight, load, shares;
+
+       load = cfs_rq->load.weight + weight_delta;
+
+       load_weight = atomic_read(&tg->load_weight);
+       load_weight -= cfs_rq->load_contribution;
+       load_weight += load;
+
+       shares = (tg->shares * load);
+       if (load_weight)
+               shares /= load_weight;
+
+       if (shares < MIN_SHARES)
+               shares = MIN_SHARES;
+       if (shares > tg->shares)
+               shares = tg->shares;
+
+       return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+       if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+               update_cfs_load(cfs_rq, 0);
+               update_cfs_shares(cfs_rq, 0);
+       }
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+                               long weight_delta)
+{
+       return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
@@ -782,41 +828,20 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
        struct task_group *tg;
        struct sched_entity *se;
-       long load_weight, load, shares;
-
-       if (!cfs_rq)
-               return;
+       long shares;
 
        tg = cfs_rq->tg;
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se)
                return;
-
-       load = cfs_rq->load.weight + weight_delta;
-
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight -= cfs_rq->load_contribution;
-       load_weight += load;
-
-       shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
-
-       if (shares < MIN_SHARES)
-               shares = MIN_SHARES;
-       if (shares > tg->shares)
-               shares = tg->shares;
+#ifndef CONFIG_SMP
+       if (likely(se->load.weight == tg->shares))
+               return;
+#endif
+       shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
        reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-       if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-               update_cfs_load(cfs_rq, 0);
-               update_cfs_shares(cfs_rq, 0);
-       }
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1404,7 +1429,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-       unsigned long this_load, load;
+       s64 this_load, load;
        int idx, this_cpu, prev_cpu;
        unsigned long tl_per_task;
        struct task_group *tg;
@@ -1443,8 +1468,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * Otherwise check if either cpus are near enough in load to allow this
         * task to be woken on this_cpu.
         */
-       if (this_load) {
-               unsigned long this_eff_load, prev_eff_load;
+       if (this_load > 0) {
+               s64 this_eff_load, prev_eff_load;
 
                this_eff_load = 100;
                this_eff_load *= power_of(prev_cpu);
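
calc_cfs_shares() above computes a group entity's weight as the group's shares scaled by this runqueue's load over the group-wide load, clamped between MIN_SHARES and the group's shares. A stand-alone arithmetic sketch of that formula (calc_shares and its parameter names are illustrative):

#include <stdio.h>

#define MIN_SHARES 2

/* shares = group_shares * local_load / total_load, clamped to
 * [MIN_SHARES, group_shares]; total load is adjusted so the local
 * runqueue's contribution reflects its current load. */
static long calc_shares(long group_shares, long total_load,
                        long local_contrib, long local_load)
{
        long load_weight = total_load - local_contrib + local_load;
        long shares = group_shares * local_load;

        if (load_weight)
                shares /= load_weight;

        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > group_shares)
                shares = group_shares;
        return shares;
}

int main(void)
{
        /* A group with 1024 shares, this runqueue carrying 1/4 of the load. */
        printf("%ld\n", calc_shares(1024, 4096, 1024, 1024));
        return 0;
}
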
index c914ec7..01f75a5 100644 (file)
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-       int this_cpu = smp_processor_id();
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;
 
-       rt_se = rt_rq->tg->rt_se[this_cpu];
+       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+       rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-       int this_cpu = smp_processor_id();
        struct sched_rt_entity *rt_se;
+       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-       rt_se = rt_rq->tg->rt_se[this_cpu];
+       rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
-               } else if (rt_rq->rt_nr_running)
+               } else if (rt_rq->rt_nr_running) {
                        idle = 0;
+                       if (!rt_rq_throttled(rt_rq))
+                               enqueue = 1;
+               }
 
                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
@@ -625,7 +629,7 @@ static void update_curr_rt(struct rq *rq)
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;
 
-       if (!task_has_rt_policy(curr))
+       if (curr->sched_class != &rt_sched_class)
                return;
 
        delta_exec = rq->clock_task - curr->se.exec_start;
index 31b71a2..18da702 100644 (file)
@@ -1385,7 +1385,8 @@ static int check_prlimit_permission(struct task_struct *task)
        const struct cred *cred = current_cred(), *tcred;
 
        tcred = __task_cred(task);
-       if ((cred->uid != tcred->euid ||
+       if (current != task &&
+           (cred->uid != tcred->euid ||
             cred->uid != tcred->suid ||
             cred->uid != tcred->uid  ||
             cred->gid != tcred->egid ||
index bc86bb3..4eed0af 100644 (file)
@@ -170,7 +170,8 @@ static int proc_taint(struct ctl_table *table, int write,
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static int __sysrq_enabled; /* Note: sysrq code ises it's own private copy */
+/* Note: sysrq code uses its own private copy */
+static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 
 static int sysrq_sysctl_handler(ctl_table *table, int write,
                                void __user *buffer, size_t *lenp,
@@ -193,9 +194,9 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
 static struct ctl_table root_table[];
 static struct ctl_table_root sysctl_table_root;
 static struct ctl_table_header root_table_header = {
-       .count = 1,
+       {{.count = 1,
        .ctl_table = root_table,
-       .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
+       .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}},
        .root = &sysctl_table_root,
        .set = &sysctl_table_root.default_set,
 };
@@ -1566,11 +1567,16 @@ void sysctl_head_get(struct ctl_table_header *head)
        spin_unlock(&sysctl_lock);
 }
 
+static void free_head(struct rcu_head *rcu)
+{
+       kfree(container_of(rcu, struct ctl_table_header, rcu));
+}
+
 void sysctl_head_put(struct ctl_table_header *head)
 {
        spin_lock(&sysctl_lock);
        if (!--head->count)
-               kfree(head);
+               call_rcu(&head->rcu, free_head);
        spin_unlock(&sysctl_lock);
 }
 
@@ -1947,10 +1953,10 @@ void unregister_sysctl_table(struct ctl_table_header * header)
        start_unregistering(header);
        if (!--header->parent->count) {
                WARN_ON(1);
-               kfree(header->parent);
+               call_rcu(&header->parent->rcu, free_head);
        }
        if (!--header->count)
-               kfree(header);
+               call_rcu(&header->rcu, free_head);
        spin_unlock(&sysctl_lock);
 }
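
The sysctl hunks defer freeing a ctl_table_header through call_rcu(), with free_head() recovering the header from the embedded rcu_head via container_of(). A user-space sketch of that embedded-callback-head shape; the RCU deferral itself is omitted and the callback simply runs inline (cb_head, put_header are invented names):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {
        void (*func)(struct cb_head *);
};

struct header {
        int count;
        struct cb_head cb;      /* embedded callback head */
};

/* Free callback: recover the enclosing header from the embedded head,
 * the same shape as free_head() above; a real kernel would hand this
 * to call_rcu() instead of invoking it directly. */
static void free_header(struct cb_head *cb)
{
        free(container_of(cb, struct header, cb));
}

static void put_header(struct header *h)
{
        if (!--h->count) {
                h->cb.func = free_header;
                h->cb.func(&h->cb);
        }
}

int main(void)
{
        struct header *h = calloc(1, sizeof(*h));

        if (!h)
                return 1;
        h->count = 1;
        put_header(h);
        puts("header released");
        return 0;
}
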
 
index 48b2761..a3b5aff 100644 (file)
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
 }
 
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+       struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+       return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
 #endif
index 051bc80..ed228ef 100644 (file)
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
 {
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-       return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+       if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+               return 0;
+       if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+               return 1;
+       return tick_broadcast_oneshot_available();
 }
 
 /*
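
tick_is_oneshot_available() now answers: no if the per-CPU device lacks ONESHOT, yes if it keeps ticking in deep idle (no C3STOP), otherwise defer to whether a oneshot-capable broadcast device exists. A tiny stand-alone model of that decision, with made-up feature flags and names (oneshot_available, FEAT_*):

#include <stdbool.h>
#include <stdio.h>

#define FEAT_ONESHOT 0x1
#define FEAT_C3STOP  0x2

struct clockdev {
        unsigned int features;
};

static bool broadcast_oneshot_available(const struct clockdev *bc)
{
        return bc ? bc->features & FEAT_ONESHOT : false;
}

/* Oneshot mode is usable only if the per-CPU device supports it and,
 * when that device stops in deep idle (C3STOP), a broadcast device
 * can take over. */
static bool oneshot_available(const struct clockdev *dev,
                              const struct clockdev *bc)
{
        if (!dev || !(dev->features & FEAT_ONESHOT))
                return false;
        if (!(dev->features & FEAT_C3STOP))
                return true;
        return broadcast_oneshot_available(bc);
}

int main(void)
{
        struct clockdev lapic = { FEAT_ONESHOT | FEAT_C3STOP };
        struct clockdev hpet  = { FEAT_ONESHOT };

        printf("%d %d\n", oneshot_available(&lapic, NULL),
                          oneshot_available(&lapic, &hpet));
        return 0;
}
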
index 290eefb..f65d3a7 100644 (file)
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
 extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
        return 0;
 }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
 /*
index 3e216e0..c55ea24 100644 (file)
@@ -642,8 +642,7 @@ static void tick_nohz_switch_to_nohz(void)
        }
        local_irq_enable();
 
-       printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
-              smp_processor_id());
+       printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -795,8 +794,10 @@ void tick_setup_sched_timer(void)
        }
 
 #ifdef CONFIG_NO_HZ
-       if (tick_nohz_enabled)
+       if (tick_nohz_enabled) {
                ts->nohz_mode = NOHZ_MODE_HIGHRES;
+               printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
+       }
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
index 32a19f9..3258455 100644 (file)
@@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym)
        char symname[KSYM_NAME_LEN];
 
        if (lookup_symbol_name((unsigned long)sym, symname) < 0)
-               SEQ_printf(m, "<%p>", sym);
+               SEQ_printf(m, "<%pK>", sym);
        else
                SEQ_printf(m, "%s", symname);
 }
@@ -112,7 +112,7 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
-       SEQ_printf(m, "  .base:       %p\n", base);
+       SEQ_printf(m, "  .base:       %pK\n", base);
        SEQ_printf(m, "  .index:      %d\n",
                        base->index);
        SEQ_printf(m, "  .resolution: %Lu nsecs\n",
index 43ca993..d645992 100644 (file)
@@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  *
  * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * hardirq contexts. The caller must not hold locks which would prevent
+ * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
@@ -969,10 +969,12 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
 int del_timer_sync(struct timer_list *timer)
 {
 #ifdef CONFIG_LOCKDEP
-       local_bh_disable();
+       unsigned long flags;
+
+       local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
-       local_bh_enable();
+       local_irq_restore(flags);
 #endif
        /*
         * don't use it in hardirq context, because it
index 153562d..cbafed7 100644 (file)
@@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
                     !blk_tracer_enabled))
                return;
 
+       /*
+        * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+        * message to the trace.
+        */
+       if (!(bt->act_mask & BLK_TC_NOTIFY))
+               return;
+
        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
@@ -1820,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
        rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-       int rw = rq->cmd_flags & 0x03;
-       int bytes;
-
-       if (rq->cmd_flags & REQ_DISCARD)
-               rw |= REQ_DISCARD;
-
-       if (rq->cmd_flags & REQ_SECURE)
-               rw |= REQ_SECURE;
-
-       bytes = blk_rq_bytes(rq);
-
-       blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
 
index 35fde09..5f499e0 100644 (file)
@@ -1284,7 +1284,7 @@ trace_create_file_ops(struct module *mod)
 static void trace_module_add_events(struct module *mod)
 {
        struct ftrace_module_file_ops *file_ops = NULL;
-       struct ftrace_event_call *call, *start, *end;
+       struct ftrace_event_call **call, **start, **end;
 
        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;
@@ -1297,7 +1297,7 @@ static void trace_module_add_events(struct module *mod)
                return;
 
        for_each_event(call, start, end) {
-               __trace_add_event_call(call, mod,
+               __trace_add_event_call(*call, mod,
                                       &file_ops->id, &file_ops->enable,
                                       &file_ops->filter, &file_ops->format);
        }
@@ -1367,8 +1367,8 @@ static struct notifier_block trace_module_nb = {
        .priority = 0,
 };
 
-extern struct ftrace_event_call __start_ftrace_events[];
-extern struct ftrace_event_call __stop_ftrace_events[];
+extern struct ftrace_event_call *__start_ftrace_events[];
+extern struct ftrace_event_call *__stop_ftrace_events[];
 
 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 
@@ -1384,7 +1384,7 @@ __setup("trace_event=", setup_trace_event);
 
 static __init int event_trace_init(void)
 {
-       struct ftrace_event_call *call;
+       struct ftrace_event_call **call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
@@ -1430,7 +1430,7 @@ static __init int event_trace_init(void)
                pr_warning("tracing: Failed to allocate common fields");
 
        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
-               __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
+               __trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
                                       &ftrace_enable_fops,
                                       &ftrace_event_filter_fops,
                                       &ftrace_event_format_fops);
index 4b74d71..bbeec31 100644 (file)
@@ -161,13 +161,13 @@ struct ftrace_event_class event_class_ftrace_##call = {                   \
        .fields                 = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
 };                                                                     \
                                                                        \
-struct ftrace_event_call __used                                                \
-__attribute__((__aligned__(4)))                                                \
-__attribute__((section("_ftrace_events"))) event_##call = {            \
+struct ftrace_event_call __used event_##call = {                       \
        .name                   = #call,                                \
        .event.type             = etype,                                \
        .class                  = &event_class_ftrace_##call,           \
        .print_fmt              = print,                                \
 };                                                                     \
+struct ftrace_event_call __used                                                \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
 #include "trace_entries.h"
index b706529..5c9fe08 100644 (file)
@@ -55,20 +55,21 @@ struct ftrace_event_class event_class_syscall_exit = {
        .raw_init       = init_syscall_trace,
 };
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
+extern struct syscall_metadata *__start_syscalls_metadata[];
+extern struct syscall_metadata *__stop_syscalls_metadata[];
 
 static struct syscall_metadata **syscalls_metadata;
 
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+static __init struct syscall_metadata *
+find_syscall_meta(unsigned long syscall)
 {
-       struct syscall_metadata *start;
-       struct syscall_metadata *stop;
+       struct syscall_metadata **start;
+       struct syscall_metadata **stop;
        char str[KSYM_SYMBOL_LEN];
 
 
-       start = (struct syscall_metadata *)__start_syscalls_metadata;
-       stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+       start = __start_syscalls_metadata;
+       stop = __stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 
        for ( ; start < stop; start++) {
@@ -78,8 +79,8 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
                 * with "SyS" instead of "sys", leading to an unwanted
                 * mismatch.
                 */
-               if (start->name && !strcmp(start->name + 3, str + 3))
-                       return start;
+               if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+                       return *start;
        }
        return NULL;
 }
index e95ee7f..68187af 100644 (file)
@@ -27,8 +27,8 @@
 #include <linux/sched.h>
 #include <linux/jump_label.h>
 
-extern struct tracepoint __start___tracepoints[];
-extern struct tracepoint __stop___tracepoints[];
+extern struct tracepoint * const __start___tracepoints_ptrs[];
+extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
@@ -298,10 +298,10 @@ static void disable_tracepoint(struct tracepoint *elem)
  *
  * Updates the probe callback corresponding to a range of tracepoints.
  */
-void
-tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+                                  struct tracepoint * const *end)
 {
-       struct tracepoint *iter;
+       struct tracepoint * const *iter;
        struct tracepoint_entry *mark_entry;
 
        if (!begin)
@@ -309,12 +309,12 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 
        mutex_lock(&tracepoints_mutex);
        for (iter = begin; iter < end; iter++) {
-               mark_entry = get_tracepoint(iter->name);
+               mark_entry = get_tracepoint((*iter)->name);
                if (mark_entry) {
-                       set_tracepoint(&mark_entry, iter,
+                       set_tracepoint(&mark_entry, *iter,
                                        !!mark_entry->refcount);
                } else {
-                       disable_tracepoint(iter);
+                       disable_tracepoint(*iter);
                }
        }
        mutex_unlock(&tracepoints_mutex);
@@ -326,8 +326,8 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 static void tracepoint_update_probes(void)
 {
        /* Core kernel tracepoints */
-       tracepoint_update_probe_range(__start___tracepoints,
-               __stop___tracepoints);
+       tracepoint_update_probe_range(__start___tracepoints_ptrs,
+               __stop___tracepoints_ptrs);
        /* tracepoints in modules. */
        module_update_tracepoints();
 }
@@ -514,8 +514,8 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
  * Will return the first tracepoint in the range if the input tracepoint is
  * NULL.
  */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-       struct tracepoint *begin, struct tracepoint *end)
+int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+       struct tracepoint * const *begin, struct tracepoint * const *end)
 {
        if (!*tracepoint && begin != end) {
                *tracepoint = begin;
@@ -534,7 +534,8 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
        /* Core kernel tracepoints */
        if (!iter->module) {
                found = tracepoint_get_iter_range(&iter->tracepoint,
-                               __start___tracepoints, __stop___tracepoints);
+                               __start___tracepoints_ptrs,
+                               __stop___tracepoints_ptrs);
                if (found)
                        goto end;
        }
@@ -585,8 +586,8 @@ int tracepoint_module_notify(struct notifier_block *self,
        switch (val) {
        case MODULE_STATE_COMING:
        case MODULE_STATE_GOING:
-               tracepoint_update_probe_range(mod->tracepoints,
-                       mod->tracepoints + mod->num_tracepoints);
+               tracepoint_update_probe_range(mod->tracepoints_ptrs,
+                       mod->tracepoints_ptrs + mod->num_tracepoints);
                break;
        }
        return 0;
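
These hunks switch the kernel to walking a section of pointers to tracepoints (__start/__stop___tracepoints_ptrs) instead of the structures themselves, so every iteration gains one dereference. A minimal user-space imitation of that pointer-array walk (tracepoint_ptrs and update_range are illustrative; the real array is linker-generated):

#include <stdio.h>

struct tracepoint {
        const char *name;
};

static struct tracepoint tp_a = { "sched_switch" };
static struct tracepoint tp_b = { "irq_handler_entry" };

/* Emulate the linker-built section: an array of pointers to
 * tracepoints, walked with an extra dereference per element. */
static struct tracepoint * const tracepoint_ptrs[] = { &tp_a, &tp_b };

static void update_range(struct tracepoint * const *begin,
                         struct tracepoint * const *end)
{
        struct tracepoint * const *iter;

        for (iter = begin; iter < end; iter++)
                printf("tracepoint: %s\n", (*iter)->name);
}

int main(void)
{
        update_range(tracepoint_ptrs,
                     tracepoint_ptrs + sizeof(tracepoint_ptrs) /
                                       sizeof(tracepoint_ptrs[0]));
        return 0;
}
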
index d7ebdf4..18bb157 100644 (file)
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "0", 1))
-               no_watchdog = 1;
+               watchdog_enabled = 0;
        return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-       no_watchdog = 1;
+       watchdog_enabled = 0;
        return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-       no_watchdog = 1;
+       watchdog_enabled = 0;
        return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -366,8 +363,14 @@ static int watchdog_nmi_enable(int cpu)
                goto out_save;
        }
 
-       printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n",
-              cpu, PTR_ERR(event));
+
+       /* vary the KERN level based on the returned errno */
+       if (PTR_ERR(event) == -EOPNOTSUPP)
+               printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+       else if (PTR_ERR(event) == -ENOENT)
+               printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
+       else
+               printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
        return PTR_ERR(event);
 
        /* success path */
@@ -432,9 +435,6 @@ static int watchdog_enable(int cpu)
                wake_up_process(p);
        }
 
-       /* if any cpu succeeds, watchdog is considered enabled for the system */
-       watchdog_enabled = 1;
-
        return 0;
 }
 
@@ -462,12 +462,16 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
        int cpu;
-       int result = 0;
+
+       watchdog_enabled = 0;
 
        for_each_online_cpu(cpu)
-               result += watchdog_enable(cpu);
+               if (!watchdog_enable(cpu))
+                       /* if any cpu succeeds, watchdog is considered
+                          enabled for the system */
+                       watchdog_enabled = 1;
 
-       if (result)
+       if (!watchdog_enabled)
                printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
@@ -476,9 +480,6 @@ static void watchdog_disable_all_cpus(void)
 {
        int cpu;
 
-       if (no_watchdog)
-               return;
-
        for_each_online_cpu(cpu)
                watchdog_disable(cpu);
 
@@ -498,10 +499,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
 {
        proc_dointvec(table, write, buffer, length, ppos);
 
-       if (watchdog_enabled)
-               watchdog_enable_all_cpus();
-       else
-               watchdog_disable_all_cpus();
+       if (write) {
+               if (watchdog_enabled)
+                       watchdog_enable_all_cpus();
+               else
+                       watchdog_disable_all_cpus();
+       }
        return 0;
 }
 
@@ -530,7 +533,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               err = watchdog_enable(hotcpu);
+               if (watchdog_enabled)
+                       err = watchdog_enable(hotcpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
@@ -555,9 +559,6 @@ void __init lockup_detector_init(void)
        void *cpu = (void *)(long)smp_processor_id();
        int err;
 
-       if (no_watchdog)
-               return;
-
        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(notifier_to_errno(err));
 
index 11869fa..ee6578b 100644 (file)
@@ -79,7 +79,9 @@ enum {
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
 
-       MAYDAY_INITIAL_TIMEOUT  = HZ / 100,     /* call for help after 10ms */
+       MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+                                               /* call for help after 10ms
+                                                  (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
        TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
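
The clamp on MAYDAY_INITIAL_TIMEOUT above matters because at HZ=100 the old value
HZ/100 is a single jiffy, and a one-jiffy timer may expire on the very next tick.
A small stand-alone C sketch of the arithmetic, just to show the resulting values:

#include <stdio.h>

int main(void)
{
        const int hz_values[] = { 100, 250, 300, 1000 };
        unsigned int i;

        for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                int hz = hz_values[i];
                int old = hz / 100;                     /* previous value   */
                int new = hz / 100 >= 2 ? hz / 100 : 2; /* clamped minimum  */

                printf("HZ=%4d: old=%2d tick(s), clamped=%2d ticks (~%d ms)\n",
                       hz, old, new, new * 1000 / hz);
        }
        return 0;
}
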
@@ -2047,6 +2049,15 @@ repeat:
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
+
+               /*
+                * Leave this gcwq.  If keep_working() is %true, notify a
+                * regular worker; otherwise, we end up with 0 concurrency
+                * and stalling the execution.
+                */
+               if (keep_working(gcwq))
+                       wake_up_worker(gcwq);
+
                spin_unlock_irq(&gcwq->lock);
        }
 
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
         */
        spin_lock(&workqueue_lock);
 
-       if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+       if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
                for_each_cwq_cpu(cpu, wq)
                        get_cwq(cpu, wq)->max_active = 0;
 
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
                spin_lock_irq(&gcwq->lock);
 
-               if (!(wq->flags & WQ_FREEZEABLE) ||
+               if (!(wq->flags & WQ_FREEZABLE) ||
                    !(gcwq->flags & GCWQ_FREEZING))
                        get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
         * want to get it over with ASAP - spam rescuers, wake up as
         * many idlers as necessary and create new ones till the
         * worklist is empty.  Note that if the gcwq is frozen, there
-        * may be frozen works in freezeable cwqs.  Don't declare
+        * may be frozen works in freezable cwqs.  Don't declare
         * completion while frozen.
         */
        while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (cwq && wq->flags & WQ_FREEZEABLE)
+                       if (cwq && wq->flags & WQ_FREEZABLE)
                                cwq->max_active = 0;
                }
 
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        /* restore max_active and repopulate worklist */
index 0ee67e0..3a55a43 100644 (file)
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
        bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
        depends on EXPERIMENTAL && BROKEN
 
+config CPU_RMAP
+       bool
+       depends on SMP
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
@@ -217,6 +221,13 @@ config LRU_CACHE
        tristate
 
 config AVERAGE
-       bool
+       bool "Averaging functions"
+       help
+         This option is provided for the case where no in-kernel-tree
+         modules require averaging functions, but a module built outside
+         the kernel tree does. Out-of-tree modules that use the library
+         averaging functions need this option set to Y.
+
+         If unsure, say N.
 
 endmenu
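
The CONFIG_AVERAGE help text above targets out-of-tree users of lib/average.o. A
minimal sketch of such a user follows; the ewma_init()/ewma_add()/ewma_read() helpers
and their signatures are assumptions taken from the era's <linux/average.h>, so check
the header of your tree before relying on them:

#include <linux/average.h>      /* assumed: ewma_init/ewma_add/ewma_read */
#include <linux/module.h>

static struct ewma sig_avg;

static int __init avg_demo_init(void)
{
        /* factor and weight are assumed to be powers of two (see lib/average.c) */
        ewma_init(&sig_avg, 1024, 8);

        ewma_add(&sig_avg, 50);
        ewma_add(&sig_avg, 70);

        pr_info("avg_demo: running average = %lu\n", ewma_read(&sig_avg));
        return 0;
}

static void __exit avg_demo_exit(void)
{
}

module_init(avg_demo_init);
module_exit(avg_demo_exit);
MODULE_LICENSE("GPL");
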
index 3967c23..2b97418 100644 (file)
@@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS
 config FRAME_POINTER
        bool "Compile the kernel with frame pointers"
        depends on DEBUG_KERNEL && \
-               (CRIS || M68K || M68KNOMMU || FRV || UML || \
+               (CRIS || M68K || FRV || UML || \
                 AVR32 || SUPERH || BLACKFIN || MN10300) || \
                ARCH_WANT_FRAME_POINTERS
        default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
index cbb774f..b73ba01 100644 (file)
@@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
 obj-$(CONFIG_AVERAGE) += average.o
 
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
 
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644 (file)
index 0000000..987acfa
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities.  This can be seen as a reverse-map of
+ * CPU affinity.  However, we do not assume that the object affinities
+ * cover all CPUs in the system.  For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+       struct cpu_rmap *rmap;
+       unsigned int cpu;
+       size_t obj_offset;
+
+       /* This is a silly number of objects, and we use u16 indices. */
+       if (size > 0xffff)
+               return NULL;
+
+       /* Offset of object pointer array from base structure */
+       obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+                          sizeof(void *));
+
+       rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+       if (!rmap)
+               return NULL;
+
+       rmap->obj = (void **)((char *)rmap + obj_offset);
+
+       /* Initially assign CPUs to objects on a rota, since we have
+        * no idea where the objects are.  Use infinite distance, so
+        * any object with known distance is preferable.  Include the
+        * CPUs that are not present/online, since we definitely want
+        * any newly-hotplugged CPUs to have some object assigned.
+        */
+       for_each_possible_cpu(cpu) {
+               rmap->near[cpu].index = cpu % size;
+               rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+       }
+
+       rmap->size = size;
+       return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+                               const struct cpumask *mask, u16 dist)
+{
+       int neigh;
+
+       for_each_cpu(neigh, mask) {
+               if (rmap->near[cpu].dist > dist &&
+                   rmap->near[neigh].dist <= dist) {
+                       rmap->near[cpu].index = rmap->near[neigh].index;
+                       rmap->near[cpu].dist = dist;
+                       return true;
+               }
+       }
+       return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+       unsigned index;
+       unsigned int cpu;
+
+       pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+       for_each_possible_cpu(cpu) {
+               index = rmap->near[cpu].index;
+               pr_info("cpu %d -> obj %u (distance %u)\n",
+                       cpu, index, rmap->near[cpu].dist);
+       }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+       u16 index;
+
+       BUG_ON(rmap->used >= rmap->size);
+       index = rmap->used++;
+       rmap->obj[index] = obj;
+       return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+                   const struct cpumask *affinity)
+{
+       cpumask_var_t update_mask;
+       unsigned int cpu;
+
+       if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+               return -ENOMEM;
+
+       /* Invalidate distance for all CPUs for which this used to be
+        * the nearest object.  Mark those CPUs for update.
+        */
+       for_each_online_cpu(cpu) {
+               if (rmap->near[cpu].index == index) {
+                       rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+                       cpumask_set_cpu(cpu, update_mask);
+               }
+       }
+
+       debug_print_rmap(rmap, "after invalidating old distances");
+
+       /* Set distance to 0 for all CPUs in the new affinity mask.
+        * Mark all CPUs within their NUMA nodes for update.
+        */
+       for_each_cpu(cpu, affinity) {
+               rmap->near[cpu].index = index;
+               rmap->near[cpu].dist = 0;
+               cpumask_or(update_mask, update_mask,
+                          cpumask_of_node(cpu_to_node(cpu)));
+       }
+
+       debug_print_rmap(rmap, "after updating neighbours");
+
+       /* Update distances based on topology */
+       for_each_cpu(cpu, update_mask) {
+               if (cpu_rmap_copy_neigh(rmap, cpu,
+                                       topology_thread_cpumask(cpu), 1))
+                       continue;
+               if (cpu_rmap_copy_neigh(rmap, cpu,
+                                       topology_core_cpumask(cpu), 2))
+                       continue;
+               if (cpu_rmap_copy_neigh(rmap, cpu,
+                                       cpumask_of_node(cpu_to_node(cpu)), 3))
+                       continue;
+               /* We could continue into NUMA node distances, but for now
+                * we give up.
+                */
+       }
+
+       debug_print_rmap(rmap, "after copying neighbours");
+
+       free_cpumask_var(update_mask);
+       return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+       struct irq_affinity_notify notify;
+       struct cpu_rmap *rmap;
+       u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+       struct irq_glue *glue;
+       u16 index;
+
+       if (!rmap)
+               return;
+
+       for (index = 0; index < rmap->used; index++) {
+               glue = rmap->obj[index];
+               irq_set_affinity_notifier(glue->notify.irq, NULL);
+       }
+       irq_run_affinity_notifiers();
+
+       kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+       struct irq_glue *glue =
+               container_of(notify, struct irq_glue, notify);
+       int rc;
+
+       rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+       if (rc)
+               pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+       struct irq_glue *glue =
+               container_of(ref, struct irq_glue, notify.kref);
+       kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+       struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       int rc;
+
+       if (!glue)
+               return -ENOMEM;
+       glue->notify.notify = irq_cpu_rmap_notify;
+       glue->notify.release = irq_cpu_rmap_release;
+       glue->rmap = rmap;
+       glue->index = cpu_rmap_add(rmap, glue);
+       rc = irq_set_affinity_notifier(irq, &glue->notify);
+       if (rc)
+               kfree(glue);
+       return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
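
A sketch of how a multiqueue driver might consume this new API, using only the
functions shown above (alloc_cpu_rmap(), irq_cpu_rmap_add(), free_irq_cpu_rmap());
the device structure and per-queue IRQ numbers are hypothetical. The per-CPU lookup
side is expected to live in <linux/cpu_rmap.h> and is not shown here:

#include <linux/cpu_rmap.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct demo_dev {
        struct cpu_rmap *rx_rmap;
        unsigned int nr_queues;
        int *irqs;                      /* one IRQ per RX queue (hypothetical) */
};

static int demo_setup_rmap(struct demo_dev *dev)
{
        unsigned int i;
        int rc;

        /* One map entry per RX queue; GFP_KERNEL as in the kernel-doc above */
        dev->rx_rmap = alloc_cpu_rmap(dev->nr_queues, GFP_KERNEL);
        if (!dev->rx_rmap)
                return -ENOMEM;

        /* Register each queue IRQ before request_irq(), as documented above */
        for (i = 0; i < dev->nr_queues; i++) {
                rc = irq_cpu_rmap_add(dev->rx_rmap, dev->irqs[i]);
                if (rc) {
                        free_irq_cpu_rmap(dev->rx_rmap);
                        dev->rx_rmap = NULL;
                        return rc;
                }
        }
        return 0;
}

static void demo_teardown_rmap(struct demo_dev *dev)
{
        /* Must run in process context, before the IRQs themselves are freed */
        free_irq_cpu_rmap(dev->rx_rmap);
        dev->rx_rmap = NULL;
}
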
index 344c710..b8029a5 100644 (file)
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);
 
+void __list_del_entry(struct list_head *entry)
+{
+       struct list_head *prev, *next;
+
+       prev = entry->prev;
+       next = entry->next;
+
+       if (WARN(next == LIST_POISON1,
+               "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+               entry, LIST_POISON1) ||
+           WARN(prev == LIST_POISON2,
+               "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+               entry, LIST_POISON2) ||
+           WARN(prev->next != entry,
+               "list_del corruption. prev->next should be %p, "
+               "but was %p\n", entry, prev->next) ||
+           WARN(next->prev != entry,
+               "list_del corruption. next->prev should be %p, "
+               "but was %p\n", entry, next->prev))
+               return;
+
+       __list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
-       WARN(entry->next == LIST_POISON1,
-               "list_del corruption, next is LIST_POISON1 (%p)\n",
-               LIST_POISON1);
-       WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-               "list_del corruption, prev is LIST_POISON2 (%p)\n",
-               LIST_POISON2);
-       WARN(entry->prev->next != entry,
-               "list_del corruption. prev->next should be %p, "
-               "but was %p\n", entry, entry->prev->next);
-       WARN(entry->next->prev != entry,
-               "list_del corruption. next->prev should be %p, "
-               "but was %p\n", entry, entry->next->prev);
-       __list_del(entry->prev, entry->next);
+       __list_del_entry(entry);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
 }
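
With __list_del_entry() the corruption WARNs now short-circuit and the unlink is
skipped entirely, so a double list_del() on a CONFIG_DEBUG_LIST kernel warns instead
of chasing poisoned pointers. A minimal sketch of that failure mode, with hypothetical
names and the surrounding module boilerplate omitted:

#include <linux/list.h>
#include <linux/kernel.h>

struct item {
        struct list_head link;
        int value;
};

static LIST_HEAD(demo_list);

static void demo_double_del(struct item *it)
{
        list_add(&it->link, &demo_list);

        list_del(&it->link);    /* ok: unlinks and poisons next/prev */

        /*
         * Bug: second deletion.  With CONFIG_DEBUG_LIST the new
         * __list_del_entry() sees LIST_POISON1/LIST_POISON2, emits a
         * single WARN and returns without touching any pointers,
         * instead of dereferencing the poison values.
         */
        list_del(&it->link);
}
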
index 5021cbc..ac09f22 100644 (file)
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
 {
        int i, len = 0;
 
-       for (i = 0; i < n; i++) {
+       for (i = 0; i < n; i++, p++) {
                if (p->len)
                        len += nla_total_size(p->len);
                else if (nla_attr_minlen[p->type])
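
Before this one-liner, p was never advanced, so nla_policy_len() summed n copies of
whatever policy[0] contributed. A worked example with a hypothetical two-attribute
policy (nla_total_size() adds the 4-byte attribute header and rounds up to 4 bytes):

#include <net/netlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_FLAG, DEMO_ATTR_NAME, __DEMO_ATTR_MAX };

static const struct nla_policy demo_policy[__DEMO_ATTR_MAX] = {
        [DEMO_ATTR_FLAG] = { .type = NLA_U8 },                  /* minlen 1 */
        [DEMO_ATTR_NAME] = { .type = NLA_STRING, .len = 16 },   /* len 16   */
};

/*
 * nla_policy_len(demo_policy, __DEMO_ATTR_MAX):
 *   before the fix: p stayed on entry 0 (the NLA_UNSPEC placeholder),
 *                   which contributes nothing, so the result was 0.
 *   after the fix:  0 + nla_total_size(1) + nla_total_size(16)
 *                   = 0 + 8 + 20 = 28 bytes of attribute space.
 */
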
index 5086bb9..7ea2e03 100644 (file)
@@ -736,10 +736,11 @@ next:
                }
        }
        /*
-        * The iftag must have been set somewhere because otherwise
-        * we would return immediated at the beginning of the function
+        * There is no need to set the root tag if no tag was set with
+        * settag within the range from *first_indexp to last_index.
         */
-       root_tag_set(root, settag);
+       if (tagged > 0)
+               root_tag_set(root, settag);
        *first_indexp = index;
 
        return tagged;
index 4693f79..a16be19 100644 (file)
@@ -315,6 +315,7 @@ void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
 
        rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_insert);
 
 /*
  * before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@ struct rb_node *rb_augment_erase_begin(struct rb_node *node)
 
        return deepest;
 }
+EXPORT_SYMBOL(rb_augment_erase_begin);
 
 /*
  * after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
        if (node)
                rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_erase_end);
 
 /*
  * This function returns the first node (in sort order) of the tree.
index c47bbe1..93ca08b 100644 (file)
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        /*
         * Ensure that the address returned is DMA'ble
         */
-       if (!dma_capable(dev, dev_addr, size))
-               panic("map_single: bounce buffer is not DMA'ble");
+       if (!dma_capable(dev, dev_addr, size)) {
+               swiotlb_tbl_unmap_single(dev, map, size, dir);
+               dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+       }
 
        return dev_addr;
 }
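
With the hunk above, swiotlb hands back the overflow buffer instead of panicking, so
the failure now surfaces to the driver through the usual DMA-API error check. A hedged
sketch of the caller-side pattern; the helper and its arguments are illustrative:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int demo_map_rx_page(struct device *dev, struct page *page,
                            size_t len, dma_addr_t *out)
{
        dma_addr_t addr;

        addr = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

        /*
         * With the change above, an un-DMA'ble bounce buffer no longer
         * panics the box; it is reported here instead, so this check
         * is mandatory.
         */
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *out = addr;
        return 0;
}
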
index d608331..e0cc014 100644 (file)
@@ -13,7 +13,7 @@
  *
  * INTRODUCTION
  *
- *   The textsearch infrastructure provides text searching facitilies for
+ *   The textsearch infrastructure provides text searching facilities for
  *   both linear and non-linear data. Individual search algorithms are
  *   implemented in modules and chosen by the user.
  *
@@ -43,7 +43,7 @@
  *       to the algorithm to store persistent variables.
  *   (4) Core eventually resets the search offset and forwards the find()
  *       request to the algorithm.
- *   (5) Algorithm calls get_next_block() provided by the user continously
+ *   (5) Algorithm calls get_next_block() provided by the user continuously
  *       to fetch the data to be searched in block by block.
  *   (6) Algorithm invokes finish() after the last call to get_next_block
  *       to clean up any leftovers from get_next_block. (Optional)
  *   the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
  *   to perform case-insensitive matching, but note that this might slow
  *   down the algorithm, so use it at your own risk.
- *   The returned configuration may then be used for an arbitary
+ *   The returned configuration may then be used for an arbitrary
  *   amount of times and even in parallel as long as a separate struct
  *   ts_state variable is provided to every instance.
  *
  *   The actual search is performed by either calling textsearch_find_-
  *   continuous() for linear data or by providing an own get_next_block()
  *   implementation and calling textsearch_find(). Both functions return
- *   the position of the first occurrence of the patern or UINT_MAX if
- *   no match was found. Subsequent occurences can be found by calling
+ *   the position of the first occurrence of the pattern or UINT_MAX if
+ *   no match was found. Subsequent occurrences can be found by calling
  *   textsearch_next() regardless of the linearity of the data.
  *
  *   Once you're done using a configuration it must be given back via
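
A minimal sketch of the API walked through in the comment above, using
textsearch_prepare(), textsearch_find_continuous() and textsearch_destroy(); the
"kmp" algorithm name and the strings are illustrative:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/textsearch.h>

static int demo_search(void)
{
        static const char haystack[] = "we dance the funky chicken";
        static const char needle[]   = "chicken";
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("kmp", needle, strlen(needle),
                                  GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return PTR_ERR(conf);

        /* Linear data: the core drives get_next_block() internally */
        pos = textsearch_find_continuous(conf, &state,
                                         haystack, strlen(haystack));
        if (pos != UINT_MAX)
                pr_info("pattern found at offset %u\n", pos);

        textsearch_destroy(conf);
        return 0;
}
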
index 3ad483b..e9c0c61 100644 (file)
@@ -179,7 +179,7 @@ config SPLIT_PTLOCK_CPUS
 config COMPACTION
        bool "Allow for memory compaction"
        select MIGRATION
-       depends on EXPERIMENTAL && HUGETLB_PAGE && MMU
+       depends on MMU
        help
          Allows the compaction of memory for the allocation of huge pages.
 
index e187454..dbe99a5 100644 (file)
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
 
 static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
-                                             unsigned long haddr)
+                                             unsigned long haddr, int nd)
 {
        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-                              HPAGE_PMD_ORDER, vma, haddr);
+                              HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                         vma, haddr);
+                                         vma, haddr, numa_node_id());
                if (unlikely(!page))
                        goto out;
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
-               pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-                                         vma, address);
+               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+                                              vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_newpage_charge(pages[i], mm,
                                                       GFP_KERNEL))) {
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                             vma, haddr);
+                                             vma, haddr, numa_node_id());
        else
                new_page = NULL;
 
@@ -1162,7 +1162,12 @@ static void __split_huge_page_refcount(struct page *page)
                /* after clearing PageTail the gup refcount can be released */
                smp_mb();
 
-               page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+               /*
+                * Retain the hwpoison flag of the poisoned tail page:
+                *   without it, memory-failure may kill the wrong process
+                *   on a guest machine (KVM).
+                */
+               page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
                page_tail->flags |= (page->flags &
                                     ((1L << PG_referenced) |
                                      (1L << PG_swapbacked) |
@@ -1740,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 static void collapse_huge_page(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage,
-                              struct vm_area_struct *vma)
+                              struct vm_area_struct *vma,
+                              int node)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -1768,7 +1774,8 @@ static void collapse_huge_page(struct mm_struct *mm,
         * mmap_sem in read mode is good idea also to allow greater
         * scalability.
         */
-       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+                                     node);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
                *hpage = ERR_PTR(-ENOMEM);
@@ -1806,6 +1813,8 @@ static void collapse_huge_page(struct mm_struct *mm,
        /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
        if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
                goto out;
+       if (is_vma_temporary_stack(vma))
+               goto out;
        VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
 
        pgd = pgd_offset(mm, address);
@@ -1847,7 +1856,6 @@ static void collapse_huge_page(struct mm_struct *mm,
                set_pmd_at(mm, address, pmd, _pmd);
                spin_unlock(&mm->page_table_lock);
                anon_vma_unlock(vma->anon_vma);
-               mem_cgroup_uncharge_page(new_page);
                goto out;
        }
 
@@ -1893,6 +1901,7 @@ out_up_write:
        return;
 
 out:
+       mem_cgroup_uncharge_page(new_page);
 #ifdef CONFIG_NUMA
        put_page(new_page);
 #endif
@@ -1912,6 +1921,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;
+       int node = -1;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1942,6 +1952,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                page = vm_normal_page(vma, _address, pteval);
                if (unlikely(!page))
                        goto out_unmap;
+               /*
+                * Choose the node of the first page. This could
+                * be more sophisticated and look at more pages,
+                * but isn't for now.
+                */
+               if (node == -1)
+                       node = page_to_nid(page);
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
@@ -1958,7 +1975,7 @@ out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret)
                /* collapse_huge_page will return with the mmap_sem released */
-               collapse_huge_page(mm, address, hpage, vma);
+               collapse_huge_page(mm, address, hpage, vma, node);
 out:
        return ret;
 }
@@ -2027,32 +2044,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                if ((!(vma->vm_flags & VM_HUGEPAGE) &&
                     !khugepaged_always()) ||
                    (vma->vm_flags & VM_NOHUGEPAGE)) {
+               skip:
                        progress++;
                        continue;
                }
-
                /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
-               if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
-                       khugepaged_scan.address = vma->vm_end;
-                       progress++;
-                       continue;
-               }
+               if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+                       goto skip;
+               if (is_vma_temporary_stack(vma))
+                       goto skip;
+
                VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
 
                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                hend = vma->vm_end & HPAGE_PMD_MASK;
-               if (hstart >= hend) {
-                       progress++;
-                       continue;
-               }
+               if (hstart >= hend)
+                       goto skip;
+               if (khugepaged_scan.address > hend)
+                       goto skip;
                if (khugepaged_scan.address < hstart)
                        khugepaged_scan.address = hstart;
-               if (khugepaged_scan.address > hend) {
-                       khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
-                       progress++;
-                       continue;
-               }
-               BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+               VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
 
                while (khugepaged_scan.address < hend) {
                        int ret;
@@ -2081,7 +2093,7 @@ breakouterloop:
 breakouterloop_mmap_sem:
 
        spin_lock(&khugepaged_mm_lock);
-       BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+       VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
        /*
         * Release the current mm_slot if this mm is about to die, or
         * if we scanned all vmas of this mm.
@@ -2236,9 +2248,9 @@ static int khugepaged(void *none)
 
        for (;;) {
                mutex_unlock(&khugepaged_mutex);
-               BUG_ON(khugepaged_thread != current);
+               VM_BUG_ON(khugepaged_thread != current);
                khugepaged_loop();
-               BUG_ON(khugepaged_thread != current);
+               VM_BUG_ON(khugepaged_thread != current);
 
                mutex_lock(&khugepaged_mutex);
                if (!khugepaged_enabled())
index 177a516..ff0d977 100644 (file)
@@ -75,13 +75,11 @@ static int __init kmemleak_test_init(void)
         * after the module is removed.
         */
        for (i = 0; i < 10; i++) {
-               elem = kmalloc(sizeof(*elem), GFP_KERNEL);
-               pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem);
+               elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+               pr_info("kmemleak: kzalloc(sizeof(*elem)) = %p\n", elem);
                if (!elem)
                        return -ENOMEM;
-               memset(elem, 0, sizeof(*elem));
                INIT_LIST_HEAD(&elem->list);
-
                list_add_tail(&elem->list, &test_list);
        }
 
index bd9bc21..84225f3 100644 (file)
 #define BYTES_PER_POINTER      sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define GFP_KMEMLEAK_MASK      (GFP_KERNEL | GFP_ATOMIC)
+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+                                __GFP_NORETRY | __GFP_NOMEMALLOC | \
+                                __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -511,9 +513,10 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        struct kmemleak_object *object;
        struct prio_tree_node *node;
 
-       object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
+       object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
-               kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
+               pr_warning("Cannot allocate a kmemleak_object structure\n");
+               kmemleak_disable();
                return NULL;
        }
 
@@ -734,9 +737,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                return;
        }
 
-       area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
+       area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
        if (!area) {
-               kmemleak_warn("Cannot allocate a scan area\n");
+               pr_warning("Cannot allocate a scan area\n");
                goto out;
        }
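
gfp_kmemleak_mask() above keeps only the caller's GFP_KERNEL/GFP_ATOMIC bits and adds
flags that forbid retries, emergency reserves and allocation warnings, so kmemleak's
own metadata allocations fail fast and quietly. A short illustration of what the mask
produces; the macro is copied from the hunk above and the two inputs are examples:

#include <linux/gfp.h>

/* Copied from the hunk above, for illustration only. */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)

/*
 * gfp_kmemleak_mask(GFP_KERNEL)
 *      == GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 * gfp_kmemleak_mask(GFP_ATOMIC | __GFP_HIGHMEM)
 *      == GFP_ATOMIC | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *         (__GFP_HIGHMEM and any other caller-specific bits are dropped)
 */
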
 
index bdba245..4618fda 100644 (file)
@@ -137,8 +137,6 @@ static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
 
        BUG_ON(0 == size);
 
-       size = memblock_align_up(size, align);
-
        /* Pump up max_addr */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;
index db76ef7..da53a25 100644 (file)
@@ -612,8 +612,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
-       else
+       else {
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+               nr_pages = -nr_pages; /* for event */
+       }
 
        __this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
 
@@ -1111,6 +1113,23 @@ static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
        return false;
 }
 
+/**
+ * mem_cgroup_check_margin - check if the memory cgroup allows charging
+ * @mem: memory cgroup to check
+ * @bytes: the number of bytes the caller intends to charge
+ *
+ * Returns %true if @mem can be charged @bytes more without exceeding
+ * its limit, %false otherwise.
+ */
+static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+{
+       if (!res_counter_check_margin(&mem->res, bytes))
+               return false;
+       if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
+               return false;
+       return true;
+}
+
 static unsigned int get_swappiness(struct mem_cgroup *memcg)
 {
        struct cgroup *cgrp = memcg->css.cgroup;
@@ -1832,27 +1851,39 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
                if (likely(!ret))
                        return CHARGE_OK;
 
+               res_counter_uncharge(&mem->res, csize);
                mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
                flags |= MEM_CGROUP_RECLAIM_NOSWAP;
        } else
                mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
-
-       if (csize > PAGE_SIZE) /* change csize and retry */
+       /*
+        * csize can be either a huge page (HPAGE_SIZE), a batch of
+        * regular pages (CHARGE_SIZE), or a single regular page
+        * (PAGE_SIZE).
+        *
+        * Never reclaim on behalf of optional batching, retry with a
+        * single page instead.
+        */
+       if (csize == CHARGE_SIZE)
                return CHARGE_RETRY;
 
        if (!(gfp_mask & __GFP_WAIT))
                return CHARGE_WOULDBLOCK;
 
        ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
-                                       gfp_mask, flags);
+                                             gfp_mask, flags);
+       if (mem_cgroup_check_margin(mem_over_limit, csize))
+               return CHARGE_RETRY;
        /*
-        * try_to_free_mem_cgroup_pages() might not give us a full
-        * picture of reclaim. Some pages are reclaimed and might be
-        * moved to swap cache or just unmapped from the cgroup.
-        * Check the limit again to see if the reclaim reduced the
-        * current usage of the cgroup before giving up
+        * Even though the limit is exceeded at this point, reclaim
+        * may have been able to free some pages.  Retry the charge
+        * before killing the task.
+        *
+        * Only for regular pages, though: huge pages are rather
+        * unlikely to succeed so close to the limit, and we fall back
+        * to regular pages anyway in case of failure.
         */
-       if (ret || mem_cgroup_check_under_limit(mem_over_limit))
+       if (csize == PAGE_SIZE && ret)
                return CHARGE_RETRY;
 
        /*
@@ -2144,6 +2175,8 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
        struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
        unsigned long flags;
 
+       if (mem_cgroup_disabled())
+               return;
        /*
         * We have no races with charge/uncharge but will have races with
         * page state accounting.
@@ -2233,7 +2266,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 {
        int ret = -EINVAL;
        unsigned long flags;
-
+       /*
+        * The page is isolated from the LRU, so the collapse path will
+        * not handle it. Page splitting, however, can still happen.
+        * Do this check under compound_page_lock(), which the caller
+        * must hold.
+        */
        if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
                return -EBUSY;
 
@@ -2265,7 +2303,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
        struct cgroup *cg = child->css.cgroup;
        struct cgroup *pcg = cg->parent;
        struct mem_cgroup *parent;
-       int charge = PAGE_SIZE;
+       int page_size = PAGE_SIZE;
        unsigned long flags;
        int ret;
 
@@ -2278,23 +2316,26 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
                goto out;
        if (isolate_lru_page(page))
                goto put;
-       /* The page is isolated from LRU and we have no race with splitting */
-       charge = PAGE_SIZE << compound_order(page);
+
+       if (PageTransHuge(page))
+               page_size = HPAGE_SIZE;
 
        parent = mem_cgroup_from_cont(pcg);
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, charge);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask,
+                               &parent, false, page_size);
        if (ret || !parent)
                goto put_back;
 
-       if (charge > PAGE_SIZE)
+       if (page_size > PAGE_SIZE)
                flags = compound_lock_irqsave(page);
 
-       ret = mem_cgroup_move_account(pc, child, parent, true, charge);
+       ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
        if (ret)
-               mem_cgroup_cancel_charge(parent, charge);
-put_back:
-       if (charge > PAGE_SIZE)
+               mem_cgroup_cancel_charge(parent, page_size);
+
+       if (page_size > PAGE_SIZE)
                compound_unlock_irqrestore(page, flags);
+put_back:
        putback_lru_page(page);
 put:
        put_page(page);
@@ -2312,13 +2353,19 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype)
 {
        struct mem_cgroup *mem = NULL;
+       int page_size = PAGE_SIZE;
        struct page_cgroup *pc;
+       bool oom = true;
        int ret;
-       int page_size = PAGE_SIZE;
 
        if (PageTransHuge(page)) {
                page_size <<= compound_order(page);
                VM_BUG_ON(!PageTransHuge(page));
+               /*
+                * Never OOM-kill a process for a huge page.  The
+                * fault handler will fall back to regular pages.
+                */
+               oom = false;
        }
 
        pc = lookup_page_cgroup(page);
@@ -2327,7 +2374,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                return 0;
        prefetchw(pc);
 
-       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
+       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
        if (ret || !mem)
                return ret;
 
@@ -5013,9 +5060,9 @@ struct cgroup_subsys mem_cgroup_subsys = {
 static int __init enable_swap_account(char *s)
 {
        /* consider enabled if no parameter or 1 is given */
-       if (!s || !strcmp(s, "1"))
+       if (!(*s) || !strcmp(s, "=1"))
                really_do_swap_account = 1;
-       else if (!strcmp(s, "0"))
+       else if (!strcmp(s, "=0"))
                really_do_swap_account = 0;
        return 1;
 }
@@ -5023,7 +5070,8 @@ __setup("swapaccount", enable_swap_account);
 
 static int __init disable_swap_account(char *s)
 {
-       enable_swap_account("0");
+       printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
+       enable_swap_account("=0");
        return 1;
 }
 __setup("noswapaccount", disable_swap_account);
index 548fbd7..0207c2f 100644 (file)
@@ -233,8 +233,8 @@ void shake_page(struct page *p, int access)
        }
 
        /*
-        * Only all shrink_slab here (which would also
-        * shrink other caches) if access is not potentially fatal.
+        * Only call shrink_slab here (which would also shrink other caches) if
+        * access is not potentially fatal.
         */
        if (access) {
                int nr;
@@ -386,8 +386,6 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        struct task_struct *tsk;
        struct anon_vma *av;
 
-       if (!PageHuge(page) && unlikely(split_huge_page(page)))
-               return;
        read_lock(&tasklist_lock);
        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
@@ -856,6 +854,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        int ret;
        int kill = 1;
        struct page *hpage = compound_head(p);
+       struct page *ppage;
 
        if (PageReserved(p) || PageSlab(p))
                return SWAP_SUCCESS;
@@ -896,6 +895,44 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
                }
        }
 
+       /*
+        * ppage: the poisoned page
+        *   if p is a regular (4k) page,
+        *        ppage == the real poisoned page;
+        *   else p is hugetlb or THP, and ppage == the head page.
+        */
+       ppage = hpage;
+
+       if (PageTransHuge(hpage)) {
+               /*
+                * Verify that this isn't a hugetlbfs head page; the check for
+                * PageAnon is just to avoid tripping a split_huge_page
+                * internal debug check, as split_huge_page refuses to deal with
+                * anything that isn't an anon page. PageAnon can't go away from
+                * under us because we hold a refcount on the hpage. Without a
+                * refcount on the hpage, split_huge_page can't be safely called
+                * in the first place, and having a refcount on the tail isn't
+                * enough to be safe.
+                */
+               if (!PageHuge(hpage) && PageAnon(hpage)) {
+                       if (unlikely(split_huge_page(hpage))) {
+                               /*
+                                * FIXME: if splitting the THP fails, it would
+                                * be better to stop the following operation
+                                * rather than cause a panic by unmapping. The
+                                * system might survive if the page is freed
+                                * later.
+                                */
+                               printk(KERN_INFO
+                                       "MCE %#lx: failed to split THP\n", pfn);
+
+                               BUG_ON(!PageHWPoison(p));
+                               return SWAP_FAIL;
+                       }
+                       /* THP is split, so ppage should be the real poisoned page. */
+                       ppage = p;
+               }
+       }
+
        /*
         * First collect all the processes that have the page
         * mapped in dirty form.  This has to be done before try_to_unmap,
@@ -905,12 +942,18 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * there's nothing that can be done.
         */
        if (kill)
-               collect_procs(hpage, &tokill);
+               collect_procs(ppage, &tokill);
+
+       if (hpage != ppage)
+               lock_page_nosync(ppage);
 
-       ret = try_to_unmap(hpage, ttu);
+       ret = try_to_unmap(ppage, ttu);
        if (ret != SWAP_SUCCESS)
                printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
-                               pfn, page_mapcount(hpage));
+                               pfn, page_mapcount(ppage));
+
+       if (hpage != ppage)
+               unlock_page(ppage);
 
        /*
         * Now that the dirty bit has been propagated to the
@@ -921,7 +964,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * use a more force-full uncatchable kill to prevent
         * any accesses to the poisoned memory.
         */
-       kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
+       kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
                      ret != SWAP_SUCCESS, p, pfn);
 
        return ret;
@@ -1022,19 +1065,22 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
         */
-       if (!PageLRU(p) && !PageHuge(p))
-               shake_page(p, 0);
-       if (!PageLRU(p) && !PageHuge(p)) {
-               /*
-                * shake_page could have turned it free.
-                */
-               if (is_free_buddy_page(p)) {
-                       action_result(pfn, "free buddy, 2nd try", DELAYED);
-                       return 0;
+       if (!PageHuge(p) && !PageTransCompound(p)) {
+               if (!PageLRU(p))
+                       shake_page(p, 0);
+               if (!PageLRU(p)) {
+                       /*
+                        * shake_page could have turned it free.
+                        */
+                       if (is_free_buddy_page(p)) {
+                               action_result(pfn, "free buddy, 2nd try",
+                                               DELAYED);
+                               return 0;
+                       }
+                       action_result(pfn, "non LRU", IGNORED);
+                       put_page(p);
+                       return -EBUSY;
                }
-               action_result(pfn, "non LRU", IGNORED);
-               put_page(p);
-               return -EBUSY;
        }
 
        /*
@@ -1064,7 +1110,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         * For error on the tail page, we should set PG_hwpoison
         * on the head page to show that the hugepage is hwpoisoned
         */
-       if (PageTail(p) && TestSetPageHWPoison(hpage)) {
+       if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
                action_result(pfn, "hugepage already hardware poisoned",
                                IGNORED);
                unlock_page(hpage);
@@ -1295,7 +1341,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
        ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
                                true);
        if (ret) {
-               putback_lru_pages(&pagelist);
+               struct page *page1, *page2;
+               list_for_each_entry_safe(page1, page2, &pagelist, lru)
+                       put_page(page1);
+
                pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
                         pfn, ret, page->flags);
                if (ret > 0)
@@ -1419,6 +1468,7 @@ int soft_offline_page(struct page *page, int flags)
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
                                                                0, true);
                if (ret) {
+                       putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                                pfn, ret, page->flags);
                        if (ret > 0)
index 31250fa..5823698 100644 (file)
@@ -2219,7 +2219,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                                         &ptl);
                        if (!pte_same(*page_table, orig_pte)) {
                                unlock_page(old_page);
-                               page_cache_release(old_page);
                                goto unlock;
                        }
                        page_cache_release(old_page);
@@ -2289,7 +2288,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                                         &ptl);
                        if (!pte_same(*page_table, orig_pte)) {
                                unlock_page(old_page);
-                               page_cache_release(old_page);
                                goto unlock;
                        }
 
@@ -2367,16 +2365,6 @@ gotten:
        }
        __SetPageUptodate(new_page);
 
-       /*
-        * Don't let another task, with possibly unlocked vma,
-        * keep the mlocked page.
-        */
-       if ((vma->vm_flags & VM_LOCKED) && old_page) {
-               lock_page(old_page);    /* for LRU manipulation */
-               clear_page_mlock(old_page);
-               unlock_page(old_page);
-       }
-
        if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
@@ -2444,10 +2432,20 @@ gotten:
 
        if (new_page)
                page_cache_release(new_page);
-       if (old_page)
-               page_cache_release(old_page);
 unlock:
        pte_unmap_unlock(page_table, ptl);
+       if (old_page) {
+               /*
+                * Don't let another task, with possibly unlocked vma,
+                * keep the mlocked page.
+                */
+               if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+                       lock_page(old_page);    /* LRU manipulation */
+                       munlock_vma_page(old_page);
+                       unlock_page(old_page);
+               }
+               page_cache_release(old_page);
+       }
        return ret;
 oom_free_new:
        page_cache_release(new_page);
@@ -2650,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
        details.i_mmap_lock = &mapping->i_mmap_lock;
 
+       mutex_lock(&mapping->unmap_mutex);
        spin_lock(&mapping->i_mmap_lock);
 
        /* Protect against endless unmapping loops */
@@ -2666,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
        spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
@@ -3053,12 +3053,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                goto out;
                        }
                        charged = 1;
-                       /*
-                        * Don't let another task, with possibly unlocked vma,
-                        * keep the mlocked page.
-                        */
-                       if (vma->vm_flags & VM_LOCKED)
-                               clear_page_mlock(vmf.page);
                        copy_user_highpage(page, vmf.page, address, vma);
                        __SetPageUptodate(page);
                } else {
index 368fc9d..b53ec99 100644 (file)
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 }
 
 /* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
+       int nd)
 {
-       int nd = numa_node_id();
-
        switch (policy->mode) {
        case MPOL_PREFERRED:
                if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                zl = node_zonelist(interleave_nid(*mpol, vma, addr,
                                huge_page_shift(hstate_vma(vma))), gfp_flags);
        } else {
-               zl = policy_zonelist(gfp_flags, *mpol);
+               zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
                if ((*mpol)->mode == MPOL_BIND)
                        *nodemask = &(*mpol)->v.nodes;
        }
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-               unsigned long addr)
+               unsigned long addr, int node)
 {
        struct mempolicy *pol = get_vma_policy(current, vma, addr);
        struct zonelist *zl;
@@ -1830,13 +1829,13 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
 
-               nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+               nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
                put_mems_allowed();
                return page;
        }
-       zl = policy_zonelist(gfp, pol);
+       zl = policy_zonelist(gfp, pol, node);
        if (unlikely(mpol_needs_cond_ref(pol))) {
                /*
                 * slow path: ref counted shared policy
@@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
        else
                page = __alloc_pages_nodemask(gfp, order,
-                       policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+                               policy_zonelist(gfp, pol, numa_node_id()),
+                               policy_nodemask(gfp, pol));
        put_mems_allowed();
        return page;
 }
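
alloc_pages_vma() now takes the target node explicitly; the THP paths shown earlier
pass numa_node_id() or page_to_nid() of an existing page. The alloc_page_vma_node()
wrapper used in the huge_memory.c hunk presumably just forwards an order-0 request;
a hedged sketch of what such wrappers would look like (assumed shape, not the exact
in-tree definitions):

/*
 * Assumed shape of the convenience wrappers in <linux/gfp.h> after this
 * change -- a sketch for illustration, check the header in your tree.
 */
#define alloc_page_vma(gfp_mask, vma, addr)                     \
        alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node)          \
        alloc_pages_vma(gfp_mask, 0, vma, addr, node)
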
index 46fe8cc..352de55 100644 (file)
@@ -772,6 +772,7 @@ uncharge:
 unlock:
        unlock_page(page);
 
+move_newpage:
        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
@@ -785,8 +786,6 @@ unlock:
                putback_lru_page(page);
        }
 
-move_newpage:
-
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
@@ -888,7 +887,7 @@ out:
  * are movable anymore because to has become empty
  * or no retryable pages exist anymore.
  * Caller should call putback_lru_pages to return pages to the LRU
- * or free list.
+ * or free list only if ret != 0.
  *
  * Return: Number of pages not migrated or error code.
  */
@@ -981,10 +980,6 @@ int migrate_huge_pages(struct list_head *from,
        }
        rc = 0;
 out:
-
-       list_for_each_entry_safe(page, page2, from, lru)
-               put_page(page);
-
        if (rc)
                return rc;
 
@@ -1292,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
                return -EPERM;
 
        /* Find the mm_struct */
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
-               read_unlock(&tasklist_lock);
+               rcu_read_unlock();
                return -ESRCH;
        }
        mm = get_task_mm(task);
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        if (!mm)
                return -EINVAL;
index 13e81ee..c3924c7 100644 (file)
@@ -178,6 +178,13 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;
 
+       /*
+        * We want mlock to succeed for regions that have any permissions
+        * other than PROT_NONE.
+        */
+       if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+               gup_flags |= FOLL_FORCE;
+
        if (vma->vm_flags & VM_LOCKED)
                gup_flags |= FOLL_MLOCK;
 
index 9925b63..1de98d4 100644 (file)
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
-               if (new_vma->vm_truncate_count &&
-                   new_vma->vm_truncate_count != vma->vm_truncate_count)
-                       new_vma->vm_truncate_count = 0;
+               new_vma->vm_truncate_count = 0;
        }
 
        /*
index 90c1439..cdef1d4 100644 (file)
@@ -1088,8 +1088,10 @@ static void drain_pages(unsigned int cpu)
                pset = per_cpu_ptr(zone->pageset, cpu);
 
                pcp = &pset->pcp;
-               free_pcppages_bulk(zone, pcp->count, pcp);
-               pcp->count = 0;
+               if (pcp->count) {
+                       free_pcppages_bulk(zone, pcp->count, pcp);
+                       pcp->count = 0;
+               }
                local_irq_restore(flags);
        }
 }
@@ -2034,6 +2036,14 @@ restart:
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+       /*
+        * Find the true preferred zone if the allocation is unconstrained by
+        * cpusets.
+        */
+       if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+               first_zones_zonelist(zonelist, high_zoneidx, NULL,
+                                       &preferred_zone);
+
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2192,7 +2202,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 
        get_mems_allowed();
        /* The preferred zone is used for statistics later */
-       first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+       first_zones_zonelist(zonelist, high_zoneidx,
+                               nodemask ? : &cpuset_current_mems_allowed,
+                               &preferred_zone);
        if (!preferred_zone) {
                put_mems_allowed();
                return NULL;
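
The nodemask ? : &cpuset_current_mems_allowed expression above relies on the GNU C extension that lets the middle operand of ?: be omitted: a ?: b yields a when it is non-null and b otherwise, evaluating a only once. A minimal example (pick_mask and default_mask are made-up names):

#include <stdio.h>

static int default_mask = 0xff;

static int *pick_mask(int *nodemask)
{
        /* GNU extension: equivalent to nodemask ? nodemask : &default_mask,
         * except that nodemask is evaluated only once. */
        return nodemask ?: &default_mask;
}

int main(void)
{
        int mine = 0x0f;

        printf("%x %x\n", *pick_mask(&mine), *pick_mask(NULL));
        return 0;
}
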
@@ -5364,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;
 
-               if (!pfn_valid_within(check)) {
-                       iter++;
+               if (!pfn_valid_within(check))
                        continue;
-               }
+
                page = pfn_to_page(check);
                if (!page_count(page)) {
                        if (PageBuddy(page))
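
The __count_immobile_pages hunk fixes a classic loop bug: the body incremented iter on the !pfn_valid_within() path even though the for statement already increments it, so the pfn right after every hole was skipped. Reduced to a standalone demonstration (valid() is a stand-in for pfn_valid_within()):

#include <stdio.h>

static int valid(int i) { return i != 3; }      /* pretend pfn 3 is a hole */

int main(void)
{
        int i, visited_buggy = 0, visited_fixed = 0;

        /* Buggy form: the continue path bumps i twice, so element 4 is
         * silently skipped as well. */
        for (i = 0; i < 8; i++) {
                if (!valid(i)) {
                        i++;                    /* extra increment: the bug */
                        continue;
                }
                visited_buggy++;
        }

        /* Fixed form, as in the patch: rely on the for-loop increment only. */
        for (i = 0; i < 8; i++) {
                if (!valid(i))
                        continue;
                visited_fixed++;
        }

        printf("buggy visited %d pages, fixed visited %d\n",
               visited_buggy, visited_fixed);
        return 0;
}
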
index 0369f5b..eb663fb 100644 (file)
@@ -6,6 +6,7 @@
  *  Copyright (C) 2010  Linus Torvalds
  */
 
+#include <linux/pagemap.h>
 #include <asm/tlb.h>
 #include <asm-generic/pgtable.h>
 
index 07a458d..0341c57 100644 (file)
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
        error = -EINVAL;
        if (S_ISBLK(inode->i_mode)) {
-               bdev = I_BDEV(inode);
+               bdev = bdgrab(I_BDEV(inode));
                error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                   sys_swapon);
                if (error < 0) {
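
The swapon hunk takes an extra reference with bdgrab() before handing the block device to blkdev_get(), since blkdev_get() consumes a reference (dropping it on failure) and swapon still needs its own. A hedged, generic sketch of that "grab before a reference-consuming handoff" pattern with a toy refcount, not the kernel's block_device accounting:

#include <stdio.h>

struct toy_dev {
        int refcount;
};

static void toy_put(struct toy_dev *d)
{
        if (--d->refcount == 0)
                printf("device freed\n");
}

static struct toy_dev *toy_grab(struct toy_dev *d)
{
        d->refcount++;                  /* analogous to bdgrab() */
        return d;
}

/* Like blkdev_get() here: consumes one reference, dropping it on failure. */
static int toy_open(struct toy_dev *d, int fail)
{
        if (fail) {
                toy_put(d);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct toy_dev dev = { .refcount = 1 }; /* owned by the "inode" */

        /* Without the extra grab, a failed open would drop the caller's
         * only reference and free the device out from under it. */
        if (toy_open(toy_grab(&dev), 1) < 0)
                printf("open failed, refcount still %d\n", dev.refcount);

        toy_put(&dev);                  /* caller's original reference */
        return 0;
}
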
index 49feb46..d64296b 100644 (file)
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        next = start;
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        unlock_page(page);
                }
                pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
                cond_resched();
        }
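
The truncate hunk brackets each pagevec pass with mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end(), which lets the memory cgroup code coalesce the per-page uncharges into one counter update per pass. A toy sketch of that begin/flush batching idea (the counters here are invented; the real memcg batching is per-task and more involved):

#include <stdio.h>

static long charged = 1000;     /* global counter, expensive to touch */
static long batch;              /* per-batch accumulation */
static int  batching;

static void uncharge_start(void) { batching = 1; batch = 0; }

static void uncharge_one(void)
{
        if (batching)
                batch++;        /* cheap: defer the global update */
        else
                charged--;      /* expensive path, once per page */
}

static void uncharge_end(void)
{
        charged -= batch;       /* one global update per batch */
        batching = 0;
}

int main(void)
{
        uncharge_start();
        for (int i = 0; i < 16; i++)            /* one pagevec of pages */
                uncharge_one();
        uncharge_end();
        printf("charged now %ld (one flush instead of 16 updates)\n", charged);
        return 0;
}
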
 
index f5d90de..6771ea7 100644 (file)
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
        if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
                return false;
 
-       /*
-        * If we failed to reclaim and have scanned the full list, stop.
-        * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
-        *       faster but obviously would be less likely to succeed
-        *       allocation. If this is desirable, use GFP_REPEAT to decide
-        *       if both reclaimed and scanned should be checked or just
-        *       reclaimed
-        */
-       if (!nr_reclaimed && !nr_scanned)
-               return false;
+       /* Consider stopping depending on scan and reclaim activity */
+       if (sc->gfp_mask & __GFP_REPEAT) {
+               /*
+                * For __GFP_REPEAT allocations, stop reclaiming if the
+                * full LRU list has been scanned and we are still failing
+                * to reclaim pages. This full LRU scan is potentially
+                * expensive but a __GFP_REPEAT caller really wants to succeed
+                */
+               if (!nr_reclaimed && !nr_scanned)
+                       return false;
+       } else {
+               /*
+                * For non-__GFP_REPEAT allocations which can presumably
+                * fail without consequence, stop if we failed to reclaim
+                * any pages from the last SWAP_CLUSTER_MAX number of
+                * pages that were scanned. This will return to the
+                * caller faster at the risk that reclaim/compaction and
+                * the resulting allocation attempt fail
+                */
+               if (!nr_reclaimed)
+                       return false;
+       }
 
        /*
         * If we have not reclaimed enough pages for compaction and the
@@ -1882,12 +1894,12 @@ static void shrink_zone(int priority, struct zone *zone,
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        enum lru_list l;
-       unsigned long nr_reclaimed;
+       unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-       unsigned long nr_scanned = sc->nr_scanned;
 
 restart:
        nr_reclaimed = 0;
+       nr_scanned = sc->nr_scanned;
        get_scan_count(zone, sc, nr, priority);
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2083,7 +2095,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        struct zone *preferred_zone;
 
                        first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
-                                                       NULL, &preferred_zone);
+                                               &cpuset_current_mems_allowed,
+                                               &preferred_zone);
                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
                }
        }
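
The should_continue_reclaim() change above splits the stop condition by allocation type: __GFP_REPEAT callers keep going until a full LRU scan reclaims nothing, everyone else bails out as soon as a scan batch reclaims nothing. The first half of that decision, lifted into a standalone predicate (parameter names simplified; the real function goes on to check whether enough pages exist for compaction):

#include <stdbool.h>
#include <stdio.h>

/* Returns true when reclaim/compaction should keep going. */
static bool keep_reclaiming(bool gfp_repeat,
                            unsigned long nr_reclaimed,
                            unsigned long nr_scanned)
{
        if (gfp_repeat) {
                /* Stop only after a full scan that reclaimed nothing. */
                if (!nr_reclaimed && !nr_scanned)
                        return false;
        } else {
                /* Give up as soon as a scan batch reclaims nothing. */
                if (!nr_reclaimed)
                        return false;
        }
        return true;
}

int main(void)
{
        printf("__GFP_REPEAT, scanned but nothing reclaimed: %d\n",
               keep_reclaiming(true, 0, 512));
        printf("plain alloc,  scanned but nothing reclaimed: %d\n",
               keep_reclaiming(false, 0, 512));
        return 0;
}
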
index 6e64f7c..7850412 100644 (file)
@@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
 {
-       unsigned long old_features = vlandev->features;
+       u32 old_features = vlandev->features;
 
        vlandev->features &= ~dev->vlan_features;
        vlandev->features |= dev->features & dev->vlan_features;
index be73753..ae610f0 100644 (file)
@@ -625,6 +625,19 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
                rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+                                   struct scatterlist *sgl, unsigned int sgc)
+{
+       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       const struct net_device_ops *ops = real_dev->netdev_ops;
+       int rc = 0;
+
+       if (ops->ndo_fcoe_ddp_target)
+               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+       return rc;
+}
 #endif
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -858,6 +871,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+       .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
 };
 
index 17c5ba7..29a54cc 100644 (file)
@@ -59,7 +59,6 @@
                                                 * safely advertise a maxsize
                                                 * of 64k */
 
-#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
 /**
  * struct p9_trans_rdma - RDMA transport instance
  *
index 7284062..79cabf1 100644 (file)
@@ -221,6 +221,12 @@ config RPS
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
+config RFS_ACCEL
+       boolean
+       depends on RPS && GENERIC_HARDIRQS
+       select CPU_RMAP
+       default y
+
 config XPS
        boolean
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
index a3330eb..a51d946 100644 (file)
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER)               += netfilter/
 obj-$(CONFIG_INET)             += ipv4/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
-ifneq ($(CONFIG_IPV6),)
-obj-y                          += ipv6/
-endif
+obj-$(CONFIG_NET)              += ipv6/
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index d257da5..1d4be60 100644 (file)
@@ -502,8 +502,6 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
        struct atmarp_entry *entry;
        int error;
        struct clip_vcc *clip_vcc;
-       struct flowi fl = { .fl4_dst = ip,
-                           .fl4_tos = 1 };
        struct rtable *rt;
 
        if (vcc->push != clip_push) {
@@ -520,9 +518,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
                unlink_clip_vcc(clip_vcc);
                return 0;
        }
-       error = ip_route_output_key(&init_net, &rt, &fl);
-       if (error)
-               return error;
+       rt = ip_route_output(&init_net, ip, 0, 1, 0);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
        neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
        ip_rt_put(rt);
        if (!neigh)
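
The clip_setentry hunk moves from ip_route_output_key()'s int-return-plus-output-parameter style to ip_route_output(), which encodes failure in the returned pointer and is unpacked with IS_ERR()/PTR_ERR(). A small userspace re-implementation of that <linux/err.h> idiom (route_lookup and struct route are invented for the example):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
        /* Error pointers occupy the top MAX_ERRNO addresses. */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct route { int dev; };

static struct route *route_lookup(int dst)
{
        static struct route r = { .dev = 1 };

        if (dst == 0)
                return ERR_PTR(-ENETUNREACH);   /* error folded into pointer */
        return &r;
}

int main(void)
{
        struct route *rt = route_lookup(0);

        if (IS_ERR(rt))
                printf("lookup failed: %ld\n", PTR_ERR(rt));

        rt = route_lookup(42);
        if (!IS_ERR(rt))
                printf("routed via dev %d\n", rt->dev);
        return 0;
}
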
index d936aec..2de93d0 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
index 3850a3e..af45d6b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
                               int packet_len,
                               unsigned long send_time,
                               bool directlink,
-                              struct batman_if *if_incoming,
+                              struct hard_iface *if_incoming,
                               struct forw_packet *forw_packet)
 {
        struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
                                  unsigned long send_time, bool direct_link,
-                                 struct batman_if *if_incoming,
+                                 struct hard_iface *if_incoming,
                                  int own_packet)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
                            unsigned char *packet_buff, int packet_len,
-                           struct batman_if *if_incoming, char own_packet,
+                           struct hard_iface *if_incoming, char own_packet,
                            unsigned long send_time)
 {
        /**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
 
 /* unpack the aggregated packets and process them one by one */
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-                            int packet_len, struct batman_if *if_incoming)
+                            int packet_len, struct hard_iface *if_incoming)
 {
        struct batman_packet *batman_packet;
        int buff_pos = 0;
index 71a91b3..0622042 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
                            unsigned char *packet_buff, int packet_len,
-                           struct batman_if *if_incoming, char own_packet,
+                           struct hard_iface *if_incoming, char own_packet,
                            unsigned long send_time);
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-                            int packet_len, struct batman_if *if_incoming);
+                            int packet_len, struct hard_iface *if_incoming);
 
 #endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
index 0ae81d0..0e9d435 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -52,7 +52,6 @@ static void emit_log_char(struct debug_log *debug_log, char c)
 
 static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
 {
-       int printed_len;
        va_list args;
        static char debug_log_buf[256];
        char *p;
@@ -62,8 +61,7 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
 
        spin_lock_bh(&debug_log->lock);
        va_start(args, fmt);
-       printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
-                                fmt, args);
+       vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
        va_end(args);
 
        for (p = debug_log_buf; *p != 0; p++)
index 72df532..bc9cda3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index cd7bb51..e449bf6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
                               char *buff)
 {
        struct net_device *net_dev = kobj_to_netdev(kobj);
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        ssize_t length;
 
-       if (!batman_if)
+       if (!hard_iface)
                return 0;
 
-       length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-                        "none" : batman_if->soft_iface->name);
+       length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
+                        "none" : hard_iface->soft_iface->name);
 
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hardif_free_ref(hard_iface);
 
        return length;
 }
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
                                char *buff, size_t count)
 {
        struct net_device *net_dev = kobj_to_netdev(kobj);
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        int status_tmp = -1;
-       int ret;
+       int ret = count;
 
-       if (!batman_if)
+       if (!hard_iface)
                return count;
 
        if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
        if (strlen(buff) >= IFNAMSIZ) {
                pr_err("Invalid parameter for 'mesh_iface' setting received: "
                       "interface name too long '%s'\n", buff);
-               kref_put(&batman_if->refcount, hardif_free_ref);
+               hardif_free_ref(hard_iface);
                return -EINVAL;
        }
 
@@ -481,30 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
        else
                status_tmp = IF_I_WANT_YOU;
 
-       if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-           (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
-               kref_put(&batman_if->refcount, hardif_free_ref);
-               return count;
-       }
+       if (hard_iface->if_status == status_tmp)
+               goto out;
+
+       if ((hard_iface->soft_iface) &&
+           (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+               goto out;
 
        if (status_tmp == IF_NOT_IN_USE) {
                rtnl_lock();
-               hardif_disable_interface(batman_if);
+               hardif_disable_interface(hard_iface);
                rtnl_unlock();
-               kref_put(&batman_if->refcount, hardif_free_ref);
-               return count;
+               goto out;
        }
 
        /* if the interface already is in use */
-       if (batman_if->if_status != IF_NOT_IN_USE) {
+       if (hard_iface->if_status != IF_NOT_IN_USE) {
                rtnl_lock();
-               hardif_disable_interface(batman_if);
+               hardif_disable_interface(hard_iface);
                rtnl_unlock();
        }
 
-       ret = hardif_enable_interface(batman_if, buff);
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       ret = hardif_enable_interface(hard_iface, buff);
 
+out:
+       hardif_free_ref(hard_iface);
        return ret;
 }
 
@@ -512,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
                                 char *buff)
 {
        struct net_device *net_dev = kobj_to_netdev(kobj);
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        ssize_t length;
 
-       if (!batman_if)
+       if (!hard_iface)
                return 0;
 
-       switch (batman_if->if_status) {
+       switch (hard_iface->if_status) {
        case IF_TO_BE_REMOVED:
                length = sprintf(buff, "disabling\n");
                break;
@@ -537,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
                break;
        }
 
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hardif_free_ref(hard_iface);
 
        return length;
 }
index 7f186c0..02f1fa7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index bbcd8f7..ad2ca92 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index ac54017..769c246 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 0065ffb..3cc4355 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
-static void gw_node_free_ref(struct kref *refcount)
+static void gw_node_free_rcu(struct rcu_head *rcu)
 {
        struct gw_node *gw_node;
 
-       gw_node = container_of(refcount, struct gw_node, refcount);
+       gw_node = container_of(rcu, struct gw_node, rcu);
        kfree(gw_node);
 }
 
-static void gw_node_free_rcu(struct rcu_head *rcu)
+static void gw_node_free_ref(struct gw_node *gw_node)
 {
-       struct gw_node *gw_node;
-
-       gw_node = container_of(rcu, struct gw_node, rcu);
-       kref_put(&gw_node->refcount, gw_node_free_ref);
+       if (atomic_dec_and_test(&gw_node->refcount))
+               call_rcu(&gw_node->rcu, gw_node_free_rcu);
 }
 
 void *gw_get_selected(struct bat_priv *bat_priv)
 {
-       struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+       struct gw_node *curr_gateway_tmp;
+       struct orig_node *orig_node = NULL;
 
+       rcu_read_lock();
+       curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
        if (!curr_gateway_tmp)
-               return NULL;
+               goto out;
+
+       orig_node = curr_gateway_tmp->orig_node;
+       if (!orig_node)
+               goto out;
 
-       return curr_gateway_tmp->orig_node;
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               orig_node = NULL;
+
+out:
+       rcu_read_unlock();
+       return orig_node;
 }
 
 void gw_deselect(struct bat_priv *bat_priv)
 {
-       struct gw_node *gw_node = bat_priv->curr_gw;
+       struct gw_node *gw_node;
 
-       bat_priv->curr_gw = NULL;
+       spin_lock_bh(&bat_priv->gw_list_lock);
+       gw_node = rcu_dereference(bat_priv->curr_gw);
+       rcu_assign_pointer(bat_priv->curr_gw, NULL);
+       spin_unlock_bh(&bat_priv->gw_list_lock);
 
        if (gw_node)
-               kref_put(&gw_node->refcount, gw_node_free_ref);
+               gw_node_free_ref(gw_node);
 }
 
-static struct gw_node *gw_select(struct bat_priv *bat_priv,
-                         struct gw_node *new_gw_node)
+static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
-       struct gw_node *curr_gw_node = bat_priv->curr_gw;
+       struct gw_node *curr_gw_node;
 
-       if (new_gw_node)
-               kref_get(&new_gw_node->refcount);
+       if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
+               new_gw_node = NULL;
+
+       spin_lock_bh(&bat_priv->gw_list_lock);
+       curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+       rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+       spin_unlock_bh(&bat_priv->gw_list_lock);
 
-       bat_priv->curr_gw = new_gw_node;
-       return curr_gw_node;
+       if (curr_gw_node)
+               gw_node_free_ref(curr_gw_node);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
        struct hlist_node *node;
-       struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL;
+       struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
        uint8_t max_tq = 0;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
        int down, up;
@@ -93,19 +110,23 @@ void gw_election(struct bat_priv *bat_priv)
        if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
                return;
 
-       if (bat_priv->curr_gw)
+       rcu_read_lock();
+       curr_gw = rcu_dereference(bat_priv->curr_gw);
+       if (curr_gw) {
+               rcu_read_unlock();
                return;
+       }
 
-       rcu_read_lock();
        if (hlist_empty(&bat_priv->gw_list)) {
-               rcu_read_unlock();
 
-               if (bat_priv->curr_gw) {
+               if (curr_gw) {
+                       rcu_read_unlock();
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Removing selected gateway - "
                                "no gateway in range\n");
                        gw_deselect(bat_priv);
-               }
+               } else
+                       rcu_read_unlock();
 
                return;
        }
@@ -154,12 +175,12 @@ void gw_election(struct bat_priv *bat_priv)
                        max_gw_factor = tmp_gw_factor;
        }
 
-       if (bat_priv->curr_gw != curr_gw_tmp) {
-               if ((bat_priv->curr_gw) && (!curr_gw_tmp))
+       if (curr_gw != curr_gw_tmp) {
+               if ((curr_gw) && (!curr_gw_tmp))
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Removing selected gateway - "
                                "no gateway in range\n");
-               else if ((!bat_priv->curr_gw) && (curr_gw_tmp))
+               else if ((!curr_gw) && (curr_gw_tmp))
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Adding route to gateway %pM "
                                "(gw_flags: %i, tq: %i)\n",
@@ -174,43 +195,43 @@ void gw_election(struct bat_priv *bat_priv)
                                curr_gw_tmp->orig_node->gw_flags,
                                curr_gw_tmp->orig_node->router->tq_avg);
 
-               old_gw_node = gw_select(bat_priv, curr_gw_tmp);
+               gw_select(bat_priv, curr_gw_tmp);
        }
 
        rcu_read_unlock();
-
-       /* the kfree() has to be outside of the rcu lock */
-       if (old_gw_node)
-               kref_put(&old_gw_node->refcount, gw_node_free_ref);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
-       struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+       struct gw_node *curr_gateway_tmp;
        uint8_t gw_tq_avg, orig_tq_avg;
 
+       rcu_read_lock();
+       curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
        if (!curr_gateway_tmp)
-               return;
+               goto out_rcu;
 
        if (!curr_gateway_tmp->orig_node)
-               goto deselect;
+               goto deselect_rcu;
 
        if (!curr_gateway_tmp->orig_node->router)
-               goto deselect;
+               goto deselect_rcu;
 
        /* this node already is the gateway */
        if (curr_gateway_tmp->orig_node == orig_node)
-               return;
+               goto out_rcu;
 
        if (!orig_node->router)
-               return;
+               goto out_rcu;
 
        gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
+       rcu_read_unlock();
+
        orig_tq_avg = orig_node->router->tq_avg;
 
        /* the TQ value has to be better */
        if (orig_tq_avg < gw_tq_avg)
-               return;
+               goto out;
 
        /**
         * if the routing class is greater than 3 the value tells us how much
@@ -218,15 +239,23 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
         **/
        if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
            (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
-               return;
+               goto out;
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "Restarting gateway selection: better gateway found (tq curr: "
                "%i, tq new: %i)\n",
                gw_tq_avg, orig_tq_avg);
+       goto deselect;
 
+out_rcu:
+       rcu_read_unlock();
+       goto out;
+deselect_rcu:
+       rcu_read_unlock();
 deselect:
        gw_deselect(bat_priv);
+out:
+       return;
 }
 
 static void gw_node_add(struct bat_priv *bat_priv,
@@ -242,7 +271,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
        memset(gw_node, 0, sizeof(struct gw_node));
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
-       kref_init(&gw_node->refcount);
+       atomic_set(&gw_node->refcount, 1);
 
        spin_lock_bh(&bat_priv->gw_list_lock);
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -283,7 +312,7 @@ void gw_node_update(struct bat_priv *bat_priv,
                                "Gateway %pM removed from gateway list\n",
                                orig_node->orig);
 
-                       if (gw_node == bat_priv->curr_gw) {
+                       if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
                                rcu_read_unlock();
                                gw_deselect(bat_priv);
                                return;
@@ -321,11 +350,11 @@ void gw_node_purge(struct bat_priv *bat_priv)
                    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
                        continue;
 
-               if (bat_priv->curr_gw == gw_node)
+               if (rcu_dereference(bat_priv->curr_gw) == gw_node)
                        gw_deselect(bat_priv);
 
                hlist_del_rcu(&gw_node->list);
-               call_rcu(&gw_node->rcu, gw_node_free_rcu);
+               gw_node_free_ref(gw_node);
        }
 
 
@@ -335,12 +364,16 @@ void gw_node_purge(struct bat_priv *bat_priv)
 static int _write_buffer_text(struct bat_priv *bat_priv,
                              struct seq_file *seq, struct gw_node *gw_node)
 {
-       int down, up;
+       struct gw_node *curr_gw;
+       int down, up, ret;
 
        gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
 
-       return seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-                      (bat_priv->curr_gw == gw_node ? "=>" : "  "),
+       rcu_read_lock();
+       curr_gw = rcu_dereference(bat_priv->curr_gw);
+
+       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+                      (curr_gw == gw_node ? "=>" : "  "),
                       gw_node->orig_node->orig,
                       gw_node->orig_node->router->tq_avg,
                       gw_node->orig_node->router->addr,
@@ -350,6 +383,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv,
                       (down > 2048 ? "MBit" : "KBit"),
                       (up > 2048 ? up / 1024 : up),
                       (up > 2048 ? "MBit" : "KBit"));
+
+       rcu_read_unlock();
+       return ret;
 }
 
 int gw_client_seq_print_text(struct seq_file *seq, void *offset)
@@ -470,8 +506,12 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
        if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
                return -1;
 
-       if (!bat_priv->curr_gw)
+       rcu_read_lock();
+       if (!rcu_dereference(bat_priv->curr_gw)) {
+               rcu_read_unlock();
                return 0;
+       }
+       rcu_read_unlock();
 
        return 1;
 }
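
Across gateway_client.c (and the rest of the batman-adv changes that follow), kref-based counting is replaced with a bare atomic_t: lookups performed under rcu_read_lock() only hand out a reference via atomic_inc_not_zero(), and the final atomic_dec_and_test() schedules an RCU-deferred kfree, exactly as the new gw_node_free_ref()/gw_node_free_rcu() pair does. The pattern, condensed into a fragment (kernel context only, not standalone code; struct obj is a placeholder):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        atomic_t refcount;
        struct rcu_head rcu;
        /* ... payload with RCU-managed lifetime ... */
};

static void obj_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct obj, rcu));
}

static void obj_free_ref(struct obj *obj)
{
        /* Last reference: defer the kfree past a grace period so lockless
         * readers that still hold an RCU-protected pointer stay safe. */
        if (atomic_dec_and_test(&obj->refcount))
                call_rcu(&obj->rcu, obj_free_rcu);
}

/* Called under rcu_read_lock(): refuse to resurrect an object whose
 * refcount has already dropped to zero. */
static struct obj *obj_get(struct obj *obj)
{
        if (obj && !atomic_inc_not_zero(&obj->refcount))
                obj = NULL;
        return obj;
}
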
index 4585e65..2aa4391 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index b962982..50d3a59 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 5e728d0..55e527a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 4f95777..b3058e4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 
 #include <linux/if_arp.h>
 
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
+/* protect update critical side of hardif_list - but not the content */
+static DEFINE_SPINLOCK(hardif_list_lock);
 
-static void hardif_free_rcu(struct rcu_head *rcu)
+
+static int batman_skb_recv(struct sk_buff *skb,
+                          struct net_device *dev,
+                          struct packet_type *ptype,
+                          struct net_device *orig_dev);
+
+void hardif_free_rcu(struct rcu_head *rcu)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
-       batman_if = container_of(rcu, struct batman_if, rcu);
-       dev_put(batman_if->net_dev);
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hard_iface = container_of(rcu, struct hard_iface, rcu);
+       dev_put(hard_iface->net_dev);
+       kfree(hard_iface);
 }
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->net_dev == net_dev)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->net_dev == net_dev &&
+                   atomic_inc_not_zero(&hard_iface->refcount))
                        goto out;
        }
 
-       batman_if = NULL;
+       hard_iface = NULL;
 
 out:
-       if (batman_if)
-               kref_get(&batman_if->refcount);
-
        rcu_read_unlock();
-       return batman_if;
+       return hard_iface;
 }
 
 static int is_valid_iface(struct net_device *net_dev)
@@ -75,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
                return 0;
 
        /* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-       if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
-               return 0;
-#else
-       if (net_dev->hard_start_xmit == interface_tx)
+       if (softif_is_valid(net_dev))
                return 0;
-#endif
 
        /* Device is being bridged */
        /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -90,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
        return 1;
 }
 
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->soft_iface != soft_iface)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               if (batman_if->if_status == IF_ACTIVE)
+               if (hard_iface->if_status == IF_ACTIVE &&
+                   atomic_inc_not_zero(&hard_iface->refcount))
                        goto out;
        }
 
-       batman_if = NULL;
+       hard_iface = NULL;
 
 out:
-       if (batman_if)
-               kref_get(&batman_if->refcount);
-
        rcu_read_unlock();
-       return batman_if;
+       return hard_iface;
 }
 
 static void update_primary_addr(struct bat_priv *bat_priv)
@@ -126,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
 }
 
 static void set_primary_if(struct bat_priv *bat_priv,
-                          struct batman_if *batman_if)
+                          struct hard_iface *hard_iface)
 {
        struct batman_packet *batman_packet;
-       struct batman_if *old_if;
+       struct hard_iface *old_if;
 
-       if (batman_if)
-               kref_get(&batman_if->refcount);
+       if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+               hard_iface = NULL;
 
        old_if = bat_priv->primary_if;
-       bat_priv->primary_if = batman_if;
+       bat_priv->primary_if = hard_iface;
 
        if (old_if)
-               kref_put(&old_if->refcount, hardif_free_ref);
+               hardif_free_ref(old_if);
 
        if (!bat_priv->primary_if)
                return;
 
-       batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+       batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
        batman_packet->flags = PRIMARIES_FIRST_HOP;
        batman_packet->ttl = TTL;
 
@@ -156,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
        atomic_set(&bat_priv->hna_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
 {
-       if (batman_if->net_dev->flags & IFF_UP)
+       if (hard_iface->net_dev->flags & IFF_UP)
                return true;
 
        return false;
 }
 
-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
 {
-       memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-              batman_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-              batman_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+              hard_iface->net_dev->dev_addr, ETH_ALEN);
+       memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+              hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
 static void check_known_mac_addr(struct net_device *net_dev)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if ((batman_if->if_status != IF_ACTIVE) &&
-                   (batman_if->if_status != IF_TO_BE_ACTIVATED))
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if ((hard_iface->if_status != IF_ACTIVE) &&
+                   (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                        continue;
 
-               if (batman_if->net_dev == net_dev)
+               if (hard_iface->net_dev == net_dev)
                        continue;
 
-               if (!compare_orig(batman_if->net_dev->dev_addr,
-                                 net_dev->dev_addr))
+               if (!compare_eth(hard_iface->net_dev->dev_addr,
+                                net_dev->dev_addr))
                        continue;
 
                pr_warning("The newly added mac address (%pM) already exists "
                           "on: %s\n", net_dev->dev_addr,
-                          batman_if->net_dev->name);
+                          hard_iface->net_dev->name);
                pr_warning("It is strongly recommended to keep mac addresses "
                           "unique to avoid problems!\n");
        }
@@ -201,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
 int hardif_min_mtu(struct net_device *soft_iface)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        /* allow big frames if all devices are capable to do so
         * (have MTU > 1500 + BAT_HEADER_LEN) */
        int min_mtu = ETH_DATA_LEN;
@@ -210,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
                goto out;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if ((batman_if->if_status != IF_ACTIVE) &&
-                   (batman_if->if_status != IF_TO_BE_ACTIVATED))
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if ((hard_iface->if_status != IF_ACTIVE) &&
+                   (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                        continue;
 
-               if (batman_if->soft_iface != soft_iface)
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+               min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
                                min_mtu);
        }
        rcu_read_unlock();
@@ -236,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
                soft_iface->mtu = min_mtu;
 }
 
-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
        struct bat_priv *bat_priv;
 
-       if (batman_if->if_status != IF_INACTIVE)
+       if (hard_iface->if_status != IF_INACTIVE)
                return;
 
-       bat_priv = netdev_priv(batman_if->soft_iface);
+       bat_priv = netdev_priv(hard_iface->soft_iface);
 
-       update_mac_addresses(batman_if);
-       batman_if->if_status = IF_TO_BE_ACTIVATED;
+       update_mac_addresses(hard_iface);
+       hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
        /**
         * the first active interface becomes our primary interface or
         * the next active interface after the old primary interface was removed
         */
        if (!bat_priv->primary_if)
-               set_primary_if(bat_priv, batman_if);
+               set_primary_if(bat_priv, hard_iface);
 
-       bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-                batman_if->net_dev->name);
+       bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+                hard_iface->net_dev->name);
 
-       update_min_mtu(batman_if->soft_iface);
+       update_min_mtu(hard_iface->soft_iface);
        return;
 }
 
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 {
-       if ((batman_if->if_status != IF_ACTIVE) &&
-          (batman_if->if_status != IF_TO_BE_ACTIVATED))
+       if ((hard_iface->if_status != IF_ACTIVE) &&
+           (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                return;
 
-       batman_if->if_status = IF_INACTIVE;
+       hard_iface->if_status = IF_INACTIVE;
 
-       bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-                batman_if->net_dev->name);
+       bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+                hard_iface->net_dev->name);
 
-       update_min_mtu(batman_if->soft_iface);
+       update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 {
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet;
+       struct net_device *soft_iface;
+       int ret;
 
-       if (batman_if->if_status != IF_NOT_IN_USE)
+       if (hard_iface->if_status != IF_NOT_IN_USE)
                goto out;
 
-       batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+       if (!atomic_inc_not_zero(&hard_iface->refcount))
+               goto out;
 
-       if (!batman_if->soft_iface) {
-               batman_if->soft_iface = softif_create(iface_name);
+       soft_iface = dev_get_by_name(&init_net, iface_name);
 
-               if (!batman_if->soft_iface)
+       if (!soft_iface) {
+               soft_iface = softif_create(iface_name);
+
+               if (!soft_iface) {
+                       ret = -ENOMEM;
                        goto err;
+               }
 
                /* dev_get_by_name() increases the reference counter for us */
-               dev_hold(batman_if->soft_iface);
+               dev_hold(soft_iface);
+       }
+
+       if (!softif_is_valid(soft_iface)) {
+               pr_err("Can't create batman mesh interface %s: "
+                      "already exists as regular interface\n",
+                      soft_iface->name);
+               dev_put(soft_iface);
+               ret = -EINVAL;
+               goto err;
        }
 
-       bat_priv = netdev_priv(batman_if->soft_iface);
-       batman_if->packet_len = BAT_PACKET_LEN;
-       batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
+       hard_iface->soft_iface = soft_iface;
+       bat_priv = netdev_priv(hard_iface->soft_iface);
+       hard_iface->packet_len = BAT_PACKET_LEN;
+       hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
-       if (!batman_if->packet_buff) {
-               bat_err(batman_if->soft_iface, "Can't add interface packet "
-                       "(%s): out of memory\n", batman_if->net_dev->name);
+       if (!hard_iface->packet_buff) {
+               bat_err(hard_iface->soft_iface, "Can't add interface packet "
+                       "(%s): out of memory\n", hard_iface->net_dev->name);
+               ret = -ENOMEM;
                goto err;
        }
 
-       batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+       batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
        batman_packet->packet_type = BAT_PACKET;
        batman_packet->version = COMPAT_VERSION;
        batman_packet->flags = 0;
@@ -314,107 +329,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
        batman_packet->tq = TQ_MAX_VALUE;
        batman_packet->num_hna = 0;
 
-       batman_if->if_num = bat_priv->num_ifaces;
+       hard_iface->if_num = bat_priv->num_ifaces;
        bat_priv->num_ifaces++;
-       batman_if->if_status = IF_INACTIVE;
-       orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+       hard_iface->if_status = IF_INACTIVE;
+       orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 
-       batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-       batman_if->batman_adv_ptype.func = batman_skb_recv;
-       batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-       kref_get(&batman_if->refcount);
-       dev_add_pack(&batman_if->batman_adv_ptype);
+       hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+       hard_iface->batman_adv_ptype.func = batman_skb_recv;
+       hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+       dev_add_pack(&hard_iface->batman_adv_ptype);
 
-       atomic_set(&batman_if->seqno, 1);
-       atomic_set(&batman_if->frag_seqno, 1);
-       bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-                batman_if->net_dev->name);
+       atomic_set(&hard_iface->seqno, 1);
+       atomic_set(&hard_iface->frag_seqno, 1);
+       bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+                hard_iface->net_dev->name);
 
-       if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+       if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
                ETH_DATA_LEN + BAT_HEADER_LEN)
-               bat_info(batman_if->soft_iface,
+               bat_info(hard_iface->soft_iface,
                        "The MTU of interface %s is too small (%i) to handle "
                        "the transport of batman-adv packets. Packets going "
                        "over this interface will be fragmented on layer2 "
                        "which could impact the performance. Setting the MTU "
                        "to %zi would solve the problem.\n",
-                       batman_if->net_dev->name, batman_if->net_dev->mtu,
+                       hard_iface->net_dev->name, hard_iface->net_dev->mtu,
                        ETH_DATA_LEN + BAT_HEADER_LEN);
 
-       if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+       if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
                ETH_DATA_LEN + BAT_HEADER_LEN)
-               bat_info(batman_if->soft_iface,
+               bat_info(hard_iface->soft_iface,
                        "The MTU of interface %s is too small (%i) to handle "
                        "the transport of batman-adv packets. If you experience"
                        " problems getting traffic through try increasing the "
                        "MTU to %zi.\n",
-                       batman_if->net_dev->name, batman_if->net_dev->mtu,
+                       hard_iface->net_dev->name, hard_iface->net_dev->mtu,
                        ETH_DATA_LEN + BAT_HEADER_LEN);
 
-       if (hardif_is_iface_up(batman_if))
-               hardif_activate_interface(batman_if);
+       if (hardif_is_iface_up(hard_iface))
+               hardif_activate_interface(hard_iface);
        else
-               bat_err(batman_if->soft_iface, "Not using interface %s "
+               bat_err(hard_iface->soft_iface, "Not using interface %s "
                        "(retrying later): interface not active\n",
-                       batman_if->net_dev->name);
+                       hard_iface->net_dev->name);
 
        /* begin scheduling originator messages on that interface */
-       schedule_own_packet(batman_if);
+       schedule_own_packet(hard_iface);
 
 out:
        return 0;
 
 err:
-       return -ENOMEM;
+       hardif_free_ref(hard_iface);
+       return ret;
 }
 
-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-       if (batman_if->if_status == IF_ACTIVE)
-               hardif_deactivate_interface(batman_if);
+       if (hard_iface->if_status == IF_ACTIVE)
+               hardif_deactivate_interface(hard_iface);
 
-       if (batman_if->if_status != IF_INACTIVE)
+       if (hard_iface->if_status != IF_INACTIVE)
                return;
 
-       bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-                batman_if->net_dev->name);
-       dev_remove_pack(&batman_if->batman_adv_ptype);
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+                hard_iface->net_dev->name);
+       dev_remove_pack(&hard_iface->batman_adv_ptype);
 
        bat_priv->num_ifaces--;
-       orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+       orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-       if (batman_if == bat_priv->primary_if) {
-               struct batman_if *new_if;
+       if (hard_iface == bat_priv->primary_if) {
+               struct hard_iface *new_if;
 
-               new_if = get_active_batman_if(batman_if->soft_iface);
+               new_if = hardif_get_active(hard_iface->soft_iface);
                set_primary_if(bat_priv, new_if);
 
                if (new_if)
-                       kref_put(&new_if->refcount, hardif_free_ref);
+                       hardif_free_ref(new_if);
        }
 
-       kfree(batman_if->packet_buff);
-       batman_if->packet_buff = NULL;
-       batman_if->if_status = IF_NOT_IN_USE;
+       kfree(hard_iface->packet_buff);
+       hard_iface->packet_buff = NULL;
+       hard_iface->if_status = IF_NOT_IN_USE;
 
-       /* delete all references to this batman_if */
+       /* delete all references to this hard_iface */
        purge_orig_ref(bat_priv);
-       purge_outstanding_packets(bat_priv, batman_if);
-       dev_put(batman_if->soft_iface);
+       purge_outstanding_packets(bat_priv, hard_iface);
+       dev_put(hard_iface->soft_iface);
 
        /* nobody uses this interface anymore */
        if (!bat_priv->num_ifaces)
-               softif_destroy(batman_if->soft_iface);
+               softif_destroy(hard_iface->soft_iface);
 
-       batman_if->soft_iface = NULL;
+       hard_iface->soft_iface = NULL;
+       hardif_free_ref(hard_iface);
 }
 
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        int ret;
 
        ret = is_valid_iface(net_dev);
@@ -423,73 +438,73 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
 
        dev_hold(net_dev);
 
-       batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-       if (!batman_if) {
+       hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+       if (!hard_iface) {
                pr_err("Can't add interface (%s): out of memory\n",
                       net_dev->name);
                goto release_dev;
        }
 
-       ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+       ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
        if (ret)
                goto free_if;
 
-       batman_if->if_num = -1;
-       batman_if->net_dev = net_dev;
-       batman_if->soft_iface = NULL;
-       batman_if->if_status = IF_NOT_IN_USE;
-       INIT_LIST_HEAD(&batman_if->list);
-       kref_init(&batman_if->refcount);
+       hard_iface->if_num = -1;
+       hard_iface->net_dev = net_dev;
+       hard_iface->soft_iface = NULL;
+       hard_iface->if_status = IF_NOT_IN_USE;
+       INIT_LIST_HEAD(&hard_iface->list);
+       /* extra reference for return */
+       atomic_set(&hard_iface->refcount, 2);
 
-       check_known_mac_addr(batman_if->net_dev);
+       check_known_mac_addr(hard_iface->net_dev);
 
-       spin_lock(&if_list_lock);
-       list_add_tail_rcu(&batman_if->list, &if_list);
-       spin_unlock(&if_list_lock);
+       spin_lock(&hardif_list_lock);
+       list_add_tail_rcu(&hard_iface->list, &hardif_list);
+       spin_unlock(&hardif_list_lock);
 
-       /* extra reference for return */
-       kref_get(&batman_if->refcount);
-       return batman_if;
+       return hard_iface;
 
 free_if:
-       kfree(batman_if);
+       kfree(hard_iface);
 release_dev:
        dev_put(net_dev);
 out:
        return NULL;
 }
 
-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
        /* first deactivate interface */
-       if (batman_if->if_status != IF_NOT_IN_USE)
-               hardif_disable_interface(batman_if);
+       if (hard_iface->if_status != IF_NOT_IN_USE)
+               hardif_disable_interface(hard_iface);
 
-       if (batman_if->if_status != IF_NOT_IN_USE)
+       if (hard_iface->if_status != IF_NOT_IN_USE)
                return;
 
-       batman_if->if_status = IF_TO_BE_REMOVED;
-       sysfs_del_hardif(&batman_if->hardif_obj);
-       call_rcu(&batman_if->rcu, hardif_free_rcu);
+       hard_iface->if_status = IF_TO_BE_REMOVED;
+       sysfs_del_hardif(&hard_iface->hardif_obj);
+       hardif_free_ref(hard_iface);
 }
 
 void hardif_remove_interfaces(void)
 {
-       struct batman_if *batman_if, *batman_if_tmp;
+       struct hard_iface *hard_iface, *hard_iface_tmp;
        struct list_head if_queue;
 
        INIT_LIST_HEAD(&if_queue);
 
-       spin_lock(&if_list_lock);
-       list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-               list_del_rcu(&batman_if->list);
-               list_add_tail(&batman_if->list, &if_queue);
+       spin_lock(&hardif_list_lock);
+       list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+                                &hardif_list, list) {
+               list_del_rcu(&hard_iface->list);
+               list_add_tail(&hard_iface->list, &if_queue);
        }
-       spin_unlock(&if_list_lock);
+       spin_unlock(&hardif_list_lock);
 
        rtnl_lock();
-       list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-               hardif_remove_interface(batman_if);
+       list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
+               hardif_remove_interface(hard_iface);
        }
        rtnl_unlock();
 }
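
hardif_remove_interfaces() above shows a common teardown shape: detach every entry onto a private queue while briefly holding the hardif_list_lock spinlock, then do the heavyweight per-interface removal under rtnl_lock, outside the spinlock. A userspace sketch of that splice-then-process pattern (toy singly linked list, a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *global_list;

static void teardown(struct node *n)
{
        /* Stands in for hardif_remove_interface(): potentially slow work
         * that must not run under the list lock. */
        printf("removing interface %d\n", n->id);
        free(n);
}

static void remove_all(void)
{
        struct node *queue, *n;

        /* Detach everything while holding the lock only briefly... */
        pthread_mutex_lock(&list_lock);
        queue = global_list;
        global_list = NULL;
        pthread_mutex_unlock(&list_lock);

        /* ...then do the expensive per-entry teardown outside it. */
        while ((n = queue)) {
                queue = n->next;
                teardown(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->id = i;
                n->next = global_list;
                global_list = n;
        }
        remove_all();
        return 0;
}
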
@@ -498,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
                         unsigned long event, void *ptr)
 {
        struct net_device *net_dev = (struct net_device *)ptr;
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        struct bat_priv *bat_priv;
 
-       if (!batman_if && event == NETDEV_REGISTER)
-               batman_if = hardif_add_interface(net_dev);
+       if (!hard_iface && event == NETDEV_REGISTER)
+               hard_iface = hardif_add_interface(net_dev);
 
-       if (!batman_if)
+       if (!hard_iface)
                goto out;
 
        switch (event) {
        case NETDEV_UP:
-               hardif_activate_interface(batman_if);
+               hardif_activate_interface(hard_iface);
                break;
        case NETDEV_GOING_DOWN:
        case NETDEV_DOWN:
-               hardif_deactivate_interface(batman_if);
+               hardif_deactivate_interface(hard_iface);
                break;
        case NETDEV_UNREGISTER:
-               spin_lock(&if_list_lock);
-               list_del_rcu(&batman_if->list);
-               spin_unlock(&if_list_lock);
+               spin_lock(&hardif_list_lock);
+               list_del_rcu(&hard_iface->list);
+               spin_unlock(&hardif_list_lock);
 
-               hardif_remove_interface(batman_if);
+               hardif_remove_interface(hard_iface);
                break;
        case NETDEV_CHANGEMTU:
-               if (batman_if->soft_iface)
-                       update_min_mtu(batman_if->soft_iface);
+               if (hard_iface->soft_iface)
+                       update_min_mtu(hard_iface->soft_iface);
                break;
        case NETDEV_CHANGEADDR:
-               if (batman_if->if_status == IF_NOT_IN_USE)
+               if (hard_iface->if_status == IF_NOT_IN_USE)
                        goto hardif_put;
 
-               check_known_mac_addr(batman_if->net_dev);
-               update_mac_addresses(batman_if);
+               check_known_mac_addr(hard_iface->net_dev);
+               update_mac_addresses(hard_iface);
 
-               bat_priv = netdev_priv(batman_if->soft_iface);
-               if (batman_if == bat_priv->primary_if)
+               bat_priv = netdev_priv(hard_iface->soft_iface);
+               if (hard_iface == bat_priv->primary_if)
                        update_primary_addr(bat_priv);
                break;
        default:
@@ -542,22 +557,23 @@ static int hard_if_event(struct notifier_block *this,
        };
 
 hardif_put:
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hardif_free_ref(hard_iface);
 out:
        return NOTIFY_DONE;
 }
 
 /* receive a packet with the batman ethertype coming on a hard
  * interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+                          struct packet_type *ptype,
+                          struct net_device *orig_dev)
 {
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet;
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        int ret;
 
-       batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
+       hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
        skb = skb_share_check(skb, GFP_ATOMIC);
 
        /* skb was released by skb_share_check() */
@@ -573,16 +589,16 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                                || !skb_mac_header(skb)))
                goto err_free;
 
-       if (!batman_if->soft_iface)
+       if (!hard_iface->soft_iface)
                goto err_free;
 
-       bat_priv = netdev_priv(batman_if->soft_iface);
+       bat_priv = netdev_priv(hard_iface->soft_iface);
 
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto err_free;
 
        /* discard frames on not active interfaces */
-       if (batman_if->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != IF_ACTIVE)
                goto err_free;
 
        batman_packet = (struct batman_packet *)skb->data;
@@ -600,32 +616,32 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        switch (batman_packet->packet_type) {
                /* batman originator packet */
        case BAT_PACKET:
-               ret = recv_bat_packet(skb, batman_if);
+               ret = recv_bat_packet(skb, hard_iface);
                break;
 
                /* batman icmp packet */
        case BAT_ICMP:
-               ret = recv_icmp_packet(skb, batman_if);
+               ret = recv_icmp_packet(skb, hard_iface);
                break;
 
                /* unicast packet */
        case BAT_UNICAST:
-               ret = recv_unicast_packet(skb, batman_if);
+               ret = recv_unicast_packet(skb, hard_iface);
                break;
 
                /* fragmented unicast packet */
        case BAT_UNICAST_FRAG:
-               ret = recv_ucast_frag_packet(skb, batman_if);
+               ret = recv_ucast_frag_packet(skb, hard_iface);
                break;
 
                /* broadcast packet */
        case BAT_BCAST:
-               ret = recv_bcast_packet(skb, batman_if);
+               ret = recv_bcast_packet(skb, hard_iface);
                break;
 
                /* vis packet */
        case BAT_VIS:
-               ret = recv_vis_packet(skb, batman_if);
+               ret = recv_vis_packet(skb, hard_iface);
                break;
        default:
                ret = NET_RX_DROP;
index 30ec3b8..a9ddf36 100644

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 
 extern struct notifier_block hard_if_notifier;
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+void hardif_disable_interface(struct hard_iface *hard_iface);
 void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
-                               struct net_device *dev,
-                               struct packet_type *ptype,
-                               struct net_device *orig_dev);
 int hardif_min_mtu(struct net_device *soft_iface);
 void update_min_mtu(struct net_device *soft_iface);
+void hardif_free_rcu(struct rcu_head *rcu);
 
-static inline void hardif_free_ref(struct kref *refcount)
+static inline void hardif_free_ref(struct hard_iface *hard_iface)
 {
-       struct batman_if *batman_if;
-
-       batman_if = container_of(refcount, struct batman_if, refcount);
-       kfree(batman_if);
+       if (atomic_dec_and_test(&hard_iface->refcount))
+               call_rcu(&hard_iface->rcu, hardif_free_rcu);
 }
 
 #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
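
hardif_free_ref() above replaces the old kref scheme: the last put does not free the interface directly but defers the kfree() to hardif_free_rcu() via call_rcu(), so concurrent RCU readers can finish first. A minimal sketch of the matching get/put pair, with hypothetical names and assuming a kernel context:

	#include <linux/atomic.h>
	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		atomic_t refcount;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct obj, rcu));
	}

	static void obj_put(struct obj *obj)
	{
		/* last reference gone: free only after a grace period */
		if (atomic_dec_and_test(&obj->refcount))
			call_rcu(&obj->rcu, obj_free_rcu);
	}

	/* lookup side: take a reference only while an RCU read-side
	 * critical section guarantees the object is still reachable;
	 * atomic_inc_not_zero() refuses objects already being torn down */
	static struct obj *obj_get(struct obj *obj)
	{
		if (!obj || !atomic_inc_not_zero(&obj->refcount))
			return NULL;
		return obj;
	}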
index 26e623e..c5213d8 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash)
 {
        int i;
 
-       for (i = 0 ; i < hash->size; i++)
+       for (i = 0 ; i < hash->size; i++) {
                INIT_HLIST_HEAD(&hash->table[i]);
+               spin_lock_init(&hash->list_locks[i]);
+       }
 }
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash)
 {
+       kfree(hash->list_locks);
        kfree(hash->table);
        kfree(hash);
 }
@@ -43,20 +46,25 @@ struct hashtable_t *hash_new(int size)
 {
        struct hashtable_t *hash;
 
-       hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
-
+       hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
        if (!hash)
                return NULL;
 
-       hash->size = size;
        hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+       if (!hash->table)
+               goto free_hash;
 
-       if (!hash->table) {
-               kfree(hash);
-               return NULL;
-       }
+       hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+       if (!hash->list_locks)
+               goto free_table;
 
+       hash->size = size;
        hash_init(hash);
-
        return hash;
+
+free_table:
+       kfree(hash->table);
+free_hash:
+       kfree(hash);
+       return NULL;
 }
index 09216ad..434822b 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
  * compare 2 element datas for their keys,
  * return 0 if same and not 0 if not
  * same */
-typedef int (*hashdata_compare_cb)(void *, void *);
+typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);
 
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
 typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *, void *);
-
-struct element_t {
-       void *data;             /* pointer to the data */
-       struct hlist_node hlist;        /* bucket list pointer */
-};
+typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
-       struct hlist_head *table;   /* the hashtable itself, with the buckets */
+       struct hlist_head *table;   /* the hashtable itself with the buckets */
+       spinlock_t *list_locks;     /* spinlock for each hash list entry */
        int size;                   /* size of hashtable */
 };
 
 /* allocates and clears the hash */
 struct hashtable_t *hash_new(int size);
 
-/* remove element if you already found the element you want to delete and don't
- * need the overhead to find it again with hash_remove().  But usually, you
- * don't want to use this function, as it fiddles with hash-internals. */
-void *hash_remove_element(struct hashtable_t *hash, struct element_t *elem);
-
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
 
@@ -64,21 +55,22 @@ static inline void hash_delete(struct hashtable_t *hash,
                               hashdata_free_cb free_cb, void *arg)
 {
        struct hlist_head *head;
-       struct hlist_node *walk, *safe;
-       struct element_t *bucket;
+       struct hlist_node *node, *node_tmp;
+       spinlock_t *list_lock; /* spinlock to protect write access */
        int i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
 
-               hlist_for_each_safe(walk, safe, head) {
-                       bucket = hlist_entry(walk, struct element_t, hlist);
-                       if (free_cb)
-                               free_cb(bucket->data, arg);
+               spin_lock_bh(list_lock);
+               hlist_for_each_safe(node, node_tmp, head) {
+                       hlist_del_rcu(node);
 
-                       hlist_del(walk);
-                       kfree(bucket);
+                       if (free_cb)
+                               free_cb(node, arg);
                }
+               spin_unlock_bh(list_lock);
        }
 
        hash_destroy(hash);
@@ -87,35 +79,41 @@ static inline void hash_delete(struct hashtable_t *hash,
 /* adds data to the hashtable. returns 0 on success, -1 on error */
 static inline int hash_add(struct hashtable_t *hash,
                           hashdata_compare_cb compare,
-                          hashdata_choose_cb choose, void *data)
+                          hashdata_choose_cb choose,
+                          void *data, struct hlist_node *data_node)
 {
        int index;
        struct hlist_head *head;
-       struct hlist_node *walk, *safe;
-       struct element_t *bucket;
+       struct hlist_node *node;
+       spinlock_t *list_lock; /* spinlock to protect write access */
 
        if (!hash)
-               return -1;
+               goto err;
 
        index = choose(data, hash->size);
        head = &hash->table[index];
+       list_lock = &hash->list_locks[index];
+
+       rcu_read_lock();
+       __hlist_for_each_rcu(node, head) {
+               if (!compare(node, data))
+                       continue;
 
-       hlist_for_each_safe(walk, safe, head) {
-               bucket = hlist_entry(walk, struct element_t, hlist);
-               if (compare(bucket->data, data))
-                       return -1;
+               goto err_unlock;
        }
+       rcu_read_unlock();
 
        /* no duplicate found in list, add new element */
-       bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-
-       if (!bucket)
-               return -1;
-
-       bucket->data = data;
-       hlist_add_head(&bucket->hlist, head);
+       spin_lock_bh(list_lock);
+       hlist_add_head_rcu(data_node, head);
+       spin_unlock_bh(list_lock);
 
        return 0;
+
+err_unlock:
+       rcu_read_unlock();
+err:
+       return -1;
 }
 
 /* removes data from hash, if found. returns pointer do data on success, so you
@@ -127,50 +125,25 @@ static inline void *hash_remove(struct hashtable_t *hash,
                                hashdata_choose_cb choose, void *data)
 {
        size_t index;
-       struct hlist_node *walk;
-       struct element_t *bucket;
+       struct hlist_node *node;
        struct hlist_head *head;
-       void *data_save;
+       void *data_save = NULL;
 
        index = choose(data, hash->size);
        head = &hash->table[index];
 
-       hlist_for_each_entry(bucket, walk, head, hlist) {
-               if (compare(bucket->data, data)) {
-                       data_save = bucket->data;
-                       hlist_del(walk);
-                       kfree(bucket);
-                       return data_save;
-               }
-       }
-
-       return NULL;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-static inline void *hash_find(struct hashtable_t *hash,
-                             hashdata_compare_cb compare,
-                             hashdata_choose_cb choose, void *keydata)
-{
-       int index;
-       struct hlist_head *head;
-       struct hlist_node *walk;
-       struct element_t *bucket;
-
-       if (!hash)
-               return NULL;
-
-       index = choose(keydata , hash->size);
-       head = &hash->table[index];
+       spin_lock_bh(&hash->list_locks[index]);
+       hlist_for_each(node, head) {
+               if (!compare(node, data))
+                       continue;
 
-       hlist_for_each(walk, head) {
-               bucket = hlist_entry(walk, struct element_t, hlist);
-               if (compare(bucket->data, keydata))
-                       return bucket->data;
+               data_save = node;
+               hlist_del_rcu(node);
+               break;
        }
+       spin_unlock_bh(&hash->list_locks[index]);
 
-       return NULL;
+       return data_save;
 }
 
 #endif /* _NET_BATMAN_ADV_HASH_H_ */
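
With struct element_t gone, the hash no longer allocates wrapper buckets: callers embed a struct hlist_node in their own objects, hand it to hash_add(), and the compare callback uses container_of() to get from the node back to the enclosing structure (compare_orig() in originator.h below does exactly that). A caller-side sketch with hypothetical types, assuming a kernel context:

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct my_entry {
		u8 key[6];
		struct hlist_node hash_entry;	/* intrusive node for hash_add() */
	};

	/* return 1 when the node's key matches data2 */
	static int my_compare(struct hlist_node *node, void *data2)
	{
		struct my_entry *entry;

		entry = container_of(node, struct my_entry, hash_entry);
		return memcmp(entry->key, data2, sizeof(entry->key)) == 0;
	}

	/* map a key to a bucket index; any stable function of the key works */
	static int my_choose(void *data, int size)
	{
		u8 *key = data;

		return key[0] % size;
	}

	/* insertion then looks like:
	 *	hash_add(hash, my_compare, my_choose,
	 *		 entry->key, &entry->hash_entry);
	 */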
index ecf6d7f..34ce56c 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -24,7 +24,6 @@
 #include <linux/slab.h>
 #include "icmp_socket.h"
 #include "send.h"
-#include "types.h"
 #include "hash.h"
 #include "originator.h"
 #include "hard-interface.h"
@@ -157,10 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        struct sk_buff *skb;
        struct icmp_packet_rr *icmp_packet;
 
-       struct orig_node *orig_node;
-       struct batman_if *batman_if;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        size_t packet_len = sizeof(struct icmp_packet);
-       uint8_t dstaddr[ETH_ALEN];
 
        if (len < sizeof(struct icmp_packet)) {
                bat_dbg(DBG_BATMAN, bat_priv,
@@ -220,47 +218,52 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto dst_unreach;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  icmp_packet->dst));
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
 
        if (!orig_node)
                goto unlock;
 
-       if (!orig_node->router)
+       neigh_node = orig_node->router;
+
+       if (!neigh_node)
                goto unlock;
 
-       batman_if = orig_node->router->if_incoming;
-       memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
 
-       if (!batman_if)
+       if (!neigh_node->if_incoming)
                goto dst_unreach;
 
-       if (batman_if->if_status != IF_ACTIVE)
+       if (neigh_node->if_incoming->if_status != IF_ACTIVE)
                goto dst_unreach;
 
        memcpy(icmp_packet->orig,
               bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 
        if (packet_len == sizeof(struct icmp_packet_rr))
-               memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
-
-
-       send_skb_packet(skb, batman_if, dstaddr);
+               memcpy(icmp_packet->rr,
+                      neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
 
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
        goto out;
 
 unlock:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
 dst_unreach:
        icmp_packet->msg_type = DESTINATION_UNREACHABLE;
        bat_socket_add_packet(socket_client, icmp_packet, packet_len);
 free_skb:
        kfree_skb(skb);
 out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return len;
 }
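
bat_socket_write() now initializes orig_node and neigh_node to NULL and routes every exit through the out label, so one pair of conditional *_free_ref() calls releases whichever references were actually taken. Reduced to a sketch that leans on the helpers this patch introduces (the wrapper function itself is hypothetical):

	static int send_to_dst(struct bat_priv *bat_priv, uint8_t *dst)
	{
		struct orig_node *orig_node = NULL;
		struct neigh_node *neigh_node = NULL;
		int ret = -1;

		rcu_read_lock();
		orig_node = orig_hash_find(bat_priv, dst);
		if (!orig_node)
			goto unlock;

		neigh_node = orig_node->router;
		if (!neigh_node || !atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
		rcu_read_unlock();

		/* ... transmit via neigh_node->if_incoming here ... */
		ret = 0;
		goto out;

	unlock:
		rcu_read_unlock();
	out:
		if (neigh_node)
			neigh_node_free_ref(neigh_node);
		if (orig_node)
			orig_node_free_ref(orig_node);
		return ret;
	}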
 
index bf9b348..462b190 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
 #define _NET_BATMAN_ADV_ICMP_SOCKET_H_
 
-#include "types.h"
-
 #define ICMP_SOCKET "socket"
 
 void bat_socket_init(void);
index b827f6a..709b33b 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
-#include "types.h"
 #include "vis.h"
 #include "hash.h"
 
-struct list_head if_list;
+struct list_head hardif_list;
 
 unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
@@ -42,7 +41,7 @@ struct workqueue_struct *bat_event_workqueue;
 
 static int __init batman_init(void)
 {
-       INIT_LIST_HEAD(&if_list);
+       INIT_LIST_HEAD(&hardif_list);
 
        /* the name should not be longer than 10 chars - see
         * http://lwn.net/Articles/23634/ */
@@ -80,7 +79,6 @@ int mesh_init(struct net_device *soft_iface)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
 
-       spin_lock_init(&bat_priv->orig_hash_lock);
        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->hna_lhash_lock);
@@ -155,14 +153,14 @@ void dec_module_count(void)
 
 int is_my_mac(uint8_t *addr)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->if_status != IF_ACTIVE)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->if_status != IF_ACTIVE)
                        continue;
 
-               if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
+               if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
                }
index 65106fb..dc24869 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,9 +22,6 @@
 #ifndef _NET_BATMAN_ADV_MAIN_H_
 #define _NET_BATMAN_ADV_MAIN_H_
 
-/* Kernel Programming */
-#define LINUX
-
 #define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
                      "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
 #define DRIVER_DESC   "B.A.T.M.A.N. advanced"
@@ -54,7 +51,6 @@
 
 #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
 
-#define PACKBUFF_SIZE 2000
 #define LOG_BUF_LEN 8192         /* has to be a power of 2 */
 
 #define VIS_INTERVAL 5000      /* 5 seconds */
 #define DBG_ROUTES 2   /* route or hna added / changed / deleted */
 #define DBG_ALL 3
 
-#define LOG_BUF_LEN 8192          /* has to be a power of 2 */
-
 
 /*
  *  Vis
  */
 
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
 /*
  * Kernel headers
  */
 #define REVISION_VERSION_STR " "REVISION_VERSION
 #endif
 
-extern struct list_head if_list;
+extern struct list_head hardif_list;
 
 extern unsigned char broadcast_addr[];
 extern struct workqueue_struct *bat_event_workqueue;
@@ -158,13 +150,6 @@ static inline void bat_dbg(char type __always_unused,
 }
 #endif
 
-#define bat_warning(net_dev, fmt, arg...)                              \
-       do {                                                            \
-               struct net_device *_netdev = (net_dev);                 \
-               struct bat_priv *_batpriv = netdev_priv(_netdev);       \
-               bat_dbg(DBG_ALL, _batpriv, fmt, ## arg);                \
-               pr_warning("%s: " fmt, _netdev->name, ## arg);          \
-       } while (0)
 #define bat_info(net_dev, fmt, arg...)                                 \
        do {                                                            \
                struct net_device *_netdev = (net_dev);                 \
@@ -180,4 +165,14 @@ static inline void bat_dbg(char type __always_unused,
                pr_err("%s: " fmt, _netdev->name, ## arg);              \
        } while (0)
 
+/**
+ * returns 1 if they are the same ethernet addr
+ *
+ * note: can't use compare_ether_addr() as it requires aligned memory
+ */
+static inline int compare_eth(void *data1, void *data2)
+{
+       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
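
compare_eth() deliberately uses memcmp() because compare_ether_addr() requires 16-bit aligned addresses, which these callers cannot guarantee. A tiny stand-alone illustration of the same semantics (plain userspace C, not part of the patch):

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6

	/* returns 1 if the two ethernet addresses are the same */
	static int compare_eth(const void *data1, const void *data2)
	{
		return memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0;
	}

	int main(void)
	{
		unsigned char a[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
		unsigned char b[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
		unsigned char c[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x66 };

		printf("a == b: %d\n", compare_eth(a, b));	/* prints 1 */
		printf("a == c: %d\n", compare_eth(a, c));	/* prints 0 */
		return 0;
	}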
index 6b7fb6b..0b91330 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -44,24 +44,36 @@ int originator_init(struct bat_priv *bat_priv)
        if (bat_priv->orig_hash)
                return 1;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        bat_priv->orig_hash = hash_new(1024);
 
        if (!bat_priv->orig_hash)
                goto err;
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
        start_purge_timer(bat_priv);
        return 1;
 
 err:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
        return 0;
 }
 
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
-               uint8_t *neigh, struct batman_if *if_incoming)
+static void neigh_node_free_rcu(struct rcu_head *rcu)
+{
+       struct neigh_node *neigh_node;
+
+       neigh_node = container_of(rcu, struct neigh_node, rcu);
+       kfree(neigh_node);
+}
+
+void neigh_node_free_ref(struct neigh_node *neigh_node)
+{
+       if (atomic_dec_and_test(&neigh_node->refcount))
+               call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
+}
+
+struct neigh_node *create_neighbor(struct orig_node *orig_node,
+                                  struct orig_node *orig_neigh_node,
+                                  uint8_t *neigh,
+                                  struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct neigh_node *neigh_node;
@@ -73,50 +85,94 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
        if (!neigh_node)
                return NULL;
 
-       INIT_LIST_HEAD(&neigh_node->list);
+       INIT_HLIST_NODE(&neigh_node->list);
+       INIT_LIST_HEAD(&neigh_node->bonding_list);
 
        memcpy(neigh_node->addr, neigh, ETH_ALEN);
        neigh_node->orig_node = orig_neigh_node;
        neigh_node->if_incoming = if_incoming;
 
-       list_add_tail(&neigh_node->list, &orig_node->neigh_list);
+       /* extra reference for return */
+       atomic_set(&neigh_node->refcount, 2);
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+       spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_node;
 }
 
-static void free_orig_node(void *data, void *arg)
+static void orig_node_free_rcu(struct rcu_head *rcu)
 {
-       struct list_head *list_pos, *list_pos_tmp;
-       struct neigh_node *neigh_node;
-       struct orig_node *orig_node = (struct orig_node *)data;
-       struct bat_priv *bat_priv = (struct bat_priv *)arg;
+       struct hlist_node *node, *node_tmp;
+       struct neigh_node *neigh_node, *tmp_neigh_node;
+       struct orig_node *orig_node;
 
-       /* for all neighbors towards this originator ... */
-       list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-               neigh_node = list_entry(list_pos, struct neigh_node, list);
+       orig_node = container_of(rcu, struct orig_node, rcu);
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+
+       /* for all bonding members ... */
+       list_for_each_entry_safe(neigh_node, tmp_neigh_node,
+                                &orig_node->bond_list, bonding_list) {
+               list_del_rcu(&neigh_node->bonding_list);
+               neigh_node_free_ref(neigh_node);
+       }
 
-               list_del(list_pos);
-               kfree(neigh_node);
+       /* for all neighbors towards this originator ... */
+       hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+                                 &orig_node->neigh_list, list) {
+               hlist_del_rcu(&neigh_node->list);
+               neigh_node_free_ref(neigh_node);
        }
 
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
        frag_list_free(&orig_node->frag_list);
-       hna_global_del_orig(bat_priv, orig_node, "originator timed out");
+       hna_global_del_orig(orig_node->bat_priv, orig_node,
+                           "originator timed out");
 
        kfree(orig_node->bcast_own);
        kfree(orig_node->bcast_own_sum);
        kfree(orig_node);
 }
 
+void orig_node_free_ref(struct orig_node *orig_node)
+{
+       if (atomic_dec_and_test(&orig_node->refcount))
+               call_rcu(&orig_node->rcu, orig_node_free_rcu);
+}
+
 void originator_free(struct bat_priv *bat_priv)
 {
-       if (!bat_priv->orig_hash)
+       struct hashtable_t *hash = bat_priv->orig_hash;
+       struct hlist_node *node, *node_tmp;
+       struct hlist_head *head;
+       spinlock_t *list_lock; /* spinlock to protect write access */
+       struct orig_node *orig_node;
+       int i;
+
+       if (!hash)
                return;
 
        cancel_delayed_work_sync(&bat_priv->orig_work);
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
        bat_priv->orig_hash = NULL;
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
+
+               spin_lock_bh(list_lock);
+               hlist_for_each_entry_safe(orig_node, node, node_tmp,
+                                         head, hash_entry) {
+
+                       hlist_del_rcu(node);
+                       orig_node_free_ref(orig_node);
+               }
+               spin_unlock_bh(list_lock);
+       }
+
+       hash_destroy(hash);
 }
 
 /* this function finds or creates an originator entry for the given
@@ -127,10 +183,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        int size;
        int hash_added;
 
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  addr));
-
+       orig_node = orig_hash_find(bat_priv, addr);
        if (orig_node)
                return orig_node;
 
@@ -141,8 +194,16 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        if (!orig_node)
                return NULL;
 
-       INIT_LIST_HEAD(&orig_node->neigh_list);
+       INIT_HLIST_HEAD(&orig_node->neigh_list);
+       INIT_LIST_HEAD(&orig_node->bond_list);
+       spin_lock_init(&orig_node->ogm_cnt_lock);
+       spin_lock_init(&orig_node->bcast_seqno_lock);
+       spin_lock_init(&orig_node->neigh_list_lock);
+
+       /* extra reference for return */
+       atomic_set(&orig_node->refcount, 2);
 
+       orig_node->bat_priv = bat_priv;
        memcpy(orig_node->orig, addr, ETH_ALEN);
        orig_node->router = NULL;
        orig_node->hna_buff = NULL;
@@ -151,6 +212,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        orig_node->batman_seqno_reset = jiffies - 1
                                        - msecs_to_jiffies(RESET_PROTECTION_MS);
 
+       atomic_set(&orig_node->bond_candidates, 0);
+
        size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
 
        orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
@@ -166,8 +229,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        if (!orig_node->bcast_own_sum)
                goto free_bcast_own;
 
-       hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
-                             orig_node);
+       hash_added = hash_add(bat_priv->orig_hash, compare_orig,
+                             choose_orig, orig_node, &orig_node->hash_entry);
        if (hash_added < 0)
                goto free_bcast_own_sum;
 
@@ -185,23 +248,30 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
                                 struct orig_node *orig_node,
                                 struct neigh_node **best_neigh_node)
 {
-       struct list_head *list_pos, *list_pos_tmp;
+       struct hlist_node *node, *node_tmp;
        struct neigh_node *neigh_node;
        bool neigh_purged = false;
 
        *best_neigh_node = NULL;
 
+       spin_lock_bh(&orig_node->neigh_list_lock);
+
        /* for all neighbors towards this originator ... */
-       list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-               neigh_node = list_entry(list_pos, struct neigh_node, list);
+       hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+                                 &orig_node->neigh_list, list) {
 
                if ((time_after(jiffies,
                        neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
                    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
+                   (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
                    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
 
-                       if (neigh_node->if_incoming->if_status ==
-                                                       IF_TO_BE_REMOVED)
+                       if ((neigh_node->if_incoming->if_status ==
+                                                               IF_INACTIVE) ||
+                           (neigh_node->if_incoming->if_status ==
+                                                       IF_NOT_IN_USE) ||
+                           (neigh_node->if_incoming->if_status ==
+                                                       IF_TO_BE_REMOVED))
                                bat_dbg(DBG_BATMAN, bat_priv,
                                        "neighbor purge: originator %pM, "
                                        "neighbor: %pM, iface: %s\n",
@@ -215,14 +285,18 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
                                        (neigh_node->last_valid / HZ));
 
                        neigh_purged = true;
-                       list_del(list_pos);
-                       kfree(neigh_node);
+
+                       hlist_del_rcu(&neigh_node->list);
+                       bonding_candidate_del(orig_node, neigh_node);
+                       neigh_node_free_ref(neigh_node);
                } else {
                        if ((!*best_neigh_node) ||
                            (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
                                *best_neigh_node = neigh_node;
                }
        }
+
+       spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_purged;
 }
 
@@ -245,9 +319,6 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
                                      best_neigh_node,
                                      orig_node->hna_buff,
                                      orig_node->hna_buff_len);
-                       /* update bonding candidates, we could have lost
-                        * some candidates. */
-                       update_bonding_candidates(bat_priv, orig_node);
                }
        }
 
@@ -257,40 +328,38 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
 static void _purge_orig(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk, *safe;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
+       spinlock_t *list_lock; /* spinlock to protect write access */
        struct orig_node *orig_node;
        int i;
 
        if (!hash)
                return;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        /* for all origins... */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
 
-               hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-                       orig_node = bucket->data;
-
+               spin_lock_bh(list_lock);
+               hlist_for_each_entry_safe(orig_node, node, node_tmp,
+                                         head, hash_entry) {
                        if (purge_orig_node(bat_priv, orig_node)) {
                                if (orig_node->gw_flags)
                                        gw_node_delete(bat_priv, orig_node);
-                               hlist_del(walk);
-                               kfree(bucket);
-                               free_orig_node(orig_node, bat_priv);
+                               hlist_del_rcu(node);
+                               orig_node_free_ref(orig_node);
+                               continue;
                        }
 
                        if (time_after(jiffies, orig_node->last_frag_packet +
                                                msecs_to_jiffies(FRAG_TIMEOUT)))
                                frag_list_free(&orig_node->frag_list);
                }
+               spin_unlock_bh(list_lock);
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-
        gw_node_purge(bat_priv);
        gw_election(bat_priv);
 
@@ -318,9 +387,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct neigh_node *neigh_node;
        int batman_count = 0;
@@ -348,14 +416,11 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
                   "outgoingIF", "Potential nexthops");
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        if (!orig_node->router)
                                continue;
 
@@ -374,8 +439,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                                   neigh_node->addr,
                                   neigh_node->if_incoming->net_dev->name);
 
-                       list_for_each_entry(neigh_node, &orig_node->neigh_list,
-                                           list) {
+                       hlist_for_each_entry_rcu(neigh_node, node_tmp,
+                                                &orig_node->neigh_list, list) {
                                seq_printf(seq, " %pM (%3i)", neigh_node->addr,
                                                neigh_node->tq_avg);
                        }
@@ -383,10 +448,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                        seq_printf(seq, "\n");
                        batman_count++;
                }
+               rcu_read_unlock();
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-
        if ((batman_count == 0))
                seq_printf(seq, "No batman nodes in range ...\n");
 
@@ -423,36 +487,36 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
        return 0;
 }
 
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
-       int i;
+       int i, ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       ret = orig_node_add_if(orig_node, max_if_num);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
-                       if (orig_node_add_if(orig_node, max_if_num) == -1)
+                       if (ret == -1)
                                goto err;
                }
+               rcu_read_unlock();
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
        return 0;
 
 err:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
        return -ENOMEM;
 }
 
@@ -508,57 +572,55 @@ free_own_sum:
        return 0;
 }
 
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
-       struct batman_if *batman_if_tmp;
+       struct hard_iface *hard_iface_tmp;
        struct orig_node *orig_node;
        int i, ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = orig_node_del_if(orig_node, max_if_num,
-                                       batman_if->if_num);
+                                       hard_iface->if_num);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
                        if (ret == -1)
                                goto err;
                }
+               rcu_read_unlock();
        }
 
        /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
-               if (batman_if_tmp->if_status == IF_NOT_IN_USE)
+       list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
+               if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
                        continue;
 
-               if (batman_if == batman_if_tmp)
+               if (hard_iface == hard_iface_tmp)
                        continue;
 
-               if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+               if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;
 
-               if (batman_if_tmp->if_num > batman_if->if_num)
-                       batman_if_tmp->if_num--;
+               if (hard_iface_tmp->if_num > hard_iface->if_num)
+                       hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();
 
-       batman_if->if_num = -1;
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       hard_iface->if_num = -1;
        return 0;
 
 err:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
        return -ENOMEM;
 }
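
Both create_neighbor() and get_orig_node() above set the fresh object's refcount to 2: one reference belongs to the list or hash that now points at it, the other to the caller, who drops it later with neigh_node_free_ref()/orig_node_free_ref(). The convention in a compact sketch (hypothetical names, kernel context assumed):

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct obj {
		atomic_t refcount;
		struct hlist_node node;
	};

	static void obj_put(struct obj *obj)
	{
		if (atomic_dec_and_test(&obj->refcount))
			kfree(obj);	/* the real code defers this via call_rcu() */
	}

	/* reference #1 for the list, reference #2 for the caller */
	static struct obj *obj_create(struct hlist_head *head)
	{
		struct obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

		if (!obj)
			return NULL;

		atomic_set(&obj->refcount, 2);
		hlist_add_head(&obj->node, head);
		return obj;
	}

	/* a typical caller uses the object and then drops only its own
	 * reference; the list's reference keeps the object alive:
	 *	obj = obj_create(head);
	 *	...
	 *	obj_put(obj);
	 */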
index d474ceb..5cc0110 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
 #define _NET_BATMAN_ADV_ORIGINATOR_H_
 
+#include "hash.h"
+
 int originator_init(struct bat_priv *bat_priv);
 void originator_free(struct bat_priv *bat_priv);
 void purge_orig_ref(struct bat_priv *bat_priv);
+void orig_node_free_ref(struct orig_node *orig_node);
 struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
-               uint8_t *neigh, struct batman_if *if_incoming);
+struct neigh_node *create_neighbor(struct orig_node *orig_node,
+                                  struct orig_node *orig_neigh_node,
+                                  uint8_t *neigh,
+                                  struct hard_iface *if_incoming);
+void neigh_node_free_ref(struct neigh_node *neigh_node);
 int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 
 /* returns 1 if they are the same originator */
-static inline int compare_orig(void *data1, void *data2)
+static inline int compare_orig(struct hlist_node *node, void *data2)
 {
+       void *data1 = container_of(node, struct orig_node, hash_entry);
+
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
@@ -61,4 +68,35 @@ static inline int choose_orig(void *data, int32_t size)
        return hash % size;
 }
 
+static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
+                                              void *data)
+{
+       struct hashtable_t *hash = bat_priv->orig_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct orig_node *orig_node, *orig_node_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               if (!compare_eth(orig_node, data))
+                       continue;
+
+               if (!atomic_inc_not_zero(&orig_node->refcount))
+                       continue;
+
+               orig_node_tmp = orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node_tmp;
+}
+
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
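
orig_hash_find() above replaces the open-coded hash_find() calls: it walks the bucket under rcu_read_lock() and only returns an orig_node whose refcount it managed to raise, so the caller always owns a reference that must be released with orig_node_free_ref(). A short usage sketch (the wrapper function is hypothetical):

	/* look up an originator by MAC address */
	static void show_orig(struct bat_priv *bat_priv, uint8_t *addr)
	{
		struct orig_node *orig_node;

		orig_node = orig_hash_find(bat_priv, addr);
		if (!orig_node)
			return;		/* unknown originator */

		pr_info("originator %pM is known\n", orig_node->orig);

		orig_node_free_ref(orig_node);	/* drop the reference we took */
	}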
index 2284e81..e757187 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -50,6 +50,7 @@
 
 /* fragmentation defines */
 #define UNI_FRAG_HEAD 0x01
+#define UNI_FRAG_LARGETAIL 0x02
 
 struct batman_packet {
        uint8_t  packet_type;
index defd37c..5bb6a61 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 6b0cb9a..0395b27 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 8828edd..c172f5d 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -28,7 +28,6 @@
 #include "icmp_socket.h"
 #include "translation-table.h"
 #include "originator.h"
-#include "types.h"
 #include "ring_buffer.h"
 #include "vis.h"
 #include "aggregation.h"
 #include "gateway_client.h"
 #include "unicast.h"
 
-void slide_own_bcast_window(struct batman_if *batman_if)
+void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        unsigned long *word;
        int i;
        size_t word_index;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-                       word_index = batman_if->if_num * NUM_WORDS;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       word_index = hard_iface->if_num * NUM_WORDS;
                        word = &(orig_node->bcast_own[word_index]);
 
                        bit_get_packet(bat_priv, word, 1, 0);
-                       orig_node->bcast_own_sum[batman_if->if_num] =
+                       orig_node->bcast_own_sum[hard_iface->if_num] =
                                bit_packet_count(word);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
                }
+               rcu_read_unlock();
        }
-
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
 }
 
 static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
@@ -90,6 +87,8 @@ static void update_route(struct bat_priv *bat_priv,
                         struct neigh_node *neigh_node,
                         unsigned char *hna_buff, int hna_buff_len)
 {
+       struct neigh_node *neigh_node_tmp;
+
        /* route deleted */
        if ((orig_node->router) && (!neigh_node)) {
 
@@ -116,7 +115,12 @@ static void update_route(struct bat_priv *bat_priv,
                        orig_node->router->addr);
        }
 
+       if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
+               neigh_node = NULL;
+       neigh_node_tmp = orig_node->router;
        orig_node->router = neigh_node;
+       if (neigh_node_tmp)
+               neigh_node_free_ref(neigh_node_tmp);
 }
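
update_route() above swaps orig_node->router with proper reference accounting: the new next hop only gets published after its refcount was raised (and is treated as NULL if it is already dying), and the reference held on the previous router is dropped afterwards. The bare pattern, reusing the hypothetical struct obj and obj_put() from the sketches further up:

	/* publish a new 'current' object in *slot, releasing the old one */
	static void switch_current(struct obj **slot, struct obj *new)
	{
		struct obj *old;

		/* never publish an object we do not hold a reference on */
		if (new && !atomic_inc_not_zero(&new->refcount))
			new = NULL;

		old = *slot;
		*slot = new;

		if (old)
			obj_put(old);	/* drop the reference the slot held */
	}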
 
 
@@ -139,73 +143,93 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 static int is_bidirectional_neigh(struct orig_node *orig_node,
                                struct orig_node *orig_neigh_node,
                                struct batman_packet *batman_packet,
-                               struct batman_if *if_incoming)
+                               struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-       struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+       struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+       struct hlist_node *node;
        unsigned char total_count;
+       uint8_t orig_eq_count, neigh_rq_count, tq_own;
+       int tq_asym_penalty, ret = 0;
 
        if (orig_node == orig_neigh_node) {
-               list_for_each_entry(tmp_neigh_node,
-                                   &orig_node->neigh_list,
-                                   list) {
-
-                       if (compare_orig(tmp_neigh_node->addr,
-                                        orig_neigh_node->orig) &&
-                           (tmp_neigh_node->if_incoming == if_incoming))
-                               neigh_node = tmp_neigh_node;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                        &orig_node->neigh_list, list) {
+
+                       if (!compare_eth(tmp_neigh_node->addr,
+                                        orig_neigh_node->orig))
+                               continue;
+
+                       if (tmp_neigh_node->if_incoming != if_incoming)
+                               continue;
+
+                       if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+                               continue;
+
+                       neigh_node = tmp_neigh_node;
                }
+               rcu_read_unlock();
 
                if (!neigh_node)
                        neigh_node = create_neighbor(orig_node,
                                                     orig_neigh_node,
                                                     orig_neigh_node->orig,
                                                     if_incoming);
-               /* create_neighbor failed, return 0 */
                if (!neigh_node)
-                       return 0;
+                       goto out;
 
                neigh_node->last_valid = jiffies;
        } else {
                /* find packet count of corresponding one hop neighbor */
-               list_for_each_entry(tmp_neigh_node,
-                                   &orig_neigh_node->neigh_list, list) {
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                        &orig_neigh_node->neigh_list, list) {
 
-                       if (compare_orig(tmp_neigh_node->addr,
-                                        orig_neigh_node->orig) &&
-                           (tmp_neigh_node->if_incoming == if_incoming))
-                               neigh_node = tmp_neigh_node;
+                       if (!compare_eth(tmp_neigh_node->addr,
+                                        orig_neigh_node->orig))
+                               continue;
+
+                       if (tmp_neigh_node->if_incoming != if_incoming)
+                               continue;
+
+                       if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+                               continue;
+
+                       neigh_node = tmp_neigh_node;
                }
+               rcu_read_unlock();
 
                if (!neigh_node)
                        neigh_node = create_neighbor(orig_neigh_node,
                                                     orig_neigh_node,
                                                     orig_neigh_node->orig,
                                                     if_incoming);
-               /* create_neighbor failed, return 0 */
                if (!neigh_node)
-                       return 0;
+                       goto out;
        }
 
        orig_node->last_valid = jiffies;
 
+       spin_lock_bh(&orig_node->ogm_cnt_lock);
+       orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
+       neigh_rq_count = neigh_node->real_packet_count;
+       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+
        /* pay attention to not get a value bigger than 100 % */
-       total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
-                      neigh_node->real_packet_count ?
-                      neigh_node->real_packet_count :
-                      orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
+       total_count = (orig_eq_count > neigh_rq_count ?
+                      neigh_rq_count : orig_eq_count);
 
        /* if we have too few packets (too less data) we set tq_own to zero */
        /* if we receive too few packets it is not considered bidirectional */
        if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
-           (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
-               orig_neigh_node->tq_own = 0;
+           (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+               tq_own = 0;
        else
                /* neigh_node->real_packet_count is never zero as we
                 * only purge old information when getting new
                 * information */
-               orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
-                       neigh_node->real_packet_count;
+               tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
 
        /*
         * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
@@ -213,20 +237,16 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
         * punishes asymmetric links more.  This will give a value
         * between 0 and TQ_MAX_VALUE
         */
-       orig_neigh_node->tq_asym_penalty =
-               TQ_MAX_VALUE -
-               (TQ_MAX_VALUE *
-                (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-                (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-                (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
-               (TQ_LOCAL_WINDOW_SIZE *
-                TQ_LOCAL_WINDOW_SIZE *
-                TQ_LOCAL_WINDOW_SIZE);
-
-       batman_packet->tq = ((batman_packet->tq *
-                             orig_neigh_node->tq_own *
-                             orig_neigh_node->tq_asym_penalty) /
-                            (TQ_MAX_VALUE * TQ_MAX_VALUE));
+       tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
+                               (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+                               (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+                               (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
+                                       (TQ_LOCAL_WINDOW_SIZE *
+                                        TQ_LOCAL_WINDOW_SIZE *
+                                        TQ_LOCAL_WINDOW_SIZE);
+
+       batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
+                                               (TQ_MAX_VALUE * TQ_MAX_VALUE));
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "bidirectional: "
@@ -234,34 +254,141 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
                "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
                "total tq: %3i\n",
                orig_node->orig, orig_neigh_node->orig, total_count,
-               neigh_node->real_packet_count, orig_neigh_node->tq_own,
-               orig_neigh_node->tq_asym_penalty, batman_packet->tq);
+               neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
 
        /* if link has the minimum required transmission quality
         * consider it bidirectional */
        if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
-               return 1;
+               ret = 1;
 
-       return 0;
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       return ret;
+}
+
+/* caller must hold the neigh_list_lock */
+void bonding_candidate_del(struct orig_node *orig_node,
+                          struct neigh_node *neigh_node)
+{
+       /* this neighbor is not part of our candidate list */
+       if (list_empty(&neigh_node->bonding_list))
+               goto out;
+
+       list_del_rcu(&neigh_node->bonding_list);
+       INIT_LIST_HEAD(&neigh_node->bonding_list);
+       neigh_node_free_ref(neigh_node);
+       atomic_dec(&orig_node->bond_candidates);
+
+out:
+       return;
+}
+
+static void bonding_candidate_add(struct orig_node *orig_node,
+                                 struct neigh_node *neigh_node)
+{
+       struct hlist_node *node;
+       struct neigh_node *tmp_neigh_node;
+       uint8_t best_tq, interference_candidate = 0;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+
+       /* only consider if it has the same primary address ...  */
+       if (!compare_eth(orig_node->orig,
+                        neigh_node->orig_node->primary_addr))
+               goto candidate_del;
+
+       if (!orig_node->router)
+               goto candidate_del;
+
+       best_tq = orig_node->router->tq_avg;
+
+       /* ... and is good enough to be considered */
+       if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
+               goto candidate_del;
+
+       /**
+        * check if we have another candidate with the same mac address or
+        * interface. If we do, we won't select this candidate because of
+        * possible interference.
+        */
+       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                &orig_node->neigh_list, list) {
+
+               if (tmp_neigh_node == neigh_node)
+                       continue;
+
+               /* we only care if the other candidate is even
+                * considered as a candidate. */
+               if (list_empty(&tmp_neigh_node->bonding_list))
+                       continue;
+
+               if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
+                   (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
+                       interference_candidate = 1;
+                       break;
+               }
+       }
+
+       /* don't care further if it is an interference candidate */
+       if (interference_candidate)
+               goto candidate_del;
+
+       /* this neighbor already is part of our candidate list */
+       if (!list_empty(&neigh_node->bonding_list))
+               goto out;
+
+       if (!atomic_inc_not_zero(&neigh_node->refcount))
+               goto out;
+
+       list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
+       atomic_inc(&orig_node->bond_candidates);
+       goto out;
+
+candidate_del:
+       bonding_candidate_del(orig_node, neigh_node);
+
+out:
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+       return;
+}
+
+/* copy primary address for bonding */
+static void bonding_save_primary(struct orig_node *orig_node,
+                                struct orig_node *orig_neigh_node,
+                                struct batman_packet *batman_packet)
+{
+       if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+               return;
+
+       memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
 }
 
 static void update_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node,
                        struct ethhdr *ethhdr,
                        struct batman_packet *batman_packet,
-                       struct batman_if *if_incoming,
+                       struct hard_iface *if_incoming,
                        unsigned char *hna_buff, int hna_buff_len,
                        char is_duplicate)
 {
        struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+       struct orig_node *orig_node_tmp;
+       struct hlist_node *node;
        int tmp_hna_buff_len;
+       uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
 
        bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
                "Searching and updating originator entry of received packet\n");
 
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-               if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
-                   (tmp_neigh_node->if_incoming == if_incoming)) {
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                &orig_node->neigh_list, list) {
+               if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+                   (tmp_neigh_node->if_incoming == if_incoming) &&
+                    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+                       if (neigh_node)
+                               neigh_node_free_ref(neigh_node);
                        neigh_node = tmp_neigh_node;
                        continue;
                }
@@ -280,16 +407,20 @@ static void update_orig(struct bat_priv *bat_priv,
 
                orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
                if (!orig_tmp)
-                       return;
+                       goto unlock;
 
                neigh_node = create_neighbor(orig_node, orig_tmp,
                                             ethhdr->h_source, if_incoming);
+
+               orig_node_free_ref(orig_tmp);
                if (!neigh_node)
-                       return;
+                       goto unlock;
        } else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Updating existing last-hop neighbor of originator\n");
 
+       rcu_read_unlock();
+
        orig_node->flags = batman_packet->flags;
        neigh_node->last_valid = jiffies;
 
@@ -303,6 +434,8 @@ static void update_orig(struct bat_priv *bat_priv,
                neigh_node->last_ttl = batman_packet->ttl;
        }
 
+       bonding_candidate_add(orig_node, neigh_node);
+
        tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
                            batman_packet->num_hna * ETH_ALEN : hna_buff_len);
 
@@ -319,10 +452,22 @@ static void update_orig(struct bat_priv *bat_priv,
        /* if the TQ is the same and the link is not more symmetric we
         * won't consider it either */
        if ((orig_node->router) &&
-            ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
-            (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
-             >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
-               goto update_hna;
+            (neigh_node->tq_avg == orig_node->router->tq_avg)) {
+               orig_node_tmp = orig_node->router->orig_node;
+               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               bcast_own_sum_orig =
+                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+               orig_node_tmp = neigh_node->orig_node;
+               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               bcast_own_sum_neigh =
+                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+               if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+                       goto update_hna;
+       }
 
        update_routes(bat_priv, orig_node, neigh_node,
                      hna_buff, tmp_hna_buff_len);
@@ -343,6 +488,14 @@ update_gw:
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
            (atomic_read(&bat_priv->gw_sel_class) > 2))
                gw_check_election(bat_priv, orig_node);
+
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
 }
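
For reference (not part of the patch): the recurring change in update_orig() above, and in most hunks that follow, is replacing traversal under the old global orig_hash_lock with RCU list iteration plus per-node reference counting. An entry is only handed out after atomic_inc_not_zero() succeeds, and the caller later drops it with the matching *_free_ref() helper. Below is a minimal generic sketch of that pattern; struct example_neigh and example_neigh_find() are hypothetical stand-ins, only the RCU/refcount primitives are real kernel API.

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/atomic.h>

struct example_neigh {
	struct hlist_node list;
	atomic_t refcount;
	u8 addr[ETH_ALEN];
};

static struct example_neigh *example_neigh_find(struct hlist_head *head,
						const u8 *addr)
{
	struct example_neigh *neigh, *found = NULL;
	struct hlist_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, node, head, list) {
		if (memcmp(neigh->addr, addr, ETH_ALEN) != 0)
			continue;

		/* only hand out entries that are still alive */
		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		found = neigh;
		break;
	}
	rcu_read_unlock();

	/* caller must drop the reference when done with 'found' */
	return found;
}
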
 
 /* checks whether the host restarted and is in the protection time.
@@ -380,34 +533,38 @@ static int window_protected(struct bat_priv *bat_priv,
  */
 static char count_real_packets(struct ethhdr *ethhdr,
                               struct batman_packet *batman_packet,
-                              struct batman_if *if_incoming)
+                              struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct orig_node *orig_node;
        struct neigh_node *tmp_neigh_node;
+       struct hlist_node *node;
        char is_duplicate = 0;
        int32_t seq_diff;
        int need_update = 0;
-       int set_mark;
+       int set_mark, ret = -1;
 
        orig_node = get_orig_node(bat_priv, batman_packet->orig);
        if (!orig_node)
                return 0;
 
+       spin_lock_bh(&orig_node->ogm_cnt_lock);
        seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
 
        /* signal to the caller that the packet is to be dropped. */
        if (window_protected(bat_priv, seq_diff,
                             &orig_node->batman_seqno_reset))
-               return -1;
+               goto out;
 
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                &orig_node->neigh_list, list) {
 
                is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
                                               orig_node->last_real_seqno,
                                               batman_packet->seqno);
 
-               if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
+               if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
                    (tmp_neigh_node->if_incoming == if_incoming))
                        set_mark = 1;
                else
@@ -421,6 +578,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
                tmp_neigh_node->real_packet_count =
                        bit_packet_count(tmp_neigh_node->real_bits);
        }
+       rcu_read_unlock();
 
        if (need_update) {
                bat_dbg(DBG_BATMAN, bat_priv,
@@ -429,123 +587,21 @@ static char count_real_packets(struct ethhdr *ethhdr,
                orig_node->last_real_seqno = batman_packet->seqno;
        }
 
-       return is_duplicate;
-}
-
-/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
-                                struct orig_node *orig_node,
-                                struct orig_node *orig_neigh_node,
-                                struct batman_packet *batman_packet)
+       ret = is_duplicate;
 
-{
-       if (batman_packet->flags & PRIMARIES_FIRST_HOP)
-               memcpy(orig_neigh_node->primary_addr,
-                      orig_node->orig, ETH_ALEN);
-
-       return;
-}
-
-/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
-                              struct orig_node *orig_node)
-{
-       int candidates;
-       int interference_candidate;
-       int best_tq;
-       struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
-       struct neigh_node *first_candidate, *last_candidate;
-
-       /* update the candidates for this originator */
-       if (!orig_node->router) {
-               orig_node->bond.candidates = 0;
-               return;
-       }
-
-       best_tq = orig_node->router->tq_avg;
-
-       /* update bond.candidates */
-
-       candidates = 0;
-
-       /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
-        * as "bonding partner" */
-
-       /* first, zero the list */
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-               tmp_neigh_node->next_bond_candidate = NULL;
-       }
-
-       first_candidate = NULL;
-       last_candidate = NULL;
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
-               /* only consider if it has the same primary address ...  */
-               if (memcmp(orig_node->orig,
-                               tmp_neigh_node->orig_node->primary_addr,
-                               ETH_ALEN) != 0)
-                       continue;
-
-               /* ... and is good enough to be considered */
-               if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
-                       continue;
-
-               /* check if we have another candidate with the same
-                * mac address or interface. If we do, we won't
-                * select this candidate because of possible interference. */
-
-               interference_candidate = 0;
-               list_for_each_entry(tmp_neigh_node2,
-                               &orig_node->neigh_list, list) {
-
-                       if (tmp_neigh_node2 == tmp_neigh_node)
-                               continue;
-
-                       /* we only care if the other candidate is even
-                        * considered as candidate. */
-                       if (!tmp_neigh_node2->next_bond_candidate)
-                               continue;
-
-
-                       if ((tmp_neigh_node->if_incoming ==
-                               tmp_neigh_node2->if_incoming)
-                               || (memcmp(tmp_neigh_node->addr,
-                               tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
-
-                               interference_candidate = 1;
-                               break;
-                       }
-               }
-               /* don't care further if it is an interference candidate */
-               if (interference_candidate)
-                       continue;
-
-               if (!first_candidate) {
-                       first_candidate = tmp_neigh_node;
-                       tmp_neigh_node->next_bond_candidate = first_candidate;
-               } else
-                       tmp_neigh_node->next_bond_candidate = last_candidate;
-
-               last_candidate = tmp_neigh_node;
-
-               candidates++;
-       }
-
-       if (candidates > 0) {
-               first_candidate->next_bond_candidate = last_candidate;
-               orig_node->bond.selected = first_candidate;
-       }
-
-       orig_node->bond.candidates = candidates;
+out:
+       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+       orig_node_free_ref(orig_node);
+       return ret;
 }
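
For reference (not part of the patch): the window_protected() check used by count_real_packets() above guards against a restarted neighbor. Sequence numbers that jump far outside the expected window are dropped for a protection period before the window may reset. A hedged sketch of that idea; the range and timeout below are illustrative assumptions, not the constants batman-adv actually uses.

#include <linux/types.h>
#include <linux/jiffies.h>

#define EXAMPLE_SEQNO_RANGE	65536		/* assumed plausible jump */
#define EXAMPLE_RESET_TIMEOUT	(30 * HZ)	/* assumed protection time */

/* returns 1 while the caller should keep dropping out-of-window packets */
static int example_window_protected(int32_t seq_diff,
				    unsigned long *last_reset)
{
	if (seq_diff > -EXAMPLE_SEQNO_RANGE && seq_diff < EXAMPLE_SEQNO_RANGE)
		return 0;	/* inside the plausible window */

	if (time_after(jiffies, *last_reset + EXAMPLE_RESET_TIMEOUT)) {
		*last_reset = jiffies;	/* protection expired: accept reset */
		return 0;
	}

	return 1;
}
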
 
 void receive_bat_packet(struct ethhdr *ethhdr,
-                               struct batman_packet *batman_packet,
-                               unsigned char *hna_buff, int hna_buff_len,
-                               struct batman_if *if_incoming)
+                       struct batman_packet *batman_packet,
+                       unsigned char *hna_buff, int hna_buff_len,
+                       struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        struct orig_node *orig_neigh_node, *orig_node;
        char has_directlink_flag;
        char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
@@ -573,8 +629,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 
        has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 
-       is_single_hop_neigh = (compare_orig(ethhdr->h_source,
-                                           batman_packet->orig) ? 1 : 0);
+       is_single_hop_neigh = (compare_eth(ethhdr->h_source,
+                                          batman_packet->orig) ? 1 : 0);
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
@@ -587,26 +643,26 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                has_directlink_flag);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->if_status != IF_ACTIVE)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->if_status != IF_ACTIVE)
                        continue;
 
-               if (batman_if->soft_iface != if_incoming->soft_iface)
+               if (hard_iface->soft_iface != if_incoming->soft_iface)
                        continue;
 
-               if (compare_orig(ethhdr->h_source,
-                                batman_if->net_dev->dev_addr))
+               if (compare_eth(ethhdr->h_source,
+                               hard_iface->net_dev->dev_addr))
                        is_my_addr = 1;
 
-               if (compare_orig(batman_packet->orig,
-                                batman_if->net_dev->dev_addr))
+               if (compare_eth(batman_packet->orig,
+                               hard_iface->net_dev->dev_addr))
                        is_my_orig = 1;
 
-               if (compare_orig(batman_packet->prev_sender,
-                                batman_if->net_dev->dev_addr))
+               if (compare_eth(batman_packet->prev_sender,
+                               hard_iface->net_dev->dev_addr))
                        is_my_oldorig = 1;
 
-               if (compare_orig(ethhdr->h_source, broadcast_addr))
+               if (compare_eth(ethhdr->h_source, broadcast_addr))
                        is_broadcast = 1;
        }
        rcu_read_unlock();
@@ -638,7 +694,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                int offset;
 
                orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
-
                if (!orig_neigh_node)
                        return;
 
@@ -647,18 +702,22 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                /* if received seqno equals last send seqno save new
                 * seqno for bidirectional check */
                if (has_directlink_flag &&
-                   compare_orig(if_incoming->net_dev->dev_addr,
-                                batman_packet->orig) &&
+                   compare_eth(if_incoming->net_dev->dev_addr,
+                               batman_packet->orig) &&
                    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
                        offset = if_incoming->if_num * NUM_WORDS;
+
+                       spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
                        word = &(orig_neigh_node->bcast_own[offset]);
                        bit_mark(word, 0);
                        orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
                                bit_packet_count(word);
+                       spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
                }
 
                bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
                        "originator packet from myself (via neighbor)\n");
+               orig_node_free_ref(orig_neigh_node);
                return;
        }
 
@@ -679,27 +738,27 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: packet within seqno protection time "
                        "(sender: %pM)\n", ethhdr->h_source);
-               return;
+               goto out;
        }
 
        if (batman_packet->tq == 0) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: originator packet with tq equal 0\n");
-               return;
+               goto out;
        }
 
        /* avoid temporary routing loops */
        if ((orig_node->router) &&
            (orig_node->router->orig_node->router) &&
-           (compare_orig(orig_node->router->addr,
-                         batman_packet->prev_sender)) &&
-           !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
-           (compare_orig(orig_node->router->addr,
-                         orig_node->router->orig_node->router->addr))) {
+           (compare_eth(orig_node->router->addr,
+                        batman_packet->prev_sender)) &&
+           !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
+           (compare_eth(orig_node->router->addr,
+                        orig_node->router->orig_node->router->addr))) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: ignoring all rebroadcast packets that "
                        "may make me loop (sender: %pM)\n", ethhdr->h_source);
-               return;
+               goto out;
        }
 
        /* if sender is a direct neighbor the sender mac equals
@@ -708,19 +767,21 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                           orig_node :
                           get_orig_node(bat_priv, ethhdr->h_source));
        if (!orig_neigh_node)
-               return;
+               goto out;
 
        /* drop packet if sender is not a direct neighbor and if we
         * don't route towards it */
        if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: OGM via unknown neighbor!\n");
-               return;
+               goto out_neigh;
        }
 
        is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
                                                batman_packet, if_incoming);
 
+       bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
+
        /* update ranking if it is not a duplicate or has the same
         * seqno and similar ttl as the non-duplicate */
        if (is_bidirectional &&
@@ -730,10 +791,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                update_orig(bat_priv, orig_node, ethhdr, batman_packet,
                            if_incoming, hna_buff, hna_buff_len, is_duplicate);
 
-       mark_bonding_address(bat_priv, orig_node,
-                            orig_neigh_node, batman_packet);
-       update_bonding_candidates(bat_priv, orig_node);
-
        /* is single hop (direct) neighbor */
        if (is_single_hop_neigh) {
 
@@ -743,31 +800,36 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 
                bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
                        "rebroadcast neighbor packet with direct link flag\n");
-               return;
+               goto out_neigh;
        }
 
        /* multihop originator */
        if (!is_bidirectional) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: not received via bidirectional link\n");
-               return;
+               goto out_neigh;
        }
 
        if (is_duplicate) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: duplicate packet received\n");
-               return;
+               goto out_neigh;
        }
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "Forwarding packet: rebroadcast originator packet\n");
        schedule_forward_packet(orig_node, ethhdr, batman_packet,
                                0, hna_buff_len, if_incoming);
+
+out_neigh:
+       if ((orig_neigh_node) && (!is_single_hop_neigh))
+               orig_node_free_ref(orig_neigh_node);
+out:
+       orig_node_free_ref(orig_node);
 }
 
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
+int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
        struct ethhdr *ethhdr;
 
        /* drop packet if it has not necessary minimum size */
@@ -794,12 +856,10 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        receive_aggr_bat_packet(ethhdr,
                                skb->data,
                                skb_headlen(skb),
-                               batman_if);
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+                               hard_iface);
 
        kfree_skb(skb);
        return NET_RX_SUCCESS;
@@ -808,135 +868,144 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
                               struct sk_buff *skb, size_t icmp_len)
 {
-       struct orig_node *orig_node;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        struct icmp_packet_rr *icmp_packet;
-       struct ethhdr *ethhdr;
-       struct batman_if *batman_if;
-       int ret;
-       uint8_t dstaddr[ETH_ALEN];
+       int ret = NET_RX_DROP;
 
        icmp_packet = (struct icmp_packet_rr *)skb->data;
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* add data to device queue */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                bat_socket_receive_packet(icmp_packet, icmp_len);
-               return NET_RX_DROP;
+               goto out;
        }
 
        if (!bat_priv->primary_if)
-               return NET_RX_DROP;
+               goto out;
 
        /* answer echo request (ping) */
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  icmp_packet->orig));
-       ret = NET_RX_DROP;
-
-       if ((orig_node) && (orig_node->router)) {
-
-               /* don't lock while sending the packets ... we therefore
-                * copy the required data before sending */
-               batman_if = orig_node->router->if_incoming;
-               memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-               /* create a copy of the skb, if needed, to modify it. */
-               if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-                       return NET_RX_DROP;
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
 
-               icmp_packet = (struct icmp_packet_rr *)skb->data;
-               ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       if (!orig_node)
+               goto unlock;
 
-               memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig,
-                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-               icmp_packet->msg_type = ECHO_REPLY;
-               icmp_packet->ttl = TTL;
+       neigh_node = orig_node->router;
 
-               send_skb_packet(skb, batman_if, dstaddr);
-               ret = NET_RX_SUCCESS;
+       if (!neigh_node)
+               goto unlock;
 
-       } else
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
+
+       rcu_read_unlock();
+
+       /* create a copy of the skb, if needed, to modify it. */
+       if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+               goto out;
+
+       icmp_packet = (struct icmp_packet_rr *)skb->data;
+
+       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+       memcpy(icmp_packet->orig,
+               bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+       icmp_packet->msg_type = ECHO_REPLY;
+       icmp_packet->ttl = TTL;
 
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
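
For reference (not part of the patch): the echo handling above amounts to swapping the originator and destination fields, switching the message type and resetting the TTL before the reply is sent back via the originator's router. A sketch of just that rewrite step; struct icmp_packet_rr, ECHO_REPLY and TTL are the names visible in the hunk, the helper itself is hypothetical.

/* rewrite an incoming ECHO_REQUEST in place so it becomes our reply */
static void example_build_echo_reply(struct icmp_packet_rr *icmp_packet,
				     const uint8_t *own_addr)
{
	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, own_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;
}
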
 
 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
-                                 struct sk_buff *skb, size_t icmp_len)
+                                 struct sk_buff *skb)
 {
-       struct orig_node *orig_node;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        struct icmp_packet *icmp_packet;
-       struct ethhdr *ethhdr;
-       struct batman_if *batman_if;
-       int ret;
-       uint8_t dstaddr[ETH_ALEN];
+       int ret = NET_RX_DROP;
 
        icmp_packet = (struct icmp_packet *)skb->data;
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* send TTL exceeded if packet is an echo request (traceroute) */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to "
                         "%pM: ttl exceeded\n", icmp_packet->orig,
                         icmp_packet->dst);
-               return NET_RX_DROP;
+               goto out;
        }
 
        if (!bat_priv->primary_if)
-               return NET_RX_DROP;
+               goto out;
 
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              icmp_packet->orig));
-       ret = NET_RX_DROP;
-
-       if ((orig_node) && (orig_node->router)) {
-
-               /* don't lock while sending the packets ... we therefore
-                * copy the required data before sending */
-               batman_if = orig_node->router->if_incoming;
-               memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-               /* create a copy of the skb, if needed, to modify it. */
-               if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-                       return NET_RX_DROP;
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
 
-               icmp_packet = (struct icmp_packet *) skb->data;
-               ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       if (!orig_node)
+               goto unlock;
 
-               memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig,
-                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-               icmp_packet->msg_type = TTL_EXCEEDED;
-               icmp_packet->ttl = TTL;
+       neigh_node = orig_node->router;
 
-               send_skb_packet(skb, batman_if, dstaddr);
-               ret = NET_RX_SUCCESS;
+       if (!neigh_node)
+               goto unlock;
 
-       } else
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
 
+       rcu_read_unlock();
+
+       /* create a copy of the skb, if needed, to modify it. */
+       if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+               goto out;
+
+       icmp_packet = (struct icmp_packet *)skb->data;
+
+       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+       memcpy(icmp_packet->orig,
+               bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+       icmp_packet->msg_type = TTL_EXCEEDED;
+       icmp_packet->ttl = TTL;
+
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
 
 
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
-       struct orig_node *orig_node;
-       struct batman_if *batman_if;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        int hdr_size = sizeof(struct icmp_packet);
-       int ret;
-       uint8_t dstaddr[ETH_ALEN];
+       int ret = NET_RX_DROP;
 
        /**
         * we truncate all incoming icmp packets if they don't match our size
@@ -946,21 +1015,21 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 
        /* drop packet if it has not necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return NET_RX_DROP;
+               goto out;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
-               return NET_RX_DROP;
+               goto out;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
-               return NET_RX_DROP;
+               goto out;
 
        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
-               return NET_RX_DROP;
+               goto out;
 
        icmp_packet = (struct icmp_packet_rr *)skb->data;
 
@@ -978,53 +1047,61 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 
        /* TTL exceeded */
        if (icmp_packet->ttl < 2)
-               return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
-
-       ret = NET_RX_DROP;
+               return recv_icmp_ttl_exceeded(bat_priv, skb);
 
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              icmp_packet->dst));
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
 
-       if ((orig_node) && (orig_node->router)) {
+       if (!orig_node)
+               goto unlock;
 
-               /* don't lock while sending the packets ... we therefore
-                * copy the required data before sending */
-               batman_if = orig_node->router->if_incoming;
-               memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       neigh_node = orig_node->router;
 
-               /* create a copy of the skb, if needed, to modify it. */
-               if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-                       return NET_RX_DROP;
+       if (!neigh_node)
+               goto unlock;
 
-               icmp_packet = (struct icmp_packet_rr *)skb->data;
-               ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
 
-               /* decrement ttl */
-               icmp_packet->ttl--;
+       rcu_read_unlock();
 
-               /* route it */
-               send_skb_packet(skb, batman_if, dstaddr);
-               ret = NET_RX_SUCCESS;
+       /* create a copy of the skb, if needed, to modify it. */
+       if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+               goto out;
 
-       } else
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       icmp_packet = (struct icmp_packet_rr *)skb->data;
+
+       /* decrement ttl */
+       icmp_packet->ttl--;
 
+       /* route it */
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
 
 /* find a suitable router for this originator, and use
- * bonding if possible. */
+ * bonding if possible. Increases the found neighbor's
+ * refcount. */
 struct neigh_node *find_router(struct bat_priv *bat_priv,
                               struct orig_node *orig_node,
-                              struct batman_if *recv_if)
+                              struct hard_iface *recv_if)
 {
        struct orig_node *primary_orig_node;
        struct orig_node *router_orig;
-       struct neigh_node *router, *first_candidate, *best_router;
+       struct neigh_node *router, *first_candidate, *tmp_neigh_node;
        static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
        int bonding_enabled;
 
@@ -1036,78 +1113,128 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
 
        /* without bonding, the first node should
         * always choose the default router. */
-
        bonding_enabled = atomic_read(&bat_priv->bonding);
 
-       if ((!recv_if) && (!bonding_enabled))
-               return orig_node->router;
-
+       rcu_read_lock();
+       /* select default router to output */
+       router = orig_node->router;
        router_orig = orig_node->router->orig_node;
+       if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
+               rcu_read_unlock();
+               return NULL;
+       }
+
+       if ((!recv_if) && (!bonding_enabled))
+               goto return_router;
 
        /* if we have something in the primary_addr, we can search
         * for a potential bonding candidate. */
-       if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
-               return orig_node->router;
+       if (compare_eth(router_orig->primary_addr, zero_mac))
+               goto return_router;
 
        /* find the orig_node which has the primary interface. might
         * even be the same as our router_orig in many cases */
 
-       if (memcmp(router_orig->primary_addr,
-                               router_orig->orig, ETH_ALEN) == 0) {
+       if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
                primary_orig_node = router_orig;
        } else {
-               primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
-                                              choose_orig,
-                                              router_orig->primary_addr);
-
+               primary_orig_node = orig_hash_find(bat_priv,
+                                                  router_orig->primary_addr);
                if (!primary_orig_node)
-                       return orig_node->router;
+                       goto return_router;
+
+               orig_node_free_ref(primary_orig_node);
        }
 
        /* with less than 2 candidates, we can't do any
         * bonding and prefer the original router. */
-
-       if (primary_orig_node->bond.candidates < 2)
-               return orig_node->router;
+       if (atomic_read(&primary_orig_node->bond_candidates) < 2)
+               goto return_router;
 
 
        /* all nodes in between should choose a candidate which
         * is not on the interface where the packet came
         * in. */
-       first_candidate = primary_orig_node->bond.selected;
-       router = first_candidate;
+
+       neigh_node_free_ref(router);
+       first_candidate = NULL;
+       router = NULL;
 
        if (bonding_enabled) {
                /* in the bonding case, send the packets in a round
                 * robin fashion over the remaining interfaces. */
-               do {
+
+               list_for_each_entry_rcu(tmp_neigh_node,
+                               &primary_orig_node->bond_list, bonding_list) {
+                       if (!first_candidate)
+                               first_candidate = tmp_neigh_node;
                        /* recv_if == NULL on the first node. */
-                       if (router->if_incoming != recv_if)
+                       if (tmp_neigh_node->if_incoming != recv_if &&
+                           atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+                               router = tmp_neigh_node;
                                break;
+                       }
+               }
+
+               /* use the first candidate if nothing was found. */
+               if (!router && first_candidate &&
+                   atomic_inc_not_zero(&first_candidate->refcount))
+                       router = first_candidate;
 
-                       router = router->next_bond_candidate;
-               } while (router != first_candidate);
+               if (!router) {
+                       rcu_read_unlock();
+                       return NULL;
+               }
 
-               primary_orig_node->bond.selected = router->next_bond_candidate;
+               /* selected should point to the next element
+                * after the current router */
+               spin_lock_bh(&primary_orig_node->neigh_list_lock);
+               /* this is a list_move(), which unfortunately
+                * does not exist as an RCU version */
+               list_del_rcu(&primary_orig_node->bond_list);
+               list_add_rcu(&primary_orig_node->bond_list,
+                               &router->bonding_list);
+               spin_unlock_bh(&primary_orig_node->neigh_list_lock);
 
        } else {
                /* if bonding is disabled, use the best of the
                 * remaining candidates which are not using
                 * this interface. */
-               best_router = first_candidate;
+               list_for_each_entry_rcu(tmp_neigh_node,
+                       &primary_orig_node->bond_list, bonding_list) {
+                       if (!first_candidate)
+                               first_candidate = tmp_neigh_node;
 
-               do {
                        /* recv_if == NULL on the first node. */
-                       if ((router->if_incoming != recv_if) &&
-                               (router->tq_avg > best_router->tq_avg))
-                                       best_router = router;
+                       if (tmp_neigh_node->if_incoming == recv_if)
+                               continue;
 
-                       router = router->next_bond_candidate;
-               } while (router != first_candidate);
+                       if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+                               continue;
 
-               router = best_router;
-       }
+                       /* if we don't have a router yet
+                        * or this one is better, choose it. */
+                       if ((!router) ||
+                           (tmp_neigh_node->tq_avg > router->tq_avg)) {
+                               /* decrement refcount of
+                                * previously selected router */
+                               if (router)
+                                       neigh_node_free_ref(router);
+
+                               router = tmp_neigh_node;
+                               atomic_inc_not_zero(&router->refcount);
+                       }
+
+                       neigh_node_free_ref(tmp_neigh_node);
+               }
 
+               /* use the first candidate if nothing was found. */
+               if (!router && first_candidate &&
+                   atomic_inc_not_zero(&first_candidate->refcount))
+                       router = first_candidate;
+       }
+return_router:
+       rcu_read_unlock();
        return router;
 }
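
For reference (not part of the patch): one detail of the bonding branch above is that there is no RCU-safe list_move(), so the round-robin state is kept by deleting the primary originator's bond_list head and re-adding it right after the router that was just chosen; the next RCU walk then starts at the following candidate. Isolated here as a sketch, using the same structures and locking as the patch.

/* rotate the bond list head so the next walk starts after 'chosen' */
static void example_rotate_bond_list(struct orig_node *primary_orig_node,
				     struct neigh_node *chosen)
{
	spin_lock_bh(&primary_orig_node->neigh_list_lock);
	list_del_rcu(&primary_orig_node->bond_list);
	list_add_rcu(&primary_orig_node->bond_list, &chosen->bonding_list);
	spin_unlock_bh(&primary_orig_node->neigh_list_lock);
}
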
 
@@ -1136,17 +1263,14 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
        return 0;
 }
 
-int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
-                        int hdr_size)
+int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct orig_node *orig_node;
-       struct neigh_node *router;
-       struct batman_if *batman_if;
-       uint8_t dstaddr[ETH_ALEN];
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        struct unicast_packet *unicast_packet;
        struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
-       int ret;
+       int ret = NET_RX_DROP;
        struct sk_buff *new_skb;
 
        unicast_packet = (struct unicast_packet *)skb->data;
@@ -1156,53 +1280,51 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
                pr_debug("Warning - can't forward unicast packet from %pM to "
                         "%pM: ttl exceeded\n", ethhdr->h_source,
                         unicast_packet->dest);
-               return NET_RX_DROP;
+               goto out;
        }
 
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              unicast_packet->dest));
-
-       router = find_router(bat_priv, orig_node, recv_if);
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
 
-       if (!router) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+       if (!orig_node)
+               goto unlock;
 
-       /* don't lock while sending the packets ... we therefore
-        * copy the required data before sending */
+       rcu_read_unlock();
 
-       batman_if = router->if_incoming;
-       memcpy(dstaddr, router->addr, ETH_ALEN);
+       /* find_router() increases neigh_nodes refcount if found. */
+       neigh_node = find_router(bat_priv, orig_node, recv_if);
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!neigh_node)
+               goto out;
 
        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-               return NET_RX_DROP;
+               goto out;
 
        unicast_packet = (struct unicast_packet *)skb->data;
 
        if (unicast_packet->packet_type == BAT_UNICAST &&
            atomic_read(&bat_priv->fragmentation) &&
-           skb->len > batman_if->net_dev->mtu)
-               return frag_send_skb(skb, bat_priv, batman_if,
-                                    dstaddr);
+           skb->len > neigh_node->if_incoming->net_dev->mtu) {
+               ret = frag_send_skb(skb, bat_priv,
+                                   neigh_node->if_incoming, neigh_node->addr);
+               goto out;
+       }
 
        if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
-           2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {
+           frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
 
                ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
 
                if (ret == NET_RX_DROP)
-                       return NET_RX_DROP;
+                       goto out;
 
                /* packet was buffered for late merge */
-               if (!new_skb)
-                       return NET_RX_SUCCESS;
+               if (!new_skb) {
+                       ret = NET_RX_SUCCESS;
+                       goto out;
+               }
 
                skb = new_skb;
                unicast_packet = (struct unicast_packet *)skb->data;
@@ -1212,12 +1334,21 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
        unicast_packet->ttl--;
 
        /* route it */
-       send_skb_packet(skb, batman_if, dstaddr);
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
 
-       return NET_RX_SUCCESS;
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       return ret;
 }
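
For reference (not part of the patch): the MTU handling in route_unicast_packet() above follows a simple rule: with fragmentation enabled, a full unicast packet larger than the outgoing interface's MTU goes through frag_send_skb(), while an incoming BAT_UNICAST_FRAG that can now be reassembled is rebuilt via frag_reassemble_skb() before being routed. Below is a condensed sketch of the send-side decision only, reusing names visible in the hunk (the wrapper function itself is hypothetical).

/* decide between fragmenting and sending the unicast packet whole */
static int example_unicast_xmit(struct sk_buff *skb, struct bat_priv *bat_priv,
				struct neigh_node *neigh_node)
{
	struct unicast_packet *unicast_packet =
		(struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, neigh_node->if_incoming,
				     neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	return NET_RX_SUCCESS;
}
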
 
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct unicast_packet *unicast_packet;
        int hdr_size = sizeof(struct unicast_packet);
@@ -1233,10 +1364,10 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
                return NET_RX_SUCCESS;
        }
 
-       return route_unicast_packet(skb, recv_if, hdr_size);
+       return route_unicast_packet(skb, recv_if);
 }
 
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct unicast_frag_packet *unicast_packet;
@@ -1266,89 +1397,96 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
                return NET_RX_SUCCESS;
        }
 
-       return route_unicast_packet(skb, recv_if, hdr_size);
+       return route_unicast_packet(skb, recv_if);
 }
 
 
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct orig_node *orig_node;
+       struct orig_node *orig_node = NULL;
        struct bcast_packet *bcast_packet;
        struct ethhdr *ethhdr;
        int hdr_size = sizeof(struct bcast_packet);
+       int ret = NET_RX_DROP;
        int32_t seq_diff;
 
        /* drop packet if it has not necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return NET_RX_DROP;
+               goto out;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
-               return NET_RX_DROP;
+               goto out;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
-               return NET_RX_DROP;
+               goto out;
 
        /* ignore broadcasts sent by myself */
        if (is_my_mac(ethhdr->h_source))
-               return NET_RX_DROP;
+               goto out;
 
        bcast_packet = (struct bcast_packet *)skb->data;
 
        /* ignore broadcasts originated by myself */
        if (is_my_mac(bcast_packet->orig))
-               return NET_RX_DROP;
+               goto out;
 
        if (bcast_packet->ttl < 2)
-               return NET_RX_DROP;
+               goto out;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              bcast_packet->orig));
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
 
-       if (!orig_node) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+       if (!orig_node)
+               goto rcu_unlock;
+
+       rcu_read_unlock();
+
+       spin_lock_bh(&orig_node->bcast_seqno_lock);
 
        /* check whether the packet is a duplicate */
-       if (get_bit_status(orig_node->bcast_bits,
-                          orig_node->last_bcast_seqno,
-                          ntohl(bcast_packet->seqno))) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+       if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+                          ntohl(bcast_packet->seqno)))
+               goto spin_unlock;
 
        seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
 
        /* check whether the packet is old and the host just restarted. */
        if (window_protected(bat_priv, seq_diff,
-                            &orig_node->bcast_seqno_reset)) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+                            &orig_node->bcast_seqno_reset))
+               goto spin_unlock;
 
        /* mark broadcast in flood history, update window position
         * if required. */
        if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
                orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       spin_unlock_bh(&orig_node->bcast_seqno_lock);
+
        /* rebroadcast packet */
        add_bcast_packet_to_list(bat_priv, skb);
 
        /* broadcast for me */
        interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+       ret = NET_RX_SUCCESS;
+       goto out;
 
-       return NET_RX_SUCCESS;
+rcu_unlock:
+       rcu_read_unlock();
+       goto out;
+spin_unlock:
+       spin_unlock_bh(&orig_node->bcast_seqno_lock);
+out:
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       return ret;
 }
 
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct vis_packet *vis_packet;
        struct ethhdr *ethhdr;
index f108f23..b5a064c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #ifndef _NET_BATMAN_ADV_ROUTING_H_
 #define _NET_BATMAN_ADV_ROUTING_H_
 
-#include "types.h"
-
-void slide_own_bcast_window(struct batman_if *batman_if);
+void slide_own_bcast_window(struct hard_iface *hard_iface);
 void receive_bat_packet(struct ethhdr *ethhdr,
                                struct batman_packet *batman_packet,
                                unsigned char *hna_buff, int hna_buff_len,
-                               struct batman_if *if_incoming);
+                               struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
                   struct neigh_node *neigh_node, unsigned char *hna_buff,
                   int hna_buff_len);
-int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
-                        int hdr_size);
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
-               struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
-                              struct orig_node *orig_node);
+                              struct orig_node *orig_node,
+                              struct hard_iface *recv_if);
+void bonding_candidate_del(struct orig_node *orig_node,
+                          struct neigh_node *neigh_node);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
index b89b9f7..d49e54d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -25,7 +25,6 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "types.h"
 #include "vis.h"
 #include "aggregation.h"
 #include "gateway_common.h"
@@ -49,7 +48,7 @@ static unsigned long own_send_time(struct bat_priv *bat_priv)
 }
 
 /* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
 {
        return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
 }
@@ -57,20 +56,20 @@ static unsigned long forward_send_time(struct bat_priv *bat_priv)
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
 int send_skb_packet(struct sk_buff *skb,
-                               struct batman_if *batman_if,
+                               struct hard_iface *hard_iface,
                                uint8_t *dst_addr)
 {
        struct ethhdr *ethhdr;
 
-       if (batman_if->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;
 
-       if (unlikely(!batman_if->net_dev))
+       if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;
 
-       if (!(batman_if->net_dev->flags & IFF_UP)) {
+       if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via "
-                          "that interface!\n", batman_if->net_dev->name);
+                          "that interface!\n", hard_iface->net_dev->name);
                goto send_skb_err;
        }
 
@@ -81,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
        skb_reset_mac_header(skb);
 
        ethhdr = (struct ethhdr *) skb_mac_header(skb);
-       memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
 
@@ -89,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);
 
-       skb->dev = batman_if->net_dev;
+       skb->dev = hard_iface->net_dev;
 
        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
@@ -103,16 +102,16 @@ send_skb_err:
 
 /* Send a packet to a given interface */
 static void send_packet_to_if(struct forw_packet *forw_packet,
-                             struct batman_if *batman_if)
+                             struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batman_packet *batman_packet;
        struct sk_buff *skb;
 
-       if (batman_if->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != IF_ACTIVE)
                return;
 
        packet_num = 0;
@@ -127,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
                if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-                   (forw_packet->if_incoming == batman_if))
+                   (forw_packet->if_incoming == hard_iface))
                        batman_packet->flags |= DIRECTLINK;
                else
                        batman_packet->flags &= ~DIRECTLINK;
@@ -143,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ?
                         "on" : "off"),
-                       batman_if->net_dev->name, batman_if->net_dev->dev_addr);
+                       hard_iface->net_dev->name,
+                       hard_iface->net_dev->dev_addr);
 
                buff_pos += sizeof(struct batman_packet) +
                        (batman_packet->num_hna * ETH_ALEN);
@@ -155,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
        /* create clone because function is called more than once */
        skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
        if (skb)
-               send_skb_packet(skb, batman_if, broadcast_addr);
+               send_skb_packet(skb, hard_iface, broadcast_addr);
 }
 
 /* send a batman packet */
 static void send_packet(struct forw_packet *forw_packet)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        struct net_device *soft_iface;
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet =
@@ -205,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)
 
        /* broadcast on every interface */
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->soft_iface != soft_iface)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               send_packet_to_if(forw_packet, batman_if);
+               send_packet_to_if(forw_packet, hard_iface);
        }
        rcu_read_unlock();
 }
 
 static void rebuild_batman_packet(struct bat_priv *bat_priv,
-                                 struct batman_if *batman_if)
+                                 struct hard_iface *hard_iface)
 {
        int new_len;
        unsigned char *new_buff;
@@ -227,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 
        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
-               memcpy(new_buff, batman_if->packet_buff,
+               memcpy(new_buff, hard_iface->packet_buff,
                       sizeof(struct batman_packet));
                batman_packet = (struct batman_packet *)new_buff;
 
@@ -235,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
                                new_buff + sizeof(struct batman_packet),
                                new_len - sizeof(struct batman_packet));
 
-               kfree(batman_if->packet_buff);
-               batman_if->packet_buff = new_buff;
-               batman_if->packet_len = new_len;
+               kfree(hard_iface->packet_buff);
+               hard_iface->packet_buff = new_buff;
+               hard_iface->packet_len = new_len;
        }
 }
 
-void schedule_own_packet(struct batman_if *batman_if)
+void schedule_own_packet(struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server;
 
-       if ((batman_if->if_status == IF_NOT_IN_USE) ||
-           (batman_if->if_status == IF_TO_BE_REMOVED))
+       if ((hard_iface->if_status == IF_NOT_IN_USE) ||
+           (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;
 
        vis_server = atomic_read(&bat_priv->vis_mode);
@@ -261,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
         * outdated packets (especially uninitialized mac addresses) in the
         * packet queue
         */
-       if (batman_if->if_status == IF_TO_BE_ACTIVATED)
-               batman_if->if_status = IF_ACTIVE;
+       if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = IF_ACTIVE;
 
        /* if local hna has changed and interface is a primary interface */
        if ((atomic_read(&bat_priv->hna_local_changed)) &&
-           (batman_if == bat_priv->primary_if))
-               rebuild_batman_packet(bat_priv, batman_if);
+           (hard_iface == bat_priv->primary_if))
+               rebuild_batman_packet(bat_priv, hard_iface);
 
        /**
         * NOTE: packet_buff might just have been re-allocated in
         * rebuild_batman_packet()
         */
-       batman_packet = (struct batman_packet *)batman_if->packet_buff;
+       batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
        /* change sequence number to network order */
        batman_packet->seqno =
-               htonl((uint32_t)atomic_read(&batman_if->seqno));
+               htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags |= VIS_SERVER;
        else
                batman_packet->flags &= ~VIS_SERVER;
 
-       if ((batman_if == bat_priv->primary_if) &&
+       if ((hard_iface == bat_priv->primary_if) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
                batman_packet->gw_flags =
                                (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
        else
                batman_packet->gw_flags = 0;
 
-       atomic_inc(&batman_if->seqno);
+       atomic_inc(&hard_iface->seqno);
 
-       slide_own_bcast_window(batman_if);
+       slide_own_bcast_window(hard_iface);
        send_time = own_send_time(bat_priv);
        add_bat_packet_to_list(bat_priv,
-                              batman_if->packet_buff,
-                              batman_if->packet_len,
-                              batman_if, 1, send_time);
+                              hard_iface->packet_buff,
+                              hard_iface->packet_len,
+                              hard_iface, 1, send_time);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
-                            struct batman_if *if_incoming)
+                            struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        unsigned char in_tq, in_ttl, tq_avg = 0;
@@ -327,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
        if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
 
                /* rebroadcast ogm of best ranking neighbor as is */
-               if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
+               if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
                        batman_packet->tq = orig_node->router->tq_avg;
 
                        if (orig_node->router->last_ttl)
@@ -356,7 +356,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
        else
                batman_packet->flags &= ~DIRECTLINK;
 
-       send_time = forward_send_time(bat_priv);
+       send_time = forward_send_time();
        add_bat_packet_to_list(bat_priv,
                               (unsigned char *)batman_packet,
                               sizeof(struct batman_packet) + hna_buff_len,
@@ -444,7 +444,7 @@ out:
 
 static void send_outstanding_bcast_packet(struct work_struct *work)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
@@ -462,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
        /* rebroadcast packet */
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->soft_iface != soft_iface)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
-                       send_skb_packet(skb1, batman_if, broadcast_addr);
+                       send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();
 
@@ -522,15 +522,15 @@ out:
 }
 
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-                              struct batman_if *batman_if)
+                              struct hard_iface *hard_iface)
 {
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
 
-       if (batman_if)
+       if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
-                       batman_if->net_dev->name);
+                       hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");
@@ -544,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
-               if ((batman_if) &&
-                   (forw_packet->if_incoming != batman_if))
+               if ((hard_iface) &&
+                   (forw_packet->if_incoming != hard_iface))
                        continue;
 
                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -568,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
-               if ((batman_if) &&
-                   (forw_packet->if_incoming != batman_if))
+               if ((hard_iface) &&
+                   (forw_packet->if_incoming != hard_iface))
                        continue;
 
                spin_unlock_bh(&bat_priv->forw_bat_list_lock);
index c4cefa8..7b2ff19 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_
 
-#include "types.h"
-
 int send_skb_packet(struct sk_buff *skb,
-                               struct batman_if *batman_if,
+                               struct hard_iface *hard_iface,
                                uint8_t *dst_addr);
-void schedule_own_packet(struct batman_if *batman_if);
+void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
-                            struct batman_if *if_outgoing);
+                            struct hard_iface *if_outgoing);
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
 void send_outstanding_bat_packet(struct work_struct *work);
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-                              struct batman_if *batman_if);
+                              struct hard_iface *hard_iface);
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
index e89ede1..9ed2614 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #include "send.h"
 #include "bat_debugfs.h"
 #include "translation-table.h"
-#include "types.h"
 #include "hash.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
-#include "send.h"
 #include "bat_sysfs.h"
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
-#include "routing.h"
 
 
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -79,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
        return 0;
 }
 
-static void softif_neigh_free_ref(struct kref *refcount)
+static void softif_neigh_free_rcu(struct rcu_head *rcu)
 {
        struct softif_neigh *softif_neigh;
 
-       softif_neigh = container_of(refcount, struct softif_neigh, refcount);
+       softif_neigh = container_of(rcu, struct softif_neigh, rcu);
        kfree(softif_neigh);
 }
 
-static void softif_neigh_free_rcu(struct rcu_head *rcu)
+static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
 {
-       struct softif_neigh *softif_neigh;
-
-       softif_neigh = container_of(rcu, struct softif_neigh, rcu);
-       kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+       if (atomic_dec_and_test(&softif_neigh->refcount))
+               call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }
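
The two helpers above replace the old kref-based cleanup with a plain atomic_t reference count combined with call_rcu(), the same conversion this series applies to hard_iface, gw_node and neigh_node. In rough outline the scheme looks as follows (a minimal sketch; the struct and function names are invented for illustration and are not batman-adv code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* sketch only: illustrative type and names, not part of this patch */
struct example_entry {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_entry, rcu));
}

static void example_free_ref(struct example_entry *entry)
{
	/* last reference dropped: defer the kfree() until all RCU readers
	 * that might still see the entry have finished */
	if (atomic_dec_and_test(&entry->refcount))
		call_rcu(&entry->rcu, example_free_rcu);
}

static int example_get_ref(struct example_entry *entry)
{
	/* fails once the count has reached zero, so a dying entry found
	 * during an RCU list walk is never resurrected */
	return atomic_inc_not_zero(&entry->refcount);
}

Readers that find an entry under rcu_read_lock() take a reference with the inc_not_zero variant before leaving the read-side critical section, which is exactly what the reworked softif_neigh_get() below does.
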
 
 void softif_neigh_purge(struct bat_priv *bat_priv)
@@ -119,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
                                 softif_neigh->addr, softif_neigh->vid);
                        softif_neigh_tmp = bat_priv->softif_neigh;
                        bat_priv->softif_neigh = NULL;
-                       kref_put(&softif_neigh_tmp->refcount,
-                                softif_neigh_free_ref);
+                       softif_neigh_free_ref(softif_neigh_tmp);
                }
 
-               call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
+               softif_neigh_free_ref(softif_neigh);
        }
 
        spin_unlock_bh(&bat_priv->softif_neigh_lock);
@@ -138,14 +132,17 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
        rcu_read_lock();
        hlist_for_each_entry_rcu(softif_neigh, node,
                                 &bat_priv->softif_neigh_list, list) {
-               if (memcmp(softif_neigh->addr, addr, ETH_ALEN) != 0)
+               if (!compare_eth(softif_neigh->addr, addr))
                        continue;
 
                if (softif_neigh->vid != vid)
                        continue;
 
+               if (!atomic_inc_not_zero(&softif_neigh->refcount))
+                       continue;
+
                softif_neigh->last_seen = jiffies;
-               goto found;
+               goto out;
        }
 
        softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
@@ -155,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
        memcpy(softif_neigh->addr, addr, ETH_ALEN);
        softif_neigh->vid = vid;
        softif_neigh->last_seen = jiffies;
-       kref_init(&softif_neigh->refcount);
+       /* initialize with 2 - caller decrements counter by one */
+       atomic_set(&softif_neigh->refcount, 2);
 
        INIT_HLIST_NODE(&softif_neigh->list);
        spin_lock_bh(&bat_priv->softif_neigh_lock);
        hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
        spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
-found:
-       kref_get(&softif_neigh->refcount);
 out:
        rcu_read_unlock();
        return softif_neigh;
@@ -175,8 +171,6 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct softif_neigh *softif_neigh;
        struct hlist_node *node;
-       size_t buf_size, pos;
-       char *buff;
 
        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -186,33 +180,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 
        seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
 
-       buf_size = 1;
-       /* Estimate length for: "   xx:xx:xx:xx:xx:xx\n" */
        rcu_read_lock();
        hlist_for_each_entry_rcu(softif_neigh, node,
                                 &bat_priv->softif_neigh_list, list)
-               buf_size += 30;
-       rcu_read_unlock();
-
-       buff = kmalloc(buf_size, GFP_ATOMIC);
-       if (!buff)
-               return -ENOMEM;
-
-       buff[0] = '\0';
-       pos = 0;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(softif_neigh, node,
-                                &bat_priv->softif_neigh_list, list) {
-               pos += snprintf(buff + pos, 31, "%s %pM (vid: %d)\n",
+               seq_printf(seq, "%s %pM (vid: %d)\n",
                                bat_priv->softif_neigh == softif_neigh
                                ? "=>" : "  ", softif_neigh->addr,
                                softif_neigh->vid);
-       }
        rcu_read_unlock();
 
-       seq_printf(seq, "%s", buff);
-       kfree(buff);
        return 0;
 }
 
@@ -267,7 +243,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
                         softif_neigh->addr, softif_neigh->vid);
                softif_neigh_tmp = bat_priv->softif_neigh;
                bat_priv->softif_neigh = softif_neigh;
-               kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
+               softif_neigh_free_ref(softif_neigh_tmp);
                /* we need to hold the additional reference */
                goto err;
        }
@@ -285,7 +261,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
        }
 
 out:
-       kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+       softif_neigh_free_ref(softif_neigh);
 err:
        kfree_skb(skb);
        return;
@@ -438,7 +414,7 @@ end:
 }
 
 void interface_rx(struct net_device *soft_iface,
-                 struct sk_buff *skb, struct batman_if *recv_if,
+                 struct sk_buff *skb, struct hard_iface *recv_if,
                  int hdr_size)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -486,7 +462,7 @@ void interface_rx(struct net_device *soft_iface,
 
                memcpy(unicast_packet->dest,
                       bat_priv->softif_neigh->addr, ETH_ALEN);
-               ret = route_unicast_packet(skb, recv_if, hdr_size);
+               ret = route_unicast_packet(skb, recv_if);
                if (ret == NET_RX_DROP)
                        goto dropped;
 
@@ -646,6 +622,19 @@ void softif_destroy(struct net_device *soft_iface)
        unregister_netdevice(soft_iface);
 }
 
+int softif_is_valid(struct net_device *net_dev)
+{
+#ifdef HAVE_NET_DEVICE_OPS
+       if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+               return 1;
+#else
+       if (net_dev->hard_start_xmit == interface_tx)
+               return 1;
+#endif
+
+       return 0;
+}
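
The new softif_is_valid() identifies a batman-adv soft interface by comparing the device's transmit handler against interface_tx, so callers can cheaply reject unrelated net_devices before treating netdev_priv() as a bat_priv. A hypothetical caller (sketch only; the function below is not part of the patch) might use it like this:

/* illustration only - not a function from this patch */
static int example_check_soft_iface(struct net_device *net_dev)
{
	if (!softif_is_valid(net_dev)) {
		pr_err("%s is not a batman-adv soft interface\n",
		       net_dev->name);
		return -EINVAL;
	}

	/* safe: netdev_priv(net_dev) really is a struct bat_priv here */
	return 0;
}
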
+
 /* ethtool */
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
index 02b7733..4789b6f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -27,9 +27,10 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
 void softif_neigh_purge(struct bat_priv *bat_priv);
 int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
 void interface_rx(struct net_device *soft_iface,
-                 struct sk_buff *skb, struct batman_if *recv_if,
+                 struct sk_buff *skb, struct hard_iface *recv_if,
                  int hdr_size);
 struct net_device *softif_create(char *name);
 void softif_destroy(struct net_device *soft_iface);
+int softif_is_valid(struct net_device *net_dev);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
index a633b5a..8d15b48 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,7 +22,6 @@
 #include "main.h"
 #include "translation-table.h"
 #include "soft-interface.h"
-#include "types.h"
 #include "hash.h"
 #include "originator.h"
 
@@ -31,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
                                 struct hna_global_entry *hna_global_entry,
                                 char *message);
 
+/* returns 1 if they are the same mac addr */
+static int compare_lhna(struct hlist_node *node, void *data2)
+{
+       void *data1 = container_of(node, struct hna_local_entry, hash_entry);
+
+       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
+/* returns 1 if they are the same mac addr */
+static int compare_ghna(struct hlist_node *node, void *data2)
+{
+       void *data1 = container_of(node, struct hna_global_entry, hash_entry);
+
+       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 static void hna_local_start_timer(struct bat_priv *bat_priv)
 {
        INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
        queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
 }
 
+static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
+                                                  void *data)
+{
+       struct hashtable_t *hash = bat_priv->hna_local_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
+               if (!compare_eth(hna_local_entry, data))
+                       continue;
+
+               hna_local_entry_tmp = hna_local_entry;
+               break;
+       }
+       rcu_read_unlock();
+
+       return hna_local_entry_tmp;
+}
+
+static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
+                                                    void *data)
+{
+       struct hashtable_t *hash = bat_priv->hna_global_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct hna_global_entry *hna_global_entry;
+       struct hna_global_entry *hna_global_entry_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
+               if (!compare_eth(hna_global_entry, data))
+                       continue;
+
+               hna_global_entry_tmp = hna_global_entry;
+               break;
+       }
+       rcu_read_unlock();
+
+       return hna_global_entry_tmp;
+}
+
 int hna_local_init(struct bat_priv *bat_priv)
 {
        if (bat_priv->hna_local_hash)
@@ -61,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
        int required_bytes;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
-       hna_local_entry =
-               ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
-                                                    compare_orig, choose_orig,
-                                                    addr));
+       hna_local_entry = hna_local_hash_find(bat_priv, addr);
        spin_unlock_bh(&bat_priv->hna_lhash_lock);
 
        if (hna_local_entry) {
@@ -100,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
        hna_local_entry->last_seen = jiffies;
 
        /* the batman interface mac address should never be purged */
-       if (compare_orig(addr, soft_iface->dev_addr))
+       if (compare_eth(addr, soft_iface->dev_addr))
                hna_local_entry->never_purge = 1;
        else
                hna_local_entry->never_purge = 0;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
-       hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
-                hna_local_entry);
+       hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
+                hna_local_entry, &hna_local_entry->hash_entry);
        bat_priv->num_local_hna++;
        atomic_set(&bat_priv->hna_local_changed, 1);
 
@@ -117,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
        /* remove address from global hash if present */
        spin_lock_bh(&bat_priv->hna_ghash_lock);
 
-       hna_global_entry = ((struct hna_global_entry *)
-                               hash_find(bat_priv->hna_global_hash,
-                                         compare_orig, choose_orig, addr));
+       hna_global_entry = hna_global_hash_find(bat_priv, addr);
 
        if (hna_global_entry)
                _hna_global_del_orig(bat_priv, hna_global_entry,
@@ -133,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
 {
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
-       struct element_t *bucket;
-       int i;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       int count = 0;
+       int i, count = 0;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(hna_local_entry, node,
+                                        head, hash_entry) {
                        if (buff_len < (count + 1) * ETH_ALEN)
                                break;
 
-                       hna_local_entry = bucket->data;
                        memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
                               ETH_ALEN);
 
                        count++;
                }
+               rcu_read_unlock();
        }
 
        /* if we did not get all new local hnas see you next time  ;-) */
@@ -171,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
-       int i;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        size_t buf_size, pos;
        char *buff;
+       int i;
 
        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -195,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each(walk, head)
+               rcu_read_lock();
+               __hlist_for_each_rcu(node, head)
                        buf_size += 21;
+               rcu_read_unlock();
        }
 
        buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -204,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
                spin_unlock_bh(&bat_priv->hna_lhash_lock);
                return -ENOMEM;
        }
+
        buff[0] = '\0';
        pos = 0;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       hna_local_entry = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(hna_local_entry, node,
+                                        head, hash_entry) {
                        pos += snprintf(buff + pos, 22, " * %pM\n",
                                        hna_local_entry->addr);
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_bh(&bat_priv->hna_lhash_lock);
@@ -225,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
        return 0;
 }
 
-static void _hna_local_del(void *data, void *arg)
+static void _hna_local_del(struct hlist_node *node, void *arg)
 {
        struct bat_priv *bat_priv = (struct bat_priv *)arg;
+       void *data = container_of(node, struct hna_local_entry, hash_entry);
 
        kfree(data);
        bat_priv->num_local_hna--;
@@ -241,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
        bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
                hna_local_entry->addr, message);
 
-       hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
+       hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
                    hna_local_entry->addr);
-       _hna_local_del(hna_local_entry, bat_priv);
+       _hna_local_del(&hna_local_entry->hash_entry, bat_priv);
 }
 
 void hna_local_remove(struct bat_priv *bat_priv,
@@ -253,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
-       hna_local_entry = (struct hna_local_entry *)
-               hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
-                         addr);
+       hna_local_entry = hna_local_hash_find(bat_priv, addr);
 
        if (hna_local_entry)
                hna_local_del(bat_priv, hna_local_entry, message);
@@ -271,27 +339,29 @@ static void hna_local_purge(struct work_struct *work)
                container_of(delayed_work, struct bat_priv, hna_work);
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
-       int i;
-       struct hlist_node *walk, *safe;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
        unsigned long timeout;
+       int i;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-                       hna_local_entry = bucket->data;
+               hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
+                                         head, hash_entry) {
+                       if (hna_local_entry->never_purge)
+                               continue;
 
                        timeout = hna_local_entry->last_seen;
                        timeout += LOCAL_HNA_TIMEOUT * HZ;
 
-                       if ((!hna_local_entry->never_purge) &&
-                           time_after(jiffies, timeout))
-                               hna_local_del(bat_priv, hna_local_entry,
-                                       "address timed out");
+                       if (time_before(jiffies, timeout))
+                               continue;
+
+                       hna_local_del(bat_priv, hna_local_entry,
+                                     "address timed out");
                }
        }
 
@@ -335,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                spin_lock_bh(&bat_priv->hna_ghash_lock);
 
                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-               hna_global_entry = (struct hna_global_entry *)
-                       hash_find(bat_priv->hna_global_hash, compare_orig,
-                                 choose_orig, hna_ptr);
+               hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
 
                if (!hna_global_entry) {
                        spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -357,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                                hna_global_entry->addr, orig_node->orig);
 
                        spin_lock_bh(&bat_priv->hna_ghash_lock);
-                       hash_add(bat_priv->hna_global_hash, compare_orig,
-                                choose_orig, hna_global_entry);
+                       hash_add(bat_priv->hna_global_hash, compare_ghna,
+                                choose_orig, hna_global_entry,
+                                &hna_global_entry->hash_entry);
 
                }
 
@@ -369,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                spin_lock_bh(&bat_priv->hna_lhash_lock);
 
                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-               hna_local_entry = (struct hna_local_entry *)
-                       hash_find(bat_priv->hna_local_hash, compare_orig,
-                                 choose_orig, hna_ptr);
+               hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
 
                if (hna_local_entry)
                        hna_local_del(bat_priv, hna_local_entry,
@@ -401,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->hna_global_hash;
        struct hna_global_entry *hna_global_entry;
-       int i;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        size_t buf_size, pos;
        char *buff;
+       int i;
 
        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -424,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each(walk, head)
+               rcu_read_lock();
+               __hlist_for_each_rcu(node, head)
                        buf_size += 43;
+               rcu_read_unlock();
        }
 
        buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -439,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       hna_global_entry = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(hna_global_entry, node,
+                                        head, hash_entry) {
                        pos += snprintf(buff + pos, 44,
                                        " * %pM via %pM\n",
                                        hna_global_entry->addr,
                                        hna_global_entry->orig_node->orig);
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -465,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
                hna_global_entry->addr, hna_global_entry->orig_node->orig,
                message);
 
-       hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
+       hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
                    hna_global_entry->addr);
        kfree(hna_global_entry);
 }
@@ -484,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 
        while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
                hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
-               hna_global_entry = (struct hna_global_entry *)
-                       hash_find(bat_priv->hna_global_hash, compare_orig,
-                                 choose_orig, hna_ptr);
+               hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
 
                if ((hna_global_entry) &&
                    (hna_global_entry->orig_node == orig_node))
@@ -503,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
        orig_node->hna_buff = NULL;
 }
 
-static void hna_global_del(void *data, void *arg)
+static void hna_global_del(struct hlist_node *node, void *arg)
 {
+       void *data = container_of(node, struct hna_global_entry, hash_entry);
+
        kfree(data);
 }
 
@@ -520,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv)
 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 {
        struct hna_global_entry *hna_global_entry;
+       struct orig_node *orig_node = NULL;
 
        spin_lock_bh(&bat_priv->hna_ghash_lock);
-       hna_global_entry = (struct hna_global_entry *)
-                               hash_find(bat_priv->hna_global_hash,
-                                         compare_orig, choose_orig, addr);
-       spin_unlock_bh(&bat_priv->hna_ghash_lock);
+       hna_global_entry = hna_global_hash_find(bat_priv, addr);
 
        if (!hna_global_entry)
-               return NULL;
+               goto out;
 
-       return hna_global_entry->orig_node;
+       if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
+               goto out;
+
+       orig_node = hna_global_entry->orig_node;
+
+out:
+       spin_unlock_bh(&bat_priv->hna_ghash_lock);
+       return orig_node;
 }
index 10c4c5c..f19931c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-#include "types.h"
-
 int hna_local_init(struct bat_priv *bat_priv);
 void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
 void hna_local_remove(struct bat_priv *bat_priv,
index bf3f6f5..83445cf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -33,7 +33,7 @@
         sizeof(struct bcast_packet))))
 
 
-struct batman_if {
+struct hard_iface {
        struct list_head list;
        int16_t if_num;
        char if_status;
@@ -43,7 +43,7 @@ struct batman_if {
        unsigned char *packet_buff;
        int packet_len;
        struct kobject *hardif_obj;
-       struct kref refcount;
+       atomic_t refcount;
        struct packet_type batman_adv_ptype;
        struct net_device *soft_iface;
        struct rcu_head rcu;
@@ -70,8 +70,6 @@ struct orig_node {
        struct neigh_node *router;
        unsigned long *bcast_own;
        uint8_t *bcast_own_sum;
-       uint8_t tq_own;
-       int tq_asym_penalty;
        unsigned long last_valid;
        unsigned long bcast_seqno_reset;
        unsigned long batman_seqno_reset;
@@ -83,20 +81,28 @@ struct orig_node {
        uint8_t last_ttl;
        unsigned long bcast_bits[NUM_WORDS];
        uint32_t last_bcast_seqno;
-       struct list_head neigh_list;
+       struct hlist_head neigh_list;
        struct list_head frag_list;
+       spinlock_t neigh_list_lock; /* protects neighbor list */
+       atomic_t refcount;
+       struct rcu_head rcu;
+       struct hlist_node hash_entry;
+       struct bat_priv *bat_priv;
        unsigned long last_frag_packet;
-       struct {
-               uint8_t candidates;
-               struct neigh_node *selected;
-       } bond;
+       spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
+                                 * neigh_node->real_bits,
+                                 * neigh_node->real_packet_count */
+       spinlock_t bcast_seqno_lock; /* protects bcast_bits,
+                                     *  last_bcast_seqno */
+       atomic_t bond_candidates;
+       struct list_head bond_list;
 };
 
 struct gw_node {
        struct hlist_node list;
        struct orig_node *orig_node;
        unsigned long deleted;
-       struct kref refcount;
+       atomic_t refcount;
        struct rcu_head rcu;
 };
 
@@ -105,18 +111,20 @@ struct gw_node {
  *     @last_valid: when last packet via this neighbor was received
  */
 struct neigh_node {
-       struct list_head list;
+       struct hlist_node list;
        uint8_t addr[ETH_ALEN];
        uint8_t real_packet_count;
        uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
        uint8_t tq_index;
        uint8_t tq_avg;
        uint8_t last_ttl;
-       struct neigh_node *next_bond_candidate;
+       struct list_head bonding_list;
        unsigned long last_valid;
        unsigned long real_bits[NUM_WORDS];
+       atomic_t refcount;
+       struct rcu_head rcu;
        struct orig_node *orig_node;
-       struct batman_if *if_incoming;
+       struct hard_iface *if_incoming;
 };
 
 
@@ -140,7 +148,7 @@ struct bat_priv {
        struct hlist_head softif_neigh_list;
        struct softif_neigh *softif_neigh;
        struct debug_log *debug_log;
-       struct batman_if *primary_if;
+       struct hard_iface *primary_if;
        struct kobject *mesh_obj;
        struct dentry *debug_dir;
        struct hlist_head forw_bat_list;
@@ -151,12 +159,11 @@ struct bat_priv {
        struct hashtable_t *hna_local_hash;
        struct hashtable_t *hna_global_hash;
        struct hashtable_t *vis_hash;
-       spinlock_t orig_hash_lock; /* protects orig_hash */
        spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
        spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
        spinlock_t hna_lhash_lock; /* protects hna_local_hash */
        spinlock_t hna_ghash_lock; /* protects hna_global_hash */
-       spinlock_t gw_list_lock; /* protects gw_list */
+       spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
        spinlock_t vis_hash_lock; /* protects vis_hash */
        spinlock_t vis_list_lock; /* protects vis_info::recv_list */
        spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
@@ -165,7 +172,7 @@ struct bat_priv {
        struct delayed_work hna_work;
        struct delayed_work orig_work;
        struct delayed_work vis_work;
-       struct gw_node *curr_gw;
+       struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
        struct vis_info *my_vis_info;
 };
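
Annotating curr_gw as __rcu documents the access discipline for that pointer: updates happen under gw_list_lock via rcu_assign_pointer(), while readers dereference it inside rcu_read_lock() with rcu_dereference() and, if they need the gateway beyond the read-side section, take a reference first. A generic sketch of that pattern (function names invented, assuming the atomic_t refcount that gw_node now carries):

/* sketch of the __rcu pointer discipline, not code from this patch */
static struct gw_node *example_get_curr_gw(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;

	rcu_read_lock();
	gw_node = rcu_dereference(bat_priv->curr_gw);
	if (gw_node && !atomic_inc_not_zero(&gw_node->refcount))
		gw_node = NULL;
	rcu_read_unlock();

	return gw_node;
}

static void example_set_curr_gw(struct bat_priv *bat_priv,
				struct gw_node *new_gw_node)
{
	spin_lock_bh(&bat_priv->gw_list_lock);
	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
	spin_unlock_bh(&bat_priv->gw_list_lock);
}
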
 
@@ -188,11 +195,13 @@ struct hna_local_entry {
        uint8_t addr[ETH_ALEN];
        unsigned long last_seen;
        char never_purge;
+       struct hlist_node hash_entry;
 };
 
 struct hna_global_entry {
        uint8_t addr[ETH_ALEN];
        struct orig_node *orig_node;
+       struct hlist_node hash_entry;
 };
 
 /**
@@ -208,7 +217,7 @@ struct forw_packet {
        uint32_t direct_link_flags;
        uint8_t num_packets;
        struct delayed_work delayed_work;
-       struct batman_if *if_incoming;
+       struct hard_iface *if_incoming;
 };
 
 /* While scanning for vis-entries of a particular vis-originator
@@ -242,6 +251,7 @@ struct vis_info {
                             * from.  we should not reply to them. */
        struct list_head send_list;
        struct kref refcount;
+       struct hlist_node hash_entry;
        struct bat_priv *bat_priv;
        /* this packet might be part of the vis send queue. */
        struct sk_buff *skb_packet;
@@ -264,7 +274,7 @@ struct softif_neigh {
        uint8_t addr[ETH_ALEN];
        unsigned long last_seen;
        short vid;
-       struct kref refcount;
+       atomic_t refcount;
        struct rcu_head rcu;
 };
 
index ee41fef..19f84bd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -39,8 +39,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
                (struct unicast_frag_packet *)skb->data;
        struct sk_buff *tmp_skb;
        struct unicast_packet *unicast_packet;
-       int hdr_len = sizeof(struct unicast_packet),
-           uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
+       int hdr_len = sizeof(struct unicast_packet);
+       int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
 
        /* set skb to the first part and tmp_skb to the second part */
        if (up->flags & UNI_FRAG_HEAD) {
@@ -50,12 +50,12 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
                skb = tfp->skb;
        }
 
+       if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+               goto err;
+
        skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-       if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-               /* free buffered skb, skb will be freed later */
-               kfree_skb(tfp->skb);
-               return NULL;
-       }
+       if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+               goto err;
 
        /* move free entry to end */
        tfp->skb = NULL;
@@ -70,6 +70,11 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
        unicast_packet->packet_type = BAT_UNICAST;
 
        return skb;
+
+err:
+       /* free buffered skb, skb will be freed later */
+       kfree_skb(tfp->skb);
+       return NULL;
 }
 
 static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
@@ -178,15 +183,10 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
                (struct unicast_frag_packet *)skb->data;
 
        *new_skb = NULL;
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                   hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                             unicast_packet->orig));
 
-       if (!orig_node) {
-               pr_debug("couldn't find originator in orig_hash\n");
+       orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
+       if (!orig_node)
                goto out;
-       }
 
        orig_node->last_frag_packet = jiffies;
 
@@ -210,21 +210,24 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
        /* if not, merge failed */
        if (*new_skb)
                ret = NET_RX_SUCCESS;
-out:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
 
+out:
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
 
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-                 struct batman_if *batman_if, uint8_t dstaddr[])
+                 struct hard_iface *hard_iface, uint8_t dstaddr[])
 {
        struct unicast_packet tmp_uc, *unicast_packet;
        struct sk_buff *frag_skb;
        struct unicast_frag_packet *frag1, *frag2;
        int uc_hdr_len = sizeof(struct unicast_packet);
        int ucf_hdr_len = sizeof(struct unicast_frag_packet);
-       int data_len = skb->len;
+       int data_len = skb->len - uc_hdr_len;
+       int large_tail = 0;
+       uint16_t seqno;
 
        if (!bat_priv->primary_if)
                goto dropped;
@@ -232,10 +235,11 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
        frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
        if (!frag_skb)
                goto dropped;
+       skb_reserve(frag_skb, ucf_hdr_len);
 
        unicast_packet = (struct unicast_packet *) skb->data;
        memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
-       skb_split(skb, frag_skb, data_len / 2);
+       skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
 
        if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
            my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
@@ -253,16 +257,18 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
        memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
 
-       frag1->flags |= UNI_FRAG_HEAD;
-       frag2->flags &= ~UNI_FRAG_HEAD;
+       if (data_len & 1)
+               large_tail = UNI_FRAG_LARGETAIL;
 
-       frag1->seqno = htons((uint16_t)atomic_inc_return(
-                            &batman_if->frag_seqno));
-       frag2->seqno = htons((uint16_t)atomic_inc_return(
-                            &batman_if->frag_seqno));
+       frag1->flags = UNI_FRAG_HEAD | large_tail;
+       frag2->flags = large_tail;
 
-       send_skb_packet(skb, batman_if, dstaddr);
-       send_skb_packet(frag_skb, batman_if, dstaddr);
+       seqno = atomic_add_return(2, &hard_iface->frag_seqno);
+       frag1->seqno = htons(seqno - 1);
+       frag2->seqno = htons(seqno);
+
+       send_skb_packet(skb, hard_iface, dstaddr);
+       send_skb_packet(frag_skb, hard_iface, dstaddr);
        return NET_RX_SUCCESS;
 
 drop_frag:
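
The sequence numbers for the two fragments are now reserved with a single atomic_add_return(2, ...) instead of two separate atomic_inc_return() calls; this keeps the head/tail pair consecutive even when several CPUs fragment packets on the same interface concurrently. In isolation the pairing works like this (sketch; the truncation to 16 bit matches the htons((uint16_t)...) cast above):

/* sketch: deriving a consecutive seqno pair from one atomic update */
uint16_t seqno = (uint16_t)atomic_add_return(2, &hard_iface->frag_seqno);
uint16_t head_seqno = seqno - 1;	/* goes into frag1->seqno */
uint16_t tail_seqno = seqno;		/* goes into frag2->seqno */
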
@@ -277,44 +283,36 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
        struct unicast_packet *unicast_packet;
        struct orig_node *orig_node;
-       struct batman_if *batman_if;
-       struct neigh_node *router;
+       struct neigh_node *neigh_node;
        int data_len = skb->len;
-       uint8_t dstaddr[6];
-
-       spin_lock_bh(&bat_priv->orig_hash_lock);
+       int ret = 1;
 
        /* get routing information */
-       if (is_multicast_ether_addr(ethhdr->h_dest))
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
                orig_node = (struct orig_node *)gw_get_selected(bat_priv);
-       else
-               orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                          compare_orig,
-                                                          choose_orig,
-                                                          ethhdr->h_dest));
-
-       /* check for hna host */
-       if (!orig_node)
-               orig_node = transtable_search(bat_priv, ethhdr->h_dest);
-
-       router = find_router(bat_priv, orig_node, NULL);
-
-       if (!router)
-               goto unlock;
+               if (orig_node)
+                       goto find_router;
+       }
 
-       /* don't lock while sending the packets ... we therefore
-               * copy the required data before sending */
+       /* check for hna host - increases orig_node refcount */
+       orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
-       batman_if = router->if_incoming;
-       memcpy(dstaddr, router->addr, ETH_ALEN);
+find_router:
+       /**
+        * find_router():
+        *  - if orig_node is NULL it returns NULL
+        *  - increases neigh_node's refcount if found.
+        */
+       neigh_node = find_router(bat_priv, orig_node, NULL);
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!neigh_node)
+               goto out;
 
-       if (batman_if->if_status != IF_ACTIVE)
-               goto dropped;
+       if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+               goto out;
 
        if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
-               goto dropped;
+               goto out;
 
        unicast_packet = (struct unicast_packet *)skb->data;
 
@@ -328,18 +326,24 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 
        if (atomic_read(&bat_priv->fragmentation) &&
            data_len + sizeof(struct unicast_packet) >
-           batman_if->net_dev->mtu) {
+                               neigh_node->if_incoming->net_dev->mtu) {
                /* send frag skb decreases ttl */
                unicast_packet->ttl++;
-               return frag_send_skb(skb, bat_priv, batman_if,
-                                    dstaddr);
+               ret = frag_send_skb(skb, bat_priv,
+                                   neigh_node->if_incoming, neigh_node->addr);
+               goto out;
        }
-       send_skb_packet(skb, batman_if, dstaddr);
-       return 0;
 
-unlock:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-dropped:
-       kfree_skb(skb);
-       return 1;
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = 0;
+       goto out;
+
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       if (ret == 1)
+               kfree_skb(skb);
+       return ret;
 }
index e32b786..16ad7a9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -22,6 +22,8 @@
 #ifndef _NET_BATMAN_ADV_UNICAST_H_
 #define _NET_BATMAN_ADV_UNICAST_H_
 
+#include "packet.h"
+
 #define FRAG_TIMEOUT 10000     /* purge frag list entries after time in ms */
 #define FRAG_BUFFER_SIZE 6     /* number of list elements in buffer */
 
@@ -30,6 +32,27 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 void frag_list_free(struct list_head *head);
 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-                 struct batman_if *batman_if, uint8_t dstaddr[]);
+                 struct hard_iface *hard_iface, uint8_t dstaddr[]);
+
+static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+{
+       struct unicast_frag_packet *unicast_packet;
+       int uneven_correction = 0;
+       unsigned int merged_size;
+
+       unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+       if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
+               if (unicast_packet->flags & UNI_FRAG_HEAD)
+                       uneven_correction = 1;
+               else
+                       uneven_correction = -1;
+       }
+
+       merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+       merged_size += sizeof(struct unicast_packet) + uneven_correction;
+
+       return merged_size <= mtu;
+}
 
 #endif /* _NET_BATMAN_ADV_UNICAST_H_ */
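
frag_can_reassemble() estimates the size of the merged packet before attempting reassembly: take the payload left after stripping one unicast_frag_packet header, double it, add back the smaller unicast_packet header, and correct by one byte when the original length was odd (UNI_FRAG_LARGETAIL marks that case: +1 when seen on the head fragment, -1 on the tail). A small standalone check with invented header sizes and lengths, just to make the arithmetic concrete (the real sizes come from packet.h and differ):

#include <stdio.h>

int main(void)
{
	/* all numbers below are assumptions for illustration only */
	unsigned int skb_len = 780;	/* head fragment as received */
	unsigned int frag_hdr_len = 30;	/* assumed sizeof(struct unicast_frag_packet) */
	unsigned int uc_hdr_len = 10;	/* assumed sizeof(struct unicast_packet) */
	int uneven_correction = 1;	/* LARGETAIL seen on the head fragment */
	unsigned int mtu = 1500;

	unsigned int merged_size = (skb_len - frag_hdr_len) * 2
				   + uc_hdr_len + uneven_correction;

	/* (780 - 30) * 2 + 10 + 1 = 1511 > 1500 -> leave fragmented */
	printf("merged size %u -> %s\n", merged_size,
	       merged_size <= mtu ? "reassemble" : "leave fragmented");
	return 0;
}
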
index cd4c423..f90212f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -64,18 +64,20 @@ static void free_info(struct kref *ref)
 
        spin_unlock_bh(&bat_priv->vis_list_lock);
        kfree_skb(info->skb_packet);
+       kfree(info);
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(void *data1, void *data2)
+static int vis_info_cmp(struct hlist_node *node, void *data2)
 {
        struct vis_info *d1, *d2;
        struct vis_packet *p1, *p2;
-       d1 = data1;
+
+       d1 = container_of(node, struct vis_info, hash_entry);
        d2 = data2;
        p1 = (struct vis_packet *)d1->skb_packet->data;
        p2 = (struct vis_packet *)d2->skb_packet->data;
-       return compare_orig(p1->vis_orig, p2->vis_orig);
+       return compare_eth(p1->vis_orig, p2->vis_orig);
 }
 
 /* hash function to choose an entry in a hash table of given size */
@@ -103,6 +105,34 @@ static int vis_info_choose(void *data, int size)
        return hash % size;
 }
 
+static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
+                                     void *data)
+{
+       struct hashtable_t *hash = bat_priv->vis_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct vis_info *vis_info, *vis_info_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = vis_info_choose(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
+               if (!vis_info_cmp(node, data))
+                       continue;
+
+               vis_info_tmp = vis_info;
+               break;
+       }
+       rcu_read_unlock();
+
+       return vis_info_tmp;
+}
+
 /* insert interface to the list of interfaces of one originator, if it
  * does not already exist in the list */
 static void vis_data_insert_interface(const uint8_t *interface,
@@ -113,7 +143,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
        struct hlist_node *pos;
 
        hlist_for_each_entry(entry, pos, if_list, list) {
-               if (compare_orig(entry->addr, (void *)interface))
+               if (compare_eth(entry->addr, (void *)interface))
                        return;
        }
 
@@ -165,7 +195,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
        /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
        if (primary && entry->quality == 0)
                return sprintf(buff, "HNA %pM, ", entry->dest);
-       else if (compare_orig(entry->src, src))
+       else if (compare_eth(entry->src, src))
                return sprintf(buff, "TQ %pM %d, ", entry->dest,
                               entry->quality);
 
@@ -174,9 +204,8 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
 
 int vis_seq_print_text(struct seq_file *seq, void *offset)
 {
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct vis_info *info;
        struct vis_packet *packet;
        struct vis_info_entry *entries;
@@ -202,8 +231,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       info = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(info, node, head, hash_entry) {
                        packet = (struct vis_packet *)info->skb_packet->data;
                        entries = (struct vis_info_entry *)
                                ((char *)packet + sizeof(struct vis_packet));
@@ -212,7 +241,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                if (entries[j].quality == 0)
                                        continue;
                                compare =
-                                compare_orig(entries[j].src, packet->vis_orig);
+                                compare_eth(entries[j].src, packet->vis_orig);
                                vis_data_insert_interface(entries[j].src,
                                                          &vis_if_list,
                                                          compare);
@@ -222,7 +251,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                buf_size += 18 + 26 * packet->entries;
 
                                /* add primary/secondary records */
-                               if (compare_orig(entry->addr, packet->vis_orig))
+                               if (compare_eth(entry->addr, packet->vis_orig))
                                        buf_size +=
                                          vis_data_count_prim_sec(&vis_if_list);
 
@@ -235,6 +264,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                kfree(entry);
                        }
                }
+               rcu_read_unlock();
        }
 
        buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -248,8 +278,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       info = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(info, node, head, hash_entry) {
                        packet = (struct vis_packet *)info->skb_packet->data;
                        entries = (struct vis_info_entry *)
                                ((char *)packet + sizeof(struct vis_packet));
@@ -258,7 +288,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                if (entries[j].quality == 0)
                                        continue;
                                compare =
-                                compare_orig(entries[j].src, packet->vis_orig);
+                                compare_eth(entries[j].src, packet->vis_orig);
                                vis_data_insert_interface(entries[j].src,
                                                          &vis_if_list,
                                                          compare);
@@ -268,15 +298,15 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                buff_pos += sprintf(buff + buff_pos, "%pM,",
                                                entry->addr);
 
-                               for (i = 0; i < packet->entries; i++)
+                               for (j = 0; j < packet->entries; j++)
                                        buff_pos += vis_data_read_entry(
                                                        buff + buff_pos,
-                                                       &entries[i],
+                                                       &entries[j],
                                                        entry->addr,
                                                        entry->primary);
 
                                /* add primary/secondary records */
-                               if (compare_orig(entry->addr, packet->vis_orig))
+                               if (compare_eth(entry->addr, packet->vis_orig))
                                        buff_pos +=
                                         vis_data_read_prim_sec(buff + buff_pos,
                                                                &vis_if_list);
@@ -290,6 +320,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                kfree(entry);
                        }
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_bh(&bat_priv->vis_hash_lock);
@@ -344,7 +375,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
 
        spin_lock_bh(&bat_priv->vis_list_lock);
        list_for_each_entry(entry, recv_list, list) {
-               if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
+               if (compare_eth(entry->mac, mac)) {
                        spin_unlock_bh(&bat_priv->vis_list_lock);
                        return 1;
                }
@@ -380,8 +411,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
                                                     sizeof(struct vis_packet));
 
        memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-       old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-                            &search_elem);
+       old_info = vis_hash_find(bat_priv, &search_elem);
        kfree_skb(search_elem.skb_packet);
 
        if (old_info) {
@@ -441,10 +471,10 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
 
        /* try to add it */
        hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-                             info);
+                             info, &info->hash_entry);
        if (hash_added < 0) {
                /* did not work (for some reason) */
-               kref_put(&old_info->refcount, free_info);
+               kref_put(&info->refcount, free_info);
                info = NULL;
        }
 
@@ -528,9 +558,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
                                struct vis_info *info)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct vis_packet *packet;
        int best_tq = -1, i;
@@ -540,16 +569,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        if ((orig_node) && (orig_node->router) &&
-                       (orig_node->flags & VIS_SERVER) &&
-                       (orig_node->router->tq_avg > best_tq)) {
+                           (orig_node->flags & VIS_SERVER) &&
+                           (orig_node->router->tq_avg > best_tq)) {
                                best_tq = orig_node->router->tq_avg;
                                memcpy(packet->target_orig, orig_node->orig,
                                       ETH_ALEN);
                        }
                }
+               rcu_read_unlock();
        }
 
        return best_tq;
@@ -572,9 +602,8 @@ static bool vis_packet_full(struct vis_info *info)
 static int generate_vis_packet(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct neigh_node *neigh_node;
        struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
@@ -586,7 +615,6 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        info->first_seen = jiffies;
        packet->vis_type = atomic_read(&bat_priv->vis_mode);
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
        packet->ttl = TTL;
        packet->seqno = htonl(ntohl(packet->seqno) + 1);
@@ -596,23 +624,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
                best_tq = find_best_vis_server(bat_priv, info);
 
-               if (best_tq < 0) {
-                       spin_unlock_bh(&bat_priv->orig_hash_lock);
+               if (best_tq < 0)
                        return -1;
-               }
        }
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        neigh_node = orig_node->router;
 
                        if (!neigh_node)
                                continue;
 
-                       if (!compare_orig(neigh_node->addr, orig_node->orig))
+                       if (!compare_eth(neigh_node->addr, orig_node->orig))
                                continue;
 
                        if (neigh_node->if_incoming->if_status != IF_ACTIVE)
@@ -631,23 +657,19 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
                        entry->quality = neigh_node->tq_avg;
                        packet->entries++;
 
-                       if (vis_packet_full(info)) {
-                               spin_unlock_bh(&bat_priv->orig_hash_lock);
-                               return 0;
-                       }
+                       if (vis_packet_full(info))
+                               goto unlock;
                }
+               rcu_read_unlock();
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-
        hash = bat_priv->hna_local_hash;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       hna_local_entry = bucket->data;
+               hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
                        entry = (struct vis_info_entry *)
                                        skb_put(info->skb_packet,
                                                sizeof(*entry));
@@ -665,6 +687,10 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
 
        spin_unlock_bh(&bat_priv->hna_lhash_lock);
        return 0;
+
+unlock:
+       rcu_read_unlock();
+       return 0;
 }
 
 /* free old vis packets. Must be called with this vis_hash_lock
@@ -673,25 +699,22 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
 {
        int i;
        struct hashtable_t *hash = bat_priv->vis_hash;
-       struct hlist_node *walk, *safe;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct vis_info *info;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-                       info = bucket->data;
-
+               hlist_for_each_entry_safe(info, node, node_tmp,
+                                         head, hash_entry) {
                        /* never purge own data. */
                        if (info == bat_priv->my_vis_info)
                                continue;
 
                        if (time_after(jiffies,
                                       info->first_seen + VIS_TIMEOUT * HZ)) {
-                               hlist_del(walk);
-                               kfree(bucket);
+                               hlist_del(node);
                                send_list_del(info);
                                kref_put(&info->refcount, free_info);
                        }
@@ -703,27 +726,24 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
                                 struct vis_info *info)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct vis_packet *packet;
        struct sk_buff *skb;
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        uint8_t dstaddr[ETH_ALEN];
        int i;
 
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        packet = (struct vis_packet *)info->skb_packet->data;
 
        /* send to all routers in range. */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        /* if it's a vis server and reachable, send it. */
                        if ((!orig_node) || (!orig_node->router))
                                continue;
@@ -736,54 +756,61 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
                                continue;
 
                        memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-                       batman_if = orig_node->router->if_incoming;
+                       hard_iface = orig_node->router->if_incoming;
                        memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-                       spin_unlock_bh(&bat_priv->orig_hash_lock);
 
                        skb = skb_clone(info->skb_packet, GFP_ATOMIC);
                        if (skb)
-                               send_skb_packet(skb, batman_if, dstaddr);
+                               send_skb_packet(skb, hard_iface, dstaddr);
 
-                       spin_lock_bh(&bat_priv->orig_hash_lock);
                }
-
+               rcu_read_unlock();
        }
-
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
 }
 
 static void unicast_vis_packet(struct bat_priv *bat_priv,
                               struct vis_info *info)
 {
        struct orig_node *orig_node;
+       struct neigh_node *neigh_node = NULL;
        struct sk_buff *skb;
        struct vis_packet *packet;
-       struct batman_if *batman_if;
-       uint8_t dstaddr[ETH_ALEN];
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        packet = (struct vis_packet *)info->skb_packet->data;
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  packet->target_orig));
 
-       if ((!orig_node) || (!orig_node->router))
-               goto out;
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, packet->target_orig);
 
-       /* don't lock while sending the packets ... we therefore
-        * copy the required data before sending */
-       batman_if = orig_node->router->if_incoming;
-       memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!orig_node)
+               goto unlock;
+
+       neigh_node = orig_node->router;
+
+       if (!neigh_node)
+               goto unlock;
+
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
+
+       rcu_read_unlock();
 
        skb = skb_clone(info->skb_packet, GFP_ATOMIC);
        if (skb)
-               send_skb_packet(skb, batman_if, dstaddr);
+               send_skb_packet(skb, neigh_node->if_incoming,
+                               neigh_node->addr);
 
-       return;
+       goto out;
 
+unlock:
+       rcu_read_unlock();
 out:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       return;
 }
 
 /* only send one vis packet. called from send_vis_packets() */
@@ -815,7 +842,7 @@ static void send_vis_packets(struct work_struct *work)
                container_of(work, struct delayed_work, work);
        struct bat_priv *bat_priv =
                container_of(delayed_work, struct bat_priv, vis_work);
-       struct vis_info *info, *temp;
+       struct vis_info *info;
 
        spin_lock_bh(&bat_priv->vis_hash_lock);
        purge_vis_packets(bat_priv);
@@ -825,8 +852,9 @@ static void send_vis_packets(struct work_struct *work)
                send_list_add(bat_priv, bat_priv->my_vis_info);
        }
 
-       list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
-                                send_list) {
+       while (!list_empty(&bat_priv->vis_send_list)) {
+               info = list_first_entry(&bat_priv->vis_send_list,
+                                       typeof(*info), send_list);
 
                kref_get(&info->refcount);
                spin_unlock_bh(&bat_priv->vis_hash_lock);
@@ -894,7 +922,8 @@ int vis_init(struct bat_priv *bat_priv)
        INIT_LIST_HEAD(&bat_priv->vis_send_list);
 
        hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-                             bat_priv->my_vis_info);
+                             bat_priv->my_vis_info,
+                             &bat_priv->my_vis_info->hash_entry);
        if (hash_added < 0) {
                pr_err("Can't add own vis packet into hash\n");
                /* not in hash, need to remove it manually. */
@@ -916,10 +945,11 @@ err:
 }
 
 /* Decrease the reference count on a hash item info */
-static void free_info_ref(void *data, void *arg)
+static void free_info_ref(struct hlist_node *node, void *arg)
 {
-       struct vis_info *info = data;
+       struct vis_info *info;
 
+       info = container_of(node, struct vis_info, hash_entry);
        send_list_del(info);
        kref_put(&info->refcount, free_info);
 }
index 2c3b330..31b820d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index ed37168..6ae5ec5 100644 (file)
@@ -27,31 +27,27 @@ menuconfig BT
          compile it as module (bluetooth).
 
          To use the Linux Bluetooth subsystem, you will need several user-space
-         utilities like hciconfig and hcid.  These utilities and updates to
-         Bluetooth kernel modules are provided in the BlueZ packages.
-         For more information, see <http://www.bluez.org/>.
+         utilities like hciconfig and bluetoothd.  These utilities and updates
+         to Bluetooth kernel modules are provided in the BlueZ packages.  For
+         more information, see <http://www.bluez.org/>.
+
+if BT != n
 
 config BT_L2CAP
-       tristate "L2CAP protocol support"
-       depends on BT
+       bool "L2CAP protocol support"
        select CRC16
        help
          L2CAP (Logical Link Control and Adaptation Protocol) provides
          connection oriented and connection-less data transport.  L2CAP
          support is required for most Bluetooth applications.
 
-         Say Y here to compile L2CAP support into the kernel or say M to
-         compile it as module (l2cap).
-
 config BT_SCO
-       tristate "SCO links support"
-       depends on BT
+       bool "SCO links support"
        help
          SCO link provides voice transport over Bluetooth.  SCO support is
          required for voice applications like Headset and Audio.
 
-         Say Y here to compile SCO support into the kernel or say M to
-         compile it as module (sco).
+endif
 
 source "net/bluetooth/rfcomm/Kconfig"
 
index 250f954..f04fe9a 100644 (file)
@@ -3,11 +3,11 @@
 #
 
 obj-$(CONFIG_BT)       += bluetooth.o
-obj-$(CONFIG_BT_L2CAP) += l2cap.o
-obj-$(CONFIG_BT_SCO)   += sco.o
 obj-$(CONFIG_BT_RFCOMM)        += rfcomm/
 obj-$(CONFIG_BT_BNEP)  += bnep/
 obj-$(CONFIG_BT_CMTP)  += cmtp/
 obj-$(CONFIG_BT_HIDP)  += hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-$(CONFIG_BT_L2CAP)   += l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_SCO)     += sco.o
index c4cf3f5..8add9b4 100644 (file)
@@ -40,7 +40,7 @@
 
 #include <net/bluetooth/bluetooth.h>
 
-#define VERSION "2.15"
+#define VERSION "2.16"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO   8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
 
        BT_DBG("parent %p", parent);
 
+       local_bh_disable();
        list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
                sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
 
-               lock_sock(sk);
+               bh_lock_sock(sk);
 
                /* FIXME: Is this check still needed */
                if (sk->sk_state == BT_CLOSED) {
-                       release_sock(sk);
+                       bh_unlock_sock(sk);
                        bt_accept_unlink(sk);
                        continue;
                }
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
                        bt_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
-                       release_sock(sk);
+
+                       bh_unlock_sock(sk);
+                       local_bh_enable();
                        return sk;
                }
 
-               release_sock(sk);
+               bh_unlock_sock(sk);
        }
+       local_bh_enable();
+
        return NULL;
 }
 EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
 
-       if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
+       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if (copied >= target)
                                break;
 
-                       if ((err = sock_error(sk)) != 0)
+                       err = sock_error(sk);
+                       if (err)
                                break;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
        return 0;
 }
 
-unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
        unsigned int mask = 0;
@@ -538,13 +545,39 @@ static int __init bt_init(void)
 
        BT_INFO("HCI device and connection manager initialized");
 
-       hci_sock_init();
+       err = hci_sock_init();
+       if (err < 0)
+               goto error;
+
+       err = l2cap_init();
+       if (err < 0)
+               goto sock_err;
+
+       err = sco_init();
+       if (err < 0) {
+               l2cap_exit();
+               goto sock_err;
+       }
 
        return 0;
+
+sock_err:
+       hci_sock_cleanup();
+
+error:
+       sock_unregister(PF_BLUETOOTH);
+       bt_sysfs_cleanup();
+
+       return err;
 }
 
 static void __exit bt_exit(void)
 {
+
+       sco_exit();
+
+       l2cap_exit();
+
        hci_sock_cleanup();
 
        sock_unregister(PF_BLUETOOTH);
index 5868597..03d4d12 100644 (file)
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
 {
        char flt[50] = "";
 
-       l2cap_load();
-
 #ifdef CONFIG_BT_BNEP_PROTO_FILTER
        strcat(flt, "protocol ");
 #endif
index 2862f53..d935da7 100644 (file)
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
                        sockfd_put(nsock);
                        return -EBADFD;
                }
+               ca.device[sizeof(ca.device)-1] = 0;
 
                err = bnep_add_connection(&ca, nsock);
                if (!err) {
index 3487cfe..67cff81 100644 (file)
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
 
        BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
 
-       if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) {
+       skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for interoperability packet");
                return;
        }
index 8e5f292..964ea91 100644 (file)
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
 
        size = (skb) ? skb->len + count : count;
 
-       if (!(nskb = alloc_skb(size, GFP_ATOMIC))) {
+       nskb = alloc_skb(size, GFP_ATOMIC);
+       if (!nskb) {
                BT_ERR("Can't allocate memory for CAPI message");
                return;
        }
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
 
        BT_DBG("session %p", session);
 
-       if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
+       nskb = alloc_skb(session->mtu, GFP_ATOMIC);
+       if (!nskb) {
                BT_ERR("Can't allocate memory for new frame");
                return;
        }
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
        while ((skb = skb_dequeue(&session->transmit))) {
                struct cmtp_scb *scb = (void *) skb->cb;
 
-               if ((tail = (session->mtu - nskb->len)) < 5) {
+               tail = session->mtu - nskb->len;
+               if (tail < 5) {
                        cmtp_send_frame(session, nskb->data, nskb->len);
                        skb_trim(nskb, 0);
                        tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
 
 static int __init cmtp_init(void)
 {
-       l2cap_load();
-
        BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
 
        cmtp_init_sockets();
index 6b90a41..7a6f56b 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+static void hci_le_connect(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct hci_cp_le_create_conn cp;
+
+       conn->state = BT_CONNECT;
+       conn->out = 1;
+       conn->link_mode |= HCI_LM_MASTER;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.scan_interval = cpu_to_le16(0x0004);
+       cp.scan_window = cpu_to_le16(0x0004);
+       bacpy(&cp.peer_addr, &conn->dst);
+       cp.conn_interval_min = cpu_to_le16(0x0008);
+       cp.conn_interval_max = cpu_to_le16(0x0100);
+       cp.supervision_timeout = cpu_to_le16(0x0064);
+       cp.min_ce_len = cpu_to_le16(0x0001);
+       cp.max_ce_len = cpu_to_le16(0x0001);
+
+       hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+}
+
+static void hci_le_connect_cancel(struct hci_conn *conn)
+{
+       hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+}
+
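
hci_le_connect() above fills the LE Create Connection command with raw Bluetooth Core Specification units rather than milliseconds: scan interval and window are in 0.625 ms units, the connection interval in 1.25 ms units, and the supervision timeout in 10 ms units. A small hypothetical program, not part of the patch, that spells out what the hard-coded values amount to:

#include <stdio.h>

int main(void)
{
	/* Values from hci_le_connect() above, converted to milliseconds. */
	unsigned int scan_interval = 0x0004;	/* 0.625 ms units */
	unsigned int conn_int_min  = 0x0008;	/* 1.25 ms units  */
	unsigned int conn_int_max  = 0x0100;	/* 1.25 ms units  */
	unsigned int supv_timeout  = 0x0064;	/* 10 ms units    */

	printf("scan interval/window: %.3f ms\n", scan_interval * 0.625);
	printf("connection interval:  %.2f - %.2f ms\n",
	       conn_int_min * 1.25, conn_int_max * 1.25);
	printf("supervision timeout:  %u ms\n", supv_timeout * 10);
	return 0;
}
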
 void hci_acl_connect(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
 }
 
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+                                       u16 latency, u16 to_multiplier)
+{
+       struct hci_cp_le_conn_update cp;
+       struct hci_dev *hdev = conn->hdev;
+
+       memset(&cp, 0, sizeof(cp));
+
+       cp.handle               = cpu_to_le16(conn->handle);
+       cp.conn_interval_min    = cpu_to_le16(min);
+       cp.conn_interval_max    = cpu_to_le16(max);
+       cp.conn_latency         = cpu_to_le16(latency);
+       cp.supervision_timeout  = cpu_to_le16(to_multiplier);
+       cp.min_ce_len           = cpu_to_le16(0x0001);
+       cp.max_ce_len           = cpu_to_le16(0x0001);
+
+       hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_conn_update);
+
 /* Device _must_ be locked */
 void hci_sco_setup(struct hci_conn *conn, __u8 status)
 {
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
-               if (conn->type == ACL_LINK && conn->out)
-                       hci_acl_connect_cancel(conn);
+               if (conn->out) {
+                       if (conn->type == ACL_LINK)
+                               hci_acl_connect_cancel(conn);
+                       else if (conn->type == LE_LINK)
+                               hci_le_connect_cancel(conn);
+               }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
@@ -234,6 +285,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        conn->mode  = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
+       conn->io_capability = hdev->io_capability;
+       conn->remote_auth = 0xff;
 
        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +348,11 @@ int hci_conn_del(struct hci_conn *conn)
 
                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
+       } else if (conn->type == LE_LINK) {
+               if (hdev->le_pkts)
+                       hdev->le_cnt += conn->sent;
+               else
+                       hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
@@ -360,15 +418,31 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 }
 EXPORT_SYMBOL(hci_get_route);
 
-/* Create SCO or ACL connection.
+/* Create SCO, ACL or LE connection.
  * Device _must_ be locked */
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
 {
        struct hci_conn *acl;
        struct hci_conn *sco;
+       struct hci_conn *le;
 
        BT_DBG("%s dst %s", hdev->name, batostr(dst));
 
+       if (type == LE_LINK) {
+               le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+               if (le)
+                       return ERR_PTR(-EBUSY);
+               le = hci_conn_add(hdev, LE_LINK, dst);
+               if (!le)
+                       return ERR_PTR(-ENOMEM);
+               if (le->state == BT_OPEN)
+                       hci_le_connect(le);
+
+               hci_conn_hold(le);
+
+               return le;
+       }
+
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
@@ -379,14 +453,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
        hci_conn_hold(acl);
 
        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
-               acl->sec_level = sec_level;
+               acl->sec_level = BT_SECURITY_LOW;
+               acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
-       } else {
-               if (acl->sec_level < sec_level)
-                       acl->sec_level = sec_level;
-               if (acl->auth_type < auth_type)
-                       acl->auth_type = auth_type;
        }
 
        if (type == ACL_LINK)
@@ -442,11 +512,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
        BT_DBG("conn %p", conn);
 
+       if (conn->pending_sec_level > sec_level)
+               sec_level = conn->pending_sec_level;
+
        if (sec_level > conn->sec_level)
-               conn->sec_level = sec_level;
+               conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;
 
+       /* Make sure we preserve an existing MITM requirement */
+       auth_type |= (conn->auth_type & 0x01);
+
        conn->auth_type = auth_type;
 
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
index 8b602d8..b372fb8 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 #include <linux/rfkill.h>
+#include <linux/timer.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -50,6 +51,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+#define AUTO_OFF_TIMEOUT 2000
+
 static void hci_cmd_task(unsigned long arg);
 static void hci_rx_task(unsigned long arg);
 static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
 {
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
 
-       /* If the request has set req_last_cmd (typical for multi-HCI
-        * command requests) check if the completed command matches
-        * this, and if not just return. Single HCI command requests
-        * typically leave req_last_cmd as 0 */
-       if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
+       /* If this is the init phase check if the completed command matches
+        * the last init command, and if not just return.
+        */
+       if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;
 
        if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
 
 /* Execute request and wait for completion. */
 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
-                               unsigned long opt, __u32 timeout)
+                                       unsigned long opt, __u32 timeout)
 {
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
                break;
        }
 
-       hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
+       hdev->req_status = hdev->req_result = 0;
 
        BT_DBG("%s end: err %d", hdev->name, err);
 
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
 }
 
 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
-                               unsigned long opt, __u32 timeout)
+                                       unsigned long opt, __u32 timeout)
 {
        int ret;
 
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
 
 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 {
+       struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
 
-       /* Page timeout ~20 secs */
-       param = cpu_to_le16(0x8000);
-       hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
-
        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
-       hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
+       bacpy(&cp.bdaddr, BDADDR_ANY);
+       cp.delete_all = 1;
+       hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+}
+
+static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+       BT_DBG("%s", hdev->name);
+
+       /* Read LE buffer size */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
 }
 
 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;
 
-       if (!(hdev = hci_dev_get(ir.dev_id)))
+       hdev = hci_dev_get(ir.dev_id);
+       if (!hdev)
                return -ENODEV;
 
        hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
-       buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
+       buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
        struct hci_dev *hdev;
        int ret = 0;
 
-       if (!(hdev = hci_dev_get(dev)))
+       hdev = hci_dev_get(dev);
+       if (!hdev)
                return -ENODEV;
 
        BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
+               hdev->init_last_cmd = 0;
 
-               //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
+               if (lmp_le_capable(hdev))
+                       ret = __hci_request(hdev, hci_le_init_req, 0,
+                                       msecs_to_jiffies(HCI_INIT_TIMEOUT));
+
                clear_bit(HCI_INIT, &hdev->flags);
        }
 
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
+               if (!test_bit(HCI_SETUP, &hdev->flags))
+                       mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        /* Drop last sent command */
        if (hdev->sent_cmd) {
+               del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
+       mgmt_powered(hdev->id, 0);
+
        /* Clear flags */
        hdev->flags = 0;
 
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
                hdev->flush(hdev);
 
        atomic_set(&hdev->cmd_cnt, 1);
-       hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+       hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
 
        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
+
                hdev = list_entry(p, struct hci_dev, list);
+
+               hci_del_off_timer(hdev);
+
+               if (!test_bit(HCI_MGMT, &hdev->flags))
+                       set_bit(HCI_PAIRABLE, &hdev->flags);
+
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
+
                if (++n >= dev_num)
                        break;
        }
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
+       hci_del_off_timer(hdev);
+
+       if (!test_bit(HCI_MGMT, &hdev->flags))
+               set_bit(HCI_PAIRABLE, &hdev->flags);
+
        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
 }
 EXPORT_SYMBOL(hci_free_dev);
 
+static void hci_power_on(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+
+       BT_DBG("%s", hdev->name);
+
+       if (hci_dev_open(hdev->id) < 0)
+               return;
+
+       if (test_bit(HCI_AUTO_OFF, &hdev->flags))
+               mod_timer(&hdev->off_timer,
+                               jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+
+       if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
+               mgmt_index_added(hdev->id);
+}
+
+static void hci_power_off(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_close(hdev->id);
+}
+
+static void hci_auto_off(unsigned long data)
+{
+       struct hci_dev *hdev = (struct hci_dev *) data;
+
+       BT_DBG("%s", hdev->name);
+
+       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
+       queue_work(hdev->workqueue, &hdev->power_off);
+}
+
+void hci_del_off_timer(struct hci_dev *hdev)
+{
+       BT_DBG("%s", hdev->name);
+
+       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+       del_timer(&hdev->off_timer);
+}
+
+int hci_uuids_clear(struct hci_dev *hdev)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &hdev->uuids) {
+               struct bt_uuid *uuid;
+
+               uuid = list_entry(p, struct bt_uuid, list);
+
+               list_del(p);
+               kfree(uuid);
+       }
+
+       return 0;
+}
+
+int hci_link_keys_clear(struct hci_dev *hdev)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &hdev->link_keys) {
+               struct link_key *key;
+
+               key = list_entry(p, struct link_key, list);
+
+               list_del(p);
+               kfree(key);
+       }
+
+       return 0;
+}
+
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct list_head *p;
+
+       list_for_each(p, &hdev->link_keys) {
+               struct link_key *k;
+
+               k = list_entry(p, struct link_key, list);
+
+               if (bacmp(bdaddr, &k->bdaddr) == 0)
+                       return k;
+       }
+
+       return NULL;
+}
+
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+                                               u8 *val, u8 type, u8 pin_len)
+{
+       struct link_key *key, *old_key;
+       u8 old_key_type;
+
+       old_key = hci_find_link_key(hdev, bdaddr);
+       if (old_key) {
+               old_key_type = old_key->type;
+               key = old_key;
+       } else {
+               old_key_type = 0xff;
+               key = kzalloc(sizeof(*key), GFP_ATOMIC);
+               if (!key)
+                       return -ENOMEM;
+               list_add(&key->list, &hdev->link_keys);
+       }
+
+       BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+
+       bacpy(&key->bdaddr, bdaddr);
+       memcpy(key->val, val, 16);
+       key->type = type;
+       key->pin_len = pin_len;
+
+       if (new_key)
+               mgmt_new_key(hdev->id, key, old_key_type);
+
+       if (type == 0x06)
+               key->type = old_key_type;
+
+       return 0;
+}
+
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct link_key *key;
+
+       key = hci_find_link_key(hdev, bdaddr);
+       if (!key)
+               return -ENOENT;
+
+       BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+       list_del(&key->list);
+       kfree(key);
+
+       return 0;
+}
+
+/* HCI command timer function */
+static void hci_cmd_timer(unsigned long arg)
+{
+       struct hci_dev *hdev = (void *) arg;
+
+       BT_ERR("%s command tx timeout", hdev->name);
+       atomic_set(&hdev->cmd_cnt, 1);
+       tasklet_schedule(&hdev->cmd_task);
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
+       hdev->io_capability = 0x03; /* No Input No Output */
 
        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);
 
+       setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+
        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;
 
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
 
        INIT_LIST_HEAD(&hdev->blacklist);
 
+       INIT_LIST_HEAD(&hdev->uuids);
+
+       INIT_LIST_HEAD(&hdev->link_keys);
+
+       INIT_WORK(&hdev->power_on, hci_power_on);
+       INIT_WORK(&hdev->power_off, hci_power_off);
+       setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+
        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
        atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
                }
        }
 
-       mgmt_index_added(hdev->id);
+       set_bit(HCI_AUTO_OFF, &hdev->flags);
+       set_bit(HCI_SETUP, &hdev->flags);
+       queue_work(hdev->workqueue, &hdev->power_on);
+
        hci_notify(hdev, HCI_DEV_REG);
 
        return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);
 
-       mgmt_index_removed(hdev->id);
+       if (!test_bit(HCI_INIT, &hdev->flags) &&
+                                       !test_bit(HCI_SETUP, &hdev->flags))
+               mgmt_index_removed(hdev->id);
+
        hci_notify(hdev, HCI_DEV_UNREG);
 
        if (hdev->rfkill) {
@@ -1009,8 +1212,16 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
        hci_unregister_sysfs(hdev);
 
+       hci_del_off_timer(hdev);
+
        destroy_workqueue(hdev->workqueue);
 
+       hci_dev_lock_bh(hdev);
+       hci_blacklist_clear(hdev);
+       hci_uuids_clear(hdev);
+       hci_link_keys_clear(hdev);
+       hci_dev_unlock_bh(hdev);
+
        __hci_dev_put(hdev);
 
        return 0;
@@ -1309,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
                /* Time stamp */
                __net_timestamp(skb);
 
-               hci_send_to_sock(hdev, skb);
+               hci_send_to_sock(hdev, skb, NULL);
        }
 
        /* Get rid of skb owner, prior to sending to the driver. */
@@ -1345,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
 
+       if (test_bit(HCI_INIT, &hdev->flags))
+               hdev->init_last_cmd = opcode;
+
        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);
 
@@ -1391,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-       hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
+       hci_add_acl_hdr(skb, conn->handle, flags);
 
        list = skb_shinfo(skb)->frag_list;
        if (!list) {
@@ -1409,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
                spin_lock_bh(&conn->data_q.lock);
 
                __skb_queue_tail(&conn->data_q, skb);
+
+               flags &= ~ACL_START;
+               flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;
 
                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-                       hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
+                       hci_add_acl_hdr(skb, conn->handle, flags);
 
                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
@@ -1482,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
        }
 
        if (conn) {
-               int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
-               int q = cnt / num;
+               int cnt, q;
+
+               switch (conn->type) {
+               case ACL_LINK:
+                       cnt = hdev->acl_cnt;
+                       break;
+               case SCO_LINK:
+               case ESCO_LINK:
+                       cnt = hdev->sco_cnt;
+                       break;
+               case LE_LINK:
+                       cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+                       break;
+               default:
+                       cnt = 0;
+                       BT_ERR("Unknown link type");
+               }
+
+               q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;
@@ -1492,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
        return conn;
 }
 
-static inline void hci_acl_tx_to(struct hci_dev *hdev)
+static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;
 
-       BT_ERR("%s ACL tx timeout", hdev->name);
+       BT_ERR("%s link tx timeout", hdev->name);
 
        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
-               if (c->type == ACL_LINK && c->sent) {
-                       BT_ERR("%s killing stalled ACL connection %s",
+               if (c->type == type && c->sent) {
+                       BT_ERR("%s killing stalled connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
@@ -1523,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
-                       hci_acl_tx_to(hdev);
+                       hci_link_tx_to(hdev, ACL_LINK);
        }
 
        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1582,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
        }
 }
 
+static inline void hci_sched_le(struct hci_dev *hdev)
+{
+       struct hci_conn *conn;
+       struct sk_buff *skb;
+       int quote, cnt;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_RAW, &hdev->flags)) {
+               /* LE tx timeout must be longer than maximum
+                * link supervision timeout (40.9 seconds) */
+               if (!hdev->le_cnt && hdev->le_pkts &&
+                               time_after(jiffies, hdev->le_last_tx + HZ * 45))
+                       hci_link_tx_to(hdev, LE_LINK);
+       }
+
+       cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+       while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
+               while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+                       BT_DBG("skb %p len %d", skb, skb->len);
+
+                       hci_send_frame(skb);
+                       hdev->le_last_tx = jiffies;
+
+                       cnt--;
+                       conn->sent++;
+               }
+       }
+       if (hdev->le_pkts)
+               hdev->le_cnt = cnt;
+       else
+               hdev->acl_cnt = cnt;
+}
+
 static void hci_tx_task(unsigned long arg)
 {
        struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1589,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
 
        read_lock(&hci_task_lock);
 
-       BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
+       BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
+               hdev->sco_cnt, hdev->le_cnt);
 
        /* Schedule queues and send stuff to HCI driver */
 
@@ -1599,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
 
        hci_sched_esco(hdev);
 
+       hci_sched_le(hdev);
+
        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
@@ -1696,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
-                       hci_send_to_sock(hdev, skb);
+                       hci_send_to_sock(hdev, skb, NULL);
                }
 
                if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1746,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
 
        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
 
-       if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
-               BT_ERR("%s command tx timeout", hdev->name);
-               atomic_set(&hdev->cmd_cnt, 1);
-       }
-
        /* Send queued commands */
-       if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
+       if (atomic_read(&hdev->cmd_cnt)) {
+               skb = skb_dequeue(&hdev->cmd_q);
+               if (!skb)
+                       return;
+
                kfree_skb(hdev->sent_cmd);
 
                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
-                       hdev->cmd_last_tx = jiffies;
+                       mod_timer(&hdev->cmd_timer,
+                                 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
index 3810017..3fbfa50 100644 (file)
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        if (!status) {
                __u8 param = *((__u8 *) sent);
+               int old_pscan, old_iscan;
 
-               clear_bit(HCI_PSCAN, &hdev->flags);
-               clear_bit(HCI_ISCAN, &hdev->flags);
+               old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+               old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
 
-               if (param & SCAN_INQUIRY)
+               if (param & SCAN_INQUIRY) {
                        set_bit(HCI_ISCAN, &hdev->flags);
+                       if (!old_iscan)
+                               mgmt_discoverable(hdev->id, 1);
+               } else if (old_iscan)
+                       mgmt_discoverable(hdev->id, 0);
 
-               if (param & SCAN_PAGE)
+               if (param & SCAN_PAGE) {
                        set_bit(HCI_PSCAN, &hdev->flags);
+                       if (!old_pscan)
+                               mgmt_connectable(hdev->id, 1);
+               } else if (old_pscan)
+                       mgmt_connectable(hdev->id, 0);
        }
 
        hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
        hdev->ssp_mode = *((__u8 *) sent);
 }
 
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+       if (hdev->features[6] & LMP_EXT_INQ)
+               return 2;
+
+       if (hdev->features[3] & LMP_RSSI_INQ)
+               return 1;
+
+       if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+                                               hdev->lmp_subver == 0x0757)
+               return 1;
+
+       if (hdev->manufacturer == 15) {
+               if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+                       return 1;
+               if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+                       return 1;
+               if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+                       return 1;
+       }
+
+       if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+                                               hdev->lmp_subver == 0x1805)
+               return 1;
+
+       return 0;
+}
+
+static void hci_setup_inquiry_mode(struct hci_dev *hdev)
+{
+       u8 mode;
+
+       mode = hci_get_inquiry_mode(hdev);
+
+       hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_dev *hdev)
+{
+       /* The second byte is 0xff instead of 0x9f (two reserved bits
+        * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+        * command otherwise */
+       u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+       /* Events for 1.2 and newer controllers */
+       if (hdev->lmp_ver > 1) {
+               events[4] |= 0x01; /* Flow Specification Complete */
+               events[4] |= 0x02; /* Inquiry Result with RSSI */
+               events[4] |= 0x04; /* Read Remote Extended Features Complete */
+               events[5] |= 0x08; /* Synchronous Connection Complete */
+               events[5] |= 0x10; /* Synchronous Connection Changed */
+       }
+
+       if (hdev->features[3] & LMP_RSSI_INQ)
+               events[4] |= 0x04; /* Inquiry Result with RSSI */
+
+       if (hdev->features[5] & LMP_SNIFF_SUBR)
+               events[5] |= 0x20; /* Sniff Subrating */
+
+       if (hdev->features[5] & LMP_PAUSE_ENC)
+               events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+       if (hdev->features[6] & LMP_EXT_INQ)
+               events[5] |= 0x40; /* Extended Inquiry Result */
+
+       if (hdev->features[6] & LMP_NO_FLUSH)
+               events[7] |= 0x01; /* Enhanced Flush Complete */
+
+       if (hdev->features[7] & LMP_LSTO)
+               events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+               events[6] |= 0x01;      /* IO Capability Request */
+               events[6] |= 0x02;      /* IO Capability Response */
+               events[6] |= 0x04;      /* User Confirmation Request */
+               events[6] |= 0x08;      /* User Passkey Request */
+               events[6] |= 0x10;      /* Remote OOB Data Request */
+               events[6] |= 0x20;      /* Simple Pairing Complete */
+               events[7] |= 0x04;      /* User Passkey Notification */
+               events[7] |= 0x08;      /* Keypress Notification */
+               events[7] |= 0x10;      /* Remote Host Supported
+                                        * Features Notification */
+       }
+
+       if (hdev->features[4] & LMP_LE)
+               events[7] |= 0x20;      /* LE Meta-Event */
+
+       hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+}
+
+static void hci_setup(struct hci_dev *hdev)
+{
+       hci_setup_event_mask(hdev);
+
+       if (hdev->lmp_ver > 1)
+               hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+               u8 mode = 0x01;
+               hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
+       }
+
+       if (hdev->features[3] & LMP_RSSI_INQ)
+               hci_setup_inquiry_mode(hdev);
+
+       if (hdev->features[7] & LMP_INQ_TX_PWR)
+               hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+}
+
 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_local_version *rp = (void *) skb->data;
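
hci_setup_event_mask() in the hunk above builds the Set Event Mask parameter by starting from a fixed 8-byte mask and OR-ing in extra bits only for features the controller advertises, keeping the second byte at 0xff as the Broadcom 1.2 workaround explains. The following compilable userspace sketch shows the same conditional bit-OR construction; the inputs are plain booleans rather than the kernel's hdev->features[]/LMP_* macros, and only the event groups listed in this patch are reproduced.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_event_mask(uint8_t events[8], int lmp_ver,
                             int sniff_subrating, int simple_pairing)
{
        /* events[1] stays 0xff rather than 0x9f, mirroring the Broadcom
         * 1.2 workaround noted in the patch */
        static const uint8_t base[8] = { 0xff, 0xff, 0xfb, 0xff, 0, 0, 0, 0 };

        memcpy(events, base, 8);

        if (lmp_ver > 1) {
                events[4] |= 0x01 | 0x02 | 0x04; /* flow spec, inquiry with RSSI,
                                                    remote ext features complete */
                events[5] |= 0x08 | 0x10;        /* sync conn complete/changed */
        }

        if (sniff_subrating)
                events[5] |= 0x20;               /* Sniff Subrating */

        if (simple_pairing) {
                events[6] |= 0x3f;               /* IO capability, confirmation,
                                                    passkey, OOB, SSP complete */
                events[7] |= 0x1c;               /* passkey/keypress/remote host
                                                    features notifications */
        }
}

int main(void)
{
        uint8_t events[8];
        int i;

        build_event_mask(events, 2, 1, 1);
        for (i = 0; i < 8; i++)
                printf("%02x ", events[i]);
        printf("\n");
        return 0;
}
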
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 
        hdev->hci_ver = rp->hci_ver;
        hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+       hdev->lmp_ver = rp->lmp_ver;
        hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+       hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 
        BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
                                        hdev->manufacturer,
                                        hdev->hci_ver, hdev->hci_rev);
+
+       if (test_bit(HCI_INIT, &hdev->flags))
+               hci_setup(hdev);
+}
+
+static void hci_setup_link_policy(struct hci_dev *hdev)
+{
+       u16 link_policy = 0;
+
+       if (hdev->features[0] & LMP_RSWITCH)
+               link_policy |= HCI_LP_RSWITCH;
+       if (hdev->features[0] & LMP_HOLD)
+               link_policy |= HCI_LP_HOLD;
+       if (hdev->features[0] & LMP_SNIFF)
+               link_policy |= HCI_LP_SNIFF;
+       if (hdev->features[1] & LMP_PARK)
+               link_policy |= HCI_LP_PARK;
+
+       link_policy = cpu_to_le16(link_policy);
+       hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+                                       sizeof(link_policy), &link_policy);
 }
 
 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
        if (rp->status)
-               return;
+               goto done;
 
        memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+
+       if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
+               hci_setup_link_policy(hdev);
+
+done:
+       hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
 }
 
 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -548,6 +695,130 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
        hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
 }
 
+static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
+}
+
+static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
+}
+
+static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
+}
+
+static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+}
+
+static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
+}
+
+static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+       struct hci_cp_pin_code_reply *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+
+       if (rp->status != 0)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
+       if (!cp)
+               return;
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+       if (conn)
+               conn->pin_length = cp->pin_len;
+}
+
+static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+                                                               rp->status);
+}
+
+static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
+                                      struct sk_buff *skb)
+{
+       struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
+       hdev->le_pkts = rp->le_max_pkt;
+
+       hdev->le_cnt = hdev->le_pkts;
+
+       BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
+       hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
+}
+
+static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+                                                               rp->status);
+}
+
+static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+                                                               rp->status);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
        BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +893,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
        hci_dev_lock(hdev);
 
        acl = hci_conn_hash_lookup_handle(hdev, handle);
-       if (acl && (sco = acl->link)) {
-               sco->state = BT_CLOSED;
+       if (acl) {
+               sco = acl->link;
+               if (sco) {
+                       sco->state = BT_CLOSED;
 
-               hci_proto_connect_cfm(sco, status);
-               hci_conn_del(sco);
+                       hci_proto_connect_cfm(sco, status);
+                       hci_conn_del(sco);
+               }
        }
 
        hci_dev_unlock(hdev);
@@ -687,18 +961,18 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
 }
 
 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
-                                               struct hci_conn *conn)
+                                                       struct hci_conn *conn)
 {
        if (conn->state != BT_CONFIG || !conn->out)
                return 0;
 
-       if (conn->sec_level == BT_SECURITY_SDP)
+       if (conn->pending_sec_level == BT_SECURITY_SDP)
                return 0;
 
        /* Only request authentication for SSP connections or non-SSP
         * devices with sec_level HIGH */
        if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
-                                       conn->sec_level != BT_SECURITY_HIGH)
+                               conn->pending_sec_level != BT_SECURITY_HIGH)
                return 0;
 
        return 1;
@@ -808,11 +1082,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
        hci_dev_lock(hdev);
 
        acl = hci_conn_hash_lookup_handle(hdev, handle);
-       if (acl && (sco = acl->link)) {
-               sco->state = BT_CLOSED;
+       if (acl) {
+               sco = acl->link;
+               if (sco) {
+                       sco->state = BT_CLOSED;
 
-               hci_proto_connect_cfm(sco, status);
-               hci_conn_del(sco);
+                       hci_proto_connect_cfm(sco, status);
+                       hci_conn_del(sco);
+               }
        }
 
        hci_dev_unlock(hdev);
@@ -872,6 +1149,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
        hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
+{
+       struct hci_cp_le_create_conn *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+
+       BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
+               conn);
+
+       if (status) {
+               if (conn && conn->state == BT_CONNECT) {
+                       conn->state = BT_CLOSED;
+                       hci_proto_connect_cfm(conn, status);
+                       hci_conn_del(conn);
+               }
+       } else {
+               if (!conn) {
+                       conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
+                       if (conn)
+                               conn->out = 1;
+                       else
+                               BT_ERR("No memory for new connection");
+               }
+       }
+
+       hci_dev_unlock(hdev);
+}
+
 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -942,6 +1256,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        conn->state = BT_CONFIG;
                        hci_conn_hold(conn);
                        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+                       mgmt_connected(hdev->id, &ev->bdaddr);
                } else
                        conn->state = BT_CONNECTED;
 
@@ -970,8 +1285,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
                                                        sizeof(cp), &cp);
                }
-       } else
+       } else {
                conn->state = BT_CLOSED;
+               if (conn->type == ACL_LINK)
+                       mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+       }
 
        if (conn->type == ACL_LINK)
                hci_sco_setup(conn, ev->status);
@@ -998,7 +1316,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 
        mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
 
-       if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+       if ((mask & HCI_LM_ACCEPT) &&
+                       !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
                /* Connection accepted */
                struct inquiry_entry *ie;
                struct hci_conn *conn;
@@ -1068,19 +1387,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, ev->status);
 
-       if (ev->status)
+       if (ev->status) {
+               mgmt_disconnect_failed(hdev->id);
                return;
+       }
 
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-       if (conn) {
-               conn->state = BT_CLOSED;
+       if (!conn)
+               goto unlock;
 
-               hci_proto_disconn_cfm(conn, ev->reason);
-               hci_conn_del(conn);
-       }
+       conn->state = BT_CLOSED;
 
+       if (conn->type == ACL_LINK)
+               mgmt_disconnected(hdev->id, &conn->dst);
+
+       hci_proto_disconn_cfm(conn, ev->reason);
+       hci_conn_del(conn);
+
+unlock:
        hci_dev_unlock(hdev);
 }
 
@@ -1095,10 +1421,13 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (conn) {
-               if (!ev->status)
+               if (!ev->status) {
                        conn->link_mode |= HCI_LM_AUTH;
-               else
+                       conn->sec_level = conn->pending_sec_level;
+               } else {
+                       mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
                        conn->sec_level = BT_SECURITY_LOW;
+               }
 
                clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
@@ -1392,11 +1721,54 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
                hci_cc_write_ca_timeout(hdev, skb);
                break;
 
+       case HCI_OP_DELETE_STORED_LINK_KEY:
+               hci_cc_delete_stored_link_key(hdev, skb);
+               break;
+
+       case HCI_OP_SET_EVENT_MASK:
+               hci_cc_set_event_mask(hdev, skb);
+               break;
+
+       case HCI_OP_WRITE_INQUIRY_MODE:
+               hci_cc_write_inquiry_mode(hdev, skb);
+               break;
+
+       case HCI_OP_READ_INQ_RSP_TX_POWER:
+               hci_cc_read_inq_rsp_tx_power(hdev, skb);
+               break;
+
+       case HCI_OP_SET_EVENT_FLT:
+               hci_cc_set_event_flt(hdev, skb);
+               break;
+
+       case HCI_OP_PIN_CODE_REPLY:
+               hci_cc_pin_code_reply(hdev, skb);
+               break;
+
+       case HCI_OP_PIN_CODE_NEG_REPLY:
+               hci_cc_pin_code_neg_reply(hdev, skb);
+               break;
+
+       case HCI_OP_LE_READ_BUFFER_SIZE:
+               hci_cc_le_read_buffer_size(hdev, skb);
+               break;
+
+       case HCI_OP_USER_CONFIRM_REPLY:
+               hci_cc_user_confirm_reply(hdev, skb);
+               break;
+
+       case HCI_OP_USER_CONFIRM_NEG_REPLY:
+               hci_cc_user_confirm_neg_reply(hdev, skb);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%x", hdev->name, opcode);
                break;
        }
 
+       if (ev->opcode != HCI_OP_NOP)
+               del_timer(&hdev->cmd_timer);
+
        if (ev->ncmd) {
                atomic_set(&hdev->cmd_cnt, 1);
                if (!skb_queue_empty(&hdev->cmd_q))
@@ -1458,11 +1830,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cs_exit_sniff_mode(hdev, ev->status);
                break;
 
+       case HCI_OP_DISCONNECT:
+               if (ev->status != 0)
+                       mgmt_disconnect_failed(hdev->id);
+               break;
+
+       case HCI_OP_LE_CREATE_CONN:
+               hci_cs_le_create_conn(hdev, ev->status);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%x", hdev->name, opcode);
                break;
        }
 
+       if (ev->opcode != HCI_OP_NOP)
+               del_timer(&hdev->cmd_timer);
+
        if (ev->ncmd) {
                atomic_set(&hdev->cmd_cnt, 1);
                if (!skb_queue_empty(&hdev->cmd_q))
@@ -1528,6 +1912,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
                                hdev->acl_cnt += count;
                                if (hdev->acl_cnt > hdev->acl_pkts)
                                        hdev->acl_cnt = hdev->acl_pkts;
+                       } else if (conn->type == LE_LINK) {
+                               if (hdev->le_pkts) {
+                                       hdev->le_cnt += count;
+                                       if (hdev->le_cnt > hdev->le_pkts)
+                                               hdev->le_cnt = hdev->le_pkts;
+                               } else {
+                                       hdev->acl_cnt += count;
+                                       if (hdev->acl_cnt > hdev->acl_pkts)
+                                               hdev->acl_cnt = hdev->acl_pkts;
+                               }
                        } else {
                                hdev->sco_cnt += count;
                                if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1585,18 +1979,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
                hci_conn_put(conn);
        }
 
+       if (!test_bit(HCI_PAIRABLE, &hdev->flags))
+               hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+                                       sizeof(ev->bdaddr), &ev->bdaddr);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_pin_code_request(hdev->id, &ev->bdaddr);
+
        hci_dev_unlock(hdev);
 }
 
 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+       struct hci_ev_link_key_req *ev = (void *) skb->data;
+       struct hci_cp_link_key_reply cp;
+       struct hci_conn *conn;
+       struct link_key *key;
+
        BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
+               return;
+
+       hci_dev_lock(hdev);
+
+       key = hci_find_link_key(hdev, &ev->bdaddr);
+       if (!key) {
+               BT_DBG("%s link key not found for %s", hdev->name,
+                                                       batostr(&ev->bdaddr));
+               goto not_found;
+       }
+
+       BT_DBG("%s found key type %u for %s", hdev->name, key->type,
+                                                       batostr(&ev->bdaddr));
+
+       if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
+               BT_DBG("%s ignoring debug key", hdev->name);
+               goto not_found;
+       }
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+
+       if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
+                                               (conn->auth_type & 0x01)) {
+               BT_DBG("%s ignoring unauthenticated key", hdev->name);
+               goto not_found;
+       }
+
+       bacpy(&cp.bdaddr, &ev->bdaddr);
+       memcpy(cp.link_key, key->val, 16);
+
+       hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
+
+       hci_dev_unlock(hdev);
+
+       return;
+
+not_found:
+       hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
+       hci_dev_unlock(hdev);
 }
 
 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_link_key_notify *ev = (void *) skb->data;
        struct hci_conn *conn;
+       u8 pin_len = 0;
 
        BT_DBG("%s", hdev->name);
 
@@ -1606,9 +2054,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
        if (conn) {
                hci_conn_hold(conn);
                conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+               pin_len = conn->pin_length;
                hci_conn_put(conn);
        }
 
+       if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+               hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
+                                                       ev->key_type, pin_len);
+
        hci_dev_unlock(hdev);
 }
 
@@ -1682,7 +2135,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
        hci_dev_lock(hdev);
 
        if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
-               struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
+               struct inquiry_info_with_rssi_and_pscan_mode *info;
+               info = (void *) (skb->data + 1);
 
                for (; num_rsp; num_rsp--) {
                        bacpy(&data.bdaddr, &info->bdaddr);
@@ -1823,17 +2277,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_sniff_subrate *ev = (void *) skb->data;
-       struct hci_conn *conn;
 
        BT_DBG("%s status %d", hdev->name, ev->status);
-
-       hci_dev_lock(hdev);
-
-       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-       if (conn) {
-       }
-
-       hci_dev_unlock(hdev);
 }
 
 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1851,12 +2296,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 
        for (; num_rsp; num_rsp--) {
                bacpy(&data.bdaddr, &info->bdaddr);
-               data.pscan_rep_mode     = info->pscan_rep_mode;
-               data.pscan_period_mode  = info->pscan_period_mode;
-               data.pscan_mode         = 0x00;
+               data.pscan_rep_mode     = info->pscan_rep_mode;
+               data.pscan_period_mode  = info->pscan_period_mode;
+               data.pscan_mode         = 0x00;
                memcpy(data.dev_class, info->dev_class, 3);
-               data.clock_offset       = info->clock_offset;
-               data.rssi               = info->rssi;
+               data.clock_offset       = info->clock_offset;
+               data.rssi               = info->rssi;
                data.ssp_mode           = 0x01;
                info++;
                hci_inquiry_cache_update(hdev, &data);
@@ -1865,6 +2310,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
        hci_dev_unlock(hdev);
 }
 
+static inline u8 hci_get_auth_req(struct hci_conn *conn)
+{
+       /* If remote requests dedicated bonding follow that lead */
+       if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+               /* If both remote and local IO capabilities allow MITM
+                * protection then require it, otherwise don't */
+               if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
+                       return 0x02;
+               else
+                       return 0x03;
+       }
+
+       /* If remote requests no-bonding follow that lead */
+       if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+               return 0x00;
+
+       return conn->auth_type;
+}
+
 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_io_capa_request *ev = (void *) skb->data;
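
hci_get_auth_req() above decides what authentication requirement to put in the IO Capability Reply: it follows the remote side's dedicated-bonding or no-bonding request, and asks for MITM protection only when neither side's IO capability is NoInputNoOutput. A standalone sketch of that decision table follows; the 0x00-0x03 bonding/MITM encodings and the 0x03 NoInputNoOutput value follow the HCI specification, while the function and parameter names are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint8_t pick_auth_req(uint8_t remote_auth, uint8_t remote_cap,
                             uint8_t local_cap, uint8_t local_auth)
{
        /* Remote asked for dedicated bonding (0x02/0x03): follow that,
         * requiring MITM only if both IO capabilities can support it */
        if (remote_auth == 0x02 || remote_auth == 0x03)
                return (remote_cap == 0x03 || local_cap == 0x03) ? 0x02 : 0x03;

        /* Remote asked for no bonding (0x00/0x01): follow that too */
        if (remote_auth == 0x00 || remote_auth == 0x01)
                return 0x00;

        return local_auth;              /* otherwise keep what we wanted */
}

int main(void)
{
        /* Remote wants dedicated bonding with MITM (0x03) but is
         * NoInputNoOutput (0x03), so the best we can answer is
         * dedicated bonding without MITM (0x02). */
        printf("auth req = 0x%02x\n", pick_auth_req(0x03, 0x03, 0x01, 0x04));
        return 0;
}
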
@@ -1875,8 +2339,73 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-       if (conn)
-               hci_conn_hold(conn);
+       if (!conn)
+               goto unlock;
+
+       hci_conn_hold(conn);
+
+       if (!test_bit(HCI_MGMT, &hdev->flags))
+               goto unlock;
+
+       if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
+                       (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+               struct hci_cp_io_capability_reply cp;
+
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               cp.capability = conn->io_capability;
+               cp.oob_data = 0;
+               cp.authentication = hci_get_auth_req(conn);
+
+               hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
+                                                       sizeof(cp), &cp);
+       } else {
+               struct hci_cp_io_capability_neg_reply cp;
+
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               cp.reason = 0x16; /* Pairing not allowed */
+
+               hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
+                                                       sizeof(cp), &cp);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_io_capa_reply *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+       if (!conn)
+               goto unlock;
+
+       hci_conn_hold(conn);
+
+       conn->remote_cap = ev->capability;
+       conn->remote_oob = ev->oob_data;
+       conn->remote_auth = ev->authentication;
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_ev_user_confirm_req *ev = (void *) skb->data;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
 
        hci_dev_unlock(hdev);
 }
@@ -1891,9 +2420,20 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-       if (conn)
-               hci_conn_put(conn);
+       if (!conn)
+               goto unlock;
+
+       /* To avoid duplicate auth_failed events to user space we check
+        * the HCI_CONN_AUTH_PEND flag which will be set if we
+        * initiated the authentication. A traditional auth_complete
+        * event gets always produced as initiator and is also mapped to
+        * the mgmt_auth_failed event */
+       if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
+               mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+
+       hci_conn_put(conn);
 
+unlock:
        hci_dev_unlock(hdev);
 }
 
@@ -1913,6 +2453,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
        hci_dev_unlock(hdev);
 }
 
+static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status %d", hdev->name, ev->status);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
+       if (!conn) {
+               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+               if (!conn) {
+                       BT_ERR("No memory for new connection");
+                       hci_dev_unlock(hdev);
+                       return;
+               }
+       }
+
+       if (ev->status) {
+               hci_proto_connect_cfm(conn, ev->status);
+               conn->state = BT_CLOSED;
+               hci_conn_del(conn);
+               goto unlock;
+       }
+
+       conn->handle = __le16_to_cpu(ev->handle);
+       conn->state = BT_CONNECTED;
+
+       hci_conn_hold_device(conn);
+       hci_conn_add_sysfs(conn);
+
+       hci_proto_connect_cfm(conn, ev->status);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_le_meta *le_ev = (void *) skb->data;
+
+       skb_pull(skb, sizeof(*le_ev));
+
+       switch (le_ev->subevent) {
+       case HCI_EV_LE_CONN_COMPLETE:
+               hci_le_conn_complete_evt(hdev, skb);
+               break;
+
+       default:
+               break;
+       }
+}
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2041,6 +2635,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_io_capa_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_IO_CAPA_REPLY:
+               hci_io_capa_reply_evt(hdev, skb);
+               break;
+
+       case HCI_EV_USER_CONFIRM_REQUEST:
+               hci_user_confirm_request_evt(hdev, skb);
+               break;
+
        case HCI_EV_SIMPLE_PAIR_COMPLETE:
                hci_simple_pair_complete_evt(hdev, skb);
                break;
@@ -2049,6 +2651,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_remote_host_features_evt(hdev, skb);
                break;
 
+       case HCI_EV_LE_META:
+               hci_le_meta_evt(hdev, skb);
+               break;
+
        default:
                BT_DBG("%s event 0x%x", hdev->name, event);
                break;
@@ -2082,6 +2688,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        skb->dev = (void *) hdev;
-       hci_send_to_sock(hdev, skb);
+       hci_send_to_sock(hdev, skb, NULL);
        kfree_skb(skb);
 }
index 29827c7..295e4a8 100644 (file)
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
 };
 
 /* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+                                                       struct sock *skip_sk)
 {
        struct sock *sk;
        struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
                struct hci_filter *flt;
                struct sk_buff *nskb;
 
+               if (sk == skip_sk)
+                       continue;
+
                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;
 
@@ -857,7 +861,7 @@ error:
        return err;
 }
 
-void __exit hci_sock_cleanup(void)
+void hci_sock_cleanup(void)
 {
        if (bt_sock_unregister(BTPROTO_HCI) < 0)
                BT_ERR("HCI socket unregistration failed");
index 5fce3d6..3c838a6 100644 (file)
@@ -11,7 +11,7 @@
 
 static struct class *bt_class;
 
-struct dentry *bt_debugfs = NULL;
+struct dentry *bt_debugfs;
 EXPORT_SYMBOL_GPL(bt_debugfs);
 
 static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
                                conn->features[6], conn->features[7]);
 }
 
-#define LINK_ATTR(_name,_mode,_show,_store) \
-struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
+#define LINK_ATTR(_name, _mode, _show, _store) \
+struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
 
 static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
 static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
        .llseek         = seq_lseek,
        .release        = single_release,
 };
+
+static void print_bt_uuid(struct seq_file *f, u8 *uuid)
+{
+       u32 data0, data4;
+       u16 data1, data2, data3, data5;
+
+       memcpy(&data0, &uuid[0], 4);
+       memcpy(&data1, &uuid[4], 2);
+       memcpy(&data2, &uuid[6], 2);
+       memcpy(&data3, &uuid[8], 2);
+       memcpy(&data4, &uuid[10], 4);
+       memcpy(&data5, &uuid[14], 2);
+
+       seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
+                               ntohl(data0), ntohs(data1), ntohs(data2),
+                               ntohs(data3), ntohl(data4), ntohs(data5));
+}
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct list_head *l;
+
+       hci_dev_lock_bh(hdev);
+
+       list_for_each(l, &hdev->uuids) {
+               struct bt_uuid *uuid;
+
+               uuid = list_entry(l, struct bt_uuid, list);
+
+               print_bt_uuid(f, uuid->uuid);
+       }
+
+       hci_dev_unlock_bh(hdev);
+
+       return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+       .open           = uuids_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 int hci_register_sysfs(struct hci_dev *hdev)
 {
        struct device *dev = &hdev->dev;
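
The new debugfs "uuids" file formats each stored 128-bit UUID with print_bt_uuid(), slicing the raw buffer into 32/16-bit groups and printing them in 8-4-4-4-12 form. The same slicing works as ordinary userspace C; here is a compilable sketch that swaps seq_printf() for printf(), with the sample buffer laid out so each group is in network byte order as the ntohl()/ntohs() calls expect (the test vector is chosen so it prints the L2CAP UUID under the Bluetooth base UUID).

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void print_uuid(const uint8_t *uuid)
{
        uint32_t d0, d4;
        uint16_t d1, d2, d3, d5;

        memcpy(&d0, &uuid[0], 4);
        memcpy(&d1, &uuid[4], 2);
        memcpy(&d2, &uuid[6], 2);
        memcpy(&d3, &uuid[8], 2);
        memcpy(&d4, &uuid[10], 4);
        memcpy(&d5, &uuid[14], 2);

        printf("%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
               ntohl(d0), ntohs(d1), ntohs(d2),
               ntohs(d3), ntohl(d4), ntohs(d5));
}

int main(void)
{
        /* prints 00000100-0000-1000-8000-00805f9b34fb */
        static const uint8_t uuid[16] = {
                0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00,
                0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb
        };

        print_uuid(uuid);
        return 0;
}
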
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
        debugfs_create_file("blacklist", 0444, hdev->debugfs,
                                                hdev, &blacklist_fops);
 
+       debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
        return 0;
 }
 
index 29544c2..2429ca2 100644 (file)
@@ -157,7 +157,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
 
        session->leds = newleds;
 
-       if (!(skb = alloc_skb(3, GFP_ATOMIC))) {
+       skb = alloc_skb(3, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
                return -ENOMEM;
        }
@@ -250,7 +251,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
 
        BT_DBG("session %p data %p size %d", session, data, size);
 
-       if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+       skb = alloc_skb(size + 1, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
                return -ENOMEM;
        }
@@ -283,7 +285,8 @@ static int hidp_queue_report(struct hidp_session *session,
 
        BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
 
-       if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+       skb = alloc_skb(size + 1, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
                return -ENOMEM;
        }
@@ -1016,8 +1019,6 @@ static int __init hidp_init(void)
 {
        int ret;
 
-       l2cap_load();
-
        BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
 
        ret = hid_register_driver(&hidp_driver);
similarity index 75%
rename from net/bluetooth/l2cap.c
rename to net/bluetooth/l2cap_core.c
index c791fcd..c9f9cec 100644 (file)
@@ -24,7 +24,7 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-/* Bluetooth L2CAP core and sockets. */
+/* Bluetooth L2CAP core. */
 
 #include <linux/module.h>
 
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
-#define VERSION "2.15"
-
-static int disable_ertm;
+int disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { 0x02, };
 
-static const struct proto_ops l2cap_sock_ops;
-
 static struct workqueue_struct *_busy_wq;
 
-static struct bt_sock_list l2cap_sk_list = {
+struct bt_sock_list l2cap_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
 };
 
 static void l2cap_busy_work(struct work_struct *work);
 
-static void __l2cap_sock_close(struct sock *sk, int reason);
-static void l2cap_sock_close(struct sock *sk);
-static void l2cap_sock_kill(struct sock *sk);
-
-static int l2cap_build_conf_req(struct sock *sk, void *data);
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
                                u8 code, u8 ident, u16 dlen, void *data);
 
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
 
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
-       BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
-       sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-static void l2cap_sock_clear_timer(struct sock *sk)
-{
-       BT_DBG("sock %p state %d", sk, sk->sk_state);
-       sk_stop_timer(sk, &sk->sk_timer);
-}
-
-static void l2cap_sock_timeout(unsigned long arg)
-{
-       struct sock *sk = (struct sock *) arg;
-       int reason;
-
-       BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-       bh_lock_sock(sk);
-
-       if (sock_owned_by_user(sk)) {
-               /* sk is owned by user. Try again later */
-               l2cap_sock_set_timer(sk, HZ / 5);
-               bh_unlock_sock(sk);
-               sock_put(sk);
-               return;
-       }
-
-       if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
-               reason = ECONNREFUSED;
-       else if (sk->sk_state == BT_CONNECT &&
-                               l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
-               reason = ECONNREFUSED;
-       else
-               reason = ETIMEDOUT;
-
-       __l2cap_sock_close(sk, reason);
-
-       bh_unlock_sock(sk);
-
-       l2cap_sock_kill(sk);
-       sock_put(sk);
-}
-
 /* ---- L2CAP channels ---- */
 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
 {
@@ -236,8 +181,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
        l2cap_pi(sk)->conn = conn;
 
        if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
-               /* Alloc CID for connection-oriented socket */
-               l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+               if (conn->hcon->type == LE_LINK) {
+                       /* LE connection */
+                       l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
+                       l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
+                       l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
+               } else {
+                       /* Alloc CID for connection-oriented socket */
+                       l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+                       l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+               }
        } else if (sk->sk_type == SOCK_DGRAM) {
                /* Connectionless socket */
                l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
@@ -258,7 +211,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
 
 /* Delete channel.
  * Must be called on the locked socket. */
-static void l2cap_chan_del(struct sock *sk, int err)
+void l2cap_chan_del(struct sock *sk, int err)
 {
        struct l2cap_conn *conn = l2cap_pi(sk)->conn;
        struct sock *parent = bt_sk(sk)->parent;
@@ -305,39 +258,50 @@ static void l2cap_chan_del(struct sock *sk, int err)
        }
 }
 
-/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
+static inline u8 l2cap_get_auth_type(struct sock *sk)
 {
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       __u8 auth_type;
+       if (sk->sk_type == SOCK_RAW) {
+               switch (l2cap_pi(sk)->sec_level) {
+               case BT_SECURITY_HIGH:
+                       return HCI_AT_DEDICATED_BONDING_MITM;
+               case BT_SECURITY_MEDIUM:
+                       return HCI_AT_DEDICATED_BONDING;
+               default:
+                       return HCI_AT_NO_BONDING;
+               }
+       } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+               if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
 
-       if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
                if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-                       auth_type = HCI_AT_NO_BONDING_MITM;
+                       return HCI_AT_NO_BONDING_MITM;
                else
-                       auth_type = HCI_AT_NO_BONDING;
-
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+                       return HCI_AT_NO_BONDING;
        } else {
                switch (l2cap_pi(sk)->sec_level) {
                case BT_SECURITY_HIGH:
-                       auth_type = HCI_AT_GENERAL_BONDING_MITM;
-                       break;
+                       return HCI_AT_GENERAL_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
-                       auth_type = HCI_AT_GENERAL_BONDING;
-                       break;
+                       return HCI_AT_GENERAL_BONDING;
                default:
-                       auth_type = HCI_AT_NO_BONDING;
-                       break;
+                       return HCI_AT_NO_BONDING;
                }
        }
+}
+
+/* Service level security */
+static inline int l2cap_check_security(struct sock *sk)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       __u8 auth_type;
+
+       auth_type = l2cap_get_auth_type(sk);
 
        return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
                                                                auth_type);
 }
 
-static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
+u8 l2cap_get_ident(struct l2cap_conn *conn)
 {
        u8 id;
 
@@ -359,16 +323,22 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
        return id;
 }
 
-static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
 {
        struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
+       u8 flags;
 
        BT_DBG("code 0x%2.2x", code);
 
        if (!skb)
                return;
 
-       hci_send_acl(conn->hcon, skb, 0);
+       if (lmp_no_flush_capable(conn->hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
+
+       hci_send_acl(conn->hcon, skb, flags);
 }
 
 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -378,6 +348,7 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
        struct l2cap_conn *conn = pi->conn;
        struct sock *sk = (struct sock *)pi;
        int count, hlen = L2CAP_HDR_SIZE + 2;
+       u8 flags;
 
        if (sk->sk_state != BT_CONNECTED)
                return;
@@ -414,7 +385,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
                put_unaligned_le16(fcs, skb_put(skb, 2));
        }
 
-       hci_send_acl(pi->conn->hcon, skb, 0);
+       if (lmp_no_flush_capable(conn->hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
+
+       hci_send_acl(pi->conn->hcon, skb, flags);
 }
 
 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
@@ -485,7 +461,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
        }
 }
 
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
 {
        struct l2cap_disconn_req req;
 
@@ -613,6 +589,82 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
        }
 }
 
+/* Find socket with cid and source bdaddr.
+ * Returns closest match, locked.
+ */
+static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+{
+       struct sock *s, *sk = NULL, *sk1 = NULL;
+       struct hlist_node *node;
+
+       read_lock(&l2cap_sk_list.lock);
+
+       sk_for_each(sk, node, &l2cap_sk_list.head) {
+               if (state && sk->sk_state != state)
+                       continue;
+
+               if (l2cap_pi(sk)->scid == cid) {
+                       /* Exact match. */
+                       if (!bacmp(&bt_sk(sk)->src, src))
+                               break;
+
+                       /* Closest match */
+                       if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+                               sk1 = sk;
+               }
+       }
+       s = node ? sk : sk1;
+       if (s)
+               bh_lock_sock(s);
+       read_unlock(&l2cap_sk_list.lock);
+
+       return s;
+}
+
+static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+{
+       struct l2cap_chan_list *list = &conn->chan_list;
+       struct sock *parent, *uninitialized_var(sk);
+
+       BT_DBG("");
+
+       /* Check if we have socket listening on cid */
+       parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+                                                       conn->src);
+       if (!parent)
+               return;
+
+       /* Check for backlog size */
+       if (sk_acceptq_is_full(parent)) {
+               BT_DBG("backlog full %d", parent->sk_ack_backlog);
+               goto clean;
+       }
+
+       sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+       if (!sk)
+               goto clean;
+
+       write_lock_bh(&list->lock);
+
+       hci_conn_hold(conn->hcon);
+
+       l2cap_sock_init(sk, parent);
+       bacpy(&bt_sk(sk)->src, conn->src);
+       bacpy(&bt_sk(sk)->dst, conn->dst);
+
+       __l2cap_chan_add(conn, sk, parent);
+
+       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+       sk->sk_state = BT_CONNECTED;
+       parent->sk_data_ready(parent, 0);
+
+       write_unlock_bh(&list->lock);
+
+clean:
+       bh_unlock_sock(parent);
+}
+
 static void l2cap_conn_ready(struct l2cap_conn *conn)
 {
        struct l2cap_chan_list *l = &conn->chan_list;
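
l2cap_get_sock_by_scid() in the hunk above walks the socket list and prefers an entry bound to the exact source address, remembering one bound to BDADDR_ANY as the "closest match" fallback; l2cap_le_conn_ready() then uses it to find the LE listener for the fixed LE data CID. A small self-contained sketch of that exact-match-else-wildcard lookup over a plain array is shown below; strings stand in for bdaddr_t and "any" for BDADDR_ANY, so none of these types are the kernel's.

#include <stdio.h>
#include <string.h>

struct entry {
        const char *src;        /* "any" plays the role of BDADDR_ANY */
        int cid;
};

static const struct entry *lookup(const struct entry *tbl, int n,
                                  int cid, const char *src)
{
        const struct entry *wildcard = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].cid != cid)
                        continue;
                if (strcmp(tbl[i].src, src) == 0)
                        return &tbl[i];         /* exact match wins */
                if (strcmp(tbl[i].src, "any") == 0)
                        wildcard = &tbl[i];     /* remember closest match */
        }

        return wildcard;
}

int main(void)
{
        static const struct entry tbl[] = {
                { "any",               4 },
                { "00:11:22:33:44:55", 4 },
        };
        const struct entry *e = lookup(tbl, 2, 4, "aa:bb:cc:dd:ee:ff");

        printf("matched %s\n", e ? e->src : "nothing");
        return 0;
}
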
@@ -620,11 +672,20 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
 
        BT_DBG("conn %p", conn);
 
+       if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+               l2cap_le_conn_ready(conn);
+
        read_lock(&l->lock);
 
        for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
                bh_lock_sock(sk);
 
+               if (conn->hcon->type == LE_LINK) {
+                       l2cap_sock_clear_timer(sk);
+                       sk->sk_state = BT_CONNECTED;
+                       sk->sk_state_change(sk);
+               }
+
                if (sk->sk_type != SOCK_SEQPACKET &&
                                sk->sk_type != SOCK_STREAM) {
                        l2cap_sock_clear_timer(sk);
@@ -683,7 +744,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 
        BT_DBG("hcon %p conn %p", hcon, conn);
 
-       conn->mtu = hcon->hdev->acl_mtu;
+       if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
+               conn->mtu = hcon->hdev->le_mtu;
+       else
+               conn->mtu = hcon->hdev->acl_mtu;
+
        conn->src = &hcon->hdev->bdaddr;
        conn->dst = &hcon->dst;
 
@@ -692,7 +757,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
        spin_lock_init(&conn->lock);
        rwlock_init(&conn->chan_list.lock);
 
-       setup_timer(&conn->info_timer, l2cap_info_timeout,
+       if (hcon->type != LE_LINK)
+               setup_timer(&conn->info_timer, l2cap_info_timeout,
                                                (unsigned long) conn);
 
        conn->disc_reason = 0x13;
@@ -736,17 +802,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
 }
 
 /* ---- Socket interface ---- */
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
-       struct sock *sk;
-       struct hlist_node *node;
-       sk_for_each(sk, node, &l2cap_sk_list.head)
-               if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
-                       goto found;
-       sk = NULL;
-found:
-       return sk;
-}
 
 /* Find socket with psm and source bdaddr.
  * Returns closest match.
@@ -778,1520 +833,529 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
        return node ? sk : sk1;
 }
 
-static void l2cap_sock_destruct(struct sock *sk)
-{
-       BT_DBG("sk %p", sk);
-
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sk->sk_write_queue);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
+int l2cap_do_connect(struct sock *sk)
 {
-       struct sock *sk;
-
-       BT_DBG("parent %p", parent);
-
-       /* Close not yet accepted channels */
-       while ((sk = bt_accept_dequeue(parent, NULL)))
-               l2cap_sock_close(sk);
+       bdaddr_t *src = &bt_sk(sk)->src;
+       bdaddr_t *dst = &bt_sk(sk)->dst;
+       struct l2cap_conn *conn;
+       struct hci_conn *hcon;
+       struct hci_dev *hdev;
+       __u8 auth_type;
+       int err;
 
-       parent->sk_state = BT_CLOSED;
-       sock_set_flag(parent, SOCK_ZAPPED);
-}
+       BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
+                                                       l2cap_pi(sk)->psm);
 
-/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
- */
-static void l2cap_sock_kill(struct sock *sk)
-{
-       if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
-               return;
+       hdev = hci_get_route(dst, src);
+       if (!hdev)
+               return -EHOSTUNREACH;
 
-       BT_DBG("sk %p state %d", sk, sk->sk_state);
+       hci_dev_lock_bh(hdev);
 
-       /* Kill poor orphan */
-       bt_sock_unlink(&l2cap_sk_list, sk);
-       sock_set_flag(sk, SOCK_DEAD);
-       sock_put(sk);
-}
+       auth_type = l2cap_get_auth_type(sk);
 
-static void __l2cap_sock_close(struct sock *sk, int reason)
-{
-       BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+       if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+               hcon = hci_connect(hdev, LE_LINK, dst,
+                                       l2cap_pi(sk)->sec_level, auth_type);
+       else
+               hcon = hci_connect(hdev, ACL_LINK, dst,
+                                       l2cap_pi(sk)->sec_level, auth_type);
 
-       switch (sk->sk_state) {
-       case BT_LISTEN:
-               l2cap_sock_cleanup_listen(sk);
-               break;
+       if (IS_ERR(hcon)) {
+               err = PTR_ERR(hcon);
+               goto done;
+       }
 
-       case BT_CONNECTED:
-       case BT_CONFIG:
-               if (sk->sk_type == SOCK_SEQPACKET ||
-                               sk->sk_type == SOCK_STREAM) {
-                       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       conn = l2cap_conn_add(hcon, 0);
+       if (!conn) {
+               hci_conn_put(hcon);
+               err = -ENOMEM;
+               goto done;
+       }
 
-                       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-                       l2cap_send_disconn_req(conn, sk, reason);
-               } else
-                       l2cap_chan_del(sk, reason);
-               break;
+       /* Update source addr of the socket */
+       bacpy(src, conn->src);
 
-       case BT_CONNECT2:
-               if (sk->sk_type == SOCK_SEQPACKET ||
-                               sk->sk_type == SOCK_STREAM) {
-                       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-                       struct l2cap_conn_rsp rsp;
-                       __u16 result;
+       l2cap_chan_add(conn, sk, NULL);
 
-                       if (bt_sk(sk)->defer_setup)
-                               result = L2CAP_CR_SEC_BLOCK;
-                       else
-                               result = L2CAP_CR_BAD_PSM;
+       sk->sk_state = BT_CONNECT;
+       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
 
-                       rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-                       rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
-                       rsp.result = cpu_to_le16(result);
-                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+       if (hcon->state == BT_CONNECTED) {
+               if (sk->sk_type != SOCK_SEQPACKET &&
+                               sk->sk_type != SOCK_STREAM) {
+                       l2cap_sock_clear_timer(sk);
+                       if (l2cap_check_security(sk))
+                               sk->sk_state = BT_CONNECTED;
                } else
-                       l2cap_chan_del(sk, reason);
-               break;
+                       l2cap_do_start(sk);
+       }
 
-       case BT_CONNECT:
-       case BT_DISCONN:
-               l2cap_chan_del(sk, reason);
-               break;
+       err = 0;
 
-       default:
-               sock_set_flag(sk, SOCK_ZAPPED);
-               break;
-       }
+done:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+       return err;
 }
 
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
+int __l2cap_wait_ack(struct sock *sk)
 {
-       l2cap_sock_clear_timer(sk);
-       lock_sock(sk);
-       __l2cap_sock_close(sk, ECONNRESET);
-       release_sock(sk);
-       l2cap_sock_kill(sk);
-}
+       DECLARE_WAITQUEUE(wait, current);
+       int err = 0;
+       int timeo = HZ/5;
 
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       add_wait_queue(sk_sleep(sk), &wait);
+       while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-       BT_DBG("sk %p", sk);
+               if (!timeo)
+                       timeo = HZ/5;
 
-       if (parent) {
-               sk->sk_type = parent->sk_type;
-               bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
-
-               pi->imtu = l2cap_pi(parent)->imtu;
-               pi->omtu = l2cap_pi(parent)->omtu;
-               pi->conf_state = l2cap_pi(parent)->conf_state;
-               pi->mode = l2cap_pi(parent)->mode;
-               pi->fcs  = l2cap_pi(parent)->fcs;
-               pi->max_tx = l2cap_pi(parent)->max_tx;
-               pi->tx_win = l2cap_pi(parent)->tx_win;
-               pi->sec_level = l2cap_pi(parent)->sec_level;
-               pi->role_switch = l2cap_pi(parent)->role_switch;
-               pi->force_reliable = l2cap_pi(parent)->force_reliable;
-       } else {
-               pi->imtu = L2CAP_DEFAULT_MTU;
-               pi->omtu = 0;
-               if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
-                       pi->mode = L2CAP_MODE_ERTM;
-                       pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
-               } else {
-                       pi->mode = L2CAP_MODE_BASIC;
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
                }
-               pi->max_tx = L2CAP_DEFAULT_MAX_TX;
-               pi->fcs  = L2CAP_FCS_CRC16;
-               pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
-               pi->sec_level = BT_SECURITY_LOW;
-               pi->role_switch = 0;
-               pi->force_reliable = 0;
-       }
 
-       /* Default config options */
-       pi->conf_len = 0;
-       pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-       skb_queue_head_init(TX_QUEUE(sk));
-       skb_queue_head_init(SREJ_QUEUE(sk));
-       skb_queue_head_init(BUSY_QUEUE(sk));
-       INIT_LIST_HEAD(SREJ_LIST(sk));
-}
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
 
-static struct proto l2cap_proto = {
-       .name           = "L2CAP",
-       .owner          = THIS_MODULE,
-       .obj_size       = sizeof(struct l2cap_pinfo)
-};
+               err = sock_error(sk);
+               if (err)
+                       break;
+       }
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+       return err;
+}
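
__l2cap_wait_ack() above is the usual sleep-until-condition loop: while unacked ERTM frames remain and the channel is still attached to a connection, it drops the socket lock, sleeps for at most HZ/5, re-takes the lock and re-checks, bailing out on a pending signal or a socket error. A rough user-space analogue of the same pattern, assuming POSIX threads; every name here (ack_state, wait_all_acked, the 200 ms poll period) is illustrative and not part of this patch:

/* Sketch: user-space analogue of the __l2cap_wait_ack() loop, assuming
 * POSIX threads. All names are illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct ack_state {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	unsigned int    unacked_frames;  /* cleared as acknowledgements arrive */
	bool            connected;
	int             sk_err;          /* pending error, 0 if none */
};

/* Wait until every queued frame is acknowledged, the link drops, or an
 * error is reported; poll at ~200 ms, like the HZ/5 timeout above. */
static int wait_all_acked(struct ack_state *s)
{
	int err = 0;

	pthread_mutex_lock(&s->lock);
	while (s->unacked_frames > 0 && s->connected) {
		struct timespec ts;

		if (s->sk_err) {        /* mirrors the sock_error() check */
			err = s->sk_err;
			break;
		}

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_nsec += 200 * 1000 * 1000;        /* ~HZ/5 */
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		/* Drops the lock while sleeping, exactly like the
		 * release_sock()/schedule_timeout()/lock_sock() sequence. */
		pthread_cond_timedwait(&s->cond, &s->lock, &ts);
	}
	pthread_mutex_unlock(&s->lock);
	return err;
}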
 
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static void l2cap_monitor_timeout(unsigned long arg)
 {
-       struct sock *sk;
+       struct sock *sk = (void *) arg;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
-       if (!sk)
-               return NULL;
+       BT_DBG("sk %p", sk);
 
-       sock_init_data(sock, sk);
-       INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+       bh_lock_sock(sk);
+       if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
+               l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
+               bh_unlock_sock(sk);
+               return;
+       }
 
-       sk->sk_destruct = l2cap_sock_destruct;
-       sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+       l2cap_pi(sk)->retry_count++;
+       __mod_monitor_timer();
 
-       sock_reset_flag(sk, SOCK_ZAPPED);
+       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
+       bh_unlock_sock(sk);
+}
 
-       sk->sk_protocol = proto;
-       sk->sk_state = BT_OPEN;
-
-       setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
-
-       bt_sock_link(&l2cap_sk_list, sk);
-       return sk;
-}
-
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
-                            int kern)
-{
-       struct sock *sk;
-
-       BT_DBG("sock %p", sock);
-
-       sock->state = SS_UNCONNECTED;
-
-       if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
-                       sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
-               return -ESOCKTNOSUPPORT;
-
-       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
-               return -EPERM;
-
-       sock->ops = &l2cap_sock_ops;
-
-       sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
-       if (!sk)
-               return -ENOMEM;
-
-       l2cap_sock_init(sk, NULL);
-       return 0;
-}
-
-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
-{
-       struct sock *sk = sock->sk;
-       struct sockaddr_l2 la;
-       int len, err = 0;
-
-       BT_DBG("sk %p", sk);
-
-       if (!addr || addr->sa_family != AF_BLUETOOTH)
-               return -EINVAL;
-
-       memset(&la, 0, sizeof(la));
-       len = min_t(unsigned int, sizeof(la), alen);
-       memcpy(&la, addr, len);
-
-       if (la.l2_cid)
-               return -EINVAL;
-
-       lock_sock(sk);
-
-       if (sk->sk_state != BT_OPEN) {
-               err = -EBADFD;
-               goto done;
-       }
-
-       if (la.l2_psm) {
-               __u16 psm = __le16_to_cpu(la.l2_psm);
-
-               /* PSM must be odd and lsb of upper byte must be 0 */
-               if ((psm & 0x0101) != 0x0001) {
-                       err = -EINVAL;
-                       goto done;
-               }
-
-               /* Restrict usage of well-known PSMs */
-               if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
-                       err = -EACCES;
-                       goto done;
-               }
-       }
-
-       write_lock_bh(&l2cap_sk_list.lock);
-
-       if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
-               err = -EADDRINUSE;
-       } else {
-               /* Save source address */
-               bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
-               l2cap_pi(sk)->psm   = la.l2_psm;
-               l2cap_pi(sk)->sport = la.l2_psm;
-               sk->sk_state = BT_BOUND;
-
-               if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
-                                       __le16_to_cpu(la.l2_psm) == 0x0003)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-       }
-
-       write_unlock_bh(&l2cap_sk_list.lock);
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_do_connect(struct sock *sk)
-{
-       bdaddr_t *src = &bt_sk(sk)->src;
-       bdaddr_t *dst = &bt_sk(sk)->dst;
-       struct l2cap_conn *conn;
-       struct hci_conn *hcon;
-       struct hci_dev *hdev;
-       __u8 auth_type;
-       int err;
-
-       BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
-                                                       l2cap_pi(sk)->psm);
-
-       hdev = hci_get_route(dst, src);
-       if (!hdev)
-               return -EHOSTUNREACH;
-
-       hci_dev_lock_bh(hdev);
-
-       err = -ENOMEM;
-
-       if (sk->sk_type == SOCK_RAW) {
-               switch (l2cap_pi(sk)->sec_level) {
-               case BT_SECURITY_HIGH:
-                       auth_type = HCI_AT_DEDICATED_BONDING_MITM;
-                       break;
-               case BT_SECURITY_MEDIUM:
-                       auth_type = HCI_AT_DEDICATED_BONDING;
-                       break;
-               default:
-                       auth_type = HCI_AT_NO_BONDING;
-                       break;
-               }
-       } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-                       auth_type = HCI_AT_NO_BONDING_MITM;
-               else
-                       auth_type = HCI_AT_NO_BONDING;
-
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-       } else {
-               switch (l2cap_pi(sk)->sec_level) {
-               case BT_SECURITY_HIGH:
-                       auth_type = HCI_AT_GENERAL_BONDING_MITM;
-                       break;
-               case BT_SECURITY_MEDIUM:
-                       auth_type = HCI_AT_GENERAL_BONDING;
-                       break;
-               default:
-                       auth_type = HCI_AT_NO_BONDING;
-                       break;
-               }
-       }
-
-       hcon = hci_connect(hdev, ACL_LINK, dst,
-                                       l2cap_pi(sk)->sec_level, auth_type);
-       if (!hcon)
-               goto done;
-
-       conn = l2cap_conn_add(hcon, 0);
-       if (!conn) {
-               hci_conn_put(hcon);
-               goto done;
-       }
-
-       err = 0;
-
-       /* Update source addr of the socket */
-       bacpy(src, conn->src);
-
-       l2cap_chan_add(conn, sk, NULL);
-
-       sk->sk_state = BT_CONNECT;
-       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
-       if (hcon->state == BT_CONNECTED) {
-               if (sk->sk_type != SOCK_SEQPACKET &&
-                               sk->sk_type != SOCK_STREAM) {
-                       l2cap_sock_clear_timer(sk);
-                       sk->sk_state = BT_CONNECTED;
-               } else
-                       l2cap_do_start(sk);
-       }
-
-done:
-       hci_dev_unlock_bh(hdev);
-       hci_dev_put(hdev);
-       return err;
-}
-
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
-{
-       struct sock *sk = sock->sk;
-       struct sockaddr_l2 la;
-       int len, err = 0;
-
-       BT_DBG("sk %p", sk);
-
-       if (!addr || alen < sizeof(addr->sa_family) ||
-           addr->sa_family != AF_BLUETOOTH)
-               return -EINVAL;
-
-       memset(&la, 0, sizeof(la));
-       len = min_t(unsigned int, sizeof(la), alen);
-       memcpy(&la, addr, len);
-
-       if (la.l2_cid)
-               return -EINVAL;
-
-       lock_sock(sk);
-
-       if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
-                       && !la.l2_psm) {
-               err = -EINVAL;
-               goto done;
-       }
-
-       switch (l2cap_pi(sk)->mode) {
-       case L2CAP_MODE_BASIC:
-               break;
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               if (!disable_ertm)
-                       break;
-               /* fall through */
-       default:
-               err = -ENOTSUPP;
-               goto done;
-       }
-
-       switch (sk->sk_state) {
-       case BT_CONNECT:
-       case BT_CONNECT2:
-       case BT_CONFIG:
-               /* Already connecting */
-               goto wait;
-
-       case BT_CONNECTED:
-               /* Already connected */
-               err = -EISCONN;
-               goto done;
-
-       case BT_OPEN:
-       case BT_BOUND:
-               /* Can connect */
-               break;
-
-       default:
-               err = -EBADFD;
-               goto done;
-       }
-
-       /* PSM must be odd and lsb of upper byte must be 0 */
-       if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
-               sk->sk_type != SOCK_RAW) {
-               err = -EINVAL;
-               goto done;
-       }
-
-       /* Set destination address and psm */
-       bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
-       l2cap_pi(sk)->psm = la.l2_psm;
-
-       err = l2cap_do_connect(sk);
-       if (err)
-               goto done;
-
-wait:
-       err = bt_sock_wait_state(sk, BT_CONNECTED,
-                       sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_listen(struct socket *sock, int backlog)
-{
-       struct sock *sk = sock->sk;
-       int err = 0;
-
-       BT_DBG("sk %p backlog %d", sk, backlog);
-
-       lock_sock(sk);
-
-       if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
-                       || sk->sk_state != BT_BOUND) {
-               err = -EBADFD;
-               goto done;
-       }
-
-       switch (l2cap_pi(sk)->mode) {
-       case L2CAP_MODE_BASIC:
-               break;
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               if (!disable_ertm)
-                       break;
-               /* fall through */
-       default:
-               err = -ENOTSUPP;
-               goto done;
-       }
-
-       if (!l2cap_pi(sk)->psm) {
-               bdaddr_t *src = &bt_sk(sk)->src;
-               u16 psm;
-
-               err = -EINVAL;
-
-               write_lock_bh(&l2cap_sk_list.lock);
-
-               for (psm = 0x1001; psm < 0x1100; psm += 2)
-                       if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
-                               l2cap_pi(sk)->psm   = cpu_to_le16(psm);
-                               l2cap_pi(sk)->sport = cpu_to_le16(psm);
-                               err = 0;
-                               break;
-                       }
-
-               write_unlock_bh(&l2cap_sk_list.lock);
-
-               if (err < 0)
-                       goto done;
-       }
-
-       sk->sk_max_ack_backlog = backlog;
-       sk->sk_ack_backlog = 0;
-       sk->sk_state = BT_LISTEN;
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       struct sock *sk = sock->sk, *nsk;
-       long timeo;
-       int err = 0;
-
-       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
-       if (sk->sk_state != BT_LISTEN) {
-               err = -EBADFD;
-               goto done;
-       }
-
-       timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
-       BT_DBG("sk %p timeo %ld", sk, timeo);
-
-       /* Wait for an incoming connection. (wake-one). */
-       add_wait_queue_exclusive(sk_sleep(sk), &wait);
-       while (!(nsk = bt_accept_dequeue(sk, newsock))) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (!timeo) {
-                       err = -EAGAIN;
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
-               if (sk->sk_state != BT_LISTEN) {
-                       err = -EBADFD;
-                       break;
-               }
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk_sleep(sk), &wait);
-
-       if (err)
-               goto done;
-
-       newsock->state = SS_CONNECTED;
-
-       BT_DBG("new socket %p", nsk);
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
-{
-       struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
-       struct sock *sk = sock->sk;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       addr->sa_family = AF_BLUETOOTH;
-       *len = sizeof(struct sockaddr_l2);
-
-       if (peer) {
-               la->l2_psm = l2cap_pi(sk)->psm;
-               bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
-               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       } else {
-               la->l2_psm = l2cap_pi(sk)->sport;
-               bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
-               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
-       }
-
-       return 0;
-}
-
-static int __l2cap_wait_ack(struct sock *sk)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       int err = 0;
-       int timeo = HZ/5;
-
-       add_wait_queue(sk_sleep(sk), &wait);
-       while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               if (!timeo)
-                       timeo = HZ/5;
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-
-               err = sock_error(sk);
-               if (err)
-                       break;
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk_sleep(sk), &wait);
-       return err;
-}
-
-static void l2cap_monitor_timeout(unsigned long arg)
-{
-       struct sock *sk = (void *) arg;
-
-       BT_DBG("sk %p", sk);
-
-       bh_lock_sock(sk);
-       if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
-               l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
-               bh_unlock_sock(sk);
-               return;
-       }
-
-       l2cap_pi(sk)->retry_count++;
-       __mod_monitor_timer();
-
-       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
-       bh_unlock_sock(sk);
-}
-
-static void l2cap_retrans_timeout(unsigned long arg)
-{
-       struct sock *sk = (void *) arg;
-
-       BT_DBG("sk %p", sk);
-
-       bh_lock_sock(sk);
-       l2cap_pi(sk)->retry_count = 1;
-       __mod_monitor_timer();
-
-       l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
-
-       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
-       bh_unlock_sock(sk);
-}
-
-static void l2cap_drop_acked_frames(struct sock *sk)
-{
-       struct sk_buff *skb;
-
-       while ((skb = skb_peek(TX_QUEUE(sk))) &&
-                       l2cap_pi(sk)->unacked_frames) {
-               if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
-                       break;
-
-               skb = skb_dequeue(TX_QUEUE(sk));
-               kfree_skb(skb);
-
-               l2cap_pi(sk)->unacked_frames--;
-       }
-
-       if (!l2cap_pi(sk)->unacked_frames)
-               del_timer(&l2cap_pi(sk)->retrans_timer);
-}
-
-static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-
-       BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
-
-       hci_send_acl(pi->conn->hcon, skb, 0);
-}
-
-static void l2cap_streaming_send(struct sock *sk)
-{
-       struct sk_buff *skb;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u16 control, fcs;
-
-       while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
-               control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
-               control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-               put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
-
-               if (pi->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
-                       put_unaligned_le16(fcs, skb->data + skb->len - 2);
-               }
-
-               l2cap_do_send(sk, skb);
-
-               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-       }
-}
-
-static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb, *tx_skb;
-       u16 control, fcs;
-
-       skb = skb_peek(TX_QUEUE(sk));
-       if (!skb)
-               return;
-
-       do {
-               if (bt_cb(skb)->tx_seq == tx_seq)
-                       break;
-
-               if (skb_queue_is_last(TX_QUEUE(sk), skb))
-                       return;
-
-       } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
-
-       if (pi->remote_max_tx &&
-                       bt_cb(skb)->retries == pi->remote_max_tx) {
-               l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
-               return;
-       }
-
-       tx_skb = skb_clone(skb, GFP_ATOMIC);
-       bt_cb(skb)->retries++;
-       control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-
-       if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
-               control |= L2CAP_CTRL_FINAL;
-               pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-       }
-
-       control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-                       | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-
-       put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
-       if (pi->fcs == L2CAP_FCS_CRC16) {
-               fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-               put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
-       }
-
-       l2cap_do_send(sk, tx_skb);
-}
-
-static int l2cap_ertm_send(struct sock *sk)
-{
-       struct sk_buff *skb, *tx_skb;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u16 control, fcs;
-       int nsent = 0;
-
-       if (sk->sk_state != BT_CONNECTED)
-               return -ENOTCONN;
-
-       while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
-
-               if (pi->remote_max_tx &&
-                               bt_cb(skb)->retries == pi->remote_max_tx) {
-                       l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
-                       break;
-               }
-
-               tx_skb = skb_clone(skb, GFP_ATOMIC);
-
-               bt_cb(skb)->retries++;
-
-               control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-               control &= L2CAP_CTRL_SAR;
-
-               if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
-                       control |= L2CAP_CTRL_FINAL;
-                       pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-               }
-               control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-                               | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-               put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
-
-               if (pi->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
-                       put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
-               }
-
-               l2cap_do_send(sk, tx_skb);
-
-               __mod_retrans_timer();
-
-               bt_cb(skb)->tx_seq = pi->next_tx_seq;
-               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
-               pi->unacked_frames++;
-               pi->frames_sent++;
-
-               if (skb_queue_is_last(TX_QUEUE(sk), skb))
-                       sk->sk_send_head = NULL;
-               else
-                       sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
-               nsent++;
-       }
-
-       return nsent;
-}
-
-static int l2cap_retransmit_frames(struct sock *sk)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       int ret;
-
-       if (!skb_queue_empty(TX_QUEUE(sk)))
-               sk->sk_send_head = TX_QUEUE(sk)->next;
-
-       pi->next_tx_seq = pi->expected_ack_seq;
-       ret = l2cap_ertm_send(sk);
-       return ret;
-}
-
-static void l2cap_send_ack(struct l2cap_pinfo *pi)
-{
-       struct sock *sk = (struct sock *)pi;
-       u16 control = 0;
-
-       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
-       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
-               pi->conn_state |= L2CAP_CONN_RNR_SENT;
-               l2cap_send_sframe(pi, control);
-               return;
-       }
-
-       if (l2cap_ertm_send(sk) > 0)
-               return;
-
-       control |= L2CAP_SUPER_RCV_READY;
-       l2cap_send_sframe(pi, control);
-}
-
-static void l2cap_send_srejtail(struct sock *sk)
-{
-       struct srej_list *tail;
-       u16 control;
-
-       control = L2CAP_SUPER_SELECT_REJECT;
-       control |= L2CAP_CTRL_FINAL;
-
-       tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
-       control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
-       l2cap_send_sframe(l2cap_pi(sk), control);
-}
-
-static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff **frag;
-       int err, sent = 0;
-
-       if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
-               return -EFAULT;
-
-       sent += count;
-       len  -= count;
-
-       /* Continuation fragments (no L2CAP header) */
-       frag = &skb_shinfo(skb)->frag_list;
-       while (len) {
-               count = min_t(unsigned int, conn->mtu, len);
-
-               *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
-               if (!*frag)
-                       return err;
-               if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
-                       return -EFAULT;
-
-               sent += count;
-               len  -= count;
-
-               frag = &(*frag)->next;
-       }
-
-       return sent;
-}
-
-static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE + 2;
-       struct l2cap_hdr *lh;
-
-       BT_DBG("sk %p len %d", sk, (int)len);
-
-       count = min_t(unsigned int, (conn->mtu - hlen), len);
-       skb = bt_skb_send_alloc(sk, count + hlen,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               return ERR_PTR(err);
-
-       /* Create L2CAP header */
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-       put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
-
-       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
-       if (unlikely(err < 0)) {
-               kfree_skb(skb);
-               return ERR_PTR(err);
-       }
-       return skb;
-}
-
-static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+static void l2cap_retrans_timeout(unsigned long arg)
 {
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE;
-       struct l2cap_hdr *lh;
+       struct sock *sk = (void *) arg;
 
-       BT_DBG("sk %p len %d", sk, (int)len);
+       BT_DBG("sk %p", sk);
 
-       count = min_t(unsigned int, (conn->mtu - hlen), len);
-       skb = bt_skb_send_alloc(sk, count + hlen,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               return ERR_PTR(err);
+       bh_lock_sock(sk);
+       l2cap_pi(sk)->retry_count = 1;
+       __mod_monitor_timer();
 
-       /* Create L2CAP header */
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+       l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
 
-       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
-       if (unlikely(err < 0)) {
-               kfree_skb(skb);
-               return ERR_PTR(err);
-       }
-       return skb;
+       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
+       bh_unlock_sock(sk);
 }
 
-static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+static void l2cap_drop_acked_frames(struct sock *sk)
 {
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
        struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE + 2;
-       struct l2cap_hdr *lh;
-
-       BT_DBG("sk %p len %d", sk, (int)len);
-
-       if (!conn)
-               return ERR_PTR(-ENOTCONN);
-
-       if (sdulen)
-               hlen += 2;
-
-       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
-               hlen += 2;
-
-       count = min_t(unsigned int, (conn->mtu - hlen), len);
-       skb = bt_skb_send_alloc(sk, count + hlen,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               return ERR_PTR(err);
 
-       /* Create L2CAP header */
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-       put_unaligned_le16(control, skb_put(skb, 2));
-       if (sdulen)
-               put_unaligned_le16(sdulen, skb_put(skb, 2));
+       while ((skb = skb_peek(TX_QUEUE(sk))) &&
+                       l2cap_pi(sk)->unacked_frames) {
+               if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
+                       break;
 
-       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
-       if (unlikely(err < 0)) {
+               skb = skb_dequeue(TX_QUEUE(sk));
                kfree_skb(skb);
-               return ERR_PTR(err);
-       }
-
-       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
-               put_unaligned_le16(0, skb_put(skb, 2));
-
-       bt_cb(skb)->retries = 0;
-       return skb;
-}
-
-static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb;
-       struct sk_buff_head sar_queue;
-       u16 control;
-       size_t size = 0;
-
-       skb_queue_head_init(&sar_queue);
-       control = L2CAP_SDU_START;
-       skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       __skb_queue_tail(&sar_queue, skb);
-       len -= pi->remote_mps;
-       size += pi->remote_mps;
-
-       while (len > 0) {
-               size_t buflen;
-
-               if (len > pi->remote_mps) {
-                       control = L2CAP_SDU_CONTINUE;
-                       buflen = pi->remote_mps;
-               } else {
-                       control = L2CAP_SDU_END;
-                       buflen = len;
-               }
-
-               skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
-               if (IS_ERR(skb)) {
-                       skb_queue_purge(&sar_queue);
-                       return PTR_ERR(skb);
-               }
 
-               __skb_queue_tail(&sar_queue, skb);
-               len -= buflen;
-               size += buflen;
+               l2cap_pi(sk)->unacked_frames--;
        }
-       skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
-       if (sk->sk_send_head == NULL)
-               sk->sk_send_head = sar_queue.next;
 
-       return size;
+       if (!l2cap_pi(sk)->unacked_frames)
+               del_timer(&l2cap_pi(sk)->retrans_timer);
 }
 
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
 {
-       struct sock *sk = sock->sk;
        struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb;
-       u16 control;
-       int err;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       err = sock_error(sk);
-       if (err)
-               return err;
-
-       if (msg->msg_flags & MSG_OOB)
-               return -EOPNOTSUPP;
-
-       lock_sock(sk);
-
-       if (sk->sk_state != BT_CONNECTED) {
-               err = -ENOTCONN;
-               goto done;
-       }
-
-       /* Connectionless channel */
-       if (sk->sk_type == SOCK_DGRAM) {
-               skb = l2cap_create_connless_pdu(sk, msg, len);
-               if (IS_ERR(skb)) {
-                       err = PTR_ERR(skb);
-               } else {
-                       l2cap_do_send(sk, skb);
-                       err = len;
-               }
-               goto done;
-       }
-
-       switch (pi->mode) {
-       case L2CAP_MODE_BASIC:
-               /* Check outgoing MTU */
-               if (len > pi->omtu) {
-                       err = -EMSGSIZE;
-                       goto done;
-               }
+       struct hci_conn *hcon = pi->conn->hcon;
+       u16 flags;
 
-               /* Create a basic PDU */
-               skb = l2cap_create_basic_pdu(sk, msg, len);
-               if (IS_ERR(skb)) {
-                       err = PTR_ERR(skb);
-                       goto done;
-               }
+       BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
 
-               l2cap_do_send(sk, skb);
-               err = len;
-               break;
+       if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
 
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               /* Entire SDU fits into one PDU */
-               if (len <= pi->remote_mps) {
-                       control = L2CAP_SDU_UNSEGMENTED;
-                       skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
-                       if (IS_ERR(skb)) {
-                               err = PTR_ERR(skb);
-                               goto done;
-                       }
-                       __skb_queue_tail(TX_QUEUE(sk), skb);
+       hci_send_acl(hcon, skb, flags);
+}
 
-                       if (sk->sk_send_head == NULL)
-                               sk->sk_send_head = skb;
+void l2cap_streaming_send(struct sock *sk)
+{
+       struct sk_buff *skb;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u16 control, fcs;
 
-               } else {
-               /* Segment SDU into multiples PDUs */
-                       err = l2cap_sar_segment_sdu(sk, msg, len);
-                       if (err < 0)
-                               goto done;
-               }
+       while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+               control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
+               control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
+               put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
 
-               if (pi->mode == L2CAP_MODE_STREAMING) {
-                       l2cap_streaming_send(sk);
-               } else {
-                       if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
-                                       pi->conn_state && L2CAP_CONN_WAIT_F) {
-                               err = len;
-                               break;
-                       }
-                       err = l2cap_ertm_send(sk);
+               if (pi->fcs == L2CAP_FCS_CRC16) {
+                       fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
+                       put_unaligned_le16(fcs, skb->data + skb->len - 2);
                }
 
-               if (err >= 0)
-                       err = len;
-               break;
+               l2cap_do_send(sk, skb);
 
-       default:
-               BT_DBG("bad state %1.1x", pi->mode);
-               err = -EBADFD;
+               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
        }
-
-done:
-       release_sock(sk);
-       return err;
 }
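
Both l2cap_streaming_send() above and the ERTM transmit path below finish each I-frame the same way: when the channel negotiated L2CAP_FCS_CRC16, crc16() is run over the whole frame except its last two bytes and the result is stored little-endian in those two bytes. A standalone sketch of that frame check sequence, assuming the reflected 0xA001 polynomial with a zero seed that lib/crc16.c implements; crc16_sketch, fcs_append and fcs_ok are illustrative helpers, not kernel API:

/* Sketch: compute and verify an L2CAP FCS the way the code above uses
 * crc16(); assumes lib/crc16.c semantics (poly 0xA001 reflected, seed 0). */
#include <stddef.h>
#include <stdint.h>

static uint16_t crc16_sketch(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
	}
	return crc;
}

/* frame = L2CAP header + control (+ SDU length) + payload + 2-byte FCS */
static void fcs_append(uint8_t *frame, size_t frame_len)
{
	uint16_t fcs = crc16_sketch(0, frame, frame_len - 2);

	frame[frame_len - 2] = fcs & 0xff;      /* little endian, as */
	frame[frame_len - 1] = fcs >> 8;        /* put_unaligned_le16() does */
}

static int fcs_ok(const uint8_t *frame, size_t frame_len)
{
	uint16_t fcs = (uint16_t)frame[frame_len - 2] |
		       ((uint16_t)frame[frame_len - 1] << 8);

	return crc16_sketch(0, frame, frame_len - 2) == fcs;
}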
 
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
 {
-       struct sock *sk = sock->sk;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb, *tx_skb;
+       u16 control, fcs;
 
-       lock_sock(sk);
+       skb = skb_peek(TX_QUEUE(sk));
+       if (!skb)
+               return;
 
-       if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
-               struct l2cap_conn_rsp rsp;
-               struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-               u8 buf[128];
+       do {
+               if (bt_cb(skb)->tx_seq == tx_seq)
+                       break;
 
-               sk->sk_state = BT_CONFIG;
+               if (skb_queue_is_last(TX_QUEUE(sk), skb))
+                       return;
 
-               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
-               rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
-               rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-               l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+       } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
 
-               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
-                       release_sock(sk);
-                       return 0;
-               }
+       if (pi->remote_max_tx &&
+                       bt_cb(skb)->retries == pi->remote_max_tx) {
+               l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+               return;
+       }
 
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                               l2cap_build_conf_req(sk, buf), buf);
-               l2cap_pi(sk)->num_conf_req++;
+       tx_skb = skb_clone(skb, GFP_ATOMIC);
+       bt_cb(skb)->retries++;
+       control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 
-               release_sock(sk);
-               return 0;
+       if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+               control |= L2CAP_CTRL_FINAL;
+               pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
        }
 
-       release_sock(sk);
+       control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+                       | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+
+       put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
 
-       if (sock->type == SOCK_STREAM)
-               return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+       if (pi->fcs == L2CAP_FCS_CRC16) {
+               fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
+               put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+       }
 
-       return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+       l2cap_do_send(sk, tx_skb);
 }
 
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+int l2cap_ertm_send(struct sock *sk)
 {
-       struct sock *sk = sock->sk;
-       struct l2cap_options opts;
-       int len, err = 0;
-       u32 opt;
+       struct sk_buff *skb, *tx_skb;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u16 control, fcs;
+       int nsent = 0;
 
-       BT_DBG("sk %p", sk);
+       if (sk->sk_state != BT_CONNECTED)
+               return -ENOTCONN;
 
-       lock_sock(sk);
+       while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
 
-       switch (optname) {
-       case L2CAP_OPTIONS:
-               if (sk->sk_state == BT_CONNECTED) {
-                       err = -EINVAL;
+               if (pi->remote_max_tx &&
+                               bt_cb(skb)->retries == pi->remote_max_tx) {
+                       l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
                        break;
                }
 
-               opts.imtu     = l2cap_pi(sk)->imtu;
-               opts.omtu     = l2cap_pi(sk)->omtu;
-               opts.flush_to = l2cap_pi(sk)->flush_to;
-               opts.mode     = l2cap_pi(sk)->mode;
-               opts.fcs      = l2cap_pi(sk)->fcs;
-               opts.max_tx   = l2cap_pi(sk)->max_tx;
-               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
-               len = min_t(unsigned int, sizeof(opts), optlen);
-               if (copy_from_user((char *) &opts, optval, len)) {
-                       err = -EFAULT;
-                       break;
-               }
+               tx_skb = skb_clone(skb, GFP_ATOMIC);
 
-               if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
-                       err = -EINVAL;
-                       break;
-               }
+               bt_cb(skb)->retries++;
 
-               l2cap_pi(sk)->mode = opts.mode;
-               switch (l2cap_pi(sk)->mode) {
-               case L2CAP_MODE_BASIC:
-                       l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
-                       break;
-               case L2CAP_MODE_ERTM:
-               case L2CAP_MODE_STREAMING:
-                       if (!disable_ertm)
-                               break;
-                       /* fall through */
-               default:
-                       err = -EINVAL;
-                       break;
+               control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+               control &= L2CAP_CTRL_SAR;
+
+               if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+                       control |= L2CAP_CTRL_FINAL;
+                       pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
                }
+               control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+                               | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+               put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
 
-               l2cap_pi(sk)->imtu = opts.imtu;
-               l2cap_pi(sk)->omtu = opts.omtu;
-               l2cap_pi(sk)->fcs  = opts.fcs;
-               l2cap_pi(sk)->max_tx = opts.max_tx;
-               l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
-               break;
 
-       case L2CAP_LM:
-               if (get_user(opt, (u32 __user *) optval)) {
-                       err = -EFAULT;
-                       break;
+               if (pi->fcs == L2CAP_FCS_CRC16) {
+                       fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
+                       put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
                }
 
-               if (opt & L2CAP_LM_AUTH)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
-               if (opt & L2CAP_LM_ENCRYPT)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
-               if (opt & L2CAP_LM_SECURE)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+               l2cap_do_send(sk, tx_skb);
 
-               l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
-               l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
-               break;
+               __mod_retrans_timer();
 
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
+               bt_cb(skb)->tx_seq = pi->next_tx_seq;
+               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
 
-       release_sock(sk);
-       return err;
-}
+               pi->unacked_frames++;
+               pi->frames_sent++;
 
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
-{
-       struct sock *sk = sock->sk;
-       struct bt_security sec;
-       int len, err = 0;
-       u32 opt;
+               if (skb_queue_is_last(TX_QUEUE(sk), skb))
+                       sk->sk_send_head = NULL;
+               else
+                       sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
 
-       BT_DBG("sk %p", sk);
+               nsent++;
+       }
 
-       if (level == SOL_L2CAP)
-               return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
+       return nsent;
+}
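
l2cap_ertm_send() clones the head of the transmit queue, rewrites the 16-bit enhanced control field (keeping only the SAR bits, OR-ing in the F-bit when L2CAP_CONN_SEND_FBIT is pending, and packing ReqSeq and TxSeq at their shift positions), appends the FCS, restarts the retransmission timer and advances next_tx_seq modulo 64. A minimal sketch of that packing, assuming the classic ERTM layout (TxSeq in bits 1-6, Final in bit 7, ReqSeq in bits 8-13, SAR in bits 14-15); the constants below come from that assumption, not from this patch:

/* Sketch: packing of the enhanced (ERTM) I-frame control field under the
 * assumed classic layout. Constants are illustrative. */
#include <stdint.h>

#define CTRL_SAR          0xC000
#define CTRL_FINAL        0x0080
#define CTRL_REQSEQ_SHIFT 8
#define CTRL_TXSEQ_SHIFT  1

static uint16_t pack_iframe_control(uint16_t old_control,
				    uint8_t buffer_seq,  /* ReqSeq to acknowledge */
				    uint8_t tx_seq,      /* our TxSeq */
				    int set_final)
{
	uint16_t control = old_control & CTRL_SAR;  /* keep SAR bits only */

	if (set_final)
		control |= CTRL_FINAL;

	control |= (uint16_t)(buffer_seq & 0x3f) << CTRL_REQSEQ_SHIFT;
	control |= (uint16_t)(tx_seq & 0x3f) << CTRL_TXSEQ_SHIFT;
	return control;
}

/* TxSeq advances modulo 64, matching "next_tx_seq = (next_tx_seq + 1) % 64". */
static uint8_t next_seq(uint8_t seq)
{
	return (seq + 1) % 64;
}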
 
-       if (level != SOL_BLUETOOTH)
-               return -ENOPROTOOPT;
+static int l2cap_retransmit_frames(struct sock *sk)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int ret;
 
-       lock_sock(sk);
+       if (!skb_queue_empty(TX_QUEUE(sk)))
+               sk->sk_send_head = TX_QUEUE(sk)->next;
 
-       switch (optname) {
-       case BT_SECURITY:
-               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-                               && sk->sk_type != SOCK_RAW) {
-                       err = -EINVAL;
-                       break;
-               }
+       pi->next_tx_seq = pi->expected_ack_seq;
+       ret = l2cap_ertm_send(sk);
+       return ret;
+}
 
-               sec.level = BT_SECURITY_LOW;
+static void l2cap_send_ack(struct l2cap_pinfo *pi)
+{
+       struct sock *sk = (struct sock *)pi;
+       u16 control = 0;
 
-               len = min_t(unsigned int, sizeof(sec), optlen);
-               if (copy_from_user((char *) &sec, optval, len)) {
-                       err = -EFAULT;
-                       break;
-               }
+       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-               if (sec.level < BT_SECURITY_LOW ||
-                                       sec.level > BT_SECURITY_HIGH) {
-                       err = -EINVAL;
-                       break;
-               }
+       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+               control |= L2CAP_SUPER_RCV_NOT_READY;
+               pi->conn_state |= L2CAP_CONN_RNR_SENT;
+               l2cap_send_sframe(pi, control);
+               return;
+       }
 
-               l2cap_pi(sk)->sec_level = sec.level;
-               break;
+       if (l2cap_ertm_send(sk) > 0)
+               return;
 
-       case BT_DEFER_SETUP:
-               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
-                       err = -EINVAL;
-                       break;
-               }
+       control |= L2CAP_SUPER_RCV_READY;
+       l2cap_send_sframe(pi, control);
+}
 
-               if (get_user(opt, (u32 __user *) optval)) {
-                       err = -EFAULT;
-                       break;
-               }
+static void l2cap_send_srejtail(struct sock *sk)
+{
+       struct srej_list *tail;
+       u16 control;
 
-               bt_sk(sk)->defer_setup = opt;
-               break;
+       control = L2CAP_SUPER_SELECT_REJECT;
+       control |= L2CAP_CTRL_FINAL;
 
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
+       tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
+       control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-       release_sock(sk);
-       return err;
+       l2cap_send_sframe(l2cap_pi(sk), control);
 }
 
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
 {
-       struct sock *sk = sock->sk;
-       struct l2cap_options opts;
-       struct l2cap_conninfo cinfo;
-       int len, err = 0;
-       u32 opt;
-
-       BT_DBG("sk %p", sk);
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff **frag;
+       int err, sent = 0;
 
-       if (get_user(len, optlen))
+       if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
                return -EFAULT;
 
-       lock_sock(sk);
-
-       switch (optname) {
-       case L2CAP_OPTIONS:
-               opts.imtu     = l2cap_pi(sk)->imtu;
-               opts.omtu     = l2cap_pi(sk)->omtu;
-               opts.flush_to = l2cap_pi(sk)->flush_to;
-               opts.mode     = l2cap_pi(sk)->mode;
-               opts.fcs      = l2cap_pi(sk)->fcs;
-               opts.max_tx   = l2cap_pi(sk)->max_tx;
-               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
-               len = min_t(unsigned int, len, sizeof(opts));
-               if (copy_to_user(optval, (char *) &opts, len))
-                       err = -EFAULT;
+       sent += count;
+       len  -= count;
 
-               break;
+       /* Continuation fragments (no L2CAP header) */
+       frag = &skb_shinfo(skb)->frag_list;
+       while (len) {
+               count = min_t(unsigned int, conn->mtu, len);
 
-       case L2CAP_LM:
-               switch (l2cap_pi(sk)->sec_level) {
-               case BT_SECURITY_LOW:
-                       opt = L2CAP_LM_AUTH;
-                       break;
-               case BT_SECURITY_MEDIUM:
-                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
-                       break;
-               case BT_SECURITY_HIGH:
-                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
-                                                       L2CAP_LM_SECURE;
-                       break;
-               default:
-                       opt = 0;
-                       break;
-               }
+               *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
+               if (!*frag)
+                       return err;
+               if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+                       return -EFAULT;
 
-               if (l2cap_pi(sk)->role_switch)
-                       opt |= L2CAP_LM_MASTER;
+               sent += count;
+               len  -= count;
 
-               if (l2cap_pi(sk)->force_reliable)
-                       opt |= L2CAP_LM_RELIABLE;
+               frag = &(*frag)->next;
+       }
 
-               if (put_user(opt, (u32 __user *) optval))
-                       err = -EFAULT;
-               break;
+       return sent;
+}
 
-       case L2CAP_CONNINFO:
-               if (sk->sk_state != BT_CONNECTED &&
-                                       !(sk->sk_state == BT_CONNECT2 &&
-                                               bt_sk(sk)->defer_setup)) {
-                       err = -ENOTCONN;
-                       break;
-               }
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff *skb;
+       int err, count, hlen = L2CAP_HDR_SIZE + 2;
+       struct l2cap_hdr *lh;
 
-               cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
-               memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+       BT_DBG("sk %p len %d", sk, (int)len);
 
-               len = min_t(unsigned int, len, sizeof(cinfo));
-               if (copy_to_user(optval, (char *) &cinfo, len))
-                       err = -EFAULT;
+       count = min_t(unsigned int, (conn->mtu - hlen), len);
+       skb = bt_skb_send_alloc(sk, count + hlen,
+                       msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               return ERR_PTR(err);
 
-               break;
+       /* Create L2CAP header */
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+       put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
 
-       default:
-               err = -ENOPROTOOPT;
-               break;
+       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+       if (unlikely(err < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
        }
-
-       release_sock(sk);
-       return err;
+       return skb;
 }
 
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
 {
-       struct sock *sk = sock->sk;
-       struct bt_security sec;
-       int len, err = 0;
-
-       BT_DBG("sk %p", sk);
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff *skb;
+       int err, count, hlen = L2CAP_HDR_SIZE;
+       struct l2cap_hdr *lh;
 
-       if (level == SOL_L2CAP)
-               return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
+       BT_DBG("sk %p len %d", sk, (int)len);
 
-       if (level != SOL_BLUETOOTH)
-               return -ENOPROTOOPT;
+       count = min_t(unsigned int, (conn->mtu - hlen), len);
+       skb = bt_skb_send_alloc(sk, count + hlen,
+                       msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               return ERR_PTR(err);
 
-       if (get_user(len, optlen))
-               return -EFAULT;
+       /* Create L2CAP header */
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
 
-       lock_sock(sk);
+       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+       if (unlikely(err < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
+       }
+       return skb;
+}
 
-       switch (optname) {
-       case BT_SECURITY:
-               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-                               && sk->sk_type != SOCK_RAW) {
-                       err = -EINVAL;
-                       break;
-               }
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff *skb;
+       int err, count, hlen = L2CAP_HDR_SIZE + 2;
+       struct l2cap_hdr *lh;
 
-               sec.level = l2cap_pi(sk)->sec_level;
+       BT_DBG("sk %p len %d", sk, (int)len);
 
-               len = min_t(unsigned int, len, sizeof(sec));
-               if (copy_to_user(optval, (char *) &sec, len))
-                       err = -EFAULT;
+       if (!conn)
+               return ERR_PTR(-ENOTCONN);
 
-               break;
+       if (sdulen)
+               hlen += 2;
 
-       case BT_DEFER_SETUP:
-               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
-                       err = -EINVAL;
-                       break;
-               }
+       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+               hlen += 2;
 
-               if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
-                       err = -EFAULT;
+       count = min_t(unsigned int, (conn->mtu - hlen), len);
+       skb = bt_skb_send_alloc(sk, count + hlen,
+                       msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               return ERR_PTR(err);
 
-               break;
+       /* Create L2CAP header */
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+       put_unaligned_le16(control, skb_put(skb, 2));
+       if (sdulen)
+               put_unaligned_le16(sdulen, skb_put(skb, 2));
 
-       default:
-               err = -ENOPROTOOPT;
-               break;
+       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+       if (unlikely(err < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
        }
 
-       release_sock(sk);
-       return err;
+       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+               put_unaligned_le16(0, skb_put(skb, 2));
+
+       bt_cb(skb)->retries = 0;
+       return skb;
 }
 
-static int l2cap_sock_shutdown(struct socket *sock, int how)
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
 {
-       struct sock *sk = sock->sk;
-       int err = 0;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-       if (!sk->sk_shutdown) {
-               if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-                       err = __l2cap_wait_ack(sk);
-
-               sk->sk_shutdown = SHUTDOWN_MASK;
-               l2cap_sock_clear_timer(sk);
-               __l2cap_sock_close(sk, 0);
-
-               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
-                       err = bt_sock_wait_state(sk, BT_CLOSED,
-                                                       sk->sk_lingertime);
-       }
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb;
+       struct sk_buff_head sar_queue;
+       u16 control;
+       size_t size = 0;
 
-       if (!err && sk->sk_err)
-               err = -sk->sk_err;
+       skb_queue_head_init(&sar_queue);
+       control = L2CAP_SDU_START;
+       skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
 
-       release_sock(sk);
-       return err;
-}
+       __skb_queue_tail(&sar_queue, skb);
+       len -= pi->remote_mps;
+       size += pi->remote_mps;
 
-static int l2cap_sock_release(struct socket *sock)
-{
-       struct sock *sk = sock->sk;
-       int err;
+       while (len > 0) {
+               size_t buflen;
 
-       BT_DBG("sock %p, sk %p", sock, sk);
+               if (len > pi->remote_mps) {
+                       control = L2CAP_SDU_CONTINUE;
+                       buflen = pi->remote_mps;
+               } else {
+                       control = L2CAP_SDU_END;
+                       buflen = len;
+               }
 
-       if (!sk)
-               return 0;
+               skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
+               if (IS_ERR(skb)) {
+                       skb_queue_purge(&sar_queue);
+                       return PTR_ERR(skb);
+               }
 
-       err = l2cap_sock_shutdown(sock, 2);
+               __skb_queue_tail(&sar_queue, skb);
+               len -= buflen;
+               size += buflen;
+       }
+       skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
+       if (sk->sk_send_head == NULL)
+               sk->sk_send_head = sar_queue.next;
 
-       sock_orphan(sk);
-       l2cap_sock_kill(sk);
-       return err;
+       return size;
 }
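
l2cap_sar_segment_sdu() above splits one SDU into a start PDU (which also carries the total SDU length), zero or more continuation PDUs and a final end PDU, each holding at most remote_mps payload bytes, then splices the whole batch onto the transmit queue. A small bookkeeping sketch of how many PDUs that produces, assuming only that MPS is the per-PDU payload cap; sar_pdu_count and the 672-byte example value are illustrative:

/* Sketch: SAR bookkeeping as in l2cap_sar_segment_sdu(); computes how an
 * SDU of sdu_len bytes is split for a given remote MPS. Illustrative only. */
#include <stddef.h>
#include <stdio.h>

static size_t sar_pdu_count(size_t sdu_len, size_t remote_mps)
{
	size_t pdus = 1;                /* SDU_START carries up to MPS bytes */

	if (sdu_len <= remote_mps)
		return 1;               /* would be sent unsegmented instead */

	sdu_len -= remote_mps;
	while (sdu_len > remote_mps) {  /* SDU_CONTINUE frames */
		sdu_len -= remote_mps;
		pdus++;
	}
	return pdus + 1;                /* final SDU_END frame */
}

int main(void)
{
	/* e.g. a 1500-byte SDU with a 672-byte MPS -> 3 PDUs:
	 * START(672) + CONTINUE(672) + END(156) */
	printf("%zu\n", sar_pdu_count(1500, 672));
	return 0;
}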
 
 static void l2cap_chan_ready(struct sock *sk)
@@ -2365,7 +1429,11 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
-       lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+
+       if (conn->hcon->type == LE_LINK)
+               lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+       else
+               lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
 
        cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
        cmd->code  = code;
@@ -2512,7 +1580,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
        }
 }
 
-static int l2cap_build_conf_req(struct sock *sk, void *data)
+int l2cap_build_conf_req(struct sock *sk, void *data)
 {
        struct l2cap_pinfo *pi = l2cap_pi(sk);
        struct l2cap_conf_req *req = data;
@@ -2537,11 +1605,11 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
        }
 
 done:
+       if (pi->imtu != L2CAP_DEFAULT_MTU)
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+
        switch (pi->mode) {
        case L2CAP_MODE_BASIC:
-               if (pi->imtu != L2CAP_DEFAULT_MTU)
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
-
                if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
                                !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
                        break;
@@ -2604,10 +1672,6 @@ done:
                break;
        }
 
-       /* FIXME: Need actual value of the flush timeout */
-       //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
-       //   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
-
        req->dcid  = cpu_to_le16(pi->dcid);
        req->flags = cpu_to_le16(0);
 
@@ -3434,12 +2498,153 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
        return 0;
 }
 
-static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
+                                                       u16 to_multiplier)
+{
+       u16 max_latency;
+
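+       /* min/max are the connection interval in 1.25 ms units (7.5 ms to
+          4 s); to_multiplier is the supervision timeout in 10 ms units. */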
+       if (min > max || min < 6 || max > 3200)
+               return -EINVAL;
+
+       if (to_multiplier < 10 || to_multiplier > 3200)
+               return -EINVAL;
+
+       if (max >= to_multiplier * 8)
+               return -EINVAL;
+
+       max_latency = (to_multiplier * 8 / max) - 1;
+       if (latency > 499 || latency > max_latency)
+               return -EINVAL;
+
+       return 0;
+}
+
+static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+                                       struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct hci_conn *hcon = conn->hcon;
+       struct l2cap_conn_param_update_req *req;
+       struct l2cap_conn_param_update_rsp rsp;
+       u16 min, max, latency, to_multiplier, cmd_len;
+       int err;
+
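+       /* Parameter updates are requested by the slave; only the master
+          may apply them, so reject the request if we are not master. */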
+       if (!(hcon->link_mode & HCI_LM_MASTER))
+               return -EINVAL;
+
+       cmd_len = __le16_to_cpu(cmd->len);
+       if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
+               return -EPROTO;
+
+       req = (struct l2cap_conn_param_update_req *) data;
+       min             = __le16_to_cpu(req->min);
+       max             = __le16_to_cpu(req->max);
+       latency         = __le16_to_cpu(req->latency);
+       to_multiplier   = __le16_to_cpu(req->to_multiplier);
+
+       BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+                                               min, max, latency, to_multiplier);
+
+       memset(&rsp, 0, sizeof(rsp));
+
+       err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+       if (err)
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+       else
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+
+       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
+                                                       sizeof(rsp), &rsp);
+
+       if (!err)
+               hci_le_conn_update(hcon, min, max, latency, to_multiplier);
+
+       return 0;
+}
+
+static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+                       struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+{
+       int err = 0;
+
+       switch (cmd->code) {
+       case L2CAP_COMMAND_REJ:
+               l2cap_command_rej(conn, cmd, data);
+               break;
+
+       case L2CAP_CONN_REQ:
+               err = l2cap_connect_req(conn, cmd, data);
+               break;
+
+       case L2CAP_CONN_RSP:
+               err = l2cap_connect_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_CONF_REQ:
+               err = l2cap_config_req(conn, cmd, cmd_len, data);
+               break;
+
+       case L2CAP_CONF_RSP:
+               err = l2cap_config_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_DISCONN_REQ:
+               err = l2cap_disconnect_req(conn, cmd, data);
+               break;
+
+       case L2CAP_DISCONN_RSP:
+               err = l2cap_disconnect_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_ECHO_REQ:
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
+               break;
+
+       case L2CAP_ECHO_RSP:
+               break;
+
+       case L2CAP_INFO_REQ:
+               err = l2cap_information_req(conn, cmd, data);
+               break;
+
+       case L2CAP_INFO_RSP:
+               err = l2cap_information_rsp(conn, cmd, data);
+               break;
+
+       default:
+               BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
+                                       struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       switch (cmd->code) {
+       case L2CAP_COMMAND_REJ:
+               return 0;
+
+       case L2CAP_CONN_PARAM_UPDATE_REQ:
+               return l2cap_conn_param_update_req(conn, cmd, data);
+
+       case L2CAP_CONN_PARAM_UPDATE_RSP:
+               return 0;
+
+       default:
+               BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
+               return -EINVAL;
+       }
+}
+
+static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+                                                       struct sk_buff *skb)
 {
        u8 *data = skb->data;
        int len = skb->len;
        struct l2cap_cmd_hdr cmd;
-       int err = 0;
+       int err;
 
        l2cap_raw_recv(conn, skb);
 
@@ -3458,55 +2663,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
                        break;
                }
 
-               switch (cmd.code) {
-               case L2CAP_COMMAND_REJ:
-                       l2cap_command_rej(conn, &cmd, data);
-                       break;
-
-               case L2CAP_CONN_REQ:
-                       err = l2cap_connect_req(conn, &cmd, data);
-                       break;
-
-               case L2CAP_CONN_RSP:
-                       err = l2cap_connect_rsp(conn, &cmd, data);
-                       break;
-
-               case L2CAP_CONF_REQ:
-                       err = l2cap_config_req(conn, &cmd, cmd_len, data);
-                       break;
-
-               case L2CAP_CONF_RSP:
-                       err = l2cap_config_rsp(conn, &cmd, data);
-                       break;
-
-               case L2CAP_DISCONN_REQ:
-                       err = l2cap_disconnect_req(conn, &cmd, data);
-                       break;
-
-               case L2CAP_DISCONN_RSP:
-                       err = l2cap_disconnect_rsp(conn, &cmd, data);
-                       break;
-
-               case L2CAP_ECHO_REQ:
-                       l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
-                       break;
-
-               case L2CAP_ECHO_RSP:
-                       break;
-
-               case L2CAP_INFO_REQ:
-                       err = l2cap_information_req(conn, &cmd, data);
-                       break;
-
-               case L2CAP_INFO_RSP:
-                       err = l2cap_information_rsp(conn, &cmd, data);
-                       break;
-
-               default:
-                       BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
-                       err = -EINVAL;
-                       break;
-               }
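+               /* Dispatch to the LE or BR/EDR handler based on the
+                  underlying HCI link type. */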
+               if (conn->hcon->type == LE_LINK)
+                       err = l2cap_le_sig_cmd(conn, &cmd, data);
+               else
+                       err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
 
                if (err) {
                        struct l2cap_cmd_rej rej;
@@ -4503,6 +3663,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
        BT_DBG("len %d, cid 0x%4.4x", len, cid);
 
        switch (cid) {
+       case L2CAP_CID_LE_SIGNALING:
        case L2CAP_CID_SIGNALING:
                l2cap_sig_channel(conn, skb);
                break;
@@ -4560,7 +3721,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 
        BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
 
-       if (hcon->type != ACL_LINK)
+       if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
                return -EINVAL;
 
        if (!status) {
@@ -4589,7 +3750,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
-       if (hcon->type != ACL_LINK)
+       if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
                return -EINVAL;
 
        l2cap_conn_del(hcon, bt_err(reason));
@@ -4692,12 +3853,15 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
 {
        struct l2cap_conn *conn = hcon->l2cap_data;
 
-       if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
+       if (!conn)
+               conn = l2cap_conn_add(hcon, 0);
+
+       if (!conn)
                goto drop;
 
        BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
 
-       if (flags & ACL_START) {
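+       /* Anything that is not a continuation fragment starts a new
+          L2CAP frame (e.g. start or start-no-flush fragments). */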
+       if (!(flags & ACL_CONT)) {
                struct l2cap_hdr *hdr;
                struct sock *sk;
                u16 cid;
@@ -4803,12 +3967,13 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
        sk_for_each(sk, node, &l2cap_sk_list.head) {
                struct l2cap_pinfo *pi = l2cap_pi(sk);
 
-               seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
+               seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
                                        batostr(&bt_sk(sk)->src),
                                        batostr(&bt_sk(sk)->dst),
                                        sk->sk_state, __le16_to_cpu(pi->psm),
                                        pi->scid, pi->dcid,
-                                       pi->imtu, pi->omtu, pi->sec_level);
+                                       pi->imtu, pi->omtu, pi->sec_level,
+                                       pi->mode);
        }
 
        read_unlock_bh(&l2cap_sk_list.lock);
@@ -4830,32 +3995,6 @@ static const struct file_operations l2cap_debugfs_fops = {
 
 static struct dentry *l2cap_debugfs;
 
-static const struct proto_ops l2cap_sock_ops = {
-       .family         = PF_BLUETOOTH,
-       .owner          = THIS_MODULE,
-       .release        = l2cap_sock_release,
-       .bind           = l2cap_sock_bind,
-       .connect        = l2cap_sock_connect,
-       .listen         = l2cap_sock_listen,
-       .accept         = l2cap_sock_accept,
-       .getname        = l2cap_sock_getname,
-       .sendmsg        = l2cap_sock_sendmsg,
-       .recvmsg        = l2cap_sock_recvmsg,
-       .poll           = bt_sock_poll,
-       .ioctl          = bt_sock_ioctl,
-       .mmap           = sock_no_mmap,
-       .socketpair     = sock_no_socketpair,
-       .shutdown       = l2cap_sock_shutdown,
-       .setsockopt     = l2cap_sock_setsockopt,
-       .getsockopt     = l2cap_sock_getsockopt
-};
-
-static const struct net_proto_family l2cap_sock_family_ops = {
-       .family = PF_BLUETOOTH,
-       .owner  = THIS_MODULE,
-       .create = l2cap_sock_create,
-};
-
 static struct hci_proto l2cap_hci_proto = {
        .name           = "L2CAP",
        .id             = HCI_PROTO_L2CAP,
@@ -4867,23 +4006,17 @@ static struct hci_proto l2cap_hci_proto = {
        .recv_acldata   = l2cap_recv_acldata
 };
 
-static int __init l2cap_init(void)
+int __init l2cap_init(void)
 {
        int err;
 
-       err = proto_register(&l2cap_proto, 0);
+       err = l2cap_init_sockets();
        if (err < 0)
                return err;
 
        _busy_wq = create_singlethread_workqueue("l2cap");
        if (!_busy_wq) {
-               proto_unregister(&l2cap_proto);
-               return -ENOMEM;
-       }
-
-       err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
-       if (err < 0) {
-               BT_ERR("L2CAP socket registration failed");
+               err = -ENOMEM;
                goto error;
        }
 
@@ -4901,49 +4034,26 @@ static int __init l2cap_init(void)
                        BT_ERR("Failed to create L2CAP debug file");
        }
 
-       BT_INFO("L2CAP ver %s", VERSION);
-       BT_INFO("L2CAP socket layer initialized");
-
        return 0;
 
 error:
        destroy_workqueue(_busy_wq);
-       proto_unregister(&l2cap_proto);
+       l2cap_cleanup_sockets();
        return err;
 }
 
-static void __exit l2cap_exit(void)
+void l2cap_exit(void)
 {
        debugfs_remove(l2cap_debugfs);
 
        flush_workqueue(_busy_wq);
        destroy_workqueue(_busy_wq);
 
-       if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
-               BT_ERR("L2CAP socket unregistration failed");
-
        if (hci_unregister_proto(&l2cap_hci_proto) < 0)
                BT_ERR("L2CAP protocol unregistration failed");
 
-       proto_unregister(&l2cap_proto);
-}
-
-void l2cap_load(void)
-{
-       /* Dummy function to trigger automatic L2CAP module loading by
-        * other modules that use L2CAP sockets but don't use any other
-        * symbols from it. */
+       l2cap_cleanup_sockets();
 }
-EXPORT_SYMBOL(l2cap_load);
-
-module_init(l2cap_init);
-module_exit(l2cap_exit);
 
 module_param(disable_ertm, bool, 0644);
 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644 (file)
index 0000000..fc85e7a
--- /dev/null
@@ -0,0 +1,1156 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2000-2001 Qualcomm Incorporated
+   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+   Copyright (C) 2010 Google Inc.
+
+   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP sockets. */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+/* ---- L2CAP timers ---- */
+static void l2cap_sock_timeout(unsigned long arg)
+{
+       struct sock *sk = (struct sock *) arg;
+       int reason;
+
+       BT_DBG("sock %p state %d", sk, sk->sk_state);
+
+       bh_lock_sock(sk);
+
+       if (sock_owned_by_user(sk)) {
+               /* sk is owned by user. Try again later */
+               l2cap_sock_set_timer(sk, HZ / 5);
+               bh_unlock_sock(sk);
+               sock_put(sk);
+               return;
+       }
+
+       if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
+               reason = ECONNREFUSED;
+       else if (sk->sk_state == BT_CONNECT &&
+                               l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+               reason = ECONNREFUSED;
+       else
+               reason = ETIMEDOUT;
+
+       __l2cap_sock_close(sk, reason);
+
+       bh_unlock_sock(sk);
+
+       l2cap_sock_kill(sk);
+       sock_put(sk);
+}
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+       BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+void l2cap_sock_clear_timer(struct sock *sk)
+{
+       BT_DBG("sock %p state %d", sk, sk->sk_state);
+       sk_stop_timer(sk, &sk->sk_timer);
+}
+
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
+{
+       struct sock *sk;
+       struct hlist_node *node;
+       sk_for_each(sk, node, &l2cap_sk_list.head)
+               if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
+                       goto found;
+       sk = NULL;
+found:
+       return sk;
+}
+
+static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+       struct sock *sk = sock->sk;
+       struct sockaddr_l2 la;
+       int len, err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       if (!addr || addr->sa_family != AF_BLUETOOTH)
+               return -EINVAL;
+
+       memset(&la, 0, sizeof(la));
+       len = min_t(unsigned int, sizeof(la), alen);
+       memcpy(&la, addr, len);
+
+       if (la.l2_cid && la.l2_psm)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       if (sk->sk_state != BT_OPEN) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       if (la.l2_psm) {
+               __u16 psm = __le16_to_cpu(la.l2_psm);
+
+               /* PSM must be odd and lsb of upper byte must be 0 */
+               if ((psm & 0x0101) != 0x0001) {
+                       err = -EINVAL;
+                       goto done;
+               }
+
+               /* Restrict usage of well-known PSMs */
+               if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
+                       err = -EACCES;
+                       goto done;
+               }
+       }
+
+       write_lock_bh(&l2cap_sk_list.lock);
+
+       if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+               err = -EADDRINUSE;
+       } else {
+               /* Save source address */
+               bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+               l2cap_pi(sk)->psm   = la.l2_psm;
+               l2cap_pi(sk)->sport = la.l2_psm;
+               sk->sk_state = BT_BOUND;
+
+               if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+                                       __le16_to_cpu(la.l2_psm) == 0x0003)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+       }
+
+       if (la.l2_cid)
+               l2cap_pi(sk)->scid = la.l2_cid;
+
+       write_unlock_bh(&l2cap_sk_list.lock);
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct sockaddr_l2 la;
+       int len, err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       if (!addr || alen < sizeof(addr->sa_family) ||
+           addr->sa_family != AF_BLUETOOTH)
+               return -EINVAL;
+
+       memset(&la, 0, sizeof(la));
+       len = min_t(unsigned int, sizeof(la), alen);
+       memcpy(&la, addr, len);
+
+       if (la.l2_cid && la.l2_psm)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+                       && !(la.l2_psm || la.l2_cid)) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       switch (l2cap_pi(sk)->mode) {
+       case L2CAP_MODE_BASIC:
+               break;
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               if (!disable_ertm)
+                       break;
+               /* fall through */
+       default:
+               err = -ENOTSUPP;
+               goto done;
+       }
+
+       switch (sk->sk_state) {
+       case BT_CONNECT:
+       case BT_CONNECT2:
+       case BT_CONFIG:
+               /* Already connecting */
+               goto wait;
+
+       case BT_CONNECTED:
+               /* Already connected */
+               err = -EISCONN;
+               goto done;
+
+       case BT_OPEN:
+       case BT_BOUND:
+               /* Can connect */
+               break;
+
+       default:
+               err = -EBADFD;
+               goto done;
+       }
+
+       /* PSM must be odd and lsb of upper byte must be 0 */
+       if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
+                               sk->sk_type != SOCK_RAW && !la.l2_cid) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       /* Set destination address and psm */
+       bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
+       l2cap_pi(sk)->psm = la.l2_psm;
+       l2cap_pi(sk)->dcid = la.l2_cid;
+
+       err = l2cap_do_connect(sk);
+       if (err)
+               goto done;
+
+wait:
+       err = bt_sock_wait_state(sk, BT_CONNECTED,
+                       sock_sndtimeo(sk, flags & O_NONBLOCK));
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_listen(struct socket *sock, int backlog)
+{
+       struct sock *sk = sock->sk;
+       int err = 0;
+
+       BT_DBG("sk %p backlog %d", sk, backlog);
+
+       lock_sock(sk);
+
+       if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+                       || sk->sk_state != BT_BOUND) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       switch (l2cap_pi(sk)->mode) {
+       case L2CAP_MODE_BASIC:
+               break;
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               if (!disable_ertm)
+                       break;
+               /* fall through */
+       default:
+               err = -ENOTSUPP;
+               goto done;
+       }
+
+       if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
+               bdaddr_t *src = &bt_sk(sk)->src;
+               u16 psm;
+
+               err = -EINVAL;
+
+               write_lock_bh(&l2cap_sk_list.lock);
+
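+               /* No PSM specified: grab the first free dynamic PSM
+                  (odd values in the 0x1001-0x10ff range). */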
+               for (psm = 0x1001; psm < 0x1100; psm += 2)
+                       if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+                               l2cap_pi(sk)->psm   = cpu_to_le16(psm);
+                               l2cap_pi(sk)->sport = cpu_to_le16(psm);
+                               err = 0;
+                               break;
+                       }
+
+               write_unlock_bh(&l2cap_sk_list.lock);
+
+               if (err < 0)
+                       goto done;
+       }
+
+       sk->sk_max_ack_backlog = backlog;
+       sk->sk_ack_backlog = 0;
+       sk->sk_state = BT_LISTEN;
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       struct sock *sk = sock->sk, *nsk;
+       long timeo;
+       int err = 0;
+
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+       if (sk->sk_state != BT_LISTEN) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+       BT_DBG("sk %p timeo %ld", sk, timeo);
+
+       /* Wait for an incoming connection (wake-one). */
+       add_wait_queue_exclusive(sk_sleep(sk), &wait);
+       while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (!timeo) {
+                       err = -EAGAIN;
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+               if (sk->sk_state != BT_LISTEN) {
+                       err = -EBADFD;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
+               }
+       }
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       if (err)
+               goto done;
+
+       newsock->state = SS_CONNECTED;
+
+       BT_DBG("new socket %p", nsk);
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+{
+       struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+       struct sock *sk = sock->sk;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       addr->sa_family = AF_BLUETOOTH;
+       *len = sizeof(struct sockaddr_l2);
+
+       if (peer) {
+               la->l2_psm = l2cap_pi(sk)->psm;
+               bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       } else {
+               la->l2_psm = l2cap_pi(sk)->sport;
+               bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
+       }
+
+       return 0;
+}
+
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+{
+       struct sock *sk = sock->sk;
+       struct l2cap_options opts;
+       struct l2cap_conninfo cinfo;
+       int len, err = 0;
+       u32 opt;
+
+       BT_DBG("sk %p", sk);
+
+       if (get_user(len, optlen))
+               return -EFAULT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case L2CAP_OPTIONS:
+               memset(&opts, 0, sizeof(opts));
+               opts.imtu     = l2cap_pi(sk)->imtu;
+               opts.omtu     = l2cap_pi(sk)->omtu;
+               opts.flush_to = l2cap_pi(sk)->flush_to;
+               opts.mode     = l2cap_pi(sk)->mode;
+               opts.fcs      = l2cap_pi(sk)->fcs;
+               opts.max_tx   = l2cap_pi(sk)->max_tx;
+               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+               len = min_t(unsigned int, len, sizeof(opts));
+               if (copy_to_user(optval, (char *) &opts, len))
+                       err = -EFAULT;
+
+               break;
+
+       case L2CAP_LM:
+               switch (l2cap_pi(sk)->sec_level) {
+               case BT_SECURITY_LOW:
+                       opt = L2CAP_LM_AUTH;
+                       break;
+               case BT_SECURITY_MEDIUM:
+                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
+                       break;
+               case BT_SECURITY_HIGH:
+                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+                                                       L2CAP_LM_SECURE;
+                       break;
+               default:
+                       opt = 0;
+                       break;
+               }
+
+               if (l2cap_pi(sk)->role_switch)
+                       opt |= L2CAP_LM_MASTER;
+
+               if (l2cap_pi(sk)->force_reliable)
+                       opt |= L2CAP_LM_RELIABLE;
+
+               if (put_user(opt, (u32 __user *) optval))
+                       err = -EFAULT;
+               break;
+
+       case L2CAP_CONNINFO:
+               if (sk->sk_state != BT_CONNECTED &&
+                                       !(sk->sk_state == BT_CONNECT2 &&
+                                               bt_sk(sk)->defer_setup)) {
+                       err = -ENOTCONN;
+                       break;
+               }
+
+               cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+               memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+
+               len = min_t(unsigned int, len, sizeof(cinfo));
+               if (copy_to_user(optval, (char *) &cinfo, len))
+                       err = -EFAULT;
+
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+{
+       struct sock *sk = sock->sk;
+       struct bt_security sec;
+       int len, err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       if (level == SOL_L2CAP)
+               return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
+
+       if (level != SOL_BLUETOOTH)
+               return -ENOPROTOOPT;
+
+       if (get_user(len, optlen))
+               return -EFAULT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case BT_SECURITY:
+               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+                               && sk->sk_type != SOCK_RAW) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               sec.level = l2cap_pi(sk)->sec_level;
+
+               len = min_t(unsigned int, len, sizeof(sec));
+               if (copy_to_user(optval, (char *) &sec, len))
+                       err = -EFAULT;
+
+               break;
+
+       case BT_DEFER_SETUP:
+               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+                       err = -EFAULT;
+
+               break;
+
+       case BT_FLUSHABLE:
+               if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
+                       err = -EFAULT;
+
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+{
+       struct sock *sk = sock->sk;
+       struct l2cap_options opts;
+       int len, err = 0;
+       u32 opt;
+
+       BT_DBG("sk %p", sk);
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case L2CAP_OPTIONS:
+               if (sk->sk_state == BT_CONNECTED) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               opts.imtu     = l2cap_pi(sk)->imtu;
+               opts.omtu     = l2cap_pi(sk)->omtu;
+               opts.flush_to = l2cap_pi(sk)->flush_to;
+               opts.mode     = l2cap_pi(sk)->mode;
+               opts.fcs      = l2cap_pi(sk)->fcs;
+               opts.max_tx   = l2cap_pi(sk)->max_tx;
+               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+               len = min_t(unsigned int, sizeof(opts), optlen);
+               if (copy_from_user((char *) &opts, optval, len)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               l2cap_pi(sk)->mode = opts.mode;
+               switch (l2cap_pi(sk)->mode) {
+               case L2CAP_MODE_BASIC:
+                       l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+                       break;
+               case L2CAP_MODE_ERTM:
+               case L2CAP_MODE_STREAMING:
+                       if (!disable_ertm)
+                               break;
+                       /* fall through */
+               default:
+                       err = -EINVAL;
+                       break;
+               }
+
+               l2cap_pi(sk)->imtu = opts.imtu;
+               l2cap_pi(sk)->omtu = opts.omtu;
+               l2cap_pi(sk)->fcs  = opts.fcs;
+               l2cap_pi(sk)->max_tx = opts.max_tx;
+               l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
+               break;
+
+       case L2CAP_LM:
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opt & L2CAP_LM_AUTH)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
+               if (opt & L2CAP_LM_ENCRYPT)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
+               if (opt & L2CAP_LM_SECURE)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+
+               l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
+               l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+{
+       struct sock *sk = sock->sk;
+       struct bt_security sec;
+       int len, err = 0;
+       u32 opt;
+
+       BT_DBG("sk %p", sk);
+
+       if (level == SOL_L2CAP)
+               return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
+
+       if (level != SOL_BLUETOOTH)
+               return -ENOPROTOOPT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case BT_SECURITY:
+               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+                               && sk->sk_type != SOCK_RAW) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               sec.level = BT_SECURITY_LOW;
+
+               len = min_t(unsigned int, sizeof(sec), optlen);
+               if (copy_from_user((char *) &sec, optval, len)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (sec.level < BT_SECURITY_LOW ||
+                                       sec.level > BT_SECURITY_HIGH) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               l2cap_pi(sk)->sec_level = sec.level;
+               break;
+
+       case BT_DEFER_SETUP:
+               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               bt_sk(sk)->defer_setup = opt;
+               break;
+
+       case BT_FLUSHABLE:
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opt > BT_FLUSHABLE_ON) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (opt == BT_FLUSHABLE_OFF) {
+                       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+                       /* proceed further only when we have an l2cap_conn and
+                          No Flush support in the LM */
+                       if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
+                               err = -EINVAL;
+                               break;
+                       }
+               }
+
+               l2cap_pi(sk)->flushable = opt;
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+{
+       struct sock *sk = sock->sk;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb;
+       u16 control;
+       int err;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       err = sock_error(sk);
+       if (err)
+               return err;
+
+       if (msg->msg_flags & MSG_OOB)
+               return -EOPNOTSUPP;
+
+       lock_sock(sk);
+
+       if (sk->sk_state != BT_CONNECTED) {
+               err = -ENOTCONN;
+               goto done;
+       }
+
+       /* Connectionless channel */
+       if (sk->sk_type == SOCK_DGRAM) {
+               skb = l2cap_create_connless_pdu(sk, msg, len);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+               } else {
+                       l2cap_do_send(sk, skb);
+                       err = len;
+               }
+               goto done;
+       }
+
+       switch (pi->mode) {
+       case L2CAP_MODE_BASIC:
+               /* Check outgoing MTU */
+               if (len > pi->omtu) {
+                       err = -EMSGSIZE;
+                       goto done;
+               }
+
+               /* Create a basic PDU */
+               skb = l2cap_create_basic_pdu(sk, msg, len);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+                       goto done;
+               }
+
+               l2cap_do_send(sk, skb);
+               err = len;
+               break;
+
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               /* Entire SDU fits into one PDU */
+               if (len <= pi->remote_mps) {
+                       control = L2CAP_SDU_UNSEGMENTED;
+                       skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
+                       if (IS_ERR(skb)) {
+                               err = PTR_ERR(skb);
+                               goto done;
+                       }
+                       __skb_queue_tail(TX_QUEUE(sk), skb);
+
+                       if (sk->sk_send_head == NULL)
+                               sk->sk_send_head = skb;
+
+               } else {
+                       /* Segment the SDU into multiple PDUs */
+                       err = l2cap_sar_segment_sdu(sk, msg, len);
+                       if (err < 0)
+                               goto done;
+               }
+
+               if (pi->mode == L2CAP_MODE_STREAMING) {
+                       l2cap_streaming_send(sk);
+               } else {
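+                       /* If the remote side is busy and we are still waiting
+                          for a frame with the F-bit set, leave the data
+                          queued; it goes out once the remote recovers. */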
+                       if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+                                       (pi->conn_state & L2CAP_CONN_WAIT_F)) {
+                               err = len;
+                               break;
+                       }
+                       err = l2cap_ertm_send(sk);
+               }
+
+               if (err >= 0)
+                       err = len;
+               break;
+
+       default:
+               BT_DBG("bad state %1.1x", pi->mode);
+               err = -EBADFD;
+       }
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+{
+       struct sock *sk = sock->sk;
+
+       lock_sock(sk);
+
+       if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+               struct l2cap_conn_rsp rsp;
+               struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+               u8 buf[128];
+
+               sk->sk_state = BT_CONFIG;
+
+               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+               rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+               rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+               l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
+                       release_sock(sk);
+                       return 0;
+               }
+
+               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+                               l2cap_build_conf_req(sk, buf), buf);
+               l2cap_pi(sk)->num_conf_req++;
+
+               release_sock(sk);
+               return 0;
+       }
+
+       release_sock(sk);
+
+       if (sock->type == SOCK_STREAM)
+               return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+
+       return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+}
+
+/* Kill socket (only if zapped and orphan)
+ * Must be called on unlocked socket.
+ */
+void l2cap_sock_kill(struct sock *sk)
+{
+       if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+               return;
+
+       BT_DBG("sk %p state %d", sk, sk->sk_state);
+
+       /* Kill poor orphan */
+       bt_sock_unlink(&l2cap_sk_list, sk);
+       sock_set_flag(sk, SOCK_DEAD);
+       sock_put(sk);
+}
+
+/* Must be called on unlocked socket. */
+static void l2cap_sock_close(struct sock *sk)
+{
+       l2cap_sock_clear_timer(sk);
+       lock_sock(sk);
+       __l2cap_sock_close(sk, ECONNRESET);
+       release_sock(sk);
+       l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+       struct sock *sk;
+
+       BT_DBG("parent %p", parent);
+
+       /* Close not yet accepted channels */
+       while ((sk = bt_accept_dequeue(parent, NULL)))
+               l2cap_sock_close(sk);
+
+       parent->sk_state = BT_CLOSED;
+       sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+void __l2cap_sock_close(struct sock *sk, int reason)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+       BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+
+       switch (sk->sk_state) {
+       case BT_LISTEN:
+               l2cap_sock_cleanup_listen(sk);
+               break;
+
+       case BT_CONNECTED:
+       case BT_CONFIG:
+               if ((sk->sk_type == SOCK_SEQPACKET ||
+                                       sk->sk_type == SOCK_STREAM) &&
+                                       conn->hcon->type == ACL_LINK) {
+                       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+                       l2cap_send_disconn_req(conn, sk, reason);
+               } else
+                       l2cap_chan_del(sk, reason);
+               break;
+
+       case BT_CONNECT2:
+               if ((sk->sk_type == SOCK_SEQPACKET ||
+                                       sk->sk_type == SOCK_STREAM) &&
+                                       conn->hcon->type == ACL_LINK) {
+                       struct l2cap_conn_rsp rsp;
+                       __u16 result;
+
+                       if (bt_sk(sk)->defer_setup)
+                               result = L2CAP_CR_SEC_BLOCK;
+                       else
+                               result = L2CAP_CR_BAD_PSM;
+
+                       rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+                       rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+                       rsp.result = cpu_to_le16(result);
+                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+               } else
+                       l2cap_chan_del(sk, reason);
+               break;
+
+       case BT_CONNECT:
+       case BT_DISCONN:
+               l2cap_chan_del(sk, reason);
+               break;
+
+       default:
+               sock_set_flag(sk, SOCK_ZAPPED);
+               break;
+       }
+}
+
+static int l2cap_sock_shutdown(struct socket *sock, int how)
+{
+       struct sock *sk = sock->sk;
+       int err = 0;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+       if (!sk->sk_shutdown) {
+               if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+                       err = __l2cap_wait_ack(sk);
+
+               sk->sk_shutdown = SHUTDOWN_MASK;
+               l2cap_sock_clear_timer(sk);
+               __l2cap_sock_close(sk, 0);
+
+               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+                       err = bt_sock_wait_state(sk, BT_CLOSED,
+                                                       sk->sk_lingertime);
+       }
+
+       if (!err && sk->sk_err)
+               err = -sk->sk_err;
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       int err;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       if (!sk)
+               return 0;
+
+       err = l2cap_sock_shutdown(sock, 2);
+
+       sock_orphan(sk);
+       l2cap_sock_kill(sk);
+       return err;
+}
+
+static void l2cap_sock_destruct(struct sock *sk)
+{
+       BT_DBG("sk %p", sk);
+
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_write_queue);
+}
+
+void l2cap_sock_init(struct sock *sk, struct sock *parent)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+       BT_DBG("sk %p", sk);
+
+       if (parent) {
+               sk->sk_type = parent->sk_type;
+               bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+
+               pi->imtu = l2cap_pi(parent)->imtu;
+               pi->omtu = l2cap_pi(parent)->omtu;
+               pi->conf_state = l2cap_pi(parent)->conf_state;
+               pi->mode = l2cap_pi(parent)->mode;
+               pi->fcs  = l2cap_pi(parent)->fcs;
+               pi->max_tx = l2cap_pi(parent)->max_tx;
+               pi->tx_win = l2cap_pi(parent)->tx_win;
+               pi->sec_level = l2cap_pi(parent)->sec_level;
+               pi->role_switch = l2cap_pi(parent)->role_switch;
+               pi->force_reliable = l2cap_pi(parent)->force_reliable;
+               pi->flushable = l2cap_pi(parent)->flushable;
+       } else {
+               pi->imtu = L2CAP_DEFAULT_MTU;
+               pi->omtu = 0;
+               if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
+                       pi->mode = L2CAP_MODE_ERTM;
+                       pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+               } else {
+                       pi->mode = L2CAP_MODE_BASIC;
+               }
+               pi->max_tx = L2CAP_DEFAULT_MAX_TX;
+               pi->fcs  = L2CAP_FCS_CRC16;
+               pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+               pi->sec_level = BT_SECURITY_LOW;
+               pi->role_switch = 0;
+               pi->force_reliable = 0;
+               pi->flushable = BT_FLUSHABLE_OFF;
+       }
+
+       /* Default config options */
+       pi->conf_len = 0;
+       pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+       skb_queue_head_init(TX_QUEUE(sk));
+       skb_queue_head_init(SREJ_QUEUE(sk));
+       skb_queue_head_init(BUSY_QUEUE(sk));
+       INIT_LIST_HEAD(SREJ_LIST(sk));
+}
+
+static struct proto l2cap_proto = {
+       .name           = "L2CAP",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct l2cap_pinfo)
+};
+
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+{
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+       if (!sk)
+               return NULL;
+
+       sock_init_data(sock, sk);
+       INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+
+       sk->sk_destruct = l2cap_sock_destruct;
+       sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+
+       sock_reset_flag(sk, SOCK_ZAPPED);
+
+       sk->sk_protocol = proto;
+       sk->sk_state = BT_OPEN;
+
+       setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+
+       bt_sock_link(&l2cap_sk_list, sk);
+       return sk;
+}
+
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+                            int kern)
+{
+       struct sock *sk;
+
+       BT_DBG("sock %p", sock);
+
+       sock->state = SS_UNCONNECTED;
+
+       if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
+                       sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+               return -ESOCKTNOSUPPORT;
+
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+               return -EPERM;
+
+       sock->ops = &l2cap_sock_ops;
+
+       sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+       if (!sk)
+               return -ENOMEM;
+
+       l2cap_sock_init(sk, NULL);
+       return 0;
+}
+
+const struct proto_ops l2cap_sock_ops = {
+       .family         = PF_BLUETOOTH,
+       .owner          = THIS_MODULE,
+       .release        = l2cap_sock_release,
+       .bind           = l2cap_sock_bind,
+       .connect        = l2cap_sock_connect,
+       .listen         = l2cap_sock_listen,
+       .accept         = l2cap_sock_accept,
+       .getname        = l2cap_sock_getname,
+       .sendmsg        = l2cap_sock_sendmsg,
+       .recvmsg        = l2cap_sock_recvmsg,
+       .poll           = bt_sock_poll,
+       .ioctl          = bt_sock_ioctl,
+       .mmap           = sock_no_mmap,
+       .socketpair     = sock_no_socketpair,
+       .shutdown       = l2cap_sock_shutdown,
+       .setsockopt     = l2cap_sock_setsockopt,
+       .getsockopt     = l2cap_sock_getsockopt
+};
+
+static const struct net_proto_family l2cap_sock_family_ops = {
+       .family = PF_BLUETOOTH,
+       .owner  = THIS_MODULE,
+       .create = l2cap_sock_create,
+};
+
+int __init l2cap_init_sockets(void)
+{
+       int err;
+
+       err = proto_register(&l2cap_proto, 0);
+       if (err < 0)
+               return err;
+
+       err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
+       if (err < 0)
+               goto error;
+
+       BT_INFO("L2CAP socket layer initialized");
+
+       return 0;
+
+error:
+       BT_ERR("L2CAP socket registration failed");
+       proto_unregister(&l2cap_proto);
+       return err;
+}
+
+void l2cap_cleanup_sockets(void)
+{
+       if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
+               BT_ERR("L2CAP socket unregistration failed");
+
+       proto_unregister(&l2cap_proto);
+}
index f827fd9..0054c74 100644 (file)
@@ -22,7 +22,7 @@
 
 /* Bluetooth HCI Management interface */
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #define MGMT_VERSION   0
 #define MGMT_REVISION  1
 
-static int cmd_status(struct sock *sk, u16 cmd, u8 status)
+struct pending_cmd {
+       struct list_head list;
+       __u16 opcode;
+       int index;
+       void *cmd;
+       struct sock *sk;
+       void *user_data;
+};
+
+LIST_HEAD(cmd_list);
+
+static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
        struct mgmt_ev_cmd_status *ev;
 
-       BT_DBG("sock %p", sk);
+       BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
 
        skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
        if (!skb)
@@ -47,6 +58,7 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
        hdr = (void *) skb_put(skb, sizeof(*hdr));
 
        hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+       hdr->index = cpu_to_le16(index);
        hdr->len = cpu_to_le16(sizeof(*ev));
 
        ev = (void *) skb_put(skb, sizeof(*ev));
@@ -59,29 +71,30 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
        return 0;
 }
 
-static int read_version(struct sock *sk)
+static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
+                                                               size_t rp_len)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
        struct mgmt_ev_cmd_complete *ev;
-       struct mgmt_rp_read_version *rp;
 
        BT_DBG("sock %p", sk);
 
-       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;
 
        hdr = (void *) skb_put(skb, sizeof(*hdr));
+
        hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+       hdr->index = cpu_to_le16(index);
+       hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
 
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode);
+       ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+       put_unaligned_le16(cmd, &ev->opcode);
 
-       rp = (void *) skb_put(skb, sizeof(*rp));
-       rp->version = MGMT_VERSION;
-       put_unaligned_le16(MGMT_REVISION, &rp->revision);
+       if (rp)
+               memcpy(ev->data, rp, rp_len);
 
        if (sock_queue_rcv_skb(sk, skb) < 0)
                kfree_skb(skb);
@@ -89,16 +102,26 @@ static int read_version(struct sock *sk)
        return 0;
 }
 
+static int read_version(struct sock *sk)
+{
+       struct mgmt_rp_read_version rp;
+
+       BT_DBG("sock %p", sk);
+
+       rp.version = MGMT_VERSION;
+       put_unaligned_le16(MGMT_REVISION, &rp.revision);
+
+       return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
+                                                               sizeof(rp));
+}
+
 static int read_index_list(struct sock *sk)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-       struct mgmt_ev_cmd_complete *ev;
        struct mgmt_rp_read_index_list *rp;
        struct list_head *p;
-       size_t body_len;
+       size_t rp_len;
        u16 count;
-       int i;
+       int i, err;
 
        BT_DBG("sock %p", sk);
 
@@ -109,164 +132,340 @@ static int read_index_list(struct sock *sk)
                count++;
        }
 
-       body_len = sizeof(*ev) + sizeof(*rp) + (2 * count);
-       skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC);
-       if (!skb)
+       rp_len = sizeof(*rp) + (2 * count);
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
+       }
 
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->len = cpu_to_le16(body_len);
-
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
-
-       rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
        put_unaligned_le16(count, &rp->num_controllers);
 
        i = 0;
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
+
+               hci_del_off_timer(d);
+
+               set_bit(HCI_MGMT, &d->flags);
+
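+               /* Skip controllers that are still being set up; they are not
+                  yet ready to be exposed over the management interface. */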
+               if (test_bit(HCI_SETUP, &d->flags))
+                       continue;
+
                put_unaligned_le16(d->id, &rp->index[i++]);
                BT_DBG("Added hci%u", d->id);
        }
 
        read_unlock(&hci_dev_list_lock);
 
-       if (sock_queue_rcv_skb(sk, skb) < 0)
-               kfree_skb(skb);
+       err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
+                                                                       rp_len);
 
-       return 0;
+       kfree(rp);
+
+       return err;
 }
 
-static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
+static int read_controller_info(struct sock *sk, u16 index)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-       struct mgmt_ev_cmd_complete *ev;
-       struct mgmt_rp_read_info *rp;
-       struct mgmt_cp_read_info *cp;
+       struct mgmt_rp_read_info rp;
        struct hci_dev *hdev;
-       u16 dev_id;
 
-       BT_DBG("sock %p", sk);
+       BT_DBG("sock %p hci%u", sk, index);
 
-       if (len != 2)
-               return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
 
-       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
-       if (!skb)
-               return -ENOMEM;
+       hci_del_off_timer(hdev);
 
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+       hci_dev_lock_bh(hdev);
 
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
+       set_bit(HCI_MGMT, &hdev->flags);
 
-       rp = (void *) skb_put(skb, sizeof(*rp));
+       rp.type = hdev->dev_type;
 
-       cp = (void *) data;
-       dev_id = get_unaligned_le16(&cp->index);
+       rp.powered = test_bit(HCI_UP, &hdev->flags);
+       rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
+       rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
+       rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
+
+       if (test_bit(HCI_AUTH, &hdev->flags))
+               rp.sec_mode = 3;
+       else if (hdev->ssp_mode > 0)
+               rp.sec_mode = 4;
+       else
+               rp.sec_mode = 2;
 
-       BT_DBG("request for hci%u", dev_id);
+       bacpy(&rp.bdaddr, &hdev->bdaddr);
+       memcpy(rp.features, hdev->features, 8);
+       memcpy(rp.dev_class, hdev->dev_class, 3);
+       put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+       rp.hci_ver = hdev->hci_ver;
+       put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
 
-       hdev = hci_dev_get(dev_id);
-       if (!hdev) {
-               kfree_skb(skb);
-               return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+}
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+       sock_put(cmd->sk);
+       kfree(cmd->cmd);
+       kfree(cmd);
+}
+
+static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+                                               u16 index, void *data, u16 len)
+{
+       struct pending_cmd *cmd;
+
+       cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
+               return NULL;
+
+       cmd->opcode = opcode;
+       cmd->index = index;
+
+       cmd->cmd = kmalloc(len, GFP_ATOMIC);
+       if (!cmd->cmd) {
+               kfree(cmd);
+               return NULL;
+       }
+
+       memcpy(cmd->cmd, data, len);
+
+       cmd->sk = sk;
+       sock_hold(sk);
+
+       list_add(&cmd->list, &cmd_list);
+
+       return cmd;
+}
+
+static void mgmt_pending_foreach(u16 opcode, int index,
+                               void (*cb)(struct pending_cmd *cmd, void *data),
+                               void *data)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &cmd_list) {
+               struct pending_cmd *cmd;
+
+               cmd = list_entry(p, struct pending_cmd, list);
+
+               if (cmd->opcode != opcode)
+                       continue;
+
+               if (index >= 0 && cmd->index != index)
+                       continue;
+
+               cb(cmd, data);
+       }
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+{
+       struct list_head *p;
+
+       list_for_each(p, &cmd_list) {
+               struct pending_cmd *cmd;
+
+               cmd = list_entry(p, struct pending_cmd, list);
+
+               if (cmd->opcode != opcode)
+                       continue;
+
+               if (index >= 0 && cmd->index != index)
+                       continue;
+
+               return cmd;
        }
 
+       return NULL;
+}
+
+static void mgmt_pending_remove(struct pending_cmd *cmd)
+{
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+}
+
+static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct mgmt_mode *cp;
+       struct hci_dev *hdev;
+       struct pending_cmd *cmd;
+       int err, up;
+
+       cp = (void *) data;
+
+       BT_DBG("request for hci%u", index);
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+
        hci_dev_lock_bh(hdev);
 
-       put_unaligned_le16(hdev->id, &rp->index);
-       rp->type = hdev->dev_type;
+       up = test_bit(HCI_UP, &hdev->flags);
+       if ((cp->val && up) || (!cp->val && !up)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+               goto failed;
+       }
 
-       rp->powered = test_bit(HCI_UP, &hdev->flags);
-       rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
-       rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
+       if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+               goto failed;
+       }
 
-       if (test_bit(HCI_AUTH, &hdev->flags))
-               rp->sec_mode = 3;
-       else if (hdev->ssp_mode > 0)
-               rp->sec_mode = 4;
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       if (cp->val)
+               queue_work(hdev->workqueue, &hdev->power_on);
        else
-               rp->sec_mode = 2;
+               queue_work(hdev->workqueue, &hdev->power_off);
 
-       bacpy(&rp->bdaddr, &hdev->bdaddr);
-       memcpy(rp->features, hdev->features, 8);
-       memcpy(rp->dev_class, hdev->dev_class, 3);
-       put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
-       rp->hci_ver = hdev->hci_ver;
-       put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
+       err = 0;
 
+failed:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
+       return err;
+}
 
-       if (sock_queue_rcv_skb(sk, skb) < 0)
-               kfree_skb(skb);
+static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct mgmt_mode *cp;
+       struct hci_dev *hdev;
+       struct pending_cmd *cmd;
+       u8 scan;
+       int err;
 
-       return 0;
+       cp = (void *) data;
+
+       BT_DBG("request for hci%u", index);
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+               goto failed;
+       }
+
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
+                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+               goto failed;
+       }
+
+       if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+                                       test_bit(HCI_PSCAN, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       scan = SCAN_PAGE;
+
+       if (cp->val)
+               scan |= SCAN_INQUIRY;
+
+       err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
 }
 
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
 {
-       unsigned char *buf;
-       struct mgmt_hdr *hdr;
-       u16 opcode, len;
+       struct mgmt_mode *cp;
+       struct hci_dev *hdev;
+       struct pending_cmd *cmd;
+       u8 scan;
        int err;
 
-       BT_DBG("got %zu bytes", msglen);
+       cp = (void *) data;
 
-       if (msglen < sizeof(*hdr))
-               return -EINVAL;
+       BT_DBG("request for hci%u", index);
 
-       buf = kmalloc(msglen, GFP_ATOMIC);
-       if (!buf)
-               return -ENOMEM;
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
 
-       if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
-               err = -EFAULT;
-               goto done;
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+               goto failed;
        }
 
-       hdr = (struct mgmt_hdr *) buf;
-       opcode = get_unaligned_le16(&hdr->opcode);
-       len = get_unaligned_le16(&hdr->len);
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
+                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+               goto failed;
+       }
 
-       if (len != msglen - sizeof(*hdr)) {
-               err = -EINVAL;
-               goto done;
+       if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+               goto failed;
        }
 
-       switch (opcode) {
-       case MGMT_OP_READ_VERSION:
-               err = read_version(sk);
-               break;
-       case MGMT_OP_READ_INDEX_LIST:
-               err = read_index_list(sk);
-               break;
-       case MGMT_OP_READ_INFO:
-               err = read_controller_info(sk, buf + sizeof(*hdr), len);
-               break;
-       default:
-               BT_DBG("Unknown op %u", opcode);
-               err = cmd_status(sk, opcode, 0x01);
-               break;
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
        }
 
+       if (cp->val)
+               scan = SCAN_PAGE;
+       else
+               scan = 0;
+
+       err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        if (err < 0)
-               goto done;
+               mgmt_pending_remove(cmd);
 
-       err = msglen;
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
 
-done:
-       kfree(buf);
        return err;
 }
 
-static int mgmt_event(u16 event, void *data, u16 data_len)
+static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
+                                                       struct sock *skip_sk)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
@@ -279,30 +478,1168 @@ static int mgmt_event(u16 event, void *data, u16 data_len)
 
        hdr = (void *) skb_put(skb, sizeof(*hdr));
        hdr->opcode = cpu_to_le16(event);
+       hdr->index = cpu_to_le16(index);
        hdr->len = cpu_to_le16(data_len);
 
-       memcpy(skb_put(skb, data_len), data, data_len);
+       if (data)
+               memcpy(skb_put(skb, data_len), data, data_len);
 
-       hci_send_to_sock(NULL, skb);
+       hci_send_to_sock(NULL, skb, skip_sk);
        kfree_skb(skb);
 
        return 0;
 }
 
-int mgmt_index_added(u16 index)
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
 {
-       struct mgmt_ev_index_added ev;
+       struct mgmt_mode rp;
 
-       put_unaligned_le16(index, &ev.index);
+       rp.val = val;
 
-       return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev));
+       return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
 }
 
-int mgmt_index_removed(u16 index)
+static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct mgmt_mode *cp, ev;
+       struct hci_dev *hdev;
+       int err;
+
+       cp = (void *) data;
+
+       BT_DBG("request for hci%u", index);
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (cp->val)
+               set_bit(HCI_PAIRABLE, &hdev->flags);
+       else
+               clear_bit(HCI_PAIRABLE, &hdev->flags);
+
+       err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
+       if (err < 0)
+               goto failed;
+
+       ev.val = cp->val;
+
+       err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+       struct list_head *p;
+       u8 val = 0;
+
+       list_for_each(p, &hdev->uuids) {
+               struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+
+               val |= uuid->svc_hint;
+       }
+
+       return val;
+}
+
+static int update_class(struct hci_dev *hdev)
+{
+       u8 cod[3];
+
+       BT_DBG("%s", hdev->name);
+
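+       /* Defer class updates while the service cache is active */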
+       if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+               return 0;
+
+       cod[0] = hdev->minor_class;
+       cod[1] = hdev->major_class;
+       cod[2] = get_service_classes(hdev);
+
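+       /* Skip the HCI command if the class is unchanged */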
+       if (memcmp(cod, hdev->dev_class, 3) == 0)
+               return 0;
+
+       return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct mgmt_cp_add_uuid *cp;
+       struct hci_dev *hdev;
+       struct bt_uuid *uuid;
+       int err;
+
+       cp = (void *) data;
+
+       BT_DBG("request for hci%u", index);
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+       if (!uuid) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       memcpy(uuid->uuid, cp->uuid, 16);
+       uuid->svc_hint = cp->svc_hint;
+
+       list_add(&uuid->list, &hdev->uuids);
+
+       err = update_class(hdev);
+       if (err < 0)
+               goto failed;
+
+       err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct list_head *p, *n;
+       struct mgmt_cp_remove_uuid *cp;
+       struct hci_dev *hdev;
+       u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+       int err, found;
+
+       cp = (void *) data;
+
+       BT_DBG("request for hci%u", index);
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
+               err = hci_uuids_clear(hdev);
+               goto unlock;
+       }
+
+       found = 0;
+
+       list_for_each_safe(p, n, &hdev->uuids) {
+               struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
+
+               if (memcmp(match->uuid, cp->uuid, 16) != 0)
+                       continue;
+
+               list_del(&match->list);
+               kfree(match);
+               found++;
+       }
+
+       if (found == 0) {
+               err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+               goto unlock;
+       }
+
+       err = update_class(hdev);
+       if (err < 0)
+               goto unlock;
+
+       err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
+
+unlock:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_set_dev_class *cp;
+       int err;
+
+       cp = (void *) data;
+
+       BT_DBG("request for hci%u", index);
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       hdev->major_class = cp->major;
+       hdev->minor_class = cp->minor;
+
+       err = update_class(hdev);
+
+       if (err == 0)
+               err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_set_service_cache *cp;
+       int err;
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       BT_DBG("hci%u enable %d", index, cp->enable);
+
+       if (cp->enable) {
+               set_bit(HCI_SERVICE_CACHE, &hdev->flags);
+               err = 0;
+       } else {
+               clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
+               err = update_class(hdev);
+       }
+
+       if (err == 0)
+               err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
+                                                                       0);
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_load_keys *cp;
+       u16 key_count, expected_len;
+       int i;
+
+       cp = (void *) data;
+
+       if (len < sizeof(*cp))
+               return -EINVAL;
+
+       key_count = get_unaligned_le16(&cp->key_count);
+
+       expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+       if (expected_len != len) {
+               BT_ERR("load_keys: expected %u bytes, got %u bytes",
+                                                       expected_len, len);
+               return -EINVAL;
+       }
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+
+       BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
+                                                               key_count);
+
+       hci_dev_lock_bh(hdev);
+
+       hci_link_keys_clear(hdev);
+
+       set_bit(HCI_LINK_KEYS, &hdev->flags);
+
+       if (cp->debug_keys)
+               set_bit(HCI_DEBUG_KEYS, &hdev->flags);
+       else
+               clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
+
+       for (i = 0; i < key_count; i++) {
+               struct mgmt_key_info *key = &cp->keys[i];
+
+               hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
+                                                               key->pin_len);
+       }
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return 0;
+}
+
+static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_remove_key *cp;
+       struct hci_conn *conn;
+       int err;
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       err = hci_remove_link_key(hdev, &cp->bdaddr);
+       if (err < 0) {
+               err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+               goto unlock;
+       }
+
+       err = 0;
+
+       if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+               goto unlock;
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+       if (conn) {
+               struct hci_cp_disconnect dc;
+
+               put_unaligned_le16(conn->handle, &dc.handle);
+               dc.reason = 0x13; /* Remote User Terminated Connection */
+               err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       }
+
+unlock:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_disconnect *cp;
+       struct hci_cp_disconnect dc;
+       struct pending_cmd *cmd;
+       struct hci_conn *conn;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+               goto failed;
+       }
+
+       if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+               goto failed;
+       }
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+       if (!conn) {
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       put_unaligned_le16(conn->handle, &dc.handle);
+       dc.reason = 0x13; /* Remote User Terminated Connection */
+
+       err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int get_connections(struct sock *sk, u16 index)
+{
+       struct mgmt_rp_get_connections *rp;
+       struct hci_dev *hdev;
+       struct list_head *p;
+       size_t rp_len;
+       u16 count;
+       int i, err;
+
+       BT_DBG("");
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
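+       /* Count the connections first so the reply can be sized correctly */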
+       count = 0;
+       list_for_each(p, &hdev->conn_hash.list) {
+               count++;
+       }
+
+       rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       put_unaligned_le16(count, &rp->conn_count);
+
+       read_lock(&hci_dev_list_lock);
+
+       i = 0;
+       list_for_each(p, &hdev->conn_hash.list) {
+               struct hci_conn *c = list_entry(p, struct hci_conn, list);
+
+               bacpy(&rp->conn[i++], &c->dst);
+       }
+
+       read_unlock(&hci_dev_list_lock);
+
+       err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
+
+unlock:
+       kfree(rp);
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+       return err;
+}
+
+static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_pin_code_reply *cp;
+       struct hci_cp_pin_code_reply reply;
+       struct pending_cmd *cmd;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       bacpy(&reply.bdaddr, &cp->bdaddr);
+       reply.pin_len = cp->pin_len;
+       memcpy(reply.pin_code, cp->pin_code, 16);
+
+       err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_pin_code_neg_reply *cp;
+       struct pending_cmd *cmd;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+                                                                       EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+                                                                       ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
+                                                               ENETDOWN);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
+                                                               data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
+                                                               &cp->bdaddr);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
+                                                                       u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_set_io_capability *cp;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       hdev->io_capability = cp->io_capability;
+
+       BT_DBG("%s IO capability set to 0x%02x", hdev->name,
+                                                       hdev->io_capability);
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
+}
+
+static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct list_head *p;
+
+       list_for_each(p, &cmd_list) {
+               struct pending_cmd *cmd;
+
+               cmd = list_entry(p, struct pending_cmd, list);
+
+               if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
+                       continue;
+
+               if (cmd->index != hdev->id)
+                       continue;
+
+               if (cmd->user_data != conn)
+                       continue;
+
+               return cmd;
+       }
+
+       return NULL;
+}
+
+static void pairing_complete(struct pending_cmd *cmd, u8 status)
+{
+       struct mgmt_rp_pair_device rp;
+       struct hci_conn *conn = cmd->user_data;
+
+       bacpy(&rp.bdaddr, &conn->dst);
+       rp.status = status;
+
+       cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
+
+       /* So we don't get further callbacks for this connection */
+       conn->connect_cfm_cb = NULL;
+       conn->security_cfm_cb = NULL;
+       conn->disconn_cfm_cb = NULL;
+
+       hci_conn_put(conn);
+
+       mgmt_pending_remove(cmd);
+}
+
+static void pairing_complete_cb(struct hci_conn *conn, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status %u", status);
+
+       cmd = find_pairing(conn);
+       if (!cmd) {
+               BT_DBG("Unable to find a pending command");
+               return;
+       }
+
+       pairing_complete(cmd, status);
+}
+
+static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_pair_device *cp;
+       struct pending_cmd *cmd;
+       u8 sec_level, auth_type;
+       struct hci_conn *conn;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
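+       /* io_cap 0x03 (NoInputNoOutput) rules out MITM protection */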
+       if (cp->io_cap == 0x03) {
+               sec_level = BT_SECURITY_MEDIUM;
+               auth_type = HCI_AT_DEDICATED_BONDING;
+       } else {
+               sec_level = BT_SECURITY_HIGH;
+               auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+       }
+
+       conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
+       if (IS_ERR(conn)) {
+               err = PTR_ERR(conn);
+               goto unlock;
+       }
+
+       if (conn->connect_cfm_cb) {
+               hci_conn_put(conn);
+               err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+               goto unlock;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               hci_conn_put(conn);
+               goto unlock;
+       }
+
+       conn->connect_cfm_cb = pairing_complete_cb;
+       conn->security_cfm_cb = pairing_complete_cb;
+       conn->disconn_cfm_cb = pairing_complete_cb;
+       conn->io_capability = cp->io_cap;
+       cmd->user_data = conn;
+
+       if (conn->state == BT_CONNECTED &&
+                               hci_conn_security(conn, sec_level, auth_type))
+               pairing_complete(cmd, 0);
+
+       err = 0;
+
+unlock:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
+                                                       u16 len, int success)
+{
+       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+       u16 mgmt_op, hci_op;
+       struct pending_cmd *cmd;
+       struct hci_dev *hdev;
+       int err;
+
+       BT_DBG("");
+
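+       /* Pick the mgmt and HCI opcodes matching the confirmation result */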
+       if (success) {
+               mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
+               hci_op = HCI_OP_USER_CONFIRM_REPLY;
+       } else {
+               mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
+               hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
+       }
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, mgmt_op, EINVAL);
+
+       hdev = hci_dev_get(index);
+       if (!hdev)
+               return cmd_status(sk, index, mgmt_op, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, mgmt_op, ENETDOWN);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+{
+       unsigned char *buf;
+       struct mgmt_hdr *hdr;
+       u16 opcode, index, len;
+       int err;
+
+       BT_DBG("got %zu bytes", msglen);
+
+       if (msglen < sizeof(*hdr))
+               return -EINVAL;
+
+       buf = kmalloc(msglen, GFP_ATOMIC);
+       if (!buf)
+               return -ENOMEM;
+
+       if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
+               err = -EFAULT;
+               goto done;
+       }
+
+       hdr = (struct mgmt_hdr *) buf;
+       opcode = get_unaligned_le16(&hdr->opcode);
+       index = get_unaligned_le16(&hdr->index);
+       len = get_unaligned_le16(&hdr->len);
+
+       if (len != msglen - sizeof(*hdr)) {
+               err = -EINVAL;
+               goto done;
+       }
+
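+       /* Dispatch based on the opcode from the management header */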
+       switch (opcode) {
+       case MGMT_OP_READ_VERSION:
+               err = read_version(sk);
+               break;
+       case MGMT_OP_READ_INDEX_LIST:
+               err = read_index_list(sk);
+               break;
+       case MGMT_OP_READ_INFO:
+               err = read_controller_info(sk, index);
+               break;
+       case MGMT_OP_SET_POWERED:
+               err = set_powered(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_DISCOVERABLE:
+               err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_CONNECTABLE:
+               err = set_connectable(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_PAIRABLE:
+               err = set_pairable(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_ADD_UUID:
+               err = add_uuid(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_REMOVE_UUID:
+               err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_DEV_CLASS:
+               err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_SERVICE_CACHE:
+               err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_LOAD_KEYS:
+               err = load_keys(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_REMOVE_KEY:
+               err = remove_key(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_DISCONNECT:
+               err = disconnect(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_GET_CONNECTIONS:
+               err = get_connections(sk, index);
+               break;
+       case MGMT_OP_PIN_CODE_REPLY:
+               err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_PIN_CODE_NEG_REPLY:
+               err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_IO_CAPABILITY:
+               err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_PAIR_DEVICE:
+               err = pair_device(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_USER_CONFIRM_REPLY:
+               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+               break;
+       case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
+               break;
+       default:
+               BT_DBG("Unknown op %u", opcode);
+               err = cmd_status(sk, index, opcode, 0x01);
+               break;
+       }
+
+       if (err < 0)
+               goto done;
+
+       err = msglen;
+
+done:
+       kfree(buf);
+       return err;
+}
+
+int mgmt_index_added(u16 index)
+{
+       return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+}
+
+int mgmt_index_removed(u16 index)
+{
+       return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+}
+
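+/* Tracks the mode value and the socket of a matching pending command */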
+struct cmd_lookup {
+       u8 val;
+       struct sock *sk;
+};
+
+static void mode_rsp(struct pending_cmd *cmd, void *data)
+{
+       struct mgmt_mode *cp = cmd->cmd;
+       struct cmd_lookup *match = data;
+
+       if (cp->val != match->val)
+               return;
+
+       send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+
+       list_del(&cmd->list);
+
+       if (match->sk == NULL) {
+               match->sk = cmd->sk;
+               sock_hold(match->sk);
+       }
+
+       mgmt_pending_free(cmd);
+}
+
+int mgmt_powered(u16 index, u8 powered)
+{
+       struct mgmt_mode ev;
+       struct cmd_lookup match = { powered, NULL };
+       int ret;
+
+       mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+
+       ev.val = powered;
+
+       ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       return ret;
+}
+
+int mgmt_discoverable(u16 index, u8 discoverable)
+{
+       struct mgmt_mode ev;
+       struct cmd_lookup match = { discoverable, NULL };
+       int ret;
+
+       mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
+
+       ev.val = discoverable;
+
+       ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+                                                               match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       return ret;
+}
+
+int mgmt_connectable(u16 index, u8 connectable)
+{
+       struct mgmt_mode ev;
+       struct cmd_lookup match = { connectable, NULL };
+       int ret;
+
+       mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+
+       ev.val = connectable;
+
+       ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       return ret;
+}
+
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
+{
+       struct mgmt_ev_new_key ev;
+
+       memset(&ev, 0, sizeof(ev));
+
+       bacpy(&ev.key.bdaddr, &key->bdaddr);
+       ev.key.type = key->type;
+       memcpy(ev.key.val, key->val, 16);
+       ev.key.pin_len = key->pin_len;
+       ev.old_key_type = old_key_type;
+
+       return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_connected(u16 index, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_connected ev;
+
+       bacpy(&ev.bdaddr, bdaddr);
+
+       return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+}
+
+static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+{
+       struct mgmt_cp_disconnect *cp = cmd->cmd;
+       struct sock **sk = data;
+       struct mgmt_rp_disconnect rp;
+
+       bacpy(&rp.bdaddr, &cp->bdaddr);
+
+       cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
+
+       *sk = cmd->sk;
+       sock_hold(*sk);
+
+       mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_disconnected ev;
+       struct sock *sk = NULL;
+       int err;
+
+       mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+
+       bacpy(&ev.bdaddr, bdaddr);
+
+       err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+
+       if (sk)
+               sock_put(sk);
+
+       return err;
+}
+
+int mgmt_disconnect_failed(u16 index)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+       if (!cmd)
+               return -ENOENT;
+
+       err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       struct mgmt_ev_connect_failed ev;
+
+       bacpy(&ev.bdaddr, bdaddr);
+       ev.status = status;
+
+       return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_pin_code_request ev;
+
+       bacpy(&ev.bdaddr, bdaddr);
+
+       return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+                                                                       NULL);
+}
+
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       struct pending_cmd *cmd;
+       struct mgmt_rp_pin_code_reply rp;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+       if (!cmd)
+               return -ENOENT;
+
+       bacpy(&rp.bdaddr, bdaddr);
+       rp.status = status;
+
+       err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+                                                               sizeof(rp));
+
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       struct pending_cmd *cmd;
+       struct mgmt_rp_pin_code_reply rp;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+       if (!cmd)
+               return -ENOENT;
+
+       bacpy(&rp.bdaddr, bdaddr);
+       rp.status = status;
+
+       err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+                                                               sizeof(rp));
+
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value)
+{
+       struct mgmt_ev_user_confirm_request ev;
+
+       BT_DBG("hci%u", index);
+
+       bacpy(&ev.bdaddr, bdaddr);
+       put_unaligned_le32(value, &ev.value);
+
+       return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+                                                                       NULL);
+}
+
+static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
+                                                               u8 opcode)
+{
+       struct pending_cmd *cmd;
+       struct mgmt_rp_user_confirm_reply rp;
+       int err;
+
+       cmd = mgmt_pending_find(opcode, index);
+       if (!cmd)
+               return -ENOENT;
+
+       bacpy(&rp.bdaddr, bdaddr);
+       rp.status = status;
+       err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       return confirm_reply_complete(index, bdaddr, status,
+                                               MGMT_OP_USER_CONFIRM_REPLY);
+}
+
+int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       return confirm_reply_complete(index, bdaddr, status,
+                                       MGMT_OP_USER_CONFIRM_NEG_REPLY);
+}
+
+int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
 {
-       struct mgmt_ev_index_added ev;
+       struct mgmt_ev_auth_failed ev;
 
-       put_unaligned_le16(index, &ev.index);
+       bacpy(&ev.bdaddr, bdaddr);
+       ev.status = status;
 
-       return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev));
+       return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
 }
index ff8aaa7..c997393 100644 (file)
@@ -1164,7 +1164,8 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                         * initiator rfcomm_process_rx already calls
                         * rfcomm_session_put() */
                        if (s->sock->sk->sk_state != BT_CLOSED)
-                               rfcomm_session_put(s);
+                               if (list_empty(&s->dlcs))
+                                       rfcomm_session_put(s);
                        break;
                }
        }
@@ -2153,8 +2154,6 @@ static int __init rfcomm_init(void)
 {
        int err;
 
-       l2cap_load();
-
        hci_register_cb(&rfcomm_cb);
 
        rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
index 2575c2d..d7b9af4 100644 (file)
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
                        break;
                }
 
+               tty_unlock();
                schedule();
+               tty_lock();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->wait, &wait);
index 960c6d1..42fdffd 100644 (file)
@@ -50,8 +50,6 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/sco.h>
 
-#define VERSION "0.6"
-
 static int disable_esco;
 
 static const struct proto_ops sco_sock_ops;
@@ -192,20 +190,21 @@ static int sco_connect(struct sock *sk)
 
        hci_dev_lock_bh(hdev);
 
-       err = -ENOMEM;
-
        if (lmp_esco_capable(hdev) && !disable_esco)
                type = ESCO_LINK;
        else
                type = SCO_LINK;
 
        hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
-       if (!hcon)
+       if (IS_ERR(hcon)) {
+               err = PTR_ERR(hcon);
                goto done;
+       }
 
        conn = sco_conn_add(hcon, 0);
        if (!conn) {
                hci_conn_put(hcon);
+               err = -ENOMEM;
                goto done;
        }
 
@@ -703,6 +702,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
                        break;
                }
 
+               memset(&cinfo, 0, sizeof(cinfo));
                cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
                memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
 
@@ -1023,7 +1023,7 @@ static struct hci_proto sco_hci_proto = {
        .recv_scodata   = sco_recv_scodata
 };
 
-static int __init sco_init(void)
+int __init sco_init(void)
 {
        int err;
 
@@ -1051,7 +1051,6 @@ static int __init sco_init(void)
                        BT_ERR("Failed to create SCO debug file");
        }
 
-       BT_INFO("SCO (Voice Link) ver %s", VERSION);
        BT_INFO("SCO socket layer initialized");
 
        return 0;
@@ -1061,7 +1060,7 @@ error:
        return err;
 }
 
-static void __exit sco_exit(void)
+void __exit sco_exit(void)
 {
        debugfs_remove(sco_debugfs);
 
@@ -1074,14 +1073,5 @@ static void __exit sco_exit(void)
        proto_unregister(&sco_proto);
 }
 
-module_init(sco_init);
-module_exit(sco_exit);
-
 module_param(disable_esco, bool, 0644);
 MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-2");
index 9190ae4..6dee7bf 100644 (file)
@@ -6,6 +6,7 @@ config BRIDGE
        tristate "802.1d Ethernet Bridging"
        select LLC
        select STP
+       depends on IPV6 || IPV6=n
        ---help---
          If you say Y here, then your Linux box will be able to act as an
          Ethernet bridge, which means that the different Ethernet segments it
index 5564435..21e5901 100644 (file)
@@ -78,6 +78,8 @@ static int br_dev_open(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
+       netif_carrier_off(dev);
+
        br_features_recompute(br);
        netif_start_queue(dev);
        br_stp_enable_bridge(br);
@@ -94,6 +96,8 @@ static int br_dev_stop(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
+       netif_carrier_off(dev);
+
        br_stp_disable_bridge(br);
        br_multicast_stop(br);
 
@@ -297,6 +301,21 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
 #endif
 
+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       return br_add_if(br, slave_dev);
+}
+
+static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       return br_del_if(br, slave_dev);
+}
+
 static const struct ethtool_ops br_ethtool_ops = {
        .get_drvinfo    = br_getinfo,
        .get_link       = ethtool_op_get_link,
@@ -326,6 +345,8 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_netpoll_cleanup     = br_netpoll_cleanup,
        .ndo_poll_controller     = br_poll_controller,
 #endif
+       .ndo_add_slave           = br_add_slave,
+       .ndo_del_slave           = br_del_slave,
 };
 
 static void br_dev_free(struct net_device *dev)
index 2872393..88485cc 100644 (file)
@@ -328,12 +328,12 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
        fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
        if (fdb) {
                memcpy(fdb->addr.addr, addr, ETH_ALEN);
-               hlist_add_head_rcu(&fdb->hlist, head);
-
                fdb->dst = source;
                fdb->is_local = is_local;
                fdb->is_static = is_local;
                fdb->ageing_timer = jiffies;
+
+               hlist_add_head_rcu(&fdb->hlist, head);
        }
        return fdb;
 }
index d9d1e2b..dce8f00 100644 (file)
@@ -148,6 +148,8 @@ static void del_nbp(struct net_bridge_port *p)
 
        netdev_rx_handler_unregister(dev);
 
+       netdev_set_master(dev, NULL);
+
        br_multicast_del_port(p);
 
        kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -365,7 +367,7 @@ int br_min_mtu(const struct net_bridge *br)
 void br_features_recompute(struct net_bridge *br)
 {
        struct net_bridge_port *p;
-       unsigned long features, mask;
+       u32 features, mask;
 
        features = mask = br->feature_mask;
        if (list_empty(&br->port_list))
@@ -379,7 +381,7 @@ void br_features_recompute(struct net_bridge *br)
        }
 
 done:
-       br->dev->features = netdev_fix_features(features, NULL);
+       br->dev->features = netdev_fix_features(br->dev, features);
 }
 
 /* called with RTNL */
@@ -429,10 +431,14 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
                goto err3;
 
-       err = netdev_rx_handler_register(dev, br_handle_frame, p);
+       err = netdev_set_master(dev, br->dev);
        if (err)
                goto err3;
 
+       err = netdev_rx_handler_register(dev, br_handle_frame, p);
+       if (err)
+               goto err4;
+
        dev->priv_flags |= IFF_BRIDGE_PORT;
 
        dev_disable_lro(dev);
@@ -455,6 +461,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        kobject_uevent(&p->kobj, KOBJ_ADD);
 
        return 0;
+
+err4:
+       netdev_set_master(dev, NULL);
 err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
index 6f6d8e1..88e4aa9 100644 (file)
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
        if (is_multicast_ether_addr(dest)) {
                mdst = br_mdb_get(br, skb);
                if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-                       if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+                       if ((mdst && mdst->mglist) ||
                            br_multicast_is_router(br))
                                skb2 = skb;
                        br_multicast_forward(mdst, skb, skb2);
index f701a21..030a002 100644 (file)
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-       if (ipv6_addr_is_multicast(addr) &&
-           IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+       if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
                return 1;
        return 0;
 }
@@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data)
        if (!netif_running(br->dev) || timer_pending(&mp->timer))
                goto out;
 
-       if (!hlist_unhashed(&mp->mglist))
-               hlist_del_init(&mp->mglist);
+       mp->mglist = false;
 
        if (mp->ports)
                goto out;
@@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                del_timer(&p->query_timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-               if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
 
@@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        eth = eth_hdr(skb);
 
        memcpy(eth->h_source, br->dev->dev_addr, 6);
-       ipv6_eth_mc_map(group, eth->h_dest);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));
 
@@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        ip6h->payload_len = htons(8 + sizeof(*mldq));
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
-       ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+       ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                          &ip6h->saddr);
+       ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
        hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
        struct net_bridge *br = mp->br;
 
        spin_lock(&br->multicast_lock);
-       if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+       if (!netif_running(br->dev) || !mp->mglist ||
            mp->queries_sent >= br->multicast_last_member_count)
                goto out;
 
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br,
                goto err;
 
        if (!port) {
-               hlist_add_head(&mp->mglist, &br->mglist);
+               mp->mglist = true;
                mod_timer(&mp->timer, now + br->multicast_membership_interval);
                goto out;
        }
@@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 {
        struct br_ip br_group;
 
-       if (ipv6_is_local_multicast(group))
+       if (!ipv6_is_transient_multicast(group))
                return 0;
 
        ipv6_addr_copy(&br_group.u.ip6, group);
-       br_group.proto = htons(ETH_P_IP);
+       br_group.proto = htons(ETH_P_IPV6);
 
        return br_multicast_add_group(br, port, &br_group);
 }
@@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
                nsrcs = skb_header_pointer(skb,
                                           len + offsetof(struct mld2_grec,
-                                                         grec_mca),
+                                                         grec_nsrcs),
                                           sizeof(_nsrcs), &_nsrcs);
                if (!nsrcs)
                        return -EINVAL;
 
                if (!pskb_may_pull(skb,
                                   len + sizeof(*grec) +
-                                  sizeof(struct in6_addr) * (*nsrcs)))
+                                  sizeof(struct in6_addr) * ntohs(*nsrcs)))
                        return -EINVAL;
 
                grec = (struct mld2_grec *)(skb->data + len);
-               len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+               len += sizeof(*grec) +
+                      sizeof(struct in6_addr) * ntohs(*nsrcs);
 
                /* We treat these as MLDv1 reports for now. */
                switch (grec->grec_type) {
@@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
        max_delay *= br->multicast_last_member_count;
 
-       if (!hlist_unhashed(&mp->mglist) &&
+       if (mp->mglist &&
            (timer_pending(&mp->timer) ?
             time_after(mp->timer.expires, now + max_delay) :
             try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
                if (timer_pending(&p->timer) ?
                    time_after(p->timer.expires, now + max_delay) :
                    try_to_del_timer_sync(&p->timer) >= 0)
-                       mod_timer(&mp->timer, now + max_delay);
+                       mod_timer(&p->timer, now + max_delay);
        }
 
 out:
@@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                goto out;
 
        max_delay *= br->multicast_last_member_count;
-       if (!hlist_unhashed(&mp->mglist) &&
+       if (mp->mglist &&
            (timer_pending(&mp->timer) ?
             time_after(mp->timer.expires, now + max_delay) :
             try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                if (timer_pending(&p->timer) ?
                    time_after(p->timer.expires, now + max_delay) :
                    try_to_del_timer_sync(&p->timer) >= 0)
-                       mod_timer(&mp->timer, now + max_delay);
+                       mod_timer(&p->timer, now + max_delay);
        }
 
 out:
@@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     br->multicast_last_member_interval;
 
        if (!port) {
-               if (!hlist_unhashed(&mp->mglist) &&
+               if (mp->mglist &&
                    (timer_pending(&mp->timer) ?
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
@@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 {
        struct br_ip br_group;
 
-       if (ipv6_is_local_multicast(group))
+       if (!ipv6_is_transient_multicast(group))
                return;
 
        ipv6_addr_copy(&br_group.u.ip6, group);
index 4b5b66d..f97af55 100644 (file)
@@ -412,10 +412,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
        if (dnat_took_place(skb)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
-                       struct flowi fl = {
-                               .fl4_dst = iph->daddr,
-                               .fl4_tos = RT_TOS(iph->tos),
-                       };
                        struct in_device *in_dev = __in_dev_get_rcu(dev);
 
                        /* If err equals -EHOSTUNREACH the error is due to a
@@ -428,14 +424,16 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
                        if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                                goto free_skb;
 
-                       if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+                       rt = ip_route_output(dev_net(dev), iph->daddr, 0,
+                                            RT_TOS(iph->tos), 0);
+                       if (!IS_ERR(rt)) {
                                /* - Bridged-and-DNAT'ed traffic doesn't
                                 *   require ip_forwarding. */
-                               if (((struct dst_entry *)rt)->dev == dev) {
-                                       skb_dst_set(skb, (struct dst_entry *)rt);
+                               if (rt->dst.dev == dev) {
+                                       skb_dst_set(skb, &rt->dst);
                                        goto bridged_dnat;
                                }
-                               dst_release((struct dst_entry *)rt);
+                               ip_rt_put(rt);
                        }
 free_skb:
                        kfree_skb(skb);
index 84aac77..f7afc36 100644 (file)
@@ -84,13 +84,13 @@ struct net_bridge_port_group {
 struct net_bridge_mdb_entry
 {
        struct hlist_node               hlist[2];
-       struct hlist_node               mglist;
        struct net_bridge               *br;
        struct net_bridge_port_group __rcu *ports;
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct timer_list               query_timer;
        struct br_ip                    addr;
+       bool                            mglist;
        u32                             queries_sent;
 };
 
@@ -182,7 +182,7 @@ struct net_bridge
        struct br_cpu_netstats __percpu *stats;
        spinlock_t                      hash_lock;
        struct hlist_head               hash[BR_HASH_SIZE];
-       unsigned long                   feature_mask;
+       u32                             feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
        struct rtable                   fake_rtable;
        bool                            nf_call_iptables;
@@ -238,7 +238,6 @@ struct net_bridge
        spinlock_t                      multicast_lock;
        struct net_bridge_mdb_htable __rcu *mdb;
        struct hlist_head               router_list;
-       struct hlist_head               mglist;
 
        struct timer_list               multicast_router_timer;
        struct timer_list               multicast_querier_timer;
index 57186d8..a5badd0 100644 (file)
@@ -397,28 +397,37 @@ static void br_make_forwarding(struct net_bridge_port *p)
 void br_port_state_selection(struct net_bridge *br)
 {
        struct net_bridge_port *p;
+       unsigned int liveports = 0;
 
        /* Don't change port states if userspace is handling STP */
        if (br->stp_enabled == BR_USER_STP)
                return;
 
        list_for_each_entry(p, &br->port_list, list) {
-               if (p->state != BR_STATE_DISABLED) {
-                       if (p->port_no == br->root_port) {
-                               p->config_pending = 0;
-                               p->topology_change_ack = 0;
-                               br_make_forwarding(p);
-                       } else if (br_is_designated_port(p)) {
-                               del_timer(&p->message_age_timer);
-                               br_make_forwarding(p);
-                       } else {
-                               p->config_pending = 0;
-                               p->topology_change_ack = 0;
-                               br_make_blocking(p);
-                       }
+               if (p->state == BR_STATE_DISABLED)
+                       continue;
+
+               if (p->port_no == br->root_port) {
+                       p->config_pending = 0;
+                       p->topology_change_ack = 0;
+                       br_make_forwarding(p);
+               } else if (br_is_designated_port(p)) {
+                       del_timer(&p->message_age_timer);
+                       br_make_forwarding(p);
+               } else {
+                       p->config_pending = 0;
+                       p->topology_change_ack = 0;
+                       br_make_blocking(p);
                }
 
+               if (p->state == BR_STATE_FORWARDING)
+                       ++liveports;
        }
+
+       if (liveports == 0)
+               netif_carrier_off(br->dev);
+       else
+               netif_carrier_on(br->dev);
 }
 
 /* called under bridge lock */
index 7b22456..3e96514 100644 (file)
@@ -94,6 +94,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
                p->state = BR_STATE_FORWARDING;
                if (br_is_designated_for_some_port(br))
                        br_topology_change_detection(br);
+               netif_carrier_on(br->dev);
        }
        br_log_state(p);
        spin_unlock(&br->lock);
index 50a46af..2ed0056 100644 (file)
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_bridge/ebt_ip6.h>
 
-struct tcpudphdr {
-       __be16 src;
-       __be16 dst;
+union pkthdr {
+       struct {
+               __be16 src;
+               __be16 dst;
+       } tcpudphdr;
+       struct {
+               u8 type;
+               u8 code;
+       } icmphdr;
 };
 
 static bool
@@ -33,8 +39,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
        const struct ebt_ip6_info *info = par->matchinfo;
        const struct ipv6hdr *ih6;
        struct ipv6hdr _ip6h;
-       const struct tcpudphdr *pptr;
-       struct tcpudphdr _ports;
+       const union pkthdr *pptr;
+       union pkthdr _pkthdr;
 
        ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
        if (ih6 == NULL)
@@ -56,26 +62,34 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        return false;
                if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
                        return false;
-               if (!(info->bitmask & EBT_IP6_DPORT) &&
-                   !(info->bitmask & EBT_IP6_SPORT))
+               if (!(info->bitmask & ( EBT_IP6_DPORT |
+                                       EBT_IP6_SPORT | EBT_IP6_ICMP6)))
                        return true;
-               pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
-                                         &_ports);
+
+               /* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. */
+               pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
+                                         &_pkthdr);
                if (pptr == NULL)
                        return false;
                if (info->bitmask & EBT_IP6_DPORT) {
-                       u32 dst = ntohs(pptr->dst);
+                       u16 dst = ntohs(pptr->tcpudphdr.dst);
                        if (FWINV(dst < info->dport[0] ||
                                  dst > info->dport[1], EBT_IP6_DPORT))
                                return false;
                }
                if (info->bitmask & EBT_IP6_SPORT) {
-                       u32 src = ntohs(pptr->src);
+                       u16 src = ntohs(pptr->tcpudphdr.src);
                        if (FWINV(src < info->sport[0] ||
                                  src > info->sport[1], EBT_IP6_SPORT))
                        return false;
                }
-               return true;
+               if ((info->bitmask & EBT_IP6_ICMP6) &&
+                    FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
+                          pptr->icmphdr.type > info->icmpv6_type[1] ||
+                          pptr->icmphdr.code < info->icmpv6_code[0] ||
+                          pptr->icmphdr.code > info->icmpv6_code[1],
+                                                       EBT_IP6_ICMP6))
+                       return false;
        }
        return true;
 }
@@ -103,6 +117,14 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
                return -EINVAL;
        if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
                return -EINVAL;
+       if (info->bitmask & EBT_IP6_ICMP6) {
+               if ((info->invflags & EBT_IP6_PROTO) ||
+                    info->protocol != IPPROTO_ICMPV6)
+                       return -EINVAL;
+               if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
+                   info->icmpv6_code[0] > info->icmpv6_code[1])
+                       return -EINVAL;
+       }
        return 0;
 }
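
The union introduced above lets a single 4-byte skb_header_pointer() read serve both the existing port match and the new ICMPv6 type/code match, since both pieces of information sit at the very start of the transport header. A stand-alone sketch of the same overlay (values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>      /* ntohs() */

/* Same shape as the union added above, with fixed-width types. */
union pkthdr {
	struct {
		uint16_t src;
		uint16_t dst;
	} tcpudphdr;
	struct {
		uint8_t type;
		uint8_t code;
	} icmphdr;
};

int main(void)
{
	/* First four bytes of an ICMPv6 echo request: type 128, code 0,
	 * then the start of the checksum. */
	const uint8_t wire[4] = { 128, 0, 0x12, 0x34 };
	union pkthdr hdr;

	memcpy(&hdr, wire, sizeof(hdr));

	printf("as ICMPv6:  type=%u code=%u\n",
	       hdr.icmphdr.type, hdr.icmphdr.code);
	/* The same bytes read as ports are meaningless here, which is why
	 * the match tests the EBT_IP6_ICMP6 bit separately from the ports. */
	printf("as TCP/UDP: src=%u dst=%u\n",
	       ntohs(hdr.tcpudphdr.src), ntohs(hdr.tcpudphdr.dst));
	return 0;
}
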
 
index 16df053..893669c 100644 (file)
@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
        if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
                return -ENOMEM;
 
+       tmp.name[sizeof(tmp.name) - 1] = 0;
+
        countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
        newinfo = vmalloc(sizeof(*newinfo) + countersize);
        if (!newinfo)
@@ -1764,6 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,
 
        newinfo->entries_size = size;
 
+       xt_compat_init_offsets(AF_INET, info->nentries);
        return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                                                        entries, newinfo);
 }
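
The tmp.name termination added above is the standard defence for fixed-size strings copied from userspace: nothing guarantees the caller supplied a NUL, yet later lookups treat the field as a C string. A small user-space illustration of the failure mode (names are made up):

#include <stdio.h>
#include <string.h>

#define TABLE_NAME_LEN 32

struct replace_req {
	char name[TABLE_NAME_LEN];      /* copied verbatim from the caller */
};

int main(void)
{
	struct replace_req tmp;

	/* A hostile or buggy caller fills the field completely, no NUL. */
	memset(tmp.name, 'A', sizeof(tmp.name));

	/* The fix: force termination before the name is ever used. */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	/* Safe now; without the line above strlen() would read past the end. */
	printf("name has %zu usable bytes\n", strlen(tmp.name));
	return 0;
}
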
index c665de7..f1f98d9 100644 (file)
 #include <asm/atomic.h>
 
 #define MAX_PHY_LAYERS 7
-#define PHY_NAME_LEN 20
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
-#define RFM_FRAGMENT_SIZE 4030
 
 /* Information about CAIF physical interfaces held by Config Module in order
  * to manage physical interfaces
index d3ed264..27dab26 100644 (file)
@@ -18,7 +18,6 @@
 #define DGM_CMD_BIT  0x80
 #define DGM_FLOW_OFF 0x81
 #define DGM_FLOW_ON  0x80
-#define DGM_CTRL_PKT_SIZE 1
 #define DGM_MTU 1500
 
 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
index 9297f7d..8303fe3 100644 (file)
@@ -25,7 +25,6 @@ struct cfserl {
        spinlock_t sync;
        bool usestx;
 };
-#define STXLEN(layr) (layr->usestx ? 1 : 0)
 
 static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
index efad410..315c0d6 100644 (file)
@@ -20,7 +20,7 @@
 #define UTIL_REMOTE_SHUTDOWN 0x82
 #define UTIL_FLOW_OFF 0x81
 #define UTIL_FLOW_ON  0x80
-#define UTIL_CTRL_PKT_SIZE 1
+
 static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
index 3b425b1..c3b1dec 100644 (file)
@@ -17,7 +17,7 @@
 #define VEI_FLOW_OFF 0x81
 #define VEI_FLOW_ON  0x80
 #define VEI_SET_PIN  0x82
-#define VEI_CTRL_PKT_SIZE 1
+
 #define container_obj(layr) container_of(layr, struct cfsrvl, layer)
 
 static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
index fa9dab3..6008d6d 100644 (file)
@@ -394,9 +394,7 @@ static void ipcaif_net_setup(struct net_device *dev)
        priv->conn_req.sockaddr.u.dgm.connection_id = -1;
        priv->flowenabled = false;
 
-       ASSERT_RTNL();
        init_waitqueue_head(&priv->netmgmt_wq);
-       list_add(&priv->list_field, &chnl_net_list);
 }
 
 
@@ -453,6 +451,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
        ret = register_netdevice(dev);
        if (ret)
                pr_warn("device rtml registration failed\n");
+       else
+               list_add(&caifdev->list_field, &chnl_net_list);
        return ret;
 }
 
index dff633d..05f3578 100644 (file)
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 {
        struct kvec iov = {buf, len};
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+       int r;
 
-       return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+       r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+       if (r == -EAGAIN)
+               r = 0;
+       return r;
 }
 
 /*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
                     size_t kvlen, size_t len, int more)
 {
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+       int r;
 
        if (more)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */
 
-       return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+       r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
+       if (r == -EAGAIN)
+               r = 0;
+       return r;
 }
 
 
@@ -328,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
-       con->out_keepalive_pending = false;
        con->in_seq = 0;
        con->in_seq_acked = 0;
 }
@@ -847,6 +854,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
                    (msg->pages || msg->pagelist || msg->bio || in_trail))
                        kunmap(page);
 
+               if (ret == -EAGAIN)
+                       ret = 0;
                if (ret <= 0)
                        goto out;
 
@@ -1238,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
                     con->auth_retry);
                if (con->auth_retry == 2) {
                        con->error_msg = "connect authorization failure";
-                       reset_connection(con);
-                       set_bit(CLOSED, &con->state);
                        return -1;
                }
                con->auth_retry = 1;
@@ -1705,14 +1712,6 @@ more:
 
        /* open the socket first? */
        if (con->sock == NULL) {
-               /*
-                * if we were STANDBY and are reconnecting _this_
-                * connection, bump connect_seq now.  Always bump
-                * global_seq.
-                */
-               if (test_and_clear_bit(STANDBY, &con->state))
-                       con->connect_seq++;
-
                prepare_write_banner(msgr, con);
                prepare_write_connect(msgr, con, 1);
                prepare_read_banner(con);
@@ -1737,16 +1736,12 @@ more_kvec:
        if (con->out_skip) {
                ret = write_partial_skip(con);
                if (ret <= 0)
-                       goto done;
-               if (ret < 0) {
-                       dout("try_write write_partial_skip err %d\n", ret);
-                       goto done;
-               }
+                       goto out;
        }
        if (con->out_kvec_left) {
                ret = write_partial_kvec(con);
                if (ret <= 0)
-                       goto done;
+                       goto out;
        }
 
        /* msg pages? */
@@ -1761,11 +1756,11 @@ more_kvec:
                if (ret == 1)
                        goto more_kvec;  /* we need to send the footer, too! */
                if (ret == 0)
-                       goto done;
+                       goto out;
                if (ret < 0) {
                        dout("try_write write_partial_msg_pages err %d\n",
                             ret);
-                       goto done;
+                       goto out;
                }
        }
 
@@ -1789,10 +1784,9 @@ do_next:
        /* Nothing to do! */
        clear_bit(WRITE_PENDING, &con->state);
        dout("try_write nothing else to write.\n");
-done:
        ret = 0;
 out:
-       dout("try_write done on %p\n", con);
+       dout("try_write done on %p ret %d\n", con, ret);
        return ret;
 }
 
@@ -1821,19 +1815,17 @@ more:
                        dout("try_read connecting\n");
                        ret = read_partial_banner(con);
                        if (ret <= 0)
-                               goto done;
-                       if (process_banner(con) < 0) {
-                               ret = -1;
                                goto out;
-                       }
+                       ret = process_banner(con);
+                       if (ret < 0)
+                               goto out;
                }
                ret = read_partial_connect(con);
                if (ret <= 0)
-                       goto done;
-               if (process_connect(con) < 0) {
-                       ret = -1;
                        goto out;
-               }
+               ret = process_connect(con);
+               if (ret < 0)
+                       goto out;
                goto more;
        }
 
@@ -1848,7 +1840,7 @@ more:
                dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
                ret = ceph_tcp_recvmsg(con->sock, buf, skip);
                if (ret <= 0)
-                       goto done;
+                       goto out;
                con->in_base_pos += ret;
                if (con->in_base_pos)
                        goto more;
@@ -1859,7 +1851,7 @@ more:
                 */
                ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
                if (ret <= 0)
-                       goto done;
+                       goto out;
                dout("try_read got tag %d\n", (int)con->in_tag);
                switch (con->in_tag) {
                case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1862,7 @@ more:
                        break;
                case CEPH_MSGR_TAG_CLOSE:
                        set_bit(CLOSED, &con->state);   /* fixme */
-                       goto done;
+                       goto out;
                default:
                        goto bad_tag;
                }
@@ -1882,13 +1874,12 @@ more:
                        case -EBADMSG:
                                con->error_msg = "bad crc";
                                ret = -EIO;
-                               goto out;
+                               break;
                        case -EIO:
                                con->error_msg = "io error";
-                               goto out;
-                       default:
-                               goto done;
+                               break;
                        }
+                       goto out;
                }
                if (con->in_tag == CEPH_MSGR_TAG_READY)
                        goto more;
@@ -1898,15 +1889,13 @@ more:
        if (con->in_tag == CEPH_MSGR_TAG_ACK) {
                ret = read_partial_ack(con);
                if (ret <= 0)
-                       goto done;
+                       goto out;
                process_ack(con);
                goto more;
        }
 
-done:
-       ret = 0;
 out:
-       dout("try_read done on %p\n", con);
+       dout("try_read done on %p ret %d\n", con, ret);
        return ret;
 
 bad_tag:
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
                                                   work.work);
 
        mutex_lock(&con->mutex);
+       if (test_and_clear_bit(BACKOFF, &con->state)) {
+               dout("con_work %p backing off\n", con);
+               if (queue_delayed_work(ceph_msgr_wq, &con->work,
+                                      round_jiffies_relative(con->delay))) {
+                       dout("con_work %p backoff %lu\n", con, con->delay);
+                       mutex_unlock(&con->mutex);
+                       return;
+               } else {
+                       con->ops->put(con);
+                       dout("con_work %p FAILED to back off %lu\n", con,
+                            con->delay);
+               }
+       }
 
+       if (test_bit(STANDBY, &con->state)) {
+               dout("con_work %p STANDBY\n", con);
+               goto done;
+       }
        if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
                dout("con_work CLOSED\n");
                con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
        /* Requeue anything that hasn't been acked */
        list_splice_init(&con->out_sent, &con->out_queue);
 
-       /* If there are no messages in the queue, place the connection
-        * in a STANDBY state (i.e., don't try to reconnect just yet). */
-       if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
-               dout("fault setting STANDBY\n");
+       /* If there are no messages queued or keepalive pending, place
+        * the connection in a STANDBY state */
+       if (list_empty(&con->out_queue) &&
+           !test_bit(KEEPALIVE_PENDING, &con->state)) {
+               dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
+               clear_bit(WRITE_PENDING, &con->state);
                set_bit(STANDBY, &con->state);
        } else {
                /* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
                        con->delay = BASE_DELAY_INTERVAL;
                else if (con->delay < MAX_DELAY_INTERVAL)
                        con->delay *= 2;
-               dout("fault queueing %p delay %lu\n", con, con->delay);
                con->ops->get(con);
                if (queue_delayed_work(ceph_msgr_wq, &con->work,
-                                      round_jiffies_relative(con->delay)) == 0)
+                                      round_jiffies_relative(con->delay))) {
+                       dout("fault queued %p delay %lu\n", con, con->delay);
+               } else {
                        con->ops->put(con);
+                       dout("fault failed to queue %p delay %lu, backoff\n",
+                            con, con->delay);
+                       /*
+                        * In many cases we see a socket state change
+                        * while con_work is running and end up
+                        * queuing (non-delayed) work, such that we
+                        * can't backoff with a delay.  Set a flag so
+                        * that when con_work restarts we schedule the
+                        * delay then.
+                        */
+                       set_bit(BACKOFF, &con->state);
+               }
        }
 
 out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
 }
 EXPORT_SYMBOL(ceph_messenger_destroy);
 
+static void clear_standby(struct ceph_connection *con)
+{
+       /* come back from STANDBY? */
+       if (test_and_clear_bit(STANDBY, &con->state)) {
+               mutex_lock(&con->mutex);
+               dout("clear_standby %p and ++connect_seq\n", con);
+               con->connect_seq++;
+               WARN_ON(test_bit(WRITE_PENDING, &con->state));
+               WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
+               mutex_unlock(&con->mutex);
+       }
+}
+
 /*
  * Queue up an outgoing message on the given connection.
  */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 
        /* if there wasn't anything waiting to send before, queue
         * new work */
+       clear_standby(con);
        if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
 }
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
  */
 void ceph_con_keepalive(struct ceph_connection *con)
 {
+       dout("con_keepalive %p\n", con);
+       clear_standby(con);
        if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
            test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
index 1a040e6..cd9c21d 100644 (file)
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
                                          int num_pages, bool write_page)
 {
        struct page **pages;
-       int rc;
+       int got = 0;
+       int rc = 0;
 
        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);
 
        down_read(&current->mm->mmap_sem);
-       rc = get_user_pages(current, current->mm, (unsigned long)data,
-                           num_pages, write_page, 0, pages, NULL);
+       while (got < num_pages) {
+               rc = get_user_pages(current, current->mm,
+                   (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+                   num_pages - got, write_page, 0, pages + got, NULL);
+               if (rc < 0)
+                       break;
+               BUG_ON(rc == 0);
+               got += rc;
+       }
        up_read(&current->mm->mmap_sem);
-       if (rc < num_pages)
+       if (rc < 0)
                goto fail;
        return pages;
 
 fail:
-       ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
+       ceph_put_page_vector(pages, got, false);
        return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
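
get_user_pages() may pin fewer pages than requested, so the loop above keeps calling it at an advanced offset until the whole vector is populated or an error occurs, and on failure releases only the pages it actually got. The same "retry on short return" idiom, shown with read(), which has a similar contract of possibly returning less than asked for:

#include <stdio.h>
#include <unistd.h>

/* Keep reading until exactly len bytes arrive, EOF, or an error,
 * mirroring the got/num_pages loop added above. */
static ssize_t read_full(int fd, char *buf, size_t len)
{
	size_t got = 0;

	while (got < len) {
		ssize_t rc = read(fd, buf + got, len - got);

		if (rc < 0)
			return rc;      /* hard error; caller cleans up */
		if (rc == 0)
			break;          /* EOF: partial result */
		got += rc;
	}
	return got;
}

int main(void)
{
	char buf[8];
	ssize_t n = read_full(STDIN_FILENO, buf, sizeof(buf));

	printf("read %zd of %zu bytes\n", n, sizeof(buf));
	return 0;
}
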
index 7c6a46f..0d39032 100644 (file)
 #include <trace/events/skb.h>
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
+#include <linux/cpu_rmap.h>
 
 #include "net-sysfs.h"
 
@@ -749,7 +750,8 @@ EXPORT_SYMBOL(dev_get_by_index);
  *     @ha: hardware address
  *
  *     Search for an interface by MAC address. Returns NULL if the device
- *     is not found or a pointer to the device. The caller must hold RCU
+ *     is not found or a pointer to the device.
+ *     The caller must hold RCU or RTNL.
  *     The returned device has not had its ref count increased
  *     and the caller must therefore be careful about locking
  *
@@ -1113,13 +1115,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
 void dev_load(struct net *net, const char *name)
 {
        struct net_device *dev;
+       int no_module;
 
        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        rcu_read_unlock();
 
-       if (!dev && capable(CAP_NET_ADMIN))
-               request_module("%s", name);
+       no_module = !dev;
+       if (no_module && capable(CAP_NET_ADMIN))
+               no_module = request_module("netdev-%s", name);
+       if (no_module && capable(CAP_SYS_MODULE)) {
+               if (!request_module("%s", name))
+                       pr_err("Loading kernel module for a network device "
+"with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s "
+"instead\n", name);
+       }
 }
 EXPORT_SYMBOL(dev_load);
 
@@ -1279,13 +1289,16 @@ static int __dev_close_many(struct list_head *head)
 
 static int __dev_close(struct net_device *dev)
 {
+       int retval;
        LIST_HEAD(single);
 
        list_add(&dev->unreg_list, &single);
-       return __dev_close_many(&single);
+       retval = __dev_close_many(&single);
+       list_del(&single);
+       return retval;
 }
 
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
 {
        struct net_device *dev, *tmp;
        LIST_HEAD(tmp_list);
@@ -1324,7 +1337,7 @@ int dev_close(struct net_device *dev)
 
        list_add(&dev->unreg_list, &single);
        dev_close_many(&single);
-
+       list_del(&single);
        return 0;
 }
 EXPORT_SYMBOL(dev_close);
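
The list_del(&single) calls added here, and in rollback_registered() and default_device_exit_batch() further down, all fix the same pattern: a list head on the stack is linked to a longer-lived node, and if the head goes out of scope while that node still points at it, the node is left referencing dead stack memory. A minimal user-space model of the problem, using a hand-rolled circular list in place of the kernel's list_head (names are illustrative):

#include <stdio.h>

/* Tiny stand-in for the kernel's circular doubly-linked list_head. */
struct node { struct node *next, *prev; };

static void list_init(struct node *n) { n->next = n->prev = n; }

static void list_add(struct node *entry, struct node *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct node *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	list_init(entry);               /* never leave the entry dangling */
}

static struct node device;              /* long-lived, like dev->unreg_list */

static void close_one(void)
{
	struct node single;             /* on-stack head, like LIST_HEAD(single) */

	list_init(&single);
	list_add(&device, &single);
	/* ... dev_close_many(&single) would walk the list here ... */
	list_del(&single);              /* the fix: detach the stack head */
}

int main(void)
{
	list_init(&device);
	close_one();
	/* Without the list_del() above, device.next/prev would now point
	 * into close_one()'s dead stack frame. */
	printf("device self-linked after close: %d\n", device.next == &device);
	return 0;
}
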
@@ -1593,6 +1606,48 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
        rcu_read_unlock();
 }
 
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this verify the tc mapping remains valid and if
+ * not, NULL the mapping. Once no priorities map to a given
+ * offset/count pair it will no longer be used. In the worst case, when
+ * TC0 itself is invalid, nothing can be done, so all priority mappings
+ * are disabled. It is expected that drivers will fix this mapping if
+ * they can before calling netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+       int i;
+       struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+       /* If TC0 is invalidated disable TC mapping */
+       if (tc->offset + tc->count > txq) {
+               pr_warning("Number of in use tx queues changed "
+                          "invalidating tc mappings. Priority "
+                          "traffic classification disabled!\n");
+               dev->num_tc = 0;
+               return;
+       }
+
+       /* Invalidated prio to tc mappings set to TC0 */
+       for (i = 1; i < TC_BITMASK + 1; i++) {
+               int q = netdev_get_prio_tc_map(dev, i);
+
+               tc = &dev->tc_to_txq[q];
+               if (tc->offset + tc->count > txq) {
+                       pr_warning("Number of in use tx queues "
+                                  "changed. Priority %i to tc "
+                                  "mapping %i is no longer valid "
+                                  "setting map to 0\n",
+                                  i, q);
+                       netdev_set_prio_tc_map(dev, i, 0);
+               }
+       }
+}
+
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
@@ -1604,7 +1659,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
        if (txq < 1 || txq > dev->num_tx_queues)
                return -EINVAL;
 
-       if (dev->reg_state == NETREG_REGISTERED) {
+       if (dev->reg_state == NETREG_REGISTERED ||
+           dev->reg_state == NETREG_UNREGISTERING) {
                ASSERT_RTNL();
 
                rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -1612,6 +1668,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
                if (rc)
                        return rc;
 
+               if (dev->num_tc)
+                       netif_setup_tc(dev, txq);
+
                if (txq < dev->real_num_tx_queues)
                        qdisc_reset_all_tx_gt(dev, txq);
        }
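
netif_setup_tc() above re-validates the priority-to-class table whenever the number of usable tx queues shrinks: any class whose offset/count window no longer fits is remapped to TC0, and if TC0 itself no longer fits, the whole mapping is disabled. A compact user-space model of that check (the table sizes and contents are made-up stand-ins, not the kernel's):

#include <stdio.h>

#define TC_BITMASK 15

struct tc_txq { unsigned int offset, count; };

/* Four classes of four queues each, over a 16-queue device. */
static struct tc_txq tc_to_txq[4] = {
	{ 0, 4 }, { 4, 4 }, { 8, 4 }, { 12, 4 },
};
static unsigned int prio_tc_map[TC_BITMASK + 1] = {
	0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3,
};

/* Re-validate the mappings after the number of usable tx queues drops. */
static void setup_tc(unsigned int txq)
{
	unsigned int i;

	if (tc_to_txq[0].offset + tc_to_txq[0].count > txq) {
		printf("TC0 no longer fits: disabling tc mappings\n");
		return;
	}
	for (i = 1; i <= TC_BITMASK; i++) {
		struct tc_txq *tc = &tc_to_txq[prio_tc_map[i]];

		if (tc->offset + tc->count > txq) {
			printf("prio %u -> tc %u out of range, remapping to 0\n",
			       i, prio_tc_map[i]);
			prio_tc_map[i] = 0;
		}
	}
}

int main(void)
{
	setup_tc(8);    /* queues 8..15 went away: classes 2 and 3 are remapped */
	return 0;
}
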
@@ -1811,7 +1870,7 @@ EXPORT_SYMBOL(skb_checksum_help);
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
@@ -1999,7 +2058,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
                 protocol == htons(ETH_P_FCOE)));
 }
 
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
 {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
@@ -2011,10 +2070,10 @@ static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features
        return features;
 }
 
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       int features = skb->dev->features;
+       u32 features = skb->dev->features;
 
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2059,7 +2118,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        int rc = NETDEV_TX_OK;
 
        if (likely(!skb->next)) {
-               int features;
+               u32 features;
 
                /*
                 * If device doesn't need skb->dst, release it right now while
@@ -2161,6 +2220,8 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                  unsigned int num_tx_queues)
 {
        u32 hash;
+       u16 qoffset = 0;
+       u16 qcount = num_tx_queues;
 
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
@@ -2169,13 +2230,19 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                return hash;
        }
 
+       if (dev->num_tc) {
+               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+               qoffset = dev->tc_to_txq[tc].offset;
+               qcount = dev->tc_to_txq[tc].count;
+       }
+
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
 
-       return (u16) (((u64) hash * num_tx_queues) >> 32);
+       return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
@@ -2272,15 +2339,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct netdev_queue *txq)
 {
        spinlock_t *root_lock = qdisc_lock(q);
-       bool contended = qdisc_is_running(q);
+       bool contended;
        int rc;
 
+       qdisc_skb_cb(skb)->pkt_len = skb->len;
+       qdisc_calculate_pkt_len(skb, q);
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
         * This permits __QDISC_STATE_RUNNING owner to get the lock more often
         * and dequeue packets faster.
         */
+       contended = qdisc_is_running(q);
        if (unlikely(contended))
                spin_lock(&q->busylock);
 
@@ -2298,7 +2368,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
                        skb_dst_force(skb);
 
-               qdisc_skb_cb(skb)->pkt_len = skb->len;
                qdisc_bstats_update(q, skb);
 
                if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2313,7 +2382,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                rc = NET_XMIT_SUCCESS;
        } else {
                skb_dst_force(skb);
-               rc = qdisc_enqueue_root(skb, q);
+               rc = q->enqueue(skb, q) & NET_XMIT_MASK;
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
@@ -2532,6 +2601,54 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+static struct rps_dev_flow *
+set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+           struct rps_dev_flow *rflow, u16 next_cpu)
+{
+       u16 tcpu;
+
+       tcpu = rflow->cpu = next_cpu;
+       if (tcpu != RPS_NO_CPU) {
+#ifdef CONFIG_RFS_ACCEL
+               struct netdev_rx_queue *rxqueue;
+               struct rps_dev_flow_table *flow_table;
+               struct rps_dev_flow *old_rflow;
+               u32 flow_id;
+               u16 rxq_index;
+               int rc;
+
+               /* Should we steer this flow to a different hardware queue? */
+               if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
+                   !(dev->features & NETIF_F_NTUPLE))
+                       goto out;
+               rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
+               if (rxq_index == skb_get_rx_queue(skb))
+                       goto out;
+
+               rxqueue = dev->_rx + rxq_index;
+               flow_table = rcu_dereference(rxqueue->rps_flow_table);
+               if (!flow_table)
+                       goto out;
+               flow_id = skb->rxhash & flow_table->mask;
+               rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+                                                       rxq_index, flow_id);
+               if (rc < 0)
+                       goto out;
+               old_rflow = rflow;
+               rflow = &flow_table->flows[flow_id];
+               rflow->cpu = next_cpu;
+               rflow->filter = rc;
+               if (old_rflow->filter == rflow->filter)
+                       old_rflow->filter = RPS_NO_FILTER;
+       out:
+#endif
+               rflow->last_qtail =
+                       per_cpu(softnet_data, tcpu).input_queue_head;
+       }
+
+       return rflow;
+}
+
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
@@ -2562,7 +2679,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 
        map = rcu_dereference(rxqueue->rps_map);
        if (map) {
-               if (map->len == 1) {
+               if (map->len == 1 &&
+                   !rcu_dereference_raw(rxqueue->rps_flow_table)) {
                        tcpu = map->cpus[0];
                        if (cpu_online(tcpu))
                                cpu = tcpu;
@@ -2602,12 +2720,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                if (unlikely(tcpu != next_cpu) &&
                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
-                     rflow->last_qtail)) >= 0)) {
-                       tcpu = rflow->cpu = next_cpu;
-                       if (tcpu != RPS_NO_CPU)
-                               rflow->last_qtail = per_cpu(softnet_data,
-                                   tcpu).input_queue_head;
-               }
+                     rflow->last_qtail)) >= 0))
+                       rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+
                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
                        *rflowp = rflow;
                        cpu = tcpu;
@@ -2628,6 +2743,46 @@ done:
        return cpu;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+
+/**
+ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
+ * @dev: Device on which the filter was set
+ * @rxq_index: RX queue index
+ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
+ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
+ *
+ * Drivers that implement ndo_rx_flow_steer() should periodically call
+ * this function for each installed filter and remove the filters for
+ * which it returns %true.
+ */
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+                        u32 flow_id, u16 filter_id)
+{
+       struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
+       struct rps_dev_flow_table *flow_table;
+       struct rps_dev_flow *rflow;
+       bool expire = true;
+       int cpu;
+
+       rcu_read_lock();
+       flow_table = rcu_dereference(rxqueue->rps_flow_table);
+       if (flow_table && flow_id <= flow_table->mask) {
+               rflow = &flow_table->flows[flow_id];
+               cpu = ACCESS_ONCE(rflow->cpu);
+               if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+                   ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+                          rflow->last_qtail) <
+                    (int)(10 * flow_table->mask)))
+                       expire = false;
+       }
+       rcu_read_unlock();
+       return expire;
+}
+EXPORT_SYMBOL(rps_may_expire_flow);
+
+#endif /* CONFIG_RFS_ACCEL */
+
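
The expiry test in rps_may_expire_flow() uses the standard idiom for free-running unsigned counters: subtract in unsigned arithmetic so wraparound cancels out, then interpret the result as signed so "behind" shows up as negative. A self-contained illustration with the same ten-times-the-table-mask threshold used above:

#include <stdio.h>
#include <stdint.h>

/* True once the backlog head has advanced at least 'threshold' entries
 * past the point recorded for this flow.  The unsigned subtraction is
 * wraparound-safe; the signed compare keeps a tail that is logically
 * ahead of head (negative difference) from looking stale. */
static int flow_is_stale(uint32_t head, uint32_t last_qtail,
			 uint32_t threshold)
{
	return (int32_t)(head - last_qtail) >= (int32_t)threshold;
}

int main(void)
{
	uint32_t table_mask = 63;               /* e.g. a 64-entry flow table */
	/* The counter has wrapped: head is numerically smaller than the
	 * recorded tail but logically well ahead of it. */
	uint32_t last_qtail = 0xfffffff0u;
	uint32_t head       = 0x00000400u;

	printf("advanced %u entries, stale: %d\n",
	       (unsigned int)(head - last_qtail),
	       flow_is_stale(head, last_qtail, 10 * table_mask));
	return 0;
}
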
 /* Called from hardirq (IPI) context */
 static void rps_trigger_softirq(void *data)
 {
@@ -2949,64 +3104,31 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
-                                             struct net_device *master)
-{
-       if (skb->pkt_type == PACKET_HOST) {
-               u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
-               memcpy(dest, master->dev_addr, ETH_ALEN);
-       }
-}
-
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
-int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+static void vlan_on_bond_hook(struct sk_buff *skb)
 {
-       struct net_device *dev = skb->dev;
-
-       if (master->priv_flags & IFF_MASTER_ARPMON)
-               dev->last_rx = jiffies;
-
-       if ((master->priv_flags & IFF_MASTER_ALB) &&
-           (master->priv_flags & IFF_BRIDGE_PORT)) {
-               /* Do address unmangle. The local destination address
-                * will be always the one master has. Provides the right
-                * functionality in a bridge.
-                */
-               skb_bond_set_mac_by_master(skb, master);
-       }
-
-       if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-               if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-                   skb->protocol == __cpu_to_be16(ETH_P_ARP))
-                       return 0;
-
-               if (master->priv_flags & IFF_MASTER_ALB) {
-                       if (skb->pkt_type != PACKET_BROADCAST &&
-                           skb->pkt_type != PACKET_MULTICAST)
-                               return 0;
-               }
-               if (master->priv_flags & IFF_MASTER_8023AD &&
-                   skb->protocol == __cpu_to_be16(ETH_P_SLOW))
-                       return 0;
+       /*
+        * Make sure ARP frames received on VLAN interfaces stacked on
+        * bonding interfaces still make their way to any base bonding
+        * device that may have registered for a specific ptype.
+        */
+       if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
+           vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
+           skb->protocol == htons(ETH_P_ARP)) {
+               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
-               return 1;
+               if (!skb2)
+                       return;
+               skb2->dev = vlan_dev_real_dev(skb->dev);
+               netif_rx(skb2);
        }
-       return 0;
 }
-EXPORT_SYMBOL(__skb_bond_should_drop);
 
 static int __netif_receive_skb(struct sk_buff *skb)
 {
        struct packet_type *ptype, *pt_prev;
        rx_handler_func_t *rx_handler;
        struct net_device *orig_dev;
-       struct net_device *master;
-       struct net_device *null_or_orig;
-       struct net_device *orig_or_bond;
+       struct net_device *null_or_dev;
        int ret = NET_RX_DROP;
        __be16 type;
 
@@ -3021,28 +3143,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
        if (!skb->skb_iif)
                skb->skb_iif = skb->dev->ifindex;
-
-       /*
-        * bonding note: skbs received on inactive slaves should only
-        * be delivered to pkt handlers that are exact matches.  Also
-        * the deliver_no_wcard flag will be set.  If packet handlers
-        * are sensitive to duplicate packets these skbs will need to
-        * be dropped at the handler.
-        */
-       null_or_orig = NULL;
        orig_dev = skb->dev;
-       master = ACCESS_ONCE(orig_dev->master);
-       if (skb->deliver_no_wcard)
-               null_or_orig = orig_dev;
-       else if (master) {
-               if (skb_bond_should_drop(skb, master)) {
-                       skb->deliver_no_wcard = 1;
-                       null_or_orig = orig_dev; /* deliver only exact match */
-               } else
-                       skb->dev = master;
-       }
 
-       __this_cpu_inc(softnet_data.processed);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;
@@ -3051,6 +3153,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
        rcu_read_lock();
 
+another_round:
+
+       __this_cpu_inc(softnet_data.processed);
+
 #ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3059,8 +3165,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 #endif
 
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-                   ptype->dev == orig_dev) {
+               if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
@@ -3074,16 +3179,20 @@ static int __netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
-       /* Handle special case of bridge or macvlan */
        rx_handler = rcu_dereference(skb->dev->rx_handler);
        if (rx_handler) {
+               struct net_device *prev_dev;
+
                if (pt_prev) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }
+               prev_dev = skb->dev;
                skb = rx_handler(skb);
                if (!skb)
                        goto out;
+               if (skb->dev != prev_dev)
+                       goto another_round;
        }
 
        if (vlan_tx_tag_present(skb)) {
@@ -3098,24 +3207,17 @@ ncls:
                        goto out;
        }
 
-       /*
-        * Make sure frames received on VLAN interfaces stacked on
-        * bonding interfaces still make their way to any base bonding
-        * device that may have registered for a specific ptype.  The
-        * handler may have to adjust skb->dev and orig_dev.
-        */
-       orig_or_bond = orig_dev;
-       if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
-           (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-               orig_or_bond = vlan_dev_real_dev(skb->dev);
-       }
+       vlan_on_bond_hook(skb);
+
+       /* deliver only exact match when indicated */
+       null_or_dev = skb->deliver_no_wcard ? skb->dev : NULL;
 
        type = skb->protocol;
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-               if (ptype->type == type && (ptype->dev == null_or_orig ||
-                    ptype->dev == skb->dev || ptype->dev == orig_dev ||
-                    ptype->dev == orig_or_bond)) {
+               if (ptype->type == type &&
+                   (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
+                    ptype->dev == orig_dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
@@ -3423,6 +3525,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        __skb_pull(skb, skb_headlen(skb));
        skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
        skb->vlan_tci = 0;
+       skb->dev = napi->dev;
+       skb->skb_iif = 0;
 
        napi->skb = skb;
 }
@@ -3910,12 +4014,15 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       struct net_device *dev = (v == SEQ_START_TOKEN) ?
-                                 first_net_device(seq_file_net(seq)) :
-                                 next_net_device((struct net_device *)v);
+       struct net_device *dev = v;
+
+       if (v == SEQ_START_TOKEN)
+               dev = first_net_device_rcu(seq_file_net(seq));
+       else
+               dev = next_net_device_rcu(dev);
 
        ++*pos;
-       return rcu_dereference(dev);
+       return dev;
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4199,15 +4306,14 @@ static int __init dev_proc_init(void)
 
 
 /**
- *     netdev_set_master       -       set up master/slave pair
+ *     netdev_set_master       -       set up master pointer
  *     @slave: slave device
  *     @master: new master device
  *
  *     Changes the master device of the slave. Pass %NULL to break the
  *     bonding. The caller must hold the RTNL semaphore. On a failure
  *     a negative errno code is returned. On success the reference counts
- *     are adjusted, %RTM_NEWLINK is sent to the routing socket and the
- *     function returns zero.
+ *     are adjusted and the function returns zero.
  */
 int netdev_set_master(struct net_device *slave, struct net_device *master)
 {
@@ -4227,6 +4333,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
                synchronize_net();
                dev_put(old);
        }
+       return 0;
+}
+EXPORT_SYMBOL(netdev_set_master);
+
+/**
+ *     netdev_set_bond_master  -       set up bonding master/slave pair
+ *     @slave: slave device
+ *     @master: new master device
+ *
+ *     Changes the master device of the slave. Pass %NULL to break the
+ *     bonding. The caller must hold the RTNL semaphore. On a failure
+ *     a negative errno code is returned. On success %RTM_NEWLINK is sent
+ *     to the routing socket and the function returns zero.
+ */
+int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
+{
+       int err;
+
+       ASSERT_RTNL();
+
+       err = netdev_set_master(slave, master);
+       if (err)
+               return err;
        if (master)
                slave->flags |= IFF_SLAVE;
        else
@@ -4235,7 +4364,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
        rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
        return 0;
 }
-EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_set_bond_master);
 
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
@@ -4571,6 +4700,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(dev_set_mtu);
 
+/**
+ *     dev_set_group - Change group this device belongs to
+ *     @dev: device
+ *     @new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+       dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
 /**
  *     dev_set_mac_address - Change Media Access Control Address
  *     @dev: device
@@ -5059,43 +5199,58 @@ static void rollback_registered(struct net_device *dev)
 
        list_add(&dev->unreg_list, &single);
        rollback_registered_many(&single);
+       list_del(&single);
 }
 
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(struct net_device *dev, u32 features)
 {
+       /* Fix illegal checksum combinations */
+       if ((features & NETIF_F_HW_CSUM) &&
+           (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+               netdev_info(dev, "mixed HW and IP checksum settings.\n");
+               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+       }
+
+       if ((features & NETIF_F_NO_CSUM) &&
+           (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+               netdev_info(dev, "mixed no checksumming and other settings.\n");
+               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+       }
+
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
-               if (name)
-                       printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
-                              "checksum feature.\n", name);
+               netdev_info(dev,
+                           "Dropping NETIF_F_SG since no checksum feature.\n");
                features &= ~NETIF_F_SG;
        }
 
        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
-               if (name)
-                       printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
-                              "SG feature.\n", name);
+               netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
                features &= ~NETIF_F_TSO;
        }
 
+       /* Software GSO depends on SG. */
+       if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
+               netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+               features &= ~NETIF_F_GSO;
+       }
+
+       /* UFO needs SG and checksumming */
        if (features & NETIF_F_UFO) {
                /* maybe split UFO into V4 and V6? */
                if (!((features & NETIF_F_GEN_CSUM) ||
                    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
                            == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-                       if (name)
-                               printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-                                      "since no checksum offload features.\n",
-                                      name);
+                       netdev_info(dev,
+                               "Dropping NETIF_F_UFO since no checksum offload features.\n");
                        features &= ~NETIF_F_UFO;
                }
 
                if (!(features & NETIF_F_SG)) {
-                       if (name)
-                               printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-                                      "since no NETIF_F_SG feature.\n", name);
+                       netdev_info(dev,
+                               "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
                        features &= ~NETIF_F_UFO;
                }
        }
@@ -5104,6 +5259,37 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
+void netdev_update_features(struct net_device *dev)
+{
+       u32 features;
+       int err = 0;
+
+       features = netdev_get_wanted_features(dev);
+
+       if (dev->netdev_ops->ndo_fix_features)
+               features = dev->netdev_ops->ndo_fix_features(dev, features);
+
+       /* driver might be less strict about feature dependencies */
+       features = netdev_fix_features(dev, features);
+
+       if (dev->features == features)
+               return;
+
+       netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
+               dev->features, features);
+
+       if (dev->netdev_ops->ndo_set_features)
+               err = dev->netdev_ops->ndo_set_features(dev, features);
+
+       if (!err)
+               dev->features = features;
+       else if (err < 0)
+               netdev_err(dev,
+                       "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
+                       err, features, dev->features);
+}
+EXPORT_SYMBOL(netdev_update_features);
+
 /**
  *     netif_stacked_transfer_operstate -      transfer operstate
  *     @rootdev: the root or lower level device to transfer state from
@@ -5238,27 +5424,19 @@ int register_netdevice(struct net_device *dev)
        if (dev->iflink == -1)
                dev->iflink = dev->ifindex;
 
-       /* Fix illegal checksum combinations */
-       if ((dev->features & NETIF_F_HW_CSUM) &&
-           (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
-                      dev->name);
-               dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
-       }
+       /* Transfer changeable features to wanted_features and enable
+        * software offloads (GSO and GRO).
+        */
+       dev->hw_features |= NETIF_F_SOFT_FEATURES;
+       dev->features |= NETIF_F_SOFT_FEATURES;
+       dev->wanted_features = dev->features & dev->hw_features;
 
-       if ((dev->features & NETIF_F_NO_CSUM) &&
-           (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
-                      dev->name);
-               dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+       /* Avoid warning from netdev_fix_features() for GSO without SG */
+       if (!(dev->wanted_features & NETIF_F_SG)) {
+               dev->wanted_features &= ~NETIF_F_GSO;
+               dev->features &= ~NETIF_F_GSO;
        }
 
-       dev->features = netdev_fix_features(dev->features, dev->name);
-
-       /* Enable software GSO if SG is supported. */
-       if (dev->features & NETIF_F_SG)
-               dev->features |= NETIF_F_GSO;
-
        /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
         * vlan_dev_init() will do the dev->features check, so these features
         * are enabled only if supported by underlying device.
@@ -5275,6 +5453,8 @@ int register_netdevice(struct net_device *dev)
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;
 
+       netdev_update_features(dev);
+
        /*
         *      Default initial state at registry is that the
         *      device is present.
@@ -5656,30 +5836,36 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev_net_set(dev, &init_net);
 
+       dev->gso_max_size = GSO_MAX_SIZE;
+
+       INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+       dev->ethtool_ntuple_list.count = 0;
+       INIT_LIST_HEAD(&dev->napi_list);
+       INIT_LIST_HEAD(&dev->unreg_list);
+       INIT_LIST_HEAD(&dev->link_watch_list);
+       dev->priv_flags = IFF_XMIT_DST_RELEASE;
+       setup(dev);
+
        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
-               goto free_pcpu;
+               goto free_all;
 
 #ifdef CONFIG_RPS
        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
-               goto free_pcpu;
+               goto free_all;
 #endif
 
-       dev->gso_max_size = GSO_MAX_SIZE;
-
-       INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
-       dev->ethtool_ntuple_list.count = 0;
-       INIT_LIST_HEAD(&dev->napi_list);
-       INIT_LIST_HEAD(&dev->unreg_list);
-       INIT_LIST_HEAD(&dev->link_watch_list);
-       dev->priv_flags = IFF_XMIT_DST_RELEASE;
-       setup(dev);
        strcpy(dev->name, name);
+       dev->group = INIT_NETDEV_GROUP;
        return dev;
 
+free_all:
+       free_netdev(dev);
+       return NULL;
+
 free_pcpu:
        free_percpu(dev->pcpu_refcnt);
        kfree(dev->_tx);
@@ -5988,8 +6174,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  *     @one to the master device with current feature set @all.  Will not
  *     enable anything that is off in @mask. Returns the new feature set.
  */
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-                                       unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 {
        /* If device needs checksumming, downgrade to it. */
        if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
@@ -6207,6 +6392,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
                }
        }
        unregister_netdevice_many(&dev_kill_list);
+       list_del(&dev_kill_list);
        rtnl_unlock();
 }
 
index 508f9c1..133fd22 100644 (file)
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 
        list_for_each_entry(ha, &from_list->list, list) {
                type = addr_type ? addr_type : ha->type;
-               __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+               __hw_addr_del(to_list, ha->addr, addr_len, type);
        }
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
index b99c7c7..91104d3 100644 (file)
@@ -164,7 +164,9 @@ int dst_discard(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard);
 
-void *dst_alloc(struct dst_ops *ops)
+const u32 dst_default_metrics[RTAX_MAX];
+
+void *dst_alloc(struct dst_ops *ops, int initial_ref)
 {
        struct dst_entry *dst;
 
@@ -175,11 +177,12 @@ void *dst_alloc(struct dst_ops *ops)
        dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
-       atomic_set(&dst->__refcnt, 0);
+       atomic_set(&dst->__refcnt, initial_ref);
        dst->ops = ops;
        dst->lastuse = jiffies;
        dst->path = dst;
        dst->input = dst->output = dst_discard;
+       dst_init_metrics(dst, dst_default_metrics, true);
 #if RT_CACHE_DEBUG >= 2
        atomic_inc(&dst_total);
 #endif
@@ -282,6 +285,42 @@ void dst_release(struct dst_entry *dst)
 }
 EXPORT_SYMBOL(dst_release);
 
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+       u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+
+       if (p) {
+               u32 *old_p = __DST_METRICS_PTR(old);
+               unsigned long prev, new;
+
+               memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+               new = (unsigned long) p;
+               prev = cmpxchg(&dst->_metrics, old, new);
+
+               if (prev != old) {
+                       kfree(p);
+                       p = __DST_METRICS_PTR(prev);
+                       if (prev & DST_METRICS_READ_ONLY)
+                               p = NULL;
+               }
+       }
+       return p;
+}
+EXPORT_SYMBOL(dst_cow_metrics_generic);
+
+/* Caller asserts that dst_metrics_read_only(dst) is false.  */
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+       unsigned long prev, new;
+
+       new = (unsigned long) dst_default_metrics;
+       prev = cmpxchg(&dst->_metrics, old, new);
+       if (prev == old)
+               kfree(__DST_METRICS_PTR(old));
+}
+EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+
 /**
  * skb_dst_set_noref - sets skb dst, without a reference
  * @skb: buffer
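Both helpers above treat dst->_metrics as a tagged word. A hedged reference sketch of the encoding they assume; the authoritative definitions are the ones this series adds to include/net/dst.h:

/* dst->_metrics packs a u32[RTAX_MAX] pointer together with a read-only
 * marker in the low bit, so freshly allocated entries can all share the
 * zero-filled dst_default_metrics table until a metric is written.
 */
#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))

/* dst_init_metrics(dst, dst_default_metrics, true) in dst_alloc() stores
 * the shared table with the read-only bit set; the first writer then goes
 * through dst_cow_metrics_generic() above to obtain a private copy.
 */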
index 1774178..c1a71bb 100644 (file)
@@ -34,12 +34,6 @@ u32 ethtool_op_get_link(struct net_device *dev)
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
-u32 ethtool_op_get_rx_csum(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_rx_csum);
-
 u32 ethtool_op_get_tx_csum(struct net_device *dev)
 {
        return (dev->features & NETIF_F_ALL_CSUM) != 0;
@@ -55,6 +49,7 @@ int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 
        return 0;
 }
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 
 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
 {
@@ -171,6 +166,381 @@ EXPORT_SYMBOL(ethtool_ntuple_flush);
 
 /* Handlers for each ethtool command */
 
+#define ETHTOOL_DEV_FEATURE_WORDS      1
+
+static void ethtool_get_features_compat(struct net_device *dev,
+       struct ethtool_get_features_block *features)
+{
+       if (!dev->ethtool_ops)
+               return;
+
+       /* getting RX checksum */
+       if (dev->ethtool_ops->get_rx_csum)
+               if (dev->ethtool_ops->get_rx_csum(dev))
+                       features[0].active |= NETIF_F_RXCSUM;
+
+       /* mark legacy-changeable features */
+       if (dev->ethtool_ops->set_sg)
+               features[0].available |= NETIF_F_SG;
+       if (dev->ethtool_ops->set_tx_csum)
+               features[0].available |= NETIF_F_ALL_CSUM;
+       if (dev->ethtool_ops->set_tso)
+               features[0].available |= NETIF_F_ALL_TSO;
+       if (dev->ethtool_ops->set_rx_csum)
+               features[0].available |= NETIF_F_RXCSUM;
+       if (dev->ethtool_ops->set_flags)
+               features[0].available |= flags_dup_features;
+}
+
+static int ethtool_set_feature_compat(struct net_device *dev,
+       int (*legacy_set)(struct net_device *, u32),
+       struct ethtool_set_features_block *features, u32 mask)
+{
+       u32 do_set;
+
+       if (!legacy_set)
+               return 0;
+
+       if (!(features[0].valid & mask))
+               return 0;
+
+       features[0].valid &= ~mask;
+
+       do_set = !!(features[0].requested & mask);
+
+       if (legacy_set(dev, do_set) < 0)
+               netdev_info(dev,
+                       "Legacy feature change (%s) failed for 0x%08x\n",
+                       do_set ? "set" : "clear", mask);
+
+       return 1;
+}
+
+static int ethtool_set_features_compat(struct net_device *dev,
+       struct ethtool_set_features_block *features)
+{
+       int compat;
+
+       if (!dev->ethtool_ops)
+               return 0;
+
+       compat  = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
+               features, NETIF_F_SG);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
+               features, NETIF_F_ALL_CSUM);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
+               features, NETIF_F_ALL_TSO);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
+               features, NETIF_F_RXCSUM);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
+               features, flags_dup_features);
+
+       return compat;
+}
+
+static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
+{
+       struct ethtool_gfeatures cmd = {
+               .cmd = ETHTOOL_GFEATURES,
+               .size = ETHTOOL_DEV_FEATURE_WORDS,
+       };
+       struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
+               {
+                       .available = dev->hw_features,
+                       .requested = dev->wanted_features,
+                       .active = dev->features,
+                       .never_changed = NETIF_F_NEVER_CHANGE,
+               },
+       };
+       u32 __user *sizeaddr;
+       u32 copy_size;
+
+       ethtool_get_features_compat(dev, features);
+
+       sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
+       if (get_user(copy_size, sizeaddr))
+               return -EFAULT;
+
+       if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
+               copy_size = ETHTOOL_DEV_FEATURE_WORDS;
+
+       if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+               return -EFAULT;
+       useraddr += sizeof(cmd);
+       if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
+{
+       struct ethtool_sfeatures cmd;
+       struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
+       int ret = 0;
+
+       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+               return -EFAULT;
+       useraddr += sizeof(cmd);
+
+       if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
+               return -EINVAL;
+
+       if (copy_from_user(features, useraddr, sizeof(features)))
+               return -EFAULT;
+
+       if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
+               return -EINVAL;
+
+       if (ethtool_set_features_compat(dev, features))
+               ret |= ETHTOOL_F_COMPAT;
+
+       if (features[0].valid & ~dev->hw_features) {
+               features[0].valid &= dev->hw_features;
+               ret |= ETHTOOL_F_UNSUPPORTED;
+       }
+
+       dev->wanted_features &= ~features[0].valid;
+       dev->wanted_features |= features[0].valid & features[0].requested;
+       netdev_update_features(dev);
+
+       if ((dev->wanted_features ^ dev->features) & features[0].valid)
+               ret |= ETHTOOL_F_WISH;
+
+       return ret;
+}
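From userspace, the new ETHTOOL_GFEATURES/ETHTOOL_SFEATURES commands are ordinary SIOCETHTOOL ioctls. A hedged sketch of a query: the structure layouts are the ones this series adds to linux/ethtool.h, fd is any AF_INET datagram socket, and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int dump_features(int fd, const char *ifname)
{
	struct {
		struct ethtool_gfeatures hdr;
		struct ethtool_get_features_block block[1];
	} req;
	struct ifreq ifr;

	memset(&req, 0, sizeof(req));
	req.hdr.cmd  = ETHTOOL_GFEATURES;
	req.hdr.size = 1;		/* ETHTOOL_DEV_FEATURE_WORDS */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&req;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	printf("%s: active 0x%08x changeable 0x%08x requested 0x%08x\n",
	       ifname, req.block[0].active, req.block[0].available,
	       req.block[0].requested);
	return 0;
}

ETHTOOL_SFEATURES takes the mirror-image ethtool_set_features_block (valid/requested masks) and may return the ETHTOOL_F_COMPAT, ETHTOOL_F_UNSUPPORTED and ETHTOOL_F_WISH hints produced by ethtool_set_features() above; per-bit names come from the ETH_SS_FEATURES string set.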
+
+static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
+       /* NETIF_F_SG */              "tx-scatter-gather",
+       /* NETIF_F_IP_CSUM */         "tx-checksum-ipv4",
+       /* NETIF_F_NO_CSUM */         "tx-checksum-unneeded",
+       /* NETIF_F_HW_CSUM */         "tx-checksum-ip-generic",
+       /* NETIF_F_IPV6_CSUM */       "tx-checksum-ipv6",
+       /* NETIF_F_HIGHDMA */         "highdma",
+       /* NETIF_F_FRAGLIST */        "tx-scatter-gather-fraglist",
+       /* NETIF_F_HW_VLAN_TX */      "tx-vlan-hw-insert",
+
+       /* NETIF_F_HW_VLAN_RX */      "rx-vlan-hw-parse",
+       /* NETIF_F_HW_VLAN_FILTER */  "rx-vlan-filter",
+       /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
+       /* NETIF_F_GSO */             "tx-generic-segmentation",
+       /* NETIF_F_LLTX */            "tx-lockless",
+       /* NETIF_F_NETNS_LOCAL */     "netns-local",
+       /* NETIF_F_GRO */             "rx-gro",
+       /* NETIF_F_LRO */             "rx-lro",
+
+       /* NETIF_F_TSO */             "tx-tcp-segmentation",
+       /* NETIF_F_UFO */             "tx-udp-fragmentation",
+       /* NETIF_F_GSO_ROBUST */      "tx-gso-robust",
+       /* NETIF_F_TSO_ECN */         "tx-tcp-ecn-segmentation",
+       /* NETIF_F_TSO6 */            "tx-tcp6-segmentation",
+       /* NETIF_F_FSO */             "tx-fcoe-segmentation",
+       "",
+       "",
+
+       /* NETIF_F_FCOE_CRC */        "tx-checksum-fcoe-crc",
+       /* NETIF_F_SCTP_CSUM */       "tx-checksum-sctp",
+       /* NETIF_F_FCOE_MTU */        "fcoe-mtu",
+       /* NETIF_F_NTUPLE */          "rx-ntuple-filter",
+       /* NETIF_F_RXHASH */          "rx-hashing",
+       /* NETIF_F_RXCSUM */          "rx-checksum",
+       "",
+       "",
+};
+
+static int __ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+
+       if (sset == ETH_SS_FEATURES)
+               return ARRAY_SIZE(netdev_features_strings);
+
+       if (ops && ops->get_sset_count && ops->get_strings)
+               return ops->get_sset_count(dev, sset);
+       else
+               return -EOPNOTSUPP;
+}
+
+static void __ethtool_get_strings(struct net_device *dev,
+       u32 stringset, u8 *data)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+
+       if (stringset == ETH_SS_FEATURES)
+               memcpy(data, netdev_features_strings,
+                       sizeof(netdev_features_strings));
+       else
+               /* ops->get_strings is valid because checked earlier */
+               ops->get_strings(dev, stringset, data);
+}
+
+static u32 ethtool_get_feature_mask(u32 eth_cmd)
+{
+       /* feature masks of legacy discrete ethtool ops */
+
+       switch (eth_cmd) {
+       case ETHTOOL_GTXCSUM:
+       case ETHTOOL_STXCSUM:
+               return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
+       case ETHTOOL_GRXCSUM:
+       case ETHTOOL_SRXCSUM:
+               return NETIF_F_RXCSUM;
+       case ETHTOOL_GSG:
+       case ETHTOOL_SSG:
+               return NETIF_F_SG;
+       case ETHTOOL_GTSO:
+       case ETHTOOL_STSO:
+               return NETIF_F_ALL_TSO;
+       case ETHTOOL_GUFO:
+       case ETHTOOL_SUFO:
+               return NETIF_F_UFO;
+       case ETHTOOL_GGSO:
+       case ETHTOOL_SGSO:
+               return NETIF_F_GSO;
+       case ETHTOOL_GGRO:
+       case ETHTOOL_SGRO:
+               return NETIF_F_GRO;
+       default:
+               BUG();
+       }
+}
+
+static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+
+       if (!ops)
+               return NULL;
+
+       switch (ethcmd) {
+       case ETHTOOL_GTXCSUM:
+               return ops->get_tx_csum;
+       case ETHTOOL_GRXCSUM:
+               return ops->get_rx_csum;
+       case ETHTOOL_GSG:
+               return ops->get_sg;
+       case ETHTOOL_GTSO:
+               return ops->get_tso;
+       case ETHTOOL_GUFO:
+               return ops->get_ufo;
+       default:
+               return NULL;
+       }
+}
+
+static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
+{
+       return !!(dev->features & NETIF_F_ALL_CSUM);
+}
+
+static int ethtool_get_one_feature(struct net_device *dev,
+       char __user *useraddr, u32 ethcmd)
+{
+       u32 mask = ethtool_get_feature_mask(ethcmd);
+       struct ethtool_value edata = {
+               .cmd = ethcmd,
+               .data = !!(dev->features & mask),
+       };
+
+       /* compatibility with discrete get_ ops */
+       if (!(dev->hw_features & mask)) {
+               u32 (*actor)(struct net_device *);
+
+               actor = __ethtool_get_one_feature_actor(dev, ethcmd);
+
+               /* bug compatibility with old get_rx_csum */
+               if (ethcmd == ETHTOOL_GRXCSUM && !actor)
+                       actor = __ethtool_get_rx_csum_oldbug;
+
+               if (actor)
+                       edata.data = actor(dev);
+       }
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_sg(struct net_device *dev, u32 data);
+static int __ethtool_set_tso(struct net_device *dev, u32 data);
+static int __ethtool_set_ufo(struct net_device *dev, u32 data);
+
+static int ethtool_set_one_feature(struct net_device *dev,
+       void __user *useraddr, u32 ethcmd)
+{
+       struct ethtool_value edata;
+       u32 mask;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       mask = ethtool_get_feature_mask(ethcmd);
+       mask &= dev->hw_features;
+       if (mask) {
+               if (edata.data)
+                       dev->wanted_features |= mask;
+               else
+                       dev->wanted_features &= ~mask;
+
+               netdev_update_features(dev);
+               return 0;
+       }
+
+       /* The driver is not converted to ndo_fix_features or does not
+        * support changing this offload. In the latter case it won't
+        * have the corresponding ethtool_ops field set.
+        *
+        * The following part is to be removed once all drivers advertise
+        * their changeable features in netdev->hw_features and stop
+        * using the discrete offload-setting ops.
+        */
+
+       switch (ethcmd) {
+       case ETHTOOL_STXCSUM:
+               return __ethtool_set_tx_csum(dev, edata.data);
+       case ETHTOOL_SRXCSUM:
+               return __ethtool_set_rx_csum(dev, edata.data);
+       case ETHTOOL_SSG:
+               return __ethtool_set_sg(dev, edata.data);
+       case ETHTOOL_STSO:
+               return __ethtool_set_tso(dev, edata.data);
+       case ETHTOOL_SUFO:
+               return __ethtool_set_ufo(dev, edata.data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
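For a converted driver the fallback switch above becomes dead code: once the changeable bits are in netdev->hw_features, the discrete set_* hooks can simply be dropped from its ethtool_ops. A hedged sketch (the foo_* callbacks are hypothetical; ethtool_op_get_link is the generic helper already exported above):

static const struct ethtool_ops foo_ethtool_ops = {
	.get_drvinfo	= foo_get_drvinfo,	/* hypothetical */
	.get_settings	= foo_get_settings,	/* hypothetical */
	.get_link	= ethtool_op_get_link,
	/* no .set_sg / .set_tx_csum / .set_tso / .set_rx_csum:
	 * offload changes now arrive via ndo_set_features()
	 */
};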
+
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
+{
+       u32 changed;
+
+       if (data & ~flags_dup_features)
+               return -EINVAL;
+
+       /* legacy set_flags() op */
+       if (dev->ethtool_ops->set_flags) {
+               if (unlikely(dev->hw_features & flags_dup_features))
+                       netdev_warn(dev,
+                               "driver BUG: mixed hw_features and set_flags()\n");
+               return dev->ethtool_ops->set_flags(dev, data);
+       }
+
+       /* allow changing only bits set in hw_features */
+       changed = (data ^ dev->wanted_features) & flags_dup_features;
+       if (changed & ~dev->hw_features)
+               return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
+
+       dev->wanted_features =
+               (dev->wanted_features & ~changed) | data;
+
+       netdev_update_features(dev);
+
+       return 0;
+}
+
 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
@@ -251,14 +621,10 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
                                                    void __user *useraddr)
 {
        struct ethtool_sset_info info;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
        u64 sset_mask;
        int i, idx = 0, n_bits = 0, ret, rc;
        u32 *info_buf = NULL;
 
-       if (!ops->get_sset_count)
-               return -EOPNOTSUPP;
-
        if (copy_from_user(&info, useraddr, sizeof(info)))
                return -EFAULT;
 
@@ -285,7 +651,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
                if (!(sset_mask & (1ULL << i)))
                        continue;
 
-               rc = ops->get_sset_count(dev, i);
+               rc = __ethtool_get_sset_count(dev, i);
                if (rc >= 0) {
                        info.sset_mask |= (1ULL << i);
                        info_buf[idx++] = rc;
@@ -817,7 +1183,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = vmalloc(reglen);
+       regbuf = vzalloc(reglen);
        if (!regbuf)
                return -ENOMEM;
 
@@ -1091,6 +1457,9 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
 {
        int err;
 
+       if (data && !(dev->features & NETIF_F_ALL_CSUM))
+               return -EINVAL;
+
        if (!data && dev->ethtool_ops->set_tso) {
                err = dev->ethtool_ops->set_tso(dev, 0);
                if (err)
@@ -1105,145 +1474,55 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
        return dev->ethtool_ops->set_sg(dev, data);
 }
 
-static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
        int err;
 
        if (!dev->ethtool_ops->set_tx_csum)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (!edata.data && dev->ethtool_ops->set_sg) {
+       if (!data && dev->ethtool_ops->set_sg) {
                err = __ethtool_set_sg(dev, 0);
                if (err)
                        return err;
        }
 
-       return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+       return dev->ethtool_ops->set_tx_csum(dev, data);
 }
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 
-static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
-
        if (!dev->ethtool_ops->set_rx_csum)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (!edata.data && dev->ethtool_ops->set_sg)
+       if (!data)
                dev->features &= ~NETIF_F_GRO;
 
-       return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+       return dev->ethtool_ops->set_rx_csum(dev, data);
 }
 
-static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tso(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
-
-       if (!dev->ethtool_ops->set_sg)
-               return -EOPNOTSUPP;
-
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (edata.data &&
-           !(dev->features & NETIF_F_ALL_CSUM))
-               return -EINVAL;
-
-       return __ethtool_set_sg(dev, edata.data);
-}
-
-static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata;
-
        if (!dev->ethtool_ops->set_tso)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (edata.data && !(dev->features & NETIF_F_SG))
+       if (data && !(dev->features & NETIF_F_SG))
                return -EINVAL;
 
-       return dev->ethtool_ops->set_tso(dev, edata.data);
+       return dev->ethtool_ops->set_tso(dev, data);
 }
 
-static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_ufo(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
-
        if (!dev->ethtool_ops->set_ufo)
                return -EOPNOTSUPP;
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-       if (edata.data && !(dev->features & NETIF_F_SG))
+       if (data && !(dev->features & NETIF_F_SG))
                return -EINVAL;
-       if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+       if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
                (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
                        == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
                return -EINVAL;
-       return dev->ethtool_ops->set_ufo(dev, edata.data);
-}
-
-static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata = { ETHTOOL_GGSO };
-
-       edata.data = dev->features & NETIF_F_GSO;
-       if (copy_to_user(useraddr, &edata, sizeof(edata)))
-               return -EFAULT;
-       return 0;
-}
-
-static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata;
-
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-       if (edata.data)
-               dev->features |= NETIF_F_GSO;
-       else
-               dev->features &= ~NETIF_F_GSO;
-       return 0;
-}
-
-static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata = { ETHTOOL_GGRO };
-
-       edata.data = dev->features & NETIF_F_GRO;
-       if (copy_to_user(useraddr, &edata, sizeof(edata)))
-               return -EFAULT;
-       return 0;
-}
-
-static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata;
-
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (edata.data) {
-               u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
-                               dev->ethtool_ops->get_rx_csum(dev) :
-                               ethtool_op_get_rx_csum(dev);
-
-               if (!rxcsum)
-                       return -EINVAL;
-               dev->features |= NETIF_F_GRO;
-       } else
-               dev->features &= ~NETIF_F_GRO;
-
-       return 0;
+       return dev->ethtool_ops->set_ufo(dev, data);
 }
 
 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
@@ -1287,17 +1566,13 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_gstrings gstrings;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
        u8 *data;
        int ret;
 
-       if (!ops->get_strings || !ops->get_sset_count)
-               return -EOPNOTSUPP;
-
        if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
                return -EFAULT;
 
-       ret = ops->get_sset_count(dev, gstrings.string_set);
+       ret = __ethtool_get_sset_count(dev, gstrings.string_set);
        if (ret < 0)
                return ret;
 
@@ -1307,7 +1582,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        if (!data)
                return -ENOMEM;
 
-       ops->get_strings(dev, gstrings.string_set, data);
+       __ethtool_get_strings(dev, gstrings.string_set, data);
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1317,7 +1592,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
                goto out;
        ret = 0;
 
- out:
+out:
        kfree(data);
        return ret;
 }
@@ -1458,7 +1733,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        void __user *useraddr = ifr->ifr_data;
        u32 ethcmd;
        int rc;
-       unsigned long old_features;
+       u32 old_features;
 
        if (!dev || !netif_device_present(dev))
                return -ENODEV;
@@ -1500,6 +1775,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
+       case ETHTOOL_GFEATURES:
                break;
        default:
                if (!capable(CAP_NET_ADMIN))
@@ -1570,42 +1846,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SPAUSEPARAM:
                rc = ethtool_set_pauseparam(dev, useraddr);
                break;
-       case ETHTOOL_GRXCSUM:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_rx_csum ?
-                                       dev->ethtool_ops->get_rx_csum :
-                                       ethtool_op_get_rx_csum));
-               break;
-       case ETHTOOL_SRXCSUM:
-               rc = ethtool_set_rx_csum(dev, useraddr);
-               break;
-       case ETHTOOL_GTXCSUM:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_tx_csum ?
-                                       dev->ethtool_ops->get_tx_csum :
-                                       ethtool_op_get_tx_csum));
-               break;
-       case ETHTOOL_STXCSUM:
-               rc = ethtool_set_tx_csum(dev, useraddr);
-               break;
-       case ETHTOOL_GSG:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_sg ?
-                                       dev->ethtool_ops->get_sg :
-                                       ethtool_op_get_sg));
-               break;
-       case ETHTOOL_SSG:
-               rc = ethtool_set_sg(dev, useraddr);
-               break;
-       case ETHTOOL_GTSO:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_tso ?
-                                       dev->ethtool_ops->get_tso :
-                                       ethtool_op_get_tso));
-               break;
-       case ETHTOOL_STSO:
-               rc = ethtool_set_tso(dev, useraddr);
-               break;
        case ETHTOOL_TEST:
                rc = ethtool_self_test(dev, useraddr);
                break;
@@ -1621,21 +1861,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GPERMADDR:
                rc = ethtool_get_perm_addr(dev, useraddr);
                break;
-       case ETHTOOL_GUFO:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_ufo ?
-                                       dev->ethtool_ops->get_ufo :
-                                       ethtool_op_get_ufo));
-               break;
-       case ETHTOOL_SUFO:
-               rc = ethtool_set_ufo(dev, useraddr);
-               break;
-       case ETHTOOL_GGSO:
-               rc = ethtool_get_gso(dev, useraddr);
-               break;
-       case ETHTOOL_SGSO:
-               rc = ethtool_set_gso(dev, useraddr);
-               break;
        case ETHTOOL_GFLAGS:
                rc = ethtool_get_value(dev, useraddr, ethcmd,
                                       (dev->ethtool_ops->get_flags ?
@@ -1643,8 +1868,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
                                        ethtool_op_get_flags));
                break;
        case ETHTOOL_SFLAGS:
-               rc = ethtool_set_value(dev, useraddr,
-                                      dev->ethtool_ops->set_flags);
+               rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
                break;
        case ETHTOOL_GPFLAGS:
                rc = ethtool_get_value(dev, useraddr, ethcmd,
@@ -1666,12 +1890,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SRXCLSRLINS:
                rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
                break;
-       case ETHTOOL_GGRO:
-               rc = ethtool_get_gro(dev, useraddr);
-               break;
-       case ETHTOOL_SGRO:
-               rc = ethtool_set_gro(dev, useraddr);
-               break;
        case ETHTOOL_FLASHDEV:
                rc = ethtool_flash_device(dev, useraddr);
                break;
@@ -1693,6 +1911,30 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SRXFHINDIR:
                rc = ethtool_set_rxfh_indir(dev, useraddr);
                break;
+       case ETHTOOL_GFEATURES:
+               rc = ethtool_get_features(dev, useraddr);
+               break;
+       case ETHTOOL_SFEATURES:
+               rc = ethtool_set_features(dev, useraddr);
+               break;
+       case ETHTOOL_GTXCSUM:
+       case ETHTOOL_GRXCSUM:
+       case ETHTOOL_GSG:
+       case ETHTOOL_GTSO:
+       case ETHTOOL_GUFO:
+       case ETHTOOL_GGSO:
+       case ETHTOOL_GGRO:
+               rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
+               break;
+       case ETHTOOL_STXCSUM:
+       case ETHTOOL_SRXCSUM:
+       case ETHTOOL_SSG:
+       case ETHTOOL_STSO:
+       case ETHTOOL_SUFO:
+       case ETHTOOL_SGSO:
+       case ETHTOOL_SGRO:
+               rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
+               break;
        default:
                rc = -EOPNOTSUPP;
        }
index a20e5d3..8248ebb 100644 (file)
@@ -181,13 +181,13 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
 {
        int ret = 0;
 
-       if (rule->iifindex && (rule->iifindex != fl->iif))
+       if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
                goto out;
 
-       if (rule->oifindex && (rule->oifindex != fl->oif))
+       if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
                goto out;
 
-       if ((rule->mark ^ fl->mark) & rule->mark_mask)
+       if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;
 
        ret = ops->match(rule, fl, flags);
index afc5837..232b187 100644 (file)
@@ -142,14 +142,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        if (err)
                return err;
 
-       rcu_read_lock_bh();
-       filter = rcu_dereference_bh(sk->sk_filter);
+       rcu_read_lock();
+       filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns);
 
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return err;
 }
index 127c8a7..990703b 100644 (file)
@@ -172,9 +172,9 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
-                         struct flowi *key)
+                         const struct flowi *key)
 {
-       u32 *k = (u32 *) key;
+       const u32 *k = (const u32 *) key;
 
        return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
@@ -186,17 +186,17 @@ typedef unsigned long flow_compare_t;
  * important assumptions that we can here, such as alignment and
  * constant size.
  */
-static int flow_key_compare(struct flowi *key1, struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
 {
-       flow_compare_t *k1, *k1_lim, *k2;
+       const flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
 
        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
-       k1 = (flow_compare_t *) key1;
+       k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + n_elem;
 
-       k2 = (flow_compare_t *) key2;
+       k2 = (const flow_compare_t *) key2;
 
        do {
                if (*k1++ != *k2++)
@@ -207,7 +207,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 }
 
 struct flow_cache_object *
-flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
 {
        struct flow_cache *fc = &flow_cache_global;
index 60a9029..799f06e 100644 (file)
@@ -316,7 +316,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
 {
        size_t size = entries * sizeof(struct neighbour *);
        struct neigh_hash_table *ret;
-       struct neighbour **buckets;
+       struct neighbour __rcu **buckets;
 
        ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
        if (!ret)
@@ -324,14 +324,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
        if (size <= PAGE_SIZE)
                buckets = kzalloc(size, GFP_ATOMIC);
        else
-               buckets = (struct neighbour **)
+               buckets = (struct neighbour __rcu **)
                          __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                           get_order(size));
        if (!buckets) {
                kfree(ret);
                return NULL;
        }
-       rcu_assign_pointer(ret->hash_buckets, buckets);
+       ret->hash_buckets = buckets;
        ret->hash_mask = entries - 1;
        get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
        return ret;
@@ -343,7 +343,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
                                                    struct neigh_hash_table,
                                                    rcu);
        size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
-       struct neighbour **buckets = nht->hash_buckets;
+       struct neighbour __rcu **buckets = nht->hash_buckets;
 
        if (size <= PAGE_SIZE)
                kfree(buckets);
@@ -1540,7 +1540,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
                panic("cannot create neighbour proc dir entry");
 #endif
 
-       tbl->nht = neigh_hash_alloc(8);
+       RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
 
        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1602,7 +1602,8 @@ int neigh_table_clear(struct neigh_table *tbl)
        }
        write_unlock(&neigh_tbl_lock);
 
-       call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+       call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
+                neigh_hash_free_rcu);
        tbl->nht = NULL;
 
        kfree(tbl->phash_buckets);
index e23c01b..5ceb257 100644 (file)
@@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
 NETDEVICE_SHOW(type, fmt_dec);
 NETDEVICE_SHOW(link_mode, fmt_dec);
 
@@ -295,6 +295,20 @@ static ssize_t show_ifalias(struct device *dev,
        return ret;
 }
 
+NETDEVICE_SHOW(group, fmt_dec);
+
+static int change_group(struct net_device *net, unsigned long new_group)
+{
+       dev_set_group(net, (int) new_group);
+       return 0;
+}
+
+static ssize_t store_group(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t len)
+{
+       return netdev_store(dev, attr, buf, len, change_group);
+}
+
 static struct device_attribute net_class_attributes[] = {
        __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -316,6 +330,7 @@ static struct device_attribute net_class_attributes[] = {
        __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
        __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
               store_tx_queue_len),
+       __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
        {}
 };
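Together with the show_group/store_group handlers above, this exposes the device group as a plain sysfs attribute. A hedged usage sketch from userspace; the path and write format follow directly from the attribute definition above, and the write requires the usual sysfs privileges:

#include <stdio.h>

/* Writes /sys/class/net/<ifname>/netdev_group; returns 0 on success. */
static int set_netdev_group(const char *ifname, int group)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/netdev_group",
		 ifname);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", group);
	return fclose(f) ? -1 : 0;
}

The same value is also settable over rtnetlink via the IFLA_GROUP attribute added in the rtnetlink.c hunks below.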
 
index 02dc2cb..06be243 100644 (file)
@@ -193,6 +193,17 @@ void netpoll_poll_dev(struct net_device *dev)
 
        poll_napi(dev);
 
+       if (dev->priv_flags & IFF_SLAVE) {
+               if (dev->npinfo) {
+                       struct net_device *bond_dev = dev->master;
+                       struct sk_buff *skb;
+                       while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+                               skb->dev = bond_dev;
+                               skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+                       }
+               }
+       }
+
        service_arp_queue(dev->npinfo);
 
        zap_completion_queue();
@@ -313,9 +324,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
-                                       dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
-                                       dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
index a9e7fc4..0c55eaa 100644 (file)
@@ -251,6 +251,7 @@ struct pktgen_dev {
        int max_pkt_size;       /* = ETH_ZLEN; */
        int pkt_overhead;       /* overhead for MPLS, VLANs, IPSEC etc */
        int nfrags;
+       struct page *page;
        u64 delay;              /* nano-seconds */
 
        __u64 count;            /* Default No packets to send */
@@ -1134,6 +1135,10 @@ static ssize_t pktgen_if_write(struct file *file,
                if (node_possible(value)) {
                        pkt_dev->node = value;
                        sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+                       if (pkt_dev->page) {
+                               put_page(pkt_dev->page);
+                               pkt_dev->page = NULL;
+                       }
                }
                else
                        sprintf(pg_result, "ERROR: node not possible");
@@ -2605,6 +2610,89 @@ static inline __be16 build_tci(unsigned int id, unsigned int cfi,
        return htons(id | (cfi << 12) | (prio << 13));
 }
 
+static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+                               int datalen)
+{
+       struct timeval timestamp;
+       struct pktgen_hdr *pgh;
+
+       pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+       datalen -= sizeof(*pgh);
+
+       if (pkt_dev->nfrags <= 0) {
+               memset(skb_put(skb, datalen), 0, datalen);
+       } else {
+               int frags = pkt_dev->nfrags;
+               int i, len;
+
+               if (frags > MAX_SKB_FRAGS)
+                       frags = MAX_SKB_FRAGS;
+               len = datalen - frags * PAGE_SIZE;
+               if (len > 0) {
+                       memset(skb_put(skb, len), 0, len);
+                       datalen = frags * PAGE_SIZE;
+               }
+
+               i = 0;
+               while (datalen > 0) {
+                       if (unlikely(!pkt_dev->page)) {
+                               int node = numa_node_id();
+
+                               if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
+                                       node = pkt_dev->node;
+                               pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+                               if (!pkt_dev->page)
+                                       break;
+                       }
+                       skb_shinfo(skb)->frags[i].page = pkt_dev->page;
+                       get_page(pkt_dev->page);
+                       skb_shinfo(skb)->frags[i].page_offset = 0;
+                       skb_shinfo(skb)->frags[i].size =
+                           (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+                       datalen -= skb_shinfo(skb)->frags[i].size;
+                       skb->len += skb_shinfo(skb)->frags[i].size;
+                       skb->data_len += skb_shinfo(skb)->frags[i].size;
+                       i++;
+                       skb_shinfo(skb)->nr_frags = i;
+               }
+
+               while (i < frags) {
+                       int rem;
+
+                       if (i == 0)
+                               break;
+
+                       rem = skb_shinfo(skb)->frags[i - 1].size / 2;
+                       if (rem == 0)
+                               break;
+
+                       skb_shinfo(skb)->frags[i - 1].size -= rem;
+
+                       skb_shinfo(skb)->frags[i] =
+                           skb_shinfo(skb)->frags[i - 1];
+                       get_page(skb_shinfo(skb)->frags[i].page);
+                       skb_shinfo(skb)->frags[i].page =
+                           skb_shinfo(skb)->frags[i - 1].page;
+                       skb_shinfo(skb)->frags[i].page_offset +=
+                           skb_shinfo(skb)->frags[i - 1].size;
+                       skb_shinfo(skb)->frags[i].size = rem;
+                       i++;
+                       skb_shinfo(skb)->nr_frags = i;
+               }
+       }
+
+       /* Stamp the time and sequence number, and convert them
+        * to network byte order.
+        */
+       pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+       pgh->seq_num = htonl(pkt_dev->seq_num);
+
+       do_gettimeofday(&timestamp);
+       pgh->tv_sec = htonl(timestamp.tv_sec);
+       pgh->tv_usec = htonl(timestamp.tv_usec);
+}
+
 static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                                        struct pktgen_dev *pkt_dev)
 {
@@ -2613,7 +2701,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        struct udphdr *udph;
        int datalen, iplen;
        struct iphdr *iph;
-       struct pktgen_hdr *pgh = NULL;
        __be16 protocol = htons(ETH_P_IP);
        __be32 *mpls;
        __be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
@@ -2729,76 +2816,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                           pkt_dev->pkt_overhead);
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
-
-       if (pkt_dev->nfrags <= 0) {
-               pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-               memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
-       } else {
-               int frags = pkt_dev->nfrags;
-               int i, len;
-
-               pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
-               if (frags > MAX_SKB_FRAGS)
-                       frags = MAX_SKB_FRAGS;
-               if (datalen > frags * PAGE_SIZE) {
-                       len = datalen - frags * PAGE_SIZE;
-                       memset(skb_put(skb, len), 0, len);
-                       datalen = frags * PAGE_SIZE;
-               }
-
-               i = 0;
-               while (datalen > 0) {
-                       struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
-                       skb_shinfo(skb)->frags[i].page = page;
-                       skb_shinfo(skb)->frags[i].page_offset = 0;
-                       skb_shinfo(skb)->frags[i].size =
-                           (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
-                       datalen -= skb_shinfo(skb)->frags[i].size;
-                       skb->len += skb_shinfo(skb)->frags[i].size;
-                       skb->data_len += skb_shinfo(skb)->frags[i].size;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-
-               while (i < frags) {
-                       int rem;
-
-                       if (i == 0)
-                               break;
-
-                       rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-                       if (rem == 0)
-                               break;
-
-                       skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-                       skb_shinfo(skb)->frags[i] =
-                           skb_shinfo(skb)->frags[i - 1];
-                       get_page(skb_shinfo(skb)->frags[i].page);
-                       skb_shinfo(skb)->frags[i].page =
-                           skb_shinfo(skb)->frags[i - 1].page;
-                       skb_shinfo(skb)->frags[i].page_offset +=
-                           skb_shinfo(skb)->frags[i - 1].size;
-                       skb_shinfo(skb)->frags[i].size = rem;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-       }
-
-       /* Stamp the time, and sequence number,
-        * convert them to network byte order
-        */
-       if (pgh) {
-               struct timeval timestamp;
-
-               pgh->pgh_magic = htonl(PKTGEN_MAGIC);
-               pgh->seq_num = htonl(pkt_dev->seq_num);
-
-               do_gettimeofday(&timestamp);
-               pgh->tv_sec = htonl(timestamp.tv_sec);
-               pgh->tv_usec = htonl(timestamp.tv_usec);
-       }
+       pktgen_finalize_skb(pkt_dev, skb, datalen);
 
 #ifdef CONFIG_XFRM
        if (!process_ipsec(pkt_dev, skb, protocol))
@@ -2980,7 +2998,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        struct udphdr *udph;
        int datalen;
        struct ipv6hdr *iph;
-       struct pktgen_hdr *pgh = NULL;
        __be16 protocol = htons(ETH_P_IPV6);
        __be32 *mpls;
        __be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
@@ -3083,75 +3100,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
 
-       if (pkt_dev->nfrags <= 0)
-               pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-       else {
-               int frags = pkt_dev->nfrags;
-               int i;
-
-               pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
-               if (frags > MAX_SKB_FRAGS)
-                       frags = MAX_SKB_FRAGS;
-               if (datalen > frags * PAGE_SIZE) {
-                       skb_put(skb, datalen - frags * PAGE_SIZE);
-                       datalen = frags * PAGE_SIZE;
-               }
-
-               i = 0;
-               while (datalen > 0) {
-                       struct page *page = alloc_pages(GFP_KERNEL, 0);
-                       skb_shinfo(skb)->frags[i].page = page;
-                       skb_shinfo(skb)->frags[i].page_offset = 0;
-                       skb_shinfo(skb)->frags[i].size =
-                           (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
-                       datalen -= skb_shinfo(skb)->frags[i].size;
-                       skb->len += skb_shinfo(skb)->frags[i].size;
-                       skb->data_len += skb_shinfo(skb)->frags[i].size;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-
-               while (i < frags) {
-                       int rem;
-
-                       if (i == 0)
-                               break;
-
-                       rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-                       if (rem == 0)
-                               break;
-
-                       skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-                       skb_shinfo(skb)->frags[i] =
-                           skb_shinfo(skb)->frags[i - 1];
-                       get_page(skb_shinfo(skb)->frags[i].page);
-                       skb_shinfo(skb)->frags[i].page =
-                           skb_shinfo(skb)->frags[i - 1].page;
-                       skb_shinfo(skb)->frags[i].page_offset +=
-                           skb_shinfo(skb)->frags[i - 1].size;
-                       skb_shinfo(skb)->frags[i].size = rem;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-       }
-
-       /* Stamp the time, and sequence number,
-        * convert them to network byte order
-        * should we update cloned packets too ?
-        */
-       if (pgh) {
-               struct timeval timestamp;
-
-               pgh->pgh_magic = htonl(PKTGEN_MAGIC);
-               pgh->seq_num = htonl(pkt_dev->seq_num);
-
-               do_gettimeofday(&timestamp);
-               pgh->tv_sec = htonl(timestamp.tv_sec);
-               pgh->tv_usec = htonl(timestamp.tv_usec);
-       }
-       /* pkt_dev->seq_num++; FF: you really mean this? */
+       pktgen_finalize_skb(pkt_dev, skb, datalen);
 
        return skb;
 }
@@ -3321,7 +3270,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
                                    pkt_dev->started_at);
        ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
 
-       p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+       p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
                     (unsigned long long)ktime_to_us(elapsed),
                     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
                     (unsigned long long)ktime_to_us(idle),
@@ -3884,6 +3833,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
        free_SAs(pkt_dev);
 #endif
        vfree(pkt_dev->flows);
+       if (pkt_dev->page)
+               put_page(pkt_dev->page);
        kfree(pkt_dev);
        return 0;
 }
index 750db57..49f7ea5 100644 (file)
@@ -868,6 +868,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
        NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
        NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+       NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
 
        if (dev->ifindex != dev->iflink)
                NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
@@ -1035,6 +1036,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_MAP]              = { .len = sizeof(struct rtnl_link_ifmap) },
        [IFLA_MTU]              = { .type = NLA_U32 },
        [IFLA_LINK]             = { .type = NLA_U32 },
+       [IFLA_MASTER]           = { .type = NLA_U32 },
        [IFLA_TXQLEN]           = { .type = NLA_U32 },
        [IFLA_WEIGHT]           = { .type = NLA_U32 },
        [IFLA_OPERSTATE]        = { .type = NLA_U8 },
@@ -1121,8 +1123,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
                                return -EOPNOTSUPP;
 
                        if (af_ops->validate_link_af) {
-                               err = af_ops->validate_link_af(dev,
-                                                       tb[IFLA_AF_SPEC]);
+                               err = af_ops->validate_link_af(dev, af);
                                if (err < 0)
                                        return err;
                        }
@@ -1178,6 +1179,41 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
        return err;
 }
 
+static int do_set_master(struct net_device *dev, int ifindex)
+{
+       struct net_device *master_dev;
+       const struct net_device_ops *ops;
+       int err;
+
+       if (dev->master) {
+               if (dev->master->ifindex == ifindex)
+                       return 0;
+               ops = dev->master->netdev_ops;
+               if (ops->ndo_del_slave) {
+                       err = ops->ndo_del_slave(dev->master, dev);
+                       if (err)
+                               return err;
+               } else {
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       if (ifindex) {
+               master_dev = __dev_get_by_index(dev_net(dev), ifindex);
+               if (!master_dev)
+                       return -EINVAL;
+               ops = master_dev->netdev_ops;
+               if (ops->ndo_add_slave) {
+                       err = ops->ndo_add_slave(master_dev, dev);
+                       if (err)
+                               return err;
+               } else {
+                       return -EOPNOTSUPP;
+               }
+       }
+       return 0;
+}
+
 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                      struct nlattr **tb, char *ifname, int modified)
 {
@@ -1265,6 +1301,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                modified = 1;
        }
 
+       if (tb[IFLA_GROUP]) {
+               dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+               modified = 1;
+       }
+
        /*
         * Interface selected by interface index but interface
         * name provided implies that a name change has been
@@ -1296,6 +1337,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        goto errout;
        }
 
+       if (tb[IFLA_MASTER]) {
+               err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+               if (err)
+                       goto errout;
+               modified = 1;
+       }
+
        if (tb[IFLA_TXQLEN])
                dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
 
@@ -1542,6 +1590,8 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
                set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
        if (tb[IFLA_LINKMODE])
                dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+       if (tb[IFLA_GROUP])
+               dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
 
        return dev;
 
@@ -1552,6 +1602,24 @@ err:
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
+static int rtnl_group_changelink(struct net *net, int group,
+               struct ifinfomsg *ifm,
+               struct nlattr **tb)
+{
+       struct net_device *dev;
+       int err;
+
+       for_each_netdev(net, dev) {
+               if (dev->group == group) {
+                       err = do_setlink(dev, ifm, tb, NULL, 0);
+                       if (err < 0)
+                               return err;
+               }
+       }
+
+       return 0;
+}
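rtnl_group_changelink() makes it possible to address a whole group with a single RTM_NEWLINK request: ifi_index left at zero, no NLM_F_CREATE, and an IFLA_GROUP attribute selecting the group. A hedged userspace sketch; it assumes an already-connected NETLINK_ROUTE socket in fd, omits reading the ACK, and relies on the IFLA_GROUP value added by this series to linux/if_link.h:

#include <string.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Bring every device in 'group' up with one request. */
static int group_set_up(int fd, unsigned int group)
{
	struct {
		struct nlmsghdr  nh;
		struct ifinfomsg ifi;
		char             attrbuf[RTA_SPACE(sizeof(unsigned int))];
	} req;
	struct rtattr *rta;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifi));
	req.nh.nlmsg_type  = RTM_NEWLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifi.ifi_family = AF_UNSPEC;
	req.ifi.ifi_change = IFF_UP;
	req.ifi.ifi_flags  = IFF_UP;

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_GROUP;
	rta->rta_len  = RTA_LENGTH(sizeof(group));
	memcpy(RTA_DATA(rta), &group, sizeof(group));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

	return send(fd, &req, req.nh.nlmsg_len, 0) < 0 ? -1 : 0;
}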
+
 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct net *net = sock_net(skb->sk);
@@ -1579,10 +1647,12 @@ replay:
        ifm = nlmsg_data(nlh);
        if (ifm->ifi_index > 0)
                dev = __dev_get_by_index(net, ifm->ifi_index);
-       else if (ifname[0])
-               dev = __dev_get_by_name(net, ifname);
-       else
-               dev = NULL;
+       else {
+               if (ifname[0])
+                       dev = __dev_get_by_name(net, ifname);
+               else
+                       dev = NULL;
+       }
 
        err = validate_linkmsg(dev, tb);
        if (err < 0)
@@ -1646,8 +1716,13 @@ replay:
                        return do_setlink(dev, ifm, tb, ifname, modified);
                }
 
-               if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+               if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+                       if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+                               return rtnl_group_changelink(net,
+                                               nla_get_u32(tb[IFLA_GROUP]),
+                                               ifm, tb);
                        return -ENODEV;
+               }
 
                if (ifm->ifi_index)
                        return -EOPNOTSUPP;
@@ -1672,6 +1747,9 @@ replay:
                        snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
 
                dest_net = rtnl_link_get_net(net, tb);
+               if (IS_ERR(dest_net))
+                       return PTR_ERR(dest_net);
+
                dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
 
                if (IS_ERR(dev))
index d31bb36..1eb526a 100644 (file)
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
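+       /* destructor_arg is not cleared by the memset above; silence kmemcheck */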
+       kmemcheck_annotate_variable(shinfo->destructor_arg);
 
        if (fclone) {
                struct sk_buff *child = skb + 1;
@@ -2433,8 +2434,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                        return -ENOMEM;
 
                /* initialize the next frag */
-               sk->sk_sndmsg_page = page;
-               sk->sk_sndmsg_off = 0;
                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
                skb->truesize += PAGE_SIZE;
                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
@@ -2454,7 +2453,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                        return -EFAULT;
 
                /* copy was successful so update the size parameters */
-               sk->sk_sndmsg_off += copy;
                frag->size += copy;
                skb->len += copy;
                skb->data_len += copy;
@@ -2497,7 +2495,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *     a pointer to the first in a list of new skbs for the segments.
  *     In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
@@ -2507,7 +2505,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
        unsigned int offset = doffset;
        unsigned int headroom;
        unsigned int len;
-       int sg = features & NETIF_F_SG;
+       int sg = !!(features & NETIF_F_SG);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int err = -ENOMEM;
        int i = 0;
@@ -2744,8 +2742,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 merge:
        if (offset > headlen) {
-               skbinfo->frags[0].page_offset += offset - headlen;
-               skbinfo->frags[0].size -= offset - headlen;
+               unsigned int eat = offset - headlen;
+
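+               /* trim the already-consumed bytes from frag[0] and from the skb totals */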
+               skbinfo->frags[0].page_offset += eat;
+               skbinfo->frags[0].size -= eat;
+               skb->data_len -= eat;
+               skb->len -= eat;
                offset = headlen;
        }
 
index d900ab9..3609eac 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, Intel Corporation.
+ * Copyright (c) 2008-2011, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -583,7 +583,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
        u8 up, idtype;
        int ret = -EINVAL;
 
-       if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+       if (!tb[DCB_ATTR_APP])
                goto out;
 
        ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -604,7 +604,16 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
                goto out;
 
        id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
-       up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+       if (netdev->dcbnl_ops->getapp) {
+               up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+       } else {
+               struct dcb_app app = {
+                       .selector = idtype,
+                       .protocol = id,
+               };
+               up = dcb_getapp(netdev, &app);
+       }
 
        /* send this back */
        dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -617,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
        dcb->cmd = DCB_CMD_GAPP;
 
        app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+       if (!app_nest)
+               goto out_cancel;
+
        ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
        if (ret)
                goto out_cancel;
@@ -1181,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
                        goto err;
        }
 
-       if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+       if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
                struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
                err = ops->ieee_setpfc(netdev, pfc);
                if (err)
@@ -1212,6 +1224,59 @@ err:
        return err;
 }
 
+static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
+                               int app_nested_type, int app_info_type,
+                               int app_entry_type)
+{
+       struct dcb_peer_app_info info;
+       struct dcb_app *table = NULL;
+       const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+       u16 app_count;
+       int err;
+
+
+       /*
+        * Retrieve the peer app configuration from the driver. If the driver
+        * handlers fail, exit without doing anything.
+        */
+       err = ops->peer_getappinfo(netdev, &info, &app_count);
+       if (!err && app_count) {
+               table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
+               if (!table)
+                       return -ENOMEM;
+
+               err = ops->peer_getapptable(netdev, table);
+       }
+
+       if (!err) {
+               u16 i;
+               struct nlattr *app;
+
+               /*
+                * Build the message; from here on, the only possible failure
+                * is due to the skb size.
+                */
+               err = -EMSGSIZE;
+
+               app = nla_nest_start(skb, app_nested_type);
+               if (!app)
+                       goto nla_put_failure;
+
+               if (app_info_type)
+                       NLA_PUT(skb, app_info_type, sizeof(info), &info);
+
+               for (i = 0; i < app_count; i++)
+                       NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
+                               &table[i]);
+
+               nla_nest_end(skb, app);
+       }
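+       /* driver callback failures are not propagated; the peer app section is simply omitted */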
+       err = 0;
+
+nla_put_failure:
+       kfree(table);
+       return err;
+}
 
 /* Handle IEEE 802.1Qaz GET commands. */
 static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
@@ -1276,6 +1341,30 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
        spin_unlock(&dcb_lock);
        nla_nest_end(skb, app);
 
+       /* get peer info if available */
+       if (ops->ieee_peer_getets) {
+               struct ieee_ets ets;
+               err = ops->ieee_peer_getets(netdev, &ets);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+       }
+
+       if (ops->ieee_peer_getpfc) {
+               struct ieee_pfc pfc;
+               err = ops->ieee_peer_getpfc(netdev, &pfc);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+       }
+
+       if (ops->peer_getappinfo && ops->peer_getapptable) {
+               err = dcbnl_build_peer_app(netdev, skb,
+                                          DCB_ATTR_IEEE_PEER_APP,
+                                          DCB_ATTR_IEEE_APP_UNSPEC,
+                                          DCB_ATTR_IEEE_APP);
+               if (err)
+                       goto nla_put_failure;
+       }
+
        nla_nest_end(skb, ieee);
        nlmsg_end(skb, nlh);
 
@@ -1429,6 +1518,71 @@ err:
        return ret;
 }
 
+/* Handle CEE DCBX GET commands. */
+static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
+                        u32 pid, u32 seq, u16 flags)
+{
+       struct sk_buff *skb;
+       struct nlmsghdr *nlh;
+       struct dcbmsg *dcb;
+       struct nlattr *cee;
+       const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+       int err;
+
+       if (!ops)
+               return -EOPNOTSUPP;
+
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!skb)
+               return -ENOBUFS;
+
+       nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+
+       dcb = NLMSG_DATA(nlh);
+       dcb->dcb_family = AF_UNSPEC;
+       dcb->cmd = DCB_CMD_CEE_GET;
+
+       NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+       cee = nla_nest_start(skb, DCB_ATTR_CEE);
+       if (!cee)
+               goto nla_put_failure;
+
+       /* get peer info if available */
+       if (ops->cee_peer_getpg) {
+               struct cee_pg pg;
+               err = ops->cee_peer_getpg(netdev, &pg);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+       }
+
+       if (ops->cee_peer_getpfc) {
+               struct cee_pfc pfc;
+               err = ops->cee_peer_getpfc(netdev, &pfc);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+       }
+
+       if (ops->peer_getappinfo && ops->peer_getapptable) {
+               err = dcbnl_build_peer_app(netdev, skb,
+                                          DCB_ATTR_CEE_PEER_APP_TABLE,
+                                          DCB_ATTR_CEE_PEER_APP_INFO,
+                                          DCB_ATTR_CEE_PEER_APP);
+               if (err)
+                       goto nla_put_failure;
+       }
+
+       nla_nest_end(skb, cee);
+       nlmsg_end(skb, nlh);
+
+       return rtnl_unicast(skb, &init_net, pid);
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+nlmsg_failure:
+       kfree_skb(skb);
+       return -1;
+}
+
 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct net *net = sock_net(skb->sk);
@@ -1558,6 +1712,10 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                       nlh->nlmsg_flags);
                goto out;
+       case DCB_CMD_CEE_GET:
+               ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
+                                   nlh->nlmsg_flags);
+               goto out;
        default:
                goto errout;
        }
@@ -1604,6 +1762,10 @@ EXPORT_SYMBOL(dcb_getapp);
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
        struct dcb_app_type *itr;
+       struct dcb_app_type event;
+
+       memcpy(&event.name, dev->name, sizeof(event.name));
+       memcpy(&event.app, new, sizeof(event.app));
 
        spin_lock(&dcb_lock);
        /* Search for existing match and replace */
@@ -1635,7 +1797,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
        }
 out:
        spin_unlock(&dcb_lock);
-       call_dcbevent_notifiers(DCB_APP_EVENT, new);
+       call_dcbevent_notifiers(DCB_APP_EVENT, &event);
        return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
index e96d5e8..fadecd2 100644 (file)
@@ -583,6 +583,15 @@ done:
        dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+       return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
        struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
index 8cde009..4222e7a 100644 (file)
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                /* Caller (dccp_v4_do_rcv) will send Reset */
                dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
                return 1;
+       } else if (sk->sk_state == DCCP_CLOSED) {
+               dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+               return 1;
        }
 
        if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        switch (sk->sk_state) {
-       case DCCP_CLOSED:
-               dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-               return 1;
-
        case DCCP_REQUESTING:
                queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
                if (queued >= 0)
index 45a434f..ae451c6 100644 (file)
@@ -43,9 +43,9 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct inet_sock *inet = inet_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
+       __be16 orig_sport, orig_dport;
        struct rtable *rt;
        __be32 daddr, nexthop;
-       int tmp;
        int err;
 
        dp->dccps_role = DCCP_ROLE_CLIENT;
@@ -63,12 +63,14 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                nexthop = inet->opt->faddr;
        }
 
-       tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
-                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
-                              IPPROTO_DCCP,
-                              inet->inet_sport, usin->sin_port, sk, 1);
-       if (tmp < 0)
-               return tmp;
+       orig_sport = inet->inet_sport;
+       orig_dport = usin->sin_port;
+       rt = ip_route_connect(nexthop, inet->inet_saddr,
+                             RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
+                             IPPROTO_DCCP,
+                             orig_sport, orig_dport, sk, true);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
@@ -99,11 +101,13 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err != 0)
                goto failure;
 
-       err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport,
-                               inet->inet_dport, sk);
-       if (err != 0)
+       rt = ip_route_newports(rt, IPPROTO_DCCP,
+                              orig_sport, orig_dport,
+                              inet->inet_sport, inet->inet_dport, sk);
+       if (IS_ERR(rt)) {
+               rt = NULL;
                goto failure;
-
+       }
        /* OK, now commit destination to socket.  */
        sk_setup_caps(sk, &rt->dst);
 
@@ -461,17 +465,19 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
                                           struct sk_buff *skb)
 {
        struct rtable *rt;
-       struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
-                           .fl4_dst = ip_hdr(skb)->saddr,
-                           .fl4_src = ip_hdr(skb)->daddr,
-                           .fl4_tos = RT_CONN_FLAGS(sk),
-                           .proto = sk->sk_protocol,
-                           .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
-                           .fl_ip_dport = dccp_hdr(skb)->dccph_sport
-                         };
-
-       security_skb_classify_flow(skb, &fl);
-       if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
+       struct flowi4 fl4 = {
+               .flowi4_oif = skb_rtable(skb)->rt_iif,
+               .daddr = ip_hdr(skb)->saddr,
+               .saddr = ip_hdr(skb)->daddr,
+               .flowi4_tos = RT_CONN_FLAGS(sk),
+               .flowi4_proto = sk->sk_protocol,
+               .fl4_sport = dccp_hdr(skb)->dccph_dport,
+               .fl4_dport = dccp_hdr(skb)->dccph_sport,
+       };
+
+       security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+       rt = ip_route_output_flow(net, &fl4, sk);
+       if (IS_ERR(rt)) {
                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
index dca711d..de1b7e3 100644 (file)
@@ -147,30 +147,24 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                dst = __sk_dst_check(sk, np->dst_cookie);
                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
-                       struct flowi fl;
+                       struct flowi6 fl6;
 
                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
-                       memset(&fl, 0, sizeof(fl));
-                       fl.proto = IPPROTO_DCCP;
-                       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-                       ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-                       fl.oif = sk->sk_bound_dev_if;
-                       fl.fl_ip_dport = inet->inet_dport;
-                       fl.fl_ip_sport = inet->inet_sport;
-                       security_sk_classify_flow(sk, &fl);
-
-                       err = ip6_dst_lookup(sk, &dst, &fl);
-                       if (err) {
-                               sk->sk_err_soft = -err;
-                               goto out;
-                       }
-
-                       err = xfrm_lookup(net, &dst, &fl, sk, 0);
-                       if (err < 0) {
-                               sk->sk_err_soft = -err;
+                       memset(&fl6, 0, sizeof(fl6));
+                       fl6.flowi6_proto = IPPROTO_DCCP;
+                       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+                       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+                       fl6.flowi6_oif = sk->sk_bound_dev_if;
+                       fl6.fl6_dport = inet->inet_dport;
+                       fl6.fl6_sport = inet->inet_sport;
+                       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+                       dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
+                       if (IS_ERR(dst)) {
+                               sk->sk_err_soft = -PTR_ERR(dst);
                                goto out;
                        }
                } else
@@ -249,34 +243,30 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
        struct sk_buff *skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr *final_p, final;
-       struct flowi fl;
+       struct flowi6 fl6;
        int err = -1;
        struct dst_entry *dst;
 
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = IPPROTO_DCCP;
-       ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
-       ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
-       fl.fl6_flowlabel = 0;
-       fl.oif = ireq6->iif;
-       fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-       fl.fl_ip_sport = inet_rsk(req)->loc_port;
-       security_req_classify_flow(req, &fl);
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_DCCP;
+       ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+       ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+       fl6.flowlabel = 0;
+       fl6.flowi6_oif = ireq6->iif;
+       fl6.fl6_dport = inet_rsk(req)->rmt_port;
+       fl6.fl6_sport = inet_rsk(req)->loc_port;
+       security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
        opt = np->opt;
 
-       final_p = fl6_update_dst(&fl, opt, &final);
+       final_p = fl6_update_dst(&fl6, opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
-               goto done;
-
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
-       if (err < 0)
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
                goto done;
+       }
 
        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
@@ -285,8 +275,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq6->loc_addr,
                                                         &ireq6->rmt_addr);
-               ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
-               err = ip6_xmit(sk, skb, &fl, opt);
+               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               err = ip6_xmit(sk, skb, &fl6, opt);
                err = net_xmit_eval(err);
        }
 
@@ -308,7 +298,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
        struct ipv6hdr *rxip6h;
        struct sk_buff *skb;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct net *net = dev_net(skb_dst(rxskb)->dev);
        struct sock *ctl_sk = net->dccp.v6_ctl_sk;
        struct dst_entry *dst;
@@ -327,25 +317,24 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
        dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
                                                            &rxip6h->daddr);
 
-       memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
-       ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);
+       memset(&fl6, 0, sizeof(fl6));
+       ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
+       ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
 
-       fl.proto = IPPROTO_DCCP;
-       fl.oif = inet6_iif(rxskb);
-       fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
-       fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
-       security_skb_classify_flow(rxskb, &fl);
+       fl6.flowi6_proto = IPPROTO_DCCP;
+       fl6.flowi6_oif = inet6_iif(rxskb);
+       fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
+       fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
+       security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
 
        /* sk = NULL, but it is safe for now. RST socket required. */
-       if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
-               if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
-                       skb_dst_set(skb, dst);
-                       ip6_xmit(ctl_sk, skb, &fl, NULL);
-                       DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-                       DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
-                       return;
-               }
+       dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
+       if (!IS_ERR(dst)) {
+               skb_dst_set(skb, dst);
+               ip6_xmit(ctl_sk, skb, &fl6, NULL);
+               DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
+               DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+               return;
        }
 
        kfree_skb(skb);
@@ -484,7 +473,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
-       struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;
@@ -498,7 +486,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                        return NULL;
 
                newdp6 = (struct dccp6_sock *)newsk;
-               newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);
@@ -540,25 +527,20 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        if (dst == NULL) {
                struct in6_addr *final_p, final;
-               struct flowi fl;
-
-               memset(&fl, 0, sizeof(fl));
-               fl.proto = IPPROTO_DCCP;
-               ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
-               final_p = fl6_update_dst(&fl, opt, &final);
-               ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
-               fl.oif = sk->sk_bound_dev_if;
-               fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-               fl.fl_ip_sport = inet_rsk(req)->loc_port;
-               security_sk_classify_flow(sk, &fl);
-
-               if (ip6_dst_lookup(sk, &dst, &fl))
-                       goto out;
-
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-               if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+               struct flowi6 fl6;
+
+               memset(&fl6, 0, sizeof(fl6));
+               fl6.flowi6_proto = IPPROTO_DCCP;
+               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               final_p = fl6_update_dst(&fl6, opt, &final);
+               ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
+               fl6.fl6_dport = inet_rsk(req)->rmt_port;
+               fl6.fl6_sport = inet_rsk(req)->loc_port;
+               security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+               dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+               if (IS_ERR(dst))
                        goto out;
        }
 
@@ -578,7 +560,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
-       newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
@@ -878,7 +859,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
        int err;
@@ -891,14 +872,14 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
 
        if (np->sndflow) {
-               fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
-               IP6_ECN_flow_init(fl.fl6_flowlabel);
-               if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
+               fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
+               IP6_ECN_flow_init(fl6.flowlabel);
+               if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
-                       flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
@@ -935,7 +916,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
-       np->flow_label = fl.fl6_flowlabel;
+       np->flow_label = fl6.flowlabel;
 
        /*
         * DCCP over IPv4
@@ -972,33 +953,24 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;
 
-       fl.proto = IPPROTO_DCCP;
-       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-       ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
-       fl.oif = sk->sk_bound_dev_if;
-       fl.fl_ip_dport = usin->sin6_port;
-       fl.fl_ip_sport = inet->inet_sport;
-       security_sk_classify_flow(sk, &fl);
+       fl6.flowi6_proto = IPPROTO_DCCP;
+       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+       ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
+       fl6.flowi6_oif = sk->sk_bound_dev_if;
+       fl6.fl6_dport = usin->sin6_port;
+       fl6.fl6_sport = inet->inet_sport;
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       final_p = fl6_update_dst(&fl, np->opt, &final);
+       final_p = fl6_update_dst(&fl6, np->opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto failure;
-
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto failure;
        }
 
        if (saddr == NULL) {
-               saddr = &fl.fl6_src;
+               saddr = &fl6.saddr;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }
 
index 2af15b1..ea3b6ee 100644 (file)
@@ -908,7 +908,7 @@ static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen,
        struct socket *sock = sk->sk_socket;
        struct dn_scp *scp = DN_SK(sk);
        int err = -EISCONN;
-       struct flowi fl;
+       struct flowidn fld;
 
        if (sock->state == SS_CONNECTED)
                goto out;
@@ -947,13 +947,13 @@ static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen,
        memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
 
        err = -EHOSTUNREACH;
-       memset(&fl, 0, sizeof(fl));
-       fl.oif = sk->sk_bound_dev_if;
-       fl.fld_dst = dn_saddr2dn(&scp->peer);
-       fl.fld_src = dn_saddr2dn(&scp->addr);
-       dn_sk_ports_copy(&fl, scp);
-       fl.proto = DNPROTO_NSP;
-       if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
+       memset(&fld, 0, sizeof(fld));
+       fld.flowidn_oif = sk->sk_bound_dev_if;
+       fld.daddr = dn_saddr2dn(&scp->peer);
+       fld.saddr = dn_saddr2dn(&scp->addr);
+       dn_sk_ports_copy(&fld, scp);
+       fld.flowidn_proto = DNPROTO_NSP;
+       if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
                goto out;
        sk->sk_route_caps = sk->sk_dst_cache->dev->features;
        sock->state = SS_CONNECTING;
index 0ef0a81..1c74ed3 100644 (file)
@@ -201,7 +201,7 @@ static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct
        int err;
 
        if (nh->nh_gw) {
-               struct flowi fl;
+               struct flowidn fld;
                struct dn_fib_res res;
 
                if (nh->nh_flags&RTNH_F_ONLINK) {
@@ -221,15 +221,15 @@ static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct
                        return 0;
                }
 
-               memset(&fl, 0, sizeof(fl));
-               fl.fld_dst = nh->nh_gw;
-               fl.oif = nh->nh_oif;
-               fl.fld_scope = r->rtm_scope + 1;
+               memset(&fld, 0, sizeof(fld));
+               fld.daddr = nh->nh_gw;
+               fld.flowidn_oif = nh->nh_oif;
+               fld.flowidn_scope = r->rtm_scope + 1;
 
-               if (fl.fld_scope < RT_SCOPE_LINK)
-                       fl.fld_scope = RT_SCOPE_LINK;
+               if (fld.flowidn_scope < RT_SCOPE_LINK)
+                       fld.flowidn_scope = RT_SCOPE_LINK;
 
-               if ((err = dn_fib_lookup(&fl, &res)) != 0)
+               if ((err = dn_fib_lookup(&fld, &res)) != 0)
                        return err;
 
                err = -EINVAL;
@@ -404,7 +404,7 @@ failure:
        return NULL;
 }
 
-int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowi *fl, struct dn_fib_res *res)
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn *fld, struct dn_fib_res *res)
 {
        int err = dn_fib_props[type].error;
 
@@ -424,7 +424,8 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowi *
                                for_nexthops(fi) {
                                        if (nh->nh_flags & RTNH_F_DEAD)
                                                continue;
-                                       if (!fl->oif || fl->oif == nh->nh_oif)
+                                       if (!fld->flowidn_oif ||
+                                           fld->flowidn_oif == nh->nh_oif)
                                                break;
                                }
                                if (nhsel < fi->fib_nhs) {
@@ -445,7 +446,7 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowi *
        return err;
 }
 
-void dn_fib_select_multipath(const struct flowi *fl, struct dn_fib_res *res)
+void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res)
 {
        struct dn_fib_info *fi = res->fi;
        int w;
index 2ef1152..bd78836 100644 (file)
@@ -78,7 +78,7 @@ static void dn_nsp_send(struct sk_buff *skb)
        struct sock *sk = skb->sk;
        struct dn_scp *scp = DN_SK(sk);
        struct dst_entry *dst;
-       struct flowi fl;
+       struct flowidn fld;
 
        skb_reset_transport_header(skb);
        scp->stamp = jiffies;
@@ -91,13 +91,13 @@ try_again:
                return;
        }
 
-       memset(&fl, 0, sizeof(fl));
-       fl.oif = sk->sk_bound_dev_if;
-       fl.fld_src = dn_saddr2dn(&scp->addr);
-       fl.fld_dst = dn_saddr2dn(&scp->peer);
-       dn_sk_ports_copy(&fl, scp);
-       fl.proto = DNPROTO_NSP;
-       if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, 0) == 0) {
+       memset(&fld, 0, sizeof(fld));
+       fld.flowidn_oif = sk->sk_bound_dev_if;
+       fld.saddr = dn_saddr2dn(&scp->addr);
+       fld.daddr = dn_saddr2dn(&scp->peer);
+       dn_sk_ports_copy(&fld, scp);
+       fld.flowidn_proto = DNPROTO_NSP;
+       if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) {
                dst = sk_dst_get(sk);
                sk->sk_route_caps = dst->dev->features;
                goto try_again;
index 5e63636..9f09d4f 100644 (file)
@@ -112,6 +112,7 @@ static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
 static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -133,11 +134,18 @@ static struct dst_ops dn_dst_ops = {
        .check =                dn_dst_check,
        .default_advmss =       dn_dst_default_advmss,
        .default_mtu =          dn_dst_default_mtu,
+       .cow_metrics =          dst_cow_metrics_generic,
+       .destroy =              dn_dst_destroy,
        .negative_advice =      dn_dst_negative_advice,
        .link_failure =         dn_dst_link_failure,
        .update_pmtu =          dn_dst_update_pmtu,
 };
 
+static void dn_dst_destroy(struct dst_entry *dst)
+{
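+       /* free any writable metrics allocated via dst_cow_metrics_generic */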
+       dst_destroy_metrics_generic(dst);
+}
+
 static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
 {
        __u16 tmp = (__u16 __force)(src ^ dst);
@@ -274,14 +282,14 @@ static void dn_dst_link_failure(struct sk_buff *skb)
 {
 }
 
-static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
+static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
 {
-       return ((fl1->fld_dst ^ fl2->fld_dst) |
-               (fl1->fld_src ^ fl2->fld_src) |
-               (fl1->mark ^ fl2->mark) |
-               (fl1->fld_scope ^ fl2->fld_scope) |
-               (fl1->oif ^ fl2->oif) |
-               (fl1->iif ^ fl2->iif)) == 0;
+       return ((fl1->daddr ^ fl2->daddr) |
+               (fl1->saddr ^ fl2->saddr) |
+               (fl1->flowidn_mark ^ fl2->flowidn_mark) |
+               (fl1->flowidn_scope ^ fl2->flowidn_scope) |
+               (fl1->flowidn_oif ^ fl2->flowidn_oif) |
+               (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
 }
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
@@ -295,7 +303,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
        spin_lock_bh(&dn_rt_hash_table[hash].lock);
        while ((rth = rcu_dereference_protected(*rthp,
                                                lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
-               if (compare_keys(&rth->fl, &rt->fl)) {
+               if (compare_keys(&rth->fld, &rt->fld)) {
                        /* Put it first */
                        *rthp = rth->dst.dn_next;
                        rcu_assign_pointer(rth->dst.dn_next,
@@ -814,14 +822,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 {
        struct dn_fib_info *fi = res->fi;
        struct net_device *dev = rt->dst.dev;
+       unsigned int mss_metric;
        struct neighbour *n;
-       unsigned int metric;
 
        if (fi) {
                if (DN_FIB_RES_GW(*res) &&
                    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = DN_FIB_RES_GW(*res);
-               dst_import_metrics(&rt->dst, fi->fib_metrics);
+               dst_init_metrics(&rt->dst, fi->fib_metrics, true);
        }
        rt->rt_type = res->type;
 
@@ -834,10 +842,10 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 
        if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
                dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
-       metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
-       if (metric) {
+       mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+       if (mss_metric) {
                unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
-               if (metric > mss)
+               if (mss_metric > mss)
                        dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
        }
        return 0;
@@ -895,14 +903,16 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
        return (daddr&~mask)|res->fi->fib_nh->nh_gw;
 }
 
-static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
+static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
 {
-       struct flowi fl = { .fld_dst = oldflp->fld_dst,
-                           .fld_src = oldflp->fld_src,
-                           .fld_scope = RT_SCOPE_UNIVERSE,
-                           .mark = oldflp->mark,
-                           .iif = init_net.loopback_dev->ifindex,
-                           .oif = oldflp->oif };
+       struct flowidn fld = {
+               .daddr = oldflp->daddr,
+               .saddr = oldflp->saddr,
+               .flowidn_scope = RT_SCOPE_UNIVERSE,
+               .flowidn_mark = oldflp->flowidn_mark,
+               .flowidn_iif = init_net.loopback_dev->ifindex,
+               .flowidn_oif = oldflp->flowidn_oif,
+       };
        struct dn_route *rt = NULL;
        struct net_device *dev_out = NULL, *dev;
        struct neighbour *neigh = NULL;
@@ -916,13 +926,14 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
        if (decnet_debug_level & 16)
                printk(KERN_DEBUG
                       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
-                      " iif=%d oif=%d\n", le16_to_cpu(oldflp->fld_dst),
-                      le16_to_cpu(oldflp->fld_src),
-                      oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif);
+                      " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
+                      le16_to_cpu(oldflp->saddr),
+                      oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
+                      oldflp->flowidn_oif);
 
        /* If we have an output interface, verify it's a DECnet device */
-       if (oldflp->oif) {
-               dev_out = dev_get_by_index(&init_net, oldflp->oif);
+       if (oldflp->flowidn_oif) {
+               dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
                err = -ENODEV;
                if (dev_out && dev_out->dn_ptr == NULL) {
                        dev_put(dev_out);
@@ -933,11 +944,11 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
        }
 
        /* If we have a source address, verify that it's a local address */
-       if (oldflp->fld_src) {
+       if (oldflp->saddr) {
                err = -EADDRNOTAVAIL;
 
                if (dev_out) {
-                       if (dn_dev_islocal(dev_out, oldflp->fld_src))
+                       if (dn_dev_islocal(dev_out, oldflp->saddr))
                                goto source_ok;
                        dev_put(dev_out);
                        goto out;
@@ -946,11 +957,11 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
                for_each_netdev_rcu(&init_net, dev) {
                        if (!dev->dn_ptr)
                                continue;
-                       if (!dn_dev_islocal(dev, oldflp->fld_src))
+                       if (!dn_dev_islocal(dev, oldflp->saddr))
                                continue;
                        if ((dev->flags & IFF_LOOPBACK) &&
-                           oldflp->fld_dst &&
-                           !dn_dev_islocal(dev, oldflp->fld_dst))
+                           oldflp->daddr &&
+                           !dn_dev_islocal(dev, oldflp->daddr))
                                continue;
 
                        dev_out = dev;
@@ -965,22 +976,22 @@ source_ok:
        }
 
        /* No destination? Assume it's local */
-       if (!fl.fld_dst) {
-               fl.fld_dst = fl.fld_src;
+       if (!fld.daddr) {
+               fld.daddr = fld.saddr;
 
                err = -EADDRNOTAVAIL;
                if (dev_out)
                        dev_put(dev_out);
                dev_out = init_net.loopback_dev;
                dev_hold(dev_out);
-               if (!fl.fld_dst) {
-                       fl.fld_dst =
-                       fl.fld_src = dnet_select_source(dev_out, 0,
+               if (!fld.daddr) {
+                       fld.daddr =
+                       fld.saddr = dnet_select_source(dev_out, 0,
                                                       RT_SCOPE_HOST);
-                       if (!fl.fld_dst)
+                       if (!fld.daddr)
                                goto out;
                }
-               fl.oif = init_net.loopback_dev->ifindex;
+               fld.flowidn_oif = init_net.loopback_dev->ifindex;
                res.type = RTN_LOCAL;
                goto make_route;
        }
@@ -989,8 +1000,8 @@ source_ok:
                printk(KERN_DEBUG
                       "dn_route_output_slow: initial checks complete."
                       " dst=%o4x src=%04x oif=%d try_hard=%d\n",
-                      le16_to_cpu(fl.fld_dst), le16_to_cpu(fl.fld_src),
-                      fl.oif, try_hard);
+                      le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
+                      fld.flowidn_oif, try_hard);
 
        /*
         * N.B. If the kernel is compiled without router support then
@@ -998,7 +1009,7 @@ source_ok:
         * will always be executed.
         */
        err = -ESRCH;
-       if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
+       if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
                struct dn_dev *dn_db;
                if (err != -ESRCH)
                        goto out;
@@ -1013,19 +1024,19 @@ source_ok:
                 * here
                 */
                if (!try_hard) {
-                       neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst);
+                       neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
                        if (neigh) {
-                               if ((oldflp->oif &&
-                                   (neigh->dev->ifindex != oldflp->oif)) ||
-                                   (oldflp->fld_src &&
+                               if ((oldflp->flowidn_oif &&
+                                   (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
+                                   (oldflp->saddr &&
                                    (!dn_dev_islocal(neigh->dev,
-                                                     oldflp->fld_src)))) {
+                                                    oldflp->saddr)))) {
                                        neigh_release(neigh);
                                        neigh = NULL;
                                } else {
                                        if (dev_out)
                                                dev_put(dev_out);
-                                       if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
+                                       if (dn_dev_islocal(neigh->dev, fld.daddr)) {
                                                dev_out = init_net.loopback_dev;
                                                res.type = RTN_LOCAL;
                                        } else {
@@ -1045,7 +1056,7 @@ source_ok:
                        goto out;
                dn_db = rcu_dereference_raw(dev_out->dn_ptr);
                /* Possible improvement - check all devices for local addr */
-               if (dn_dev_islocal(dev_out, fl.fld_dst)) {
+               if (dn_dev_islocal(dev_out, fld.daddr)) {
                        dev_put(dev_out);
                        dev_out = init_net.loopback_dev;
                        dev_hold(dev_out);
@@ -1061,16 +1072,16 @@ select_source:
                if (neigh)
                        gateway = ((struct dn_neigh *)neigh)->addr;
                if (gateway == 0)
-                       gateway = fl.fld_dst;
-               if (fl.fld_src == 0) {
-                       fl.fld_src = dnet_select_source(dev_out, gateway,
-                                                        res.type == RTN_LOCAL ?
-                                                        RT_SCOPE_HOST :
-                                                        RT_SCOPE_LINK);
-                       if (fl.fld_src == 0 && res.type != RTN_LOCAL)
+                       gateway = fld.daddr;
+               if (fld.saddr == 0) {
+                       fld.saddr = dnet_select_source(dev_out, gateway,
+                                                      res.type == RTN_LOCAL ?
+                                                      RT_SCOPE_HOST :
+                                                      RT_SCOPE_LINK);
+                       if (fld.saddr == 0 && res.type != RTN_LOCAL)
                                goto e_addr;
                }
-               fl.oif = dev_out->ifindex;
+               fld.flowidn_oif = dev_out->ifindex;
                goto make_route;
        }
        free_res = 1;
@@ -1079,61 +1090,61 @@ select_source:
                goto e_inval;
 
        if (res.type == RTN_LOCAL) {
-               if (!fl.fld_src)
-                       fl.fld_src = fl.fld_dst;
+               if (!fld.saddr)
+                       fld.saddr = fld.daddr;
                if (dev_out)
                        dev_put(dev_out);
                dev_out = init_net.loopback_dev;
                dev_hold(dev_out);
-               fl.oif = dev_out->ifindex;
+               fld.flowidn_oif = dev_out->ifindex;
                if (res.fi)
                        dn_fib_info_put(res.fi);
                res.fi = NULL;
                goto make_route;
        }
 
-       if (res.fi->fib_nhs > 1 && fl.oif == 0)
-               dn_fib_select_multipath(&fl, &res);
+       if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
+               dn_fib_select_multipath(&fld, &res);
 
        /*
         * We could add some logic to deal with default routes here and
         * get rid of some of the special casing above.
         */
 
-       if (!fl.fld_src)
-               fl.fld_src = DN_FIB_RES_PREFSRC(res);
+       if (!fld.saddr)
+               fld.saddr = DN_FIB_RES_PREFSRC(res);
 
        if (dev_out)
                dev_put(dev_out);
        dev_out = DN_FIB_RES_DEV(res);
        dev_hold(dev_out);
-       fl.oif = dev_out->ifindex;
+       fld.flowidn_oif = dev_out->ifindex;
        gateway = DN_FIB_RES_GW(res);
 
 make_route:
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
-       rt = dst_alloc(&dn_dst_ops);
+       rt = dst_alloc(&dn_dst_ops, 0);
        if (rt == NULL)
                goto e_nobufs;
 
        atomic_set(&rt->dst.__refcnt, 1);
        rt->dst.flags   = DST_HOST;
 
-       rt->fl.fld_src    = oldflp->fld_src;
-       rt->fl.fld_dst    = oldflp->fld_dst;
-       rt->fl.oif        = oldflp->oif;
-       rt->fl.iif        = 0;
-       rt->fl.mark       = oldflp->mark;
+       rt->fld.saddr        = oldflp->saddr;
+       rt->fld.daddr        = oldflp->daddr;
+       rt->fld.flowidn_oif  = oldflp->flowidn_oif;
+       rt->fld.flowidn_iif  = 0;
+       rt->fld.flowidn_mark = oldflp->flowidn_mark;
 
-       rt->rt_saddr      = fl.fld_src;
-       rt->rt_daddr      = fl.fld_dst;
-       rt->rt_gateway    = gateway ? gateway : fl.fld_dst;
-       rt->rt_local_src  = fl.fld_src;
+       rt->rt_saddr      = fld.saddr;
+       rt->rt_daddr      = fld.daddr;
+       rt->rt_gateway    = gateway ? gateway : fld.daddr;
+       rt->rt_local_src  = fld.saddr;
 
-       rt->rt_dst_map    = fl.fld_dst;
-       rt->rt_src_map    = fl.fld_src;
+       rt->rt_dst_map    = fld.daddr;
+       rt->rt_src_map    = fld.saddr;
 
        rt->dst.dev = dev_out;
        dev_hold(dev_out);
@@ -1151,7 +1162,7 @@ make_route:
        if (err)
                goto e_neighbour;
 
-       hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
+       hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
        dn_insert_route(rt, hash, (struct dn_route **)pprt);
 
 done:
@@ -1182,20 +1193,20 @@ e_neighbour:
 /*
  * N.B. The flags may be moved into the flowi at some future stage.
  */
-static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
+static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
 {
-       unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
+       unsigned hash = dn_hash(flp->saddr, flp->daddr);
        struct dn_route *rt = NULL;
 
        if (!(flags & MSG_TRYHARD)) {
                rcu_read_lock_bh();
                for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
                        rt = rcu_dereference_bh(rt->dst.dn_next)) {
-                       if ((flp->fld_dst == rt->fl.fld_dst) &&
-                           (flp->fld_src == rt->fl.fld_src) &&
-                           (flp->mark == rt->fl.mark) &&
+                       if ((flp->daddr == rt->fld.daddr) &&
+                           (flp->saddr == rt->fld.saddr) &&
+                           (flp->flowidn_mark == rt->fld.flowidn_mark) &&
                            dn_is_output_route(rt) &&
-                           (rt->fl.oif == flp->oif)) {
+                           (rt->fld.flowidn_oif == flp->flowidn_oif)) {
                                dst_use(&rt->dst, jiffies);
                                rcu_read_unlock_bh();
                                *pprt = &rt->dst;
@@ -1208,25 +1219,36 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
        return dn_route_output_slow(pprt, flp, flags);
 }
 
-static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
+static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
 {
        int err;
 
        err = __dn_route_output_key(pprt, flp, flags);
-       if (err == 0 && flp->proto) {
-               err = xfrm_lookup(&init_net, pprt, flp, NULL, 0);
+       if (err == 0 && flp->flowidn_proto) {
+               *pprt = xfrm_lookup(&init_net, *pprt,
+                                   flowidn_to_flowi(flp), NULL, 0);
+               if (IS_ERR(*pprt)) {
+                       err = PTR_ERR(*pprt);
+                       *pprt = NULL;
+               }
        }
        return err;
 }
 
-int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
+int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags)
 {
        int err;
 
        err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
-       if (err == 0 && fl->proto) {
-               err = xfrm_lookup(&init_net, pprt, fl, sk,
-                                (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
+       if (err == 0 && fl->flowidn_proto) {
+               if (!(flags & MSG_DONTWAIT))
+                       fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
+               *pprt = xfrm_lookup(&init_net, *pprt,
+                                   flowidn_to_flowi(fl), sk, 0);
+               if (IS_ERR(*pprt)) {
+                       err = PTR_ERR(*pprt);
+                       *pprt = NULL;
+               }
        }
        return err;
 }
@@ -1243,11 +1265,13 @@ static int dn_route_input_slow(struct sk_buff *skb)
        int flags = 0;
        __le16 gateway = 0;
        __le16 local_src = 0;
-       struct flowi fl = { .fld_dst = cb->dst,
-                           .fld_src = cb->src,
-                           .fld_scope = RT_SCOPE_UNIVERSE,
-                           .mark = skb->mark,
-                           .iif = skb->dev->ifindex };
+       struct flowidn fld = {
+               .daddr = cb->dst,
+               .saddr = cb->src,
+               .flowidn_scope = RT_SCOPE_UNIVERSE,
+               .flowidn_mark = skb->mark,
+               .flowidn_iif = skb->dev->ifindex,
+       };
        struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
        int err = -EINVAL;
        int free_res = 0;
@@ -1258,7 +1282,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
                goto out;
 
        /* Zero source addresses are not allowed */
-       if (fl.fld_src == 0)
+       if (fld.saddr == 0)
                goto out;
 
        /*
@@ -1272,7 +1296,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
        if (dn_dev_islocal(in_dev, cb->src))
                goto out;
 
-       err = dn_fib_lookup(&fl, &res);
+       err = dn_fib_lookup(&fld, &res);
        if (err) {
                if (err != -ESRCH)
                        goto out;
@@ -1284,7 +1308,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 
                res.type = RTN_LOCAL;
        } else {
-               __le16 src_map = fl.fld_src;
+               __le16 src_map = fld.saddr;
                free_res = 1;
 
                out_dev = DN_FIB_RES_DEV(res);
@@ -1297,22 +1321,22 @@ static int dn_route_input_slow(struct sk_buff *skb)
                dev_hold(out_dev);
 
                if (res.r)
-                       src_map = fl.fld_src; /* no NAT support for now */
+                       src_map = fld.saddr; /* no NAT support for now */
 
                gateway = DN_FIB_RES_GW(res);
                if (res.type == RTN_NAT) {
-                       fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
+                       fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
                        dn_fib_res_put(&res);
                        free_res = 0;
-                       if (dn_fib_lookup(&fl, &res))
+                       if (dn_fib_lookup(&fld, &res))
                                goto e_inval;
                        free_res = 1;
                        if (res.type != RTN_UNICAST)
                                goto e_inval;
                        flags |= RTCF_DNAT;
-                       gateway = fl.fld_dst;
+                       gateway = fld.daddr;
                }
-               fl.fld_src = src_map;
+               fld.saddr = src_map;
        }
 
        switch(res.type) {
@@ -1326,8 +1350,8 @@ static int dn_route_input_slow(struct sk_buff *skb)
                if (dn_db->parms.forwarding == 0)
                        goto e_inval;
 
-               if (res.fi->fib_nhs > 1 && fl.oif == 0)
-                       dn_fib_select_multipath(&fl, &res);
+               if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
+                       dn_fib_select_multipath(&fld, &res);
 
                /*
                 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
@@ -1345,8 +1369,8 @@ static int dn_route_input_slow(struct sk_buff *skb)
                break;
        case RTN_LOCAL:
                flags |= RTCF_LOCAL;
-               fl.fld_src = cb->dst;
-               fl.fld_dst = cb->src;
+               fld.saddr = cb->dst;
+               fld.daddr = cb->src;
 
                /* Routing tables gave us a gateway */
                if (gateway)
@@ -1375,25 +1399,25 @@ static int dn_route_input_slow(struct sk_buff *skb)
        }
 
 make_route:
-       rt = dst_alloc(&dn_dst_ops);
+       rt = dst_alloc(&dn_dst_ops, 0);
        if (rt == NULL)
                goto e_nobufs;
 
-       rt->rt_saddr      = fl.fld_src;
-       rt->rt_daddr      = fl.fld_dst;
-       rt->rt_gateway    = fl.fld_dst;
+       rt->rt_saddr      = fld.saddr;
+       rt->rt_daddr      = fld.daddr;
+       rt->rt_gateway    = fld.daddr;
        if (gateway)
                rt->rt_gateway = gateway;
        rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;
 
-       rt->rt_dst_map    = fl.fld_dst;
-       rt->rt_src_map    = fl.fld_src;
+       rt->rt_dst_map    = fld.daddr;
+       rt->rt_src_map    = fld.saddr;
 
-       rt->fl.fld_src    = cb->src;
-       rt->fl.fld_dst    = cb->dst;
-       rt->fl.oif        = 0;
-       rt->fl.iif        = in_dev->ifindex;
-       rt->fl.mark       = fl.mark;
+       rt->fld.saddr        = cb->src;
+       rt->fld.daddr        = cb->dst;
+       rt->fld.flowidn_oif  = 0;
+       rt->fld.flowidn_iif  = in_dev->ifindex;
+       rt->fld.flowidn_mark = fld.flowidn_mark;
 
        rt->dst.flags = DST_HOST;
        rt->dst.neighbour = neigh;
@@ -1423,7 +1447,7 @@ make_route:
        if (err)
                goto e_neighbour;
 
-       hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
+       hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
        dn_insert_route(rt, hash, &rt);
        skb_dst_set(skb, &rt->dst);
 
@@ -1463,11 +1487,11 @@ static int dn_route_input(struct sk_buff *skb)
        rcu_read_lock();
        for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
            rt = rcu_dereference(rt->dst.dn_next)) {
-               if ((rt->fl.fld_src == cb->src) &&
-                   (rt->fl.fld_dst == cb->dst) &&
-                   (rt->fl.oif == 0) &&
-                   (rt->fl.mark == skb->mark) &&
-                   (rt->fl.iif == cb->iif)) {
+               if ((rt->fld.saddr == cb->src) &&
+                   (rt->fld.daddr == cb->dst) &&
+                   (rt->fld.flowidn_oif == 0) &&
+                   (rt->fld.flowidn_mark == skb->mark) &&
+                   (rt->fld.flowidn_iif == cb->iif)) {
                        dst_use(&rt->dst, jiffies);
                        rcu_read_unlock();
                        skb_dst_set(skb, (struct dst_entry *)rt);
@@ -1503,9 +1527,9 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        if (rt->rt_flags & RTCF_NOTIFY)
                r->rtm_flags |= RTM_F_NOTIFY;
        RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
-       if (rt->fl.fld_src) {
+       if (rt->fld.saddr) {
                r->rtm_src_len = 16;
-               RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
+               RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr);
        }
        if (rt->dst.dev)
                RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
@@ -1524,7 +1548,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
                               rt->dst.error) < 0)
                goto rtattr_failure;
        if (dn_is_input_route(rt))
-               RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
+               RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
@@ -1547,13 +1571,13 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
        struct dn_skb_cb *cb;
        int err;
        struct sk_buff *skb;
-       struct flowi fl;
+       struct flowidn fld;
 
        if (!net_eq(net, &init_net))
                return -EINVAL;
 
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = DNPROTO_NSP;
+       memset(&fld, 0, sizeof(fld));
+       fld.flowidn_proto = DNPROTO_NSP;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb == NULL)
@@ -1562,15 +1586,15 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
        cb = DN_SKB_CB(skb);
 
        if (rta[RTA_SRC-1])
-               memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
+               memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2);
        if (rta[RTA_DST-1])
-               memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
+               memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2);
        if (rta[RTA_IIF-1])
-               memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
+               memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
 
-       if (fl.iif) {
+       if (fld.flowidn_iif) {
                struct net_device *dev;
-               if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
+               if ((dev = dev_get_by_index(&init_net, fld.flowidn_iif)) == NULL) {
                        kfree_skb(skb);
                        return -ENODEV;
                }
@@ -1581,8 +1605,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
                }
                skb->protocol = htons(ETH_P_DNA_RT);
                skb->dev = dev;
-               cb->src = fl.fld_src;
-               cb->dst = fl.fld_dst;
+               cb->src = fld.saddr;
+               cb->dst = fld.daddr;
                local_bh_disable();
                err = dn_route_input(skb);
                local_bh_enable();
@@ -1594,8 +1618,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
                int oif = 0;
                if (rta[RTA_OIF - 1])
                        memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
-               fl.oif = oif;
-               err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
+               fld.flowidn_oif = oif;
+               err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
        }
 
        if (skb->dev)
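
The dn_route.c hunks above (and several ipv4 hunks later in this diff) move route lookups toward the error-pointer convention: the error travels in the returned pointer, or is unpacked into *pprt with PTR_ERR() as in dn_route_output_key(). A minimal userspace sketch of that convention, with toy re-implementations of the <linux/err.h> helpers and a made-up route_lookup(), looks like this:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* toy stand-ins for the kernel's <linux/err.h> helpers */
static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct route { int ifindex; };

/* made-up lookup: returns a route or an encoded negative errno */
static struct route *route_lookup(int dst)
{
	static struct route r = { .ifindex = 2 };

	if (dst == 0)
		return ERR_PTR(-EINVAL);
	return &r;
}

int main(void)
{
	struct route *rt = route_lookup(0);

	if (IS_ERR(rt))
		printf("lookup failed: %ld\n", PTR_ERR(rt));	/* -22 */

	rt = route_lookup(42);
	if (!IS_ERR(rt))
		printf("via ifindex %d\n", rt->ifindex);
	return 0;
}
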
index 6eb91df..f0efb0c 100644 (file)
@@ -49,14 +49,15 @@ struct dn_fib_rule
 };
 
 
-int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res)
+int dn_fib_lookup(struct flowidn *flp, struct dn_fib_res *res)
 {
        struct fib_lookup_arg arg = {
                .result = res,
        };
        int err;
 
-       err = fib_rules_lookup(dn_fib_rules_ops, flp, 0, &arg);
+       err = fib_rules_lookup(dn_fib_rules_ops,
+                              flowidn_to_flowi(flp), 0, &arg);
        res->r = arg.rule;
 
        return err;
@@ -65,6 +66,7 @@ int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res)
 static int dn_fib_rule_action(struct fib_rule *rule, struct flowi *flp,
                              int flags, struct fib_lookup_arg *arg)
 {
+       struct flowidn *fld = &flp->u.dn;
        int err = -EAGAIN;
        struct dn_fib_table *tbl;
 
@@ -90,7 +92,7 @@ static int dn_fib_rule_action(struct fib_rule *rule, struct flowi *flp,
        if (tbl == NULL)
                goto errout;
 
-       err = tbl->lookup(tbl, flp, (struct dn_fib_res *)arg->result);
+       err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result);
        if (err > 0)
                err = -EAGAIN;
 errout:
@@ -104,8 +106,9 @@ static const struct nla_policy dn_fib_rule_policy[FRA_MAX+1] = {
 static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
 {
        struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
-       __le16 daddr = fl->fld_dst;
-       __le16 saddr = fl->fld_src;
+       struct flowidn *fld = &fl->u.dn;
+       __le16 daddr = fld->daddr;
+       __le16 saddr = fld->saddr;
 
        if (((saddr ^ r->src) & r->srcmask) ||
            ((daddr ^ r->dst) & r->dstmask))
@@ -175,7 +178,7 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 
 unsigned dnet_addr_type(__le16 addr)
 {
-       struct flowi fl = { .fld_dst = addr };
+       struct flowidn fld = { .daddr = addr };
        struct dn_fib_res res;
        unsigned ret = RTN_UNICAST;
        struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
@@ -183,7 +186,7 @@ unsigned dnet_addr_type(__le16 addr)
        res.r = NULL;
 
        if (tb) {
-               if (!tb->lookup(tb, &fl, &res)) {
+               if (!tb->lookup(tb, &fld, &res)) {
                        ret = res.type;
                        dn_fib_res_put(&res);
                }
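
The dn_fib_rule_match() hunk above keeps the masked comparison ((saddr ^ r->src) & r->srcmask): an address matches a rule when it agrees with the rule on every bit the mask covers. A toy illustration with 16-bit DECnet-style values (the area prefix and mask below are made up, and the real code operates on __le16 quantities):

#include <stdint.h>
#include <stdio.h>

/* an address matches a rule when it agrees with it on every masked bit */
static int rule_matches(uint16_t addr, uint16_t rule_addr, uint16_t mask)
{
	return ((addr ^ rule_addr) & mask) == 0;
}

int main(void)
{
	uint16_t rule = 0x0400, mask = 0xfc00;	/* hypothetical area prefix */

	printf("%d\n", rule_matches(0x0401, rule, mask));	/* 1: same area */
	printf("%d\n", rule_matches(0x0801, rule, mask));	/* 0: other area */
	return 0;
}
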
index f2abd37..99d8d3a 100644 (file)
@@ -59,7 +59,6 @@ struct dn_hash
 };
 
 #define dz_key_0(key)          ((key).datum = 0)
-#define dz_prefix(key,dz)      ((key).datum)
 
 #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
        for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
@@ -765,7 +764,7 @@ static int dn_fib_table_flush(struct dn_fib_table *tb)
        return found;
 }
 
-static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res)
+static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res)
 {
        int err;
        struct dn_zone *dz;
@@ -774,7 +773,7 @@ static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp,
        read_lock(&dn_fib_tables_lock);
        for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
                struct dn_fib_node *f;
-               dn_fib_key_t k = dz_key(flp->fld_dst, dz);
+               dn_fib_key_t k = dz_key(flp->daddr, dz);
 
                for(f = dz_chain(k, dz); f; f = f->fn_next) {
                        if (!dn_key_eq(k, f->fn_key)) {
@@ -789,7 +788,7 @@ static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp,
                        if (f->fn_state&DN_S_ZOMBIE)
                                continue;
 
-                       if (f->fn_scope < flp->fld_scope)
+                       if (f->fn_scope < flp->flowidn_scope)
                                continue;
 
                        err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);
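
dn_fib_table_lookup() above walks per-prefix-length zones: the destination is masked down to the zone's prefix to form the key (dz_key), and entries whose scope is narrower than the requested flowidn_scope are skipped. A simplified, single-zone userspace model of that walk, with invented table contents and scope values:

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint16_t key;	/* destination masked to the zone's prefix length */
	int scope;	/* larger value == wider scope */
};

/* one hypothetical zone; the real table keeps one such list per prefix length */
static const struct entry zone[] = {
	{ 0x0400, 1 },
	{ 0x0800, 3 },
};

static int lookup(uint16_t daddr, uint16_t mask, int min_scope)
{
	uint16_t key = daddr & mask;		/* dz_key() equivalent */

	for (size_t i = 0; i < sizeof(zone) / sizeof(zone[0]); i++) {
		if (zone[i].key != key)
			continue;
		if (zone[i].scope < min_scope)	/* scope too narrow, skip */
			continue;
		return (int)i;
	}
	return -1;				/* no match in this zone */
}

int main(void)
{
	printf("%d\n", lookup(0x0823, 0xff00, 2));	/*  1: key and scope fit */
	printf("%d\n", lookup(0x0423, 0xff00, 2));	/* -1: scope filtered out */
	return 0;
}
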
index 739435a..cfa7a5e 100644 (file)
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
        size_t result_len = 0;
        const char *data = _data, *end, *opt;
 
-       kenter("%%%d,%s,'%s',%zu",
-              key->serial, key->description, data, datalen);
+       kenter("%%%d,%s,'%*.*s',%zu",
+              key->serial, key->description,
+              (int)datalen, (int)datalen, data, datalen);
 
        if (datalen <= 1 || !data || data[datalen - 1] != '\0')
                return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
                seq_printf(m, ": %u", key->datalen);
 }
 
+/*
+ * read the DNS data
+ * - the key's semaphore is read-locked
+ */
+static long dns_resolver_read(const struct key *key,
+                             char __user *buffer, size_t buflen)
+{
+       if (key->type_data.x[0])
+               return key->type_data.x[0];
+
+       return user_read(key, buffer, buflen);
+}
+
 struct key_type key_type_dns_resolver = {
        .name           = "dns_resolver",
        .instantiate    = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
        .revoke         = user_revoke,
        .destroy        = user_destroy,
        .describe       = dns_resolver_describe,
-       .read           = user_read,
+       .read           = dns_resolver_read,
 };
 
 static int __init init_dns_resolver(void)
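
The kenter() change above swaps %s for %*.*s because the payload is printed before it has been checked for NUL termination, so the length has to be passed explicitly. Standard printf behaves the same way:

#include <stdio.h>

int main(void)
{
	/* eight data bytes, deliberately not NUL-terminated */
	const char payload[] = { '1', '0', '.', '0', '.', '0', '.', '1' };
	int len = (int)sizeof(payload);

	/* width and precision come from the argument list, so printf stops
	 * after len bytes instead of scanning for a terminating NUL */
	printf("'%*.*s'\n", len, len, payload);		/* '10.0.0.1' */
	return 0;
}
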
index 0c877a7..3fb14b7 100644 (file)
@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
 }
 module_exit(dsa_cleanup_module);
 
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
 MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:dsa");
index 83277f4..8f4ff5a 100644 (file)
@@ -18,7 +18,7 @@
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-       return mdiobus_read(ds->master_mii_bus, addr, reg);
+       return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg);
 }
 
 #define REG_READ(addr, reg)                                    \
@@ -34,7 +34,8 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
 
 static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-       return mdiobus_write(ds->master_mii_bus, addr, reg, val);
+       return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr,
+                            reg, val);
 }
 
 #define REG_WRITE(addr, reg, val)                              \
@@ -50,7 +51,7 @@ static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr)
 {
        int ret;
 
-       ret = mdiobus_read(bus, REG_PORT(0), 0x03);
+       ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
        if (ret >= 0) {
                ret &= 0xfff0;
                if (ret == 0x0600)
index 15dcc1a..0c28263 100644 (file)
@@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
 static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                          struct msghdr *msg, size_t len)
 {
-       struct sock *sk = sock->sk;
        struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
        struct net_device *dev;
        struct ec_addr addr;
        int err;
        unsigned char port, cb;
 #if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+       struct sock *sk = sock->sk;
        struct sk_buff *skb;
        struct ec_cb *eb;
 #endif
@@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 error_free_buf:
        vfree(userbuf);
+error:
 #else
        err = -EPROTOTYPE;
 #endif
-       error:
        mutex_unlock(&econet_mutex);
 
        return err;
index a5a1050..cbb505b 100644 (file)
@@ -55,45 +55,9 @@ config IP_ADVANCED_ROUTER
 
          If unsure, say N here.
 
-choice
-       prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
-       depends on IP_ADVANCED_ROUTER
-       default ASK_IP_FIB_HASH
-
-config ASK_IP_FIB_HASH
-       bool "FIB_HASH"
-       ---help---
-         Current FIB is very proven and good enough for most users.
-
-config IP_FIB_TRIE
-       bool "FIB_TRIE"
-       ---help---
-         Use new experimental LC-trie as FIB lookup algorithm.
-         This improves lookup performance if you have a large
-         number of routes.
-
-         LC-trie is a longest matching prefix lookup algorithm which
-         performs better than FIB_HASH for large routing tables.
-         But, it consumes more memory and is more complex.
-
-         LC-trie is described in:
-
-         IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
-         IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
-         June 1999
-
-         An experimental study of compression methods for dynamic tries
-         Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
-         <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
-
-endchoice
-
-config IP_FIB_HASH
-       def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
-
 config IP_FIB_TRIE_STATS
        bool "FIB TRIE statistics"
-       depends on IP_FIB_TRIE
+       depends on IP_ADVANCED_ROUTER
        ---help---
          Keep track of statistics on structure of FIB TRIE table.
          Useful for testing and measuring TRIE performance.
@@ -140,6 +104,9 @@ config IP_ROUTE_VERBOSE
          handled by the klogd daemon which is responsible for kernel messages
          ("man klogd").
 
+config IP_ROUTE_CLASSID
+       bool
+
 config IP_PNP
        bool "IP: kernel level autoconfiguration"
        help
@@ -657,4 +624,3 @@ config TCP_MD5SIG
          on the Internet.
 
          If unsure, say N.
-
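
The Kconfig hunk above drops the FIB_HASH/FIB_TRIE choice and leaves the trie as the only IPv4 FIB. Whatever the data structure, the answer being computed is the longest matching prefix; a naive O(n) userspace sketch with made-up routes shows the semantics that the trie merely computes faster:

#include <stdint.h>
#include <stdio.h>

struct rt { uint32_t prefix; int plen; const char *via; };

static const struct rt table[] = {
	{ 0x00000000,  0, "default" },	/* 0.0.0.0/0   */
	{ 0x0A000000,  8, "core"    },	/* 10.0.0.0/8  */
	{ 0x0A010000, 16, "site"    },	/* 10.1.0.0/16 */
};

static const char *lpm(uint32_t dst)
{
	const char *best = NULL;
	int best_len = -1;

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		uint32_t mask = table[i].plen ? ~0u << (32 - table[i].plen) : 0;

		if ((dst & mask) == table[i].prefix && table[i].plen > best_len) {
			best = table[i].via;
			best_len = table[i].plen;
		}
	}
	return best;	/* the most specific route that covers dst */
}

int main(void)
{
	printf("%s\n", lpm(0x0A010203));	/* 10.1.2.3    -> site    */
	printf("%s\n", lpm(0x0A080001));	/* 10.8.0.1    -> core    */
	printf("%s\n", lpm(0xC0A80001));	/* 192.168.0.1 -> default */
	return 0;
}
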
index 4978d22..0dc772d 100644 (file)
@@ -10,12 +10,10 @@ obj-y     := route.o inetpeer.o protocol.o \
             tcp_minisocks.o tcp_cong.o \
             datagram.o raw.o udp.o udplite.o \
             arp.o icmp.o devinet.o af_inet.o  igmp.o \
-            fib_frontend.o fib_semantics.o \
+            fib_frontend.o fib_semantics.o fib_trie.o \
             inet_fragment.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
-obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
-obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
index f2b6110..807d83c 100644 (file)
@@ -880,6 +880,19 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 }
 EXPORT_SYMBOL(inet_ioctl);
 
+#ifdef CONFIG_COMPAT
+int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       struct sock *sk = sock->sk;
+       int err = -ENOIOCTLCMD;
+
+       if (sk->sk_prot->compat_ioctl)
+               err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
+
+       return err;
+}
+#endif
+
 const struct proto_ops inet_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
@@ -903,6 +916,7 @@ const struct proto_ops inet_stream_ops = {
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
+       .compat_ioctl      = inet_compat_ioctl,
 #endif
 };
 EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@ const struct proto_ops inet_dgram_ops = {
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
+       .compat_ioctl      = inet_compat_ioctl,
 #endif
 };
 EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@ static const struct proto_ops inet_sockraw_ops = {
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
+       .compat_ioctl      = inet_compat_ioctl,
 #endif
 };
 
@@ -1085,23 +1101,20 @@ int sysctl_ip_dynaddr __read_mostly;
 static int inet_sk_reselect_saddr(struct sock *sk)
 {
        struct inet_sock *inet = inet_sk(sk);
-       int err;
-       struct rtable *rt;
        __be32 old_saddr = inet->inet_saddr;
-       __be32 new_saddr;
        __be32 daddr = inet->inet_daddr;
+       struct rtable *rt;
+       __be32 new_saddr;
 
        if (inet->opt && inet->opt->srr)
                daddr = inet->opt->faddr;
 
        /* Query new route. */
-       err = ip_route_connect(&rt, daddr, 0,
-                              RT_CONN_FLAGS(sk),
-                              sk->sk_bound_dev_if,
-                              sk->sk_protocol,
-                              inet->inet_sport, inet->inet_dport, sk, 0);
-       if (err)
-               return err;
+       rt = ip_route_connect(daddr, 0, RT_CONN_FLAGS(sk),
+                             sk->sk_bound_dev_if, sk->sk_protocol,
+                             inet->inet_sport, inet->inet_dport, sk, false);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
 
        sk_setup_caps(sk, &rt->dst);
 
@@ -1144,25 +1157,16 @@ int inet_sk_rebuild_header(struct sock *sk)
        daddr = inet->inet_daddr;
        if (inet->opt && inet->opt->srr)
                daddr = inet->opt->faddr;
-{
-       struct flowi fl = {
-               .oif = sk->sk_bound_dev_if,
-               .mark = sk->sk_mark,
-               .fl4_dst = daddr,
-               .fl4_src = inet->inet_saddr,
-               .fl4_tos = RT_CONN_FLAGS(sk),
-               .proto = sk->sk_protocol,
-               .flags = inet_sk_flowi_flags(sk),
-               .fl_ip_sport = inet->inet_sport,
-               .fl_ip_dport = inet->inet_dport,
-       };
-
-       security_sk_classify_flow(sk, &fl);
-       err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
-}
-       if (!err)
+       rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
+                                  inet->inet_dport, inet->inet_sport,
+                                  sk->sk_protocol, RT_CONN_FLAGS(sk),
+                                  sk->sk_bound_dev_if);
+       if (!IS_ERR(rt)) {
+               err = 0;
                sk_setup_caps(sk, &rt->dst);
-       else {
+       } else {
+               err = PTR_ERR(rt);
+
                /* Routing failed... */
                sk->sk_route_caps = 0;
                /*
@@ -1215,7 +1219,7 @@ out:
        return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct iphdr *iph;
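
The new inet_compat_ioctl() above only forwards the request when the underlying protocol provides a compat_ioctl handler and otherwise reports ENOIOCTLCMD so generic code can take over. A toy model of that optional-callback dispatch (the struct and handler below are invented for illustration):

#include <stdio.h>

#define ENOIOCTLCMD 515		/* "no handler" code, kernel-internal value */

struct proto {
	int (*compat_ioctl)(unsigned int cmd, unsigned long arg);
};

static int udp_like_compat_ioctl(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	printf("protocol handled cmd %u\n", cmd);
	return 0;
}

/* forward only if the protocol supplied a handler, as inet_compat_ioctl() does */
static int dispatch(const struct proto *p, unsigned int cmd, unsigned long arg)
{
	if (p->compat_ioctl)
		return p->compat_ioctl(cmd, arg);
	return -ENOIOCTLCMD;
}

int main(void)
{
	struct proto with    = { udp_like_compat_ioctl };
	struct proto without = { NULL };

	printf("%d\n", dispatch(&with, 1, 0));		/* 0    */
	printf("%d\n", dispatch(&without, 1, 0));	/* -515 */
	return 0;
}
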
index 86961be..4286fd3 100644 (file)
@@ -201,11 +201,14 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
        top_iph->ttl = 0;
        top_iph->check = 0;
 
-       ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+       if (x->props.flags & XFRM_STATE_ALIGN4)
+               ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+       else
+               ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
 
        ah->reserved = 0;
        ah->spi = x->id.spi;
-       ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+       ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, 0, skb->len);
@@ -299,9 +302,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
        nexthdr = ah->nexthdr;
        ah_hlen = (ah->hdrlen + 2) << 2;
 
-       if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
-           ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
-               goto out;
+       if (x->props.flags & XFRM_STATE_ALIGN4) {
+               if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
+                   ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
+                       goto out;
+       } else {
+               if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
+                   ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
+                       goto out;
+       }
 
        if (!pskb_may_pull(skb, ah_hlen))
                goto out;
@@ -450,8 +459,12 @@ static int ah_init_state(struct xfrm_state *x)
 
        BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
 
-       x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
-                                         ahp->icv_trunc_len);
+       if (x->props.flags & XFRM_STATE_ALIGN4)
+               x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
+                                                 ahp->icv_trunc_len);
+       else
+               x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
+                                                 ahp->icv_trunc_len);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        x->data = ahp;
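
The ah4.c hunks above choose 4- or 8-byte ICV padding depending on XFRM_STATE_ALIGN4 and encode ah->hdrlen as the aligned header length in 32-bit words minus 2. Worked numbers, assuming a 12-byte fixed AH header and a 128-bit (16-byte) truncated ICV purely for illustration:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int fixed_hdr = 12;	/* nexthdr, hdrlen, reserved, spi, seq_no */
	unsigned int icv_trunc = 16;	/* e.g. a 128-bit truncated ICV */

	unsigned int len8 = ALIGN_UP(fixed_hdr + icv_trunc, 8);	/* 32 bytes */
	unsigned int len4 = ALIGN_UP(fixed_hdr + icv_trunc, 4);	/* 28 bytes */

	/* AH "payload length" field: size in 32-bit words, minus 2 */
	printf("8-byte padding: %u bytes -> hdrlen %u\n", len8, (len8 >> 2) - 2);
	printf("4-byte padding: %u bytes -> hdrlen %u\n", len4, (len4 >> 2) - 2);
	return 0;
}

With these example sizes the 4-byte alignment saves four bytes of on-the-wire overhead per packet, which is the point of the new flag.
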
index 04c8b69..090d273 100644 (file)
@@ -433,14 +433,13 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
 
 static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 {
-       struct flowi fl = { .fl4_dst = sip,
-                           .fl4_src = tip };
        struct rtable *rt;
        int flag = 0;
        /*unsigned long now; */
        struct net *net = dev_net(dev);
 
-       if (ip_route_output_key(net, &rt, &fl) < 0)
+       rt = ip_route_output(net, sip, tip, 0, 0);
+       if (IS_ERR(rt))
                return 1;
        if (rt->dst.dev != dev) {
                NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
@@ -1017,14 +1016,13 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
                IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
                return 0;
        }
-       if (__in_dev_get_rcu(dev)) {
-               IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+       if (__in_dev_get_rtnl(dev)) {
+               IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
                return 0;
        }
        return -ENXIO;
 }
 
-/* must be called with rcu_read_lock() */
 static int arp_req_set_public(struct net *net, struct arpreq *r,
                struct net_device *dev)
 {
@@ -1062,12 +1060,10 @@ static int arp_req_set(struct net *net, struct arpreq *r,
        if (r->arp_flags & ATF_PERM)
                r->arp_flags |= ATF_COM;
        if (dev == NULL) {
-               struct flowi fl = { .fl4_dst = ip,
-                                   .fl4_tos = RTO_ONLINK };
-               struct rtable *rt;
-               err = ip_route_output_key(net, &rt, &fl);
-               if (err != 0)
-                       return err;
+               struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
+
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (!dev)
@@ -1178,7 +1174,6 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
 static int arp_req_delete(struct net *net, struct arpreq *r,
                          struct net_device *dev)
 {
-       int err;
        __be32 ip;
 
        if (r->arp_flags & ATF_PUBL)
@@ -1186,12 +1181,9 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 
        ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
        if (dev == NULL) {
-               struct flowi fl = { .fl4_dst = ip,
-                                   .fl4_tos = RTO_ONLINK };
-               struct rtable *rt;
-               err = ip_route_output_key(net, &rt, &fl);
-               if (err != 0)
-                       return err;
+               struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (!dev)
@@ -1233,10 +1225,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        if (!(r.arp_flags & ATF_NETMASK))
                ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
                                                           htonl(0xFFFFFFFFUL);
-       rcu_read_lock();
+       rtnl_lock();
        if (r.arp_dev[0]) {
                err = -ENODEV;
-               dev = dev_get_by_name_rcu(net, r.arp_dev);
+               dev = __dev_get_by_name(net, r.arp_dev);
                if (dev == NULL)
                        goto out;
 
@@ -1263,7 +1255,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                break;
        }
 out:
-       rcu_read_unlock();
+       rtnl_unlock();
        if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
                err = -EFAULT;
        return err;
index 174be6c..85bd24c 100644 (file)
@@ -46,11 +46,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                if (!saddr)
                        saddr = inet->mc_addr;
        }
-       err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
-                              RT_CONN_FLAGS(sk), oif,
-                              sk->sk_protocol,
-                              inet->inet_sport, usin->sin_port, sk, 1);
-       if (err) {
+       rt = ip_route_connect(usin->sin_addr.s_addr, saddr,
+                             RT_CONN_FLAGS(sk), oif,
+                             sk->sk_protocol,
+                             inet->inet_sport, usin->sin_port, sk, true);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
index 748cb5b..6d85800 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/hash.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
@@ -92,6 +93,71 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
        [IFA_LABEL]             = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
+/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
+ * value.  So if you change this define, make appropriate changes to
+ * inet_addr_hash as well.
+ */
+#define IN4_ADDR_HSIZE 256
+static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
+static DEFINE_SPINLOCK(inet_addr_hash_lock);
+
+static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+{
+       u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+
+       return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
+               (IN4_ADDR_HSIZE - 1));
+}
+
+static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
+{
+       unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+
+       spin_lock(&inet_addr_hash_lock);
+       hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
+       spin_unlock(&inet_addr_hash_lock);
+}
+
+static void inet_hash_remove(struct in_ifaddr *ifa)
+{
+       spin_lock(&inet_addr_hash_lock);
+       hlist_del_init_rcu(&ifa->hash);
+       spin_unlock(&inet_addr_hash_lock);
+}
+
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU, or RTNL
+ */
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
+{
+       unsigned int hash = inet_addr_hash(net, addr);
+       struct net_device *result = NULL;
+       struct in_ifaddr *ifa;
+       struct hlist_node *node;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+               struct net_device *dev = ifa->ifa_dev->dev;
+
+               if (!net_eq(dev_net(dev), net))
+                       continue;
+               if (ifa->ifa_local == addr) {
+                       result = dev;
+                       break;
+               }
+       }
+       if (result && devref)
+               dev_hold(result);
+       rcu_read_unlock();
+       return result;
+}
+EXPORT_SYMBOL(__ip_dev_find);
+
 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 
 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
@@ -265,6 +331,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                        }
 
                        if (!do_promote) {
+                               inet_hash_remove(ifa);
                                *ifap1 = ifa->ifa_next;
 
                                rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
@@ -281,6 +348,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
        /* 2. Unlink it */
 
        *ifap = ifa1->ifa_next;
+       inet_hash_remove(ifa1);
 
        /* 3. Announce address deletion */
 
@@ -368,6 +436,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        ifa->ifa_next = *ifap;
        *ifap = ifa;
 
+       inet_hash_insert(dev_net(in_dev->dev), ifa);
+
        /* Send message first, then call notifier.
           Notifier will trigger FIB update, so that
           listeners of netlink will know about new ifaddr */
@@ -521,6 +591,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
        if (tb[IFA_ADDRESS] == NULL)
                tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 
+       INIT_HLIST_NODE(&ifa->hash);
        ifa->ifa_prefixlen = ifm->ifa_prefixlen;
        ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
        ifa->ifa_flags = ifm->ifa_flags;
@@ -670,7 +741,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                             ifap = &ifa->ifa_next) {
                                if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
                                    sin_orig.sin_addr.s_addr ==
-                                                       ifa->ifa_address) {
+                                                       ifa->ifa_local) {
                                        break; /* found */
                                }
                        }
@@ -728,6 +799,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                if (!ifa) {
                        ret = -ENOBUFS;
                        ifa = inet_alloc_ifa();
+                       INIT_HLIST_NODE(&ifa->hash);
                        if (!ifa)
                                break;
                        if (colon)
@@ -1030,6 +1102,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
        return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+                                       struct in_device *in_dev)
+
+{
+       struct in_ifaddr *ifa = in_dev->ifa_list;
+
+       if (!ifa)
+               return;
+
+       arp_send(ARPOP_REQUEST, ETH_P_ARP,
+                ifa->ifa_local, dev,
+                ifa->ifa_local, NULL,
+                dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1069,6 +1156,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
                        struct in_ifaddr *ifa = inet_alloc_ifa();
 
                        if (ifa) {
+                               INIT_HLIST_NODE(&ifa->hash);
                                ifa->ifa_local =
                                  ifa->ifa_address = htonl(INADDR_LOOPBACK);
                                ifa->ifa_prefixlen = 8;
@@ -1082,18 +1170,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
                }
                ip_mc_up(in_dev);
                /* fall through */
-       case NETDEV_NOTIFY_PEERS:
        case NETDEV_CHANGEADDR:
+               if (!IN_DEV_ARP_NOTIFY(in_dev))
+                       break;
+               /* fall through */
+       case NETDEV_NOTIFY_PEERS:
                /* Send gratuitous ARP to notify of link change */
-               if (IN_DEV_ARP_NOTIFY(in_dev)) {
-                       struct in_ifaddr *ifa = in_dev->ifa_list;
-
-                       if (ifa)
-                               arp_send(ARPOP_REQUEST, ETH_P_ARP,
-                                        ifa->ifa_address, dev,
-                                        ifa->ifa_address, NULL,
-                                        dev->dev_addr, NULL);
-               }
+               inetdev_send_gratuitous_arp(dev, in_dev);
                break;
        case NETDEV_DOWN:
                ip_mc_down(in_dev);
@@ -1710,6 +1793,11 @@ static struct rtnl_af_ops inet_af_ops = {
 
 void __init devinet_init(void)
 {
+       int i;
+
+       for (i = 0; i < IN4_ADDR_HSIZE; i++)
+               INIT_HLIST_HEAD(&inet_addr_lst[i]);
+
        register_pernet_subsys(&devinet_ops);
 
        register_gifconf(PF_INET, inet_gifconf);
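
inet_addr_hash() in the devinet.c hunk above folds the four bytes of the (namespace-mixed) address into one byte, which is exactly why its comment warns that IN4_ADDR_HSIZE and the shifting must change together. The fold on its own, without the per-namespace hash_ptr() mixing:

#include <stdint.h>
#include <stdio.h>

#define IN4_ADDR_HSIZE 256

static unsigned int fold32_to_bucket(uint32_t val)
{
	/* XOR all four bytes together, then keep the low 8 bits */
	return (val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
	       (IN4_ADDR_HSIZE - 1);
}

int main(void)
{
	uint32_t addr = 0x0A000001;	/* example 32-bit value (10.0.0.1) */

	printf("bucket %u of %u\n", fold32_to_bucket(addr), IN4_ADDR_HSIZE);	/* 11 */
	return 0;
}
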
index e42a905..03f994b 100644 (file)
@@ -33,11 +33,14 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
 {
        unsigned int len;
 
-       len = crypto_aead_ivsize(aead);
+       len = seqhilen;
+
+       len += crypto_aead_ivsize(aead);
+
        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
@@ -52,10 +55,15 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
        return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+static inline __be32 *esp_tmp_seqhi(void *tmp)
+{
+       return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+}
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
 {
        return crypto_aead_ivsize(aead) ?
-              PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+              PTR_ALIGN((u8 *)tmp + seqhilen,
+                        crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
 }
 
 static inline struct aead_givcrypt_request *esp_tmp_givreq(
@@ -122,6 +130,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        int plen;
        int tfclen;
        int nfrags;
+       int assoclen;
+       int sglists;
+       int seqhilen;
+       __be32 *seqhi;
 
        /* skb is pure payload to encrypt */
 
@@ -151,14 +163,25 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                goto error;
        nfrags = err;
 
-       tmp = esp_alloc_tmp(aead, nfrags + 1);
+       assoclen = sizeof(*esph);
+       sglists = 1;
+       seqhilen = 0;
+
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists += 2;
+               seqhilen += sizeof(__be32);
+               assoclen += seqhilen;
+       }
+
+       tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto error;
 
-       iv = esp_tmp_iv(aead, tmp);
+       seqhi = esp_tmp_seqhi(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
-       sg = asg + 1;
+       sg = asg + sglists;
 
        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
@@ -215,19 +238,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        esph->spi = x->id.spi;
-       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);
-       sg_init_one(asg, esph, sizeof(*esph));
+
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               sg_init_table(asg, 3);
+               sg_set_buf(asg, &esph->spi, sizeof(__be32));
+               *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+               sg_set_buf(asg + 1, seqhi, seqhilen);
+               sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+       } else
+               sg_init_one(asg, esph, sizeof(*esph));
 
        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
-       aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+       aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output);
+                             XFRM_SKB_CB(skb)->seq.output.low);
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
@@ -346,6 +377,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
+       int assoclen;
+       int sglists;
+       int seqhilen;
+       __be32 *seqhi;
        void *tmp;
        u8 *iv;
        struct scatterlist *sg;
@@ -362,16 +397,27 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
        nfrags = err;
 
+       assoclen = sizeof(*esph);
+       sglists = 1;
+       seqhilen = 0;
+
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists += 2;
+               seqhilen += sizeof(__be32);
+               assoclen += seqhilen;
+       }
+
        err = -ENOMEM;
-       tmp = esp_alloc_tmp(aead, nfrags + 1);
+       tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;
 
        ESP_SKB_CB(skb)->tmp = tmp;
-       iv = esp_tmp_iv(aead, tmp);
+       seqhi = esp_tmp_seqhi(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
-       sg = asg + 1;
+       sg = asg + sglists;
 
        skb->ip_summed = CHECKSUM_NONE;
 
@@ -382,11 +428,19 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
-       sg_init_one(asg, esph, sizeof(*esph));
+
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               sg_init_table(asg, 3);
+               sg_set_buf(asg, &esph->spi, sizeof(__be32));
+               *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+               sg_set_buf(asg + 1, seqhi, seqhilen);
+               sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+       } else
+               sg_init_one(asg, esph, sizeof(*esph));
 
        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
-       aead_request_set_assoc(req, asg, sizeof(*esph));
+       aead_request_set_assoc(req, asg, assoclen);
 
        err = crypto_aead_decrypt(req);
        if (err == -EINPROGRESS)
@@ -500,10 +554,20 @@ static int esp_init_authenc(struct xfrm_state *x)
                goto error;
 
        err = -ENAMETOOLONG;
-       if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
-                    x->aalg ? x->aalg->alg_name : "digest_null",
-                    x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
-               goto error;
+
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+                            "authencesn(%s,%s)",
+                            x->aalg ? x->aalg->alg_name : "digest_null",
+                            x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto error;
+       } else {
+               if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+                            "authenc(%s,%s)",
+                            x->aalg ? x->aalg->alg_name : "digest_null",
+                            x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto error;
+       }
 
        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
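
esp_init_authenc() above now selects the authencesn() template instead of authenc() when extended sequence numbers are enabled, and treats snprintf truncation as ENAMETOOLONG. A plain-C sketch of that name construction (the buffer bound and the example algorithm names are placeholders):

#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME 64	/* placeholder bound for this sketch */

static int build_aead_name(char *out, int esn,
			   const char *aalg, const char *ealg)
{
	int n = snprintf(out, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)",
			 esn ? "authencesn" : "authenc",
			 aalg ? aalg : "digest_null", ealg);

	return (n >= CRYPTO_MAX_ALG_NAME) ? -1 : 0;	/* truncated: name too long */
}

int main(void)
{
	char name[CRYPTO_MAX_ALG_NAME];

	if (build_aead_name(name, 1, "hmac(sha1)", "cbc(aes)") == 0)
		printf("%s\n", name);	/* authencesn(hmac(sha1),cbc(aes)) */
	return 0;
}
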
index 1d2cdd4..a373a25 100644 (file)
@@ -51,11 +51,11 @@ static int __net_init fib4_rules_init(struct net *net)
 {
        struct fib_table *local_table, *main_table;
 
-       local_table = fib_hash_table(RT_TABLE_LOCAL);
+       local_table = fib_trie_table(RT_TABLE_LOCAL);
        if (local_table == NULL)
                return -ENOMEM;
 
-       main_table  = fib_hash_table(RT_TABLE_MAIN);
+       main_table  = fib_trie_table(RT_TABLE_MAIN);
        if (main_table == NULL)
                goto fail;
 
@@ -82,7 +82,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        if (tb)
                return tb;
 
-       tb = fib_hash_table(id);
+       tb = fib_trie_table(id);
        if (!tb)
                return NULL;
        h = id & (FIB_TABLE_HASHSZ - 1);
@@ -114,21 +114,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 }
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
 
-void fib_select_default(struct net *net,
-                       const struct flowi *flp, struct fib_result *res)
-{
-       struct fib_table *tb;
-       int table = RT_TABLE_MAIN;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
-               return;
-       table = res->r->table;
-#endif
-       tb = fib_get_table(net, table);
-       if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
-               fib_table_select_default(tb, flp, res);
-}
-
 static void fib_flush(struct net *net)
 {
        int flushed = 0;
@@ -147,46 +132,6 @@ static void fib_flush(struct net *net)
                rt_cache_flush(net, -1);
 }
 
-/**
- * __ip_dev_find - find the first device with a given source address.
- * @net: the net namespace
- * @addr: the source address
- * @devref: if true, take a reference on the found device
- *
- * If a caller uses devref=false, it should be protected by RCU, or RTNL
- */
-struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
-{
-       struct flowi fl = {
-               .fl4_dst = addr,
-       };
-       struct fib_result res = { 0 };
-       struct net_device *dev = NULL;
-       struct fib_table *local_table;
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       res.r = NULL;
-#endif
-
-       rcu_read_lock();
-       local_table = fib_get_table(net, RT_TABLE_LOCAL);
-       if (!local_table ||
-           fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
-               rcu_read_unlock();
-               return NULL;
-       }
-       if (res.type != RTN_LOCAL)
-               goto out;
-       dev = FIB_RES_DEV(res);
-
-       if (dev && devref)
-               dev_hold(dev);
-out:
-       rcu_read_unlock();
-       return dev;
-}
-EXPORT_SYMBOL(__ip_dev_find);
-
 /*
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
@@ -195,7 +140,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
                                            const struct net_device *dev,
                                            __be32 addr)
 {
-       struct flowi            fl = { .fl4_dst = addr };
+       struct flowi4           fl4 = { .daddr = addr };
        struct fib_result       res;
        unsigned ret = RTN_BROADCAST;
        struct fib_table *local_table;
@@ -213,7 +158,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
        if (local_table) {
                ret = RTN_UNICAST;
                rcu_read_lock();
-               if (!fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
+               if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
                        if (!dev || dev == res.fi->fib_dev)
                                ret = res.type;
                }
@@ -248,19 +193,21 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                        u32 *itag, u32 mark)
 {
        struct in_device *in_dev;
-       struct flowi fl = {
-               .fl4_dst = src,
-               .fl4_src = dst,
-               .fl4_tos = tos,
-               .mark = mark,
-               .iif = oif
-       };
+       struct flowi4 fl4;
        struct fib_result res;
        int no_addr, rpf, accept_local;
        bool dev_match;
        int ret;
        struct net *net;
 
+       fl4.flowi4_oif = 0;
+       fl4.flowi4_iif = oif;
+       fl4.flowi4_mark = mark;
+       fl4.daddr = src;
+       fl4.saddr = dst;
+       fl4.flowi4_tos = tos;
+       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+
        no_addr = rpf = accept_local = 0;
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev) {
@@ -268,14 +215,14 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                rpf = IN_DEV_RPFILTER(in_dev);
                accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
                if (mark && !IN_DEV_SRC_VMARK(in_dev))
-                       fl.mark = 0;
+                       fl4.flowi4_mark = 0;
        }
 
        if (in_dev == NULL)
                goto e_inval;
 
        net = dev_net(dev);
-       if (fib_lookup(net, &fl, &res))
+       if (fib_lookup(net, &fl4, &res))
                goto last_resort;
        if (res.type != RTN_UNICAST) {
                if (res.type != RTN_LOCAL || !accept_local)
@@ -306,10 +253,10 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                goto last_resort;
        if (rpf == 1)
                goto e_rpf;
-       fl.oif = dev->ifindex;
+       fl4.flowi4_oif = dev->ifindex;
 
        ret = 0;
-       if (fib_lookup(net, &fl, &res) == 0) {
+       if (fib_lookup(net, &fl4, &res) == 0) {
                if (res.type == RTN_UNICAST) {
                        *spec_dst = FIB_RES_PREFSRC(res);
                        ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
@@ -849,11 +796,11 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
 {
 
        struct fib_result       res;
-       struct flowi            fl = {
-               .mark = frn->fl_mark,
-               .fl4_dst = frn->fl_addr,
-               .fl4_tos = frn->fl_tos,
-               .fl4_scope = frn->fl_scope,
+       struct flowi4           fl4 = {
+               .flowi4_mark = frn->fl_mark,
+               .daddr = frn->fl_addr,
+               .flowi4_tos = frn->fl_tos,
+               .flowi4_scope = frn->fl_scope,
        };
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -866,7 +813,7 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
 
                frn->tb_id = tb->tb_id;
                rcu_read_lock();
-               frn->err = fib_table_lookup(tb, &fl, &res, FIB_LOOKUP_NOREF);
+               frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
 
                if (!frn->err) {
                        frn->prefixlen = res.prefixlen;
@@ -945,10 +892,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                fib_sync_up(dev);
 #endif
+               fib_update_nh_saddrs(dev);
                rt_cache_flush(dev_net(dev), -1);
                break;
        case NETDEV_DOWN:
                fib_del_ifaddr(ifa);
+               fib_update_nh_saddrs(dev);
                if (ifa->ifa_dev->ifa_list == NULL) {
                        /* Last address was deleted from this interface.
                         * Disable IP.
@@ -1101,5 +1050,5 @@ void __init ip_fib_init(void)
        register_netdevice_notifier(&fib_netdev_notifier);
        register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-       fib_hash_init();
+       fib_trie_init();
 }
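
fib_validate_source() above builds a flowi4 with the packet's source as the lookup destination: reverse-path filtering asks how a reply to this source would be routed and accepts the packet only if that route leaves via the interface the packet arrived on. A toy userspace model with an invented routing table:

#include <stdint.h>
#include <stdio.h>

struct rt { uint32_t prefix; uint32_t mask; int oif; };

/* invented routing table: 10/8 behind ifindex 1, 192.168/16 behind ifindex 2 */
static const struct rt table[] = {
	{ 0x0A000000, 0xFF000000, 1 },
	{ 0xC0A80000, 0xFFFF0000, 2 },
};

static int route_oif(uint32_t addr)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((addr & table[i].mask) == table[i].prefix)
			return table[i].oif;
	return -1;	/* no route back to this address */
}

/* strict reverse-path check: the reply route must use the arrival interface */
static int rpf_ok(uint32_t saddr, int iif)
{
	return route_oif(saddr) == iif;
}

int main(void)
{
	printf("%d\n", rpf_ok(0x0A010203, 1));	/* 1: 10.1.2.3 arrived where expected */
	printf("%d\n", rpf_ok(0x0A010203, 2));	/* 0: same source, wrong interface */
	return 0;
}
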
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
deleted file mode 100644 (file)
index b3acb04..0000000
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IPv4 FIB: lookup engine and maintenance routines.
- *
- * Authors:    Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/protocol.h>
-#include <net/route.h>
-#include <net/tcp.h>
-#include <net/sock.h>
-#include <net/ip_fib.h>
-
-#include "fib_lookup.h"
-
-static struct kmem_cache *fn_hash_kmem __read_mostly;
-static struct kmem_cache *fn_alias_kmem __read_mostly;
-
-struct fib_node {
-       struct hlist_node       fn_hash;
-       struct list_head        fn_alias;
-       __be32                  fn_key;
-       struct fib_alias        fn_embedded_alias;
-};
-
-#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
-
-struct fn_zone {
-       struct fn_zone __rcu    *fz_next;       /* Next not empty zone  */
-       struct hlist_head __rcu *fz_hash;       /* Hash table pointer   */
-       seqlock_t               fz_lock;
-       u32                     fz_hashmask;    /* (fz_divisor - 1)     */
-
-       u8                      fz_order;       /* Zone order (0..32)   */
-       u8                      fz_revorder;    /* 32 - fz_order        */
-       __be32                  fz_mask;        /* inet_make_mask(order) */
-#define FZ_MASK(fz)            ((fz)->fz_mask)
-
-       struct hlist_head       fz_embedded_hash[EMBEDDED_HASH_SIZE];
-
-       int                     fz_nent;        /* Number of entries    */
-       int                     fz_divisor;     /* Hash size (mask+1)   */
-};
-
-struct fn_hash {
-       struct fn_zone          *fn_zones[33];
-       struct fn_zone __rcu    *fn_zone_list;
-};
-
-static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
-{
-       u32 h = ntohl(key) >> fz->fz_revorder;
-       h ^= (h>>20);
-       h ^= (h>>10);
-       h ^= (h>>5);
-       h &= fz->fz_hashmask;
-       return h;
-}
-
-static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
-{
-       return dst & FZ_MASK(fz);
-}
-
-static unsigned int fib_hash_genid;
-
-#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
-
-static struct hlist_head *fz_hash_alloc(int divisor)
-{
-       unsigned long size = divisor * sizeof(struct hlist_head);
-
-       if (size <= PAGE_SIZE)
-               return kzalloc(size, GFP_KERNEL);
-
-       return (struct hlist_head *)
-               __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-}
-
-/* The fib hash lock must be held when this is called. */
-static inline void fn_rebuild_zone(struct fn_zone *fz,
-                                  struct hlist_head *old_ht,
-                                  int old_divisor)
-{
-       int i;
-
-       for (i = 0; i < old_divisor; i++) {
-               struct hlist_node *node, *n;
-               struct fib_node *f;
-
-               hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
-                       struct hlist_head *new_head;
-
-                       hlist_del_rcu(&f->fn_hash);
-
-                       new_head = rcu_dereference_protected(fz->fz_hash, 1) +
-                                  fn_hash(f->fn_key, fz);
-                       hlist_add_head_rcu(&f->fn_hash, new_head);
-               }
-       }
-}
-
-static void fz_hash_free(struct hlist_head *hash, int divisor)
-{
-       unsigned long size = divisor * sizeof(struct hlist_head);
-
-       if (size <= PAGE_SIZE)
-               kfree(hash);
-       else
-               free_pages((unsigned long)hash, get_order(size));
-}
-
-static void fn_rehash_zone(struct fn_zone *fz)
-{
-       struct hlist_head *ht, *old_ht;
-       int old_divisor, new_divisor;
-       u32 new_hashmask;
-
-       new_divisor = old_divisor = fz->fz_divisor;
-
-       switch (old_divisor) {
-       case EMBEDDED_HASH_SIZE:
-               new_divisor *= EMBEDDED_HASH_SIZE;
-               break;
-       case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
-               new_divisor *= (EMBEDDED_HASH_SIZE/2);
-               break;
-       default:
-               if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
-                       printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
-                       return;
-               }
-               new_divisor = (old_divisor << 1);
-               break;
-       }
-
-       new_hashmask = (new_divisor - 1);
-
-#if RT_CACHE_DEBUG >= 2
-       printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
-              fz->fz_order, old_divisor);
-#endif
-
-       ht = fz_hash_alloc(new_divisor);
-
-       if (ht) {
-               struct fn_zone nfz;
-
-               memcpy(&nfz, fz, sizeof(nfz));
-
-               write_seqlock_bh(&fz->fz_lock);
-               old_ht = rcu_dereference_protected(fz->fz_hash, 1);
-               RCU_INIT_POINTER(nfz.fz_hash, ht);
-               nfz.fz_hashmask = new_hashmask;
-               nfz.fz_divisor = new_divisor;
-               fn_rebuild_zone(&nfz, old_ht, old_divisor);
-               fib_hash_genid++;
-               rcu_assign_pointer(fz->fz_hash, ht);
-               fz->fz_hashmask = new_hashmask;
-               fz->fz_divisor = new_divisor;
-               write_sequnlock_bh(&fz->fz_lock);
-
-               if (old_ht != fz->fz_embedded_hash) {
-                       synchronize_rcu();
-                       fz_hash_free(old_ht, old_divisor);
-               }
-       }
-}
-
-static void fn_free_node_rcu(struct rcu_head *head)
-{
-       struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
-
-       kmem_cache_free(fn_hash_kmem, f);
-}
-
-static inline void fn_free_node(struct fib_node *f)
-{
-       call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
-}
-
-static void fn_free_alias_rcu(struct rcu_head *head)
-{
-       struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
-
-       kmem_cache_free(fn_alias_kmem, fa);
-}
-
-static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
-{
-       fib_release_info(fa->fa_info);
-       if (fa == &f->fn_embedded_alias)
-               fa->fa_info = NULL;
-       else
-               call_rcu(&fa->rcu, fn_free_alias_rcu);
-}
-
-static struct fn_zone *
-fn_new_zone(struct fn_hash *table, int z)
-{
-       int i;
-       struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
-       if (!fz)
-               return NULL;
-
-       seqlock_init(&fz->fz_lock);
-       fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
-       fz->fz_hashmask = fz->fz_divisor - 1;
-       RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
-       fz->fz_order = z;
-       fz->fz_revorder = 32 - z;
-       fz->fz_mask = inet_make_mask(z);
-
-       /* Find the first not empty zone with more specific mask */
-       for (i = z + 1; i <= 32; i++)
-               if (table->fn_zones[i])
-                       break;
-       if (i > 32) {
-               /* No more specific masks, we are the first. */
-               rcu_assign_pointer(fz->fz_next,
-                                  rtnl_dereference(table->fn_zone_list));
-               rcu_assign_pointer(table->fn_zone_list, fz);
-       } else {
-               rcu_assign_pointer(fz->fz_next,
-                                  rtnl_dereference(table->fn_zones[i]->fz_next));
-               rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
-       }
-       table->fn_zones[z] = fz;
-       fib_hash_genid++;
-       return fz;
-}
-
-int fib_table_lookup(struct fib_table *tb,
-                    const struct flowi *flp, struct fib_result *res,
-                    int fib_flags)
-{
-       int err;
-       struct fn_zone *fz;
-       struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-
-       rcu_read_lock();
-       for (fz = rcu_dereference(t->fn_zone_list);
-            fz != NULL;
-            fz = rcu_dereference(fz->fz_next)) {
-               struct hlist_head *head;
-               struct hlist_node *node;
-               struct fib_node *f;
-               __be32 k;
-               unsigned int seq;
-
-               do {
-                       seq = read_seqbegin(&fz->fz_lock);
-                       k = fz_key(flp->fl4_dst, fz);
-
-                       head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
-                       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-                               if (f->fn_key != k)
-                                       continue;
-
-                               err = fib_semantic_match(&f->fn_alias,
-                                                flp, res,
-                                                fz->fz_order, fib_flags);
-                               if (err <= 0)
-                                       goto out;
-                       }
-               } while (read_seqretry(&fz->fz_lock, seq));
-       }
-       err = 1;
-out:
-       rcu_read_unlock();
-       return err;
-}
-
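fib_table_lookup() above walks each zone's chain under rcu_read_lock() and wraps the walk in a read_seqbegin()/read_seqretry() pair, so a lookup that races with fn_rehash_zone() simply retries against the new table. Below is a minimal userspace analogue of that sequence-counter retry shape using C11 atomics; the names are invented, the data fields are plain ints, and a real seqlock needs the kernel's barrier/READ_ONCE discipline on the protected data, so treat this only as a sketch of the retry loop.

#include <stdatomic.h>
#include <stdio.h>

struct seq_value {
	atomic_uint seq;	/* even = stable, odd = write in progress */
	int a, b;		/* data the reader wants to see consistently */
};

static struct seq_value val;	/* zero-initialized: sequence starts even */

static void write_pair(struct seq_value *v, int a, int b)
{
	atomic_fetch_add(&v->seq, 1);	/* sequence goes odd: writer active */
	v->a = a;
	v->b = b;
	atomic_fetch_add(&v->seq, 1);	/* back to even: data stable again */
}

static void read_pair(struct seq_value *v, int *a, int *b)
{
	unsigned start;

	do {
		start = atomic_load(&v->seq);
		*a = v->a;
		*b = v->b;
		/* retry if a writer was active or the sequence moved on */
	} while ((start & 1) || start != atomic_load(&v->seq));
}

int main(void)
{
	int a, b;

	write_pair(&val, 3, 4);
	read_pair(&val, &a, &b);
	printf("%d %d\n", a, b);
	return 0;
}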
-void fib_table_select_default(struct fib_table *tb,
-                             const struct flowi *flp, struct fib_result *res)
-{
-       int order, last_idx;
-       struct hlist_node *node;
-       struct fib_node *f;
-       struct fib_info *fi = NULL;
-       struct fib_info *last_resort;
-       struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-       struct fn_zone *fz = t->fn_zones[0];
-       struct hlist_head *head;
-
-       if (fz == NULL)
-               return;
-
-       last_idx = -1;
-       last_resort = NULL;
-       order = -1;
-
-       rcu_read_lock();
-       head = rcu_dereference(fz->fz_hash);
-       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-               struct fib_alias *fa;
-
-               list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
-                       struct fib_info *next_fi = fa->fa_info;
-
-                       if (fa->fa_scope != res->scope ||
-                           fa->fa_type != RTN_UNICAST)
-                               continue;
-
-                       if (next_fi->fib_priority > res->fi->fib_priority)
-                               break;
-                       if (!next_fi->fib_nh[0].nh_gw ||
-                           next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
-                               continue;
-
-                       fib_alias_accessed(fa);
-
-                       if (fi == NULL) {
-                               if (next_fi != res->fi)
-                                       break;
-                       } else if (!fib_detect_death(fi, order, &last_resort,
-                                               &last_idx, tb->tb_default)) {
-                               fib_result_assign(res, fi);
-                               tb->tb_default = order;
-                               goto out;
-                       }
-                       fi = next_fi;
-                       order++;
-               }
-       }
-
-       if (order <= 0 || fi == NULL) {
-               tb->tb_default = -1;
-               goto out;
-       }
-
-       if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-                               tb->tb_default)) {
-               fib_result_assign(res, fi);
-               tb->tb_default = order;
-               goto out;
-       }
-
-       if (last_idx >= 0)
-               fib_result_assign(res, last_resort);
-       tb->tb_default = last_idx;
-out:
-       rcu_read_unlock();
-}
-
-/* Insert node F to FZ. */
-static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
-{
-       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
-
-       hlist_add_head_rcu(&f->fn_hash, head);
-}
-
-/* Return the node in FZ matching KEY. */
-static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
-{
-       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
-       struct hlist_node *node;
-       struct fib_node *f;
-
-       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-               if (f->fn_key == key)
-                       return f;
-       }
-
-       return NULL;
-}
-
-
-static struct fib_alias *fib_fast_alloc(struct fib_node *f)
-{
-       struct fib_alias *fa = &f->fn_embedded_alias;
-
-       if (fa->fa_info != NULL)
-               fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-       return fa;
-}
-
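fib_fast_alloc() leans on the common case of one alias per prefix: the first fib_alias lives inside struct fib_node as fn_embedded_alias and only extra aliases come from the slab cache, which is also why fn_free_alias() merely clears fa_info for the embedded slot. A small self-contained sketch of that embedded-first-slot idea, with malloc standing in for kmem_cache_alloc and all names invented for the example:

#include <stdlib.h>
#include <stdio.h>

struct alias {
	int in_use;
	int value;
};

struct node {
	struct alias embedded;	/* slot for the common single-alias case */
};

/* Return the embedded slot if it is free, otherwise fall back to the heap. */
static struct alias *alias_alloc(struct node *n)
{
	if (!n->embedded.in_use) {
		n->embedded.in_use = 1;
		return &n->embedded;
	}
	return calloc(1, sizeof(struct alias));
}

static void alias_free(struct node *n, struct alias *a)
{
	if (a == &n->embedded)
		a->in_use = 0;	/* just mark the embedded slot reusable */
	else
		free(a);	/* heap-allocated extras are really freed */
}

int main(void)
{
	struct node n = { { 0, 0 } };
	struct alias *first = alias_alloc(&n);	/* uses the embedded slot */
	struct alias *second = alias_alloc(&n);	/* falls back to the heap */

	printf("embedded? %d %d\n", first == &n.embedded, second == &n.embedded);
	alias_free(&n, second);
	alias_free(&n, first);
	return 0;
}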
-/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
-{
-       struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-       struct fib_node *new_f = NULL;
-       struct fib_node *f;
-       struct fib_alias *fa, *new_fa;
-       struct fn_zone *fz;
-       struct fib_info *fi;
-       u8 tos = cfg->fc_tos;
-       __be32 key;
-       int err;
-
-       if (cfg->fc_dst_len > 32)
-               return -EINVAL;
-
-       fz = table->fn_zones[cfg->fc_dst_len];
-       if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
-               return -ENOBUFS;
-
-       key = 0;
-       if (cfg->fc_dst) {
-               if (cfg->fc_dst & ~FZ_MASK(fz))
-                       return -EINVAL;
-               key = fz_key(cfg->fc_dst, fz);
-       }
-
-       fi = fib_create_info(cfg);
-       if (IS_ERR(fi))
-               return PTR_ERR(fi);
-
-       if (fz->fz_nent > (fz->fz_divisor<<1) &&
-           fz->fz_divisor < FZ_MAX_DIVISOR &&
-           (cfg->fc_dst_len == 32 ||
-            (1 << cfg->fc_dst_len) > fz->fz_divisor))
-               fn_rehash_zone(fz);
-
-       f = fib_find_node(fz, key);
-
-       if (!f)
-               fa = NULL;
-       else
-               fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
-
-       /* Now fa, if non-NULL, points to the first fib alias
-        * with the same keys [prefix,tos,priority], if such key already
-        * exists or to the node before which we will insert new one.
-        *
-        * If fa is NULL, we will need to allocate a new one and
-        * insert to the head of f.
-        *
-        * If f is NULL, no fib node matched the destination key
-        * and we need to allocate a new one of those as well.
-        */
-
-       if (fa && fa->fa_tos == tos &&
-           fa->fa_info->fib_priority == fi->fib_priority) {
-               struct fib_alias *fa_first, *fa_match;
-
-               err = -EEXIST;
-               if (cfg->fc_nlflags & NLM_F_EXCL)
-                       goto out;
-
-               /* We have 2 goals:
-                * 1. Find exact match for type, scope, fib_info to avoid
-                * duplicate routes
-                * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
-                */
-               fa_match = NULL;
-               fa_first = fa;
-               fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-               list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
-                       if (fa->fa_tos != tos)
-                               break;
-                       if (fa->fa_info->fib_priority != fi->fib_priority)
-                               break;
-                       if (fa->fa_type == cfg->fc_type &&
-                           fa->fa_scope == cfg->fc_scope &&
-                           fa->fa_info == fi) {
-                               fa_match = fa;
-                               break;
-                       }
-               }
-
-               if (cfg->fc_nlflags & NLM_F_REPLACE) {
-                       u8 state;
-
-                       fa = fa_first;
-                       if (fa_match) {
-                               if (fa == fa_match)
-                                       err = 0;
-                               goto out;
-                       }
-                       err = -ENOBUFS;
-                       new_fa = fib_fast_alloc(f);
-                       if (new_fa == NULL)
-                               goto out;
-
-                       new_fa->fa_tos = fa->fa_tos;
-                       new_fa->fa_info = fi;
-                       new_fa->fa_type = cfg->fc_type;
-                       new_fa->fa_scope = cfg->fc_scope;
-                       state = fa->fa_state;
-                       new_fa->fa_state = state & ~FA_S_ACCESSED;
-                       fib_hash_genid++;
-                       list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
-                       fn_free_alias(fa, f);
-                       if (state & FA_S_ACCESSED)
-                               rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-                       rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
-                                 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
-                       return 0;
-               }
-
-               /* Error if we find a perfect match which
-                * uses the same scope, type, and nexthop
-                * information.
-                */
-               if (fa_match)
-                       goto out;
-
-               if (!(cfg->fc_nlflags & NLM_F_APPEND))
-                       fa = fa_first;
-       }
-
-       err = -ENOENT;
-       if (!(cfg->fc_nlflags & NLM_F_CREATE))
-               goto out;
-
-       err = -ENOBUFS;
-
-       if (!f) {
-               new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
-               if (new_f == NULL)
-                       goto out;
-
-               INIT_HLIST_NODE(&new_f->fn_hash);
-               INIT_LIST_HEAD(&new_f->fn_alias);
-               new_f->fn_key = key;
-               f = new_f;
-       }
-
-       new_fa = fib_fast_alloc(f);
-       if (new_fa == NULL)
-               goto out;
-
-       new_fa->fa_info = fi;
-       new_fa->fa_tos = tos;
-       new_fa->fa_type = cfg->fc_type;
-       new_fa->fa_scope = cfg->fc_scope;
-       new_fa->fa_state = 0;
-
-       /*
-        * Insert new entry to the list.
-        */
-
-       if (new_f)
-               fib_insert_node(fz, new_f);
-       list_add_tail_rcu(&new_fa->fa_list,
-                (fa ? &fa->fa_list : &f->fn_alias));
-       fib_hash_genid++;
-
-       if (new_f)
-               fz->fz_nent++;
-       rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-
-       rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
-                 &cfg->fc_nlinfo, 0);
-       return 0;
-
-out:
-       if (new_f)
-               kmem_cache_free(fn_hash_kmem, new_f);
-       fib_release_info(fi);
-       return err;
-}
-
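fib_table_insert() keys its behaviour off the netlink flags: NLM_F_EXCL fails when an equivalent alias group exists, NLM_F_REPLACE swaps the alias in place with list_replace_rcu(), an exact duplicate without NLM_F_REPLACE is rejected, and anything that actually adds an entry needs NLM_F_CREATE, with NLM_F_APPEND deciding whether it lands after or before the matching group. A compact decision sketch of just that flag logic; the flag values, enum names and the simplifications are all illustrative, not the kernel's.

#include <stdio.h>

#define F_EXCL    0x1
#define F_REPLACE 0x2
#define F_APPEND  0x4
#define F_CREATE  0x8

enum action { ACT_ERROR_EXIST, ACT_REPLACE, ACT_APPEND, ACT_PREPEND,
	      ACT_ERROR_NOENT, ACT_CREATE };

/* exists: an alias group with the same prefix/tos/priority is present;
 * exact:  a member of that group also matches type/scope/info. */
static enum action route_insert_action(unsigned flags, int exists, int exact)
{
	if (exists) {
		if (flags & F_EXCL)
			return ACT_ERROR_EXIST;
		if (flags & F_REPLACE)
			return ACT_REPLACE;
		if (exact)
			return ACT_ERROR_EXIST;	/* perfect duplicate */
	}
	if (!(flags & F_CREATE))
		return ACT_ERROR_NOENT;
	if (exists)
		return (flags & F_APPEND) ? ACT_APPEND : ACT_PREPEND;
	return ACT_CREATE;
}

int main(void)
{
	printf("%d\n", route_insert_action(F_CREATE, 0, 0));	/* ACT_CREATE */
	printf("%d\n", route_insert_action(F_EXCL, 1, 0));	/* ACT_ERROR_EXIST */
	printf("%d\n", route_insert_action(F_REPLACE, 1, 0));	/* ACT_REPLACE */
	return 0;
}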
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
-{
-       struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-       struct fib_node *f;
-       struct fib_alias *fa, *fa_to_delete;
-       struct fn_zone *fz;
-       __be32 key;
-
-       if (cfg->fc_dst_len > 32)
-               return -EINVAL;
-
-       if ((fz  = table->fn_zones[cfg->fc_dst_len]) == NULL)
-               return -ESRCH;
-
-       key = 0;
-       if (cfg->fc_dst) {
-               if (cfg->fc_dst & ~FZ_MASK(fz))
-                       return -EINVAL;
-               key = fz_key(cfg->fc_dst, fz);
-       }
-
-       f = fib_find_node(fz, key);
-
-       if (!f)
-               fa = NULL;
-       else
-               fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
-       if (!fa)
-               return -ESRCH;
-
-       fa_to_delete = NULL;
-       fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-       list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
-               struct fib_info *fi = fa->fa_info;
-
-               if (fa->fa_tos != cfg->fc_tos)
-                       break;
-
-               if ((!cfg->fc_type ||
-                    fa->fa_type == cfg->fc_type) &&
-                   (cfg->fc_scope == RT_SCOPE_NOWHERE ||
-                    fa->fa_scope == cfg->fc_scope) &&
-                   (!cfg->fc_protocol ||
-                    fi->fib_protocol == cfg->fc_protocol) &&
-                   fib_nh_match(cfg, fi) == 0) {
-                       fa_to_delete = fa;
-                       break;
-               }
-       }
-
-       if (fa_to_delete) {
-               int kill_fn;
-
-               fa = fa_to_delete;
-               rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
-                         tb->tb_id, &cfg->fc_nlinfo, 0);
-
-               kill_fn = 0;
-               list_del_rcu(&fa->fa_list);
-               if (list_empty(&f->fn_alias)) {
-                       hlist_del_rcu(&f->fn_hash);
-                       kill_fn = 1;
-               }
-               fib_hash_genid++;
-
-               if (fa->fa_state & FA_S_ACCESSED)
-                       rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-               fn_free_alias(fa, f);
-               if (kill_fn) {
-                       fn_free_node(f);
-                       fz->fz_nent--;
-               }
-
-               return 0;
-       }
-       return -ESRCH;
-}
-
-static int fn_flush_list(struct fn_zone *fz, int idx)
-{
-       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
-       struct hlist_node *node, *n;
-       struct fib_node *f;
-       int found = 0;
-
-       hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
-               struct fib_alias *fa, *fa_node;
-               int kill_f;
-
-               kill_f = 0;
-               list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
-                       struct fib_info *fi = fa->fa_info;
-
-                       if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
-                               list_del_rcu(&fa->fa_list);
-                               if (list_empty(&f->fn_alias)) {
-                                       hlist_del_rcu(&f->fn_hash);
-                                       kill_f = 1;
-                               }
-                               fib_hash_genid++;
-
-                               fn_free_alias(fa, f);
-                               found++;
-                       }
-               }
-               if (kill_f) {
-                       fn_free_node(f);
-                       fz->fz_nent--;
-               }
-       }
-       return found;
-}
-
-/* caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
-{
-       struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-       struct fn_zone *fz;
-       int found = 0;
-
-       for (fz = rtnl_dereference(table->fn_zone_list);
-            fz != NULL;
-            fz = rtnl_dereference(fz->fz_next)) {
-               int i;
-
-               for (i = fz->fz_divisor - 1; i >= 0; i--)
-                       found += fn_flush_list(fz, i);
-       }
-       return found;
-}
-
-void fib_free_table(struct fib_table *tb)
-{
-       struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-       struct fn_zone *fz, *next;
-
-       next = table->fn_zone_list;
-       while (next != NULL) {
-               fz = next;
-               next = fz->fz_next;
-
-               if (fz->fz_hash != fz->fz_embedded_hash)
-                       fz_hash_free(fz->fz_hash, fz->fz_divisor);
-
-               kfree(fz);
-       }
-
-       kfree(tb);
-}
-
-static inline int
-fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
-                    struct fib_table *tb,
-                    struct fn_zone *fz,
-                    struct hlist_head *head)
-{
-       struct hlist_node *node;
-       struct fib_node *f;
-       int i, s_i;
-
-       s_i = cb->args[4];
-       i = 0;
-       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-               struct fib_alias *fa;
-
-               list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
-                       if (i < s_i)
-                               goto next;
-
-                       if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
-                                         cb->nlh->nlmsg_seq,
-                                         RTM_NEWROUTE,
-                                         tb->tb_id,
-                                         fa->fa_type,
-                                         fa->fa_scope,
-                                         f->fn_key,
-                                         fz->fz_order,
-                                         fa->fa_tos,
-                                         fa->fa_info,
-                                         NLM_F_MULTI) < 0) {
-                               cb->args[4] = i;
-                               return -1;
-                       }
-next:
-                       i++;
-               }
-       }
-       cb->args[4] = i;
-       return skb->len;
-}
-
-static inline int
-fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
-                  struct fib_table *tb,
-                  struct fn_zone *fz)
-{
-       int h, s_h;
-       struct hlist_head *head = rcu_dereference(fz->fz_hash);
-
-       if (head == NULL)
-               return skb->len;
-       s_h = cb->args[3];
-       for (h = s_h; h < fz->fz_divisor; h++) {
-               if (hlist_empty(head + h))
-                       continue;
-               if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
-                       cb->args[3] = h;
-                       return -1;
-               }
-               memset(&cb->args[4], 0,
-                      sizeof(cb->args) - 4*sizeof(cb->args[0]));
-       }
-       cb->args[3] = h;
-       return skb->len;
-}
-
-int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
-                  struct netlink_callback *cb)
-{
-       int m = 0, s_m;
-       struct fn_zone *fz;
-       struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-
-       s_m = cb->args[2];
-       rcu_read_lock();
-       for (fz = rcu_dereference(table->fn_zone_list);
-            fz != NULL;
-            fz = rcu_dereference(fz->fz_next), m++) {
-               if (m < s_m)
-                       continue;
-               if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
-                       cb->args[2] = m;
-                       rcu_read_unlock();
-                       return -1;
-               }
-               memset(&cb->args[3], 0,
-                      sizeof(cb->args) - 3*sizeof(cb->args[0]));
-       }
-       rcu_read_unlock();
-       cb->args[2] = m;
-       return skb->len;
-}
-
-void __init fib_hash_init(void)
-{
-       fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
-                                        0, SLAB_PANIC, NULL);
-
-       fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
-                                         0, SLAB_PANIC, NULL);
-
-}
-
-struct fib_table *fib_hash_table(u32 id)
-{
-       struct fib_table *tb;
-
-       tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
-                    GFP_KERNEL);
-       if (tb == NULL)
-               return NULL;
-
-       tb->tb_id = id;
-       tb->tb_default = -1;
-
-       memset(tb->tb_data, 0, sizeof(struct fn_hash));
-       return tb;
-}
-
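fib_hash_table() allocates one buffer covering both the generic struct fib_table header and the fn_hash private state, which then lives behind tb_data. The same header-plus-trailing-payload idiom in portable C, using a flexible array member; the struct names are invented for the example.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct table_priv {
	unsigned long entries;
	/* ... per-implementation state ... */
};

struct table {
	unsigned id;
	int      dflt;
	unsigned long data[];	/* private part starts here */
};

static struct table *table_alloc(unsigned id)
{
	struct table *tb = malloc(sizeof(*tb) + sizeof(struct table_priv));

	if (!tb)
		return NULL;
	tb->id = id;
	tb->dflt = -1;
	memset(tb->data, 0, sizeof(struct table_priv));	/* zero the private part */
	return tb;
}

int main(void)
{
	struct table *tb = table_alloc(254);
	struct table_priv *priv = (struct table_priv *)tb->data;

	printf("table %u, %lu entries\n", tb->id, priv->entries);
	free(tb);
	return 0;
}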
-/* ------------------------------------------------------------------------ */
-#ifdef CONFIG_PROC_FS
-
-struct fib_iter_state {
-       struct seq_net_private p;
-       struct fn_zone  *zone;
-       int             bucket;
-       struct hlist_head *hash_head;
-       struct fib_node *fn;
-       struct fib_alias *fa;
-       loff_t pos;
-       unsigned int genid;
-       int valid;
-};
-
-static struct fib_alias *fib_get_first(struct seq_file *seq)
-{
-       struct fib_iter_state *iter = seq->private;
-       struct fib_table *main_table;
-       struct fn_hash *table;
-
-       main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
-       table = (struct fn_hash *)main_table->tb_data;
-
-       iter->bucket    = 0;
-       iter->hash_head = NULL;
-       iter->fn        = NULL;
-       iter->fa        = NULL;
-       iter->pos       = 0;
-       iter->genid     = fib_hash_genid;
-       iter->valid     = 1;
-
-       for (iter->zone = rcu_dereference(table->fn_zone_list);
-            iter->zone != NULL;
-            iter->zone = rcu_dereference(iter->zone->fz_next)) {
-               int maxslot;
-
-               if (!iter->zone->fz_nent)
-                       continue;
-
-               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-               maxslot = iter->zone->fz_divisor;
-
-               for (iter->bucket = 0; iter->bucket < maxslot;
-                    ++iter->bucket, ++iter->hash_head) {
-                       struct hlist_node *node;
-                       struct fib_node *fn;
-
-                       hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-                               struct fib_alias *fa;
-
-                               list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                                       iter->fn = fn;
-                                       iter->fa = fa;
-                                       goto out;
-                               }
-                       }
-               }
-       }
-out:
-       return iter->fa;
-}
-
-static struct fib_alias *fib_get_next(struct seq_file *seq)
-{
-       struct fib_iter_state *iter = seq->private;
-       struct fib_node *fn;
-       struct fib_alias *fa;
-
-       /* Advance FA, if any. */
-       fn = iter->fn;
-       fa = iter->fa;
-       if (fa) {
-               BUG_ON(!fn);
-               list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
-                       iter->fa = fa;
-                       goto out;
-               }
-       }
-
-       fa = iter->fa = NULL;
-
-       /* Advance FN. */
-       if (fn) {
-               struct hlist_node *node = &fn->fn_hash;
-               hlist_for_each_entry_continue(fn, node, fn_hash) {
-                       iter->fn = fn;
-
-                       list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                               iter->fa = fa;
-                               goto out;
-                       }
-               }
-       }
-
-       fn = iter->fn = NULL;
-
-       /* Advance hash chain. */
-       if (!iter->zone)
-               goto out;
-
-       for (;;) {
-               struct hlist_node *node;
-               int maxslot;
-
-               maxslot = iter->zone->fz_divisor;
-
-               while (++iter->bucket < maxslot) {
-                       iter->hash_head++;
-
-                       hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-                               list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                                       iter->fn = fn;
-                                       iter->fa = fa;
-                                       goto out;
-                               }
-                       }
-               }
-
-               iter->zone = rcu_dereference(iter->zone->fz_next);
-
-               if (!iter->zone)
-                       goto out;
-
-               iter->bucket = 0;
-               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-
-               hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-                       list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                               iter->fn = fn;
-                               iter->fa = fa;
-                               goto out;
-                       }
-               }
-       }
-out:
-       iter->pos++;
-       return fa;
-}
-
-static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
-{
-       struct fib_iter_state *iter = seq->private;
-       struct fib_alias *fa;
-
-       if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
-               fa   = iter->fa;
-               pos -= iter->pos;
-       } else
-               fa = fib_get_first(seq);
-
-       if (fa)
-               while (pos && (fa = fib_get_next(seq)))
-                       --pos;
-       return pos ? NULL : fa;
-}
-
-static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
-{
-       void *v = NULL;
-
-       rcu_read_lock();
-       if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
-               v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
-       return v;
-}
-
-static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       ++*pos;
-       return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
-}
-
-static void fib_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU)
-{
-       rcu_read_unlock();
-}
-
-static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
-{
-       static const unsigned type2flags[RTN_MAX + 1] = {
-               [7] = RTF_REJECT,
-               [8] = RTF_REJECT,
-       };
-       unsigned flags = type2flags[type];
-
-       if (fi && fi->fib_nh->nh_gw)
-               flags |= RTF_GATEWAY;
-       if (mask == htonl(0xFFFFFFFF))
-               flags |= RTF_HOST;
-       flags |= RTF_UP;
-       return flags;
-}
-
-/*
- *     This outputs /proc/net/route.
- *
- *     It always works in backward compatibility mode.
- *     The format of the file is not supposed to be changed.
- */
-static int fib_seq_show(struct seq_file *seq, void *v)
-{
-       struct fib_iter_state *iter;
-       int len;
-       __be32 prefix, mask;
-       unsigned flags;
-       struct fib_node *f;
-       struct fib_alias *fa;
-       struct fib_info *fi;
-
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
-                          "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
-                          "\tWindow\tIRTT");
-               goto out;
-       }
-
-       iter    = seq->private;
-       f       = iter->fn;
-       fa      = iter->fa;
-       fi      = fa->fa_info;
-       prefix  = f->fn_key;
-       mask    = FZ_MASK(iter->zone);
-       flags   = fib_flag_trans(fa->fa_type, mask, fi);
-       if (fi)
-               seq_printf(seq,
-                        "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
-                        fi->fib_dev ? fi->fib_dev->name : "*", prefix,
-                        fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
-                        mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
-                        fi->fib_window,
-                        fi->fib_rtt >> 3, &len);
-       else
-               seq_printf(seq,
-                        "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
-                        prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
-
-       seq_printf(seq, "%*s\n", 127 - len, "");
-out:
-       return 0;
-}
-
-static const struct seq_operations fib_seq_ops = {
-       .start  = fib_seq_start,
-       .next   = fib_seq_next,
-       .stop   = fib_seq_stop,
-       .show   = fib_seq_show,
-};
-
-static int fib_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open_net(inode, file, &fib_seq_ops,
-                           sizeof(struct fib_iter_state));
-}
-
-static const struct file_operations fib_seq_fops = {
-       .owner          = THIS_MODULE,
-       .open           = fib_seq_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release_net,
-};
-
-int __net_init fib_proc_init(struct net *net)
-{
-       if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
-               return -ENOMEM;
-       return 0;
-}
-
-void __net_exit fib_proc_exit(struct net *net)
-{
-       proc_net_remove(net, "route");
-}
-#endif /* CONFIG_PROC_FS */
net/ipv4/fib_lookup.h
index c079cc0..4ec3238 100644 (file)
@@ -25,9 +25,6 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
 }
 
 /* Exported by fib_semantics.c */
-extern int fib_semantic_match(struct list_head *head,
-                             const struct flowi *flp,
-                             struct fib_result *res, int prefixlen, int fib_flags);
 extern void fib_release_info(struct fib_info *);
 extern struct fib_info *fib_create_info(struct fib_config *cfg);
 extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
@@ -51,4 +48,11 @@ static inline void fib_result_assign(struct fib_result *res,
        res->fi = fi;
 }
 
+struct fib_prop {
+       int     error;
+       u8      scope;
+};
+
+extern const struct fib_prop fib_props[RTN_MAX + 1];
+
 #endif /* _FIB_LOOKUP_H */
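The struct fib_prop and fib_props[] declarations added here make the per-route-type property table visible outside fib_semantics.c, so callers can do table-driven checks the way fib_create_info() does with fib_props[cfg->fc_type].scope later in this diff. A minimal standalone illustration of that designated-initializer table pattern; the enum, values and field uses are invented for the example.

#include <stdio.h>

enum route_type { RT_UNSPEC, RT_UNICAST, RT_LOCAL, RT_BLACKHOLE,
		  RT_MAX = RT_BLACKHOLE };

struct route_prop {
	int error;		/* error returned when a lookup hits this type */
	unsigned char scope;
};

static const struct route_prop route_props[RT_MAX + 1] = {
	[RT_UNSPEC]    = { .error = 0,   .scope = 0 },
	[RT_UNICAST]   = { .error = 0,   .scope = 1 },
	[RT_LOCAL]     = { .error = 0,   .scope = 2 },
	[RT_BLACKHOLE] = { .error = -22, .scope = 0 },	/* EINVAL-style reject */
};

int main(void)
{
	enum route_type t = RT_LOCAL;

	/* table-driven validation, in the spirit of fib_create_info() */
	if (t > RT_MAX || route_props[t].error)
		printf("type %d rejected\n", t);
	else
		printf("type %d ok, scope %u\n", t, route_props[t].scope);
	return 0;
}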
net/ipv4/fib_rules.c
index 7981a24..a53bb1b 100644 (file)
@@ -41,19 +41,19 @@ struct fib4_rule {
        __be32                  srcmask;
        __be32                  dst;
        __be32                  dstmask;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        u32                     tclassid;
 #endif
 };
 
-#ifdef CONFIG_NET_CLS_ROUTE
-u32 fib_rules_tclass(struct fib_result *res)
+#ifdef CONFIG_IP_ROUTE_CLASSID
+u32 fib_rules_tclass(const struct fib_result *res)
 {
        return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
 }
 #endif
 
-int fib_lookup(struct net *net, struct flowi *flp, struct fib_result *res)
+int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
 {
        struct fib_lookup_arg arg = {
                .result = res,
@@ -61,7 +61,7 @@ int fib_lookup(struct net *net, struct flowi *flp, struct fib_result *res)
        };
        int err;
 
-       err = fib_rules_lookup(net->ipv4.rules_ops, flp, 0, &arg);
+       err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
        res->r = arg.rule;
 
        return err;
@@ -95,7 +95,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
        if (!tbl)
                goto errout;
 
-       err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result, arg->flags);
+       err = fib_table_lookup(tbl, &flp->u.ip4, (struct fib_result *) arg->result, arg->flags);
        if (err > 0)
                err = -EAGAIN;
 errout:
@@ -106,14 +106,15 @@ errout:
 static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
 {
        struct fib4_rule *r = (struct fib4_rule *) rule;
-       __be32 daddr = fl->fl4_dst;
-       __be32 saddr = fl->fl4_src;
+       struct flowi4 *fl4 = &fl->u.ip4;
+       __be32 daddr = fl4->daddr;
+       __be32 saddr = fl4->saddr;
 
        if (((saddr ^ r->src) & r->srcmask) ||
            ((daddr ^ r->dst) & r->dstmask))
                return 0;
 
-       if (r->tos && (r->tos != fl->fl4_tos))
+       if (r->tos && (r->tos != fl4->flowi4_tos))
                return 0;
 
        return 1;
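fib4_rule_match() tests addresses with the usual XOR-and-mask idiom: (addr ^ rule_addr) & mask is zero exactly when the two agree in every bit the mask keeps. A tiny standalone check of that idiom, with the addresses written as plain 32-bit values for the example:

#include <stdint.h>
#include <stdio.h>

/* Non-zero bits of (addr ^ base) that survive the mask mean "no match". */
static int prefix_match(uint32_t addr, uint32_t base, uint32_t mask)
{
	return ((addr ^ base) & mask) == 0;
}

int main(void)
{
	uint32_t base = 0xC0A80100;	/* 192.168.1.0 */
	uint32_t mask = 0xFFFFFF00;	/* /24 */

	printf("%d\n", prefix_match(0xC0A80105, base, mask));	/* 1: 192.168.1.5 */
	printf("%d\n", prefix_match(0xC0A80205, base, mask));	/* 0: 192.168.2.5 */
	return 0;
}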
@@ -165,7 +166,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        if (frh->dst_len)
                rule4->dst = nla_get_be32(tb[FRA_DST]);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (tb[FRA_FLOW])
                rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
 #endif
@@ -195,7 +196,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
        if (frh->tos && (rule4->tos != frh->tos))
                return 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
                return 0;
 #endif
@@ -224,7 +225,7 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        if (rule4->src_len)
                NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (rule4->tclassid)
                NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
 #endif
net/ipv4/fib_semantics.c
index 12d3dc3..622ac4c 100644 (file)
@@ -49,7 +49,7 @@
 static DEFINE_SPINLOCK(fib_info_lock);
 static struct hlist_head *fib_info_hash;
 static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
 static unsigned int fib_info_cnt;
 
 #define DEVINDEX_HASHBITS 8
@@ -90,11 +90,7 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
 #define endfor_nexthops(fi) }
 
 
-static const struct
-{
-       int     error;
-       u8      scope;
-} fib_props[RTN_MAX + 1] = {
+const struct fib_prop fib_props[RTN_MAX + 1] = {
        [RTN_UNSPEC] = {
                .error  = 0,
                .scope  = RT_SCOPE_NOWHERE,
@@ -152,6 +148,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
 {
        struct fib_info *fi = container_of(head, struct fib_info, rcu);
 
+       if (fi->fib_metrics != (u32 *) dst_default_metrics)
+               kfree(fi->fib_metrics);
        kfree(fi);
 }
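The two lines added to free_fib_info_rcu() pair with the fib_create_info() change further down: fib_metrics now points either at the shared, read-only dst_default_metrics or at a private kzalloc()'d array, so only the private copy may be kfree()'d. The same share-a-default-until-customized pattern in plain C; the names and the metrics count are illustrative.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define N_METRICS 16

/* Shared, read-only defaults: many objects point here and it is never freed. */
static const unsigned default_metrics[N_METRICS];

struct route_info {
	unsigned *metrics;	/* either the shared defaults or a private copy */
};

static int route_info_init(struct route_info *ri, const unsigned *custom)
{
	if (!custom) {
		ri->metrics = (unsigned *)default_metrics;	/* share the defaults */
		return 0;
	}
	ri->metrics = malloc(sizeof(default_metrics));
	if (!ri->metrics)
		return -1;
	memcpy(ri->metrics, custom, sizeof(default_metrics));
	return 0;
}

static void route_info_free(struct route_info *ri)
{
	/* only private copies are heap-allocated */
	if (ri->metrics != (unsigned *)default_metrics)
		free(ri->metrics);
}

int main(void)
{
	struct route_info a, b;
	unsigned custom[N_METRICS] = { [0] = 1500 };

	route_info_init(&a, NULL);
	route_info_init(&b, custom);
	printf("a shares defaults: %d, b[0]=%u\n",
	       a.metrics == (unsigned *)default_metrics, b.metrics[0]);
	route_info_free(&a);
	route_info_free(&b);
	return 0;
}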
 
@@ -200,7 +198,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                    nh->nh_weight != onh->nh_weight ||
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
                    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +219,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
 
 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 {
-       unsigned int mask = (fib_hash_size - 1);
+       unsigned int mask = (fib_info_hash_size - 1);
        unsigned int val = fi->fib_nhs;
 
        val ^= fi->fib_protocol;
@@ -422,7 +420,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
@@ -476,7 +474,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla && nla_get_be32(nla) != nh->nh_gw)
                                return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        if (nla && nla_get_u32(nla) != nh->nh_tclassid)
                                return 1;
@@ -562,16 +560,16 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                }
                rcu_read_lock();
                {
-                       struct flowi fl = {
-                               .fl4_dst = nh->nh_gw,
-                               .fl4_scope = cfg->fc_scope + 1,
-                               .oif = nh->nh_oif,
+                       struct flowi4 fl4 = {
+                               .daddr = nh->nh_gw,
+                               .flowi4_scope = cfg->fc_scope + 1,
+                               .flowi4_oif = nh->nh_oif,
                        };
 
                        /* It is not necessary, but requires a bit of thinking */
-                       if (fl.fl4_scope < RT_SCOPE_LINK)
-                               fl.fl4_scope = RT_SCOPE_LINK;
-                       err = fib_lookup(net, &fl, &res);
+                       if (fl4.flowi4_scope < RT_SCOPE_LINK)
+                               fl4.flowi4_scope = RT_SCOPE_LINK;
+                       err = fib_lookup(net, &fl4, &res);
                        if (err) {
                                rcu_read_unlock();
                                return err;
@@ -613,14 +611,14 @@ out:
 
 static inline unsigned int fib_laddr_hashfn(__be32 val)
 {
-       unsigned int mask = (fib_hash_size - 1);
+       unsigned int mask = (fib_info_hash_size - 1);
 
        return ((__force u32)val ^
                ((__force u32)val >> 7) ^
                ((__force u32)val >> 14)) & mask;
 }
 
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
 {
        if (bytes <= PAGE_SIZE)
                return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +628,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
                                         get_order(bytes));
 }
 
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
 {
        if (!hash)
                return;
@@ -641,18 +639,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
                free_pages((unsigned long) hash, get_order(bytes));
 }
 
-static void fib_hash_move(struct hlist_head *new_info_hash,
-                         struct hlist_head *new_laddrhash,
-                         unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+                              struct hlist_head *new_laddrhash,
+                              unsigned int new_size)
 {
        struct hlist_head *old_info_hash, *old_laddrhash;
-       unsigned int old_size = fib_hash_size;
+       unsigned int old_size = fib_info_hash_size;
        unsigned int i, bytes;
 
        spin_lock_bh(&fib_info_lock);
        old_info_hash = fib_info_hash;
        old_laddrhash = fib_info_laddrhash;
-       fib_hash_size = new_size;
+       fib_info_hash_size = new_size;
 
        for (i = 0; i < old_size; i++) {
                struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +691,8 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
        spin_unlock_bh(&fib_info_lock);
 
        bytes = old_size * sizeof(struct hlist_head *);
-       fib_hash_free(old_info_hash, bytes);
-       fib_hash_free(old_laddrhash, bytes);
+       fib_info_hash_free(old_info_hash, bytes);
+       fib_info_hash_free(old_laddrhash, bytes);
 }
 
 struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -705,6 +703,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        int nhs = 1;
        struct net *net = cfg->fc_nlinfo.nl_net;
 
+       if (cfg->fc_type > RTN_MAX)
+               goto err_inval;
+
        /* Fast check to catch the most weird cases */
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;
@@ -718,8 +719,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 #endif
 
        err = -ENOBUFS;
-       if (fib_info_cnt >= fib_hash_size) {
-               unsigned int new_size = fib_hash_size << 1;
+       if (fib_info_cnt >= fib_info_hash_size) {
+               unsigned int new_size = fib_info_hash_size << 1;
                struct hlist_head *new_info_hash;
                struct hlist_head *new_laddrhash;
                unsigned int bytes;
@@ -727,21 +728,27 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                if (!new_size)
                        new_size = 1;
                bytes = new_size * sizeof(struct hlist_head *);
-               new_info_hash = fib_hash_alloc(bytes);
-               new_laddrhash = fib_hash_alloc(bytes);
+               new_info_hash = fib_info_hash_alloc(bytes);
+               new_laddrhash = fib_info_hash_alloc(bytes);
                if (!new_info_hash || !new_laddrhash) {
-                       fib_hash_free(new_info_hash, bytes);
-                       fib_hash_free(new_laddrhash, bytes);
+                       fib_info_hash_free(new_info_hash, bytes);
+                       fib_info_hash_free(new_laddrhash, bytes);
                } else
-                       fib_hash_move(new_info_hash, new_laddrhash, new_size);
+                       fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
 
-               if (!fib_hash_size)
+               if (!fib_info_hash_size)
                        goto failure;
        }
 
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       if (cfg->fc_mx) {
+               fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               if (!fi->fib_metrics)
+                       goto failure;
+       } else
+               fi->fib_metrics = (u32 *) dst_default_metrics;
        fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
@@ -779,7 +786,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                        goto err_inval;
                if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
                        goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
                        goto err_inval;
 #endif
@@ -792,7 +799,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                nh->nh_oif = cfg->fc_oif;
                nh->nh_gw = cfg->fc_gw;
                nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                nh->nh_tclassid = cfg->fc_flow;
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -804,6 +811,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
                        goto err_inval;
                goto link_it;
+       } else {
+               switch (cfg->fc_type) {
+               case RTN_UNICAST:
+               case RTN_LOCAL:
+               case RTN_BROADCAST:
+               case RTN_ANYCAST:
+               case RTN_MULTICAST:
+                       break;
+               default:
+                       goto err_inval;
+               }
        }
 
        if (cfg->fc_scope > RT_SCOPE_HOST)
@@ -835,6 +853,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                                goto err_inval;
        }
 
+       change_nexthops(fi) {
+               nexthop_nh->nh_cfg_scope = cfg->fc_scope;
+               nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev,
+                                                       nexthop_nh->nh_gw,
+                                                       nexthop_nh->nh_cfg_scope);
+       } endfor_nexthops(fi)
+
 link_it:
        ofi = fib_find_info(fi);
        if (ofi) {
@@ -880,84 +905,6 @@ failure:
        return ERR_PTR(err);
 }
 
-/* Note! fib_semantic_match intentionally uses  RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
-                      struct fib_result *res, int prefixlen, int fib_flags)
-{
-       struct fib_alias *fa;
-       int nh_sel = 0;
-
-       list_for_each_entry_rcu(fa, head, fa_list) {
-               int err;
-
-               if (fa->fa_tos &&
-                   fa->fa_tos != flp->fl4_tos)
-                       continue;
-
-               if (fa->fa_scope < flp->fl4_scope)
-                       continue;
-
-               fib_alias_accessed(fa);
-
-               err = fib_props[fa->fa_type].error;
-               if (err == 0) {
-                       struct fib_info *fi = fa->fa_info;
-
-                       if (fi->fib_flags & RTNH_F_DEAD)
-                               continue;
-
-                       switch (fa->fa_type) {
-                       case RTN_UNICAST:
-                       case RTN_LOCAL:
-                       case RTN_BROADCAST:
-                       case RTN_ANYCAST:
-                       case RTN_MULTICAST:
-                               for_nexthops(fi) {
-                                       if (nh->nh_flags & RTNH_F_DEAD)
-                                               continue;
-                                       if (!flp->oif || flp->oif == nh->nh_oif)
-                                               break;
-                               }
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-                               if (nhsel < fi->fib_nhs) {
-                                       nh_sel = nhsel;
-                                       goto out_fill_res;
-                               }
-#else
-                               if (nhsel < 1)
-                                       goto out_fill_res;
-#endif
-                               endfor_nexthops(fi);
-                               continue;
-
-                       default:
-                               pr_warning("fib_semantic_match bad type %#x\n",
-                                          fa->fa_type);
-                               return -EINVAL;
-                       }
-               }
-               return err;
-       }
-       return 1;
-
-out_fill_res:
-       res->prefixlen = prefixlen;
-       res->nh_sel = nh_sel;
-       res->type = fa->fa_type;
-       res->scope = fa->fa_scope;
-       res->fi = fa->fa_info;
-       if (!(fib_flags & FIB_LOOKUP_NOREF))
-               atomic_inc(&res->fi->fib_clntref);
-       return 0;
-}
-
-/* Find appropriate source address to this destination */
-
-__be32 __fib_res_prefsrc(struct fib_result *res)
-{
-       return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
-}
-
 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
@@ -1002,7 +949,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
                if (fi->fib_nh->nh_oif)
                        NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                if (fi->fib_nh[0].nh_tclassid)
                        NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
 #endif
@@ -1027,7 +974,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
                        if (nh->nh_gw)
                                NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                        if (nh->nh_tclassid)
                                NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
 #endif
@@ -1125,6 +1072,80 @@ int fib_sync_down_dev(struct net_device *dev, int force)
        return ret;
 }
 
+/* Must be invoked inside of an RCU protected region.  */
+void fib_select_default(struct fib_result *res)
+{
+       struct fib_info *fi = NULL, *last_resort = NULL;
+       struct list_head *fa_head = res->fa_head;
+       struct fib_table *tb = res->table;
+       int order = -1, last_idx = -1;
+       struct fib_alias *fa;
+
+       list_for_each_entry_rcu(fa, fa_head, fa_list) {
+               struct fib_info *next_fi = fa->fa_info;
+
+               if (fa->fa_scope != res->scope ||
+                   fa->fa_type != RTN_UNICAST)
+                       continue;
+
+               if (next_fi->fib_priority > res->fi->fib_priority)
+                       break;
+               if (!next_fi->fib_nh[0].nh_gw ||
+                   next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+                       continue;
+
+               fib_alias_accessed(fa);
+
+               if (fi == NULL) {
+                       if (next_fi != res->fi)
+                               break;
+               } else if (!fib_detect_death(fi, order, &last_resort,
+                                            &last_idx, tb->tb_default)) {
+                       fib_result_assign(res, fi);
+                       tb->tb_default = order;
+                       goto out;
+               }
+               fi = next_fi;
+               order++;
+       }
+
+       if (order <= 0 || fi == NULL) {
+               tb->tb_default = -1;
+               goto out;
+       }
+
+       if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+                               tb->tb_default)) {
+               fib_result_assign(res, fi);
+               tb->tb_default = order;
+               goto out;
+       }
+
+       if (last_idx >= 0)
+               fib_result_assign(res, last_resort);
+       tb->tb_default = last_idx;
+out:
+       return;
+}
+
+void fib_update_nh_saddrs(struct net_device *dev)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct fib_nh *nh;
+       unsigned int hash;
+
+       hash = fib_devindex_hashfn(dev->ifindex);
+       head = &fib_info_devhash[hash];
+       hlist_for_each_entry(nh, node, head, nh_hash) {
+               if (nh->nh_dev != dev)
+                       continue;
+               nh->nh_saddr = inet_select_addr(nh->nh_dev,
+                                               nh->nh_gw,
+                                               nh->nh_cfg_scope);
+       }
+}
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
 /*
@@ -1189,7 +1210,7 @@ int fib_sync_up(struct net_device *dev)
  * The algorithm is suboptimal, but it provides really
  * fair weighted route distribution.
  */
-void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
+void fib_select_multipath(struct fib_result *res)
 {
        struct fib_info *fi = res->fi;
        int w;
net/ipv4/fib_trie.c
index 0f28034..3d28a35 100644 (file)
@@ -95,7 +95,7 @@ typedef unsigned int t_key;
 #define IS_TNODE(n) (!(n->parent & T_LEAF))
 #define IS_LEAF(n) (n->parent & T_LEAF)
 
-struct node {
+struct rt_trie_node {
        unsigned long parent;
        t_key key;
 };
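The struct node -> struct rt_trie_node rename keeps the trie's existing trick of folding the node type into the low bits of the parent word, which is what IS_TNODE()/IS_LEAF() test and node_parent()'s '& ~NODE_TYPE_MASK' strips off. A self-contained sketch of that tagged parent pointer; the mask and flag values are assumptions for the example.

#include <stdint.h>
#include <stdio.h>

#define TYPE_MASK 1UL		/* low bit: 0 = internal node, 1 = leaf */
#define T_LEAF    1UL

struct tn {
	uintptr_t parent;	/* parent pointer with the type bit folded in */
};

static void set_parent(struct tn *n, struct tn *parent, unsigned long type)
{
	n->parent = (uintptr_t)parent | type;
}

static struct tn *get_parent(const struct tn *n)
{
	return (struct tn *)(n->parent & ~TYPE_MASK);
}

static int is_leaf(const struct tn *n)
{
	return n->parent & T_LEAF;
}

int main(void)
{
	struct tn root = { 0 }, leaf;

	set_parent(&leaf, &root, T_LEAF);
	printf("leaf? %d, parent ok? %d\n",
	       is_leaf(&leaf), get_parent(&leaf) == &root);
	return 0;
}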
@@ -126,7 +126,7 @@ struct tnode {
                struct work_struct work;
                struct tnode *tnode_free;
        };
-       struct node *child[0];
+       struct rt_trie_node *child[0];
 };
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,16 +151,16 @@ struct trie_stat {
 };
 
 struct trie {
-       struct node *trie;
+       struct rt_trie_node *trie;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie_use_stats stats;
 #endif
 };
 
-static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
                                  int wasfull);
-static struct node *resize(struct trie *t, struct tnode *tn);
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
 static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
@@ -177,12 +177,12 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
-static inline struct tnode *node_parent(struct node *node)
+static inline struct tnode *node_parent(struct rt_trie_node *node)
 {
        return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
 }
 
-static inline struct tnode *node_parent_rcu(struct node *node)
+static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
 {
        struct tnode *ret = node_parent(node);
 
@@ -192,22 +192,22 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 /* Same as rcu_assign_pointer
  * but that macro() assumes that value is a pointer.
  */
-static inline void node_set_parent(struct node *node, struct tnode *ptr)
+static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
 {
        smp_wmb();
        node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
-static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
 {
        BUG_ON(i >= 1U << tn->bits);
 
        return tn->child[i];
 }
 
-static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
 {
-       struct node *ret = tnode_get_child(tn, i);
+       struct rt_trie_node *ret = tnode_get_child(tn, i);
 
        return rcu_dereference_rtnl(ret);
 }
@@ -217,12 +217,12 @@ static inline int tnode_child_length(const struct tnode *tn)
        return 1 << tn->bits;
 }
 
-static inline t_key mask_pfx(t_key k, unsigned short l)
+static inline t_key mask_pfx(t_key k, unsigned int l)
 {
        return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
 }
 
-static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
+static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
 {
        if (offset < KEYLENGTH)
                return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
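tkey_extract_bits(), retyped in this hunk, pulls 'bits' consecutive bits starting 'offset' bits from the most significant end of a key by shifting the prefix off the left and then right-aligning the field. A standalone version with a quick check, assuming KEYLENGTH is 32 as it is for IPv4 keys:

#include <stdint.h>
#include <stdio.h>

#define KEYLENGTH 32

/* Extract 'bits' bits of 'a' (bits >= 1) starting 'offset' bits from the MSB. */
static uint32_t extract_bits(uint32_t a, unsigned offset, unsigned bits)
{
	if (offset < KEYLENGTH)
		return (uint32_t)(a << offset) >> (KEYLENGTH - bits);
	return 0;
}

int main(void)
{
	uint32_t key = 0xC0A80105;	/* 192.168.1.5 as a host-order key */

	printf("%u\n", extract_bits(key, 0, 8));	/* 192: top octet    */
	printf("%u\n", extract_bits(key, 8, 8));	/* 168: next octet   */
	printf("%u\n", extract_bits(key, 24, 8));	/* 5:   bottom octet */
	return 0;
}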
@@ -378,7 +378,7 @@ static void __tnode_free_rcu(struct rcu_head *head)
 {
        struct tnode *tn = container_of(head, struct tnode, rcu);
        size_t size = sizeof(struct tnode) +
-                     (sizeof(struct node *) << tn->bits);
+                     (sizeof(struct rt_trie_node *) << tn->bits);
 
        if (size <= PAGE_SIZE)
                kfree(tn);
@@ -402,7 +402,7 @@ static void tnode_free_safe(struct tnode *tn)
        tn->tnode_free = tnode_free_head;
        tnode_free_head = tn;
        tnode_free_size += sizeof(struct tnode) +
-                          (sizeof(struct node *) << tn->bits);
+                          (sizeof(struct rt_trie_node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -443,7 +443,7 @@ static struct leaf_info *leaf_info_new(int plen)
 
 static struct tnode *tnode_new(t_key key, int pos, int bits)
 {
-       size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
+       size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
        struct tnode *tn = tnode_alloc(sz);
 
        if (tn) {
@@ -456,7 +456,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
        }
 
        pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-                sizeof(struct node) << bits);
+                sizeof(struct rt_trie_node) << bits);
        return tn;
 }
 
@@ -465,7 +465,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
  * and no bits are skipped. See discussion in dyntree paper p. 6
  */
 
-static inline int tnode_full(const struct tnode *tn, const struct node *n)
+static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
 {
        if (n == NULL || IS_LEAF(n))
                return 0;
@@ -474,7 +474,7 @@ static inline int tnode_full(const struct tnode *tn, const struct node *n)
 }
 
 static inline void put_child(struct trie *t, struct tnode *tn, int i,
-                            struct node *n)
+                            struct rt_trie_node *n)
 {
        tnode_put_child_reorg(tn, i, n, -1);
 }
@@ -484,10 +484,10 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
   * Update the value of full_children and empty_children.
   */
 
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
                                  int wasfull)
 {
-       struct node *chi = tn->child[i];
+       struct rt_trie_node *chi = tn->child[i];
        int isfull;
 
        BUG_ON(i >= 1<<tn->bits);
@@ -515,7 +515,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
 }
 
 #define MAX_WORK 10
-static struct node *resize(struct trie *t, struct tnode *tn)
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
 {
        int i;
        struct tnode *old_tn;
@@ -605,7 +605,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
 
        /* Keep root node larger  */
 
-       if (!node_parent((struct node *)tn)) {
+       if (!node_parent((struct rt_trie_node *)tn)) {
                inflate_threshold_use = inflate_threshold_root;
                halve_threshold_use = halve_threshold_root;
        } else {
@@ -635,7 +635,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
 
        /* Return if at least one inflate is run */
        if (max_work != MAX_WORK)
-               return (struct node *) tn;
+               return (struct rt_trie_node *) tn;
 
        /*
         * Halve as long as the number of empty children in this
@@ -663,7 +663,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
        if (tn->empty_children == tnode_child_length(tn) - 1) {
 one_child:
                for (i = 0; i < tnode_child_length(tn); i++) {
-                       struct node *n;
+                       struct rt_trie_node *n;
 
                        n = tn->child[i];
                        if (!n)
@@ -676,7 +676,7 @@ one_child:
                        return n;
                }
        }
-       return (struct node *) tn;
+       return (struct rt_trie_node *) tn;
 }
 
 static struct tnode *inflate(struct trie *t, struct tnode *tn)
@@ -723,14 +723,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
                                goto nomem;
                        }
 
-                       put_child(t, tn, 2*i, (struct node *) left);
-                       put_child(t, tn, 2*i+1, (struct node *) right);
+                       put_child(t, tn, 2*i, (struct rt_trie_node *) left);
+                       put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
                }
        }
 
        for (i = 0; i < olen; i++) {
                struct tnode *inode;
-               struct node *node = tnode_get_child(oldtnode, i);
+               struct rt_trie_node *node = tnode_get_child(oldtnode, i);
                struct tnode *left, *right;
                int size, j;
 
@@ -825,7 +825,7 @@ nomem:
 static struct tnode *halve(struct trie *t, struct tnode *tn)
 {
        struct tnode *oldtnode = tn;
-       struct node *left, *right;
+       struct rt_trie_node *left, *right;
        int i;
        int olen = tnode_child_length(tn);
 
@@ -856,7 +856,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
                        if (!newn)
                                goto nomem;
 
-                       put_child(t, tn, i/2, (struct node *)newn);
+                       put_child(t, tn, i/2, (struct rt_trie_node *)newn);
                }
 
        }
@@ -958,7 +958,7 @@ fib_find_node(struct trie *t, u32 key)
 {
        int pos;
        struct tnode *tn;
-       struct node *n;
+       struct rt_trie_node *n;
 
        pos = 0;
        n = rcu_dereference_rtnl(t->trie);
@@ -993,17 +993,17 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 
        key = tn->key;
 
-       while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
+       while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
                cindex = tkey_extract_bits(key, tp->pos, tp->bits);
                wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
                tn = (struct tnode *) resize(t, (struct tnode *)tn);
 
                tnode_put_child_reorg((struct tnode *)tp, cindex,
-                                     (struct node *)tn, wasfull);
+                                     (struct rt_trie_node *)tn, wasfull);
 
-               tp = node_parent((struct node *) tn);
+               tp = node_parent((struct rt_trie_node *) tn);
                if (!tp)
-                       rcu_assign_pointer(t->trie, (struct node *)tn);
+                       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 
                tnode_free_flush();
                if (!tp)
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
        if (IS_TNODE(tn))
                tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
-       rcu_assign_pointer(t->trie, (struct node *)tn);
+       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
        tnode_free_flush();
 }
 
@@ -1025,7 +1025,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 {
        int pos, newpos;
        struct tnode *tp = NULL, *tn = NULL;
-       struct node *n;
+       struct rt_trie_node *n;
        struct leaf *l;
        int missbit;
        struct list_head *fa_head = NULL;
@@ -1111,10 +1111,10 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
        if (t->trie && n == NULL) {
                /* Case 2: n is NULL, and will just insert a new leaf */
 
-               node_set_parent((struct node *)l, tp);
+               node_set_parent((struct rt_trie_node *)l, tp);
 
                cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-               put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
+               put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
        } else {
                /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
                /*
@@ -1141,18 +1141,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
                        return NULL;
                }
 
-               node_set_parent((struct node *)tn, tp);
+               node_set_parent((struct rt_trie_node *)tn, tp);
 
                missbit = tkey_extract_bits(key, newpos, 1);
-               put_child(t, tn, missbit, (struct node *)l);
+               put_child(t, tn, missbit, (struct rt_trie_node *)l);
                put_child(t, tn, 1-missbit, n);
 
                if (tp) {
                        cindex = tkey_extract_bits(key, tp->pos, tp->bits);
                        put_child(t, (struct tnode *)tp, cindex,
-                                 (struct node *)tn);
+                                 (struct rt_trie_node *)tn);
                } else {
-                       rcu_assign_pointer(t->trie, (struct node *)tn);
+                       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
                        tp = tn;
                }
        }
@@ -1340,8 +1340,8 @@ err:
 }
 
 /* should be called with rcu_read_lock */
-static int check_leaf(struct trie *t, struct leaf *l,
-                     t_key key,  const struct flowi *flp,
+static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
+                     t_key key,  const struct flowi4 *flp,
                      struct fib_result *res, int fib_flags)
 {
        struct leaf_info *li;
@@ -1349,40 +1349,75 @@ static int check_leaf(struct trie *t, struct leaf *l,
        struct hlist_node *node;
 
        hlist_for_each_entry_rcu(li, node, hhead, hlist) {
-               int err;
+               struct fib_alias *fa;
                int plen = li->plen;
                __be32 mask = inet_make_mask(plen);
 
                if (l->key != (key & ntohl(mask)))
                        continue;
 
-               err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
+               list_for_each_entry_rcu(fa, &li->falh, fa_list) {
+                       struct fib_info *fi = fa->fa_info;
+                       int nhsel, err;
 
+                       if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+                               continue;
+                       if (fa->fa_scope < flp->flowi4_scope)
+                               continue;
+                       fib_alias_accessed(fa);
+                       err = fib_props[fa->fa_type].error;
+                       if (err) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-               if (err <= 0)
-                       t->stats.semantic_match_passed++;
-               else
-                       t->stats.semantic_match_miss++;
+                               t->stats.semantic_match_miss++;
+#endif
+                               return 1;
+                       }
+                       if (fi->fib_flags & RTNH_F_DEAD)
+                               continue;
+                       for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+                               const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+                               if (nh->nh_flags & RTNH_F_DEAD)
+                                       continue;
+                               if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
+                                       continue;
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+                               t->stats.semantic_match_passed++;
+#endif
+                               res->prefixlen = plen;
+                               res->nh_sel = nhsel;
+                               res->type = fa->fa_type;
+                               res->scope = fa->fa_scope;
+                               res->fi = fi;
+                               res->table = tb;
+                               res->fa_head = &li->falh;
+                               if (!(fib_flags & FIB_LOOKUP_NOREF))
+                                       atomic_inc(&res->fi->fib_clntref);
+                               return 0;
+                       }
+               }
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+               t->stats.semantic_match_miss++;
 #endif
-               if (err <= 0)
-                       return err;
        }
 
        return 1;
 }
 
-int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                     struct fib_result *res, int fib_flags)
 {
        struct trie *t = (struct trie *) tb->tb_data;
        int ret;
-       struct node *n;
+       struct rt_trie_node *n;
        struct tnode *pn;
-       int pos, bits;
-       t_key key = ntohl(flp->fl4_dst);
-       int chopped_off;
+       unsigned int pos, bits;
+       t_key key = ntohl(flp->daddr);
+       unsigned int chopped_off;
        t_key cindex = 0;
-       int current_prefix_length = KEYLENGTH;
+       unsigned int current_prefix_length = KEYLENGTH;
        struct tnode *cn;
        t_key pref_mismatch;
 
@@ -1398,7 +1433,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
 
        /* Just a leaf? */
        if (IS_LEAF(n)) {
-               ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+               ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
                goto found;
        }
 
@@ -1423,7 +1458,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
                }
 
                if (IS_LEAF(n)) {
-                       ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+                       ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
                        if (ret > 0)
                                goto backtrace;
                        goto found;
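
check_leaf(), rewritten earlier in this file, accepts a leaf only when the leaf key equals the destination masked to the leaf's prefix length (the l->key != (key & ntohl(mask)) test), and fib_table_lookup() above invokes it for every candidate leaf it descends into. A self-contained sketch of that covering test, with inet_make_mask()/ntohl() folded into a host-order mask for brevity:

        #include <stdio.h>
        #include <stdint.h>

        /* Host-order equivalent of ntohl(inet_make_mask(plen)). */
        static uint32_t prefix_mask(int plen)
        {
                return plen ? ~0u << (32 - plen) : 0;
        }

        /* Does the leaf (leaf_key/plen) cover the destination 'key'? */
        static int leaf_covers(uint32_t leaf_key, int plen, uint32_t key)
        {
                return leaf_key == (key & prefix_mask(plen));
        }

        int main(void)
        {
                uint32_t dst = 0xC0A80142;      /* 192.168.1.66 */

                printf("%d\n", leaf_covers(0xC0A80100, 24, dst)); /* 1: inside 192.168.1.0/24 */
                printf("%d\n", leaf_covers(0xC0A80200, 24, dst)); /* 0: different /24 */
                printf("%d\n", leaf_covers(0x00000000,  0, dst)); /* 1: default route */
                return 0;
        }
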
@@ -1541,7 +1576,7 @@ backtrace:
                if (chopped_off <= pn->bits) {
                        cindex &= ~(1 << (chopped_off-1));
                } else {
-                       struct tnode *parent = node_parent_rcu((struct node *) pn);
+                       struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
                        if (!parent)
                                goto failed;
 
@@ -1568,7 +1603,7 @@ found:
  */
 static void trie_leaf_remove(struct trie *t, struct leaf *l)
 {
-       struct tnode *tp = node_parent((struct node *) l);
+       struct tnode *tp = node_parent((struct rt_trie_node *) l);
 
        pr_debug("entering trie_leaf_remove(%p)\n", l);
 
@@ -1706,7 +1741,7 @@ static int trie_flush_leaf(struct leaf *l)
  * Scan for the next right leaf starting at node p->child[idx]
  * Since we have back pointer, no recursion necessary.
  */
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
+static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
 {
        do {
                t_key idx;
@@ -1732,7 +1767,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
                }
 
                /* Node empty, walk back up to parent */
-               c = (struct node *) p;
+               c = (struct rt_trie_node *) p;
        } while ((p = node_parent_rcu(c)) != NULL);
 
        return NULL; /* Root of trie */
@@ -1753,7 +1788,7 @@ static struct leaf *trie_firstleaf(struct trie *t)
 
 static struct leaf *trie_nextleaf(struct leaf *l)
 {
-       struct node *c = (struct node *) l;
+       struct rt_trie_node *c = (struct rt_trie_node *) l;
        struct tnode *p = node_parent_rcu(c);
 
        if (!p)
@@ -1802,80 +1837,6 @@ void fib_free_table(struct fib_table *tb)
        kfree(tb);
 }
 
-void fib_table_select_default(struct fib_table *tb,
-                             const struct flowi *flp,
-                             struct fib_result *res)
-{
-       struct trie *t = (struct trie *) tb->tb_data;
-       int order, last_idx;
-       struct fib_info *fi = NULL;
-       struct fib_info *last_resort;
-       struct fib_alias *fa = NULL;
-       struct list_head *fa_head;
-       struct leaf *l;
-
-       last_idx = -1;
-       last_resort = NULL;
-       order = -1;
-
-       rcu_read_lock();
-
-       l = fib_find_node(t, 0);
-       if (!l)
-               goto out;
-
-       fa_head = get_fa_head(l, 0);
-       if (!fa_head)
-               goto out;
-
-       if (list_empty(fa_head))
-               goto out;
-
-       list_for_each_entry_rcu(fa, fa_head, fa_list) {
-               struct fib_info *next_fi = fa->fa_info;
-
-               if (fa->fa_scope != res->scope ||
-                   fa->fa_type != RTN_UNICAST)
-                       continue;
-
-               if (next_fi->fib_priority > res->fi->fib_priority)
-                       break;
-               if (!next_fi->fib_nh[0].nh_gw ||
-                   next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
-                       continue;
-
-               fib_alias_accessed(fa);
-
-               if (fi == NULL) {
-                       if (next_fi != res->fi)
-                               break;
-               } else if (!fib_detect_death(fi, order, &last_resort,
-                                            &last_idx, tb->tb_default)) {
-                       fib_result_assign(res, fi);
-                       tb->tb_default = order;
-                       goto out;
-               }
-               fi = next_fi;
-               order++;
-       }
-       if (order <= 0 || fi == NULL) {
-               tb->tb_default = -1;
-               goto out;
-       }
-
-       if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-                               tb->tb_default)) {
-               fib_result_assign(res, fi);
-               tb->tb_default = order;
-               goto out;
-       }
-       if (last_idx >= 0)
-               fib_result_assign(res, last_resort);
-       tb->tb_default = last_idx;
-out:
-       rcu_read_unlock();
-}
-
 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
                           struct fib_table *tb,
                           struct sk_buff *skb, struct netlink_callback *cb)
@@ -1990,7 +1951,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
        return skb->len;
 }
 
-void __init fib_hash_init(void)
+void __init fib_trie_init(void)
 {
        fn_alias_kmem = kmem_cache_create("ip_fib_alias",
                                          sizeof(struct fib_alias),
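
fib_trie_init() (the renamed fib_hash_init()) above creates the slab cache that backs every struct fib_alias the trie allocates; the remaining arguments of the call fall outside the hunk. A generic sketch of the kmem_cache pattern involved, using a hypothetical object type, cache name and flags rather than the ones fib_trie.c passes:

        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/slab.h>

        struct demo_entry {                     /* stand-in for fib_alias and friends */
                int key;
        };

        static struct kmem_cache *demo_cachep;

        static int __init demo_cache_init(void)
        {
                /* One cache per fixed-size object keeps frequent allocations cheap. */
                demo_cachep = kmem_cache_create("demo_entry", sizeof(struct demo_entry),
                                                0, SLAB_HWCACHE_ALIGN, NULL);
                return demo_cachep ? 0 : -ENOMEM;
        }

        static void demo_cache_use(void)
        {
                struct demo_entry *e = kmem_cache_zalloc(demo_cachep, GFP_KERNEL);

                if (e) {
                        e->key = 42;
                        kmem_cache_free(demo_cachep, e);
                }
        }
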
@@ -2003,8 +1964,7 @@ void __init fib_hash_init(void)
 }
 
 
-/* Fix more generic FIB names for init later */
-struct fib_table *fib_hash_table(u32 id)
+struct fib_table *fib_trie_table(u32 id)
 {
        struct fib_table *tb;
        struct trie *t;
@@ -2036,7 +1996,7 @@ struct fib_trie_iter {
        unsigned int depth;
 };
 
-static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
 {
        struct tnode *tn = iter->tnode;
        unsigned int cindex = iter->index;
@@ -2050,7 +2010,7 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
                 iter->tnode, iter->index, iter->depth);
 rescan:
        while (cindex < (1<<tn->bits)) {
-               struct node *n = tnode_get_child_rcu(tn, cindex);
+               struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
 
                if (n) {
                        if (IS_LEAF(n)) {
@@ -2069,7 +2029,7 @@ rescan:
        }
 
        /* Current node exhausted, pop back up */
-       p = node_parent_rcu((struct node *)tn);
+       p = node_parent_rcu((struct rt_trie_node *)tn);
        if (p) {
                cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
                tn = p;
@@ -2081,10 +2041,10 @@ rescan:
        return NULL;
 }
 
-static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
                                       struct trie *t)
 {
-       struct node *n;
+       struct rt_trie_node *n;
 
        if (!t)
                return NULL;
@@ -2108,7 +2068,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
 
 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 {
-       struct node *n;
+       struct rt_trie_node *n;
        struct fib_trie_iter iter;
 
        memset(s, 0, sizeof(*s));
@@ -2181,7 +2141,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_putc(seq, '\n');
        seq_printf(seq, "\tPointers: %u\n", pointers);
 
-       bytes += sizeof(struct node *) * pointers;
+       bytes += sizeof(struct rt_trie_node *) * pointers;
        seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
        seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
 }
@@ -2262,7 +2222,7 @@ static const struct file_operations fib_triestat_fops = {
        .release = single_release_net,
 };
 
-static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 {
        struct fib_trie_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
@@ -2275,7 +2235,7 @@ static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
                struct fib_table *tb;
 
                hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
-                       struct node *n;
+                       struct rt_trie_node *n;
 
                        for (n = fib_trie_get_first(iter,
                                                    (struct trie *) tb->tb_data);
@@ -2304,7 +2264,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct fib_table *tb = iter->tb;
        struct hlist_node *tb_node;
        unsigned int h;
-       struct node *n;
+       struct rt_trie_node *n;
 
        ++*pos;
        /* next node in same table */
@@ -2390,7 +2350,7 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
 static int fib_trie_seq_show(struct seq_file *seq, void *v)
 {
        const struct fib_trie_iter *iter = seq->private;
-       struct node *n = v;
+       struct rt_trie_node *n = v;
 
        if (!node_parent_rcu(n))
                fib_table_print(seq, iter->tb);
index 4aa1b7f..a91dc16 100644
@@ -233,48 +233,11 @@ static inline void icmp_xmit_unlock(struct sock *sk)
  *     Send an ICMP frame.
  */
 
-/*
- *     Check transmit rate limitation for given message.
- *     The rate information is held in the destination cache now.
- *     This function is generic and could be used for other purposes
- *     too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
- *
- *     Note that the same dst_entry fields are modified by functions in
- *     route.c too, but these work for packet destinations while xrlim_allow
- *     works for icmp destinations. This means the rate limiting information
- *     for one "ip object" is shared - and these ICMPs are twice limited:
- *     by source and by destination.
- *
- *     RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
- *                       SHOULD allow setting of rate limits
- *
- *     Shared between ICMPv4 and ICMPv6.
- */
-#define XRLIM_BURST_FACTOR 6
-int xrlim_allow(struct dst_entry *dst, int timeout)
-{
-       unsigned long now, token = dst->rate_tokens;
-       int rc = 0;
-
-       now = jiffies;
-       token += now - dst->rate_last;
-       dst->rate_last = now;
-       if (token > XRLIM_BURST_FACTOR * timeout)
-               token = XRLIM_BURST_FACTOR * timeout;
-       if (token >= timeout) {
-               token -= timeout;
-               rc = 1;
-       }
-       dst->rate_tokens = token;
-       return rc;
-}
-EXPORT_SYMBOL(xrlim_allow);
-
-static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
                int type, int code)
 {
        struct dst_entry *dst = &rt->dst;
-       int rc = 1;
+       bool rc = true;
 
        if (type > NR_ICMP_TYPES)
                goto out;
@@ -288,8 +251,12 @@ static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
                goto out;
 
        /* Limit if icmp type is enabled in ratemask. */
-       if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
-               rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
+       if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
+               if (!rt->peer)
+                       rt_bind_peer(rt, 1);
+               rc = inet_peer_xrlim_allow(rt->peer,
+                                          net->ipv4.sysctl_icmp_ratelimit);
+       }
 out:
        return rc;
 }
@@ -386,12 +353,15 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
                        daddr = icmp_param->replyopts.faddr;
        }
        {
-               struct flowi fl = { .fl4_dst= daddr,
-                                   .fl4_src = rt->rt_spec_dst,
-                                   .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
-                                   .proto = IPPROTO_ICMP };
-               security_skb_classify_flow(skb, &fl);
-               if (ip_route_output_key(net, &rt, &fl))
+               struct flowi4 fl4 = {
+                       .daddr = daddr,
+                       .saddr = rt->rt_spec_dst,
+                       .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+                       .flowi4_proto = IPPROTO_ICMP,
+               };
+               security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+               rt = ip_route_output_key(net, &fl4);
+               if (IS_ERR(rt))
                        goto out_unlock;
        }
        if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
@@ -402,6 +372,97 @@ out_unlock:
        icmp_xmit_unlock(sk);
 }
 
+static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
+                                       struct iphdr *iph,
+                                       __be32 saddr, u8 tos,
+                                       int type, int code,
+                                       struct icmp_bxm *param)
+{
+       struct flowi4 fl4 = {
+               .daddr = (param->replyopts.srr ?
+                         param->replyopts.faddr : iph->saddr),
+               .saddr = saddr,
+               .flowi4_tos = RT_TOS(tos),
+               .flowi4_proto = IPPROTO_ICMP,
+               .fl4_icmp_type = type,
+               .fl4_icmp_code = code,
+       };
+       struct rtable *rt, *rt2;
+       int err;
+
+       security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
+       rt = __ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt))
+               return rt;
+
+       /* No need to clone since we're just using its address. */
+       rt2 = rt;
+
+       if (!fl4.saddr)
+               fl4.saddr = rt->rt_src;
+
+       rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
+                                          flowi4_to_flowi(&fl4), NULL, 0);
+       if (!IS_ERR(rt)) {
+               if (rt != rt2)
+                       return rt;
+       } else if (PTR_ERR(rt) == -EPERM) {
+               rt = NULL;
+       } else
+               return rt;
+
+       err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
+       if (err)
+               goto relookup_failed;
+
+       if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
+               rt2 = __ip_route_output_key(net, &fl4);
+               if (IS_ERR(rt2))
+                       err = PTR_ERR(rt2);
+       } else {
+               struct flowi4 fl4_2 = {};
+               unsigned long orefdst;
+
+               fl4_2.daddr = fl4.saddr;
+               rt2 = ip_route_output_key(net, &fl4_2);
+               if (IS_ERR(rt2)) {
+                       err = PTR_ERR(rt2);
+                       goto relookup_failed;
+               }
+               /* Ugh! */
+               orefdst = skb_in->_skb_refdst; /* save old refdst */
+               err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
+                                    RT_TOS(tos), rt2->dst.dev);
+
+               dst_release(&rt2->dst);
+               rt2 = skb_rtable(skb_in);
+               skb_in->_skb_refdst = orefdst; /* restore old refdst */
+       }
+
+       if (err)
+               goto relookup_failed;
+
+       rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
+                                           flowi4_to_flowi(&fl4), NULL,
+                                           XFRM_LOOKUP_ICMP);
+       if (!IS_ERR(rt2)) {
+               dst_release(&rt->dst);
+               rt = rt2;
+       } else if (PTR_ERR(rt2) == -EPERM) {
+               if (rt)
+                       dst_release(&rt->dst);
+               return rt2;
+       } else {
+               err = PTR_ERR(rt2);
+               goto relookup_failed;
+       }
+       return rt;
+
+relookup_failed:
+       if (rt)
+               return rt;
+       return ERR_PTR(err);
+}
 
 /*
  *     Send an ICMP message in response to a situation
@@ -507,7 +568,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                rcu_read_lock();
                if (rt_is_input_route(rt) &&
                    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
-                       dev = dev_get_by_index_rcu(net, rt->fl.iif);
+                       dev = dev_get_by_index_rcu(net, rt->rt_iif);
 
                if (dev)
                        saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -539,86 +600,11 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        ipc.opt = &icmp_param.replyopts;
        ipc.tx_flags = 0;
 
-       {
-               struct flowi fl = {
-                       .fl4_dst = icmp_param.replyopts.srr ?
-                                  icmp_param.replyopts.faddr : iph->saddr,
-                       .fl4_src = saddr,
-                       .fl4_tos = RT_TOS(tos),
-                       .proto = IPPROTO_ICMP,
-                       .fl_icmp_type = type,
-                       .fl_icmp_code = code,
-               };
-               int err;
-               struct rtable *rt2;
-
-               security_skb_classify_flow(skb_in, &fl);
-               if (__ip_route_output_key(net, &rt, &fl))
-                       goto out_unlock;
-
-               /* No need to clone since we're just using its address. */
-               rt2 = rt;
-
-               if (!fl.nl_u.ip4_u.saddr)
-                       fl.nl_u.ip4_u.saddr = rt->rt_src;
-
-               err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
-               switch (err) {
-               case 0:
-                       if (rt != rt2)
-                               goto route_done;
-                       break;
-               case -EPERM:
-                       rt = NULL;
-                       break;
-               default:
-                       goto out_unlock;
-               }
-
-               if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
-                       goto relookup_failed;
-
-               if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
-                       err = __ip_route_output_key(net, &rt2, &fl);
-               else {
-                       struct flowi fl2 = {};
-                       unsigned long orefdst;
-
-                       fl2.fl4_dst = fl.fl4_src;
-                       if (ip_route_output_key(net, &rt2, &fl2))
-                               goto relookup_failed;
-
-                       /* Ugh! */
-                       orefdst = skb_in->_skb_refdst; /* save old refdst */
-                       err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
-                                            RT_TOS(tos), rt2->dst.dev);
-
-                       dst_release(&rt2->dst);
-                       rt2 = skb_rtable(skb_in);
-                       skb_in->_skb_refdst = orefdst; /* restore old refdst */
-               }
-
-               if (err)
-                       goto relookup_failed;
-
-               err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
-                                 XFRM_LOOKUP_ICMP);
-               switch (err) {
-               case 0:
-                       dst_release(&rt->dst);
-                       rt = rt2;
-                       break;
-               case -EPERM:
-                       goto ende;
-               default:
-relookup_failed:
-                       if (!rt)
-                               goto out_unlock;
-                       break;
-               }
-       }
+       rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
+                              type, code, &icmp_param);
+       if (IS_ERR(rt))
+               goto out_unlock;
 
-route_done:
        if (!icmpv4_xrlim_allow(net, rt, type, code))
                goto ende;
 
index e0e77e2..1fd3d9c 100644
@@ -321,14 +321,12 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        }
        igmp_skb_size(skb) = size;
 
-       {
-               struct flowi fl = { .oif = dev->ifindex,
-                                   .fl4_dst = IGMPV3_ALL_MCR,
-                                   .proto = IPPROTO_IGMP };
-               if (ip_route_output_key(net, &rt, &fl)) {
-                       kfree_skb(skb);
-                       return NULL;
-               }
+       rt = ip_route_output_ports(net, NULL, IGMPV3_ALL_MCR, 0,
+                                  0, 0,
+                                  IPPROTO_IGMP, 0, dev->ifindex);
+       if (IS_ERR(rt)) {
+               kfree_skb(skb);
+               return NULL;
        }
        if (rt->rt_src == 0) {
                kfree_skb(skb);
@@ -666,13 +664,12 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        else
                dst = group;
 
-       {
-               struct flowi fl = { .oif = dev->ifindex,
-                                   .fl4_dst = dst,
-                                   .proto = IPPROTO_IGMP };
-               if (ip_route_output_key(net, &rt, &fl))
-                       return -1;
-       }
+       rt = ip_route_output_ports(net, NULL, dst, 0,
+                                  0, 0,
+                                  IPPROTO_IGMP, 0, dev->ifindex);
+       if (IS_ERR(rt))
+               return -1;
+
        if (rt->rt_src == 0) {
                ip_rt_put(rt);
                return -1;
@@ -1439,8 +1436,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 /* RTNL is locked */
 static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
-       struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
-       struct rtable *rt;
        struct net_device *dev = NULL;
        struct in_device *idev = NULL;
 
@@ -1454,9 +1449,14 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
                        return NULL;
        }
 
-       if (!dev && !ip_route_output_key(net, &rt, &fl)) {
-               dev = rt->dst.dev;
-               ip_rt_put(rt);
+       if (!dev) {
+               struct rtable *rt = ip_route_output(net,
+                                                   imr->imr_multiaddr.s_addr,
+                                                   0, 0, 0);
+               if (!IS_ERR(rt)) {
+                       dev = rt->dst.dev;
+                       ip_rt_put(rt);
+               }
        }
        if (dev) {
                imr->imr_ifindex = dev->ifindex;
@@ -2329,13 +2329,13 @@ void ip_mc_drop_socket(struct sock *sk)
        rtnl_unlock();
 }
 
-int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
+/* called with rcu_read_lock() */
+int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
 {
        struct ip_mc_list *im;
        struct ip_sf_list *psf;
        int rv = 0;
 
-       rcu_read_lock();
        for_each_pmc_rcu(in_dev, im) {
                if (im->multiaddr == mc_addr)
                        break;
@@ -2357,7 +2357,6 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
                } else
                        rv = 1; /* unspecified source; tentatively allow */
        }
-       rcu_read_unlock();
        return rv;
 }
 
index 97e5fb7..6c0b7f4 100644
@@ -356,20 +356,23 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
        struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct ip_options *opt = inet_rsk(req)->opt;
-       struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                           .mark = sk->sk_mark,
-                           .fl4_dst = ((opt && opt->srr) ?
-                                         opt->faddr : ireq->rmt_addr),
-                           .fl4_src = ireq->loc_addr,
-                           .fl4_tos = RT_CONN_FLAGS(sk),
-                           .proto = sk->sk_protocol,
-                           .flags = inet_sk_flowi_flags(sk),
-                           .fl_ip_sport = inet_sk(sk)->inet_sport,
-                           .fl_ip_dport = ireq->rmt_port };
+       struct flowi4 fl4 = {
+               .flowi4_oif = sk->sk_bound_dev_if,
+               .flowi4_mark = sk->sk_mark,
+               .daddr = ((opt && opt->srr) ?
+                         opt->faddr : ireq->rmt_addr),
+               .saddr = ireq->loc_addr,
+               .flowi4_tos = RT_CONN_FLAGS(sk),
+               .flowi4_proto = sk->sk_protocol,
+               .flowi4_flags = inet_sk_flowi_flags(sk),
+               .fl4_sport = inet_sk(sk)->inet_sport,
+               .fl4_dport = ireq->rmt_port,
+       };
        struct net *net = sock_net(sk);
 
-       security_req_classify_flow(req, &fl);
-       if (ip_route_output_flow(net, &rt, &fl, sk, 0))
+       security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+       rt = ip_route_output_flow(net, &fl4, sk);
+       if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto route_err;
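
inet_csk_route_req() above is a representative instance of the flowi to flowi4 conversion that runs through the whole series: the IPv4 keys get their own flat structure, and legacy consumers such as the security hooks receive it through flowi4_to_flowi(). A sketch of the renaming, pairing each new flowi4 member with the old flowi spelling it replaces in the hunk above (the helper itself is hypothetical):

        #include <linux/in.h>
        #include <linux/types.h>
        #include <net/flow.h>

        static void demo_fill_flowi4(struct flowi4 *fl4, int oif, u32 mark,
                                     __be32 daddr, __be32 saddr, u8 tos,
                                     __be16 sport, __be16 dport)
        {
                *fl4 = (struct flowi4) {
                        .flowi4_oif   = oif,            /* was .oif          */
                        .flowi4_mark  = mark,           /* was .mark         */
                        .daddr        = daddr,          /* was .fl4_dst      */
                        .saddr        = saddr,          /* was .fl4_src      */
                        .flowi4_tos   = tos,            /* was .fl4_tos      */
                        .flowi4_proto = IPPROTO_UDP,    /* was .proto        */
                        .fl4_sport    = sport,          /* was .fl_ip_sport  */
                        .fl4_dport    = dport,          /* was .fl_ip_dport  */
                };
        }
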
index c5af909..3c8dfa1 100644
@@ -505,7 +505,9 @@ restart:
                        }
 
                        rcu_read_unlock();
+                       local_bh_disable();
                        inet_twsk_deschedule(tw, twdr);
+                       local_bh_enable();
                        inet_twsk_put(tw);
                        goto restart_rcu;
                }
index d9bc857..dd1b20e 100644
@@ -81,19 +81,19 @@ static const struct inet_peer peer_fake_node = {
 
 struct inet_peer_base {
        struct inet_peer __rcu *root;
-       spinlock_t      lock;
+       seqlock_t       lock;
        int             total;
 };
 
 static struct inet_peer_base v4_peers = {
        .root           = peer_avl_empty_rcu,
-       .lock           = __SPIN_LOCK_UNLOCKED(v4_peers.lock),
+       .lock           = __SEQLOCK_UNLOCKED(v4_peers.lock),
        .total          = 0,
 };
 
 static struct inet_peer_base v6_peers = {
        .root           = peer_avl_empty_rcu,
-       .lock           = __SPIN_LOCK_UNLOCKED(v6_peers.lock),
+       .lock           = __SEQLOCK_UNLOCKED(v6_peers.lock),
        .total          = 0,
 };
 
@@ -167,9 +167,9 @@ static int addr_compare(const struct inetpeer_addr *a,
        int i, n = (a->family == AF_INET ? 1 : 4);
 
        for (i = 0; i < n; i++) {
-               if (a->a6[i] == b->a6[i])
+               if (a->addr.a6[i] == b->addr.a6[i])
                        continue;
-               if (a->a6[i] < b->a6[i])
+               if (a->addr.a6[i] < b->addr.a6[i])
                        return -1;
                return 1;
        }
@@ -177,6 +177,9 @@ static int addr_compare(const struct inetpeer_addr *a,
        return 0;
 }
 
+#define rcu_deref_locked(X, BASE)                              \
+       rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
+
 /*
  * Called with local BH disabled and the pool lock held.
  */
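
rcu_deref_locked(), added above, spells the writer-side RCU access once: rcu_dereference_protected() tells RCU that no read-side protection is needed because the update lock is held, and lockdep_is_held() lets lockdep verify that claim. The extra .lock is there because base->lock is now a seqlock_t and lockdep tracks the spinlock embedded in it. A generic sketch of the same idiom on a plain spinlock, with hypothetical names:

        #include <linux/rcupdate.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct cfg {
                int value;
        };

        static DEFINE_SPINLOCK(cfg_lock);
        static struct cfg __rcu *cur_cfg;

        /* Writer side: the lock is held, so the "protected" accessor is enough. */
        static void cfg_replace(struct cfg *newc)
        {
                struct cfg *old;

                spin_lock(&cfg_lock);
                old = rcu_dereference_protected(cur_cfg, lockdep_is_held(&cfg_lock));
                rcu_assign_pointer(cur_cfg, newc);
                spin_unlock(&cfg_lock);

                if (old) {
                        synchronize_rcu();      /* wait out readers still using 'old' */
                        kfree(old);
                }
        }
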
@@ -187,8 +190,7 @@ static int addr_compare(const struct inetpeer_addr *a,
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &_base->root;                             \
-       for (u = rcu_dereference_protected(_base->root,         \
-                       lockdep_is_held(&_base->lock));         \
+       for (u = rcu_deref_locked(_base->root, _base);          \
             u != peer_avl_empty; ) {                           \
                int cmp = addr_compare(_daddr, &u->daddr);      \
                if (cmp == 0)                                   \
@@ -198,23 +200,22 @@ static int addr_compare(const struct inetpeer_addr *a,
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
-               u = rcu_dereference_protected(*v,               \
-                       lockdep_is_held(&_base->lock));         \
+               u = rcu_deref_locked(*v, _base);                \
        }                                                       \
        u;                                                      \
 })
 
 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
-                                      struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+                                   struct inet_peer_base *base)
 {
-       struct inet_peer *u = rcu_dereference_bh(base->root);
+       struct inet_peer *u = rcu_dereference(base->root);
        int count = 0;
 
        while (u != peer_avl_empty) {
@@ -230,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
                        return u;
                }
                if (cmp == -1)
-                       u = rcu_dereference_bh(u->avl_left);
+                       u = rcu_dereference(u->avl_left);
                else
-                       u = rcu_dereference_bh(u->avl_right);
+                       u = rcu_dereference(u->avl_right);
                if (unlikely(++count == PEER_MAXDEPTH))
                        break;
        }
@@ -246,13 +247,11 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
        struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
-       for (u = rcu_dereference_protected(*v,                  \
-                       lockdep_is_held(&base->lock));          \
+       for (u = rcu_deref_locked(*v, base);                    \
             u->avl_right != peer_avl_empty_rcu; ) {            \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
-               u = rcu_dereference_protected(*v,               \
-                       lockdep_is_held(&base->lock));          \
+               u = rcu_deref_locked(*v, base);                 \
        }                                                       \
        u;                                                      \
 })
@@ -271,21 +270,16 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
 
        while (stackend > stack) {
                nodep = *--stackend;
-               node = rcu_dereference_protected(*nodep,
-                               lockdep_is_held(&base->lock));
-               l = rcu_dereference_protected(node->avl_left,
-                               lockdep_is_held(&base->lock));
-               r = rcu_dereference_protected(node->avl_right,
-                               lockdep_is_held(&base->lock));
+               node = rcu_deref_locked(*nodep, base);
+               l = rcu_deref_locked(node->avl_left, base);
+               r = rcu_deref_locked(node->avl_right, base);
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
-                       ll = rcu_dereference_protected(l->avl_left,
-                               lockdep_is_held(&base->lock));
-                       lr = rcu_dereference_protected(l->avl_right,
-                               lockdep_is_held(&base->lock));
+                       ll = rcu_deref_locked(l->avl_left, base);
+                       lr = rcu_deref_locked(l->avl_right, base);
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {   /* ll: RH+1 */
                                RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
@@ -296,10 +290,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                                l->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, l);
                        } else { /* ll: RH, lr: RH+1 */
-                               lrl = rcu_dereference_protected(lr->avl_left,
-                                       lockdep_is_held(&base->lock));  /* lrl: RH or RH-1 */
-                               lrr = rcu_dereference_protected(lr->avl_right,
-                                       lockdep_is_held(&base->lock));  /* lrr: RH or RH-1 */
+                               lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
+                               lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1; /* node: RH+1 */
@@ -314,10 +306,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
-                       rr = rcu_dereference_protected(r->avl_right,
-                               lockdep_is_held(&base->lock));
-                       rl = rcu_dereference_protected(r->avl_left,
-                               lockdep_is_held(&base->lock));
+                       rr = rcu_deref_locked(r->avl_right, base);
+                       rl = rcu_deref_locked(r->avl_left, base);
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {   /* rr: LH+1 */
                                RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
@@ -328,10 +318,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                                r->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, r);
                        } else { /* rr: RH, rl: RH+1 */
-                               rlr = rcu_dereference_protected(rl->avl_right,
-                                       lockdep_is_held(&base->lock));  /* rlr: LH or LH-1 */
-                               rll = rcu_dereference_protected(rl->avl_left,
-                                       lockdep_is_held(&base->lock));  /* rll: LH or LH-1 */
+                               rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
+                               rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1; /* node: LH+1 */
@@ -372,7 +360,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 
        do_free = 0;
 
-       spin_lock_bh(&base->lock);
+       write_seqlock_bh(&base->lock);
        /* Check the reference counter.  It was artificially incremented by 1
         * in cleanup() function to prevent sudden disappearing.  If we can
         * atomically (because of lockless readers) take this last reference,
@@ -392,8 +380,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p, base);
-                       BUG_ON(rcu_dereference_protected(*stackptr[-1],
-                                       lockdep_is_held(&base->lock)) != t);
+                       BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->daddr > x->daddr for any
                         * x in p->avl_left subtree.
@@ -409,10 +396,10 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
                base->total--;
                do_free = 1;
        }
-       spin_unlock_bh(&base->lock);
+       write_sequnlock_bh(&base->lock);
 
        if (do_free)
-               call_rcu_bh(&p->rcu, inetpeer_free_rcu);
+               call_rcu(&p->rcu, inetpeer_free_rcu);
        else
                /* The node is used again.  Decrease the reference counter
                 * back.  The loop "cleanup -> unlink_from_unused
@@ -475,15 +462,19 @@ static int cleanup_once(unsigned long ttl)
 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
        struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-       struct inet_peer_base *base = family_to_base(AF_INET);
+       struct inet_peer_base *base = family_to_base(daddr->family);
        struct inet_peer *p;
+       unsigned int sequence;
+       int invalidated;
 
        /* Look up for the address quickly, lockless.
         * Because of a concurrent writer, we might not find an existing entry.
         */
-       rcu_read_lock_bh();
-       p = lookup_rcu_bh(daddr, base);
-       rcu_read_unlock_bh();
+       rcu_read_lock();
+       sequence = read_seqbegin(&base->lock);
+       p = lookup_rcu(daddr, base);
+       invalidated = read_seqretry(&base->lock, sequence);
+       rcu_read_unlock();
 
        if (p) {
                /* The existing node has been found.
@@ -493,14 +484,18 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
                return p;
        }
 
+       /* If no writer did a change during our lookup, we can return early. */
+       if (!create && !invalidated)
+               return NULL;
+
        /* retry an exact lookup, taking the lock before.
         * At least, nodes should be hot in our cache.
         */
-       spin_lock_bh(&base->lock);
+       write_seqlock_bh(&base->lock);
        p = lookup(daddr, stack, base);
        if (p != peer_avl_empty) {
                atomic_inc(&p->refcnt);
-               spin_unlock_bh(&base->lock);
+               write_sequnlock_bh(&base->lock);
                /* Remove the entry from unused list if it was there. */
                unlink_from_unused(p);
                return p;
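
The hunks above replace the inetpeer base spinlock with a seqlock: lookups stay lockless, but inet_getpeer() now records, via read_seqbegin()/read_seqretry(), whether a writer overlapped the search, so a miss can be trusted (and a create-less call can return NULL early) only when nothing changed underneath it. A minimal sketch of the underlying seqlock reader/writer pairing:

        #include <linux/seqlock.h>

        static seqlock_t demo_lock = __SEQLOCK_UNLOCKED(demo_lock);
        static int demo_a, demo_b;              /* must always be observed as a pair */

        /* Writer: exclusive, bumps the sequence count around the update. */
        static void demo_update(int a, int b)
        {
                write_seqlock_bh(&demo_lock);
                demo_a = a;
                demo_b = b;
                write_sequnlock_bh(&demo_lock);
        }

        /* Reader: lockless, retries if a writer overlapped the read. */
        static int demo_sum(void)
        {
                unsigned int seq;
                int sum;

                do {
                        seq = read_seqbegin(&demo_lock);
                        sum = demo_a + demo_b;
                } while (read_seqretry(&demo_lock, seq));

                return sum;
        }

inet_getpeer() uses the same primitives but does not loop; it only keeps the read_seqretry() result ("invalidated") to decide whether its negative lookup result is still meaningful.
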
@@ -510,8 +505,14 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
                p->daddr = *daddr;
                atomic_set(&p->refcnt, 1);
                atomic_set(&p->rid, 0);
-               atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
+               atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
                p->tcp_ts_stamp = 0;
+               p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+               p->rate_tokens = 0;
+               p->rate_last = 0;
+               p->pmtu_expires = 0;
+               p->pmtu_orig = 0;
+               memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
                INIT_LIST_HEAD(&p->unused);
 
 
@@ -519,7 +520,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
                link_to_pool(p, base);
                base->total++;
        }
-       spin_unlock_bh(&base->lock);
+       write_sequnlock_bh(&base->lock);
 
        if (base->total >= inet_peer_threshold)
                /* Remove one less-recently-used entry. */
@@ -579,3 +580,44 @@ void inet_putpeer(struct inet_peer *p)
        local_bh_enable();
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
+
+/*
+ *     Check transmit rate limitation for given message.
+ *     The rate information is held in the inet_peer entries now.
+ *     This function is generic and could be used for other purposes
+ *     too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
+ *
+ *     Note that the same inet_peer fields are modified by functions in
+ *     route.c too, but these work for packet destinations while xrlim_allow
+ *     works for icmp destinations. This means the rate limiting information
+ *     for one "ip object" is shared - and these ICMPs are twice limited:
+ *     by source and by destination.
+ *
+ *     RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
+ *                       SHOULD allow setting of rate limits
+ *
+ *     Shared between ICMPv4 and ICMPv6.
+ */
+#define XRLIM_BURST_FACTOR 6
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+{
+       unsigned long now, token;
+       bool rc = false;
+
+       if (!peer)
+               return true;
+
+       token = peer->rate_tokens;
+       now = jiffies;
+       token += now - peer->rate_last;
+       peer->rate_last = now;
+       if (token > XRLIM_BURST_FACTOR * timeout)
+               token = XRLIM_BURST_FACTOR * timeout;
+       if (token >= timeout) {
+               token -= timeout;
+               rc = true;
+       }
+       peer->rate_tokens = token;
+       return rc;
+}
+EXPORT_SYMBOL(inet_peer_xrlim_allow);
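
inet_peer_xrlim_allow() above is the token bucket that used to live in icmp.c as xrlim_allow(), now keyed on the inet_peer instead of the dst entry: tokens accrue with elapsed jiffies, are capped at XRLIM_BURST_FACTOR * timeout, and each permitted ICMP spends one timeout's worth. A self-contained userspace rendering of the same arithmetic, with whole seconds standing in for jiffies:

        #include <stdbool.h>
        #include <stdio.h>

        #define XRLIM_BURST_FACTOR 6

        struct peer {
                unsigned long rate_tokens;      /* accumulated credit, in seconds */
                unsigned long rate_last;        /* last time the bucket was updated */
        };

        /* Same token-bucket logic as inet_peer_xrlim_allow(), seconds for jiffies. */
        static bool xrlim_allow(struct peer *peer, unsigned long timeout,
                                unsigned long now)
        {
                unsigned long token = peer->rate_tokens;
                bool rc = false;

                token += now - peer->rate_last;
                peer->rate_last = now;
                if (token > XRLIM_BURST_FACTOR * timeout)
                        token = XRLIM_BURST_FACTOR * timeout;
                if (token >= timeout) {
                        token -= timeout;
                        rc = true;
                }
                peer->rate_tokens = token;
                return rc;
        }

        int main(void)
        {
                struct peer p = { .rate_tokens = 0, .rate_last = 0 };
                unsigned long t;

                /* With timeout = 1s, a burst of 6 is allowed, then 1 per second. */
                for (t = 10; t < 13; t++) {
                        int allowed = 0, i;

                        for (i = 0; i < 10; i++)
                                allowed += xrlim_allow(&p, 1, t);
                        printf("second %lu: %d allowed\n", t, allowed);
                }
                return 0;
        }
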
index eb68a0e..da5941f 100644
@@ -769,18 +769,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
        }
 
-       {
-               struct flowi fl = {
-                       .oif = tunnel->parms.link,
-                       .fl4_dst = dst,
-                       .fl4_src = tiph->saddr,
-                       .fl4_tos = RT_TOS(tos),
-                       .fl_gre_key = tunnel->parms.o_key
-               };
-               if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
-                       dev->stats.tx_carrier_errors++;
-                       goto tx_error;
-               }
+       rt = ip_route_output_gre(dev_net(dev), dst, tiph->saddr,
+                                tunnel->parms.o_key, RT_TOS(tos),
+                                tunnel->parms.link);
+       if (IS_ERR(rt)) {
+               dev->stats.tx_carrier_errors++;
+               goto tx_error;
        }
        tdev = rt->dst.dev;
 
@@ -944,17 +938,13 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
        /* Guess output device to choose reasonable mtu and needed_headroom */
 
        if (iph->daddr) {
-               struct flowi fl = {
-                       .oif = tunnel->parms.link,
-                       .fl4_dst = iph->daddr,
-                       .fl4_src = iph->saddr,
-                       .fl4_tos = RT_TOS(iph->tos),
-                       .proto = IPPROTO_GRE,
-                       .fl_gre_key = tunnel->parms.o_key
-               };
-               struct rtable *rt;
-
-               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               struct rtable *rt = ip_route_output_gre(dev_net(dev),
+                                                       iph->daddr, iph->saddr,
+                                                       tunnel->parms.o_key,
+                                                       RT_TOS(iph->tos),
+                                                       tunnel->parms.link);
+
+               if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
@@ -1206,17 +1196,14 @@ static int ipgre_open(struct net_device *dev)
        struct ip_tunnel *t = netdev_priv(dev);
 
        if (ipv4_is_multicast(t->parms.iph.daddr)) {
-               struct flowi fl = {
-                       .oif = t->parms.link,
-                       .fl4_dst = t->parms.iph.daddr,
-                       .fl4_src = t->parms.iph.saddr,
-                       .fl4_tos = RT_TOS(t->parms.iph.tos),
-                       .proto = IPPROTO_GRE,
-                       .fl_gre_key = t->parms.o_key
-               };
-               struct rtable *rt;
-
-               if (ip_route_output_key(dev_net(dev), &rt, &fl))
+               struct rtable *rt = ip_route_output_gre(dev_net(dev),
+                                                       t->parms.iph.daddr,
+                                                       t->parms.iph.saddr,
+                                                       t->parms.o_key,
+                                                       RT_TOS(t->parms.iph.tos),
+                                                       t->parms.link);
+
+               if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
@@ -1764,4 +1751,4 @@ module_exit(ipgre_fini);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("gre");
 MODULE_ALIAS_RTNL_LINK("gretap");
-MODULE_ALIAS("gre0");
+MODULE_ALIAS_NETDEV("gre0");
index d859bcc..d7b2b09 100644
@@ -340,7 +340,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
                }
        }
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (unlikely(skb_dst(skb)->tclassid)) {
                struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
                u32 idx = skb_dst(skb)->tclassid;
index 04c7b3b..67f241b 100644
@@ -339,25 +339,19 @@ int ip_queue_xmit(struct sk_buff *skb)
                if(opt && opt->srr)
                        daddr = opt->faddr;
 
-               {
-                       struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                                           .mark = sk->sk_mark,
-                                           .fl4_dst = daddr,
-                                           .fl4_src = inet->inet_saddr,
-                                           .fl4_tos = RT_CONN_FLAGS(sk),
-                                           .proto = sk->sk_protocol,
-                                           .flags = inet_sk_flowi_flags(sk),
-                                           .fl_ip_sport = inet->inet_sport,
-                                           .fl_ip_dport = inet->inet_dport };
-
-                       /* If this fails, retransmit mechanism of transport layer will
-                        * keep trying until route appears or the connection times
-                        * itself out.
-                        */
-                       security_sk_classify_flow(sk, &fl);
-                       if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
-                               goto no_route;
-               }
+               /* If this fails, retransmit mechanism of transport layer will
+                * keep trying until route appears or the connection times
+                * itself out.
+                */
+               rt = ip_route_output_ports(sock_net(sk), sk,
+                                          daddr, inet->inet_saddr,
+                                          inet->inet_dport,
+                                          inet->inet_sport,
+                                          sk->sk_protocol,
+                                          RT_CONN_FLAGS(sk),
+                                          sk->sk_bound_dev_if);
+               if (IS_ERR(rt))
+                       goto no_route;
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set_noref(skb, &rt->dst);
@@ -733,6 +727,7 @@ csum_page(struct page *page, int offset, int copy)
 }
 
 static inline int ip_ufo_append_data(struct sock *sk,
+                       struct sk_buff_head *queue,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
@@ -745,7 +740,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
         * device, so create one single skb packet containing complete
         * udp datagram
         */
-       if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+       if ((skb = skb_peek_tail(queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
@@ -767,40 +762,28 @@ static inline int ip_ufo_append_data(struct sock *sk,
 
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
-               sk->sk_sndmsg_off = 0;
 
                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-               __skb_queue_tail(&sk->sk_write_queue, skb);
+               __skb_queue_tail(queue, skb);
        }
 
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
 
-/*
- *     ip_append_data() and ip_append_page() can make one large IP datagram
- *     from many pieces of data. Each pieces will be holded on the socket
- *     until ip_push_pending_frames() is called. Each piece can be a page
- *     or non-page data.
- *
- *     Not only UDP, other transport protocols - e.g. raw sockets - can use
- *     this interface potentially.
- *
- *     LATER: length must be adjusted by pad at tail, when it is required.
- */
-int ip_append_data(struct sock *sk,
-                  int getfrag(void *from, char *to, int offset, int len,
-                              int odd, struct sk_buff *skb),
-                  void *from, int length, int transhdrlen,
-                  struct ipcm_cookie *ipc, struct rtable **rtp,
-                  unsigned int flags)
+static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
+                           struct inet_cork *cork,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length, int transhdrlen,
+                           unsigned int flags)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
 
-       struct ip_options *opt = NULL;
+       struct ip_options *opt = cork->opt;
        int hh_len;
        int exthdrlen;
        int mtu;
@@ -809,58 +792,19 @@ int ip_append_data(struct sock *sk,
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
-       struct rtable *rt;
+       struct rtable *rt = (struct rtable *)cork->dst;
 
-       if (flags&MSG_PROBE)
-               return 0;
-
-       if (skb_queue_empty(&sk->sk_write_queue)) {
-               /*
-                * setup for corking.
-                */
-               opt = ipc->opt;
-               if (opt) {
-                       if (inet->cork.opt == NULL) {
-                               inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
-                               if (unlikely(inet->cork.opt == NULL))
-                                       return -ENOBUFS;
-                       }
-                       memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
-                       inet->cork.flags |= IPCORK_OPT;
-                       inet->cork.addr = ipc->addr;
-               }
-               rt = *rtp;
-               if (unlikely(!rt))
-                       return -EFAULT;
-               /*
-                * We steal reference to this route, caller should not release it
-                */
-               *rtp = NULL;
-               inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-                                           rt->dst.dev->mtu :
-                                           dst_mtu(rt->dst.path);
-               inet->cork.dst = &rt->dst;
-               inet->cork.length = 0;
-               sk->sk_sndmsg_page = NULL;
-               sk->sk_sndmsg_off = 0;
-               exthdrlen = rt->dst.header_len;
-               length += exthdrlen;
-               transhdrlen += exthdrlen;
-       } else {
-               rt = (struct rtable *)inet->cork.dst;
-               if (inet->cork.flags & IPCORK_OPT)
-                       opt = inet->cork.opt;
+       exthdrlen = transhdrlen ? rt->dst.header_len : 0;
+       length += exthdrlen;
+       transhdrlen += exthdrlen;
+       mtu = cork->fragsize;
 
-               transhdrlen = 0;
-               exthdrlen = 0;
-               mtu = inet->cork.fragsize;
-       }
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 
-       if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
+       if (cork->length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
                               mtu-exthdrlen);
                return -EMSGSIZE;
@@ -876,15 +820,15 @@ int ip_append_data(struct sock *sk,
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;
 
-       skb = skb_peek_tail(&sk->sk_write_queue);
+       skb = skb_peek_tail(queue);
 
-       inet->cork.length += length;
+       cork->length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
-               err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
-                                        fragheaderlen, transhdrlen, mtu,
-                                        flags);
+               err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+                                        hh_len, fragheaderlen, transhdrlen,
+                                        mtu, flags);
                if (err)
                        goto error;
                return 0;
@@ -961,7 +905,7 @@ alloc_new_skb:
                                else
                                        /* only the initial fragment is
                                           time stamped */
-                                       ipc->tx_flags = 0;
+                                       cork->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;
@@ -972,7 +916,7 @@ alloc_new_skb:
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
-                       skb_shinfo(skb)->tx_flags = ipc->tx_flags;
+                       skb_shinfo(skb)->tx_flags = cork->tx_flags;
 
                        /*
                         *      Find where to start putting bytes.
@@ -1009,7 +953,7 @@ alloc_new_skb:
                        /*
                         * Put the packet on the pending queue.
                         */
-                       __skb_queue_tail(&sk->sk_write_queue, skb);
+                       __skb_queue_tail(queue, skb);
                        continue;
                }
 
@@ -1029,8 +973,8 @@ alloc_new_skb:
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
-                       struct page *page = sk->sk_sndmsg_page;
-                       int off = sk->sk_sndmsg_off;
+                       struct page *page = cork->page;
+                       int off = cork->off;
                        unsigned int left;
 
                        if (page && (left = PAGE_SIZE - off) > 0) {
@@ -1042,7 +986,7 @@ alloc_new_skb:
                                                goto error;
                                        }
                                        get_page(page);
-                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+                                       skb_fill_page_desc(skb, i, page, off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
@@ -1053,8 +997,8 @@ alloc_new_skb:
                                        err = -ENOMEM;
                                        goto error;
                                }
-                               sk->sk_sndmsg_page = page;
-                               sk->sk_sndmsg_off = 0;
+                               cork->page = page;
+                               cork->off = 0;
 
                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
@@ -1066,7 +1010,7 @@ alloc_new_skb:
                                err = -EFAULT;
                                goto error;
                        }
-                       sk->sk_sndmsg_off += copy;
+                       cork->off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
@@ -1080,11 +1024,87 @@ alloc_new_skb:
        return 0;
 
 error:
-       inet->cork.length -= length;
+       cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
 }
 
+static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+                        struct ipcm_cookie *ipc, struct rtable **rtp)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ip_options *opt;
+       struct rtable *rt;
+
+       /*
+        * setup for corking.
+        */
+       opt = ipc->opt;
+       if (opt) {
+               if (cork->opt == NULL) {
+                       cork->opt = kmalloc(sizeof(struct ip_options) + 40,
+                                           sk->sk_allocation);
+                       if (unlikely(cork->opt == NULL))
+                               return -ENOBUFS;
+               }
+               memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
+               cork->flags |= IPCORK_OPT;
+               cork->addr = ipc->addr;
+       }
+       rt = *rtp;
+       if (unlikely(!rt))
+               return -EFAULT;
+       /*
+        * We steal reference to this route, caller should not release it
+        */
+       *rtp = NULL;
+       cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
+                        rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+       cork->dst = &rt->dst;
+       cork->length = 0;
+       cork->tx_flags = ipc->tx_flags;
+       cork->page = NULL;
+       cork->off = 0;
+
+       return 0;
+}
+
+/*
+ *     ip_append_data() and ip_append_page() can make one large IP datagram
+ *     from many pieces of data. Each pieces will be holded on the socket
+ *     until ip_push_pending_frames() is called. Each piece can be a page
+ *     or non-page data.
+ *
+ *     Not only UDP, other transport protocols - e.g. raw sockets - can use
+ *     this interface potentially.
+ *
+ *     LATER: length must be adjusted by pad at tail, when it is required.
+ */
+int ip_append_data(struct sock *sk,
+                  int getfrag(void *from, char *to, int offset, int len,
+                              int odd, struct sk_buff *skb),
+                  void *from, int length, int transhdrlen,
+                  struct ipcm_cookie *ipc, struct rtable **rtp,
+                  unsigned int flags)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       int err;
+
+       if (flags&MSG_PROBE)
+               return 0;
+
+       if (skb_queue_empty(&sk->sk_write_queue)) {
+               err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
+               if (err)
+                       return err;
+       } else {
+               transhdrlen = 0;
+       }
+
+       return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
+                               from, length, transhdrlen, flags);
+}
+
 ssize_t        ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
 {
@@ -1228,40 +1248,41 @@ error:
        return err;
 }
 
-static void ip_cork_release(struct inet_sock *inet)
+static void ip_cork_release(struct inet_cork *cork)
 {
-       inet->cork.flags &= ~IPCORK_OPT;
-       kfree(inet->cork.opt);
-       inet->cork.opt = NULL;
-       dst_release(inet->cork.dst);
-       inet->cork.dst = NULL;
+       cork->flags &= ~IPCORK_OPT;
+       kfree(cork->opt);
+       cork->opt = NULL;
+       dst_release(cork->dst);
+       cork->dst = NULL;
 }
 
 /*
  *     Combined all pending IP fragments on the socket as one IP datagram
  *     and push them out.
  */
-int ip_push_pending_frames(struct sock *sk)
+struct sk_buff *__ip_make_skb(struct sock *sk,
+                             struct sk_buff_head *queue,
+                             struct inet_cork *cork)
 {
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
-       struct rtable *rt = (struct rtable *)inet->cork.dst;
+       struct rtable *rt = (struct rtable *)cork->dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
-       int err = 0;
 
-       if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
+       if ((skb = __skb_dequeue(queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
-       while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
+       while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
@@ -1287,8 +1308,8 @@ int ip_push_pending_frames(struct sock *sk)
             ip_dont_fragment(sk, &rt->dst)))
                df = htons(IP_DF);
 
-       if (inet->cork.flags & IPCORK_OPT)
-               opt = inet->cork.opt;
+       if (cork->flags & IPCORK_OPT)
+               opt = cork->opt;
 
        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
@@ -1300,7 +1321,7 @@ int ip_push_pending_frames(struct sock *sk)
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
-               ip_options_build(skb, opt, inet->cork.addr, rt, 0);
+               ip_options_build(skb, opt, cork->addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
@@ -1316,44 +1337,95 @@ int ip_push_pending_frames(struct sock *sk)
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on dst refcount
         */
-       inet->cork.dst = NULL;
+       cork->dst = NULL;
        skb_dst_set(skb, &rt->dst);
 
        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);
 
-       /* Netfilter gets whole the not fragmented skb. */
+       ip_cork_release(cork);
+out:
+       return skb;
+}
+
+int ip_send_skb(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       int err;
+
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
-                       goto error;
+                       IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        }
 
-out:
-       ip_cork_release(inet);
        return err;
+}
 
-error:
-       IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
-       goto out;
+int ip_push_pending_frames(struct sock *sk)
+{
+       struct sk_buff *skb;
+
+       skb = ip_finish_skb(sk);
+       if (!skb)
+               return 0;
+
+       /* Netfilter gets whole the not fragmented skb. */
+       return ip_send_skb(skb);
 }
 
 /*
  *     Throw away all pending data on the socket.
  */
-void ip_flush_pending_frames(struct sock *sk)
+static void __ip_flush_pending_frames(struct sock *sk,
+                                     struct sk_buff_head *queue,
+                                     struct inet_cork *cork)
 {
        struct sk_buff *skb;
 
-       while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+       while ((skb = __skb_dequeue_tail(queue)) != NULL)
                kfree_skb(skb);
 
-       ip_cork_release(inet_sk(sk));
+       ip_cork_release(cork);
+}
+
+void ip_flush_pending_frames(struct sock *sk)
+{
+       __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
 }
 
+struct sk_buff *ip_make_skb(struct sock *sk,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length, int transhdrlen,
+                           struct ipcm_cookie *ipc, struct rtable **rtp,
+                           unsigned int flags)
+{
+       struct inet_cork cork = {};
+       struct sk_buff_head queue;
+       int err;
+
+       if (flags & MSG_PROBE)
+               return NULL;
+
+       __skb_queue_head_init(&queue);
+
+       err = ip_setup_cork(sk, &cork, ipc, rtp);
+       if (err)
+               return ERR_PTR(err);
+
+       err = __ip_append_data(sk, &queue, &cork, getfrag,
+                              from, length, transhdrlen, flags);
+       if (err) {
+               __ip_flush_pending_frames(sk, &queue, &cork);
+               return ERR_PTR(err);
+       }
+
+       return __ip_make_skb(sk, &queue, &cork);
+}
 
 /*
  *     Fetch data from kernel space and fill in checksum if needed.
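
Taken together, the hunks above split the old monolithic append path into ip_setup_cork(), a __ip_append_data() that operates on an explicit (queue, cork) pair, and __ip_make_skb(); that is what lets the new ip_make_skb() assemble a complete datagram on a private queue without ever touching sk->sk_write_queue. A loose, userspace-only analogue of that structure - invented names, a plain byte queue instead of sk_buffs, no socket at all - might look like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy analogue of the (queue, cork) split: per-datagram state lives in a
 * cork object, data chunks accumulate on an explicit queue, and a final
 * "make" step flattens the queue into one message. */
struct chunk { struct chunk *next; size_t len; char data[]; };
struct queue { struct chunk *head, *tail; };
struct cork  { size_t length; };        /* stands in for struct inet_cork */

static void setup_cork(struct cork *cork)
{
        cork->length = 0;
}

static int append_data(struct queue *q, struct cork *cork,
                       const char *buf, size_t len)
{
        struct chunk *c = malloc(sizeof(*c) + len);

        if (!c)
                return -1;
        c->next = NULL;
        c->len = len;
        memcpy(c->data, buf, len);
        if (q->tail)
                q->tail->next = c;
        else
                q->head = c;
        q->tail = c;
        cork->length += len;
        return 0;
}

/* Flatten the private queue into one buffer, the way __ip_make_skb() above
 * pulls every queued fragment into a single datagram. */
static char *make_message(struct queue *q, struct cork *cork)
{
        char *msg = malloc(cork->length + 1);
        size_t off = 0;
        struct chunk *c = q->head, *next;

        if (!msg)
                return NULL;
        while (c) {
                next = c->next;
                memcpy(msg + off, c->data, c->len);
                off += c->len;
                free(c);
                c = next;
        }
        q->head = q->tail = NULL;
        msg[off] = '\0';
        return msg;
}

int main(void)
{
        struct queue q = { NULL, NULL };
        struct cork cork;
        char *msg;

        /* One-shot path, analogous to ip_make_skb(): queue and cork are
         * local to the caller rather than hanging off a socket. */
        setup_cork(&cork);
        append_data(&q, &cork, "hello ", 6);
        append_data(&q, &cork, "world", 5);
        msg = make_message(&q, &cork);
        printf("%s (%zu bytes)\n", msg, cork.length);
        free(msg);
        return 0;
}

The corked path (ip_append_data() plus ip_push_pending_frames()) and the new one-shot path (ip_make_skb() plus ip_send_skb()) can then share the same append and make steps, differing mainly in whether the queue and cork live on the socket or on the caller's stack.
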
@@ -1402,16 +1474,19 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
        }
 
        {
-               struct flowi fl = { .oif = arg->bound_dev_if,
-                                   .fl4_dst = daddr,
-                                   .fl4_src = rt->rt_spec_dst,
-                                   .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
-                                   .fl_ip_sport = tcp_hdr(skb)->dest,
-                                   .fl_ip_dport = tcp_hdr(skb)->source,
-                                   .proto = sk->sk_protocol,
-                                   .flags = ip_reply_arg_flowi_flags(arg) };
-               security_skb_classify_flow(skb, &fl);
-               if (ip_route_output_key(sock_net(sk), &rt, &fl))
+               struct flowi4 fl4 = {
+                       .flowi4_oif = arg->bound_dev_if,
+                       .daddr = daddr,
+                       .saddr = rt->rt_spec_dst,
+                       .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+                       .fl4_sport = tcp_hdr(skb)->dest,
+                       .fl4_dport = tcp_hdr(skb)->source,
+                       .flowi4_proto = sk->sk_protocol,
+                       .flowi4_flags = ip_reply_arg_flowi_flags(arg),
+               };
+               security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+               rt = ip_route_output_key(sock_net(sk), &fl4);
+               if (IS_ERR(rt))
                        return;
        }
 
index 988f52f..bfc17c5 100644 (file)
@@ -460,19 +460,14 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        goto tx_error_icmp;
        }
 
-       {
-               struct flowi fl = {
-                       .oif = tunnel->parms.link,
-                       .fl4_dst = dst,
-                       .fl4_src= tiph->saddr,
-                       .fl4_tos = RT_TOS(tos),
-                       .proto = IPPROTO_IPIP
-               };
-
-               if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
-                       dev->stats.tx_carrier_errors++;
-                       goto tx_error_icmp;
-               }
+       rt = ip_route_output_ports(dev_net(dev), NULL,
+                                  dst, tiph->saddr,
+                                  0, 0,
+                                  IPPROTO_IPIP, RT_TOS(tos),
+                                  tunnel->parms.link);
+       if (IS_ERR(rt)) {
+               dev->stats.tx_carrier_errors++;
+               goto tx_error_icmp;
        }
        tdev = rt->dst.dev;
 
@@ -583,16 +578,14 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
        iph = &tunnel->parms.iph;
 
        if (iph->daddr) {
-               struct flowi fl = {
-                       .oif = tunnel->parms.link,
-                       .fl4_dst = iph->daddr,
-                       .fl4_src = iph->saddr,
-                       .fl4_tos = RT_TOS(iph->tos),
-                       .proto = IPPROTO_IPIP
-               };
-               struct rtable *rt;
-
-               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL,
+                                                         iph->daddr, iph->saddr,
+                                                         0, 0,
+                                                         IPPROTO_IPIP,
+                                                         RT_TOS(iph->tos),
+                                                         tunnel->parms.link);
+
+               if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
@@ -913,4 +906,4 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("tunl0");
+MODULE_ALIAS_NETDEV("tunl0");
index 3f3a9af..1f62eae 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/notifier.h>
 #include <linux/if_arp.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
 #include <net/ipip.h>
 #include <net/checksum.h>
 #include <net/netlink.h>
@@ -147,14 +148,15 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
        return NULL;
 }
 
-static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
 {
        struct ipmr_result res;
        struct fib_lookup_arg arg = { .result = &res, };
        int err;
 
-       err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
+       err = fib_rules_lookup(net->ipv4.mr_rules_ops,
+                              flowi4_to_flowi(flp4), 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
@@ -282,7 +284,7 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
        return net->ipv4.mrt;
 }
 
-static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
 {
        *mrt = net->ipv4.mrt;
@@ -434,14 +436,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
-       struct flowi fl = {
-               .oif            = dev->ifindex,
-               .iif            = skb->skb_iif,
-               .mark           = skb->mark,
+       struct flowi4 fl4 = {
+               .flowi4_oif     = dev->ifindex,
+               .flowi4_iif     = skb->skb_iif,
+               .flowi4_mark    = skb->mark,
        };
        int err;
 
-       err = ipmr_fib_lookup(net, &fl, &mrt);
+       err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return err;
@@ -1434,6 +1436,81 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
        }
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req {
+       struct in_addr src;
+       struct in_addr grp;
+       compat_ulong_t pktcnt;
+       compat_ulong_t bytecnt;
+       compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_vif_req {
+       vifi_t  vifi;           /* Which iface */
+       compat_ulong_t icount;
+       compat_ulong_t ocount;
+       compat_ulong_t ibytes;
+       compat_ulong_t obytes;
+};
+
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+       struct compat_sioc_sg_req sr;
+       struct compat_sioc_vif_req vr;
+       struct vif_device *vif;
+       struct mfc_cache *c;
+       struct net *net = sock_net(sk);
+       struct mr_table *mrt;
+
+       mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+       if (mrt == NULL)
+               return -ENOENT;
+
+       switch (cmd) {
+       case SIOCGETVIFCNT:
+               if (copy_from_user(&vr, arg, sizeof(vr)))
+                       return -EFAULT;
+               if (vr.vifi >= mrt->maxvif)
+                       return -EINVAL;
+               read_lock(&mrt_lock);
+               vif = &mrt->vif_table[vr.vifi];
+               if (VIF_EXISTS(mrt, vr.vifi)) {
+                       vr.icount = vif->pkt_in;
+                       vr.ocount = vif->pkt_out;
+                       vr.ibytes = vif->bytes_in;
+                       vr.obytes = vif->bytes_out;
+                       read_unlock(&mrt_lock);
+
+                       if (copy_to_user(arg, &vr, sizeof(vr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               read_unlock(&mrt_lock);
+               return -EADDRNOTAVAIL;
+       case SIOCGETSGCNT:
+               if (copy_from_user(&sr, arg, sizeof(sr)))
+                       return -EFAULT;
+
+               rcu_read_lock();
+               c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+               if (c) {
+                       sr.pktcnt = c->mfc_un.res.pkt;
+                       sr.bytecnt = c->mfc_un.res.bytes;
+                       sr.wrong_if = c->mfc_un.res.wrong_if;
+                       rcu_read_unlock();
+
+                       if (copy_to_user(arg, &sr, sizeof(sr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               rcu_read_unlock();
+               return -EADDRNOTAVAIL;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+#endif
+
 
 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
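
The compat_sioc_vif_req/compat_sioc_sg_req structures and the new ipmr_compat_ioctl() above are needed because SIOCGETVIFCNT and SIOCGETSGCNT carry unsigned long counters, so the structure layout seen by a 32-bit userspace differs from the one a 64-bit kernel uses natively; compat_ulong_t pins the fields to the 32-bit layout. A quick userspace illustration of the size mismatch, with uint32_t standing in for compat_ulong_t and a simplified copy of the vif request:

#include <stdio.h>
#include <stdint.h>

typedef unsigned short vifi_t;

/* Native layout: on a 64-bit kernel, unsigned long is 8 bytes. */
struct sioc_vif_req {
        vifi_t vifi;
        unsigned long icount, ocount, ibytes, obytes;
};

/* What a 32-bit caller actually passes, with uint32_t standing in for
 * compat_ulong_t. */
struct compat_sioc_vif_req {
        vifi_t vifi;
        uint32_t icount, ocount, ibytes, obytes;
};

int main(void)
{
        /* On x86-64 this prints 40 vs 20: copying through the native struct
         * would overrun the buffer a 32-bit caller supplied, hence the
         * separate compat ioctl handler. */
        printf("native: %zu bytes, compat: %zu bytes\n",
               sizeof(struct sioc_vif_req),
               sizeof(struct compat_sioc_vif_req));
        return 0;
}
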
@@ -1535,26 +1612,20 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 #endif
 
        if (vif->flags & VIFF_TUNNEL) {
-               struct flowi fl = {
-                       .oif = vif->link,
-                       .fl4_dst = vif->remote,
-                       .fl4_src = vif->local,
-                       .fl4_tos = RT_TOS(iph->tos),
-                       .proto = IPPROTO_IPIP
-               };
-
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_ports(net, NULL,
+                                          vif->remote, vif->local,
+                                          0, 0,
+                                          IPPROTO_IPIP,
+                                          RT_TOS(iph->tos), vif->link);
+               if (IS_ERR(rt))
                        goto out_free;
                encap = sizeof(struct iphdr);
        } else {
-               struct flowi fl = {
-                       .oif = vif->link,
-                       .fl4_dst = iph->daddr,
-                       .fl4_tos = RT_TOS(iph->tos),
-                       .proto = IPPROTO_IPIP
-               };
-
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_ports(net, NULL, iph->daddr, 0,
+                                          0, 0,
+                                          IPPROTO_IPIP,
+                                          RT_TOS(iph->tos), vif->link);
+               if (IS_ERR(rt))
                        goto out_free;
        }
 
@@ -1717,6 +1788,24 @@ dont_forward:
        return 0;
 }
 
+static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct rtable *rt)
+{
+       struct flowi4 fl4 = {
+               .daddr = rt->rt_key_dst,
+               .saddr = rt->rt_key_src,
+               .flowi4_tos = rt->rt_tos,
+               .flowi4_oif = rt->rt_oif,
+               .flowi4_iif = rt->rt_iif,
+               .flowi4_mark = rt->rt_mark,
+       };
+       struct mr_table *mrt;
+       int err;
+
+       err = ipmr_fib_lookup(net, &fl4, &mrt);
+       if (err)
+               return ERR_PTR(err);
+       return mrt;
+}
 
 /*
  *     Multicast packets for forwarding arrive here
@@ -1729,7 +1818,6 @@ int ip_mr_input(struct sk_buff *skb)
        struct net *net = dev_net(skb->dev);
        int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
        struct mr_table *mrt;
-       int err;
 
        /* Packet is looped back after forward, it should not be
         * forwarded second time, but still can be delivered locally.
@@ -1737,12 +1825,11 @@ int ip_mr_input(struct sk_buff *skb)
        if (IPCB(skb)->flags & IPSKB_FORWARDED)
                goto dont_forward;
 
-       err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-       if (err < 0) {
+       mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+       if (IS_ERR(mrt)) {
                kfree_skb(skb);
-               return err;
+               return PTR_ERR(mrt);
        }
-
        if (!local) {
                if (IPCB(skb)->opt.router_alert) {
                        if (ip_call_ra_chain(skb))
@@ -1870,9 +1957,9 @@ int pim_rcv_v1(struct sk_buff *skb)
 
        pim = igmp_hdr(skb);
 
-       if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+       mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+       if (IS_ERR(mrt))
                goto drop;
-
        if (!mrt->mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
                goto drop;
@@ -1902,9 +1989,9 @@ static int pim_rcv(struct sk_buff *skb)
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;
 
-       if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+       mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+       if (IS_ERR(mrt))
                goto drop;
-
        if (__pim_rcv(mrt, skb, sizeof(*pim))) {
 drop:
                kfree_skb(skb);
index 994a1f2..f3c0b54 100644 (file)
@@ -16,7 +16,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        struct net *net = dev_net(skb_dst(skb)->dev);
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
-       struct flowi fl = {};
+       struct flowi4 fl4 = {};
        unsigned long orefdst;
        unsigned int hh_len;
        unsigned int type;
@@ -31,14 +31,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
         * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
         */
        if (addr_type == RTN_LOCAL) {
-               fl.fl4_dst = iph->daddr;
+               fl4.daddr = iph->daddr;
                if (type == RTN_LOCAL)
-                       fl.fl4_src = iph->saddr;
-               fl.fl4_tos = RT_TOS(iph->tos);
-               fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
-               fl.mark = skb->mark;
-               fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
-               if (ip_route_output_key(net, &rt, &fl) != 0)
+                       fl4.saddr = iph->saddr;
+               fl4.flowi4_tos = RT_TOS(iph->tos);
+               fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+               fl4.flowi4_mark = skb->mark;
+               fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
+               rt = ip_route_output_key(net, &fl4);
+               if (IS_ERR(rt))
                        return -1;
 
                /* Drop old route. */
@@ -47,8 +48,9 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        } else {
                /* non-local src, find valid iif to satisfy
                 * rp-filter when calling ip_route_input. */
-               fl.fl4_dst = iph->saddr;
-               if (ip_route_output_key(net, &rt, &fl) != 0)
+               fl4.daddr = iph->saddr;
+               rt = ip_route_output_key(net, &fl4);
+               if (IS_ERR(rt))
                        return -1;
 
                orefdst = skb->_skb_refdst;
@@ -66,10 +68,11 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 
 #ifdef CONFIG_XFRM
        if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
-           xfrm_decode_session(skb, &fl, AF_INET) == 0) {
+           xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
                struct dst_entry *dst = skb_dst(skb);
                skb_dst_set(skb, NULL);
-               if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
+               dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
+               if (IS_ERR(dst))
                        return -1;
                skb_dst_set(skb, dst);
        }
@@ -102,7 +105,8 @@ int ip_xfrm_me_harder(struct sk_buff *skb)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);
 
-       if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0)
+       dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
+       if (IS_ERR(dst))
                return -1;
 
        skb_dst_drop(skb);
@@ -219,7 +223,11 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 
 static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
 {
-       return ip_route_output_key(&init_net, (struct rtable **)dst, fl);
+       struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+       *dst = &rt->dst;
+       return 0;
 }
 
 static const struct nf_afinfo nf_ip_afinfo = {
index babd1a2..1dfc18a 100644 (file)
@@ -64,16 +64,6 @@ config IP_NF_IPTABLES
 if IP_NF_IPTABLES
 
 # The matches.
-config IP_NF_MATCH_ADDRTYPE
-       tristate '"addrtype" address type match support'
-       depends on NETFILTER_ADVANCED
-       help
-         This option allows you to match what routing thinks of an address,
-         eg. UNICAST, LOCAL, BROADCAST, ...
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
-
 config IP_NF_MATCH_AH
        tristate '"ah" match support'
        depends on NETFILTER_ADVANCED
@@ -206,8 +196,9 @@ config IP_NF_TARGET_REDIRECT
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
-       depends on NF_NAT
+       depends on NF_CONNTRACK_SNMP && NF_NAT
        depends on NETFILTER_ADVANCED
+       default NF_NAT && NF_CONNTRACK_SNMP
        ---help---
 
          This module implements an Application Layer Gateway (ALG) for
index 19eb59d..dca2082 100644 (file)
@@ -48,7 +48,6 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
 # matches
-obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
 
index e855fff..4b5d457 100644 (file)
@@ -866,6 +866,7 @@ static int compat_table_info(const struct xt_table_info *info,
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       xt_compat_init_offsets(NFPROTO_ARP, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
                if (ret != 0)
@@ -1065,6 +1066,7 @@ static int do_replace(struct net *net, const void __user *user,
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
@@ -1333,6 +1335,7 @@ static int translate_compat_table(const char *name,
        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(NFPROTO_ARP);
+       xt_compat_init_offsets(NFPROTO_ARP, number);
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter0, entry0, total_size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
@@ -1486,6 +1489,7 @@ static int compat_do_replace(struct net *net, void __user *user,
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
@@ -1738,6 +1742,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
                        ret = -EFAULT;
                        break;
                }
+               rev.name[sizeof(rev.name)-1] = 0;
 
                try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
                                                         rev.revision, 1, &ret),
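
The added tmp.name[sizeof(tmp.name)-1] = 0 and rev.name[sizeof(rev.name)-1] = 0 lines here (and the matching additions in the iptables hunks further below) force NUL-termination of table and revision names copied in from userspace, so later string handling cannot run past the fixed-size field. A minimal userspace demonstration of why the terminator matters, with an invented 8-byte field and memcpy() standing in for copy_from_user():

#include <stdio.h>
#include <string.h>

struct request {
        char name[8];           /* fixed-size field filled by the caller */
};

int main(void)
{
        struct request tmp;

        /* A buggy or hostile caller can fill the field completely,
         * leaving no NUL terminator behind. */
        memcpy(tmp.name, "AAAAAAAA", sizeof(tmp.name));

        /* Without this line, strlen()/strcmp()-style users of tmp.name
         * would read past the end of the field. */
        tmp.name[sizeof(tmp.name) - 1] = 0;

        printf("name = \"%s\" (len %zu)\n", tmp.name, strlen(tmp.name));
        return 0;
}
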
index b8ddcc4..a5e52a9 100644 (file)
@@ -60,12 +60,12 @@ static int checkentry(const struct xt_tgchk_param *par)
 
        if (mangle->flags & ~ARPT_MANGLE_MASK ||
            !(mangle->flags & ARPT_MANGLE_MASK))
-               return false;
+               return -EINVAL;
 
        if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
           mangle->target != XT_CONTINUE)
-               return false;
-       return true;
+               return -EINVAL;
+       return 0;
 }
 
 static struct xt_target arpt_mangle_reg __read_mostly = {
index 652efea..b09ed0d 100644 (file)
@@ -1063,6 +1063,7 @@ static int compat_table_info(const struct xt_table_info *info,
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       xt_compat_init_offsets(AF_INET, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
                if (ret != 0)
@@ -1261,6 +1262,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
@@ -1664,6 +1666,7 @@ translate_compat_table(struct net *net,
        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET);
+       xt_compat_init_offsets(AF_INET, number);
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter0, entry0, total_size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
@@ -1805,6 +1808,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
@@ -2034,6 +2038,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        ret = -EFAULT;
                        break;
                }
+               rev.name[sizeof(rev.name)-1] = 0;
 
                if (cmd == IPT_SO_GET_REVISION_TARGET)
                        target = 1;
index 1e26a48..403ca57 100644 (file)
@@ -300,13 +300,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
         * that the ->target() function isn't called after ->destroy() */
 
        ct = nf_ct_get(skb, &ctinfo);
-       if (ct == NULL) {
-               pr_info("no conntrack!\n");
-                       /* FIXME: need to drop invalid ones, since replies
-                        * to outgoing connections of other nodes will be
-                        * marked as INVALID */
+       if (ct == NULL)
                return NF_DROP;
-       }
 
        /* special case: ICMP error handling. conntrack distinguishes between
         * error messages (RELATED) and information requests (see below) */
index 72ffc8f..d76d6c9 100644 (file)
@@ -442,8 +442,7 @@ ipt_log_packet(u_int8_t pf,
        }
 #endif
 
-       /* MAC logging for input path only. */
-       if (in && !out)
+       if (in != NULL)
                dump_mac_header(m, loginfo, skb);
 
        dump_packet(m, loginfo, skb, 0);
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
deleted file mode 100644 (file)
index db8bff0..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *  iptables module to match inet_addr_type() of an ip.
- *
- *  Copyright (c) 2004 Patrick McHardy <kaber@trash.net>
- *  (C) 2007 Laszlo Attila Toth <panther@balabit.hu>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ip.h>
-#include <net/route.h>
-
-#include <linux/netfilter_ipv4/ipt_addrtype.h>
-#include <linux/netfilter/x_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_DESCRIPTION("Xtables: address type match for IPv4");
-
-static inline bool match_type(struct net *net, const struct net_device *dev,
-                             __be32 addr, u_int16_t mask)
-{
-       return !!(mask & (1 << inet_dev_addr_type(net, dev, addr)));
-}
-
-static bool
-addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       struct net *net = dev_net(par->in ? par->in : par->out);
-       const struct ipt_addrtype_info *info = par->matchinfo;
-       const struct iphdr *iph = ip_hdr(skb);
-       bool ret = true;
-
-       if (info->source)
-               ret &= match_type(net, NULL, iph->saddr, info->source) ^
-                      info->invert_source;
-       if (info->dest)
-               ret &= match_type(net, NULL, iph->daddr, info->dest) ^
-                      info->invert_dest;
-
-       return ret;
-}
-
-static bool
-addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       struct net *net = dev_net(par->in ? par->in : par->out);
-       const struct ipt_addrtype_info_v1 *info = par->matchinfo;
-       const struct iphdr *iph = ip_hdr(skb);
-       const struct net_device *dev = NULL;
-       bool ret = true;
-
-       if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN)
-               dev = par->in;
-       else if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT)
-               dev = par->out;
-
-       if (info->source)
-               ret &= match_type(net, dev, iph->saddr, info->source) ^
-                      (info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
-       if (ret && info->dest)
-               ret &= match_type(net, dev, iph->daddr, info->dest) ^
-                      !!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
-       return ret;
-}
-
-static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
-{
-       struct ipt_addrtype_info_v1 *info = par->matchinfo;
-
-       if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN &&
-           info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
-               pr_info("both incoming and outgoing "
-                       "interface limitation cannot be selected\n");
-               return -EINVAL;
-       }
-
-       if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
-           (1 << NF_INET_LOCAL_IN)) &&
-           info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
-               pr_info("output interface limitation "
-                       "not valid in PREROUTING and INPUT\n");
-               return -EINVAL;
-       }
-
-       if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
-           (1 << NF_INET_LOCAL_OUT)) &&
-           info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN) {
-               pr_info("input interface limitation "
-                       "not valid in POSTROUTING and OUTPUT\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static struct xt_match addrtype_mt_reg[] __read_mostly = {
-       {
-               .name           = "addrtype",
-               .family         = NFPROTO_IPV4,
-               .match          = addrtype_mt_v0,
-               .matchsize      = sizeof(struct ipt_addrtype_info),
-               .me             = THIS_MODULE
-       },
-       {
-               .name           = "addrtype",
-               .family         = NFPROTO_IPV4,
-               .revision       = 1,
-               .match          = addrtype_mt_v1,
-               .checkentry     = addrtype_mt_checkentry_v1,
-               .matchsize      = sizeof(struct ipt_addrtype_info_v1),
-               .me             = THIS_MODULE
-       }
-};
-
-static int __init addrtype_mt_init(void)
-{
-       return xt_register_matches(addrtype_mt_reg,
-                                  ARRAY_SIZE(addrtype_mt_reg));
-}
-
-static void __exit addrtype_mt_exit(void)
-{
-       xt_unregister_matches(addrtype_mt_reg, ARRAY_SIZE(addrtype_mt_reg));
-}
-
-module_init(addrtype_mt_init);
-module_exit(addrtype_mt_exit);
index 294a2a3..aef5d1f 100644 (file)
@@ -60,7 +60,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
        ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
                           dev_net(out)->ipv4.iptable_mangle);
        /* Reroute for ANY change. */
-       if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+       if (ret != NF_DROP && ret != NF_STOLEN) {
                iph = ip_hdr(skb);
 
                if (iph->saddr != saddr ||
index 63f60fc..5585980 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
 
 struct ct_iter_state {
        struct seq_net_private p;
@@ -35,7 +36,8 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
-               n = rcu_dereference(net->ct.hash[st->bucket].first);
+               n = rcu_dereference(
+                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
@@ -48,13 +50,14 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
-               head = rcu_dereference(net->ct.hash[st->bucket].first);
+               head = rcu_dereference(
+                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
        }
        return head;
 }
@@ -217,7 +220,8 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
        struct hlist_node *n;
 
        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-               n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               n = rcu_dereference(
+                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
@@ -230,11 +234,12 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
-               head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               head = rcu_dereference(
+                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
 }
index 0f23b3f..703f366 100644 (file)
@@ -44,13 +44,13 @@ static unsigned int help(struct sk_buff *skb,
 
        /* Try to get same port: if not, try to change it. */
        for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               int ret;
+               int res;
 
                exp->tuple.dst.u.tcp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
-               if (ret == 0)
+               res = nf_ct_expect_related(exp);
+               if (res == 0)
                        break;
-               else if (ret != -EBUSY) {
+               else if (res != -EBUSY) {
                        port = 0;
                        break;
                }
index c04787c..21bcf47 100644 (file)
@@ -221,7 +221,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
           manips not an issue.  */
        if (maniptype == IP_NAT_MANIP_SRC &&
            !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-               if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
+               /* try the original tuple first */
+               if (in_range(orig_tuple, range)) {
+                       if (!nf_nat_used_tuple(orig_tuple, ct)) {
+                               *tuple = *orig_tuple;
+                               return;
+                       }
+               } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
+                          range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                return;
@@ -266,7 +273,6 @@ nf_nat_setup_info(struct nf_conn *ct,
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;
-       int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
 
        /* nat helper or nfctnetlink also setup binding */
        nat = nfct_nat(ct);
@@ -306,8 +312,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                        ct->status |= IPS_DST_NAT;
        }
 
-       /* Place in source hash if this is the first time. */
-       if (have_to_hash) {
+       if (maniptype == IP_NAT_MANIP_SRC) {
                unsigned int srchash;
 
                srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -323,9 +328,9 @@ nf_nat_setup_info(struct nf_conn *ct,
 
        /* It's done. */
        if (maniptype == IP_NAT_MANIP_DST)
-               set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+               ct->status |= IPS_DST_NAT_DONE;
        else
-               set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+               ct->status |= IPS_SRC_NAT_DONE;
 
        return NF_ACCEPT;
 }
@@ -502,7 +507,10 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
        int ret = 0;
 
        spin_lock_bh(&nf_nat_lock);
-       if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+       if (rcu_dereference_protected(
+                       nf_nat_protos[proto->protonum],
+                       lockdep_is_held(&nf_nat_lock)
+                       ) != &nf_nat_unknown_protocol) {
                ret = -EBUSY;
                goto out;
        }
@@ -532,7 +540,7 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
        if (nat == NULL || nat->ct == NULL)
                return;
 
-       NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
+       NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
 
        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
@@ -545,11 +553,10 @@ static void nf_nat_move_storage(void *new, void *old)
        struct nf_conn_nat *old_nat = old;
        struct nf_conn *ct = old_nat->ct;
 
-       if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
+       if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
                return;
 
        spin_lock_bh(&nf_nat_lock);
-       new_nat->ct = ct;
        hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
 }
@@ -679,8 +686,7 @@ static int __net_init nf_nat_net_init(struct net *net)
 {
        /* Leave them the same for the moment. */
        net->ipv4.nat_htable_size = net->ct.htable_size;
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
-                                                      &net->ipv4.nat_vmalloced, 0);
+       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
        if (!net->ipv4.nat_bysource)
                return -ENOMEM;
        return 0;
@@ -702,8 +708,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        nf_ct_iterate_cleanup(net, &clean_nat, NULL);
        synchronize_rcu();
-       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-                            net->ipv4.nat_htable_size);
+       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
index ee5f419..8812a02 100644 (file)
@@ -54,6 +54,7 @@
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
@@ -1310,9 +1311,9 @@ static int __init nf_nat_snmp_basic_init(void)
 {
        int ret = 0;
 
-       ret = nf_conntrack_helper_register(&snmp_helper);
-       if (ret < 0)
-               return ret;
+       BUG_ON(nf_nat_snmp_hook != NULL);
+       rcu_assign_pointer(nf_nat_snmp_hook, help);
+
        ret = nf_conntrack_helper_register(&snmp_trap_helper);
        if (ret < 0) {
                nf_conntrack_helper_unregister(&snmp_helper);
@@ -1323,7 +1324,7 @@ static int __init nf_nat_snmp_basic_init(void)
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
-       nf_conntrack_helper_unregister(&snmp_helper);
+       rcu_assign_pointer(nf_nat_snmp_hook, NULL);
        nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
 
index 95481fe..7317bdf 100644 (file)
@@ -31,6 +31,7 @@
 #ifdef CONFIG_XFRM
 static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
 {
+       struct flowi4 *fl4 = &fl->u.ip4;
        const struct nf_conn *ct;
        const struct nf_conntrack_tuple *t;
        enum ip_conntrack_info ctinfo;
@@ -49,25 +50,25 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
                statusbit = IPS_SRC_NAT;
 
        if (ct->status & statusbit) {
-               fl->fl4_dst = t->dst.u3.ip;
+               fl4->daddr = t->dst.u3.ip;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
-                       fl->fl_ip_dport = t->dst.u.tcp.port;
+                       fl4->fl4_dport = t->dst.u.tcp.port;
        }
 
        statusbit ^= IPS_NAT_MASK;
 
        if (ct->status & statusbit) {
-               fl->fl4_src = t->src.u3.ip;
+               fl4->saddr = t->src.u3.ip;
                if (t->dst.protonum == IPPROTO_TCP ||
                    t->dst.protonum == IPPROTO_UDP ||
                    t->dst.protonum == IPPROTO_UDPLITE ||
                    t->dst.protonum == IPPROTO_DCCP ||
                    t->dst.protonum == IPPROTO_SCTP)
-                       fl->fl_ip_sport = t->src.u.tcp.port;
+                       fl4->fl4_sport = t->src.u.tcp.port;
        }
 }
 #endif
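Every change in this hunk is a mechanical rename from the old union-style struct flowi to the IPv4-specific struct flowi4. The mapping, as far as the hunks in this merge show, is:

/*
 *  old struct flowi field      new struct flowi4 field
 *  ----------------------      -----------------------
 *  fl->fl4_dst                 fl4->daddr
 *  fl->fl4_src                 fl4->saddr
 *  fl->fl4_tos                 fl4->flowi4_tos
 *  fl->fl4_scope               fl4->flowi4_scope
 *  fl->oif / fl->iif           fl4->flowi4_oif / fl4->flowi4_iif
 *  fl->mark                    fl4->flowi4_mark
 *  fl->proto                   fl4->flowi4_proto
 *  fl->fl_ip_sport             fl4->fl4_sport
 *  fl->fl_ip_dport             fl4->fl4_dport
 *  fl->fl_icmp_type / _code    fl4->fl4_icmp_type / fl4->fl4_icmp_code
 */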
index a3d5ab7..e837ffd 100644 (file)
@@ -76,6 +76,7 @@
 #include <linux/seq_file.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
 
 static struct raw_hashinfo raw_v4_hashinfo = {
        .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -401,7 +402,7 @@ error:
        return err;
 }
 
-static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
+static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg)
 {
        struct iovec *iov;
        u8 __user *type = NULL;
@@ -417,7 +418,7 @@ static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
                if (!iov)
                        continue;
 
-               switch (fl->proto) {
+               switch (fl4->flowi4_proto) {
                case IPPROTO_ICMP:
                        /* check if one-byte field is readable or not. */
                        if (iov->iov_base && iov->iov_len < 1)
@@ -432,8 +433,8 @@ static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
                                code = iov->iov_base;
 
                        if (type && code) {
-                               if (get_user(fl->fl_icmp_type, type) ||
-                                   get_user(fl->fl_icmp_code, code))
+                               if (get_user(fl4->fl4_icmp_type, type) ||
+                                   get_user(fl4->fl4_icmp_code, code))
                                        return -EFAULT;
                                probed = 1;
                        }
@@ -547,25 +548,30 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        }
 
        {
-               struct flowi fl = { .oif = ipc.oif,
-                                   .mark = sk->sk_mark,
-                                   .fl4_dst = daddr,
-                                   .fl4_src = saddr,
-                                   .fl4_tos = tos,
-                                   .proto = inet->hdrincl ? IPPROTO_RAW :
-                                                            sk->sk_protocol,
-                                 };
+               struct flowi4 fl4 = {
+                       .flowi4_oif = ipc.oif,
+                       .flowi4_mark = sk->sk_mark,
+                       .daddr = daddr,
+                       .saddr = saddr,
+                       .flowi4_tos = tos,
+                       .flowi4_proto = (inet->hdrincl ?
+                                        IPPROTO_RAW :
+                                        sk->sk_protocol),
+                       .flowi4_flags = FLOWI_FLAG_CAN_SLEEP,
+               };
                if (!inet->hdrincl) {
-                       err = raw_probe_proto_opt(&fl, msg);
+                       err = raw_probe_proto_opt(&fl4, msg);
                        if (err)
                                goto done;
                }
 
-               security_sk_classify_flow(sk, &fl);
-               err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
+               security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+               rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+               if (IS_ERR(rt)) {
+                       err = PTR_ERR(rt);
+                       goto done;
+               }
        }
-       if (err)
-               goto done;
 
        err = -EACCES;
        if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
@@ -838,6 +844,23 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
        }
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case SIOCOUTQ:
+       case SIOCINQ:
+               return -ENOIOCTLCMD;
+       default:
+#ifdef CONFIG_IP_MROUTE
+               return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+               return -ENOIOCTLCMD;
+#endif
+       }
+}
+#endif
+
 struct proto raw_prot = {
        .name              = "RAW",
        .owner             = THIS_MODULE,
@@ -860,6 +883,7 @@ struct proto raw_prot = {
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_raw_setsockopt,
        .compat_getsockopt = compat_raw_getsockopt,
+       .compat_ioctl      = compat_raw_ioctl,
 #endif
 };
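raw_sendmsg() also shows the new calling convention for output route lookups: ip_route_output_flow() returns the struct rtable directly and encodes failures in the pointer, so the old int return plus **rt out-parameter pair is gone. A minimal caller sketch of that convention (example_route_and_send is hypothetical; the lookup call itself is the one used above):

#include <linux/err.h>
#include <net/route.h>
#include <net/sock.h>

static int example_route_and_send(struct sock *sk, struct flowi4 *fl4)
{
	struct rtable *rt;

	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* the error code travels inside the pointer */

	/* ... build and transmit the packet over rt ... */

	ip_rt_put(rt);
	return 0;
}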
 
index 351dc4e..209989c 100644 (file)
 #include <linux/sysctl.h>
 #endif
 
-#define RT_FL_TOS(oldflp) \
-    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+#define RT_FL_TOS(oldflp4) \
+    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
 
 #define IP_MAX_MTU     0xFFF0
 
@@ -131,9 +131,6 @@ static int ip_rt_min_pmtu __read_mostly             = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
-
 /*
  *     Interface to generic destination cache.
  */
@@ -152,6 +149,41 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 {
 }
 
+static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+       struct rtable *rt = (struct rtable *) dst;
+       struct inet_peer *peer;
+       u32 *p = NULL;
+
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+
+       peer = rt->peer;
+       if (peer) {
+               u32 *old_p = __DST_METRICS_PTR(old);
+               unsigned long prev, new;
+
+               p = peer->metrics;
+               if (inet_metrics_new(peer))
+                       memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+               new = (unsigned long) p;
+               prev = cmpxchg(&dst->_metrics, old, new);
+
+               if (prev != old) {
+                       p = __DST_METRICS_PTR(prev);
+                       if (prev & DST_METRICS_READ_ONLY)
+                               p = NULL;
+               } else {
+                       if (rt->fi) {
+                               fib_info_put(rt->fi);
+                               rt->fi = NULL;
+                       }
+               }
+       }
+       return p;
+}
+
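ipv4_cow_metrics() above is what makes dst metrics writable on demand: it points dst->_metrics at the peer's private array, and the cmpxchg() keeps that race-free when several CPUs fault on the read-only metrics at once. Reduced to just the publish step (field names as in the hunk; the read-only check and rt->fi handling are omitted, example_publish_metrics is not in the patch):

#include <net/dst.h>

/* Only one writer wins the swap from the shared read-only metrics to the
 * peer's writable copy; losers adopt whatever pointer actually won.
 */
static u32 *example_publish_metrics(struct dst_entry *dst,
				    unsigned long old, u32 *copy)
{
	unsigned long prev = cmpxchg(&dst->_metrics, old, (unsigned long) copy);

	return (prev == old) ? copy : __DST_METRICS_PTR(prev);
}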
 static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
@@ -159,6 +191,7 @@ static struct dst_ops ipv4_dst_ops = {
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
        .default_mtu =          ipv4_default_mtu,
+       .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .ifdown =               ipv4_dst_ifdown,
        .negative_advice =      ipv4_negative_advice,
@@ -391,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                        dst_metric(&r->dst, RTAX_WINDOW),
                        (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
                              dst_metric(&r->dst, RTAX_RTTVAR)),
-                       r->fl.fl4_tos,
+                       r->rt_tos,
                        r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
                        r->dst.hh ? (r->dst.hh->hh_output ==
                                       dev_queue_xmit) : 0,
@@ -514,7 +547,7 @@ static const struct file_operations rt_cpu_seq_fops = {
        .release = seq_release,
 };
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 static int rt_acct_proc_show(struct seq_file *m, void *v)
 {
        struct ip_rt_acct *dst, *src;
@@ -567,14 +600,14 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
        if (!pde)
                goto err2;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
        if (!pde)
                goto err3;
 #endif
        return 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 err3:
        remove_proc_entry("rt_cache", net->proc_net_stat);
 #endif
@@ -588,7 +621,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
 {
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);
 #endif
 }
@@ -632,7 +665,7 @@ static inline int rt_fast_clean(struct rtable *rth)
 static inline int rt_valuable(struct rtable *rth)
 {
        return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-               rth->dst.expires;
+               (rth->peer && rth->peer->pmtu_expires);
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -643,13 +676,7 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
        if (atomic_read(&rth->dst.__refcnt))
                goto out;
 
-       ret = 1;
-       if (rth->dst.expires &&
-           time_after_eq(jiffies, rth->dst.expires))
-               goto out;
-
        age = jiffies - rth->dst.lastuse;
-       ret = 0;
        if ((age <= tmo1 && !rt_fast_clean(rth)) ||
            (age <= tmo2 && rt_valuable(rth)))
                goto out;
@@ -684,22 +711,22 @@ static inline bool rt_caching(const struct net *net)
                net->ipv4.sysctl_rt_cache_rebuild_count;
 }
 
-static inline bool compare_hash_inputs(const struct flowi *fl1,
-                                       const struct flowi *fl2)
+static inline bool compare_hash_inputs(const struct rtable *rt1,
+                                      const struct rtable *rt2)
 {
-       return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
-               ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
-               (fl1->iif ^ fl2->iif)) == 0);
+       return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
+               ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
+               (rt1->rt_iif ^ rt2->rt_iif)) == 0);
 }
 
-static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
+static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
 {
-       return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
-               ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
-               (fl1->mark ^ fl2->mark) |
-               (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
-               (fl1->oif ^ fl2->oif) |
-               (fl1->iif ^ fl2->iif)) == 0;
+       return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
+               ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
+               (rt1->rt_mark ^ rt2->rt_mark) |
+               (rt1->rt_tos ^ rt2->rt_tos) |
+               (rt1->rt_oif ^ rt2->rt_oif) |
+               (rt1->rt_iif ^ rt2->rt_iif)) == 0;
 }
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
@@ -786,104 +813,13 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        const struct rtable *aux = head;
 
        while (aux != rth) {
-               if (compare_hash_inputs(&aux->fl, &rth->fl))
+               if (compare_hash_inputs(aux, rth))
                        return 0;
                aux = rcu_dereference_protected(aux->dst.rt_next, 1);
        }
        return ONE;
 }
 
-static void rt_check_expire(void)
-{
-       static unsigned int rover;
-       unsigned int i = rover, goal;
-       struct rtable *rth;
-       struct rtable __rcu **rthp;
-       unsigned long samples = 0;
-       unsigned long sum = 0, sum2 = 0;
-       unsigned long delta;
-       u64 mult;
-
-       delta = jiffies - expires_ljiffies;
-       expires_ljiffies = jiffies;
-       mult = ((u64)delta) << rt_hash_log;
-       if (ip_rt_gc_timeout > 1)
-               do_div(mult, ip_rt_gc_timeout);
-       goal = (unsigned int)mult;
-       if (goal > rt_hash_mask)
-               goal = rt_hash_mask + 1;
-       for (; goal > 0; goal--) {
-               unsigned long tmo = ip_rt_gc_timeout;
-               unsigned long length;
-
-               i = (i + 1) & rt_hash_mask;
-               rthp = &rt_hash_table[i].chain;
-
-               if (need_resched())
-                       cond_resched();
-
-               samples++;
-
-               if (rcu_dereference_raw(*rthp) == NULL)
-                       continue;
-               length = 0;
-               spin_lock_bh(rt_hash_lock_addr(i));
-               while ((rth = rcu_dereference_protected(*rthp,
-                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
-                       prefetch(rth->dst.rt_next);
-                       if (rt_is_expired(rth)) {
-                               *rthp = rth->dst.rt_next;
-                               rt_free(rth);
-                               continue;
-                       }
-                       if (rth->dst.expires) {
-                               /* Entry is expired even if it is in use */
-                               if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
-                                       tmo >>= 1;
-                                       rthp = &rth->dst.rt_next;
-                                       /*
-                                        * We only count entries on
-                                        * a chain with equal hash inputs once
-                                        * so that entries for different QOS
-                                        * levels, and other non-hash input
-                                        * attributes don't unfairly skew
-                                        * the length computation
-                                        */
-                                       length += has_noalias(rt_hash_table[i].chain, rth);
-                                       continue;
-                               }
-                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
-                               goto nofree;
-
-                       /* Cleanup aged off entries. */
-                       *rthp = rth->dst.rt_next;
-                       rt_free(rth);
-               }
-               spin_unlock_bh(rt_hash_lock_addr(i));
-               sum += length;
-               sum2 += length*length;
-       }
-       if (samples) {
-               unsigned long avg = sum / samples;
-               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
-               rt_chain_length_max = max_t(unsigned long,
-                                       ip_rt_gc_elasticity,
-                                       (avg + 4*sd) >> FRACT_BITS);
-       }
-       rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
-       rt_check_expire();
-       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
@@ -1078,8 +1014,8 @@ static int slow_chain_length(const struct rtable *head)
        return length >> FRACT_BITS;
 }
 
-static int rt_intern_hash(unsigned hash, struct rtable *rt,
-                         struct rtable **rp, struct sk_buff *skb, int ifindex)
+static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
+                                    struct sk_buff *skb, int ifindex)
 {
        struct rtable   *rth, *cand;
        struct rtable __rcu **rthp, **candp;
@@ -1120,7 +1056,7 @@ restart:
                                        printk(KERN_WARNING
                                            "Neighbour table failure & not caching routes.\n");
                                ip_rt_put(rt);
-                               return err;
+                               return ERR_PTR(err);
                        }
                }
 
@@ -1137,7 +1073,7 @@ restart:
                        rt_free(rth);
                        continue;
                }
-               if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
+               if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
                        /* Put it first */
                        *rthp = rth->dst.rt_next;
                        /*
@@ -1157,11 +1093,9 @@ restart:
                        spin_unlock_bh(rt_hash_lock_addr(hash));
 
                        rt_drop(rt);
-                       if (rp)
-                               *rp = rth;
-                       else
+                       if (skb)
                                skb_dst_set(skb, &rth->dst);
-                       return 0;
+                       return rth;
                }
 
                if (!atomic_read(&rth->dst.__refcnt)) {
@@ -1202,7 +1136,7 @@ restart:
                        rt_emergency_hash_rebuild(net);
                        spin_unlock_bh(rt_hash_lock_addr(hash));
 
-                       hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+                       hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
                                        ifindex, rt_genid(net));
                        goto restart;
                }
@@ -1218,7 +1152,7 @@ restart:
 
                        if (err != -ENOBUFS) {
                                rt_drop(rt);
-                               return err;
+                               return ERR_PTR(err);
                        }
 
                        /* Neighbour tables are full and nothing
@@ -1239,7 +1173,7 @@ restart:
                        if (net_ratelimit())
                                printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
                        rt_drop(rt);
-                       return -ENOBUFS;
+                       return ERR_PTR(-ENOBUFS);
                }
        }
 
@@ -1265,11 +1199,16 @@ restart:
        spin_unlock_bh(rt_hash_lock_addr(hash));
 
 skip_hashing:
-       if (rp)
-               *rp = rt;
-       else
+       if (skb)
                skb_dst_set(skb, &rt->dst);
-       return 0;
+       return rt;
+}
+
+static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt_peer_genid(void)
+{
+       return atomic_read(&__rt_peer_genid);
 }
 
 void rt_bind_peer(struct rtable *rt, int create)
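__rt_peer_genid introduced above is a global generation counter: code that stores new PMTU or redirect data on an inet_peer bumps it, and every cached rtable remembers the value it last validated against in rt->rt_peer_genid, which ipv4_dst_check() later compares before trusting the entry. The idea in isolation, with hypothetical names:

#include <linux/types.h>

static atomic_t example_genid = ATOMIC_INIT(0);

/* Writer side: bump after changing shared per-peer state. */
static void example_peer_changed(void)
{
	atomic_inc(&example_genid);
}

/* Reader side: a stale snapshot means the route must revalidate against
 * the peer before being used again.
 */
static bool example_needs_revalidate(u32 cached)
{
	return cached != (u32) atomic_read(&example_genid);
}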
@@ -1280,6 +1219,8 @@ void rt_bind_peer(struct rtable *rt, int create)
 
        if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
                inet_putpeer(peer);
+       else
+               rt->rt_peer_genid = rt_peer_genid();
 }
 
 /*
@@ -1349,13 +1290,8 @@ static void rt_del(unsigned hash, struct rtable *rt)
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                    __be32 saddr, struct net_device *dev)
 {
-       int i, k;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rth;
-       struct rtable __rcu **rthp;
-       __be32  skeys[2] = { saddr, 0 };
-       int  ikeys[2] = { dev->ifindex, 0 };
-       struct netevent_redirect netevent;
+       struct inet_peer *peer;
        struct net *net;
 
        if (!in_dev)
@@ -1367,9 +1303,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;
 
-       if (!rt_caching(net))
-               goto reject_redirect;
-
        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
@@ -1380,91 +1313,13 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                        goto reject_redirect;
        }
 
-       for (i = 0; i < 2; i++) {
-               for (k = 0; k < 2; k++) {
-                       unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-                                               rt_genid(net));
-
-                       rthp = &rt_hash_table[hash].chain;
-
-                       while ((rth = rcu_dereference(*rthp)) != NULL) {
-                               struct rtable *rt;
-
-                               if (rth->fl.fl4_dst != daddr ||
-                                   rth->fl.fl4_src != skeys[i] ||
-                                   rth->fl.oif != ikeys[k] ||
-                                   rt_is_input_route(rth) ||
-                                   rt_is_expired(rth) ||
-                                   !net_eq(dev_net(rth->dst.dev), net)) {
-                                       rthp = &rth->dst.rt_next;
-                                       continue;
-                               }
-
-                               if (rth->rt_dst != daddr ||
-                                   rth->rt_src != saddr ||
-                                   rth->dst.error ||
-                                   rth->rt_gateway != old_gw ||
-                                   rth->dst.dev != dev)
-                                       break;
-
-                               dst_hold(&rth->dst);
-
-                               rt = dst_alloc(&ipv4_dst_ops);
-                               if (rt == NULL) {
-                                       ip_rt_put(rth);
-                                       return;
-                               }
-
-                               /* Copy all the information. */
-                               *rt = *rth;
-                               rt->dst.__use           = 1;
-                               atomic_set(&rt->dst.__refcnt, 1);
-                               rt->dst.child           = NULL;
-                               if (rt->dst.dev)
-                                       dev_hold(rt->dst.dev);
-                               rt->dst.obsolete        = -1;
-                               rt->dst.lastuse = jiffies;
-                               rt->dst.path            = &rt->dst;
-                               rt->dst.neighbour       = NULL;
-                               rt->dst.hh              = NULL;
-#ifdef CONFIG_XFRM
-                               rt->dst.xfrm            = NULL;
-#endif
-                               rt->rt_genid            = rt_genid(net);
-                               rt->rt_flags            |= RTCF_REDIRECTED;
-
-                               /* Gateway is different ... */
-                               rt->rt_gateway          = new_gw;
-
-                               /* Redirect received -> path was valid */
-                               dst_confirm(&rth->dst);
-
-                               if (rt->peer)
-                                       atomic_inc(&rt->peer->refcnt);
-
-                               if (arp_bind_neighbour(&rt->dst) ||
-                                   !(rt->dst.neighbour->nud_state &
-                                           NUD_VALID)) {
-                                       if (rt->dst.neighbour)
-                                               neigh_event_send(rt->dst.neighbour, NULL);
-                                       ip_rt_put(rth);
-                                       rt_drop(rt);
-                                       goto do_next;
-                               }
+       peer = inet_getpeer_v4(daddr, 1);
+       if (peer) {
+               peer->redirect_learned.a4 = new_gw;
 
-                               netevent.old = &rth->dst;
-                               netevent.new = &rt->dst;
-                               call_netevent_notifiers(NETEVENT_REDIRECT,
-                                                       &netevent);
+               inet_putpeer(peer);
 
-                               rt_del(hash, rth);
-                               if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
-                                       ip_rt_put(rt);
-                               goto do_next;
-                       }
-               do_next:
-                       ;
-               }
+               atomic_inc(&__rt_peer_genid);
        }
        return;
 
@@ -1488,18 +1343,24 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
                if (dst->obsolete > 0) {
                        ip_rt_put(rt);
                        ret = NULL;
-               } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-                          (rt->dst.expires &&
-                           time_after_eq(jiffies, rt->dst.expires))) {
-                       unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
-                                               rt->fl.oif,
+               } else if (rt->rt_flags & RTCF_REDIRECTED) {
+                       unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
+                                               rt->rt_oif,
                                                rt_genid(dev_net(dst->dev)));
 #if RT_CACHE_DEBUG >= 1
                        printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
-                               &rt->rt_dst, rt->fl.fl4_tos);
+                               &rt->rt_dst, rt->rt_tos);
 #endif
                        rt_del(hash, rt);
                        ret = NULL;
+               } else if (rt->peer &&
+                          rt->peer->pmtu_expires &&
+                          time_after_eq(jiffies, rt->peer->pmtu_expires)) {
+                       unsigned long orig = rt->peer->pmtu_expires;
+
+                       if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+                               dst_metric_set(dst, RTAX_MTU,
+                                              rt->peer->pmtu_orig);
                }
        }
        return ret;
@@ -1525,6 +1386,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 {
        struct rtable *rt = skb_rtable(skb);
        struct in_device *in_dev;
+       struct inet_peer *peer;
        int log_martians;
 
        rcu_read_lock();
@@ -1536,33 +1398,41 @@ void ip_rt_send_redirect(struct sk_buff *skb)
        log_martians = IN_DEV_LOG_MARTIANS(in_dev);
        rcu_read_unlock();
 
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+       peer = rt->peer;
+       if (!peer) {
+               icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+               return;
+       }
+
        /* No redirected packets during ip_rt_redirect_silence;
         * reset the algorithm.
         */
-       if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
-               rt->dst.rate_tokens = 0;
+       if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+               peer->rate_tokens = 0;
 
        /* Too many ignored redirects; do not send anything,
         * just note the time of the last seen redirected packet (rate_last).
         */
-       if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
-               rt->dst.rate_last = jiffies;
+       if (peer->rate_tokens >= ip_rt_redirect_number) {
+               peer->rate_last = jiffies;
                return;
        }
 
        /* Check for load limit; set rate_last to the latest sent
         * redirect.
         */
-       if (rt->dst.rate_tokens == 0 ||
+       if (peer->rate_tokens == 0 ||
            time_after(jiffies,
-                      (rt->dst.rate_last +
-                       (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
+                      (peer->rate_last +
+                       (ip_rt_redirect_load << peer->rate_tokens)))) {
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-               rt->dst.rate_last = jiffies;
-               ++rt->dst.rate_tokens;
+               peer->rate_last = jiffies;
+               ++peer->rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
-                   rt->dst.rate_tokens == ip_rt_redirect_number &&
+                   peer->rate_tokens == ip_rt_redirect_number &&
                    net_ratelimit())
                        printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
                                &rt->rt_src, rt->rt_iif,
@@ -1574,7 +1444,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 static int ip_error(struct sk_buff *skb)
 {
        struct rtable *rt = skb_rtable(skb);
+       struct inet_peer *peer;
        unsigned long now;
+       bool send;
        int code;
 
        switch (rt->dst.error) {
@@ -1594,15 +1466,24 @@ static int ip_error(struct sk_buff *skb)
                        break;
        }
 
-       now = jiffies;
-       rt->dst.rate_tokens += now - rt->dst.rate_last;
-       if (rt->dst.rate_tokens > ip_rt_error_burst)
-               rt->dst.rate_tokens = ip_rt_error_burst;
-       rt->dst.rate_last = now;
-       if (rt->dst.rate_tokens >= ip_rt_error_cost) {
-               rt->dst.rate_tokens -= ip_rt_error_cost;
-               icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+       peer = rt->peer;
+
+       send = true;
+       if (peer) {
+               now = jiffies;
+               peer->rate_tokens += now - peer->rate_last;
+               if (peer->rate_tokens > ip_rt_error_burst)
+                       peer->rate_tokens = ip_rt_error_burst;
+               peer->rate_last = now;
+               if (peer->rate_tokens >= ip_rt_error_cost)
+                       peer->rate_tokens -= ip_rt_error_cost;
+               else
+                       send = false;
        }
+       if (send)
+               icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 
 out:   kfree_skb(skb);
        return 0;
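The reworked ip_error() keeps its ICMP rate-limit state on the inet_peer (rate_tokens/rate_last) instead of the dst entry, but the arithmetic is the same token bucket refilled at one token per jiffy. A self-contained sketch of that arithmetic (struct example_bucket and example_allow are illustrative; burst and cost correspond to the ip_rt_error_burst and ip_rt_error_cost sysctls used above):

#include <linux/jiffies.h>
#include <linux/types.h>

struct example_bucket {
	unsigned long tokens;
	unsigned long last;	/* jiffies at the previous decision */
};

static bool example_allow(struct example_bucket *b,
			  unsigned long burst, unsigned long cost)
{
	unsigned long now = jiffies;

	b->tokens += now - b->last;	/* refill: one token per elapsed jiffy */
	if (b->tokens > burst)
		b->tokens = burst;	/* never bank more than the burst size */
	b->last = now;

	if (b->tokens >= cost) {
		b->tokens -= cost;	/* spend -> this ICMP error may be sent */
		return true;
	}
	return false;			/* rate limited */
}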
@@ -1630,88 +1511,142 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                 unsigned short new_mtu,
                                 struct net_device *dev)
 {
-       int i, k;
        unsigned short old_mtu = ntohs(iph->tot_len);
-       struct rtable *rth;
-       int  ikeys[2] = { dev->ifindex, 0 };
-       __be32  skeys[2] = { iph->saddr, 0, };
-       __be32  daddr = iph->daddr;
        unsigned short est_mtu = 0;
+       struct inet_peer *peer;
 
-       for (k = 0; k < 2; k++) {
-               for (i = 0; i < 2; i++) {
-                       unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-                                               rt_genid(net));
-
-                       rcu_read_lock();
-                       for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-                            rth = rcu_dereference(rth->dst.rt_next)) {
-                               unsigned short mtu = new_mtu;
-
-                               if (rth->fl.fl4_dst != daddr ||
-                                   rth->fl.fl4_src != skeys[i] ||
-                                   rth->rt_dst != daddr ||
-                                   rth->rt_src != iph->saddr ||
-                                   rth->fl.oif != ikeys[k] ||
-                                   rt_is_input_route(rth) ||
-                                   dst_metric_locked(&rth->dst, RTAX_MTU) ||
-                                   !net_eq(dev_net(rth->dst.dev), net) ||
-                                   rt_is_expired(rth))
-                                       continue;
+       peer = inet_getpeer_v4(iph->daddr, 1);
+       if (peer) {
+               unsigned short mtu = new_mtu;
 
-                               if (new_mtu < 68 || new_mtu >= old_mtu) {
+               if (new_mtu < 68 || new_mtu >= old_mtu) {
+                       /* BSD 4.2 derived systems incorrectly adjust
+                        * tot_len by the IP header length, and report
+                        * a zero MTU in the ICMP message.
+                        */
+                       if (mtu == 0 &&
+                           old_mtu >= 68 + (iph->ihl << 2))
+                               old_mtu -= iph->ihl << 2;
+                       mtu = guess_mtu(old_mtu);
+               }
 
-                                       /* BSD 4.2 compatibility hack :-( */
-                                       if (mtu == 0 &&
-                                           old_mtu >= dst_mtu(&rth->dst) &&
-                                           old_mtu >= 68 + (iph->ihl << 2))
-                                               old_mtu -= iph->ihl << 2;
+               if (mtu < ip_rt_min_pmtu)
+                       mtu = ip_rt_min_pmtu;
+               if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+                       unsigned long pmtu_expires;
 
-                                       mtu = guess_mtu(old_mtu);
-                               }
-                               if (mtu <= dst_mtu(&rth->dst)) {
-                                       if (mtu < dst_mtu(&rth->dst)) {
-                                               dst_confirm(&rth->dst);
-                                               if (mtu < ip_rt_min_pmtu) {
-                                                       u32 lock = dst_metric(&rth->dst,
-                                                                             RTAX_LOCK);
-                                                       mtu = ip_rt_min_pmtu;
-                                                       lock |= (1 << RTAX_MTU);
-                                                       dst_metric_set(&rth->dst, RTAX_LOCK,
-                                                                      lock);
-                                               }
-                                               dst_metric_set(&rth->dst, RTAX_MTU, mtu);
-                                               dst_set_expires(&rth->dst,
-                                                       ip_rt_mtu_expires);
-                                       }
-                                       est_mtu = mtu;
-                               }
-                       }
-                       rcu_read_unlock();
+                       pmtu_expires = jiffies + ip_rt_mtu_expires;
+                       if (!pmtu_expires)
+                               pmtu_expires = 1UL;
+
+                       est_mtu = mtu;
+                       peer->pmtu_learned = mtu;
+                       peer->pmtu_expires = pmtu_expires;
                }
+
+               inet_putpeer(peer);
+
+               atomic_inc(&__rt_peer_genid);
        }
        return est_mtu ? : new_mtu;
 }
 
+static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+{
+       unsigned long expires = peer->pmtu_expires;
+
+       if (time_before(jiffies, expires)) {
+               u32 orig_dst_mtu = dst_mtu(dst);
+               if (peer->pmtu_learned < orig_dst_mtu) {
+                       if (!peer->pmtu_orig)
+                               peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
+                       dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
+               }
+       } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
+               dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+}
+
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-       if (dst_mtu(dst) > mtu && mtu >= 68 &&
-           !(dst_metric_locked(dst, RTAX_MTU))) {
-               if (mtu < ip_rt_min_pmtu) {
-                       u32 lock = dst_metric(dst, RTAX_LOCK);
+       struct rtable *rt = (struct rtable *) dst;
+       struct inet_peer *peer;
+
+       dst_confirm(dst);
+
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+       peer = rt->peer;
+       if (peer) {
+               if (mtu < ip_rt_min_pmtu)
                        mtu = ip_rt_min_pmtu;
-                       dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
+               if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+                       unsigned long pmtu_expires;
+
+                       pmtu_expires = jiffies + ip_rt_mtu_expires;
+                       if (!pmtu_expires)
+                               pmtu_expires = 1UL;
+
+                       peer->pmtu_learned = mtu;
+                       peer->pmtu_expires = pmtu_expires;
+
+                       atomic_inc(&__rt_peer_genid);
+                       rt->rt_peer_genid = rt_peer_genid();
                }
-               dst_metric_set(dst, RTAX_MTU, mtu);
-               dst_set_expires(dst, ip_rt_mtu_expires);
-               call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
+               check_peer_pmtu(dst, peer);
+
+               inet_putpeer(peer);
+       }
+}
+
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+       struct rtable *rt = (struct rtable *) dst;
+       __be32 orig_gw = rt->rt_gateway;
+
+       dst_confirm(&rt->dst);
+
+       neigh_release(rt->dst.neighbour);
+       rt->dst.neighbour = NULL;
+
+       rt->rt_gateway = peer->redirect_learned.a4;
+       if (arp_bind_neighbour(&rt->dst) ||
+           !(rt->dst.neighbour->nud_state & NUD_VALID)) {
+               if (rt->dst.neighbour)
+                       neigh_event_send(rt->dst.neighbour, NULL);
+               rt->rt_gateway = orig_gw;
+               return -EAGAIN;
+       } else {
+               rt->rt_flags |= RTCF_REDIRECTED;
+               call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
+                                       rt->dst.neighbour);
        }
+       return 0;
 }
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
-       if (rt_is_expired((struct rtable *)dst))
+       struct rtable *rt = (struct rtable *) dst;
+
+       if (rt_is_expired(rt))
                return NULL;
+       if (rt->rt_peer_genid != rt_peer_genid()) {
+               struct inet_peer *peer;
+
+               if (!rt->peer)
+                       rt_bind_peer(rt, 0);
+
+               peer = rt->peer;
+               if (peer && peer->pmtu_expires)
+                       check_peer_pmtu(dst, peer);
+
+               if (peer && peer->redirect_learned.a4 &&
+                   peer->redirect_learned.a4 != rt->rt_gateway) {
+                       if (check_peer_redir(dst, peer))
+                               return NULL;
+               }
+
+               rt->rt_peer_genid = rt_peer_genid();
+       }
        return dst;
 }
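check_peer_pmtu() above, together with ipv4_negative_advice() and ipv4_link_failure() elsewhere in this file, undoes a learned PMTU with the same one-shot trick: cmpxchg() the peer's pmtu_expires from its current value to 0 so that only one path restores pmtu_orig, however many CPUs notice the expiry. Condensed into a helper-style sketch (example_pmtu_expire is not in the patch; the fields are the ones used in the hunks above):

static void example_pmtu_expire(struct rtable *rt)
{
	unsigned long orig;

	if (!rt->peer)
		return;

	orig = rt->peer->pmtu_expires;
	/* Only the caller that wins the cmpxchg rolls the MTU metric back;
	 * everyone else sees the swap fail and leaves the dst alone.
	 */
	if (orig && time_after_eq(jiffies, orig) &&
	    cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
		dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
}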
 
@@ -1720,6 +1655,10 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *) dst;
        struct inet_peer *peer = rt->peer;
 
+       if (rt->fi) {
+               fib_info_put(rt->fi);
+               rt->fi = NULL;
+       }
        if (peer) {
                rt->peer = NULL;
                inet_putpeer(peer);
@@ -1734,8 +1673,14 @@ static void ipv4_link_failure(struct sk_buff *skb)
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
        rt = skb_rtable(skb);
-       if (rt)
-               dst_set_expires(&rt->dst, 0);
+       if (rt &&
+           rt->peer &&
+           rt->peer->pmtu_expires) {
+               unsigned long orig = rt->peer->pmtu_expires;
+
+               if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+                       dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+       }
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1764,8 +1709,17 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
        if (rt_is_output_route(rt))
                src = rt->rt_src;
        else {
+               struct flowi4 fl4 = {
+                       .daddr = rt->rt_key_dst,
+                       .saddr = rt->rt_key_src,
+                       .flowi4_tos = rt->rt_tos,
+                       .flowi4_oif = rt->rt_oif,
+                       .flowi4_iif = rt->rt_iif,
+                       .flowi4_mark = rt->rt_mark,
+               };
+
                rcu_read_lock();
-               if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
+               if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
                        src = FIB_RES_PREFSRC(res);
                else
                        src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
@@ -1775,7 +1729,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
        memcpy(addr, &src, 4);
 }
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
        if (!(rt->dst.tclassid & 0xFFFF))
@@ -1815,17 +1769,54 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
        return mtu;
 }
 
-static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
+static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
+                           struct fib_info *fi)
+{
+       struct inet_peer *peer;
+       int create = 0;
+
+       /* If a peer entry exists for this destination, we must hook
+        * it up in order to get at cached metrics.
+        */
+       if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
+               create = 1;
+
+       rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
+       if (peer) {
+               rt->rt_peer_genid = rt_peer_genid();
+               if (inet_metrics_new(peer))
+                       memcpy(peer->metrics, fi->fib_metrics,
+                              sizeof(u32) * RTAX_MAX);
+               dst_init_metrics(&rt->dst, peer->metrics, false);
+
+               if (peer->pmtu_expires)
+                       check_peer_pmtu(&rt->dst, peer);
+               if (peer->redirect_learned.a4 &&
+                   peer->redirect_learned.a4 != rt->rt_gateway) {
+                       rt->rt_gateway = peer->redirect_learned.a4;
+                       rt->rt_flags |= RTCF_REDIRECTED;
+               }
+       } else {
+               if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+                       rt->fi = fi;
+                       atomic_inc(&fi->fib_clntref);
+               }
+               dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+       }
+}
+
+static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
+                          const struct fib_result *res,
+                          struct fib_info *fi, u16 type, u32 itag)
 {
        struct dst_entry *dst = &rt->dst;
-       struct fib_info *fi = res->fi;
 
        if (fi) {
                if (FIB_RES_GW(*res) &&
                    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = FIB_RES_GW(*res);
-               dst_import_metrics(dst, fi->fib_metrics);
-#ifdef CONFIG_NET_CLS_ROUTE
+               rt_init_metrics(rt, oldflp4, fi);
+#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
        }
@@ -1835,13 +1826,26 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
        if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
                dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        set_class_tag(rt, fib_rules_tclass(res));
 #endif
        set_class_tag(rt, itag);
 #endif
-       rt->rt_type = res->type;
+       rt->rt_type = type;
+}
+
+static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
+{
+       struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
+       if (rt) {
+               rt->dst.obsolete = -1;
+
+               rt->dst.flags = DST_HOST |
+                       (nopolicy ? DST_NOPOLICY : 0) |
+                       (noxfrm ? DST_NOXFRM : 0);
+       }
+       return rt;
 }
 
 /* called in rcu_read_lock() section */
@@ -1874,31 +1878,25 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                if (err < 0)
                        goto e_err;
        }
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
        if (!rth)
                goto e_nobufs;
 
        rth->dst.output = ip_rt_bug;
-       rth->dst.obsolete = -1;
 
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-       rth->fl.fl4_dst = daddr;
+       rth->rt_key_dst = daddr;
        rth->rt_dst     = daddr;
-       rth->fl.fl4_tos = tos;
-       rth->fl.mark    = skb->mark;
-       rth->fl.fl4_src = saddr;
+       rth->rt_tos     = tos;
+       rth->rt_mark    = skb->mark;
+       rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
 #endif
-       rth->rt_iif     =
-       rth->fl.iif     = dev->ifindex;
+       rth->rt_iif     = dev->ifindex;
        rth->dst.dev    = init_net.loopback_dev;
        dev_hold(rth->dst.dev);
-       rth->fl.oif     = 0;
+       rth->rt_oif     = 0;
        rth->rt_gateway = daddr;
        rth->rt_spec_dst= spec_dst;
        rth->rt_genid   = rt_genid(dev_net(dev));
@@ -1916,7 +1914,10 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        RT_CACHE_STAT_INC(in_slow_mc);
 
        hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
-       return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
+       rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
+       err = 0;
+       if (IS_ERR(rth))
+               err = PTR_ERR(rth);
 
 e_nobufs:
        return -ENOBUFS;
@@ -1959,7 +1960,7 @@ static void ip_handle_martian_source(struct net_device *dev,
 
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
-                          struct fib_result *res,
+                          const struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos,
                           struct rtable **result)
@@ -2013,39 +2014,31 @@ static int __mkroute_input(struct sk_buff *skb,
                }
        }
 
-
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+                          IN_DEV_CONF_GET(out_dev, NOXFRM));
        if (!rth) {
                err = -ENOBUFS;
                goto cleanup;
        }
 
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-       if (IN_DEV_CONF_GET(out_dev, NOXFRM))
-               rth->dst.flags |= DST_NOXFRM;
-       rth->fl.fl4_dst = daddr;
+       rth->rt_key_dst = daddr;
        rth->rt_dst     = daddr;
-       rth->fl.fl4_tos = tos;
-       rth->fl.mark    = skb->mark;
-       rth->fl.fl4_src = saddr;
+       rth->rt_tos     = tos;
+       rth->rt_mark    = skb->mark;
+       rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
        rth->rt_gateway = daddr;
-       rth->rt_iif     =
-               rth->fl.iif     = in_dev->dev->ifindex;
+       rth->rt_iif     = in_dev->dev->ifindex;
        rth->dst.dev    = (out_dev)->dev;
        dev_hold(rth->dst.dev);
-       rth->fl.oif     = 0;
+       rth->rt_oif     = 0;
        rth->rt_spec_dst= spec_dst;
 
-       rth->dst.obsolete = -1;
        rth->dst.input = ip_forward;
        rth->dst.output = ip_output;
        rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 
-       rt_set_nexthop(rth, res, itag);
+       rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
 
        rth->rt_flags = flags;
 
@@ -2057,7 +2050,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
 static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
-                           const struct flowi *fl,
+                           const struct flowi4 *fl4,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos)
 {
@@ -2066,8 +2059,8 @@ static int ip_mkroute_input(struct sk_buff *skb,
        unsigned hash;
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
-               fib_select_multipath(fl, res);
+       if (res->fi && res->fi->fib_nhs > 1)
+               fib_select_multipath(res);
 #endif
 
        /* create a routing cache entry */
@@ -2076,9 +2069,12 @@ static int ip_mkroute_input(struct sk_buff *skb,
                return err;
 
        /* put it into the cache */
-       hash = rt_hash(daddr, saddr, fl->iif,
+       hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
                       rt_genid(dev_net(rth->dst.dev)));
-       return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
+       rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
+       if (IS_ERR(rth))
+               return PTR_ERR(rth);
+       return 0;
 }
 
 /*
@@ -2097,12 +2093,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
        struct fib_result res;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct flowi fl = { .fl4_dst    = daddr,
-                           .fl4_src    = saddr,
-                           .fl4_tos    = tos,
-                           .fl4_scope  = RT_SCOPE_UNIVERSE,
-                           .mark = skb->mark,
-                           .iif = dev->ifindex };
+       struct flowi4   fl4;
        unsigned        flags = 0;
        u32             itag = 0;
        struct rtable * rth;
@@ -2139,7 +2130,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        /*
         *      Now we are ready to route packet.
         */
-       err = fib_lookup(net, &fl, &res);
+       fl4.flowi4_oif = 0;
+       fl4.flowi4_iif = dev->ifindex;
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_tos = tos;
+       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.daddr = daddr;
+       fl4.saddr = saddr;
+       err = fib_lookup(net, &fl4, &res);
        if (err != 0) {
                if (!IN_DEV_FORWARD(in_dev))
                        goto e_hostunreach;
@@ -2168,7 +2166,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        if (res.type != RTN_UNICAST)
                goto martian_destination;
 
-       err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
+       err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
 out:   return err;
 
 brd_input:
@@ -2190,29 +2188,23 @@ brd_input:
        RT_CACHE_STAT_INC(in_brd);
 
 local_input:
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
        if (!rth)
                goto e_nobufs;
 
        rth->dst.output= ip_rt_bug;
-       rth->dst.obsolete = -1;
        rth->rt_genid = rt_genid(net);
 
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-       rth->fl.fl4_dst = daddr;
+       rth->rt_key_dst = daddr;
        rth->rt_dst     = daddr;
-       rth->fl.fl4_tos = tos;
-       rth->fl.mark    = skb->mark;
-       rth->fl.fl4_src = saddr;
+       rth->rt_tos     = tos;
+       rth->rt_mark    = skb->mark;
+       rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
 #endif
-       rth->rt_iif     =
-       rth->fl.iif     = dev->ifindex;
+       rth->rt_iif     = dev->ifindex;
        rth->dst.dev    = net->loopback_dev;
        dev_hold(rth->dst.dev);
        rth->rt_gateway = daddr;
@@ -2225,8 +2217,11 @@ local_input:
                rth->rt_flags   &= ~RTCF_LOCAL;
        }
        rth->rt_type    = res.type;
-       hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
-       err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
+       hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
+       rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
+       err = 0;
+       if (IS_ERR(rth))
+               err = PTR_ERR(rth);
        goto out;
 
 no_route:
@@ -2288,12 +2283,12 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
             rth = rcu_dereference(rth->dst.rt_next)) {
-               if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
-                    ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
-                    (rth->fl.iif ^ iif) |
-                    rth->fl.oif |
-                    (rth->fl.fl4_tos ^ tos)) == 0 &&
-                   rth->fl.mark == skb->mark &&
+               if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
+                    ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
+                    (rth->rt_iif ^ iif) |
+                    rth->rt_oif |
+                    (rth->rt_tos ^ tos)) == 0 &&
+                   rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
                        if (noref) {
@@ -2326,8 +2321,8 @@ skip_cache:
                struct in_device *in_dev = __in_dev_get_rcu(dev);
 
                if (in_dev) {
-                       int our = ip_check_mc(in_dev, daddr, saddr,
-                                             ip_hdr(skb)->protocol);
+                       int our = ip_check_mc_rcu(in_dev, daddr, saddr,
+                                                 ip_hdr(skb)->protocol);
                        if (our
 #ifdef CONFIG_IP_MROUTE
                                ||
@@ -2351,98 +2346,91 @@ skip_cache:
 EXPORT_SYMBOL(ip_route_input_common);
 
 /* called with rcu_read_lock() */
-static int __mkroute_output(struct rtable **result,
-                           struct fib_result *res,
-                           const struct flowi *fl,
-                           const struct flowi *oldflp,
-                           struct net_device *dev_out,
-                           unsigned flags)
+static struct rtable *__mkroute_output(const struct fib_result *res,
+                                      const struct flowi4 *fl4,
+                                      const struct flowi4 *oldflp4,
+                                      struct net_device *dev_out,
+                                      unsigned int flags)
 {
-       struct rtable *rth;
+       struct fib_info *fi = res->fi;
+       u32 tos = RT_FL_TOS(oldflp4);
        struct in_device *in_dev;
-       u32 tos = RT_FL_TOS(oldflp);
+       u16 type = res->type;
+       struct rtable *rth;
 
-       if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
-               return -EINVAL;
+       if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+               return ERR_PTR(-EINVAL);
 
-       if (ipv4_is_lbcast(fl->fl4_dst))
-               res->type = RTN_BROADCAST;
-       else if (ipv4_is_multicast(fl->fl4_dst))
-               res->type = RTN_MULTICAST;
-       else if (ipv4_is_zeronet(fl->fl4_dst))
-               return -EINVAL;
+       if (ipv4_is_lbcast(fl4->daddr))
+               type = RTN_BROADCAST;
+       else if (ipv4_is_multicast(fl4->daddr))
+               type = RTN_MULTICAST;
+       else if (ipv4_is_zeronet(fl4->daddr))
+               return ERR_PTR(-EINVAL);
 
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
        in_dev = __in_dev_get_rcu(dev_out);
        if (!in_dev)
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
-       if (res->type == RTN_BROADCAST) {
+       if (type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
-               res->fi = NULL;
-       } else if (res->type == RTN_MULTICAST) {
+               fi = NULL;
+       } else if (type == RTN_MULTICAST) {
                flags |= RTCF_MULTICAST | RTCF_LOCAL;
-               if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
-                                oldflp->proto))
+               if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr,
+                                    oldflp4->flowi4_proto))
                        flags &= ~RTCF_LOCAL;
                /* If a multicast route does not exist, use the
                 * default one, but do not gateway in this case.
                 * Yes, it is a hack.
                 */
-               if (res->fi && res->prefixlen < 4)
-                       res->fi = NULL;
+               if (fi && res->prefixlen < 4)
+                       fi = NULL;
        }
 
-
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+                          IN_DEV_CONF_GET(in_dev, NOXFRM));
        if (!rth)
-               return -ENOBUFS;
-
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOXFRM))
-               rth->dst.flags |= DST_NOXFRM;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-
-       rth->fl.fl4_dst = oldflp->fl4_dst;
-       rth->fl.fl4_tos = tos;
-       rth->fl.fl4_src = oldflp->fl4_src;
-       rth->fl.oif     = oldflp->oif;
-       rth->fl.mark    = oldflp->mark;
-       rth->rt_dst     = fl->fl4_dst;
-       rth->rt_src     = fl->fl4_src;
-       rth->rt_iif     = oldflp->oif ? : dev_out->ifindex;
+               return ERR_PTR(-ENOBUFS);
+
+       rth->rt_key_dst = oldflp4->daddr;
+       rth->rt_tos     = tos;
+       rth->rt_key_src = oldflp4->saddr;
+       rth->rt_oif     = oldflp4->flowi4_oif;
+       rth->rt_mark    = oldflp4->flowi4_mark;
+       rth->rt_dst     = fl4->daddr;
+       rth->rt_src     = fl4->saddr;
+       rth->rt_iif     = 0;
        /* get references to the devices that are to be hold by the routing
           cache entry */
        rth->dst.dev    = dev_out;
        dev_hold(dev_out);
-       rth->rt_gateway = fl->fl4_dst;
-       rth->rt_spec_dst= fl->fl4_src;
+       rth->rt_gateway = fl4->daddr;
+       rth->rt_spec_dst= fl4->saddr;
 
        rth->dst.output=ip_output;
-       rth->dst.obsolete = -1;
        rth->rt_genid = rt_genid(dev_net(dev_out));
 
        RT_CACHE_STAT_INC(out_slow_tot);
 
        if (flags & RTCF_LOCAL) {
                rth->dst.input = ip_local_deliver;
-               rth->rt_spec_dst = fl->fl4_dst;
+               rth->rt_spec_dst = fl4->daddr;
        }
        if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
-               rth->rt_spec_dst = fl->fl4_src;
+               rth->rt_spec_dst = fl4->saddr;
                if (flags & RTCF_LOCAL &&
                    !(dev_out->flags & IFF_LOOPBACK)) {
                        rth->dst.output = ip_mc_output;
                        RT_CACHE_STAT_INC(out_slow_mc);
                }
 #ifdef CONFIG_IP_MROUTE
-               if (res->type == RTN_MULTICAST) {
+               if (type == RTN_MULTICAST) {
                        if (IN_DEV_MFORWARD(in_dev) &&
-                           !ipv4_is_local_multicast(oldflp->fl4_dst)) {
+                           !ipv4_is_local_multicast(oldflp4->daddr)) {
                                rth->dst.input = ip_mr_input;
                                rth->dst.output = ip_mc_output;
                        }
@@ -2450,31 +2438,10 @@ static int __mkroute_output(struct rtable **result,
 #endif
        }
 
-       rt_set_nexthop(rth, res, 0);
+       rt_set_nexthop(rth, oldflp4, res, fi, type, 0);
 
        rth->rt_flags = flags;
-       *result = rth;
-       return 0;
-}
-
-/* called with rcu_read_lock() */
-static int ip_mkroute_output(struct rtable **rp,
-                            struct fib_result *res,
-                            const struct flowi *fl,
-                            const struct flowi *oldflp,
-                            struct net_device *dev_out,
-                            unsigned flags)
-{
-       struct rtable *rth = NULL;
-       int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
-       unsigned hash;
-       if (err == 0) {
-               hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
-                              rt_genid(dev_net(dev_out)));
-               err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
-       }
-
-       return err;
+       return rth;
 }
 
 /*
@@ -2482,34 +2449,36 @@ static int ip_mkroute_output(struct rtable **rp,
  * called with rcu_read_lock();
  */
 
-static int ip_route_output_slow(struct net *net, struct rtable **rp,
-                               const struct flowi *oldflp)
-{
-       u32 tos = RT_FL_TOS(oldflp);
-       struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
-                           .fl4_src = oldflp->fl4_src,
-                           .fl4_tos = tos & IPTOS_RT_MASK,
-                           .fl4_scope = ((tos & RTO_ONLINK) ?
-                                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
-                           .mark = oldflp->mark,
-                           .iif = net->loopback_dev->ifindex,
-                           .oif = oldflp->oif };
+static struct rtable *ip_route_output_slow(struct net *net,
+                                          const struct flowi4 *oldflp4)
+{
+       u32 tos = RT_FL_TOS(oldflp4);
+       struct flowi4 fl4;
        struct fib_result res;
        unsigned int flags = 0;
        struct net_device *dev_out = NULL;
-       int err;
-
+       struct rtable *rth;
 
        res.fi          = NULL;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        res.r           = NULL;
 #endif
 
-       if (oldflp->fl4_src) {
-               err = -EINVAL;
-               if (ipv4_is_multicast(oldflp->fl4_src) ||
-                   ipv4_is_lbcast(oldflp->fl4_src) ||
-                   ipv4_is_zeronet(oldflp->fl4_src))
+       fl4.flowi4_oif = oldflp4->flowi4_oif;
+       fl4.flowi4_iif = net->loopback_dev->ifindex;
+       fl4.flowi4_mark = oldflp4->flowi4_mark;
+       fl4.daddr = oldflp4->daddr;
+       fl4.saddr = oldflp4->saddr;
+       fl4.flowi4_tos = tos & IPTOS_RT_MASK;
+       fl4.flowi4_scope = ((tos & RTO_ONLINK) ?
+                       RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
+
+       rcu_read_lock();
+       if (oldflp4->saddr) {
+               rth = ERR_PTR(-EINVAL);
+               if (ipv4_is_multicast(oldflp4->saddr) ||
+                   ipv4_is_lbcast(oldflp4->saddr) ||
+                   ipv4_is_zeronet(oldflp4->saddr))
                        goto out;
 
                /* I removed check for oif == dev_out->oif here.
@@ -2520,11 +2489,11 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                      of another iface. --ANK
                 */
 
-               if (oldflp->oif == 0 &&
-                   (ipv4_is_multicast(oldflp->fl4_dst) ||
-                    ipv4_is_lbcast(oldflp->fl4_dst))) {
+               if (oldflp4->flowi4_oif == 0 &&
+                   (ipv4_is_multicast(oldflp4->daddr) ||
+                    ipv4_is_lbcast(oldflp4->daddr))) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
-                       dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
+                       dev_out = __ip_dev_find(net, oldflp4->saddr, false);
                        if (dev_out == NULL)
                                goto out;
 
@@ -2543,60 +2512,60 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                           Luckily, this hack is good workaround.
                         */
 
-                       fl.oif = dev_out->ifindex;
+                       fl4.flowi4_oif = dev_out->ifindex;
                        goto make_route;
                }
 
-               if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
+               if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
-                       if (!__ip_dev_find(net, oldflp->fl4_src, false))
+                       if (!__ip_dev_find(net, oldflp4->saddr, false))
                                goto out;
                }
        }
 
 
-       if (oldflp->oif) {
-               dev_out = dev_get_by_index_rcu(net, oldflp->oif);
-               err = -ENODEV;
+       if (oldflp4->flowi4_oif) {
+               dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif);
+               rth = ERR_PTR(-ENODEV);
                if (dev_out == NULL)
                        goto out;
 
                /* RACE: Check return value of inet_select_addr instead. */
                if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
-                       err = -ENETUNREACH;
+                       rth = ERR_PTR(-ENETUNREACH);
                        goto out;
                }
-               if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
-                   ipv4_is_lbcast(oldflp->fl4_dst)) {
-                       if (!fl.fl4_src)
-                               fl.fl4_src = inet_select_addr(dev_out, 0,
-                                                             RT_SCOPE_LINK);
+               if (ipv4_is_local_multicast(oldflp4->daddr) ||
+                   ipv4_is_lbcast(oldflp4->daddr)) {
+                       if (!fl4.saddr)
+                               fl4.saddr = inet_select_addr(dev_out, 0,
+                                                            RT_SCOPE_LINK);
                        goto make_route;
                }
-               if (!fl.fl4_src) {
-                       if (ipv4_is_multicast(oldflp->fl4_dst))
-                               fl.fl4_src = inet_select_addr(dev_out, 0,
-                                                             fl.fl4_scope);
-                       else if (!oldflp->fl4_dst)
-                               fl.fl4_src = inet_select_addr(dev_out, 0,
-                                                             RT_SCOPE_HOST);
+               if (!fl4.saddr) {
+                       if (ipv4_is_multicast(oldflp4->daddr))
+                               fl4.saddr = inet_select_addr(dev_out, 0,
+                                                            fl4.flowi4_scope);
+                       else if (!oldflp4->daddr)
+                               fl4.saddr = inet_select_addr(dev_out, 0,
+                                                            RT_SCOPE_HOST);
                }
        }
 
-       if (!fl.fl4_dst) {
-               fl.fl4_dst = fl.fl4_src;
-               if (!fl.fl4_dst)
-                       fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
+       if (!fl4.daddr) {
+               fl4.daddr = fl4.saddr;
+               if (!fl4.daddr)
+                       fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK);
                dev_out = net->loopback_dev;
-               fl.oif = net->loopback_dev->ifindex;
+               fl4.flowi4_oif = net->loopback_dev->ifindex;
                res.type = RTN_LOCAL;
                flags |= RTCF_LOCAL;
                goto make_route;
        }
 
-       if (fib_lookup(net, &fl, &res)) {
+       if (fib_lookup(net, &fl4, &res)) {
                res.fi = NULL;
-               if (oldflp->oif) {
+               if (oldflp4->flowi4_oif) {
                        /* Apparently, routing tables are wrong. Assume,
                           that the destination is on link.
 
@@ -2615,90 +2584,93 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                           likely IPv6, but we do not.
                         */
 
-                       if (fl.fl4_src == 0)
-                               fl.fl4_src = inet_select_addr(dev_out, 0,
-                                                             RT_SCOPE_LINK);
+                       if (fl4.saddr == 0)
+                               fl4.saddr = inet_select_addr(dev_out, 0,
+                                                            RT_SCOPE_LINK);
                        res.type = RTN_UNICAST;
                        goto make_route;
                }
-               err = -ENETUNREACH;
+               rth = ERR_PTR(-ENETUNREACH);
                goto out;
        }
 
        if (res.type == RTN_LOCAL) {
-               if (!fl.fl4_src) {
+               if (!fl4.saddr) {
                        if (res.fi->fib_prefsrc)
-                               fl.fl4_src = res.fi->fib_prefsrc;
+                               fl4.saddr = res.fi->fib_prefsrc;
                        else
-                               fl.fl4_src = fl.fl4_dst;
+                               fl4.saddr = fl4.daddr;
                }
                dev_out = net->loopback_dev;
-               fl.oif = dev_out->ifindex;
+               fl4.flowi4_oif = dev_out->ifindex;
                res.fi = NULL;
                flags |= RTCF_LOCAL;
                goto make_route;
        }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (res.fi->fib_nhs > 1 && fl.oif == 0)
-               fib_select_multipath(&fl, &res);
+       if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0)
+               fib_select_multipath(&res);
        else
 #endif
-       if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
-               fib_select_default(net, &fl, &res);
+       if (!res.prefixlen && res.type == RTN_UNICAST && !fl4.flowi4_oif)
+               fib_select_default(&res);
 
-       if (!fl.fl4_src)
-               fl.fl4_src = FIB_RES_PREFSRC(res);
+       if (!fl4.saddr)
+               fl4.saddr = FIB_RES_PREFSRC(res);
 
        dev_out = FIB_RES_DEV(res);
-       fl.oif = dev_out->ifindex;
+       fl4.flowi4_oif = dev_out->ifindex;
 
 
 make_route:
-       err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
+       rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags);
+       if (!IS_ERR(rth)) {
+               unsigned int hash;
 
-out:   return err;
+               hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif,
+                              rt_genid(dev_net(dev_out)));
+               rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif);
+       }
+
+out:
+       rcu_read_unlock();
+       return rth;
 }
 
-int __ip_route_output_key(struct net *net, struct rtable **rp,
-                         const struct flowi *flp)
+struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
 {
-       unsigned int hash;
-       int res;
        struct rtable *rth;
+       unsigned int hash;
 
        if (!rt_caching(net))
                goto slow_output;
 
-       hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
+       hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
 
        rcu_read_lock_bh();
        for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
                rth = rcu_dereference_bh(rth->dst.rt_next)) {
-               if (rth->fl.fl4_dst == flp->fl4_dst &&
-                   rth->fl.fl4_src == flp->fl4_src &&
+               if (rth->rt_key_dst == flp4->daddr &&
+                   rth->rt_key_src == flp4->saddr &&
                    rt_is_output_route(rth) &&
-                   rth->fl.oif == flp->oif &&
-                   rth->fl.mark == flp->mark &&
-                   !((rth->fl.fl4_tos ^ flp->fl4_tos) &
+                   rth->rt_oif == flp4->flowi4_oif &&
+                   rth->rt_mark == flp4->flowi4_mark &&
+                   !((rth->rt_tos ^ flp4->flowi4_tos) &
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
-                       *rp = rth;
-                       return 0;
+                       return rth;
                }
                RT_CACHE_STAT_INC(out_hlist_search);
        }
        rcu_read_unlock_bh();
 
 slow_output:
-       rcu_read_lock();
-       res = ip_route_output_slow(net, rp, flp);
-       rcu_read_unlock();
-       return res;
+       return ip_route_output_slow(net, flp4);
 }
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
@@ -2707,6 +2679,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
        return NULL;
 }
 
+static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+{
+       return 0;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2716,20 +2693,19 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
        .check                  =       ipv4_blackhole_dst_check,
+       .default_mtu            =       ipv4_blackhole_default_mtu,
+       .default_advmss         =       ipv4_default_advmss,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
 };
 
-
-static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
+struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
-       struct rtable *ort = *rp;
-       struct rtable *rt = (struct rtable *)
-               dst_alloc(&ipv4_dst_blackhole_ops);
+       struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
+       struct rtable *ort = (struct rtable *) dst_orig;
 
        if (rt) {
                struct dst_entry *new = &rt->dst;
 
-               atomic_set(&new->__refcnt, 1);
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard;
@@ -2739,7 +2715,12 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
                if (new->dev)
                        dev_hold(new->dev);
 
-               rt->fl = ort->fl;
+               rt->rt_key_dst = ort->rt_key_dst;
+               rt->rt_key_src = ort->rt_key_src;
+               rt->rt_tos = ort->rt_tos;
+               rt->rt_iif = ort->rt_iif;
+               rt->rt_oif = ort->rt_oif;
+               rt->rt_mark = ort->rt_mark;
 
                rt->rt_genid = rt_genid(net);
                rt->rt_flags = ort->rt_flags;
@@ -2752,46 +2733,40 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
                rt->peer = ort->peer;
                if (rt->peer)
                        atomic_inc(&rt->peer->refcnt);
+               rt->fi = ort->fi;
+               if (rt->fi)
+                       atomic_inc(&rt->fi->fib_clntref);
 
                dst_free(new);
        }
 
-       dst_release(&(*rp)->dst);
-       *rp = rt;
-       return rt ? 0 : -ENOMEM;
+       dst_release(dst_orig);
+
+       return rt ? &rt->dst : ERR_PTR(-ENOMEM);
 }
 
-int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
-                        struct sock *sk, int flags)
+struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
+                                   struct sock *sk)
 {
-       int err;
+       struct rtable *rt = __ip_route_output_key(net, flp4);
 
-       if ((err = __ip_route_output_key(net, rp, flp)) != 0)
-               return err;
+       if (IS_ERR(rt))
+               return rt;
 
-       if (flp->proto) {
-               if (!flp->fl4_src)
-                       flp->fl4_src = (*rp)->rt_src;
-               if (!flp->fl4_dst)
-                       flp->fl4_dst = (*rp)->rt_dst;
-               err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
-                                   flags ? XFRM_LOOKUP_WAIT : 0);
-               if (err == -EREMOTE)
-                       err = ipv4_dst_blackhole(net, rp, flp);
-
-               return err;
+       if (flp4->flowi4_proto) {
+               if (!flp4->saddr)
+                       flp4->saddr = rt->rt_src;
+               if (!flp4->daddr)
+                       flp4->daddr = rt->rt_dst;
+               rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
+                                                  flowi4_to_flowi(flp4),
+                                                  sk, 0);
        }
 
-       return 0;
+       return rt;
 }
 EXPORT_SYMBOL_GPL(ip_route_output_flow);
 
-int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
-{
-       return ip_route_output_flow(net, rp, flp, NULL, 0);
-}
-EXPORT_SYMBOL(ip_route_output_key);
-
 static int rt_fill_info(struct net *net,
                        struct sk_buff *skb, u32 pid, u32 seq, int event,
                        int nowait, unsigned int flags)
@@ -2810,7 +2785,7 @@ static int rt_fill_info(struct net *net,
        r->rtm_family    = AF_INET;
        r->rtm_dst_len  = 32;
        r->rtm_src_len  = 0;
-       r->rtm_tos      = rt->fl.fl4_tos;
+       r->rtm_tos      = rt->rt_tos;
        r->rtm_table    = RT_TABLE_MAIN;
        NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
        r->rtm_type     = rt->rt_type;
@@ -2822,19 +2797,19 @@ static int rt_fill_info(struct net *net,
 
        NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
 
-       if (rt->fl.fl4_src) {
+       if (rt->rt_key_src) {
                r->rtm_src_len = 32;
-               NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
+               NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
        }
        if (rt->dst.dev)
                NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (rt->dst.tclassid)
                NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
        if (rt_is_input_route(rt))
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
-       else if (rt->rt_src != rt->fl.fl4_src)
+       else if (rt->rt_src != rt->rt_key_src)
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
 
        if (rt->rt_dst != rt->rt_gateway)
@@ -2843,11 +2818,12 @@ static int rt_fill_info(struct net *net,
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
-       if (rt->fl.mark)
-               NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
+       if (rt->rt_mark)
+               NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
 
        error = rt->dst.error;
-       expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+       expires = (rt->peer && rt->peer->pmtu_expires) ?
+               rt->peer->pmtu_expires - jiffies : 0;
        if (rt->peer) {
                inet_peer_refcheck(rt->peer);
                id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
@@ -2877,7 +2853,7 @@ static int rt_fill_info(struct net *net,
                        }
                } else
 #endif
-                       NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
+                       NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
        }
 
        if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -2951,14 +2927,18 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                if (err == 0 && rt->dst.error)
                        err = -rt->dst.error;
        } else {
-               struct flowi fl = {
-                       .fl4_dst = dst,
-                       .fl4_src = src,
-                       .fl4_tos = rtm->rtm_tos,
-                       .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
-                       .mark = mark,
+               struct flowi4 fl4 = {
+                       .daddr = dst,
+                       .saddr = src,
+                       .flowi4_tos = rtm->rtm_tos,
+                       .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
+                       .flowi4_mark = mark,
                };
-               err = ip_route_output_key(net, &rt, &fl);
+               rt = ip_route_output_key(net, &fl4);
+
+               err = 0;
+               if (IS_ERR(rt))
+                       err = PTR_ERR(rt);
        }
 
        if (err)
@@ -3249,9 +3229,9 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
 };
 
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_NET_CLS_ROUTE */
+#endif /* CONFIG_IP_ROUTE_CLASSID */
 
 static __initdata unsigned long rhash_entries;
 static int __init set_rhash_entries(char *str)
@@ -3267,7 +3247,7 @@ int __init ip_rt_init(void)
 {
        int rc = 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
        if (!ip_rt_acct)
                panic("IP: failed to allocate ip_rt_acct\n");
@@ -3304,14 +3284,6 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
-       /* All the timers, started at system startup tend
-          to synchronize. Perturb it a bit.
-        */
-       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
-       expires_ljiffies = jiffies;
-       schedule_delayed_work(&expires_work,
-               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
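
The route.c hunks above replace the old int-returning output lookups that filled a caller-supplied **rp with functions that hand back a struct rtable * directly and encode failures as ERR_PTR values, keyed by a struct flowi4 instead of the generic struct flowi. A minimal caller sketch of that convention, with placeholder addresses and the usual helpers (RT_TOS, IS_ERR, PTR_ERR, ip_rt_put) assumed:

        struct flowi4 fl4 = {
                .daddr      = daddr,            /* destination, __be32            */
                .saddr      = saddr,            /* may be 0: let routing choose   */
                .flowi4_tos = RT_TOS(tos),
                .flowi4_oif = oif,
        };
        struct rtable *rt;

        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);             /* e.g. -ENETUNREACH, -EINVAL     */
        /* ... use the route ... */
        ip_rt_put(rt);
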
index 4751920..8b44c6d 100644 (file)
@@ -345,17 +345,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * no easy way to do this.
         */
        {
-               struct flowi fl = { .mark = sk->sk_mark,
-                                   .fl4_dst = ((opt && opt->srr) ?
-                                               opt->faddr : ireq->rmt_addr),
-                                   .fl4_src = ireq->loc_addr,
-                                   .fl4_tos = RT_CONN_FLAGS(sk),
-                                   .proto = IPPROTO_TCP,
-                                   .flags = inet_sk_flowi_flags(sk),
-                                   .fl_ip_sport = th->dest,
-                                   .fl_ip_dport = th->source };
-               security_req_classify_flow(req, &fl);
-               if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
+               struct flowi4 fl4 = {
+                       .flowi4_mark = sk->sk_mark,
+                       .daddr = ((opt && opt->srr) ?
+                                 opt->faddr : ireq->rmt_addr),
+                       .saddr = ireq->loc_addr,
+                       .flowi4_tos = RT_CONN_FLAGS(sk),
+                       .flowi4_proto = IPPROTO_TCP,
+                       .flowi4_flags = inet_sk_flowi_flags(sk),
+                       .fl4_sport = th->dest,
+                       .fl4_dport = th->source,
+               };
+               security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+               rt = ip_route_output_key(sock_net(sk), &fl4);
+               if (IS_ERR(rt)) {
                        reqsk_free(req);
                        goto out;
                }
index 6c11eec..b22d450 100644 (file)
@@ -505,6 +505,15 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
+       case SIOCOUTQNSD:
+               if (sk->sk_state == TCP_LISTEN)
+                       return -EINVAL;
+
+               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+                       answ = 0;
+               else
+                       answ = tp->write_seq - tp->snd_nxt;
+               break;
        default:
                return -ENOIOCTLCMD;
        }
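
The hunk above adds a SIOCOUTQNSD ioctl reporting data queued in the socket but not yet sent to the network (write_seq - snd_nxt), complementing SIOCOUTQ, which also counts sent-but-unacknowledged data. A hedged userspace sketch, assuming the constant is exported via <linux/sockios.h>:

        #include <sys/ioctl.h>
        #include <linux/sockios.h>

        /* Bytes written by the application but not yet pushed to the network,
         * or -1 on error (e.g. EINVAL on a listening socket). */
        static long tcp_unsent_bytes(int fd)
        {
                int unsent = 0;

                if (ioctl(fd, SIOCOUTQNSD, &unsent) < 0)
                        return -1;
                return unsent;
        }
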
@@ -873,9 +882,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
                                        flags);
 
        lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
        res = do_tcp_sendpages(sk, &page, offset, size, flags);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return res;
 }
@@ -916,7 +923,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        long timeo;
 
        lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
 
        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1104,7 +1110,6 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
@@ -1123,7 +1128,6 @@ do_error:
                goto out;
 out_err:
        err = sk_stream_error(sk, flags, err);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
 }
@@ -1415,8 +1419,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        lock_sock(sk);
 
-       TCP_CHECK_TIMER(sk);
-
        err = -ENOTCONN;
        if (sk->sk_state == TCP_LISTEN)
                goto out;
@@ -1767,12 +1769,10 @@ skip_copy:
        /* Clean up data we have read: This will do ACK frames. */
        tcp_cleanup_rbuf(sk, copied);
 
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
 out:
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
 
@@ -2653,7 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
index 3b53fd1..6187eb4 100644 (file)
@@ -209,7 +209,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
 }
 
 
-static struct tcp_congestion_ops bictcp = {
+static struct tcp_congestion_ops bictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
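
This hunk, and the matching ones in the other congestion-control modules below, only add __read_mostly to the tcp_congestion_ops definitions. Assuming the usual kernel semantics, that annotation places the object in the read-mostly data section so these rarely written ops tables do not share cache lines with frequently written data. The pattern, sketched with the reno helpers purely for illustration:

        static struct tcp_congestion_ops example_cong_ops __read_mostly = {
                .ssthresh   = tcp_reno_ssthresh,
                .cong_avoid = tcp_reno_cong_avoid,
                .min_cwnd   = tcp_reno_min_cwnd,
                .owner      = THIS_MODULE,
                .name       = "example",
        };
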
index 71d5f2f..62f775c 100644 (file)
@@ -405,7 +405,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                hystart_update(sk, delay);
 }
 
-static struct tcp_congestion_ops cubictcp = {
+static struct tcp_congestion_ops cubictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
index 8b6caaf..30f27f6 100644 (file)
@@ -158,7 +158,7 @@ static u32 hstcp_ssthresh(struct sock *sk)
 }
 
 
-static struct tcp_congestion_ops tcp_highspeed = {
+static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
        .init           = hstcp_init,
        .ssthresh       = hstcp_ssthresh,
        .cong_avoid     = hstcp_cong_avoid,
index 7c94a49..c1a8175 100644 (file)
@@ -284,7 +284,7 @@ static void htcp_state(struct sock *sk, u8 new_state)
        }
 }
 
-static struct tcp_congestion_ops htcp = {
+static struct tcp_congestion_ops htcp __read_mostly = {
        .init           = htcp_init,
        .ssthresh       = htcp_recalc_ssthresh,
        .cong_avoid     = htcp_cong_avoid,
index 377bc93..fe3ecf4 100644 (file)
@@ -162,7 +162,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
        tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 
-static struct tcp_congestion_ops tcp_hybla = {
+static struct tcp_congestion_ops tcp_hybla __read_mostly = {
        .init           = hybla_init,
        .ssthresh       = tcp_reno_ssthresh,
        .min_cwnd       = tcp_reno_min_cwnd,
index 00ca688..813b43a 100644 (file)
@@ -322,7 +322,7 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
        }
 }
 
-static struct tcp_congestion_ops tcp_illinois = {
+static struct tcp_congestion_ops tcp_illinois __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_illinois_init,
        .ssthresh       = tcp_illinois_ssthresh,
index 2549b29..08ea735 100644 (file)
@@ -817,7 +817,7 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
        if (!cwnd)
-               cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
+               cwnd = TCP_INIT_CWND;
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+               if (tp->undo_marker && tp->undo_retrans &&
+                   after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
                        state->reord = min(fack_count, state->reord);
@@ -4399,7 +4400,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                        if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
                                tp->ucopy.len -= chunk;
                                tp->copied_seq += chunk;
-                               eaten = (chunk == skb->len && !th->fin);
+                               eaten = (chunk == skb->len);
                                tcp_rcv_space_adjust(sk);
                        }
                        local_bh_disable();
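
The tcp_init_cwnd() hunk above drops the RFC 3390 MSS-based sizing in favour of a flat TCP_INIT_CWND, used whenever the route carries no RTAX_INITCWND metric and still clamped by snd_cwnd_clamp. For comparison, a sketch of the old mapping (assumed to match rfc3390_bytes_to_packets()) next to the new behaviour:

        /* Old: 2, 3 or 4 initial segments depending on MSS (RFC 3390). */
        static inline u32 old_initial_cwnd(u32 mss)
        {
                return mss <= 1095 ? 4 : (mss <= 2190 ? 3 : 2);
        }
        /* New: a fixed TCP_INIT_CWND segments, regardless of MSS. */
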
index 856f684..f7e6c2c 100644 (file)
@@ -149,9 +149,9 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
+       __be16 orig_sport, orig_dport;
        struct rtable *rt;
        __be32 daddr, nexthop;
-       int tmp;
        int err;
 
        if (addr_len < sizeof(struct sockaddr_in))
@@ -167,14 +167,17 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                nexthop = inet->opt->faddr;
        }
 
-       tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
-                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
-                              IPPROTO_TCP,
-                              inet->inet_sport, usin->sin_port, sk, 1);
-       if (tmp < 0) {
-               if (tmp == -ENETUNREACH)
+       orig_sport = inet->inet_sport;
+       orig_dport = usin->sin_port;
+       rt = ip_route_connect(nexthop, inet->inet_saddr,
+                             RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
+                             IPPROTO_TCP,
+                             orig_sport, orig_dport, sk, true);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
+               if (err == -ENETUNREACH)
                        IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
-               return tmp;
+               return err;
        }
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
@@ -233,11 +236,14 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err)
                goto failure;
 
-       err = ip_route_newports(&rt, IPPROTO_TCP,
-                               inet->inet_sport, inet->inet_dport, sk);
-       if (err)
+       rt = ip_route_newports(rt, IPPROTO_TCP,
+                              orig_sport, orig_dport,
+                              inet->inet_sport, inet->inet_dport, sk);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
+               rt = NULL;
                goto failure;
-
+       }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
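
The tcp_v4_connect() hunks above illustrate the new two-step connect-time routing: ip_route_connect() returns the route (or an ERR_PTR) while the local port may still be unknown, and ip_route_newports() later re-keys the cached route with the committed ports, again returning a route or an error. A condensed sketch of that calling pattern, with error handling reduced to the essentials:

        rt = ip_route_connect(nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk),
                              sk->sk_bound_dev_if, IPPROTO_TCP,
                              orig_sport, orig_dport, sk, true);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        /* ... hash the socket, which may pick an ephemeral source port ... */

        rt = ip_route_newports(rt, IPPROTO_TCP, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
        sk_setup_caps(sk, &rt->dst);
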
@@ -1341,7 +1347,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-                   peer->daddr.a4 == saddr) {
+                   peer->daddr.addr.a4 == saddr) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1556,12 +1562,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                sock_rps_save_rxhash(sk, skb->rxhash);
-               TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
-               TCP_CHECK_TIMER(sk);
                return 0;
        }
 
@@ -1583,13 +1587,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        } else
                sock_rps_save_rxhash(sk, skb->rxhash);
 
-
-       TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
-       TCP_CHECK_TIMER(sk);
        return 0;
 
 reset:
@@ -1994,7 +1995,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
                                }
                                req = req->dl_next;
                        }
-                       st->offset = 0;
                        if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
                                break;
 get_req:
index de87037..656d431 100644 (file)
@@ -313,7 +313,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
        lp->last_drop = tcp_time_stamp;
 }
 
-static struct tcp_congestion_ops tcp_lp = {
+static struct tcp_congestion_ops tcp_lp __read_mostly = {
        .flags = TCP_CONG_RTT_STAMP,
        .init = tcp_lp_init,
        .ssthresh = tcp_reno_ssthresh,
index 406f320..dfa5beb 100644 (file)
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans++;
+               tp->undo_retrans += tcp_skb_pcount(skb);
 
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
index a765137..8ce55b8 100644 (file)
@@ -35,7 +35,7 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
 }
 
 
-static struct tcp_congestion_ops tcp_scalable = {
+static struct tcp_congestion_ops tcp_scalable __read_mostly = {
        .ssthresh       = tcp_scalable_ssthresh,
        .cong_avoid     = tcp_scalable_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
index 74a6aa0..ecd44b0 100644 (file)
@@ -259,7 +259,6 @@ static void tcp_delack_timer(unsigned long data)
                tcp_send_ack(sk);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
-       TCP_CHECK_TIMER(sk);
 
 out:
        if (tcp_memory_pressure)
@@ -481,7 +480,6 @@ static void tcp_write_timer(unsigned long data)
                tcp_probe_timer(sk);
                break;
        }
-       TCP_CHECK_TIMER(sk);
 
 out:
        sk_mem_reclaim(sk);
@@ -589,7 +587,6 @@ static void tcp_keepalive_timer (unsigned long data)
                elapsed = keepalive_time_when(tp) - elapsed;
        }
 
-       TCP_CHECK_TIMER(sk);
        sk_mem_reclaim(sk);
 
 resched:
index c6743ee..80fa2bf 100644 (file)
@@ -304,7 +304,7 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 
-static struct tcp_congestion_ops tcp_vegas = {
+static struct tcp_congestion_ops tcp_vegas __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_vegas_init,
        .ssthresh       = tcp_reno_ssthresh,
index 38bc0b5..ac43cd7 100644 (file)
@@ -201,7 +201,7 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
                return max(tp->snd_cwnd >> 1U, 2U);
 }
 
-static struct tcp_congestion_ops tcp_veno = {
+static struct tcp_congestion_ops tcp_veno __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_veno_init,
        .ssthresh       = tcp_veno_ssthresh,
index a534dda..1b91bf4 100644 (file)
@@ -272,7 +272,7 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
 }
 
 
-static struct tcp_congestion_ops tcp_westwood = {
+static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
index a0f2403..dc7f431 100644 (file)
@@ -225,7 +225,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
        return tp->snd_cwnd - reduction;
 }
 
-static struct tcp_congestion_ops tcp_yeah = {
+static struct tcp_congestion_ops tcp_yeah __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_yeah_init,
        .ssthresh       = tcp_yeah_ssthresh,
index 8157b17..588f47a 100644 (file)
@@ -663,75 +663,72 @@ void udp_flush_pending_frames(struct sock *sk)
 EXPORT_SYMBOL(udp_flush_pending_frames);
 
 /**
- *     udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
- *     @sk:    socket we are sending on
+ *     udp4_hwcsum  -  handle outgoing HW checksumming
  *     @skb:   sk_buff containing the filled-in UDP header
  *             (checksum field must be zeroed out)
+ *     @src:   source IP address
+ *     @dst:   destination IP address
  */
-static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
-                                __be32 src, __be32 dst, int len)
+static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
 {
-       unsigned int offset;
        struct udphdr *uh = udp_hdr(skb);
+       struct sk_buff *frags = skb_shinfo(skb)->frag_list;
+       int offset = skb_transport_offset(skb);
+       int len = skb->len - offset;
+       int hlen = len;
        __wsum csum = 0;
 
-       if (skb_queue_len(&sk->sk_write_queue) == 1) {
+       if (!frags) {
                /*
                 * Only one fragment on the socket.
                 */
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
-               uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
+               uh->check = ~csum_tcpudp_magic(src, dst, len,
+                                              IPPROTO_UDP, 0);
        } else {
                /*
                 * HW-checksum won't work as there are two or more
                 * fragments on the socket so that all csums of sk_buffs
                 * should be together
                 */
-               offset = skb_transport_offset(skb);
-               skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+               do {
+                       csum = csum_add(csum, frags->csum);
+                       hlen -= frags->len;
+               } while ((frags = frags->next));
 
+               csum = skb_checksum(skb, offset, hlen, csum);
                skb->ip_summed = CHECKSUM_NONE;
 
-               skb_queue_walk(&sk->sk_write_queue, skb) {
-                       csum = csum_add(csum, skb->csum);
-               }
-
                uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
 }
 
-/*
- * Push out all pending data as one UDP datagram. Socket is locked.
- */
-static int udp_push_pending_frames(struct sock *sk)
+static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
 {
-       struct udp_sock  *up = udp_sk(sk);
+       struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
-       struct flowi *fl = &inet->cork.fl;
-       struct sk_buff *skb;
        struct udphdr *uh;
+       struct rtable *rt = (struct rtable *)skb_dst(skb);
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
+       int offset = skb_transport_offset(skb);
+       int len = skb->len - offset;
        __wsum csum = 0;
 
-       /* Grab the skbuff where UDP header space exists. */
-       if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
-               goto out;
-
        /*
         * Create a UDP header
         */
        uh = udp_hdr(skb);
-       uh->source = fl->fl_ip_sport;
-       uh->dest = fl->fl_ip_dport;
-       uh->len = htons(up->len);
+       uh->source = inet->inet_sport;
+       uh->dest = dport;
+       uh->len = htons(len);
        uh->check = 0;
 
        if (is_udplite)                                  /*     UDP-Lite      */
-               csum  = udplite_csum_outgoing(sk, skb);
+               csum = udplite_csum(skb);
 
        else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
 
@@ -740,20 +737,20 @@ static int udp_push_pending_frames(struct sock *sk)
 
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 
-               udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
+               udp4_hwcsum(skb, rt->rt_src, daddr);
                goto send;
 
-       } else                                           /*   `normal' UDP    */
-               csum = udp_csum_outgoing(sk, skb);
+       } else
+               csum = udp_csum(skb);
 
        /* add protocol-dependent pseudo-header */
-       uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
+       uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len,
                                      sk->sk_protocol, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;
 
 send:
-       err = ip_push_pending_frames(sk);
+       err = ip_send_skb(skb);
        if (err) {
                if (err == -ENOBUFS && !inet->recverr) {
                        UDP_INC_STATS_USER(sock_net(sk),
@@ -763,6 +760,26 @@ send:
        } else
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_OUTDATAGRAMS, is_udplite);
+       return err;
+}
+
+/*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+static int udp_push_pending_frames(struct sock *sk)
+{
+       struct udp_sock  *up = udp_sk(sk);
+       struct inet_sock *inet = inet_sk(sk);
+       struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
+       struct sk_buff *skb;
+       int err = 0;
+
+       skb = ip_finish_skb(sk);
+       if (!skb)
+               goto out;
+
+       err = udp_send_skb(skb, fl4->daddr, fl4->fl4_dport);
+
 out:
        up->len = 0;
        up->pending = 0;
@@ -774,6 +791,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 {
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
+       struct flowi4 *fl4;
        int ulen = len;
        struct ipcm_cookie ipc;
        struct rtable *rt = NULL;
@@ -785,6 +803,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        int err, is_udplite = IS_UDPLITE(sk);
        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+       struct sk_buff *skb;
 
        if (len > 0xFFFF)
                return -EMSGSIZE;
@@ -799,6 +818,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.opt = NULL;
        ipc.tx_flags = 0;
 
+       getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+
        if (up->pending) {
                /*
                 * There are pending frames.
@@ -888,20 +909,25 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                rt = (struct rtable *)sk_dst_check(sk, 0);
 
        if (rt == NULL) {
-               struct flowi fl = { .oif = ipc.oif,
-                                   .mark = sk->sk_mark,
-                                   .fl4_dst = faddr,
-                                   .fl4_src = saddr,
-                                   .fl4_tos = tos,
-                                   .proto = sk->sk_protocol,
-                                   .flags = inet_sk_flowi_flags(sk),
-                                   .fl_ip_sport = inet->inet_sport,
-                                   .fl_ip_dport = dport };
+               struct flowi4 fl4 = {
+                       .flowi4_oif = ipc.oif,
+                       .flowi4_mark = sk->sk_mark,
+                       .daddr = faddr,
+                       .saddr = saddr,
+                       .flowi4_tos = tos,
+                       .flowi4_proto = sk->sk_protocol,
+                       .flowi4_flags = (inet_sk_flowi_flags(sk) |
+                                        FLOWI_FLAG_CAN_SLEEP),
+                       .fl4_sport = inet->inet_sport,
+                       .fl4_dport = dport,
+               };
                struct net *net = sock_net(sk);
 
-               security_sk_classify_flow(sk, &fl);
-               err = ip_route_output_flow(net, &rt, &fl, sk, 1);
-               if (err) {
+               security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+               rt = ip_route_output_flow(net, &fl4, sk);
+               if (IS_ERR(rt)) {
+                       err = PTR_ERR(rt);
+                       rt = NULL;
                        if (err == -ENETUNREACH)
                                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
@@ -923,6 +949,17 @@ back_from_confirm:
        if (!ipc.addr)
                daddr = ipc.addr = rt->rt_dst;
 
+       /* Lockless fast path for the non-corking case. */
+       if (!corkreq) {
+               skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
+                                 sizeof(struct udphdr), &ipc, &rt,
+                                 msg->msg_flags);
+               err = PTR_ERR(skb);
+               if (skb && !IS_ERR(skb))
+                       err = udp_send_skb(skb, daddr, dport);
+               goto out;
+       }
+
        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
@@ -936,15 +973,15 @@ back_from_confirm:
        /*
         *      Now cork the socket to pend data.
         */
-       inet->cork.fl.fl4_dst = daddr;
-       inet->cork.fl.fl_ip_dport = dport;
-       inet->cork.fl.fl4_src = saddr;
-       inet->cork.fl.fl_ip_sport = inet->inet_sport;
+       fl4 = &inet->cork.fl.u.ip4;
+       fl4->daddr = daddr;
+       fl4->saddr = saddr;
+       fl4->fl4_dport = dport;
+       fl4->fl4_sport = inet->inet_sport;
        up->pending = AF_INET;
 
 do_append_data:
        up->len += ulen;
-       getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
                        sizeof(struct udphdr), &ipc, &rt,
                        corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
@@ -2199,7 +2236,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
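
The udp.c hunks above move transmit checksumming into udp_send_skb()/udp4_hwcsum(), both of which end by folding the payload sum into the UDP pseudo-header. A hedged sketch of that software-checksum step in isolation, using the same helpers as the patched code:

        static void example_udp_sw_csum(struct sk_buff *skb, __be32 src, __be32 dst)
        {
                struct udphdr *uh = udp_hdr(skb);
                int offset = skb_transport_offset(skb);
                int len = skb->len - offset;
                __wsum csum = skb_checksum(skb, offset, len, 0);

                uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;     /* 0 means "no checksum" for UDP */
        }
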
index b057d40..13e0e7f 100644 (file)
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
-                                         xfrm_address_t *saddr,
-                                         xfrm_address_t *daddr)
+                                         const xfrm_address_t *saddr,
+                                         const xfrm_address_t *daddr)
 {
-       struct flowi fl = {
-               .fl4_dst = daddr->a4,
-               .fl4_tos = tos,
+       struct flowi4 fl4 = {
+               .daddr = daddr->a4,
+               .flowi4_tos = tos,
        };
-       struct dst_entry *dst;
        struct rtable *rt;
-       int err;
 
        if (saddr)
-               fl.fl4_src = saddr->a4;
+               fl4.saddr = saddr->a4;
+
+       rt = __ip_route_output_key(net, &fl4);
+       if (!IS_ERR(rt))
+               return &rt->dst;
 
-       err = __ip_route_output_key(net, &rt, &fl);
-       dst = &rt->dst;
-       if (err)
-               dst = ERR_PTR(err);
-       return dst;
+       return ERR_CAST(rt);
 }
 
 static int xfrm4_get_saddr(struct net *net,
@@ -56,9 +54,9 @@ static int xfrm4_get_saddr(struct net *net,
        return 0;
 }
 
-static int xfrm4_get_tos(struct flowi *fl)
+static int xfrm4_get_tos(const struct flowi *fl)
 {
-       return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
+       return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
 }
 
 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
@@ -68,11 +66,17 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 }
 
 static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
-                         struct flowi *fl)
+                         const struct flowi *fl)
 {
        struct rtable *rt = (struct rtable *)xdst->route;
+       const struct flowi4 *fl4 = &fl->u.ip4;
 
-       xdst->u.rt.fl = *fl;
+       rt->rt_key_dst = fl4->daddr;
+       rt->rt_key_src = fl4->saddr;
+       rt->rt_tos = fl4->flowi4_tos;
+       rt->rt_iif = fl4->flowi4_iif;
+       rt->rt_oif = fl4->flowi4_oif;
+       rt->rt_mark = fl4->flowi4_mark;
 
        xdst->u.dst.dev = dev;
        dev_hold(dev);
@@ -99,9 +103,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
        struct iphdr *iph = ip_hdr(skb);
        u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+       struct flowi4 *fl4 = &fl->u.ip4;
 
-       memset(fl, 0, sizeof(struct flowi));
-       fl->mark = skb->mark;
+       memset(fl4, 0, sizeof(struct flowi4));
+       fl4->flowi4_mark = skb->mark;
 
        if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
                switch (iph->protocol) {
@@ -114,8 +119,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                            pskb_may_pull(skb, xprth + 4 - skb->data)) {
                                __be16 *ports = (__be16 *)xprth;
 
-                               fl->fl_ip_sport = ports[!!reverse];
-                               fl->fl_ip_dport = ports[!reverse];
+                               fl4->fl4_sport = ports[!!reverse];
+                               fl4->fl4_dport = ports[!reverse];
                        }
                        break;
 
@@ -123,8 +128,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
                                u8 *icmp = xprth;
 
-                               fl->fl_icmp_type = icmp[0];
-                               fl->fl_icmp_code = icmp[1];
+                               fl4->fl4_icmp_type = icmp[0];
+                               fl4->fl4_icmp_code = icmp[1];
                        }
                        break;
 
@@ -132,7 +137,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
                                __be32 *ehdr = (__be32 *)xprth;
 
-                               fl->fl_ipsec_spi = ehdr[0];
+                               fl4->fl4_ipsec_spi = ehdr[0];
                        }
                        break;
 
@@ -140,7 +145,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
                                __be32 *ah_hdr = (__be32*)xprth;
 
-                               fl->fl_ipsec_spi = ah_hdr[1];
+                               fl4->fl4_ipsec_spi = ah_hdr[1];
                        }
                        break;
 
@@ -148,7 +153,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
                                __be16 *ipcomp_hdr = (__be16 *)xprth;
 
-                               fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
+                               fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
                        }
                        break;
 
@@ -160,20 +165,20 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                                if (greflags[0] & GRE_KEY) {
                                        if (greflags[0] & GRE_CSUM)
                                                gre_hdr++;
-                                       fl->fl_gre_key = gre_hdr[1];
+                                       fl4->fl4_gre_key = gre_hdr[1];
                                }
                        }
                        break;
 
                default:
-                       fl->fl_ipsec_spi = 0;
+                       fl4->fl4_ipsec_spi = 0;
                        break;
                }
        }
-       fl->proto = iph->protocol;
-       fl->fl4_dst = reverse ? iph->saddr : iph->daddr;
-       fl->fl4_src = reverse ? iph->daddr : iph->saddr;
-       fl->fl4_tos = iph->tos;
+       fl4->flowi4_proto = iph->protocol;
+       fl4->daddr = reverse ? iph->saddr : iph->daddr;
+       fl4->saddr = reverse ? iph->daddr : iph->saddr;
+       fl4->flowi4_tos = iph->tos;
 }
 
 static inline int xfrm4_garbage_collect(struct dst_ops *ops)
@@ -196,8 +201,11 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
+       dst_destroy_metrics_generic(dst);
+
        if (likely(xdst->u.rt.peer))
                inet_putpeer(xdst->u.rt.peer);
+
        xfrm_dst_destroy(xdst);
 }
 
@@ -215,6 +223,7 @@ static struct dst_ops xfrm4_dst_ops = {
        .protocol =             cpu_to_be16(ETH_P_IP),
        .gc =                   xfrm4_garbage_collect,
        .update_pmtu =          xfrm4_update_pmtu,
+       .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              xfrm4_dst_destroy,
        .ifdown =               xfrm4_dst_ifdown,
        .local_out =            __ip_local_out,
@@ -230,6 +239,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
        .get_tos =              xfrm4_get_tos,
        .init_path =            xfrm4_init_path,
        .fill_dst =             xfrm4_fill_dst,
+       .blackhole_route =      ipv4_blackhole_route,
 };
 
 #ifdef CONFIG_SYSCTL
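
For orientation, a minimal sketch of how the legacy IPv4 fields of struct flowi map onto the embedded struct flowi4 used in the hunks above. Every field name is taken from this diff; skb, iph, sport, dport and spi are assumed local context, and the snippet is illustrative rather than a complete function:

	struct flowi fl;
	struct flowi4 *fl4 = &fl.u.ip4;		/* IPv4 view of the generic flow key */

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark   = skb->mark;		/* was fl->mark         */
	fl4->flowi4_proto  = iph->protocol;	/* was fl->proto        */
	fl4->flowi4_tos    = iph->tos;		/* was fl->fl4_tos      */
	fl4->daddr         = iph->daddr;	/* was fl->fl4_dst      */
	fl4->saddr         = iph->saddr;	/* was fl->fl4_src      */
	fl4->fl4_sport     = sport;		/* was fl->fl_ip_sport  */
	fl4->fl4_dport     = dport;		/* was fl->fl_ip_dport  */
	fl4->fl4_ipsec_spi = spi;		/* was fl->fl_ipsec_spi */
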
index 4794762..1717c64 100644 (file)
@@ -21,24 +21,26 @@ static int xfrm4_init_flags(struct xfrm_state *x)
 }
 
 static void
-__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
 {
-       sel->daddr.a4 = fl->fl4_dst;
-       sel->saddr.a4 = fl->fl4_src;
-       sel->dport = xfrm_flowi_dport(fl);
+       const struct flowi4 *fl4 = &fl->u.ip4;
+
+       sel->daddr.a4 = fl4->daddr;
+       sel->saddr.a4 = fl4->saddr;
+       sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
        sel->dport_mask = htons(0xffff);
-       sel->sport = xfrm_flowi_sport(fl);
+       sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
        sel->sport_mask = htons(0xffff);
        sel->family = AF_INET;
        sel->prefixlen_d = 32;
        sel->prefixlen_s = 32;
-       sel->proto = fl->proto;
-       sel->ifindex = fl->oif;
+       sel->proto = fl4->flowi4_proto;
+       sel->ifindex = fl4->flowi4_oif;
 }
 
 static void
-xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
-                  xfrm_address_t *daddr, xfrm_address_t *saddr)
+xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+                  const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        x->id = tmpl->id;
        if (x->id.daddr.a4 == 0)
index 24a1cf1..3daaf3c 100644 (file)
@@ -718,12 +718,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
        struct inet6_ifaddr *ifa, *ifn;
        struct inet6_dev *idev = ifp->idev;
        int state;
-       int hash;
        int deleted = 0, onlink = 0;
        unsigned long expires = jiffies;
 
-       hash = ipv6_addr_hash(&ifp->addr);
-
        spin_lock_bh(&ifp->state_lock);
        state = ifp->state;
        ifp->state = INET6_IFADDR_STATE_DEAD;
@@ -2661,14 +2658,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
        struct net *net = dev_net(dev);
        struct inet6_dev *idev;
        struct inet6_ifaddr *ifa;
-       LIST_HEAD(keep_list);
-       int state;
+       int state, i;
 
        ASSERT_RTNL();
 
-       /* Flush routes if device is being removed or it is not loopback */
-       if (how || !(dev->flags & IFF_LOOPBACK))
-               rt6_ifdown(net, dev);
+       rt6_ifdown(net, dev);
+       neigh_ifdown(&nd_tbl, dev);
 
        idev = __in6_dev_get(dev);
        if (idev == NULL)
@@ -2689,6 +2684,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        }
 
+       /* Step 2: clear hash table */
+       for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+               struct hlist_head *h = &inet6_addr_lst[i];
+               struct hlist_node *n;
+
+               spin_lock_bh(&addrconf_hash_lock);
+       restart:
+               hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+                       if (ifa->idev == idev) {
+                               hlist_del_init_rcu(&ifa->addr_lst);
+                               addrconf_del_timer(ifa);
+                               goto restart;
+                       }
+               }
+               spin_unlock_bh(&addrconf_hash_lock);
+       }
+
        write_lock_bh(&idev->lock);
 
        /* Step 2: clear flags for stateless addrconf */
@@ -2722,52 +2734,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
                                       struct inet6_ifaddr, if_list);
                addrconf_del_timer(ifa);
 
-               /* If just doing link down, and address is permanent
-                  and not link-local, then retain it. */
-               if (!how &&
-                   (ifa->flags&IFA_F_PERMANENT) &&
-                   !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
-                       list_move_tail(&ifa->if_list, &keep_list);
-
-                       /* If not doing DAD on this address, just keep it. */
-                       if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
-                           idev->cnf.accept_dad <= 0 ||
-                           (ifa->flags & IFA_F_NODAD))
-                               continue;
+               list_del(&ifa->if_list);
 
-                       /* If it was tentative already, no need to notify */
-                       if (ifa->flags & IFA_F_TENTATIVE)
-                               continue;
+               write_unlock_bh(&idev->lock);
 
-                       /* Flag it for later restoration when link comes up */
-                       ifa->flags |= IFA_F_TENTATIVE;
-                       ifa->state = INET6_IFADDR_STATE_DAD;
-               } else {
-                       list_del(&ifa->if_list);
-
-                       /* clear hash table */
-                       spin_lock_bh(&addrconf_hash_lock);
-                       hlist_del_init_rcu(&ifa->addr_lst);
-                       spin_unlock_bh(&addrconf_hash_lock);
-
-                       write_unlock_bh(&idev->lock);
-                       spin_lock_bh(&ifa->state_lock);
-                       state = ifa->state;
-                       ifa->state = INET6_IFADDR_STATE_DEAD;
-                       spin_unlock_bh(&ifa->state_lock);
-
-                       if (state != INET6_IFADDR_STATE_DEAD) {
-                               __ipv6_ifa_notify(RTM_DELADDR, ifa);
-                               atomic_notifier_call_chain(&inet6addr_chain,
-                                                          NETDEV_DOWN, ifa);
-                       }
+               spin_lock_bh(&ifa->state_lock);
+               state = ifa->state;
+               ifa->state = INET6_IFADDR_STATE_DEAD;
+               spin_unlock_bh(&ifa->state_lock);
 
-                       in6_ifa_put(ifa);
-                       write_lock_bh(&idev->lock);
+               if (state != INET6_IFADDR_STATE_DEAD) {
+                       __ipv6_ifa_notify(RTM_DELADDR, ifa);
+                       atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
                }
-       }
+               in6_ifa_put(ifa);
 
-       list_splice(&keep_list, &idev->addr_list);
+               write_lock_bh(&idev->lock);
+       }
 
        write_unlock_bh(&idev->lock);
 
@@ -4156,8 +4139,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                addrconf_leave_solict(ifp->idev, &ifp->addr);
                dst_hold(&ifp->rt->dst);
 
-               if (ifp->state == INET6_IFADDR_STATE_DEAD &&
-                   ip6_del_rt(ifp->rt))
+               if (ip6_del_rt(ifp->rt))
                        dst_free(&ifp->rt->dst);
                break;
        }
index 978e80e..4b13d5d 100644 (file)
@@ -644,41 +644,34 @@ EXPORT_SYMBOL(inet6_unregister_protosw);
 
 int inet6_sk_rebuild_header(struct sock *sk)
 {
-       int err;
-       struct dst_entry *dst;
        struct ipv6_pinfo *np = inet6_sk(sk);
+       struct dst_entry *dst;
 
        dst = __sk_dst_check(sk, np->dst_cookie);
 
        if (dst == NULL) {
                struct inet_sock *inet = inet_sk(sk);
                struct in6_addr *final_p, final;
-               struct flowi fl;
-
-               memset(&fl, 0, sizeof(fl));
-               fl.proto = sk->sk_protocol;
-               ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-               ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-               fl.fl6_flowlabel = np->flow_label;
-               fl.oif = sk->sk_bound_dev_if;
-               fl.mark = sk->sk_mark;
-               fl.fl_ip_dport = inet->inet_dport;
-               fl.fl_ip_sport = inet->inet_sport;
-               security_sk_classify_flow(sk, &fl);
-
-               final_p = fl6_update_dst(&fl, np->opt, &final);
-
-               err = ip6_dst_lookup(sk, &dst, &fl);
-               if (err) {
+               struct flowi6 fl6;
+
+               memset(&fl6, 0, sizeof(fl6));
+               fl6.flowi6_proto = sk->sk_protocol;
+               ipv6_addr_copy(&fl6.daddr, &np->daddr);
+               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.flowlabel = np->flow_label;
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
+               fl6.flowi6_mark = sk->sk_mark;
+               fl6.fl6_dport = inet->inet_dport;
+               fl6.fl6_sport = inet->inet_sport;
+               security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+               final_p = fl6_update_dst(&fl6, np->opt, &final);
+
+               dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+               if (IS_ERR(dst)) {
                        sk->sk_route_caps = 0;
-                       return err;
-               }
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-               if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
-                       sk->sk_err_soft = -err;
-                       return err;
+                       sk->sk_err_soft = -PTR_ERR(dst);
+                       return PTR_ERR(dst);
                }
 
                __ip6_dst_store(sk, dst, NULL, NULL);
@@ -772,7 +765,7 @@ out:
        return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct ipv6hdr *ipv6h;
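
The inet6_sk_rebuild_header() hunk above is the canonical caller-side conversion in this series: the separate ip6_dst_lookup() + xfrm_lookup() steps collapse into ip6_dst_lookup_flow(), which hands back either a usable dst or an ERR_PTR-encoded error. A condensed sketch of that pattern, using only calls that appear in this diff (sk, np and the remaining flowi6 setup are assumed context):

	struct flowi6 fl6;
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = sk->sk_protocol;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	/* ... remaining flowi6 fields as in the hunk above ... */

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	/* Combined route + xfrm lookup; failure comes back encoded in the pointer. */
	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst))
		return PTR_ERR(dst);
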
index 1aba54a..2195ae6 100644 (file)
@@ -409,7 +409,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
 
        ah->reserved = 0;
        ah->spi = x->id.spi;
-       ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+       ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, 0, skb->len);
index 320bdb8..1656033 100644 (file)
@@ -40,7 +40,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct ipv6_pinfo       *np = inet6_sk(sk);
        struct in6_addr         *daddr, *final_p, final;
        struct dst_entry        *dst;
-       struct flowi            fl;
+       struct flowi6           fl6;
        struct ip6_flowlabel    *flowlabel = NULL;
        struct ipv6_txoptions   *opt;
        int                     addr_type;
@@ -59,11 +59,11 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
        if (np->sndflow) {
-               fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-               if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
-                       flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+               fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+               if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
+                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
@@ -137,7 +137,7 @@ ipv4_connected:
        }
 
        ipv6_addr_copy(&np->daddr, daddr);
-       np->flow_label = fl.fl6_flowlabel;
+       np->flow_label = fl6.flowlabel;
 
        inet->inet_dport = usin->sin6_port;
 
@@ -146,53 +146,46 @@ ipv4_connected:
         *      destination cache for it.
         */
 
-       fl.proto = sk->sk_protocol;
-       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-       ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-       fl.oif = sk->sk_bound_dev_if;
-       fl.mark = sk->sk_mark;
-       fl.fl_ip_dport = inet->inet_dport;
-       fl.fl_ip_sport = inet->inet_sport;
+       fl6.flowi6_proto = sk->sk_protocol;
+       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.flowi6_oif = sk->sk_bound_dev_if;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_dport = inet->inet_dport;
+       fl6.fl6_sport = inet->inet_sport;
 
-       if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
-               fl.oif = np->mcast_oif;
+       if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
+               fl6.flowi6_oif = np->mcast_oif;
 
-       security_sk_classify_flow(sk, &fl);
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
        opt = flowlabel ? flowlabel->opt : np->opt;
-       final_p = fl6_update_dst(&fl, opt, &final);
+       final_p = fl6_update_dst(&fl6, opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+       err = 0;
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto out;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto out;
        }
 
        /* source address lookup done in ip6_dst_lookup */
 
        if (ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&np->saddr, &fl.fl6_src);
+               ipv6_addr_copy(&np->saddr, &fl6.saddr);
 
        if (ipv6_addr_any(&np->rcv_saddr)) {
-               ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
+               ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr);
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
                if (sk->sk_prot->rehash)
                        sk->sk_prot->rehash(sk);
        }
 
        ip6_dst_store(sk, dst,
-                     ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
+                     ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
                      &np->daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
-                     ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
+                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
                      &np->saddr :
 #endif
                      NULL);
@@ -238,7 +231,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                kfree_skb(skb);
 }
 
-void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sock_exterr_skb *serr;
@@ -257,7 +250,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
        skb_put(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        iph = ipv6_hdr(skb);
-       ipv6_addr_copy(&iph->daddr, &fl->fl6_dst);
+       ipv6_addr_copy(&iph->daddr, &fl6->daddr);
 
        serr = SKB_EXT_ERR(skb);
        serr->ee.ee_errno = err;
@@ -268,7 +261,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
        serr->ee.ee_info = info;
        serr->ee.ee_data = 0;
        serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
-       serr->port = fl->fl_ip_dport;
+       serr->port = fl6->fl6_dport;
 
        __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
        skb_reset_transport_header(skb);
@@ -277,7 +270,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
                kfree_skb(skb);
 }
 
-void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu)
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *iph;
@@ -294,7 +287,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu)
        skb_put(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        iph = ipv6_hdr(skb);
-       ipv6_addr_copy(&iph->daddr, &fl->fl6_dst);
+       ipv6_addr_copy(&iph->daddr, &fl6->daddr);
 
        mtu_info = IP6CBMTU(skb);
        if (!mtu_info) {
@@ -306,7 +299,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu)
        mtu_info->ip6m_addr.sin6_family = AF_INET6;
        mtu_info->ip6m_addr.sin6_port = 0;
        mtu_info->ip6m_addr.sin6_flowinfo = 0;
-       mtu_info->ip6m_addr.sin6_scope_id = fl->oif;
+       mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
        ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
 
        __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
@@ -600,7 +593,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 }
 
 int datagram_send_ctl(struct net *net,
-                     struct msghdr *msg, struct flowi *fl,
+                     struct msghdr *msg, struct flowi6 *fl6,
                      struct ipv6_txoptions *opt,
                      int *hlimit, int *tclass, int *dontfrag)
 {
@@ -636,16 +629,17 @@ int datagram_send_ctl(struct net *net,
                        src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
 
                        if (src_info->ipi6_ifindex) {
-                               if (fl->oif && src_info->ipi6_ifindex != fl->oif)
+                               if (fl6->flowi6_oif &&
+                                   src_info->ipi6_ifindex != fl6->flowi6_oif)
                                        return -EINVAL;
-                               fl->oif = src_info->ipi6_ifindex;
+                               fl6->flowi6_oif = src_info->ipi6_ifindex;
                        }
 
                        addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
 
                        rcu_read_lock();
-                       if (fl->oif) {
-                               dev = dev_get_by_index_rcu(net, fl->oif);
+                       if (fl6->flowi6_oif) {
+                               dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
                                if (!dev) {
                                        rcu_read_unlock();
                                        return -ENODEV;
@@ -661,7 +655,7 @@ int datagram_send_ctl(struct net *net,
                                                   strict ? dev : NULL, 0))
                                        err = -EINVAL;
                                else
-                                       ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
+                                       ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr);
                        }
 
                        rcu_read_unlock();
@@ -678,13 +672,13 @@ int datagram_send_ctl(struct net *net,
                                goto exit_f;
                        }
 
-                       if (fl->fl6_flowlabel&IPV6_FLOWINFO_MASK) {
-                               if ((fl->fl6_flowlabel^*(__be32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
+                       if (fl6->flowlabel&IPV6_FLOWINFO_MASK) {
+                               if ((fl6->flowlabel^*(__be32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
                                        err = -EINVAL;
                                        goto exit_f;
                                }
                        }
-                       fl->fl6_flowlabel = IPV6_FLOWINFO_MASK & *(__be32 *)CMSG_DATA(cmsg);
+                       fl6->flowlabel = IPV6_FLOWINFO_MASK & *(__be32 *)CMSG_DATA(cmsg);
                        break;
 
                case IPV6_2292HOPOPTS:
index 1b5c982..5aa8ec8 100644 (file)
@@ -54,16 +54,20 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
- * For alignment considerations the IV is placed at the front, followed
- * by the request and finally the SG list.
+ * For alignment considerations the upper 32 bits of the sequence number are
+ * placed at the front, if present. Followed by the IV, the request and finally
+ * the SG list.
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
 {
        unsigned int len;
 
-       len = crypto_aead_ivsize(aead);
+       len = seqihlen;
+
+       len += crypto_aead_ivsize(aead);
+
        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
@@ -78,10 +82,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
        return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+static inline __be32 *esp_tmp_seqhi(void *tmp)
+{
+       return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
 {
        return crypto_aead_ivsize(aead) ?
-              PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+              PTR_ALIGN((u8 *)tmp + seqhilen,
+                        crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
 }
 
 static inline struct aead_givcrypt_request *esp_tmp_givreq(
@@ -145,8 +155,12 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        int plen;
        int tfclen;
        int nfrags;
+       int assoclen;
+       int sglists;
+       int seqhilen;
        u8 *iv;
        u8 *tail;
+       __be32 *seqhi;
        struct esp_data *esp = x->data;
 
        /* skb is pure payload to encrypt */
@@ -175,14 +189,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
                goto error;
        nfrags = err;
 
-       tmp = esp_alloc_tmp(aead, nfrags + 1);
+       assoclen = sizeof(*esph);
+       sglists = 1;
+       seqhilen = 0;
+
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists += 2;
+               seqhilen += sizeof(__be32);
+               assoclen += seqhilen;
+       }
+
+       tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto error;
 
-       iv = esp_tmp_iv(aead, tmp);
+       seqhi = esp_tmp_seqhi(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
-       sg = asg + 1;
+       sg = asg + sglists;
 
        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
@@ -204,19 +229,27 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        *skb_mac_header(skb) = IPPROTO_ESP;
 
        esph->spi = x->id.spi;
-       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);
-       sg_init_one(asg, esph, sizeof(*esph));
+
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               sg_init_table(asg, 3);
+               sg_set_buf(asg, &esph->spi, sizeof(__be32));
+               *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+               sg_set_buf(asg + 1, seqhi, seqhilen);
+               sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+       } else
+               sg_init_one(asg, esph, sizeof(*esph));
 
        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
-       aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+       aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output);
+                             XFRM_SKB_CB(skb)->seq.output.low);
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
@@ -292,8 +325,12 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
+       int assoclen;
+       int sglists;
+       int seqhilen;
        int ret = 0;
        void *tmp;
+       __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;
@@ -314,12 +351,24 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        ret = -ENOMEM;
-       tmp = esp_alloc_tmp(aead, nfrags + 1);
+
+       assoclen = sizeof(*esph);
+       sglists = 1;
+       seqhilen = 0;
+
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists += 2;
+               seqhilen += sizeof(__be32);
+               assoclen += seqhilen;
+       }
+
+       tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;
 
        ESP_SKB_CB(skb)->tmp = tmp;
-       iv = esp_tmp_iv(aead, tmp);
+       seqhi = esp_tmp_seqhi(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + 1;
@@ -333,11 +382,19 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
-       sg_init_one(asg, esph, sizeof(*esph));
+
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               sg_init_table(asg, 3);
+               sg_set_buf(asg, &esph->spi, sizeof(__be32));
+               *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+               sg_set_buf(asg + 1, seqhi, seqhilen);
+               sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+       } else
+               sg_init_one(asg, esph, sizeof(*esph));
 
        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
-       aead_request_set_assoc(req, asg, sizeof(*esph));
+       aead_request_set_assoc(req, asg, assoclen);
 
        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
@@ -443,10 +500,20 @@ static int esp_init_authenc(struct xfrm_state *x)
                goto error;
 
        err = -ENAMETOOLONG;
-       if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
-                    x->aalg ? x->aalg->alg_name : "digest_null",
-                    x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
-               goto error;
+
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+                            "authencesn(%s,%s)",
+                            x->aalg ? x->aalg->alg_name : "digest_null",
+                            x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto error;
+       } else {
+               if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+                            "authenc(%s,%s)",
+                            x->aalg ? x->aalg->alg_name : "digest_null",
+                            x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto error;
+       }
 
        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
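
As a reading aid, a rough sketch of the per-packet temporary buffer and associated-data layout that esp_alloc_tmp(), esp_tmp_seqhi() and esp_tmp_iv() assume once XFRM_STATE_ESN is set; this is inferred from the hunks above and is not an authoritative description beyond them:

	/*
	 * tmp = kmalloc(len, GFP_ATOMIC):
	 *
	 *   [ seqhi: __be32 high half of the 64-bit sequence, only with ESN ]
	 *   [ IV, aligned to the AEAD alignmask                             ]
	 *   [ aead_givcrypt_request / aead_request                          ]
	 *   [ asg[sglists] followed by sg[nfrags]                           ]
	 *
	 * With ESN the associated data is split across three SG entries:
	 *   asg[0] = esph->spi, asg[1] = seqhi, asg[2] = esph->seq_no (low half),
	 * and the AEAD is instantiated as "authencesn(auth,enc)" instead of
	 * "authenc(auth,enc)".
	 */
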
index 262f105..79a485e 100644 (file)
@@ -876,22 +876,22 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
  * fl6_update_dst - update flowi destination address with info given
  *                  by srcrt option, if any.
  *
- * @fl: flowi for which fl6_dst is to be updated
+ * @fl6: flowi6 for which daddr is to be updated
  * @opt: struct ipv6_txoptions in which to look for srcrt opt
- * @orig: copy of original fl6_dst address if modified
+ * @orig: copy of original daddr address if modified
  *
  * Returns NULL if no txoptions or no srcrt, otherwise returns orig
- * and initial value of fl->fl6_dst set in orig
+ * and initial value of fl6->daddr set in orig
  */
-struct in6_addr *fl6_update_dst(struct flowi *fl,
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig)
 {
        if (!opt || !opt->srcrt)
                return NULL;
 
-       ipv6_addr_copy(orig, &fl->fl6_dst);
-       ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
+       ipv6_addr_copy(orig, &fl6->daddr);
+       ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
        return orig;
 }
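
A brief usage sketch of the converted helper, distilled from its callers elsewhere in this diff (ip6_datagram_connect(), inet6_csk_xmit() and inet6_sk_rebuild_header()); sk, np and dst are assumed socket context:

	struct in6_addr final, *final_p;

	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	final_p = fl6_update_dst(&fl6, np->opt, &final);	/* NULL when there is no srcrt */

	/* Route against the first srcrt hop; the saved final destination is
	 * copied back into fl6.daddr before the xfrm lookup, see the
	 * ip6_dst_lookup_flow() hunk later in this diff. */
	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
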
 
index d829874..34d244d 100644 (file)
@@ -29,7 +29,7 @@ struct fib6_rule
        u8                      tclass;
 };
 
-struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
        struct fib_lookup_arg arg = {
@@ -37,7 +37,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
                .flags = FIB_LOOKUP_NOREF,
        };
 
-       fib_rules_lookup(net->ipv6.fib6_rules_ops, fl, flags, &arg);
+       fib_rules_lookup(net->ipv6.fib6_rules_ops,
+                        flowi6_to_flowi(fl6), flags, &arg);
 
        if (arg.result)
                return arg.result;
@@ -49,6 +50,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
 {
+       struct flowi6 *flp6 = &flp->u.ip6;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
        struct net *net = rule->fr_net;
@@ -71,7 +73,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 
        table = fib6_get_table(net, rule->table);
        if (table)
-               rt = lookup(net, table, flp, flags);
+               rt = lookup(net, table, flp6, flags);
 
        if (rt != net->ipv6.ip6_null_entry) {
                struct fib6_rule *r = (struct fib6_rule *)rule;
@@ -86,14 +88,14 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 
                        if (ipv6_dev_get_saddr(net,
                                               ip6_dst_idev(&rt->dst)->dev,
-                                              &flp->fl6_dst,
+                                              &flp6->daddr,
                                               rt6_flags2srcprefs(flags),
                                               &saddr))
                                goto again;
                        if (!ipv6_prefix_equal(&saddr, &r->src.addr,
                                               r->src.plen))
                                goto again;
-                       ipv6_addr_copy(&flp->fl6_src, &saddr);
+                       ipv6_addr_copy(&flp6->saddr, &saddr);
                }
                goto out;
        }
@@ -113,9 +115,10 @@ out:
 static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
 {
        struct fib6_rule *r = (struct fib6_rule *) rule;
+       struct flowi6 *fl6 = &fl->u.ip6;
 
        if (r->dst.plen &&
-           !ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen))
+           !ipv6_prefix_equal(&fl6->daddr, &r->dst.addr, r->dst.plen))
                return 0;
 
        /*
@@ -125,14 +128,14 @@ static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
         */
        if (r->src.plen) {
                if (flags & RT6_LOOKUP_F_HAS_SADDR) {
-                       if (!ipv6_prefix_equal(&fl->fl6_src, &r->src.addr,
+                       if (!ipv6_prefix_equal(&fl6->saddr, &r->src.addr,
                                               r->src.plen))
                                return 0;
                } else if (!(r->common.flags & FIB_RULE_FIND_SADDR))
                        return 0;
        }
 
-       if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff))
+       if (r->tclass && r->tclass != ((ntohl(fl6->flowlabel) >> 20) & 0xff))
                return 0;
 
        return 1;
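
The rule paths above keep the generic struct flowi at the fib_rules boundary and work on the embedded flowi6 view internally. A hedged sketch of the two conversions used in these hunks; treating flowi6_to_flowi() as a container_of()-style cast back to the enclosing struct flowi is an assumption, not something shown in this diff:

	/* IPv6 view of a generic flow key, as in fib6_rule_action()/_match(): */
	struct flowi6 *fl6 = &fl->u.ip6;

	/* Back to the generic key for interfaces that still take struct flowi: */
	fib_rules_lookup(net->ipv6.fib6_rules_ops, flowi6_to_flowi(fl6), flags, &arg);
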
index 03e62f9..83cb4f9 100644 (file)
@@ -157,32 +157,32 @@ static int is_ineligible(struct sk_buff *skb)
 /*
  * Check the ICMP output rate limit
  */
-static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
-                                    struct flowi *fl)
+static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+                                     struct flowi6 *fl6)
 {
        struct dst_entry *dst;
        struct net *net = sock_net(sk);
-       int res = 0;
+       bool res = false;
 
        /* Informational messages are not limited. */
        if (type & ICMPV6_INFOMSG_MASK)
-               return 1;
+               return true;
 
        /* Do not limit pmtu discovery, it would break it. */
        if (type == ICMPV6_PKT_TOOBIG)
-               return 1;
+               return true;
 
        /*
         * Look up the output route.
         * XXX: perhaps the expire for routing entries cloned by
         * this lookup should be more aggressive (not longer than timeout).
         */
-       dst = ip6_route_output(net, sk, fl);
+       dst = ip6_route_output(net, sk, fl6);
        if (dst->error) {
                IP6_INC_STATS(net, ip6_dst_idev(dst),
                              IPSTATS_MIB_OUTNOROUTES);
        } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
-               res = 1;
+               res = true;
        } else {
                struct rt6_info *rt = (struct rt6_info *)dst;
                int tmo = net->ipv6.sysctl.icmpv6_time;
@@ -191,7 +191,9 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
                if (rt->rt6i_dst.plen < 128)
                        tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
 
-               res = xrlim_allow(dst, tmo);
+               if (!rt->rt6i_peer)
+                       rt6_bind_peer(rt, 1);
+               res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
        }
        dst_release(dst);
        return res;
@@ -215,7 +217,7 @@ static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
        return (*op & 0xC0) == 0x80;
 }
 
-static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
+static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len)
 {
        struct sk_buff *skb;
        struct icmp6hdr *icmp6h;
@@ -231,9 +233,9 @@ static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct
        if (skb_queue_len(&sk->sk_write_queue) == 1) {
                skb->csum = csum_partial(icmp6h,
                                        sizeof(struct icmp6hdr), skb->csum);
-               icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
-                                                     &fl->fl6_dst,
-                                                     len, fl->proto,
+               icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
+                                                     &fl6->daddr,
+                                                     len, fl6->flowi6_proto,
                                                      skb->csum);
        } else {
                __wsum tmp_csum = 0;
@@ -244,9 +246,9 @@ static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct
 
                tmp_csum = csum_partial(icmp6h,
                                        sizeof(struct icmp6hdr), tmp_csum);
-               icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
-                                                     &fl->fl6_dst,
-                                                     len, fl->proto,
+               icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
+                                                     &fl6->daddr,
+                                                     len, fl6->flowi6_proto,
                                                      tmp_csum);
        }
        ip6_push_pending_frames(sk);
@@ -298,6 +300,68 @@ static void mip6_addr_swap(struct sk_buff *skb)
 static inline void mip6_addr_swap(struct sk_buff *skb) {}
 #endif
 
+static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+                                            struct sock *sk, struct flowi6 *fl6)
+{
+       struct dst_entry *dst, *dst2;
+       struct flowi6 fl2;
+       int err;
+
+       err = ip6_dst_lookup(sk, &dst, fl6);
+       if (err)
+               return ERR_PTR(err);
+
+       /*
+        * We won't send icmp if the destination is known
+        * anycast.
+        */
+       if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
+               LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
+               dst_release(dst);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* No need to clone since we're just using its address. */
+       dst2 = dst;
+
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
+       if (!IS_ERR(dst)) {
+               if (dst != dst2)
+                       return dst;
+       } else {
+               if (PTR_ERR(dst) == -EPERM)
+                       dst = NULL;
+               else
+                       return dst;
+       }
+
+       err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
+       if (err)
+               goto relookup_failed;
+
+       err = ip6_dst_lookup(sk, &dst2, &fl2);
+       if (err)
+               goto relookup_failed;
+
+       dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
+       if (!IS_ERR(dst2)) {
+               dst_release(dst);
+               dst = dst2;
+       } else {
+               err = PTR_ERR(dst2);
+               if (err == -EPERM) {
+                       dst_release(dst);
+                       return dst2;
+               } else
+                       goto relookup_failed;
+       }
+
+relookup_failed:
+       if (dst)
+               return dst;
+       return ERR_PTR(err);
+}
+
 /*
  *     Send an ICMP message in response to a packet in error
  */
@@ -310,10 +374,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
        struct dst_entry *dst;
-       struct dst_entry *dst2;
        struct icmp6hdr tmp_hdr;
-       struct flowi fl;
-       struct flowi fl2;
+       struct flowi6 fl6;
        struct icmpv6_msg msg;
        int iif = 0;
        int addr_type = 0;
@@ -380,22 +442,22 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        mip6_addr_swap(skb);
 
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_ICMPV6;
+       ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
        if (saddr)
-               ipv6_addr_copy(&fl.fl6_src, saddr);
-       fl.oif = iif;
-       fl.fl_icmp_type = type;
-       fl.fl_icmp_code = code;
-       security_skb_classify_flow(skb, &fl);
+               ipv6_addr_copy(&fl6.saddr, saddr);
+       fl6.flowi6_oif = iif;
+       fl6.fl6_icmp_type = type;
+       fl6.fl6_icmp_code = code;
+       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
        np = inet6_sk(sk);
 
-       if (!icmpv6_xrlim_allow(sk, type, &fl))
+       if (!icmpv6_xrlim_allow(sk, type, &fl6))
                goto out;
 
        tmp_hdr.icmp6_type = type;
@@ -403,61 +465,14 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        tmp_hdr.icmp6_cksum = 0;
        tmp_hdr.icmp6_pointer = htonl(info);
 
-       if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
-               fl.oif = np->mcast_oif;
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+               fl6.flowi6_oif = np->mcast_oif;
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = icmpv6_route_lookup(net, skb, sk, &fl6);
+       if (IS_ERR(dst))
                goto out;
 
-       /*
-        * We won't send icmp if the destination is known
-        * anycast.
-        */
-       if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
-               LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
-               goto out_dst_release;
-       }
-
-       /* No need to clone since we're just using its address. */
-       dst2 = dst;
-
-       err = xfrm_lookup(net, &dst, &fl, sk, 0);
-       switch (err) {
-       case 0:
-               if (dst != dst2)
-                       goto route_done;
-               break;
-       case -EPERM:
-               dst = NULL;
-               break;
-       default:
-               goto out;
-       }
-
-       if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
-               goto relookup_failed;
-
-       if (ip6_dst_lookup(sk, &dst2, &fl2))
-               goto relookup_failed;
-
-       err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
-       switch (err) {
-       case 0:
-               dst_release(dst);
-               dst = dst2;
-               break;
-       case -EPERM:
-               goto out_dst_release;
-       default:
-relookup_failed:
-               if (!dst)
-                       goto out;
-               break;
-       }
-
-route_done:
-       if (ipv6_addr_is_multicast(&fl.fl6_dst))
+       if (ipv6_addr_is_multicast(&fl6.daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
@@ -480,14 +495,14 @@ route_done:
        err = ip6_append_data(sk, icmpv6_getfrag, &msg,
                              len + sizeof(struct icmp6hdr),
                              sizeof(struct icmp6hdr), hlimit,
-                             np->tclass, NULL, &fl, (struct rt6_info*)dst,
+                             np->tclass, NULL, &fl6, (struct rt6_info*)dst,
                              MSG_DONTWAIT, np->dontfrag);
        if (err) {
                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
                goto out_put;
        }
-       err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
+       err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr));
 
 out_put:
        if (likely(idev != NULL))
@@ -509,7 +524,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        struct in6_addr *saddr = NULL;
        struct icmp6hdr *icmph = icmp6_hdr(skb);
        struct icmp6hdr tmp_hdr;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct icmpv6_msg msg;
        struct dst_entry *dst;
        int err = 0;
@@ -523,30 +538,31 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
        tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
 
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_ICMPV6;
+       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
        if (saddr)
-               ipv6_addr_copy(&fl.fl6_src, saddr);
-       fl.oif = skb->dev->ifindex;
-       fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
-       security_skb_classify_flow(skb, &fl);
+               ipv6_addr_copy(&fl6.saddr, saddr);
+       fl6.flowi6_oif = skb->dev->ifindex;
+       fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
        np = inet6_sk(sk);
 
-       if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
-               fl.oif = np->mcast_oif;
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+               fl6.flowi6_oif = np->mcast_oif;
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
+       err = ip6_dst_lookup(sk, &dst, &fl6);
        if (err)
                goto out;
-       if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0)
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
+       if (IS_ERR(dst))
                goto out;
 
-       if (ipv6_addr_is_multicast(&fl.fl6_dst))
+       if (ipv6_addr_is_multicast(&fl6.daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
@@ -560,7 +576,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        msg.type = ICMPV6_ECHO_REPLY;
 
        err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
-                               sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
+                               sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
                                (struct rt6_info*)dst, MSG_DONTWAIT,
                                np->dontfrag);
 
@@ -569,7 +585,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
                ip6_flush_pending_frames(sk);
                goto out_put;
        }
-       err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
+       err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
 
 out_put:
        if (likely(idev != NULL))
@@ -768,20 +784,20 @@ drop_no_count:
        return 0;
 }
 
-void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
+void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
                      u8 type,
                      const struct in6_addr *saddr,
                      const struct in6_addr *daddr,
                      int oif)
 {
-       memset(fl, 0, sizeof(*fl));
-       ipv6_addr_copy(&fl->fl6_src, saddr);
-       ipv6_addr_copy(&fl->fl6_dst, daddr);
-       fl->proto               = IPPROTO_ICMPV6;
-       fl->fl_icmp_type        = type;
-       fl->fl_icmp_code        = 0;
-       fl->oif                 = oif;
-       security_sk_classify_flow(sk, fl);
+       memset(fl6, 0, sizeof(*fl6));
+       ipv6_addr_copy(&fl6->saddr, saddr);
+       ipv6_addr_copy(&fl6->daddr, daddr);
+       fl6->flowi6_proto       = IPPROTO_ICMPV6;
+       fl6->fl6_icmp_type      = type;
+       fl6->fl6_icmp_code      = 0;
+       fl6->flowi6_oif         = oif;
+       security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 }
 
 /*
index d144e62..1660546 100644 (file)
@@ -61,26 +61,21 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *final_p, final;
        struct dst_entry *dst;
-       struct flowi fl;
-
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-       final_p = fl6_update_dst(&fl, np->opt, &final);
-       ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-       fl.oif = sk->sk_bound_dev_if;
-       fl.mark = sk->sk_mark;
-       fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-       fl.fl_ip_sport = inet_rsk(req)->loc_port;
-       security_req_classify_flow(req, &fl);
-
-       if (ip6_dst_lookup(sk, &dst, &fl))
-               return NULL;
-
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+       struct flowi6 fl6;
+
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_TCP;
+       ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+       final_p = fl6_update_dst(&fl6, np->opt, &final);
+       ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+       fl6.flowi6_oif = sk->sk_bound_dev_if;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_dport = inet_rsk(req)->rmt_port;
+       fl6.fl6_sport = inet_rsk(req)->loc_port;
+       security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+       if (IS_ERR(dst))
                return NULL;
 
        return dst;
@@ -213,42 +208,34 @@ int inet6_csk_xmit(struct sk_buff *skb)
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
        struct in6_addr *final_p, final;
 
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = sk->sk_protocol;
-       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-       ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-       fl.fl6_flowlabel = np->flow_label;
-       IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
-       fl.oif = sk->sk_bound_dev_if;
-       fl.mark = sk->sk_mark;
-       fl.fl_ip_sport = inet->inet_sport;
-       fl.fl_ip_dport = inet->inet_dport;
-       security_sk_classify_flow(sk, &fl);
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = sk->sk_protocol;
+       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.flowlabel = np->flow_label;
+       IP6_ECN_flow_xmit(sk, fl6.flowlabel);
+       fl6.flowi6_oif = sk->sk_bound_dev_if;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_sport = inet->inet_sport;
+       fl6.fl6_dport = inet->inet_dport;
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       final_p = fl6_update_dst(&fl, np->opt, &final);
+       final_p = fl6_update_dst(&fl6, np->opt, &final);
 
        dst = __inet6_csk_dst_check(sk, np->dst_cookie);
 
        if (dst == NULL) {
-               int err = ip6_dst_lookup(sk, &dst, &fl);
-
-               if (err) {
-                       sk->sk_err_soft = -err;
-                       kfree_skb(skb);
-                       return err;
-               }
-
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
+               dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 
-               if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
+               if (IS_ERR(dst)) {
+                       sk->sk_err_soft = -PTR_ERR(dst);
                        sk->sk_route_caps = 0;
                        kfree_skb(skb);
-                       return err;
+                       return PTR_ERR(dst);
                }
 
                __inet6_csk_dst_store(sk, dst, NULL, NULL);
@@ -257,9 +244,9 @@ int inet6_csk_xmit(struct sk_buff *skb)
        skb_dst_set(skb, dst_clone(dst));
 
        /* Restore final destination back after routing done */
-       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
+       ipv6_addr_copy(&fl6.daddr, &np->daddr);
 
-       return ip6_xmit(sk, skb, &fl, np->opt);
+       return ip6_xmit(sk, skb, &fl6, np->opt);
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_xmit);
index de38211..7548905 100644 (file)
@@ -260,10 +260,10 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
          return net->ipv6.fib6_main_tbl;
 }
 
-struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags, pol_lookup_t lookup)
 {
-       return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
+       return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
 }
 
 static void __net_init fib6_tables_init(struct net *net)
index 1365468..f3caf1b 100644 (file)
@@ -342,7 +342,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 
        if (olen > 0) {
                struct msghdr msg;
-               struct flowi flowi;
+               struct flowi6 flowi6;
                int junk;
 
                err = -ENOMEM;
@@ -358,9 +358,9 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 
                msg.msg_controllen = olen;
                msg.msg_control = (void*)(fl->opt+1);
-               flowi.oif = 0;
+               memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk,
+               err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
                                        &junk, &junk);
                if (err)
                        goto done;
index 5f8d242..1820887 100644 (file)
@@ -174,15 +174,15 @@ int ip6_output(struct sk_buff *skb)
  *     xmit an sk_buff (used by TCP, SCTP and DCCP)
  */
 
-int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt)
 {
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct in6_addr *first_hop = &fl->fl6_dst;
+       struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr;
-       u8  proto = fl->proto;
+       u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        int tclass = 0;
@@ -230,13 +230,13 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
 
-       *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
+       *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;
 
-       ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
+       ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
        ipv6_addr_copy(&hdr->daddr, first_hop);
 
        skb->priority = sk->sk_priority;
@@ -274,13 +274,10 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *hdr;
-       int totlen;
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
 
-       totlen = len + sizeof(struct ipv6hdr);
-
        skb_reset_network_header(skb);
        skb_put(skb, sizeof(struct ipv6hdr));
        hdr = ipv6_hdr(skb);
@@ -479,10 +476,13 @@ int ip6_forward(struct sk_buff *skb)
                else
                        target = &hdr->daddr;
 
+               if (!rt->rt6i_peer)
+                       rt6_bind_peer(rt, 1);
+
                /* Limit redirects both by destination (here)
                   and by source (inside ndisc_send_redirect)
                 */
-               if (xrlim_allow(dst, 1*HZ))
+               if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                        ndisc_send_redirect(skb, n, target);
        } else {
                int addrtype = ipv6_addr_type(&hdr->saddr);
@@ -879,7 +879,7 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
 
 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
                                          struct dst_entry *dst,
-                                         struct flowi *fl)
+                                         struct flowi6 *fl6)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct rt6_info *rt = (struct rt6_info *)dst;
@@ -904,11 +904,11 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
         *    sockets.
         * 2. oif also should be the same.
         */
-       if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
+       if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 #ifdef CONFIG_IPV6_SUBTREES
-           ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
+           ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 #endif
-           (fl->oif && fl->oif != dst->dev->ifindex)) {
+           (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
                dst_release(dst);
                dst = NULL;
        }
@@ -918,22 +918,22 @@ out:
 }
 
 static int ip6_dst_lookup_tail(struct sock *sk,
-                              struct dst_entry **dst, struct flowi *fl)
+                              struct dst_entry **dst, struct flowi6 *fl6)
 {
        int err;
        struct net *net = sock_net(sk);
 
        if (*dst == NULL)
-               *dst = ip6_route_output(net, sk, fl);
+               *dst = ip6_route_output(net, sk, fl6);
 
        if ((err = (*dst)->error))
                goto out_err_release;
 
-       if (ipv6_addr_any(&fl->fl6_src)) {
+       if (ipv6_addr_any(&fl6->saddr)) {
                err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
-                                        &fl->fl6_dst,
+                                        &fl6->daddr,
                                         sk ? inet6_sk(sk)->srcprefs : 0,
-                                        &fl->fl6_src);
+                                        &fl6->saddr);
                if (err)
                        goto out_err_release;
        }
@@ -949,10 +949,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         */
        if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
                struct inet6_ifaddr *ifp;
-               struct flowi fl_gw;
+               struct flowi6 fl_gw6;
                int redirect;
 
-               ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
+               ifp = ipv6_get_ifaddr(net, &fl6->saddr,
                                      (*dst)->dev, 1);
 
                redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
@@ -965,9 +965,9 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                         * default router instead
                         */
                        dst_release(*dst);
-                       memcpy(&fl_gw, fl, sizeof(struct flowi));
-                       memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
-                       *dst = ip6_route_output(net, sk, &fl_gw);
+                       memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
+                       memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
+                       *dst = ip6_route_output(net, sk, &fl_gw6);
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
@@ -988,43 +988,85 @@ out_err_release:
  *     ip6_dst_lookup - perform route lookup on flow
  *     @sk: socket which provides route info
  *     @dst: pointer to dst_entry * for result
- *     @fl: flow to lookup
+ *     @fl6: flow to lookup
  *
  *     This function performs a route lookup on the given flow.
  *
  *     It returns zero on success, or a standard errno code on error.
  */
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
 {
        *dst = NULL;
-       return ip6_dst_lookup_tail(sk, dst, fl);
+       return ip6_dst_lookup_tail(sk, dst, fl6);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
 /**
- *     ip6_sk_dst_lookup - perform socket cached route lookup on flow
+ *     ip6_dst_lookup_flow - perform route lookup on flow with ipsec
+ *     @sk: socket which provides route info
+ *     @fl6: flow to lookup
+ *     @final_dst: final destination address for ipsec lookup
+ *     @can_sleep: we are in a sleepable context
+ *
+ *     This function performs a route lookup on the given flow.
+ *
+ *     It returns a valid dst pointer on success, or a pointer-encoded
+ *     error code.
+ */
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                     const struct in6_addr *final_dst,
+                                     bool can_sleep)
+{
+       struct dst_entry *dst = NULL;
+       int err;
+
+       err = ip6_dst_lookup_tail(sk, &dst, fl6);
+       if (err)
+               return ERR_PTR(err);
+       if (final_dst)
+               ipv6_addr_copy(&fl6->daddr, final_dst);
+       if (can_sleep)
+               fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
+
+       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+}
+EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+
+/**
+ *     ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
  *     @sk: socket which provides the dst cache and route info
- *     @dst: pointer to dst_entry * for result
- *     @fl: flow to lookup
+ *     @fl6: flow to lookup
+ *     @final_dst: final destination address for ipsec lookup
+ *     @can_sleep: we are in a sleepable context
  *
  *     This function performs a route lookup on the given flow with the
  *     possibility of using the cached route in the socket if it is valid.
  *     It will take the socket dst lock when operating on the dst cache.
  *     As a result, this function can only be used in process context.
  *
- *     It returns zero on success, or a standard errno code on error.
+ *     It returns a valid dst pointer on success, or a pointer-encoded
+ *     error code.
  */
-int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                        const struct in6_addr *final_dst,
+                                        bool can_sleep)
 {
-       *dst = NULL;
-       if (sk) {
-               *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-               *dst = ip6_sk_dst_check(sk, *dst, fl);
-       }
+       struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
+       int err;
+
+       dst = ip6_sk_dst_check(sk, dst, fl6);
+
+       err = ip6_dst_lookup_tail(sk, &dst, fl6);
+       if (err)
+               return ERR_PTR(err);
+       if (final_dst)
+               ipv6_addr_copy(&fl6->daddr, final_dst);
+       if (can_sleep)
+               fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
-       return ip6_dst_lookup_tail(sk, dst, fl);
+       return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
-EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
+EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
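The two helpers added above replace the old int-returning lookups: the caller now receives the dst directly, with failures encoded via ERR_PTR(). A minimal caller sketch, modelled on the rawv6_sendmsg() conversion later in this merge (fl6 and final_p prepared as in that hunk):

	struct dst_entry *dst;

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);	/* nothing to release on this path */
		goto out;
	}
	/* ... use dst, then dst_release(dst) when done ... */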
 
 static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
@@ -1061,7 +1103,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
-               sk->sk_sndmsg_off = 0;
        }
 
        err = skb_append_datato_frags(sk,skb, getfrag, from,
@@ -1104,7 +1145,7 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int offset, int len, int odd, struct sk_buff *skb),
        void *from, int length, int transhdrlen,
-       int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
+       int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
        struct rt6_info *rt, unsigned int flags, int dontfrag)
 {
        struct inet_sock *inet = inet_sk(sk);
@@ -1118,6 +1159,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int err;
        int offset = 0;
        int csummode = CHECKSUM_NONE;
+       __u8 tx_flags = 0;
 
        if (flags&MSG_PROBE)
                return 0;
@@ -1161,7 +1203,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                }
                dst_hold(&rt->dst);
                inet->cork.dst = &rt->dst;
-               inet->cork.fl = *fl;
+               inet->cork.fl.u.ip6 = *fl6;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
                mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
@@ -1182,7 +1224,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                transhdrlen += exthdrlen;
        } else {
                rt = (struct rt6_info *)inet->cork.dst;
-               fl = &inet->cork.fl;
+               fl6 = &inet->cork.fl.u.ip6;
                opt = np->cork.opt;
                transhdrlen = 0;
                exthdrlen = 0;
@@ -1197,11 +1239,18 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 
        if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
                if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
-                       ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
+                       ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
                        return -EMSGSIZE;
                }
        }
 
+       /* For UDP, check if TX timestamp is enabled */
+       if (sk->sk_type == SOCK_DGRAM) {
+               err = sock_tx_timestamp(sk, &tx_flags);
+               if (err)
+                       goto error;
+       }
+
        /*
         * Let's try using as much space as possible.
         * Use MTU if total length of the message fits into the MTU.
@@ -1222,7 +1271,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        if (length > mtu) {
                int proto = sk->sk_protocol;
                if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
-                       ipv6_local_rxpmtu(sk, fl, mtu-exthdrlen);
+                       ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
                        return -EMSGSIZE;
                }
 
@@ -1306,6 +1355,12 @@ alloc_new_skb:
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
+                               else {
+                                       /* Only the initial fragment
+                                        * is time stamped.
+                                        */
+                                       tx_flags = 0;
+                               }
                        }
                        if (skb == NULL)
                                goto error;
@@ -1317,6 +1372,9 @@ alloc_new_skb:
                        /* reserve for fragmentation */
                        skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
 
+                       if (sk->sk_type == SOCK_DGRAM)
+                               skb_shinfo(skb)->tx_flags = tx_flags;
+
                        /*
                         *      Find where to start putting bytes
                         */
@@ -1458,8 +1516,8 @@ int ip6_push_pending_frames(struct sock *sk)
        struct ipv6hdr *hdr;
        struct ipv6_txoptions *opt = np->cork.opt;
        struct rt6_info *rt = (struct rt6_info *)inet->cork.dst;
-       struct flowi *fl = &inet->cork.fl;
-       unsigned char proto = fl->proto;
+       struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
+       unsigned char proto = fl6->flowi6_proto;
        int err = 0;
 
        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
@@ -1484,7 +1542,7 @@ int ip6_push_pending_frames(struct sock *sk)
        if (np->pmtudisc < IPV6_PMTUDISC_DO)
                skb->local_df = 1;
 
-       ipv6_addr_copy(final_dst, &fl->fl6_dst);
+       ipv6_addr_copy(final_dst, &fl6->daddr);
        __skb_pull(skb, skb_network_header_len(skb));
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
@@ -1495,12 +1553,12 @@ int ip6_push_pending_frames(struct sock *sk)
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);
 
-       *(__be32*)hdr = fl->fl6_flowlabel |
+       *(__be32*)hdr = fl6->flowlabel |
                     htonl(0x60000000 | ((int)np->cork.tclass << 20));
 
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
-       ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
+       ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
        ipv6_addr_copy(&hdr->daddr, final_dst);
 
        skb->priority = sk->sk_priority;
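The sock_tx_timestamp() hook added to ip6_append_data() above brings transmit timestamping to UDPv6, with only the first fragment carrying the flags. A hypothetical userspace sketch that would exercise it; the SO_TIMESTAMPING constants come from linux/net_tstamp.h and are quoted from memory:

	int val = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)) < 0)
		perror("SO_TIMESTAMPING");
	/* completed timestamps come back on the socket error queue:
	 * recvmsg(fd, &msg, MSG_ERRQUEUE) with SCM_TIMESTAMPING cmsg data */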
index 4f4483e..c1b1bd3 100644 (file)
@@ -57,6 +57,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
@@ -535,7 +536,6 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        int err;
        struct sk_buff *skb2;
        struct iphdr *eiph;
-       struct flowi fl;
        struct rtable *rt;
 
        err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
@@ -577,11 +577,11 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        eiph = ip_hdr(skb2);
 
        /* Try to guess incoming interface */
-       memset(&fl, 0, sizeof(fl));
-       fl.fl4_dst = eiph->saddr;
-       fl.fl4_tos = RT_TOS(eiph->tos);
-       fl.proto = IPPROTO_IPIP;
-       if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
+       rt = ip_route_output_ports(dev_net(skb->dev), NULL,
+                                  eiph->saddr, 0,
+                                  0, 0,
+                                  IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+       if (IS_ERR(rt))
                goto out;
 
        skb2->dev = rt->dst.dev;
@@ -590,15 +590,18 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (rt->rt_flags & RTCF_LOCAL) {
                ip_rt_put(rt);
                rt = NULL;
-               fl.fl4_dst = eiph->daddr;
-               fl.fl4_src = eiph->saddr;
-               fl.fl4_tos = eiph->tos;
-               if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
+               rt = ip_route_output_ports(dev_net(skb->dev), NULL,
+                                          eiph->daddr, eiph->saddr,
+                                          0, 0,
+                                          IPPROTO_IPIP,
+                                          RT_TOS(eiph->tos), 0);
+               if (IS_ERR(rt) ||
                    rt->dst.dev->type != ARPHRD_TUNNEL) {
-                       ip_rt_put(rt);
+                       if (!IS_ERR(rt))
+                               ip_rt_put(rt);
                        goto out;
                }
-               skb_dst_set(skb2, (struct dst_entry *)rt);
+               skb_dst_set(skb2, &rt->dst);
        } else {
                ip_rt_put(rt);
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
@@ -881,7 +884,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 static int ip6_tnl_xmit2(struct sk_buff *skb,
                         struct net_device *dev,
                         __u8 dsfield,
-                        struct flowi *fl,
+                        struct flowi6 *fl6,
                         int encap_limit,
                         __u32 *pmtu)
 {
@@ -901,10 +904,16 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        if ((dst = ip6_tnl_dst_check(t)) != NULL)
                dst_hold(dst);
        else {
-               dst = ip6_route_output(net, NULL, fl);
+               dst = ip6_route_output(net, NULL, fl6);
 
-               if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0)
+               if (dst->error)
                        goto tx_err_link_failure;
+               dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
+               if (IS_ERR(dst)) {
+                       err = PTR_ERR(dst);
+                       dst = NULL;
+                       goto tx_err_link_failure;
+               }
        }
 
        tdev = dst->dev;
@@ -954,7 +963,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 
        skb->transport_header = skb->network_header;
 
-       proto = fl->proto;
+       proto = fl6->flowi6_proto;
        if (encap_limit >= 0) {
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
@@ -962,13 +971,13 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
-       *(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
+       *(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000);
        dsfield = INET_ECN_encapsulate(0, dsfield);
        ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
-       ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
-       ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
+       ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
+       ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
        nf_reset(skb);
        pkt_len = skb->len;
        err = ip6_local_out(skb);
@@ -998,7 +1007,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct iphdr  *iph = ip_hdr(skb);
        int encap_limit = -1;
-       struct flowi fl;
+       struct flowi6 fl6;
        __u8 dsfield;
        __u32 mtu;
        int err;
@@ -1010,16 +1019,16 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;
 
-       memcpy(&fl, &t->fl, sizeof (fl));
-       fl.proto = IPPROTO_IPIP;
+       memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
+       fl6.flowi6_proto = IPPROTO_IPIP;
 
        dsfield = ipv4_get_dsfield(iph);
 
        if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-               fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
                                          & IPV6_TCLASS_MASK;
 
-       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
        if (err != 0) {
                /* XXX: send ICMP error even if DF is not set. */
                if (err == -EMSGSIZE)
@@ -1038,7 +1047,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int encap_limit = -1;
        __u16 offset;
-       struct flowi fl;
+       struct flowi6 fl6;
        __u8 dsfield;
        __u32 mtu;
        int err;
@@ -1060,16 +1069,16 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;
 
-       memcpy(&fl, &t->fl, sizeof (fl));
-       fl.proto = IPPROTO_IPV6;
+       memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
+       fl6.flowi6_proto = IPPROTO_IPV6;
 
        dsfield = ipv6_get_dsfield(ipv6h);
        if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-               fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
        if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
-               fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
-       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
        if (err != 0) {
                if (err == -EMSGSIZE)
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1132,21 +1141,21 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
        struct ip6_tnl_parm *p = &t->parms;
-       struct flowi *fl = &t->fl;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
 
        /* Set up flowi template */
-       ipv6_addr_copy(&fl->fl6_src, &p->laddr);
-       ipv6_addr_copy(&fl->fl6_dst, &p->raddr);
-       fl->oif = p->link;
-       fl->fl6_flowlabel = 0;
+       ipv6_addr_copy(&fl6->saddr, &p->laddr);
+       ipv6_addr_copy(&fl6->daddr, &p->raddr);
+       fl6->flowi6_oif = p->link;
+       fl6->flowlabel = 0;
 
        if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
-               fl->fl6_flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
+               fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
        if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
-               fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
+               fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
 
        ip6_tnl_set_cap(t);
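The conversions in this file and its neighbours are mechanical renames from the shared struct flowi to the IPv6-specific struct flowi6; embedded flows such as t->fl and inet->cork.fl become a union reached through .u.ip6. Collected from the hunks in this merge, the mapping is:

	/*
	 *  old struct flowi          new struct flowi6
	 *  fl.oif               ->   fl6.flowi6_oif
	 *  fl.iif               ->   fl6.flowi6_iif
	 *  fl.mark              ->   fl6.flowi6_mark
	 *  fl.proto             ->   fl6.flowi6_proto
	 *  fl.fl6_dst           ->   fl6.daddr
	 *  fl.fl6_src           ->   fl6.saddr
	 *  fl.fl6_flowlabel     ->   fl6.flowlabel
	 *  fl.fl_ip_sport       ->   fl6.fl6_sport
	 *  fl.fl_ip_dport       ->   fl6.fl6_dport
	 *  fl.fl_icmp_type/code ->   fl6.fl6_icmp_type / fl6.fl6_icmp_code
	 *  fl.fl_mh_type        ->   fl6.fl6_mh_type
	 */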
 
index 9fab274..7ff0343 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -134,14 +135,15 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
        return NULL;
 }
 
-static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
+static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
                            struct mr6_table **mrt)
 {
        struct ip6mr_result res;
        struct fib_lookup_arg arg = { .result = &res, };
        int err;
 
-       err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
+       err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
+                              flowi6_to_flowi(flp6), 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
@@ -269,7 +271,7 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
        return net->ipv6.mrt6;
 }
 
-static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
+static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
                            struct mr6_table **mrt)
 {
        *mrt = net->ipv6.mrt6;
@@ -616,9 +618,9 @@ static int pim6_rcv(struct sk_buff *skb)
        struct net_device  *reg_dev = NULL;
        struct net *net = dev_net(skb->dev);
        struct mr6_table *mrt;
-       struct flowi fl = {
-               .iif    = skb->dev->ifindex,
-               .mark   = skb->mark,
+       struct flowi6 fl6 = {
+               .flowi6_iif     = skb->dev->ifindex,
+               .flowi6_mark    = skb->mark,
        };
        int reg_vif_num;
 
@@ -643,7 +645,7 @@ static int pim6_rcv(struct sk_buff *skb)
            ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
                goto drop;
 
-       if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
+       if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
                goto drop;
        reg_vif_num = mrt->mroute_reg_vif_num;
 
@@ -686,14 +688,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 {
        struct net *net = dev_net(dev);
        struct mr6_table *mrt;
-       struct flowi fl = {
-               .oif            = dev->ifindex,
-               .iif            = skb->skb_iif,
-               .mark           = skb->mark,
+       struct flowi6 fl6 = {
+               .flowi6_oif     = dev->ifindex,
+               .flowi6_iif     = skb->skb_iif,
+               .flowi6_mark    = skb->mark,
        };
        int err;
 
-       err = ip6mr_fib_lookup(net, &fl, &mrt);
+       err = ip6mr_fib_lookup(net, &fl6, &mrt);
        if (err < 0)
                return err;
 
@@ -1038,7 +1040,6 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
 
        while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ipv6_hdr(skb)->version == 0) {
-                       int err;
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 
                        if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
@@ -1049,7 +1050,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
                                skb_trim(skb, nlh->nlmsg_len);
                                ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
                        }
-                       err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
                        ip6_mr_forward(net, mrt, skb, c);
        }
@@ -1547,13 +1548,13 @@ int ip6mr_sk_done(struct sock *sk)
 struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
 {
        struct mr6_table *mrt;
-       struct flowi fl = {
-               .iif    = skb->skb_iif,
-               .oif    = skb->dev->ifindex,
-               .mark   = skb->mark,
+       struct flowi6 fl6 = {
+               .flowi6_iif     = skb->skb_iif,
+               .flowi6_oif     = skb->dev->ifindex,
+               .flowi6_mark    = skb->mark,
        };
 
-       if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
+       if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
                return NULL;
 
        return mrt->mroute6_sk;
@@ -1804,6 +1805,80 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
        }
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req6 {
+       struct sockaddr_in6 src;
+       struct sockaddr_in6 grp;
+       compat_ulong_t pktcnt;
+       compat_ulong_t bytecnt;
+       compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_mif_req6 {
+       mifi_t  mifi;
+       compat_ulong_t icount;
+       compat_ulong_t ocount;
+       compat_ulong_t ibytes;
+       compat_ulong_t obytes;
+};
+
+int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+       struct compat_sioc_sg_req6 sr;
+       struct compat_sioc_mif_req6 vr;
+       struct mif_device *vif;
+       struct mfc6_cache *c;
+       struct net *net = sock_net(sk);
+       struct mr6_table *mrt;
+
+       mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+       if (mrt == NULL)
+               return -ENOENT;
+
+       switch (cmd) {
+       case SIOCGETMIFCNT_IN6:
+               if (copy_from_user(&vr, arg, sizeof(vr)))
+                       return -EFAULT;
+               if (vr.mifi >= mrt->maxvif)
+                       return -EINVAL;
+               read_lock(&mrt_lock);
+               vif = &mrt->vif6_table[vr.mifi];
+               if (MIF_EXISTS(mrt, vr.mifi)) {
+                       vr.icount = vif->pkt_in;
+                       vr.ocount = vif->pkt_out;
+                       vr.ibytes = vif->bytes_in;
+                       vr.obytes = vif->bytes_out;
+                       read_unlock(&mrt_lock);
+
+                       if (copy_to_user(arg, &vr, sizeof(vr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               read_unlock(&mrt_lock);
+               return -EADDRNOTAVAIL;
+       case SIOCGETSGCNT_IN6:
+               if (copy_from_user(&sr, arg, sizeof(sr)))
+                       return -EFAULT;
+
+               read_lock(&mrt_lock);
+               c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+               if (c) {
+                       sr.pktcnt = c->mfc_un.res.pkt;
+                       sr.bytecnt = c->mfc_un.res.bytes;
+                       sr.wrong_if = c->mfc_un.res.wrong_if;
+                       read_unlock(&mrt_lock);
+
+                       if (copy_to_user(arg, &sr, sizeof(sr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               read_unlock(&mrt_lock);
+               return -EADDRNOTAVAIL;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+#endif
 
 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
@@ -1823,7 +1898,7 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
        struct mif_device *vif = &mrt->vif6_table[vifi];
        struct net_device *dev;
        struct dst_entry *dst;
-       struct flowi fl;
+       struct flowi6 fl6;
 
        if (vif->dev == NULL)
                goto out_free;
@@ -1841,12 +1916,12 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
 
        ipv6h = ipv6_hdr(skb);
 
-       fl = (struct flowi) {
-               .oif = vif->link,
-               .fl6_dst = ipv6h->daddr,
+       fl6 = (struct flowi6) {
+               .flowi6_oif = vif->link,
+               .daddr = ipv6h->daddr,
        };
 
-       dst = ip6_route_output(net, NULL, &fl);
+       dst = ip6_route_output(net, NULL, &fl6);
        if (!dst)
                goto out_free;
 
@@ -1969,13 +2044,13 @@ int ip6_mr_input(struct sk_buff *skb)
        struct mfc6_cache *cache;
        struct net *net = dev_net(skb->dev);
        struct mr6_table *mrt;
-       struct flowi fl = {
-               .iif    = skb->dev->ifindex,
-               .mark   = skb->mark,
+       struct flowi6 fl6 = {
+               .flowi6_iif     = skb->dev->ifindex,
+               .flowi6_mark    = skb->mark,
        };
        int err;
 
-       err = ip6mr_fib_lookup(net, &fl, &mrt);
+       err = ip6mr_fib_lookup(net, &fl6, &mrt);
        if (err < 0)
                return err;
 
index d1770e0..9cb191e 100644 (file)
@@ -444,12 +444,12 @@ sticky_done:
        {
                struct ipv6_txoptions *opt = NULL;
                struct msghdr msg;
-               struct flowi fl;
+               struct flowi6 fl6;
                int junk;
 
-               fl.fl6_flowlabel = 0;
-               fl.oif = sk->sk_bound_dev_if;
-               fl.mark = sk->sk_mark;
+               memset(&fl6, 0, sizeof(fl6));
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
+               fl6.flowi6_mark = sk->sk_mark;
 
                if (optlen == 0)
                        goto update;
@@ -475,7 +475,7 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk,
+               retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
                                         &junk);
                if (retv)
                        goto done;
index 49f986d..76b8937 100644 (file)
@@ -319,7 +319,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 {
        struct in6_addr *source, *group;
        struct ipv6_mc_socklist *pmc;
-       struct net_device *dev;
        struct inet6_dev *idev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *psl;
@@ -341,7 +340,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
                rcu_read_unlock();
                return -ENODEV;
        }
-       dev = idev->dev;
 
        err = -EADDRNOTAVAIL;
 
@@ -455,7 +453,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 {
        struct in6_addr *group;
        struct ipv6_mc_socklist *pmc;
-       struct net_device *dev;
        struct inet6_dev *idev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *newpsl, *psl;
@@ -478,7 +475,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
                rcu_read_unlock();
                return -ENODEV;
        }
-       dev = idev->dev;
 
        err = 0;
 
@@ -549,7 +545,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
        struct in6_addr *group;
        struct ipv6_mc_socklist *pmc;
        struct inet6_dev *idev;
-       struct net_device *dev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *psl;
        struct net *net = sock_net(sk);
@@ -566,7 +561,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                rcu_read_unlock();
                return -ENODEV;
        }
-       dev = idev->dev;
 
        err = -EADDRNOTAVAIL;
        /*
@@ -1402,7 +1396,7 @@ static void mld_sendpack(struct sk_buff *skb)
        struct inet6_dev *idev;
        struct net *net = dev_net(skb->dev);
        int err;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
 
        rcu_read_lock();
@@ -1425,11 +1419,16 @@ static void mld_sendpack(struct sk_buff *skb)
                goto err_out;
        }
 
-       icmpv6_flow_init(net->ipv6.igmp_sk, &fl, ICMPV6_MLD2_REPORT,
+       icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       err = 0;
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
+       }
        skb_dst_set(skb, dst);
        if (err)
                goto err_out;
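mld_sendpack() above, like igmp6_send(), ndisc_send_skb() and ip6_tnl_xmit2() elsewhere in this merge, adopts the new xfrm_lookup() calling convention: the route's dst is passed in by value and the (possibly transformed) dst, or an ERR_PTR()-encoded error, is returned, replacing the old int status plus in/out dst pointer. The recurring conversion shape, as a sketch:

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto err_out;
	}
	skb_dst_set(skb, dst);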
@@ -1732,7 +1731,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
                     IPV6_TLV_PADN, 0 };
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
 
        if (type == ICMPV6_MGM_REDUCTION)
@@ -1792,13 +1791,15 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                goto err_out;
        }
 
-       icmpv6_flow_init(sk, &fl, type,
+       icmpv6_flow_init(sk, &fl6, type,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
-       if (err)
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto err_out;
+       }
 
        skb_dst_set(skb, dst);
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
index d6e9599..9b21048 100644 (file)
@@ -203,18 +203,20 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
        return allow;
 }
 
-static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct flowi *fl)
+static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
+                              const struct flowi *fl)
 {
        struct net *net = xs_net(x);
        struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
+       const struct flowi6 *fl6 = &fl->u.ip6;
        struct ipv6_destopt_hao *hao = NULL;
        struct xfrm_selector sel;
        int offset;
        struct timeval stamp;
        int err = 0;
 
-       if (unlikely(fl->proto == IPPROTO_MH &&
-                    fl->fl_mh_type <= IP6_MH_TYPE_MAX))
+       if (unlikely(fl6->flowi6_proto == IPPROTO_MH &&
+                    fl6->fl6_mh_type <= IP6_MH_TYPE_MAX))
                goto out;
 
        if (likely(opt->dsthao)) {
@@ -239,14 +241,14 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct
               sizeof(sel.saddr));
        sel.prefixlen_s = 128;
        sel.family = AF_INET6;
-       sel.proto = fl->proto;
-       sel.dport = xfrm_flowi_dport(fl);
+       sel.proto = fl6->flowi6_proto;
+       sel.dport = xfrm_flowi_dport(fl, &fl6->uli);
        if (sel.dport)
                sel.dport_mask = htons(~0);
-       sel.sport = xfrm_flowi_sport(fl);
+       sel.sport = xfrm_flowi_sport(fl, &fl6->uli);
        if (sel.sport)
                sel.sport_mask = htons(~0);
-       sel.ifindex = fl->oif;
+       sel.ifindex = fl6->flowi6_oif;
 
        err = km_report(net, IPPROTO_DSTOPTS, &sel,
                        (hao ? (xfrm_address_t *)&hao->addr : NULL));
index 2342545..0e49c9d 100644 (file)
@@ -511,7 +511,7 @@ void ndisc_send_skb(struct sk_buff *skb,
                    const struct in6_addr *saddr,
                    struct icmp6hdr *icmp6h)
 {
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
        struct net *net = dev_net(dev);
        struct sock *sk = net->ipv6.ndisc_sk;
@@ -521,7 +521,7 @@ void ndisc_send_skb(struct sk_buff *skb,
 
        type = icmp6h->icmp6_type;
 
-       icmpv6_flow_init(sk, &fl, type, saddr, daddr, dev->ifindex);
+       icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex);
 
        dst = icmp6_dst_alloc(dev, neigh, daddr);
        if (!dst) {
@@ -529,8 +529,8 @@ void ndisc_send_skb(struct sk_buff *skb,
                return;
        }
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
-       if (err < 0) {
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       if (IS_ERR(dst)) {
                kfree_skb(skb);
                return;
        }
@@ -1515,7 +1515,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        struct rt6_info *rt;
        struct dst_entry *dst;
        struct inet6_dev *idev;
-       struct flowi fl;
+       struct flowi6 fl6;
        u8 *opt;
        int rd_len;
        int err;
@@ -1535,15 +1535,15 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
                return;
        }
 
-       icmpv6_flow_init(sk, &fl, NDISC_REDIRECT,
+       icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT,
                         &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
 
-       dst = ip6_route_output(net, NULL, &fl);
+       dst = ip6_route_output(net, NULL, &fl6);
        if (dst == NULL)
                return;
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
-       if (err)
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       if (IS_ERR(dst))
                return;
 
        rt = (struct rt6_info *) dst;
@@ -1553,7 +1553,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
                           "ICMPv6 Redirect: destination is not a neighbour.\n");
                goto release;
        }
-       if (!xrlim_allow(dst, 1*HZ))
+       if (!rt->rt6i_peer)
+               rt6_bind_peer(rt, 1);
+       if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                goto release;
 
        if (dev->addr_len) {
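Redirect rate limiting now keys off the inet_peer entry bound to the route rather than the dst; inet_peer_xrlim_allow() returns true when transmission is permitted, so ip6_forward() sends the redirect on true and ndisc_send_redirect() gives up when it returns false. For reference, the ip6_forward() form from earlier in this merge:

	if (!rt->rt6i_peer)
		rt6_bind_peer(rt, 1);
	if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
		ndisc_send_redirect(skb, n, target);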
index 35915e8..39aaca2 100644 (file)
@@ -15,14 +15,14 @@ int ip6_route_me_harder(struct sk_buff *skb)
        struct net *net = dev_net(skb_dst(skb)->dev);
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct dst_entry *dst;
-       struct flowi fl = {
-               .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
-               .mark = skb->mark,
-               .fl6_dst = iph->daddr,
-               .fl6_src = iph->saddr,
+       struct flowi6 fl6 = {
+               .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
+               .flowi6_mark = skb->mark,
+               .daddr = iph->daddr,
+               .saddr = iph->saddr,
        };
 
-       dst = ip6_route_output(net, skb->sk, &fl);
+       dst = ip6_route_output(net, skb->sk, &fl6);
        if (dst->error) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
@@ -37,9 +37,10 @@ int ip6_route_me_harder(struct sk_buff *skb)
 
 #ifdef CONFIG_XFRM
        if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
-           xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
+           xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
                skb_dst_set(skb, NULL);
-               if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
+               dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
+               if (IS_ERR(dst))
                        return -1;
                skb_dst_set(skb, dst);
        }
@@ -91,7 +92,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 
 static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
 {
-       *dst = ip6_route_output(&init_net, NULL, fl);
+       *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
        return (*dst)->error;
 }
 
index 7d227c6..c9598a9 100644 (file)
@@ -1076,6 +1076,7 @@ static int compat_table_info(const struct xt_table_info *info,
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       xt_compat_init_offsets(AF_INET6, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
                if (ret != 0)
@@ -1274,6 +1275,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
@@ -1679,6 +1681,7 @@ translate_compat_table(struct net *net,
        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET6);
+       xt_compat_init_offsets(AF_INET6, number);
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter0, entry0, total_size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
@@ -1820,6 +1823,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
@@ -2049,6 +2053,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        ret = -EFAULT;
                        break;
                }
+               rev.name[sizeof(rev.name)-1] = 0;
 
                if (cmd == IP6T_SO_GET_REVISION_TARGET)
                        target = 1;
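The added tmp.name[sizeof(tmp.name)-1] = 0 and rev.name[...] = 0 assignments force NUL termination on table and revision names copied in from userspace before they are used as strings, and xt_compat_init_offsets() pre-sizes the compat offset bookkeeping from the entry count. A minimal sketch of the termination pattern (buffer name hypothetical, for illustration only):

	char name[XT_TABLE_MAXNAMELEN];

	if (copy_from_user(name, user, sizeof(name)))
		return -EFAULT;
	name[sizeof(name) - 1] = '\0';	/* userspace need not terminate it */
	/* now safe for strcmp()/strlen() style helpers */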
index 09c8889..e6af8d7 100644 (file)
@@ -410,7 +410,7 @@ fallback:
                if (p != NULL) {
                        sb_add(m, "%02x", *p++);
                        for (i = 1; i < len; i++)
-                               sb_add(m, ":%02x", p[i]);
+                               sb_add(m, ":%02x", *p++);
                }
                sb_add(m, " ");
 
@@ -452,8 +452,7 @@ ip6t_log_packet(u_int8_t pf,
               in ? in->name : "",
               out ? out->name : "");
 
-       /* MAC logging for input path only. */
-       if (in && !out)
+       if (in != NULL)
                dump_mac_header(m, loginfo, skb);
 
        dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
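In the hardware-header dump above, p has already been advanced past byte 0 by the first *p++, so the old p[i] indexing skipped byte 1 and read one byte past the end of the header; continuing with *p++ walks bytes 1..len-1. The corrected loop in isolation, as a sketch:

	/* emits e.g. "00:16:3e:12:34:56 " for a 6-byte hardware header */
	sb_add(m, "%02x", *p++);
	for (i = 1; i < len; i++)
		sb_add(m, ":%02x", *p++);
	sb_add(m, " ");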
index bf998fe..28e7448 100644 (file)
@@ -47,7 +47,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        struct ipv6hdr *ip6h;
        struct dst_entry *dst = NULL;
        u8 proto;
-       struct flowi fl;
+       struct flowi6 fl6;
 
        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
            (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
@@ -89,19 +89,20 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
                return;
        }
 
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl.fl6_src, &oip6h->daddr);
-       ipv6_addr_copy(&fl.fl6_dst, &oip6h->saddr);
-       fl.fl_ip_sport = otcph.dest;
-       fl.fl_ip_dport = otcph.source;
-       security_skb_classify_flow(oldskb, &fl);
-       dst = ip6_route_output(net, NULL, &fl);
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_TCP;
+       ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
+       ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
+       fl6.fl6_sport = otcph.dest;
+       fl6.fl6_dport = otcph.source;
+       security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+       dst = ip6_route_output(net, NULL, &fl6);
        if (dst == NULL || dst->error) {
                dst_release(dst);
                return;
        }
-       if (xfrm_lookup(net, &dst, &fl, NULL, 0))
+       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       if (IS_ERR(dst))
                return;
 
        hh_len = (dst->dev->hard_header_len + 15)&~15;
index 79d43aa..0857272 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
 
 struct nf_ct_frag6_skb_cb
@@ -73,7 +74,7 @@ static struct inet_frags nf_frags;
 static struct netns_frags nf_init_frags;
 
 #ifdef CONFIG_SYSCTL
-struct ctl_table nf_ct_frag6_sysctl_table[] = {
+static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_frag6_timeout",
                .data           = &nf_init_frags.timeout,
index 86c3952..4a1c3b4 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/skbuff.h>
+#include <linux/compat.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 
@@ -123,18 +124,18 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
 }
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
+typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
 
-int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
-                                          struct sk_buff *skb))
+static mh_filter_t __rcu *mh_filter __read_mostly;
+
+int rawv6_mh_filter_register(mh_filter_t filter)
 {
        rcu_assign_pointer(mh_filter, filter);
        return 0;
 }
 EXPORT_SYMBOL(rawv6_mh_filter_register);
 
-int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
-                                            struct sk_buff *skb))
+int rawv6_mh_filter_unregister(mh_filter_t filter)
 {
        rcu_assign_pointer(mh_filter, NULL);
        synchronize_rcu();
@@ -192,10 +193,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
                         * policy is placed in rawv6_rcv() because it is
                         * required for each socket.
                         */
-                       int (*filter)(struct sock *sock, struct sk_buff *skb);
+                       mh_filter_t *filter;
 
                        filter = rcu_dereference(mh_filter);
-                       filtered = filter ? filter(sk, skb) : 0;
+                       filtered = filter ? (*filter)(sk, skb) : 0;
                        break;
                }
 #endif
@@ -523,7 +524,7 @@ csum_copy_err:
        goto out;
 }
 
-static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
+static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                                     struct raw6_sock *rp)
 {
        struct sk_buff *skb;
@@ -585,11 +586,10 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
        if (unlikely(csum))
                tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
 
-       csum = csum_ipv6_magic(&fl->fl6_src,
-                                  &fl->fl6_dst,
-                                  total_len, fl->proto, tmp_csum);
+       csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
+                              total_len, fl6->flowi6_proto, tmp_csum);
 
-       if (csum == 0 && fl->proto == IPPROTO_UDP)
+       if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
                csum = CSUM_MANGLED_0;
 
        if (skb_store_bits(skb, offset, &csum, 2))
@@ -602,7 +602,7 @@ out:
 }
 
 static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
-                       struct flowi *fl, struct dst_entry **dstp,
+                       struct flowi6 *fl6, struct dst_entry **dstp,
                        unsigned int flags)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -612,7 +612,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
        struct rt6_info *rt = (struct rt6_info *)*dstp;
 
        if (length > rt->dst.dev->mtu) {
-               ipv6_local_error(sk, EMSGSIZE, fl, rt->dst.dev->mtu);
+               ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
                return -EMSGSIZE;
        }
        if (flags&MSG_PROBE)
@@ -661,7 +661,7 @@ error:
        return err;
 }
 
-static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
+static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
 {
        struct iovec *iov;
        u8 __user *type = NULL;
@@ -678,7 +678,7 @@ static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
                if (!iov)
                        continue;
 
-               switch (fl->proto) {
+               switch (fl6->flowi6_proto) {
                case IPPROTO_ICMPV6:
                        /* check if one-byte field is readable or not. */
                        if (iov->iov_base && iov->iov_len < 1)
@@ -693,8 +693,8 @@ static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
                                code = iov->iov_base;
 
                        if (type && code) {
-                               if (get_user(fl->fl_icmp_type, type) ||
-                                   get_user(fl->fl_icmp_code, code))
+                               if (get_user(fl6->fl6_icmp_type, type) ||
+                                   get_user(fl6->fl6_icmp_code, code))
                                        return -EFAULT;
                                probed = 1;
                        }
@@ -705,7 +705,7 @@ static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
                        /* check if type field is readable or not. */
                        if (iov->iov_len > 2 - len) {
                                u8 __user *p = iov->iov_base;
-                               if (get_user(fl->fl_mh_type, &p[2 - len]))
+                               if (get_user(fl6->fl6_mh_type, &p[2 - len]))
                                        return -EFAULT;
                                probed = 1;
                        } else
@@ -734,7 +734,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
        struct ipv6_txoptions *opt = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
        struct dst_entry *dst = NULL;
-       struct flowi fl;
+       struct flowi6 fl6;
        int addr_len = msg->msg_namelen;
        int hlimit = -1;
        int tclass = -1;
@@ -755,9 +755,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
        /*
         *      Get and verify the address.
         */
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
 
-       fl.mark = sk->sk_mark;
+       fl6.flowi6_mark = sk->sk_mark;
 
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
@@ -779,9 +779,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
                daddr = &sin6->sin6_addr;
                if (np->sndflow) {
-                       fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-                       if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
-                               flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+                       fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+                       if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
+                               flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                                if (flowlabel == NULL)
                                        return -EINVAL;
                                daddr = &flowlabel->dst;
@@ -799,32 +799,32 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    sin6->sin6_scope_id &&
                    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
-                       fl.oif = sin6->sin6_scope_id;
+                       fl6.flowi6_oif = sin6->sin6_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
 
                proto = inet->inet_num;
                daddr = &np->daddr;
-               fl.fl6_flowlabel = np->flow_label;
+               fl6.flowlabel = np->flow_label;
        }
 
-       if (fl.oif == 0)
-               fl.oif = sk->sk_bound_dev_if;
+       if (fl6.flowi6_oif == 0)
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
 
        if (msg->msg_controllen) {
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
+               err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
                                        &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
                }
-               if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
-                       flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+               if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
+                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                }
@@ -837,40 +837,31 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
 
-       fl.proto = proto;
-       err = rawv6_probe_proto_opt(&fl, msg);
+       fl6.flowi6_proto = proto;
+       err = rawv6_probe_proto_opt(&fl6, msg);
        if (err)
                goto out;
 
        if (!ipv6_addr_any(daddr))
-               ipv6_addr_copy(&fl.fl6_dst, daddr);
+               ipv6_addr_copy(&fl6.daddr, daddr);
        else
-               fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
-       if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&fl.fl6_src, &np->saddr);
+               fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+       if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
+               ipv6_addr_copy(&fl6.saddr, &np->saddr);
 
-       final_p = fl6_update_dst(&fl, opt, &final);
+       final_p = fl6_update_dst(&fl6, opt, &final);
 
-       if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
-               fl.oif = np->mcast_oif;
-       security_sk_classify_flow(sk, &fl);
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+               fl6.flowi6_oif = np->mcast_oif;
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto out;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto out;
        }
-
        if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl.fl6_dst))
+               if (ipv6_addr_is_multicast(&fl6.daddr))
                        hlimit = np->mcast_hops;
                else
                        hlimit = np->hop_limit;
@@ -889,17 +880,17 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 back_from_confirm:
        if (inet->hdrincl)
-               err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, &dst, msg->msg_flags);
+               err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags);
        else {
                lock_sock(sk);
                err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
-                       len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
+                       len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst,
                        msg->msg_flags, dontfrag);
 
                if (err)
                        ip6_flush_pending_frames(sk);
                else if (!(msg->msg_flags & MSG_MORE))
-                       err = rawv6_push_pending_frames(sk, &fl, rp);
+                       err = rawv6_push_pending_frames(sk, &fl6, rp);
                release_sock(sk);
        }
 done:
@@ -1157,6 +1148,23 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
        }
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case SIOCOUTQ:
+       case SIOCINQ:
+               return -ENOIOCTLCMD;
+       default:
+#ifdef CONFIG_IPV6_MROUTE
+               return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+               return -ENOIOCTLCMD;
+#endif
+       }
+}
+#endif
+
 static void rawv6_close(struct sock *sk, long timeout)
 {
        if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@@ -1215,6 +1223,7 @@ struct proto rawv6_prot = {
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_rawv6_setsockopt,
        .compat_getsockopt = compat_rawv6_getsockopt,
+       .compat_ioctl      = compat_rawv6_ioctl,
 #endif
 };
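The mh_filter rework in this file is the standard RCU-protected function pointer pattern: a typedef'd pointer annotated __rcu, published with rcu_assign_pointer(), read with rcu_dereference() on the fast path, and retired by assigning NULL and calling synchronize_rcu() so in-flight readers drain before the filter module unloads. Condensed sketch of the pattern as used above:

	typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
	static mh_filter_t __rcu *mh_filter __read_mostly;

	/* writer side */
	rcu_assign_pointer(mh_filter, new_filter);	/* register */
	rcu_assign_pointer(mh_filter, NULL);		/* unregister ... */
	synchronize_rcu();				/* ... then wait for readers */

	/* reader side, inside an RCU read-side critical section */
	mh_filter_t *filter = rcu_dereference(mh_filter);
	int filtered = filter ? (*filter)(sk, skb) : 0;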
 
index 373bd04..6814c87 100644 (file)
@@ -72,8 +72,6 @@
 #define RT6_TRACE(x...) do { ; } while (0)
 #endif
 
-#define CLONE_OFFLINK_ROUTE 0
-
 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
 static struct dst_entry        *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
@@ -99,6 +97,36 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                                           struct in6_addr *gwaddr, int ifindex);
 #endif
 
+static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+       struct rt6_info *rt = (struct rt6_info *) dst;
+       struct inet_peer *peer;
+       u32 *p = NULL;
+
+       if (!rt->rt6i_peer)
+               rt6_bind_peer(rt, 1);
+
+       peer = rt->rt6i_peer;
+       if (peer) {
+               u32 *old_p = __DST_METRICS_PTR(old);
+               unsigned long prev, new;
+
+               p = peer->metrics;
+               if (inet_metrics_new(peer))
+                       memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+               new = (unsigned long) p;
+               prev = cmpxchg(&dst->_metrics, old, new);
+
+               if (prev != old) {
+                       p = __DST_METRICS_PTR(prev);
+                       if (prev & DST_METRICS_READ_ONLY)
+                               p = NULL;
+               }
+       }
+       return p;
+}
+
 static struct dst_ops ip6_dst_ops_template = {
        .family                 =       AF_INET6,
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
@@ -107,6 +135,7 @@ static struct dst_ops ip6_dst_ops_template = {
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
        .default_mtu            =       ip6_default_mtu,
+       .cow_metrics            =       ipv6_cow_metrics,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
        .negative_advice        =       ip6_negative_advice,
@@ -115,6 +144,11 @@ static struct dst_ops ip6_dst_ops_template = {
        .local_out              =       __ip6_local_out,
 };
 
+static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+{
+       return 0;
+}
+
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -124,9 +158,15 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
+       .default_mtu            =       ip6_blackhole_default_mtu,
+       .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
 };
 
+static const u32 ip6_template_metrics[RTAX_MAX] = {
+       [RTAX_HOPLIMIT - 1] = 255,
+};
+
 static struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
@@ -182,7 +222,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
 {
-       return (struct rt6_info *)dst_alloc(ops);
+       return (struct rt6_info *)dst_alloc(ops, 0);
 }
 
 static void ip6_dst_destroy(struct dst_entry *dst)
@@ -196,22 +236,27 @@ static void ip6_dst_destroy(struct dst_entry *dst)
                in6_dev_put(idev);
        }
        if (peer) {
-               BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
                rt->rt6i_peer = NULL;
                inet_putpeer(peer);
        }
 }
 
+static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt6_peer_genid(void)
+{
+       return atomic_read(&__rt6_peer_genid);
+}
+
 void rt6_bind_peer(struct rt6_info *rt, int create)
 {
        struct inet_peer *peer;
 
-       if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
-               return;
-
        peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
        if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
                inet_putpeer(peer);
+       else
+               rt->rt6i_peer_genid = rt6_peer_genid();
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -554,17 +599,17 @@ do { \
 
 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
-                                            struct flowi *fl, int flags)
+                                            struct flowi6 *fl6, int flags)
 {
        struct fib6_node *fn;
        struct rt6_info *rt;
 
        read_lock_bh(&table->tb6_lock);
-       fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
+       fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
        rt = fn->leaf;
-       rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
-       BACKTRACK(net, &fl->fl6_src);
+       rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
+       BACKTRACK(net, &fl6->saddr);
 out:
        dst_use(&rt->dst, jiffies);
        read_unlock_bh(&table->tb6_lock);
@@ -575,19 +620,19 @@ out:
 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif, int strict)
 {
-       struct flowi fl = {
-               .oif = oif,
-               .fl6_dst = *daddr,
+       struct flowi6 fl6 = {
+               .flowi6_oif = oif,
+               .daddr = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
 
        if (saddr) {
-               memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
+               memcpy(&fl6.saddr, saddr, sizeof(*saddr));
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        }
 
-       dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
+       dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;
 
@@ -708,7 +753,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d
 }
 
 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
-                                     struct flowi *fl, int flags)
+                                     struct flowi6 *fl6, int flags)
 {
        struct fib6_node *fn;
        struct rt6_info *rt, *nrt;
@@ -723,12 +768,12 @@ relookup:
        read_lock_bh(&table->tb6_lock);
 
 restart_2:
-       fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
+       fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 
 restart:
        rt = rt6_select(fn, oif, strict | reachable);
 
-       BACKTRACK(net, &fl->fl6_src);
+       BACKTRACK(net, &fl6->saddr);
        if (rt == net->ipv6.ip6_null_entry ||
            rt->rt6i_flags & RTF_CACHE)
                goto out;
@@ -737,14 +782,11 @@ restart:
        read_unlock_bh(&table->tb6_lock);
 
        if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
-               nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
-       else {
-#if CLONE_OFFLINK_ROUTE
-               nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
-#else
+               nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+       else if (!(rt->dst.flags & DST_HOST))
+               nrt = rt6_alloc_clone(rt, &fl6->daddr);
+       else
                goto out2;
-#endif
-       }
 
        dst_release(&rt->dst);
        rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -781,9 +823,9 @@ out2:
 }
 
 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
-                                           struct flowi *fl, int flags)
+                                           struct flowi6 *fl6, int flags)
 {
-       return ip6_pol_route(net, table, fl->iif, fl, flags);
+       return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
 }
 
 void ip6_route_input(struct sk_buff *skb)
@@ -791,56 +833,54 @@ void ip6_route_input(struct sk_buff *skb)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
-       struct flowi fl = {
-               .iif = skb->dev->ifindex,
-               .fl6_dst = iph->daddr,
-               .fl6_src = iph->saddr,
-               .fl6_flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
-               .mark = skb->mark,
-               .proto = iph->nexthdr,
+       struct flowi6 fl6 = {
+               .flowi6_iif = skb->dev->ifindex,
+               .daddr = iph->daddr,
+               .saddr = iph->saddr,
+               .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
+               .flowi6_mark = skb->mark,
+               .flowi6_proto = iph->nexthdr,
        };
 
        if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
                flags |= RT6_LOOKUP_F_IFACE;
 
-       skb_dst_set(skb, fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input));
+       skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
 }
 
 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
-                                            struct flowi *fl, int flags)
+                                            struct flowi6 *fl6, int flags)
 {
-       return ip6_pol_route(net, table, fl->oif, fl, flags);
+       return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
 }
 
 struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
-                                   struct flowi *fl)
+                                   struct flowi6 *fl6)
 {
        int flags = 0;
 
-       if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
+       if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
                flags |= RT6_LOOKUP_F_IFACE;
 
-       if (!ipv6_addr_any(&fl->fl6_src))
+       if (!ipv6_addr_any(&fl6->saddr))
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
 
-       return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
+       return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
 }
 
 EXPORT_SYMBOL(ip6_route_output);
 
-int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
+struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
-       struct rt6_info *ort = (struct rt6_info *) *dstp;
-       struct rt6_info *rt = (struct rt6_info *)
-               dst_alloc(&ip6_dst_blackhole_ops);
+       struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1);
+       struct rt6_info *ort = (struct rt6_info *) dst_orig;
        struct dst_entry *new = NULL;
 
        if (rt) {
                new = &rt->dst;
 
-               atomic_set(&new->__refcnt, 1);
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard;
@@ -866,11 +906,9 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
                dst_free(new);
        }
 
-       dst_release(*dstp);
-       *dstp = new;
-       return new ? 0 : -ENOMEM;
+       dst_release(dst_orig);
+       return new ? new : ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
 
 /*
  *     Destination cache support functions
@@ -882,9 +920,14 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
        rt = (struct rt6_info *) dst;
 
-       if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
+       if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
+               if (rt->rt6i_peer_genid != rt6_peer_genid()) {
+                       if (!rt->rt6i_peer)
+                               rt6_bind_peer(rt, 0);
+                       rt->rt6i_peer_genid = rt6_peer_genid();
+               }
                return dst;
-
+       }
        return NULL;
 }
 
@@ -935,7 +978,6 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
                        dst_metric_set(dst, RTAX_FEATURES, features);
                }
                dst_metric_set(dst, RTAX_MTU, mtu);
-               call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
        }
 }
 
@@ -1032,11 +1074,9 @@ out:
 
 int icmp6_dst_gc(void)
 {
-       struct dst_entry *dst, *next, **pprev;
+       struct dst_entry *dst, **pprev;
        int more = 0;
 
-       next = NULL;
-
        spin_lock_bh(&icmp6_dst_lock);
        pprev = &icmp6_dst_gc_list;
 
@@ -1404,16 +1444,16 @@ static int ip6_route_del(struct fib6_config *cfg)
  *     Handle redirects
  */
 struct ip6rd_flowi {
-       struct flowi fl;
+       struct flowi6 fl6;
        struct in6_addr gateway;
 };
 
 static struct rt6_info *__ip6_route_redirect(struct net *net,
                                             struct fib6_table *table,
-                                            struct flowi *fl,
+                                            struct flowi6 *fl6,
                                             int flags)
 {
-       struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
+       struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
        struct rt6_info *rt;
        struct fib6_node *fn;
 
@@ -1429,7 +1469,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
         */
 
        read_lock_bh(&table->tb6_lock);
-       fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
+       fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
                /*
@@ -1444,7 +1484,7 @@ restart:
                        continue;
                if (!(rt->rt6i_flags & RTF_GATEWAY))
                        continue;
-               if (fl->oif != rt->rt6i_dev->ifindex)
+               if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
                        continue;
                if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
                        continue;
@@ -1453,7 +1493,7 @@ restart:
 
        if (!rt)
                rt = net->ipv6.ip6_null_entry;
-       BACKTRACK(net, &fl->fl6_src);
+       BACKTRACK(net, &fl6->saddr);
 out:
        dst_hold(&rt->dst);
 
@@ -1470,10 +1510,10 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct net *net = dev_net(dev);
        struct ip6rd_flowi rdfl = {
-               .fl = {
-                       .oif = dev->ifindex,
-                       .fl6_dst = *dest,
-                       .fl6_src = *src,
+               .fl6 = {
+                       .flowi6_oif = dev->ifindex,
+                       .daddr = *dest,
+                       .saddr = *src,
                },
        };
 
@@ -1482,7 +1522,7 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
        if (rt6_need_strict(dest))
                flags |= RT6_LOOKUP_F_IFACE;
 
-       return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
+       return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
                                                   flags, __ip6_route_redirect);
 }
 
@@ -1984,12 +2024,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        if (IS_ERR(neigh)) {
                dst_free(&rt->dst);
 
-               /* We are casting this because that is the return
-                * value type.  But an errno encoded pointer is the
-                * same regardless of the underlying pointer type,
-                * and that's what we are returning.  So this is OK.
-                */
-               return (struct rt6_info *) neigh;
+               return ERR_CAST(neigh);
        }
        rt->rt6i_nexthop = neigh;
 
@@ -2350,7 +2385,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        struct rt6_info *rt;
        struct sk_buff *skb;
        struct rtmsg *rtm;
-       struct flowi fl;
+       struct flowi6 fl6;
        int err, iif = 0;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
@@ -2358,27 +2393,27 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                goto errout;
 
        err = -EINVAL;
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
 
        if (tb[RTA_SRC]) {
                if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
                        goto errout;
 
-               ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
+               ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
        }
 
        if (tb[RTA_DST]) {
                if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
                        goto errout;
 
-               ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
+               ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
        }
 
        if (tb[RTA_IIF])
                iif = nla_get_u32(tb[RTA_IIF]);
 
        if (tb[RTA_OIF])
-               fl.oif = nla_get_u32(tb[RTA_OIF]);
+               fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);
 
        if (iif) {
                struct net_device *dev;
@@ -2401,10 +2436,10 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        skb_reset_mac_header(skb);
        skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
 
-       rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
+       rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
        skb_dst_set(skb, &rt->dst);
 
-       err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
+       err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
                            RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
                            nlh->nlmsg_seq, 0, 0, 0);
        if (err < 0) {
@@ -2561,14 +2596,16 @@ static
 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct net *net = current->nsproxy->net_ns;
-       int delay = net->ipv6.sysctl.flush_delay;
-       if (write) {
-               proc_dointvec(ctl, write, buffer, lenp, ppos);
-               fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
-               return 0;
-       } else
+       struct net *net;
+       int delay;
+       if (!write)
                return -EINVAL;
+
+       net = (struct net *)ctl->extra1;
+       delay = net->ipv6.sysctl.flush_delay;
+       proc_dointvec(ctl, write, buffer, lenp, ppos);
+       fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+       return 0;
 }
 
 ctl_table ipv6_route_table_template[] = {
@@ -2655,6 +2692,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
 
        if (table) {
                table[0].data = &net->ipv6.sysctl.flush_delay;
+               table[0].extra1 = net;
                table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
                table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
                table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
@@ -2688,7 +2726,8 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_null_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_null_entry;
        net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-       dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
+       dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
+                        ip6_template_metrics, true);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2699,7 +2738,8 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_prohibit_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
        net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-       dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
+       dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
+                        ip6_template_metrics, true);
 
        net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
                                               sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2709,7 +2749,8 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_blk_hole_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
        net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-       dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
+       dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+                        ip6_template_metrics, true);
 #endif
 
        net->ipv6.sysctl.flush_delay = 0;
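
Two route.c additions above are worth calling out: ipv6_cow_metrics() gives routes writable metrics by copying the read-only template into the inet_peer and publishing the copy with cmpxchg(), and rt6_peer_genid() lets ip6_dst_check() notice later peer binds. A toy restatement of the publish-or-adopt step follows (names invented, the DST_METRICS_READ_ONLY check elided):

static u32 *publish_or_adopt(unsigned long *slot, unsigned long old, u32 *copy)
{
        unsigned long prev;

        /* try to swing the slot from the shared template to our copy */
        prev = cmpxchg(slot, old, (unsigned long)copy);
        if (prev == old)
                return copy;    /* we won the race; everyone sees our copy */

        /* lost the race: adopt the winner's pointer instead (the real code
         * additionally returns NULL if that value is still read-only) */
        return (u32 *)prev;
}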
index 8ce38f1..43b3337 100644
@@ -412,7 +412,7 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
 
        p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
        do {
-               n = p->next;
+               n = rcu_dereference_protected(p->next, 1);
                kfree(p);
                p = n;
        } while (p);
@@ -421,15 +421,17 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
 static int
 ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 {
-       struct ip_tunnel_prl_entry *x, **p;
+       struct ip_tunnel_prl_entry *x;
+       struct ip_tunnel_prl_entry __rcu **p;
        int err = 0;
 
        ASSERT_RTNL();
 
        if (a && a->addr != htonl(INADDR_ANY)) {
-               for (p = &t->prl; *p; p = &(*p)->next) {
-                       if ((*p)->addr == a->addr) {
-                               x = *p;
+               for (p = &t->prl;
+                    (x = rtnl_dereference(*p)) != NULL;
+                    p = &x->next) {
+                       if (x->addr == a->addr) {
                                *p = x->next;
                                call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
                                t->prl_count--;
@@ -438,9 +440,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
                }
                err = -ENXIO;
        } else {
-               if (t->prl) {
+               x = rtnl_dereference(t->prl);
+               if (x) {
                        t->prl_count = 0;
-                       x = t->prl;
                        call_rcu(&x->rcu_head, prl_list_destroy_rcu);
                        t->prl = NULL;
                }
@@ -730,16 +732,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                dst = addr6->s6_addr32[3];
        }
 
-       {
-               struct flowi fl = { .fl4_dst = dst,
-                                   .fl4_src = tiph->saddr,
-                                   .fl4_tos = RT_TOS(tos),
-                                   .oif = tunnel->parms.link,
-                                   .proto = IPPROTO_IPV6 };
-               if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
-                       dev->stats.tx_carrier_errors++;
-                       goto tx_error_icmp;
-               }
+       rt = ip_route_output_ports(dev_net(dev), NULL,
+                                  dst, tiph->saddr,
+                                  0, 0,
+                                  IPPROTO_IPV6, RT_TOS(tos),
+                                  tunnel->parms.link);
+       if (IS_ERR(rt)) {
+               dev->stats.tx_carrier_errors++;
+               goto tx_error_icmp;
        }
        if (rt->rt_type != RTN_UNICAST) {
                ip_rt_put(rt);
@@ -855,13 +855,14 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
        iph = &tunnel->parms.iph;
 
        if (iph->daddr) {
-               struct flowi fl = { .fl4_dst = iph->daddr,
-                                   .fl4_src = iph->saddr,
-                                   .fl4_tos = RT_TOS(iph->tos),
-                                   .oif = tunnel->parms.link,
-                                   .proto = IPPROTO_IPV6 };
-               struct rtable *rt;
-               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL,
+                                                         iph->daddr, iph->saddr,
+                                                         0, 0,
+                                                         IPPROTO_IPV6,
+                                                         RT_TOS(iph->tos),
+                                                         tunnel->parms.link);
+
+               if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
@@ -1179,7 +1180,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
        dev_hold(dev);
-       sitn->tunnels_wc[0]     = tunnel;
+       rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
        return 0;
 }
 
@@ -1196,11 +1197,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
        for (prio = 1; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
-                       struct ip_tunnel *t = sitn->tunnels[prio][h];
+                       struct ip_tunnel *t;
 
+                       t = rtnl_dereference(sitn->tunnels[prio][h]);
                        while (t != NULL) {
                                unregister_netdevice_queue(t->dev, head);
-                               t = t->next;
+                               t = rtnl_dereference(t->next);
                        }
                }
        }
@@ -1290,4 +1292,4 @@ static int __init sit_init(void)
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sit0");
+MODULE_ALIAS_NETDEV("sit0");
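
Besides switching ipip6_tunnel_xmit() to ip_route_output_ports(), the sit.c hunks add sparse RCU annotations: the PRL next pointers become __rcu and every walk taken under RTNL goes through rtnl_dereference() rather than a raw pointer read. A minimal sketch of that writer-side access pattern, with hypothetical node and field names:

struct prl_node {
        struct prl_node __rcu   *next;
        __be32                  addr;
};

/* writer-side walk under RTNL, as ipip6_tunnel_del_prl() does above */
static struct prl_node *prl_find(struct prl_node __rcu **head, __be32 addr)
{
        struct prl_node *x;

        for (; (x = rtnl_dereference(*head)) != NULL; head = &x->next)
                if (x->addr == addr)
                        return x;

        return NULL;    /* pure readers would use rcu_dereference() instead */
}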
index 09fd34f..352c260 100644
@@ -232,23 +232,20 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
         */
        {
                struct in6_addr *final_p, final;
-               struct flowi fl;
-               memset(&fl, 0, sizeof(fl));
-               fl.proto = IPPROTO_TCP;
-               ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
-               final_p = fl6_update_dst(&fl, np->opt, &final);
-               ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
-               fl.oif = sk->sk_bound_dev_if;
-               fl.mark = sk->sk_mark;
-               fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-               fl.fl_ip_sport = inet_sk(sk)->inet_sport;
-               security_req_classify_flow(req, &fl);
-               if (ip6_dst_lookup(sk, &dst, &fl))
-                       goto out_free;
-
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-               if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+               struct flowi6 fl6;
+               memset(&fl6, 0, sizeof(fl6));
+               fl6.flowi6_proto = IPPROTO_TCP;
+               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               final_p = fl6_update_dst(&fl6, np->opt, &final);
+               ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
+               fl6.flowi6_mark = sk->sk_mark;
+               fl6.fl6_dport = inet_rsk(req)->rmt_port;
+               fl6.fl6_sport = inet_sk(sk)->inet_sport;
+               security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+
+               dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+               if (IS_ERR(dst))
                        goto out_free;
        }
 
index fa1d8f4..7cb65ef 100644
@@ -15,6 +15,8 @@
 #include <net/addrconf.h>
 #include <net/inet_frag.h>
 
+static struct ctl_table empty[1];
+
 static ctl_table ipv6_table_template[] = {
        {
                .procname       = "route",
@@ -35,6 +37,12 @@ static ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "neigh",
+               .maxlen         = 0,
+               .mode           = 0555,
+               .child          = empty,
+       },
        { }
 };
 
@@ -152,7 +160,6 @@ static struct ctl_table_header *ip6_base;
 
 int ipv6_static_sysctl_register(void)
 {
-       static struct ctl_table empty[1];
        ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
        if (ip6_base == NULL)
                return -ENOMEM;
index 20aa95e..2b0c186 100644
@@ -131,7 +131,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
        struct rt6_info *rt;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
        int err;
@@ -142,14 +142,14 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
 
        if (np->sndflow) {
-               fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-               IP6_ECN_flow_init(fl.fl6_flowlabel);
-               if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+               fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+               IP6_ECN_flow_init(fl6.flowlabel);
+               if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
-                       flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
@@ -195,7 +195,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
-       np->flow_label = fl.fl6_flowlabel;
+       np->flow_label = fl6.flowlabel;
 
        /*
         *      TCP over IPv4
@@ -242,35 +242,27 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;
 
-       fl.proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-       ipv6_addr_copy(&fl.fl6_src,
+       fl6.flowi6_proto = IPPROTO_TCP;
+       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+       ipv6_addr_copy(&fl6.saddr,
                       (saddr ? saddr : &np->saddr));
-       fl.oif = sk->sk_bound_dev_if;
-       fl.mark = sk->sk_mark;
-       fl.fl_ip_dport = usin->sin6_port;
-       fl.fl_ip_sport = inet->inet_sport;
+       fl6.flowi6_oif = sk->sk_bound_dev_if;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_dport = usin->sin6_port;
+       fl6.fl6_sport = inet->inet_sport;
 
-       final_p = fl6_update_dst(&fl, np->opt, &final);
+       final_p = fl6_update_dst(&fl6, np->opt, &final);
 
-       security_sk_classify_flow(sk, &fl);
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto failure;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto failure;
        }
 
        if (saddr == NULL) {
-               saddr = &fl.fl6_src;
+               saddr = &fl6.saddr;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }
 
@@ -385,7 +377,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        np = inet6_sk(sk);
 
        if (type == ICMPV6_PKT_TOOBIG) {
-               struct dst_entry *dst = NULL;
+               struct dst_entry *dst;
 
                if (sock_owned_by_user(sk))
                        goto out;
@@ -397,29 +389,25 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
-                       struct flowi fl;
+                       struct flowi6 fl6;
 
                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
-                       memset(&fl, 0, sizeof(fl));
-                       fl.proto = IPPROTO_TCP;
-                       ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-                       ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-                       fl.oif = sk->sk_bound_dev_if;
-                       fl.mark = sk->sk_mark;
-                       fl.fl_ip_dport = inet->inet_dport;
-                       fl.fl_ip_sport = inet->inet_sport;
-                       security_skb_classify_flow(skb, &fl);
-
-                       if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
-                               sk->sk_err_soft = -err;
-                               goto out;
-                       }
-
-                       if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
-                               sk->sk_err_soft = -err;
+                       memset(&fl6, 0, sizeof(fl6));
+                       fl6.flowi6_proto = IPPROTO_TCP;
+                       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+                       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+                       fl6.flowi6_oif = sk->sk_bound_dev_if;
+                       fl6.flowi6_mark = sk->sk_mark;
+                       fl6.fl6_dport = inet->inet_dport;
+                       fl6.fl6_sport = inet->inet_sport;
+                       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+
+                       dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
+                       if (IS_ERR(dst)) {
+                               sk->sk_err_soft = -PTR_ERR(dst);
                                goto out;
                        }
 
@@ -494,38 +482,36 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr * final_p, final;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
-       int err = -1;
-
-       memset(&fl, 0, sizeof(fl));
-       fl.proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-       ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-       fl.fl6_flowlabel = 0;
-       fl.oif = treq->iif;
-       fl.mark = sk->sk_mark;
-       fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-       fl.fl_ip_sport = inet_rsk(req)->loc_port;
-       security_req_classify_flow(req, &fl);
+       int err;
+
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_TCP;
+       ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+       ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+       fl6.flowlabel = 0;
+       fl6.flowi6_oif = treq->iif;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_dport = inet_rsk(req)->rmt_port;
+       fl6.fl6_sport = inet_rsk(req)->loc_port;
+       security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
        opt = np->opt;
-       final_p = fl6_update_dst(&fl, opt, &final);
+       final_p = fl6_update_dst(&fl6, opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
-               goto done;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-       if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto done;
-
+       }
        skb = tcp_make_synack(sk, dst, req, rvp);
+       err = -ENOMEM;
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
-               ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-               err = ip6_xmit(sk, skb, &fl, opt);
+               ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+               err = ip6_xmit(sk, skb, &fl6, opt);
                err = net_xmit_eval(err);
        }
 
@@ -1006,7 +992,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 {
        struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct net *net = dev_net(skb_dst(skb)->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
@@ -1060,34 +1046,33 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
        }
 #endif
 
-       memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
+       memset(&fl6, 0, sizeof(fl6));
+       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+       ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
 
        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;
 
-       __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
+       __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 
-       fl.proto = IPPROTO_TCP;
-       fl.oif = inet6_iif(skb);
-       fl.fl_ip_dport = t1->dest;
-       fl.fl_ip_sport = t1->source;
-       security_skb_classify_flow(skb, &fl);
+       fl6.flowi6_proto = IPPROTO_TCP;
+       fl6.flowi6_oif = inet6_iif(skb);
+       fl6.fl6_dport = t1->dest;
+       fl6.fl6_sport = t1->source;
+       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        /* Pass a socket to ip6_dst_lookup either it is for RST
         * Underlying function will use this to retrieve the network
         * namespace
         */
-       if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
-               if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
-                       skb_dst_set(buff, dst);
-                       ip6_xmit(ctl_sk, buff, &fl, NULL);
-                       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-                       if (rst)
-                               TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
-                       return;
-               }
+       dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
+       if (!IS_ERR(dst)) {
+               skb_dst_set(buff, dst);
+               ip6_xmit(ctl_sk, buff, &fl6, NULL);
+               TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+               if (rst)
+                       TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+               return;
        }
 
        kfree_skb(buff);
@@ -1323,7 +1308,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet6_csk_route_req(sk, req)) != NULL &&
                    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
-                   ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+                   ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
                                    &treq->rmt_addr)) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1636,10 +1621,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                opt_skb = skb_clone(skb, GFP_ATOMIC);
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-               TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
-               TCP_CHECK_TIMER(sk);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
@@ -1667,10 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
-       TCP_CHECK_TIMER(sk);
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;
index 9a009c6..d7037c0 100644
@@ -886,7 +886,7 @@ static int udp_v6_push_pending_frames(struct sock *sk)
        struct udphdr *uh;
        struct udp_sock  *up = udp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
-       struct flowi *fl = &inet->cork.fl;
+       struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
        __wsum csum = 0;
@@ -899,23 +899,23 @@ static int udp_v6_push_pending_frames(struct sock *sk)
         * Create a UDP header
         */
        uh = udp_hdr(skb);
-       uh->source = fl->fl_ip_sport;
-       uh->dest = fl->fl_ip_dport;
+       uh->source = fl6->fl6_sport;
+       uh->dest = fl6->fl6_dport;
        uh->len = htons(up->len);
        uh->check = 0;
 
        if (is_udplite)
                csum = udplite_csum_outgoing(sk, skb);
        else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
-               udp6_hwcsum_outgoing(sk, skb, &fl->fl6_src, &fl->fl6_dst,
+               udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
                                     up->len);
                goto send;
        } else
                csum = udp_csum_outgoing(sk, skb);
 
        /* add protocol-dependent pseudo-header */
-       uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
-                                   up->len, fl->proto, csum   );
+       uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
+                                   up->len, fl6->flowi6_proto, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;
 
@@ -947,7 +947,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
        struct in6_addr *daddr, *final_p, final;
        struct ipv6_txoptions *opt = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
-       struct flowi fl;
+       struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_len = msg->msg_namelen;
        int ulen = len;
@@ -1030,19 +1030,19 @@ do_udp_sendmsg:
        }
        ulen += sizeof(struct udphdr);
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
 
        if (sin6) {
                if (sin6->sin6_port == 0)
                        return -EINVAL;
 
-               fl.fl_ip_dport = sin6->sin6_port;
+               fl6.fl6_dport = sin6->sin6_port;
                daddr = &sin6->sin6_addr;
 
                if (np->sndflow) {
-                       fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-                       if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
-                               flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+                       fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+                       if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
+                               flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                                if (flowlabel == NULL)
                                        return -EINVAL;
                                daddr = &flowlabel->dst;
@@ -1060,38 +1060,38 @@ do_udp_sendmsg:
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    sin6->sin6_scope_id &&
                    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
-                       fl.oif = sin6->sin6_scope_id;
+                       fl6.flowi6_oif = sin6->sin6_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
 
-               fl.fl_ip_dport = inet->inet_dport;
+               fl6.fl6_dport = inet->inet_dport;
                daddr = &np->daddr;
-               fl.fl6_flowlabel = np->flow_label;
+               fl6.flowlabel = np->flow_label;
                connected = 1;
        }
 
-       if (!fl.oif)
-               fl.oif = sk->sk_bound_dev_if;
+       if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
 
-       if (!fl.oif)
-               fl.oif = np->sticky_pktinfo.ipi6_ifindex;
+       if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
-       fl.mark = sk->sk_mark;
+       fl6.flowi6_mark = sk->sk_mark;
 
        if (msg->msg_controllen) {
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
+               err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
                                        &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
                }
-               if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
-                       flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+               if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
+                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                }
@@ -1105,42 +1105,35 @@ do_udp_sendmsg:
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
 
-       fl.proto = sk->sk_protocol;
+       fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
-               ipv6_addr_copy(&fl.fl6_dst, daddr);
+               ipv6_addr_copy(&fl6.daddr, daddr);
        else
-               fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
-       if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-       fl.fl_ip_sport = inet->inet_sport;
+               fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+       if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
+               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.fl6_sport = inet->inet_sport;
 
-       final_p = fl6_update_dst(&fl, opt, &final);
+       final_p = fl6_update_dst(&fl6, opt, &final);
        if (final_p)
                connected = 0;
 
-       if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
-               fl.oif = np->mcast_oif;
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
+               fl6.flowi6_oif = np->mcast_oif;
                connected = 0;
        }
 
-       security_sk_classify_flow(sk, &fl);
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       err = ip6_sk_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
                goto out;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto out;
        }
 
        if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl.fl6_dst))
+               if (ipv6_addr_is_multicast(&fl6.daddr))
                        hlimit = np->mcast_hops;
                else
                        hlimit = np->hop_limit;
@@ -1175,7 +1168,7 @@ do_append_data:
        up->len += ulen;
        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
-               sizeof(struct udphdr), hlimit, tclass, opt, &fl,
+               sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
                (struct rt6_info*)dst,
                corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
        if (err)
@@ -1188,10 +1181,10 @@ do_append_data:
        if (dst) {
                if (connected) {
                        ip6_dst_store(sk, dst,
-                                     ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
+                                     ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
                                      &np->daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
-                                     ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
+                                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
                                      &np->saddr :
 #endif
                                      NULL);
@@ -1299,7 +1292,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
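
In udp_v6_push_pending_frames() the flow key is now read back out of the socket cork, which stores a protocol-neutral struct flowi; the IPv6 path works on its u.ip6 member, the same union view the xfrm hunks below use. A trivial hedged helper showing that access (name invented):

static struct flowi6 *cork_flow6(struct inet_sock *inet)
{
        /* the cork holds a generic struct flowi; take the v6 view of it */
        return &inet->cork.fl.u.ip6;
}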
index 7e74023..05e34c8 100644
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
 
 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
-                                         xfrm_address_t *saddr,
-                                         xfrm_address_t *daddr)
+                                         const xfrm_address_t *saddr,
+                                         const xfrm_address_t *daddr)
 {
-       struct flowi fl = {};
+       struct flowi6 fl6;
        struct dst_entry *dst;
        int err;
 
-       memcpy(&fl.fl6_dst, daddr, sizeof(fl.fl6_dst));
+       memset(&fl6, 0, sizeof(fl6));
+       memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
-               memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src));
+               memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
 
-       dst = ip6_route_output(net, NULL, &fl);
+       dst = ip6_route_output(net, NULL, &fl6);
 
        err = dst->error;
        if (dst->error) {
@@ -67,7 +68,7 @@ static int xfrm6_get_saddr(struct net *net,
        return 0;
 }
 
-static int xfrm6_get_tos(struct flowi *fl)
+static int xfrm6_get_tos(const struct flowi *fl)
 {
        return 0;
 }
@@ -87,7 +88,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 }
 
 static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
-                         struct flowi *fl)
+                         const struct flowi *fl)
 {
        struct rt6_info *rt = (struct rt6_info*)xdst->route;
 
@@ -98,6 +99,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        if (!xdst->u.rt6.rt6i_idev)
                return -ENODEV;
 
+       xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+       if (rt->rt6i_peer)
+               atomic_inc(&rt->rt6i_peer->refcnt);
+
        /* Sheit... I remember I did this right. Apparently,
         * it was magically lost, so this code needs audit */
        xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -116,6 +121,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 static inline void
 _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
+       struct flowi6 *fl6 = &fl->u.ip6;
        int onlyproto = 0;
        u16 offset = skb_network_header_len(skb);
        struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -123,11 +129,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
        const unsigned char *nh = skb_network_header(skb);
        u8 nexthdr = nh[IP6CB(skb)->nhoff];
 
-       memset(fl, 0, sizeof(struct flowi));
-       fl->mark = skb->mark;
+       memset(fl6, 0, sizeof(struct flowi6));
+       fl6->flowi6_mark = skb->mark;
 
-       ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
-       ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
+       ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
+       ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
 
        while (nh + offset + 1 < skb->data ||
               pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
@@ -154,20 +160,20 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                             pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
                                __be16 *ports = (__be16 *)exthdr;
 
-                               fl->fl_ip_sport = ports[!!reverse];
-                               fl->fl_ip_dport = ports[!reverse];
+                               fl6->fl6_sport = ports[!!reverse];
+                               fl6->fl6_dport = ports[!reverse];
                        }
-                       fl->proto = nexthdr;
+                       fl6->flowi6_proto = nexthdr;
                        return;
 
                case IPPROTO_ICMPV6:
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
                                u8 *icmp = (u8 *)exthdr;
 
-                               fl->fl_icmp_type = icmp[0];
-                               fl->fl_icmp_code = icmp[1];
+                               fl6->fl6_icmp_type = icmp[0];
+                               fl6->fl6_icmp_code = icmp[1];
                        }
-                       fl->proto = nexthdr;
+                       fl6->flowi6_proto = nexthdr;
                        return;
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -176,9 +182,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                                struct ip6_mh *mh;
                                mh = (struct ip6_mh *)exthdr;
 
-                               fl->fl_mh_type = mh->ip6mh_type;
+                               fl6->fl6_mh_type = mh->ip6mh_type;
                        }
-                       fl->proto = nexthdr;
+                       fl6->flowi6_proto = nexthdr;
                        return;
 #endif
 
@@ -187,8 +193,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                case IPPROTO_ESP:
                case IPPROTO_COMP:
                default:
-                       fl->fl_ipsec_spi = 0;
-                       fl->proto = nexthdr;
+                       fl6->fl6_ipsec_spi = 0;
+                       fl6->flowi6_proto = nexthdr;
                        return;
                }
        }
@@ -216,6 +222,9 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 
        if (likely(xdst->u.rt6.rt6i_idev))
                in6_dev_put(xdst->u.rt6.rt6i_idev);
+       dst_destroy_metrics_generic(dst);
+       if (likely(xdst->u.rt6.rt6i_peer))
+               inet_putpeer(xdst->u.rt6.rt6i_peer);
        xfrm_dst_destroy(xdst);
 }
 
@@ -251,6 +260,7 @@ static struct dst_ops xfrm6_dst_ops = {
        .protocol =             cpu_to_be16(ETH_P_IPV6),
        .gc =                   xfrm6_garbage_collect,
        .update_pmtu =          xfrm6_update_pmtu,
+       .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              xfrm6_dst_destroy,
        .ifdown =               xfrm6_dst_ifdown,
        .local_out =            __ip6_local_out,
@@ -266,6 +276,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
        .get_tos =              xfrm6_get_tos,
        .init_path =            xfrm6_init_path,
        .fill_dst =             xfrm6_fill_dst,
+       .blackhole_route =      ip6_blackhole_route,
 };
 
 static int __init xfrm6_policy_init(void)
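
_decode_session6() now fills fl->u.ip6 directly, and it keeps the old indexing trick for the transport ports: when the session is decoded in the reverse direction the two wire ports swap roles, so the code indexes with !!reverse and !reverse instead of branching. A toy restatement of that trick (hypothetical helper):

static void pick_ports(const __be16 ports[2], int reverse,
                       __be16 *sport, __be16 *dport)
{
        /* ports[0]/ports[1] are the on-the-wire source/dest; a reverse
         * (reply-direction) lookup swaps which one counts as local */
        *sport = ports[!!reverse];
        *dport = ports[!reverse];
}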
index a67575d..afe941e 100644
 #include <net/addrconf.h>
 
 static void
-__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
 {
+       const struct flowi6 *fl6 = &fl->u.ip6;
+
        /* Initialize temporary selector matching only
         * to current session. */
-       ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
-       ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
-       sel->dport = xfrm_flowi_dport(fl);
+       ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr);
+       ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr);
+       sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
        sel->dport_mask = htons(0xffff);
-       sel->sport = xfrm_flowi_sport(fl);
+       sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
        sel->sport_mask = htons(0xffff);
        sel->family = AF_INET6;
        sel->prefixlen_d = 128;
        sel->prefixlen_s = 128;
-       sel->proto = fl->proto;
-       sel->ifindex = fl->oif;
+       sel->proto = fl6->flowi6_proto;
+       sel->ifindex = fl6->flowi6_oif;
 }
 
 static void
-xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
-                  xfrm_address_t *daddr, xfrm_address_t *saddr)
+xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+                  const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        x->id = tmpl->id;
        if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
index d87c22d..7db86ff 100644
@@ -70,7 +70,7 @@ static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
        return (struct pfkey_sock *)sk;
 }
 
-static int pfkey_can_dump(struct sock *sk)
+static int pfkey_can_dump(const struct sock *sk)
 {
        if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
                return 1;
@@ -303,12 +303,13 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        return rc;
 }
 
-static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
+static inline void pfkey_hdr_dup(struct sadb_msg *new,
+                                const struct sadb_msg *orig)
 {
        *new = *orig;
 }
 
-static int pfkey_error(struct sadb_msg *orig, int err, struct sock *sk)
+static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
 {
        struct sk_buff *skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
        struct sadb_msg *hdr;
@@ -369,13 +370,13 @@ static u8 sadb_ext_min_len[] = {
 };
 
 /* Verify sadb_address_{len,prefixlen} against sa_family.  */
-static int verify_address_len(void *p)
+static int verify_address_len(const void *p)
 {
-       struct sadb_address *sp = p;
-       struct sockaddr *addr = (struct sockaddr *)(sp + 1);
-       struct sockaddr_in *sin;
+       const struct sadb_address *sp = p;
+       const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
+       const struct sockaddr_in *sin;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-       struct sockaddr_in6 *sin6;
+       const struct sockaddr_in6 *sin6;
 #endif
        int len;
 
@@ -411,16 +412,16 @@ static int verify_address_len(void *p)
        return 0;
 }
 
-static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx)
+static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
 {
        return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
                            sec_ctx->sadb_x_ctx_len,
                            sizeof(uint64_t));
 }
 
-static inline int verify_sec_ctx_len(void *p)
+static inline int verify_sec_ctx_len(const void *p)
 {
-       struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p;
+       const struct sadb_x_sec_ctx *sec_ctx = p;
        int len = sec_ctx->sadb_x_ctx_len;
 
        if (len > PAGE_SIZE)
@@ -434,7 +435,7 @@ static inline int verify_sec_ctx_len(void *p)
        return 0;
 }
 
-static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(struct sadb_x_sec_ctx *sec_ctx)
+static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
 {
        struct xfrm_user_sec_ctx *uctx = NULL;
        int ctx_size = sec_ctx->sadb_x_ctx_len;
@@ -455,16 +456,16 @@ static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(struct sadb
        return uctx;
 }
 
-static int present_and_same_family(struct sadb_address *src,
-                                  struct sadb_address *dst)
+static int present_and_same_family(const struct sadb_address *src,
+                                  const struct sadb_address *dst)
 {
-       struct sockaddr *s_addr, *d_addr;
+       const struct sockaddr *s_addr, *d_addr;
 
        if (!src || !dst)
                return 0;
 
-       s_addr = (struct sockaddr *)(src + 1);
-       d_addr = (struct sockaddr *)(dst + 1);
+       s_addr = (const struct sockaddr *)(src + 1);
+       d_addr = (const struct sockaddr *)(dst + 1);
        if (s_addr->sa_family != d_addr->sa_family)
                return 0;
        if (s_addr->sa_family != AF_INET
@@ -477,15 +478,15 @@ static int present_and_same_family(struct sadb_address *src,
        return 1;
 }
 
-static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void **ext_hdrs)
 {
-       char *p = (char *) hdr;
+       const char *p = (char *) hdr;
        int len = skb->len;
 
        len -= sizeof(*hdr);
        p += sizeof(*hdr);
        while (len > 0) {
-               struct sadb_ext *ehdr = (struct sadb_ext *) p;
+               const struct sadb_ext *ehdr = (const struct sadb_ext *) p;
                uint16_t ext_type;
                int ext_len;
 
@@ -514,7 +515,7 @@ static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_h
                                if (verify_sec_ctx_len(p))
                                        return -EINVAL;
                        }
-                       ext_hdrs[ext_type-1] = p;
+                       ext_hdrs[ext_type-1] = (void *) p;
                }
                p   += ext_len;
                len -= ext_len;
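The af_key hunks above constify the SADB message parsing: parse_exthdrs() walks the payload through const pointers and casts const away only at the single point where a pointer is stashed into the caller-supplied ext_hdrs[] array, which the handlers then receive as void * const *. A reduced stand-alone sketch of that pattern, using illustrative stand-in types rather than the real <linux/pfkeyv2.h> ones:

#include <stddef.h>
#include <stdint.h>

/* Stand-in for a TLV-style extension header -- illustrative only. */
struct ext {
        uint16_t len;   /* length in bytes, including this header */
        uint16_t type;  /* 1-based type, indexes ext_hdrs[type - 1] */
};

#define EXT_MAX 4

/* Read-only walk; const is cast away only when stashing the pointer. */
static int parse_exts(const void *msg, size_t len, void *ext_hdrs[EXT_MAX])
{
        const char *p = msg;

        while (len >= sizeof(struct ext)) {
                const struct ext *e = (const struct ext *)p;

                if (e->len < sizeof(*e) || e->len > len)
                        return -1;
                if (e->type >= 1 && e->type <= EXT_MAX)
                        ext_hdrs[e->type - 1] = (void *)p;
                p += e->len;
                len -= e->len;
        }
        return 0;
}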
@@ -606,21 +607,21 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
 }
 
 static
-int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr)
+int pfkey_sadb_addr2xfrm_addr(const struct sadb_address *addr, xfrm_address_t *xaddr)
 {
        return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
                                      xaddr);
 }
 
-static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_msg *hdr, void **ext_hdrs)
+static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
-       struct sadb_sa *sa;
-       struct sadb_address *addr;
+       const struct sadb_sa *sa;
+       const struct sadb_address *addr;
        uint16_t proto;
        unsigned short family;
        xfrm_address_t *xaddr;
 
-       sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+       sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
        if (sa == NULL)
                return NULL;
 
@@ -629,18 +630,18 @@ static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
                return NULL;
 
        /* sadb_address_len should be checked by caller */
-       addr = (struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
+       addr = (const struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
        if (addr == NULL)
                return NULL;
 
-       family = ((struct sockaddr *)(addr + 1))->sa_family;
+       family = ((const struct sockaddr *)(addr + 1))->sa_family;
        switch (family) {
        case AF_INET:
-               xaddr = (xfrm_address_t *)&((struct sockaddr_in *)(addr + 1))->sin_addr;
+               xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
                break;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
-               xaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(addr + 1))->sin6_addr;
+               xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
                break;
 #endif
        default:
@@ -690,9 +691,9 @@ static inline int pfkey_mode_to_xfrm(int mode)
        }
 }
 
-static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
-                                      struct sockaddr *sa,
-                                      unsigned short family)
+static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port,
+                                       struct sockaddr *sa,
+                                       unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -720,7 +721,7 @@ static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
        return 0;
 }
 
-static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
+static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
                                              int add_keys, int hsc)
 {
        struct sk_buff *skb;
@@ -1010,7 +1011,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
 }
 
 
-static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
+static inline struct sk_buff *pfkey_xfrm_state2msg(const struct xfrm_state *x)
 {
        struct sk_buff *skb;
 
@@ -1019,26 +1020,26 @@ static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
        return skb;
 }
 
-static inline struct sk_buff *pfkey_xfrm_state2msg_expire(struct xfrm_state *x,
+static inline struct sk_buff *pfkey_xfrm_state2msg_expire(const struct xfrm_state *x,
                                                          int hsc)
 {
        return __pfkey_xfrm_state2msg(x, 0, hsc);
 }
 
 static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
-                                               struct sadb_msg *hdr,
-                                               void **ext_hdrs)
+                                               const struct sadb_msg *hdr,
+                                               void * const *ext_hdrs)
 {
        struct xfrm_state *x;
-       struct sadb_lifetime *lifetime;
-       struct sadb_sa *sa;
-       struct sadb_key *key;
-       struct sadb_x_sec_ctx *sec_ctx;
+       const struct sadb_lifetime *lifetime;
+       const struct sadb_sa *sa;
+       const struct sadb_key *key;
+       const struct sadb_x_sec_ctx *sec_ctx;
        uint16_t proto;
        int err;
 
 
-       sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+       sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
        if (!sa ||
            !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
                                     ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
@@ -1077,7 +1078,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
             sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
            sa->sadb_sa_encrypt > SADB_EALG_MAX)
                return ERR_PTR(-EINVAL);
-       key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+       key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
        if (key != NULL &&
            sa->sadb_sa_auth != SADB_X_AALG_NULL &&
            ((key->sadb_key_bits+7) / 8 == 0 ||
@@ -1104,14 +1105,14 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
                x->props.flags |= XFRM_STATE_NOPMTUDISC;
 
-       lifetime = (struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
+       lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
        if (lifetime != NULL) {
                x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
                x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
                x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
                x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
        }
-       lifetime = (struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
+       lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
        if (lifetime != NULL) {
                x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
                x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
@@ -1119,7 +1120,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
        }
 
-       sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+       sec_ctx = (const struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
        if (sec_ctx != NULL) {
                struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
@@ -1133,7 +1134,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                        goto out;
        }
 
-       key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+       key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
        if (sa->sadb_sa_auth) {
                int keysize = 0;
                struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
@@ -1202,7 +1203,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                                  &x->id.daddr);
 
        if (ext_hdrs[SADB_X_EXT_SA2-1]) {
-               struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1];
+               const struct sadb_x_sa2 *sa2 = ext_hdrs[SADB_X_EXT_SA2-1];
                int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
                if (mode < 0) {
                        err = -EINVAL;
@@ -1213,7 +1214,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        }
 
        if (ext_hdrs[SADB_EXT_ADDRESS_PROXY-1]) {
-               struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
+               const struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
 
                /* Nobody uses this, but we try. */
                x->sel.family = pfkey_sadb_addr2xfrm_addr(addr, &x->sel.saddr);
@@ -1224,7 +1225,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                x->sel.family = x->props.family;
 
        if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
-               struct sadb_x_nat_t_type* n_type;
+               const struct sadb_x_nat_t_type* n_type;
                struct xfrm_encap_tmpl *natt;
 
                x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
@@ -1236,12 +1237,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                natt->encap_type = n_type->sadb_x_nat_t_type_type;
 
                if (ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1]) {
-                       struct sadb_x_nat_t_port* n_port =
+                       const struct sadb_x_nat_t_port *n_port =
                                ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1];
                        natt->encap_sport = n_port->sadb_x_nat_t_port_port;
                }
                if (ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]) {
-                       struct sadb_x_nat_t_port* n_port =
+                       const struct sadb_x_nat_t_port *n_port =
                                ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
                        natt->encap_dport = n_port->sadb_x_nat_t_port_port;
                }
@@ -1261,12 +1262,12 @@ out:
        return ERR_PTR(err);
 }
 
-static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        return -EOPNOTSUPP;
 }
 
-static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct sk_buff *resp_skb;
@@ -1365,7 +1366,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
        return 0;
 }
 
-static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct xfrm_state *x;
@@ -1429,7 +1430,7 @@ static inline int event2keytype(int event)
 }
 
 /* ADD/UPD/DEL */
-static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
+static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -1453,7 +1454,7 @@ static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
        return 0;
 }
 
-static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct xfrm_state *x;
@@ -1492,7 +1493,7 @@ out:
        return err;
 }
 
-static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct xfrm_state *x;
@@ -1534,7 +1535,7 @@ out:
        return err;
 }
 
-static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        __u8 proto;
@@ -1570,7 +1571,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
        return 0;
 }
 
-static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig,
+static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
                                              gfp_t allocation)
 {
        struct sk_buff *skb;
@@ -1642,7 +1643,7 @@ out_put_algs:
        return skb;
 }
 
-static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
        struct sk_buff *supp_skb;
@@ -1671,7 +1672,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
        return 0;
 }
 
-static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
+static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -1688,7 +1689,7 @@ static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
        return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
 }
 
-static int key_notify_sa_flush(struct km_event *c)
+static int key_notify_sa_flush(const struct km_event *c)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -1710,7 +1711,7 @@ static int key_notify_sa_flush(struct km_event *c)
        return 0;
 }
 
-static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        unsigned proto;
@@ -1784,7 +1785,7 @@ static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
        xfrm_state_walk_done(&pfk->dump.u.state);
 }
 
-static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        u8 proto;
        struct pfkey_sock *pfk = pfkey_sk(sk);
@@ -1805,19 +1806,29 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr
        return pfkey_do_dump(pfk);
 }
 
-static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
        int satype = hdr->sadb_msg_satype;
+       bool reset_errno = false;
 
        if (hdr->sadb_msg_len == (sizeof(*hdr) / sizeof(uint64_t))) {
-               /* XXX we mangle packet... */
-               hdr->sadb_msg_errno = 0;
+               reset_errno = true;
                if (satype != 0 && satype != 1)
                        return -EINVAL;
                pfk->promisc = satype;
        }
-       pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+       if (reset_errno && skb_cloned(skb))
+               skb = skb_copy(skb, GFP_KERNEL);
+       else
+               skb = skb_clone(skb, GFP_KERNEL);
+
+       if (reset_errno && skb) {
+               struct sadb_msg *new_hdr = (struct sadb_msg *) skb->data;
+               new_hdr->sadb_msg_errno = 0;
+       }
+
+       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
        return 0;
 }
 
@@ -1921,7 +1932,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        return 0;
 }
 
-static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp)
+static inline int pfkey_xfrm_policy2sec_ctx_size(const struct xfrm_policy *xp)
 {
   struct xfrm_sec_ctx *xfrm_ctx = xp->security;
 
@@ -1933,9 +1944,9 @@ static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp)
        return 0;
 }
 
-static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
+static int pfkey_xfrm_policy2msg_size(const struct xfrm_policy *xp)
 {
-       struct xfrm_tmpl *t;
+       const struct xfrm_tmpl *t;
        int sockaddr_size = pfkey_sockaddr_size(xp->family);
        int socklen = 0;
        int i;
@@ -1955,7 +1966,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
                pfkey_xfrm_policy2sec_ctx_size(xp);
 }
 
-static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
+static struct sk_buff * pfkey_xfrm_policy2msg_prep(const struct xfrm_policy *xp)
 {
        struct sk_buff *skb;
        int size;
@@ -1969,7 +1980,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
        return skb;
 }
 
-static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
+static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir)
 {
        struct sadb_msg *hdr;
        struct sadb_address *addr;
@@ -2065,8 +2076,8 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
        pol->sadb_x_policy_priority = xp->priority;
 
        for (i=0; i<xp->xfrm_nr; i++) {
+               const struct xfrm_tmpl *t = xp->xfrm_vec + i;
                struct sadb_x_ipsecrequest *rq;
-               struct xfrm_tmpl *t = xp->xfrm_vec + i;
                int req_size;
                int mode;
 
@@ -2123,7 +2134,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
        return 0;
 }
 
-static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct sk_buff *out_skb;
        struct sadb_msg *out_hdr;
@@ -2152,7 +2163,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
 
 }
 
-static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        int err = 0;
@@ -2273,7 +2284,7 @@ out:
        return err;
 }
 
-static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        int err;
@@ -2350,7 +2361,7 @@ out:
        return err;
 }
 
-static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb_msg *hdr, int dir)
+static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struct sadb_msg *hdr, int dir)
 {
        int err;
        struct sk_buff *out_skb;
@@ -2458,7 +2469,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 }
 
 static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
-                        struct sadb_msg *hdr, void **ext_hdrs)
+                        const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        int i, len, ret, err = -EINVAL;
        u8 dir;
@@ -2549,14 +2560,14 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
 }
 #else
 static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
-                        struct sadb_msg *hdr, void **ext_hdrs)
+                        const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        return -ENOPROTOOPT;
 }
 #endif
 
 
-static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        unsigned int dir;
@@ -2644,7 +2655,7 @@ static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
        xfrm_policy_walk_done(&pfk->dump.u.policy);
 }
 
-static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
@@ -2660,7 +2671,7 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
        return pfkey_do_dump(pfk);
 }
 
-static int key_notify_policy_flush(struct km_event *c)
+static int key_notify_policy_flush(const struct km_event *c)
 {
        struct sk_buff *skb_out;
        struct sadb_msg *hdr;
@@ -2680,7 +2691,7 @@ static int key_notify_policy_flush(struct km_event *c)
 
 }
 
-static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct km_event c;
@@ -2709,7 +2720,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
 }
 
 typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
-                            struct sadb_msg *hdr, void **ext_hdrs);
+                            const struct sadb_msg *hdr, void * const *ext_hdrs);
 static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
        [SADB_RESERVED]         = pfkey_reserved,
        [SADB_GETSPI]           = pfkey_getspi,
@@ -2736,7 +2747,7 @@ static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
        [SADB_X_MIGRATE]        = pfkey_migrate,
 };
 
-static int pfkey_process(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr)
+static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
 {
        void *ext_hdrs[SADB_EXT_MAX];
        int err;
@@ -2781,7 +2792,8 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
        return hdr;
 }
 
-static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+static inline int aalg_tmpl_set(const struct xfrm_tmpl *t,
+                               const struct xfrm_algo_desc *d)
 {
        unsigned int id = d->desc.sadb_alg_id;
 
@@ -2791,7 +2803,8 @@ static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
        return (t->aalgos >> id) & 1;
 }
 
-static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+static inline int ealg_tmpl_set(const struct xfrm_tmpl *t,
+                               const struct xfrm_algo_desc *d)
 {
        unsigned int id = d->desc.sadb_alg_id;
 
@@ -2801,12 +2814,12 @@ static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
        return (t->ealgos >> id) & 1;
 }
 
-static int count_ah_combs(struct xfrm_tmpl *t)
+static int count_ah_combs(const struct xfrm_tmpl *t)
 {
        int i, sz = 0;
 
        for (i = 0; ; i++) {
-               struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
+               const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
                if (!aalg)
                        break;
                if (aalg_tmpl_set(t, aalg) && aalg->available)
@@ -2815,12 +2828,12 @@ static int count_ah_combs(struct xfrm_tmpl *t)
        return sz + sizeof(struct sadb_prop);
 }
 
-static int count_esp_combs(struct xfrm_tmpl *t)
+static int count_esp_combs(const struct xfrm_tmpl *t)
 {
        int i, k, sz = 0;
 
        for (i = 0; ; i++) {
-               struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
+               const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
                if (!ealg)
                        break;
 
@@ -2828,7 +2841,7 @@ static int count_esp_combs(struct xfrm_tmpl *t)
                        continue;
 
                for (k = 1; ; k++) {
-                       struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
+                       const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
                        if (!aalg)
                                break;
 
@@ -2839,7 +2852,7 @@ static int count_esp_combs(struct xfrm_tmpl *t)
        return sz + sizeof(struct sadb_prop);
 }
 
-static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
+static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
        struct sadb_prop *p;
        int i;
@@ -2851,7 +2864,7 @@ static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
 
        for (i = 0; ; i++) {
-               struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
+               const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
                if (!aalg)
                        break;
 
@@ -2871,7 +2884,7 @@ static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        }
 }
 
-static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
+static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
        struct sadb_prop *p;
        int i, k;
@@ -2883,7 +2896,7 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
 
        for (i=0; ; i++) {
-               struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
+               const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
                if (!ealg)
                        break;
 
@@ -2892,7 +2905,7 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
 
                for (k = 1; ; k++) {
                        struct sadb_comb *c;
-                       struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
+                       const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
                        if (!aalg)
                                break;
                        if (!(aalg_tmpl_set(t, aalg) && aalg->available))
@@ -2914,12 +2927,12 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        }
 }
 
-static int key_notify_policy_expire(struct xfrm_policy *xp, struct km_event *c)
+static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
 {
        return 0;
 }
 
-static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
+static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
 {
        struct sk_buff *out_skb;
        struct sadb_msg *out_hdr;
@@ -2949,7 +2962,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
        return 0;
 }
 
-static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
+static int pfkey_send_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = x ? xs_net(x) : c->net;
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
@@ -2976,7 +2989,7 @@ static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
        return 0;
 }
 
-static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        if (xp && xp->type != XFRM_POLICY_TYPE_MAIN)
                return 0;
@@ -3318,7 +3331,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
 
 #ifdef CONFIG_NET_KEY_MIGRATE
 static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
-                           struct xfrm_selector *sel)
+                           const struct xfrm_selector *sel)
 {
        struct sadb_address *addr;
        addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
@@ -3348,7 +3361,7 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
 }
 
 
-static int set_sadb_kmaddress(struct sk_buff *skb, struct xfrm_kmaddress *k)
+static int set_sadb_kmaddress(struct sk_buff *skb, const struct xfrm_kmaddress *k)
 {
        struct sadb_x_kmaddress *kma;
        u8 *sa;
@@ -3376,7 +3389,7 @@ static int set_sadb_kmaddress(struct sk_buff *skb, struct xfrm_kmaddress *k)
 static int set_ipsecrequest(struct sk_buff *skb,
                            uint8_t proto, uint8_t mode, int level,
                            uint32_t reqid, uint8_t family,
-                           xfrm_address_t *src, xfrm_address_t *dst)
+                           const xfrm_address_t *src, const xfrm_address_t *dst)
 {
        struct sadb_x_ipsecrequest *rq;
        u8 *sa;
@@ -3404,9 +3417,9 @@ static int set_ipsecrequest(struct sk_buff *skb,
 #endif
 
 #ifdef CONFIG_NET_KEY_MIGRATE
-static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                             struct xfrm_migrate *m, int num_bundles,
-                             struct xfrm_kmaddress *k)
+static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                             const struct xfrm_migrate *m, int num_bundles,
+                             const struct xfrm_kmaddress *k)
 {
        int i;
        int sasize_sel;
@@ -3415,7 +3428,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
        struct sk_buff *skb;
        struct sadb_msg *hdr;
        struct sadb_x_policy *pol;
-       struct xfrm_migrate *mp;
+       const struct xfrm_migrate *mp;
 
        if (type != XFRM_POLICY_TYPE_MAIN)
                return 0;
@@ -3513,9 +3526,9 @@ err:
        return -EINVAL;
 }
 #else
-static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                             struct xfrm_migrate *m, int num_bundles,
-                             struct xfrm_kmaddress *k)
+static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                             const struct xfrm_migrate *m, int num_bundles,
+                             const struct xfrm_kmaddress *k)
 {
        return -ENOPROTOOPT;
 }
@@ -3655,6 +3668,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
 }
 
 static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
+       __acquires(rcu)
 {
        struct net *net = seq_file_net(f);
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
@@ -3672,6 +3686,7 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
 }
 
 static void pfkey_seq_stop(struct seq_file *f, void *v)
+       __releases(rcu)
 {
        rcu_read_unlock();
 }
index 110efb7..fce9bd3 100644
@@ -320,11 +320,12 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                goto out;
 
-       rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
+       rt = ip_route_connect(lsa->l2tp_addr.s_addr, saddr,
                              RT_CONN_FLAGS(sk), oif,
                              IPPROTO_L2TP,
-                             0, 0, sk, 1);
-       if (rc) {
+                             0, 0, sk, true);
+       if (IS_ERR(rt)) {
+               rc = PTR_ERR(rt);
                if (rc == -ENETUNREACH)
                        IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
                goto out;
@@ -474,24 +475,17 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                if (opt && opt->srr)
                        daddr = opt->faddr;
 
-               {
-                       struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                                           .fl4_dst = daddr,
-                                           .fl4_src = inet->inet_saddr,
-                                           .fl4_tos = RT_CONN_FLAGS(sk),
-                                           .proto = sk->sk_protocol,
-                                           .flags = inet_sk_flowi_flags(sk),
-                                           .fl_ip_sport = inet->inet_sport,
-                                           .fl_ip_dport = inet->inet_dport };
-
-                       /* If this fails, retransmit mechanism of transport layer will
-                        * keep trying until route appears or the connection times
-                        * itself out.
-                        */
-                       security_sk_classify_flow(sk, &fl);
-                       if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
-                               goto no_route;
-               }
+               /* If this fails, retransmit mechanism of transport layer will
+                * keep trying until route appears or the connection times
+                * itself out.
+                */
+               rt = ip_route_output_ports(sock_net(sk), sk,
+                                          daddr, inet->inet_saddr,
+                                          inet->inet_dport, inet->inet_sport,
+                                          sk->sk_protocol, RT_CONN_FLAGS(sk),
+                                          sk->sk_bound_dev_if);
+               if (IS_ERR(rt))
+                       goto no_route;
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set(skb, dst_clone(&rt->dst));
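The l2tp_ip hunks above follow the routing API conversion: ip_route_connect() and ip_route_output_ports() now return the route itself, or an errno encoded as an error pointer, instead of filling in an out-parameter and returning an int. A minimal stand-alone sketch of that pointer-or-error convention, with stand-ins for the <linux/err.h> helpers:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct rtable { int dummy; };

/* Pointer-or-error return, as in the converted ip_route_connect() call. */
static struct rtable *route_connect(int reachable)
{
        static struct rtable rt;

        return reachable ? &rt : ERR_PTR(-ENETUNREACH);
}

int main(void)
{
        struct rtable *rt = route_connect(0);

        if (IS_ERR(rt))
                printf("route lookup failed: %ld\n", PTR_ERR(rt));
        return 0;
}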
index f996874..058f1e9 100644
@@ -181,25 +181,26 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
         * LLC functionality
         */
        rcv = rcu_dereference(sap->rcv_func);
-       if (rcv) {
-               struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
-               if (cskb)
-                       rcv(cskb, dev, pt, orig_dev);
-       }
        dest = llc_pdu_type(skb);
-       if (unlikely(!dest || !llc_type_handlers[dest - 1]))
-               goto drop_put;
-       llc_type_handlers[dest - 1](sap, skb);
-out_put:
+       if (unlikely(!dest || !llc_type_handlers[dest - 1])) {
+               if (rcv)
+                       rcv(skb, dev, pt, orig_dev);
+               else
+                       kfree_skb(skb);
+       } else {
+               if (rcv) {
+                       struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
+                       if (cskb)
+                               rcv(cskb, dev, pt, orig_dev);
+               }
+               llc_type_handlers[dest - 1](sap, skb);
+       }
        llc_sap_put(sap);
 out:
        return 0;
 drop:
        kfree_skb(skb);
        goto out;
-drop_put:
-       kfree_skb(skb);
-       goto out_put;
 handle_station:
        if (!llc_station_handler)
                goto drop;
index c766056..513f85c 100644
@@ -17,7 +17,7 @@ comment "CFG80211 needs to be enabled for MAC80211"
 if MAC80211 != n
 
 config MAC80211_HAS_RC
-       def_bool n
+       bool
 
 config MAC80211_RC_PID
        bool "PID controller based rate control algorithm" if EXPERT
@@ -78,7 +78,7 @@ config MAC80211_RC_DEFAULT
 endif
 
 comment "Some wireless drivers require a rate control algorithm"
-       depends on MAC80211_HAS_RC=n
+       depends on MAC80211 && MAC80211_HAS_RC=n
 
 config MAC80211_MESH
        bool "Enable mac80211 mesh networking (pre-802.11s) support"
index 227ca82..0c9d0c0 100644
@@ -76,7 +76,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
        if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
-                            &sta->sta, tid, NULL))
+                            &sta->sta, tid, NULL, 0))
                printk(KERN_DEBUG "HW problem - can not stop rx "
                                "aggregation for tid %d\n", tid);
 
@@ -232,6 +232,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
        if (buf_size == 0)
                buf_size = IEEE80211_MAX_AMPDU_BUF;
 
+       /* make sure the size doesn't exceed the maximum supported by the hw */
+       if (buf_size > local->hw.max_rx_aggregation_subframes)
+               buf_size = local->hw.max_rx_aggregation_subframes;
 
        /* examine state machine */
        mutex_lock(&sta->ampdu_mlme.mtx);
@@ -287,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
        }
 
        ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
-                              &sta->sta, tid, &start_seq_num);
+                              &sta->sta, tid, &start_seq_num, 0);
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
index 9cc472c..63d852c 100644
@@ -190,7 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
        ret = drv_ampdu_action(local, sta->sdata,
                               IEEE80211_AMPDU_TX_STOP,
-                              &sta->sta, tid, NULL);
+                              &sta->sta, tid, NULL, 0);
 
        /* HW shall not deny going back to legacy */
        if (WARN_ON(ret)) {
@@ -311,7 +311,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        start_seq_num = sta->tid_seq[tid] >> 4;
 
        ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-                              &sta->sta, tid, &start_seq_num);
+                              &sta->sta, tid, &start_seq_num, 0);
        if (ret) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "BA request denied - HW unavailable for"
@@ -342,7 +342,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
                                     tid_tx->dialog_token, start_seq_num,
-                                    0x40, tid_tx->timeout);
+                                    local->hw.max_tx_aggregation_subframes,
+                                    tid_tx->timeout);
 }
 
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
@@ -487,7 +488,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
        drv_ampdu_action(local, sta->sdata,
                         IEEE80211_AMPDU_TX_OPERATIONAL,
-                        &sta->sta, tid, NULL);
+                        &sta->sta, tid, NULL,
+                        sta->ampdu_mlme.tid_tx[tid]->buf_size);
 
        /*
         * synchronize with TX path, while splicing the TX path
@@ -742,9 +744,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 {
        struct tid_ampdu_tx *tid_tx;
        u16 capab, tid;
+       u8 buf_size;
 
        capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
        tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+       buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
 
        mutex_lock(&sta->ampdu_mlme.mtx);
 
@@ -767,12 +771,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
        if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
                        == WLAN_STATUS_SUCCESS) {
+               /*
+                * IEEE 802.11-2007 7.3.1.14:
+                * In an ADDBA Response frame, when the Status Code field
+                * is set to 0, the Buffer Size subfield is set to a value
+                * of at least 1.
+                */
+               if (!buf_size)
+                       goto out;
+
                if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
                                     &tid_tx->state)) {
                        /* ignore duplicate response */
                        goto out;
                }
 
+               tid_tx->buf_size = buf_size;
+
                if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
                        ieee80211_agg_tx_operational(local, sta, tid);
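The aggregation hunks above make both sides of the A-MPDU setup honour buffer-size limits: the RX path clamps the requested size to hw.max_rx_aggregation_subframes, and the TX path extracts the Buffer Size subfield from the ADDBA response capability field (rejecting 0, per IEEE 802.11-2007 7.3.1.14) and advertises max_tx_aggregation_subframes instead of the hard-coded 0x40. A stand-alone sketch of the subfield extraction and clamping; the mask value below is inferred from the >> 6 shift in the hunk and is a local illustrative constant, not the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Bit layout of the ADDBA "Block Ack Parameter Set" field, as implied by
 * the shifts in the hunks above (TID >> 2, Buffer Size >> 6). */
#define ADDBA_PARAM_TID_MASK      0x003C
#define ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0

/* Extract the Buffer Size subfield and clamp it to a local maximum, the
 * way the RX path clamps to hw.max_rx_aggregation_subframes. */
static uint16_t addba_buf_size(uint16_t capab, uint16_t hw_max)
{
        uint16_t buf_size = (capab & ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

        if (buf_size == 0)      /* spec requires at least 1 on success */
                return 0;       /* caller treats 0 as "reject" */
        if (buf_size > hw_max)
                buf_size = hw_max;
        return buf_size;
}

int main(void)
{
        /* capab carrying TID 5 and Buffer Size 64 */
        uint16_t capab = (5 << 2) | (64 << 6);

        printf("negotiated buf_size = %u\n", addba_buf_size(capab, 32));
        return 0;
}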
 
index 4bc8a92..3342135 100644
@@ -316,6 +316,17 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
        return 0;
 }
 
+static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx)
+{
+       if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
+               struct ieee80211_supported_band *sband;
+               sband = sta->local->hw.wiphy->bands[
+                               sta->local->hw.conf.channel->band];
+               rate->legacy = sband->bitrates[idx].bitrate;
+       } else
+               rate->mcs = idx;
+}
+
 static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -330,6 +341,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        STATION_INFO_TX_RETRIES |
                        STATION_INFO_TX_FAILED |
                        STATION_INFO_TX_BITRATE |
+                       STATION_INFO_RX_BITRATE |
                        STATION_INFO_RX_DROP_MISC;
 
        sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
@@ -355,15 +367,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
        if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
                sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+       rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
 
-       if (!(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) {
-               struct ieee80211_supported_band *sband;
-               sband = sta->local->hw.wiphy->bands[
-                               sta->local->hw.conf.channel->band];
-               sinfo->txrate.legacy =
-                       sband->bitrates[sta->last_tx_rate.idx].bitrate;
-       } else
-               sinfo->txrate.mcs = sta->last_tx_rate.idx;
+       sinfo->rxrate.flags = 0;
+       if (sta->last_rx_rate_flag & RX_FLAG_HT)
+               sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS;
+       if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
+               sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+       if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
+               sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+       rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx);
 
        if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
@@ -821,6 +834,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 
        rcu_read_unlock();
 
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+               ieee80211_recalc_ps(local, -1);
+
        return 0;
 }
 
@@ -1215,6 +1232,9 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
        struct ieee80211_sub_if_data *sdata = NULL;
+       struct ieee80211_channel *old_oper;
+       enum nl80211_channel_type old_oper_type;
+       enum nl80211_channel_type old_vif_oper_type = NL80211_CHAN_NO_HT;
 
        if (netdev)
                sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
@@ -1232,13 +1252,23 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
                break;
        }
 
-       local->oper_channel = chan;
+       if (sdata)
+               old_vif_oper_type = sdata->vif.bss_conf.channel_type;
+       old_oper_type = local->_oper_channel_type;
 
        if (!ieee80211_set_channel_type(local, sdata, channel_type))
                return -EBUSY;
 
-       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-       if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
+       old_oper = local->oper_channel;
+       local->oper_channel = chan;
+
+       /* Update driver if changes were actually made. */
+       if ((old_oper != local->oper_channel) ||
+           (old_oper_type != local->_oper_channel_type))
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+       if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+           old_vif_oper_type != sdata->vif.bss_conf.channel_type)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
 
        return 0;
@@ -1274,8 +1304,11 @@ static int ieee80211_scan(struct wiphy *wiphy,
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->local->ops->hw_scan)
                        break;
-               /* FIXME: implement NoA while scanning in software */
-               return -EOPNOTSUPP;
+               /*
+                * FIXME: implement NoA while scanning in software,
+                * for now fall through to allow scanning only when
+                * beaconing hasn't been configured yet
+                */
        case NL80211_IFTYPE_AP:
                if (sdata->u.ap.beacon)
                        return -EOPNOTSUPP;
@@ -1784,6 +1817,33 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 
        *cookie = (unsigned long) skb;
 
+       if (is_offchan && local->ops->offchannel_tx) {
+               int ret;
+
+               IEEE80211_SKB_CB(skb)->band = chan->band;
+
+               mutex_lock(&local->mtx);
+
+               if (local->hw_offchan_tx_cookie) {
+                       mutex_unlock(&local->mtx);
+                       return -EBUSY;
+               }
+
+               /* TODO: bitrate control, TX processing? */
+               ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
+
+               if (ret == 0)
+                       local->hw_offchan_tx_cookie = *cookie;
+               mutex_unlock(&local->mtx);
+
+               /*
+                * Allow driver to return 1 to indicate it wants to have the
+                * frame transmitted with a remain_on_channel + regular TX.
+                */
+               if (ret != 1)
+                       return ret;
+       }
+
        if (is_offchan && local->ops->remain_on_channel) {
                unsigned int duration;
                int ret;
@@ -1822,6 +1882,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
                *cookie ^= 2;
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
                local->hw_roc_skb = skb;
+               local->hw_roc_skb_for_status = skb;
                mutex_unlock(&local->mtx);
 
                return 0;
@@ -1846,6 +1907,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 
        wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
        wk->chan = chan;
+       wk->chan_type = channel_type;
        wk->sdata = sdata;
        wk->done = ieee80211_offchan_tx_done;
        wk->offchan_tx.frame = skb;
@@ -1868,6 +1930,18 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
 
        mutex_lock(&local->mtx);
 
+       if (local->ops->offchannel_tx_cancel_wait &&
+           local->hw_offchan_tx_cookie == cookie) {
+               ret = drv_offchannel_tx_cancel_wait(local);
+
+               if (!ret)
+                       local->hw_offchan_tx_cookie = 0;
+
+               mutex_unlock(&local->mtx);
+
+               return ret;
+       }
+
        if (local->ops->cancel_remain_on_channel) {
                cookie ^= 2;
                ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
@@ -1875,6 +1949,7 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
                if (ret == 0) {
                        kfree_skb(local->hw_roc_skb);
                        local->hw_roc_skb = NULL;
+                       local->hw_roc_skb_for_status = NULL;
                }
 
                mutex_unlock(&local->mtx);
@@ -1937,6 +2012,21 @@ static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
        return drv_get_antenna(local, tx_ant, rx_ant);
 }
 
+static int ieee80211_set_ringparam(struct wiphy *wiphy, u32 tx, u32 rx)
+{
+       struct ieee80211_local *local = wiphy_priv(wiphy);
+
+       return drv_set_ringparam(local, tx, rx);
+}
+
+static void ieee80211_get_ringparam(struct wiphy *wiphy,
+                                   u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+       struct ieee80211_local *local = wiphy_priv(wiphy);
+
+       drv_get_ringparam(local, tx, tx_max, rx, rx_max);
+}
+
 struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
@@ -1994,4 +2084,6 @@ struct cfg80211_ops mac80211_config_ops = {
        .mgmt_frame_register = ieee80211_mgmt_frame_register,
        .set_antenna = ieee80211_set_antenna,
        .get_antenna = ieee80211_get_antenna,
+       .set_ringparam = ieee80211_set_ringparam,
+       .get_ringparam = ieee80211_get_ringparam,
 };
index 5b24740..889c3e9 100644
@@ -77,6 +77,9 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
                switch (tmp->vif.bss_conf.channel_type) {
                case NL80211_CHAN_NO_HT:
                case NL80211_CHAN_HT20:
+                       if (superchan > tmp->vif.bss_conf.channel_type)
+                               break;
+
                        superchan = tmp->vif.bss_conf.channel_type;
                        break;
                case NL80211_CHAN_HT40PLUS:
index 1f02e59..51f0d78 100644
@@ -60,6 +60,10 @@ static const struct file_operations name## _ops = {                  \
        debugfs_create_file(#name, mode, phyd, local, &name## _ops);
 
 
+DEBUGFS_READONLY_FILE(user_power, "%d",
+                     local->user_power_level);
+DEBUGFS_READONLY_FILE(power, "%d",
+                     local->hw.conf.power_level);
 DEBUGFS_READONLY_FILE(frequency, "%d",
                      local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
@@ -391,6 +395,8 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_ADD(uapsd_queues);
        DEBUGFS_ADD(uapsd_max_sp_len);
        DEBUGFS_ADD(channel_type);
+       DEBUGFS_ADD(user_power);
+       DEBUGFS_ADD(power);
 
        statsd = debugfs_create_dir("statistics", phyd);
 
index 2dabdf7..dacace6 100644
@@ -36,7 +36,7 @@ static ssize_t ieee80211_if_read(
                ret = (*format)(sdata, buf, sizeof(buf));
        read_unlock(&dev_base_lock);
 
-       if (ret != -EINVAL)
+       if (ret >= 0)
                ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
 
        return ret;
@@ -81,6 +81,8 @@ static ssize_t ieee80211_if_fmt_##name(                                       \
                IEEE80211_IF_FMT(name, field, "%d\n")
 #define IEEE80211_IF_FMT_HEX(name, field)                              \
                IEEE80211_IF_FMT(name, field, "%#x\n")
+#define IEEE80211_IF_FMT_LHEX(name, field)                             \
+               IEEE80211_IF_FMT(name, field, "%#lx\n")
 #define IEEE80211_IF_FMT_SIZE(name, field)                             \
                IEEE80211_IF_FMT(name, field, "%zd\n")
 
@@ -145,6 +147,9 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
                  HEX);
 IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
                  HEX);
+IEEE80211_IF_FILE(flags, flags, HEX);
+IEEE80211_IF_FILE(state, state, LHEX);
+IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
 
 /* STA attributes */
 IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
@@ -216,6 +221,104 @@ static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
 
 __IEEE80211_IF_FILE_W(smps);
 
+static ssize_t ieee80211_if_fmt_tkip_mic_test(
+       const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+       return -EOPNOTSUPP;
+}
+
+static int hwaddr_aton(const char *txt, u8 *addr)
+{
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               int a, b;
+
+               a = hex_to_bin(*txt++);
+               if (a < 0)
+                       return -1;
+               b = hex_to_bin(*txt++);
+               if (b < 0)
+                       return -1;
+               *addr++ = (a << 4) | b;
+               if (i < 5 && *txt++ != ':')
+                       return -1;
+       }
+
+       return 0;
+}
+
+static ssize_t ieee80211_if_parse_tkip_mic_test(
+       struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+       struct ieee80211_local *local = sdata->local;
+       u8 addr[ETH_ALEN];
+       struct sk_buff *skb;
+       struct ieee80211_hdr *hdr;
+       __le16 fc;
+
+       /*
+        * Assume colon-delimited MAC address with possible white space
+        * following.
+        */
+       if (buflen < 3 * ETH_ALEN - 1)
+               return -EINVAL;
+       if (hwaddr_aton(buf, addr) < 0)
+               return -EINVAL;
+
+       if (!ieee80211_sdata_running(sdata))
+               return -ENOTCONN;
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
+       if (!skb)
+               return -ENOMEM;
+       skb_reserve(skb, local->hw.extra_tx_headroom);
+
+       hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
+       memset(hdr, 0, 24);
+       fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP:
+               fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+               /* DA BSSID SA */
+               memcpy(hdr->addr1, addr, ETH_ALEN);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
+               break;
+       case NL80211_IFTYPE_STATION:
+               fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+               /* BSSID SA DA */
+               if (sdata->vif.bss_conf.bssid == NULL) {
+                       dev_kfree_skb(skb);
+                       return -ENOTCONN;
+               }
+               memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               memcpy(hdr->addr3, addr, ETH_ALEN);
+               break;
+       default:
+               dev_kfree_skb(skb);
+               return -EOPNOTSUPP;
+       }
+       hdr->frame_control = fc;
+
+       /*
+        * Add some length to the test frame to make it look a bit more valid.
+        * The exact contents do not matter since the recipient is required
+        * to drop this because of the Michael MIC failure.
+        */
+       memset(skb_put(skb, 50), 0, 50);
+
+       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
+
+       ieee80211_tx_skb(sdata, skb);
+
+       return buflen;
+}
+
+__IEEE80211_IF_FILE_W(tkip_mic_test);
+
 /* AP attributes */
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
@@ -283,6 +386,9 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
@@ -291,22 +397,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(last_beacon);
        DEBUGFS_ADD(ave_beacon);
        DEBUGFS_ADD_MODE(smps, 0600);
+       DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 }
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
        DEBUGFS_ADD(num_sta_ps);
        DEBUGFS_ADD(dtim_count);
        DEBUGFS_ADD(num_buffered_multicast);
+       DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 }
 
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
@@ -316,12 +430,18 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 }
 
 static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
 {
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
 }
 
 #ifdef CONFIG_MAC80211_MESH
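The new tkip_mic_test write handler above parses a colon-delimited MAC address with hwaddr_aton() before building the deliberately-broken test frame. A standalone userspace sketch of the same parsing loop, with the kernel's hex_to_bin() replaced by a local helper and ETH_ALEN assumed to be 6:

#include <stdio.h>

#define ETH_ALEN 6

/* Local stand-in for the kernel's hex_to_bin(): returns 0..15 or -1. */
static int hex_to_bin(char c)
{
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
}

/* Same structure as hwaddr_aton() in the hunk: two hex digits per octet,
 * a ':' between the first five octets, -1 on any malformed input. */
static int hwaddr_aton(const char *txt, unsigned char *addr)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                int a = hex_to_bin(*txt++);
                int b;

                if (a < 0)
                        return -1;
                b = hex_to_bin(*txt++);
                if (b < 0)
                        return -1;
                *addr++ = (a << 4) | b;
                if (i < ETH_ALEN - 1 && *txt++ != ':')
                        return -1;
        }
        return 0;
}

int main(void)
{
        unsigned char mac[ETH_ALEN];

        if (hwaddr_aton("00:11:22:aa:bb:cc", mac) == 0)
                printf("last octet: %02x\n", mac[ETH_ALEN - 1]); /* cc */
        return 0;
}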
index 98d5899..9c0d62b 100644 (file)
@@ -5,9 +5,9 @@
 #include "ieee80211_i.h"
 #include "driver-trace.h"
 
-static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
 {
-       return local->ops->tx(&local->hw, skb);
+       local->ops->tx(&local->hw, skb);
 }
 
 static inline int drv_start(struct ieee80211_local *local)
@@ -382,17 +382,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
                                   struct ieee80211_sub_if_data *sdata,
                                   enum ieee80211_ampdu_mlme_action action,
                                   struct ieee80211_sta *sta, u16 tid,
-                                  u16 *ssn)
+                                  u16 *ssn, u8 buf_size)
 {
        int ret = -EOPNOTSUPP;
 
        might_sleep();
 
-       trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+       trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
 
        if (local->ops->ampdu_action)
                ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
-                                              sta, tid, ssn);
+                                              sta, tid, ssn, buf_size);
 
        trace_drv_return_int(local, ret);
 
@@ -495,4 +495,61 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
        return ret;
 }
 
+static inline int drv_offchannel_tx(struct ieee80211_local *local,
+                                   struct sk_buff *skb,
+                                   struct ieee80211_channel *chan,
+                                   enum nl80211_channel_type channel_type,
+                                   unsigned int wait)
+{
+       int ret;
+
+       might_sleep();
+
+       trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
+       ret = local->ops->offchannel_tx(&local->hw, skb, chan,
+                                       channel_type, wait);
+       trace_drv_return_int(local, ret);
+
+       return ret;
+}
+
+static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
+{
+       int ret;
+
+       might_sleep();
+
+       trace_drv_offchannel_tx_cancel_wait(local);
+       ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
+       trace_drv_return_int(local, ret);
+
+       return ret;
+}
+
+static inline int drv_set_ringparam(struct ieee80211_local *local,
+                                   u32 tx, u32 rx)
+{
+       int ret = -ENOTSUPP;
+
+       might_sleep();
+
+       trace_drv_set_ringparam(local, tx, rx);
+       if (local->ops->set_ringparam)
+               ret = local->ops->set_ringparam(&local->hw, tx, rx);
+       trace_drv_return_int(local, ret);
+
+       return ret;
+}
+
+static inline void drv_get_ringparam(struct ieee80211_local *local,
+                                    u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+       might_sleep();
+
+       trace_drv_get_ringparam(local, tx, tx_max, rx, rx_max);
+       if (local->ops->get_ringparam)
+               local->ops->get_ringparam(&local->hw, tx, tx_max, rx, rx_max);
+       trace_drv_return_void(local);
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
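The drv_set_ringparam()/drv_get_ringparam() wrappers above only trace and forward to the driver's optional ops; a driver that exposes its DMA ring sizes supplies the other side. A hedged, standalone sketch of that driver side — my_hw is a hypothetical, simplified stand-in for real driver state, not a real driver:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's private state; a real driver would
 * reprogram its DMA descriptor rings here instead of just storing numbers. */
struct my_hw {
        uint32_t tx_ring, tx_ring_max;
        uint32_t rx_ring, rx_ring_max;
};

/* Counterpart of an ops->set_ringparam() hook: validate and apply. */
static int my_set_ringparam(struct my_hw *hw, uint32_t tx, uint32_t rx)
{
        if (!tx || !rx || tx > hw->tx_ring_max || rx > hw->rx_ring_max)
                return -1;      /* mac80211 would see an errno here */
        hw->tx_ring = tx;
        hw->rx_ring = rx;
        return 0;
}

/* Counterpart of ops->get_ringparam(): report current and maximum sizes. */
static void my_get_ringparam(struct my_hw *hw, uint32_t *tx, uint32_t *tx_max,
                             uint32_t *rx, uint32_t *rx_max)
{
        *tx = hw->tx_ring;
        *tx_max = hw->tx_ring_max;
        *rx = hw->rx_ring;
        *rx_max = hw->rx_ring_max;
}

int main(void)
{
        struct my_hw hw = { 256, 512, 256, 512 };
        uint32_t tx, txm, rx, rxm;

        my_set_ringparam(&hw, 128, 512);
        my_get_ringparam(&hw, &tx, &txm, &rx, &rxm);
        printf("tx:%u/%u rx:%u/%u\n", tx, txm, rx, rxm);
        return 0;
}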
index 49c8421..45aab80 100644 (file)
@@ -9,6 +9,11 @@
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, ...) \
 static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
 #endif
 
 #undef TRACE_SYSTEM
@@ -38,7 +43,7 @@ static inline void trace_ ## name(proto) {}
  * Tracing for driver callbacks.
  */
 
-TRACE_EVENT(drv_return_void,
+DECLARE_EVENT_CLASS(local_only_evt,
        TP_PROTO(struct ieee80211_local *local),
        TP_ARGS(local),
        TP_STRUCT__entry(
@@ -50,6 +55,11 @@ TRACE_EVENT(drv_return_void,
        TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
 );
 
+DEFINE_EVENT(local_only_evt, drv_return_void,
+       TP_PROTO(struct ieee80211_local *local),
+       TP_ARGS(local)
+);
+
 TRACE_EVENT(drv_return_int,
        TP_PROTO(struct ieee80211_local *local, int ret),
        TP_ARGS(local, ret),
@@ -78,40 +88,14 @@ TRACE_EVENT(drv_return_u64,
        TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
 );
 
-TRACE_EVENT(drv_start,
+DEFINE_EVENT(local_only_evt, drv_start,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_stop,
+DEFINE_EVENT(local_only_evt, drv_stop,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_add_interface,
@@ -439,40 +423,14 @@ TRACE_EVENT(drv_hw_scan,
        )
 );
 
-TRACE_EVENT(drv_sw_scan_start,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_sw_scan_complete,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_complete,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_get_stats,
@@ -702,23 +660,9 @@ TRACE_EVENT(drv_conf_tx,
        )
 );
 
-TRACE_EVENT(drv_get_tsf,
+DEFINE_EVENT(local_only_evt, drv_get_tsf,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT,
-               LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_set_tsf,
@@ -742,41 +686,14 @@ TRACE_EVENT(drv_set_tsf,
        )
 );
 
-TRACE_EVENT(drv_reset_tsf,
+DEFINE_EVENT(local_only_evt, drv_reset_tsf,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_tx_last_beacon,
+DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT,
-               LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_ampdu_action,
@@ -784,9 +701,9 @@ TRACE_EVENT(drv_ampdu_action,
                 struct ieee80211_sub_if_data *sdata,
                 enum ieee80211_ampdu_mlme_action action,
                 struct ieee80211_sta *sta, u16 tid,
-                u16 *ssn),
+                u16 *ssn, u8 buf_size),
 
-       TP_ARGS(local, sdata, action, sta, tid, ssn),
+       TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
@@ -794,6 +711,7 @@ TRACE_EVENT(drv_ampdu_action,
                __field(u32, action)
                __field(u16, tid)
                __field(u16, ssn)
+               __field(u8, buf_size)
                VIF_ENTRY
        ),
 
@@ -804,11 +722,13 @@ TRACE_EVENT(drv_ampdu_action,
                __entry->action = action;
                __entry->tid = tid;
                __entry->ssn = ssn ? *ssn : 0;
+               __entry->buf_size = buf_size;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
-               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+               LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d",
+               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
+               __entry->tid, __entry->buf_size
        )
 );
 
@@ -959,24 +879,96 @@ TRACE_EVENT(drv_remain_on_channel,
        )
 );
 
-TRACE_EVENT(drv_cancel_remain_on_channel,
+DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel,
        TP_PROTO(struct ieee80211_local *local),
+       TP_ARGS(local)
+);
 
-       TP_ARGS(local),
+TRACE_EVENT(drv_offchannel_tx,
+       TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb,
+                struct ieee80211_channel *chan,
+                enum nl80211_channel_type channel_type,
+                unsigned int wait),
+
+       TP_ARGS(local, skb, chan, channel_type, wait),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
+               __field(int, center_freq)
+               __field(int, channel_type)
+               __field(unsigned int, wait)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
+               __entry->center_freq = chan->center_freq;
+               __entry->channel_type = channel_type;
+               __entry->wait = wait;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
+               LOCAL_PR_FMT " freq:%dMHz, wait:%dms",
+               LOCAL_PR_ARG, __entry->center_freq, __entry->wait
+       )
+);
+
+TRACE_EVENT(drv_set_ringparam,
+       TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx),
+
+       TP_ARGS(local, tx, rx),
+
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+               __field(u32, tx)
+               __field(u32, rx)
+       ),
+
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+               __entry->tx = tx;
+               __entry->rx = rx;
+       ),
+
+       TP_printk(
+               LOCAL_PR_FMT " tx:%d rx %d",
+               LOCAL_PR_ARG, __entry->tx, __entry->rx
+       )
+);
+
+TRACE_EVENT(drv_get_ringparam,
+       TP_PROTO(struct ieee80211_local *local, u32 *tx, u32 *tx_max,
+                u32 *rx, u32 *rx_max),
+
+       TP_ARGS(local, tx, tx_max, rx, rx_max),
+
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+               __field(u32, tx)
+               __field(u32, tx_max)
+               __field(u32, rx)
+               __field(u32, rx_max)
+       ),
+
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+               __entry->tx = *tx;
+               __entry->tx_max = *tx_max;
+               __entry->rx = *rx;
+               __entry->rx_max = *rx_max;
+       ),
+
+       TP_printk(
+               LOCAL_PR_FMT " tx:%d tx_max %d rx %d rx_max %d",
+               LOCAL_PR_ARG,
+               __entry->tx, __entry->tx_max, __entry->rx, __entry->rx_max
        )
 );
 
+DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
+       TP_PROTO(struct ieee80211_local *local),
+       TP_ARGS(local)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
@@ -1069,23 +1061,9 @@ TRACE_EVENT(api_stop_tx_ba_cb,
        )
 );
 
-TRACE_EVENT(api_restart_hw,
+DEFINE_EVENT(local_only_evt, api_restart_hw,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT,
-               LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(api_beacon_loss,
@@ -1214,40 +1192,14 @@ TRACE_EVENT(api_chswitch_done,
        )
 );
 
-TRACE_EVENT(api_ready_on_channel,
+DEFINE_EVENT(local_only_evt, api_ready_on_channel,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(api_remain_on_channel_expired,
+DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 /*
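The conversions above replace many identical TRACE_EVENT() definitions with a single DECLARE_EVENT_CLASS(local_only_evt) plus one DEFINE_EVENT() line per event; the added #undef/#define pair keeps builds without the driver tracer working by turning each DEFINE_EVENT() into an empty inline stub (the guarding #if is outside this hunk). A toy, preprocessor-only illustration of that stub pattern, with the TP_* helpers reduced to pass-through macros:

#include <stdio.h>

struct ieee80211_local { int dummy; };

/* The "tracing disabled" fallback, as in the hunk: the class body is
 * discarded and every DEFINE_EVENT() becomes an empty inline function. */
#define DECLARE_EVENT_CLASS(...)
#define DEFINE_EVENT(evt_class, name, proto, ...) \
        static inline void trace_##name(proto) {}
#define TP_PROTO(...) __VA_ARGS__
#define TP_ARGS(...)  __VA_ARGS__

DECLARE_EVENT_CLASS(local_only_evt /* class body omitted */)

DEFINE_EVENT(local_only_evt, drv_start,
        TP_PROTO(struct ieee80211_local *local),
        TP_ARGS(local)
);
DEFINE_EVENT(local_only_evt, drv_stop,
        TP_PROTO(struct ieee80211_local *local),
        TP_ARGS(local)
);

int main(void)
{
        struct ieee80211_local local = { 0 };

        trace_drv_start(&local);        /* compiles away to nothing */
        trace_drv_stop(&local);
        puts("stubs compiled and called");
        return 0;
}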
index 75d679d..b9e4b9b 100644 (file)
@@ -66,6 +66,9 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
        /* own MCS TX capabilities */
        tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
 
+       /* Copy peer MCS TX capabilities, the driver might need them. */
+       ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params;
+
        /* can we TX with MCS rates? */
        if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
                return;
@@ -79,7 +82,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
                max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS;
 
        /*
-        * 802.11n D5.0 20.3.5 / 20.6 says:
+        * 802.11n-2009 20.3.5 / 20.6 says:
         * - indices 0 to 7 and 32 are single spatial stream
         * - 8 to 31 are multiple spatial streams using equal modulation
         *   [8..15 for two streams, 16..23 for three and 24..31 for four]
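The corrected reference (802.11n-2009 20.3.5 / 20.6) describes how the MCS index encodes the spatial-stream count for the equal-modulation set. A tiny standalone helper capturing exactly the mapping quoted in the comment above, as an illustration:

#include <stdio.h>

/* MCS index -> number of spatial streams for the equal-modulation MCSs,
 * per the comment above: 0..7 and 32 are one stream, 8..15 two,
 * 16..23 three, 24..31 four.  Other indices (unequal modulation) return 0. */
static int mcs_to_streams(int mcs)
{
        if ((mcs >= 0 && mcs <= 7) || mcs == 32)
                return 1;
        if (mcs >= 8 && mcs <= 31)
                return mcs / 8 + 1;
        return 0;
}

int main(void)
{
        int samples[] = { 0, 7, 8, 15, 16, 23, 24, 31, 32 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("MCS %2d -> %d stream(s)\n",
                       samples[i], mcs_to_streams(samples[i]));
        return 0;
}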
index 53c7077..3e81af1 100644 (file)
@@ -31,7 +31,6 @@
 #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
 
 #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
-#define IEEE80211_IBSS_MERGE_DELAY 0x400000
 #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
 
 #define IEEE80211_IBSS_MAX_STA_ENTRIES 128
@@ -270,7 +269,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_band band = rx_status->band;
 
        if (elems->ds_params && elems->ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+                                                     band);
        else
                freq = rx_status->freq;
 
@@ -354,7 +354,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
                goto put_bss;
 
-       if (rx_status->flag & RX_FLAG_TSFT) {
+       if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
                /*
                 * For correct IBSS merging we need mactime; since mactime is
                 * defined as the time the first data symbol of the frame hits
@@ -396,10 +396,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
               jiffies);
 #endif
 
-       /* give slow hardware some time to do the TSF sync */
-       if (rx_timestamp < IEEE80211_IBSS_MERGE_DELAY)
-               goto put_bss;
-
        if (beacon_timestamp > rx_timestamp) {
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
                printk(KERN_DEBUG "%s: beacon TSF higher than "
@@ -663,12 +659,13 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 }
 
 static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
-                                       struct ieee80211_mgmt *mgmt,
-                                       size_t len)
+                                       struct sk_buff *req)
 {
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
+       struct ieee80211_mgmt *mgmt = (void *)req->data;
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
-       int tx_last_beacon;
+       int tx_last_beacon, len = req->len;
        struct sk_buff *skb;
        struct ieee80211_mgmt *resp;
        u8 *pos, *end;
@@ -688,7 +685,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
               mgmt->bssid, tx_last_beacon);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-       if (!tx_last_beacon)
+       if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH))
                return;
 
        if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
@@ -785,7 +782,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 
        switch (fc & IEEE80211_FCTL_STYPE) {
        case IEEE80211_STYPE_PROBE_REQ:
-               ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
+               ieee80211_rx_mgmt_probe_req(sdata, skb);
                break;
        case IEEE80211_STYPE_PROBE_RESP:
                ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
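Several hunks in this section (ibss.c here, mesh.c and mlme.c below) switch ieee80211_channel_to_frequency() to take the band explicitly, since a bare channel number from the DS parameter set does not identify a frequency on its own. As a reference, a standalone sketch of the conventional channel-to-centre-frequency mapping for the two bands handled here (error handling and more exotic bands omitted; values in MHz):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Rough userspace equivalent of the mapping: 2.4 GHz channels are
 * 2407 + 5 * ch with channel 14 special-cased at 2484, and 5 GHz
 * channels are 5000 + 5 * ch. */
static int channel_to_frequency(int ch, enum band band)
{
        if (band == BAND_2GHZ) {
                if (ch == 14)
                        return 2484;
                return 2407 + ch * 5;
        }
        return 5000 + ch * 5;
}

int main(void)
{
        printf("2.4 GHz ch 1  -> %d MHz\n", channel_to_frequency(1, BAND_2GHZ));
        printf("2.4 GHz ch 14 -> %d MHz\n", channel_to_frequency(14, BAND_2GHZ));
        printf("5 GHz   ch 36 -> %d MHz\n", channel_to_frequency(36, BAND_5GHZ));
        return 0;
}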
index c47d7c0..a404017 100644 (file)
@@ -225,6 +225,7 @@ struct ieee80211_if_ap {
        struct sk_buff_head ps_bc_buf;
        atomic_t num_sta_ps; /* number of stations in PS mode */
        int dtim_count;
+       bool dtim_bc_mc;
 };
 
 struct ieee80211_if_wds {
@@ -654,8 +655,6 @@ struct tpt_led_trigger {
  *     well be on the operating channel
  * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
  *     determine if we are on the operating channel or not
- * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
- *     gets only set in conjunction with SCAN_SW_SCANNING
  * @SCAN_COMPLETED: Set for our scan work function when the driver reported
  *     that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -664,7 +663,6 @@ struct tpt_led_trigger {
 enum {
        SCAN_SW_SCANNING,
        SCAN_HW_SCANNING,
-       SCAN_OFF_CHANNEL,
        SCAN_COMPLETED,
        SCAN_ABORTED,
 };
@@ -953,12 +951,13 @@ struct ieee80211_local {
 
        struct ieee80211_channel *hw_roc_channel;
        struct net_device *hw_roc_dev;
-       struct sk_buff *hw_roc_skb;
+       struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
        struct work_struct hw_roc_start, hw_roc_done;
        enum nl80211_channel_type hw_roc_channel_type;
        unsigned int hw_roc_duration;
        u32 hw_roc_cookie;
        bool hw_roc_for_tx;
+       unsigned long hw_offchan_tx_cookie;
 
        /* dummy netdev for use w/ NAPI */
        struct net_device napi_dev;
@@ -1068,8 +1067,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 void ieee80211_configure_filter(struct ieee80211_local *local);
 u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
 
-extern bool ieee80211_disable_40mhz_24ghz;
-
 /* STA code */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -1147,10 +1144,14 @@ void ieee80211_rx_bss_put(struct ieee80211_local *local,
                          struct ieee80211_bss *bss);
 
 /* off-channel helpers */
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+                                       bool tell_ap);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+                                   bool offchannel_ps_enable);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing);
+                                bool enable_beaconing,
+                                bool offchannel_ps_disable);
 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
 
 /* interface handling */
index 8acba45..4054399 100644 (file)
@@ -382,6 +382,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        struct sk_buff *skb, *tmp;
        u32 hw_reconf_flags = 0;
        int i;
+       enum nl80211_channel_type orig_ct;
 
        if (local->scan_sdata == sdata)
                ieee80211_scan_cancel(local);
@@ -542,8 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                hw_reconf_flags = 0;
        }
 
+       /* Re-calculate channel-type, in case there are multiple vifs
+        * on different channel types.
+        */
+       orig_ct = local->_oper_channel_type;
+       ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT);
+
        /* do after stop to avoid reconfiguring when we stop anyway */
-       if (hw_reconf_flags)
+       if (hw_reconf_flags || (orig_ct != local->_oper_channel_type))
                ieee80211_hw_config(local, hw_reconf_flags);
 
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
@@ -1229,6 +1236,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
+       list_del(&unreg_list);
 }
 
 static u32 ieee80211_idle_off(struct ieee80211_local *local,
index 8106aa1..4ddbe27 100644 (file)
@@ -21,7 +21,6 @@
 
 #define WEP_IV_LEN             4
 #define WEP_ICV_LEN            4
-#define ALG_TKIP_KEY_LEN       32
 #define ALG_CCMP_KEY_LEN       16
 #define CCMP_HDR_LEN           8
 #define CCMP_MIC_LEN           8
index a46ff06..562d298 100644 (file)
@@ -34,7 +34,7 @@
 #include "debugfs.h"
 
 
-bool ieee80211_disable_40mhz_24ghz;
+static bool ieee80211_disable_40mhz_24ghz;
 module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
 MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
                 "Disable 40MHz support in the 2.4GHz band");
@@ -98,6 +98,47 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
        ieee80211_configure_filter(local);
 }
 
+/*
+ * Returns true if we are logically configured to be on
+ * the operating channel AND the hardware-conf is currently
+ * configured on the operating channel.  Compares channel-type
+ * as well.
+ */
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+{
+       struct ieee80211_channel *chan, *scan_chan;
+       enum nl80211_channel_type channel_type;
+
+       /* This logic needs to match logic in ieee80211_hw_config */
+       if (local->scan_channel) {
+               chan = local->scan_channel;
+               /* If scanning on oper channel, use whatever channel-type
+                * is currently in use.
+                */
+               if (chan == local->oper_channel)
+                       channel_type = local->_oper_channel_type;
+               else
+                       channel_type = NL80211_CHAN_NO_HT;
+       } else if (local->tmp_channel) {
+               chan = scan_chan = local->tmp_channel;
+               channel_type = local->tmp_channel_type;
+       } else {
+               chan = local->oper_channel;
+               channel_type = local->_oper_channel_type;
+       }
+
+       if (chan != local->oper_channel ||
+           channel_type != local->_oper_channel_type)
+               return false;
+
+       /* Check current hardware-config against oper_channel. */
+       if ((local->oper_channel != local->hw.conf.channel) ||
+           (local->_oper_channel_type != local->hw.conf.channel_type))
+               return false;
+
+       return true;
+}
+
 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 {
        struct ieee80211_channel *chan, *scan_chan;
@@ -110,21 +151,33 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 
        scan_chan = local->scan_channel;
 
+       /* If this off-channel logic ever changes, ieee80211_cfg_on_oper_channel
+        * may need to change as well.
+        */
        offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
        if (scan_chan) {
                chan = scan_chan;
-               channel_type = NL80211_CHAN_NO_HT;
-               local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
-       } else if (local->tmp_channel &&
-                  local->oper_channel != local->tmp_channel) {
+               /* If scanning on oper channel, use whatever channel-type
+                * is currently in use.
+                */
+               if (chan == local->oper_channel)
+                       channel_type = local->_oper_channel_type;
+               else
+                       channel_type = NL80211_CHAN_NO_HT;
+       } else if (local->tmp_channel) {
                chan = scan_chan = local->tmp_channel;
                channel_type = local->tmp_channel_type;
-               local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
        } else {
                chan = local->oper_channel;
                channel_type = local->_oper_channel_type;
-               local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
        }
+
+       if (chan != local->oper_channel ||
+           channel_type != local->_oper_channel_type)
+               local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+       else
+               local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
+
        offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
 
        if (offchannel_flag || chan != local->hw.conf.channel ||
@@ -146,7 +199,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
                changed |= IEEE80211_CONF_CHANGE_SMPS;
        }
 
-       if (scan_chan)
+       if ((local->scanning & SCAN_SW_SCANNING) ||
+           (local->scanning & SCAN_HW_SCANNING))
                power = chan->max_power;
        else
                power = local->power_constr_level ?
@@ -231,7 +285,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 
        if (changed & BSS_CHANGED_BEACON_ENABLED) {
                if (local->quiescing || !ieee80211_sdata_running(sdata) ||
-                   test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+                   test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
                        sdata->vif.bss_conf.enable_beacon = false;
                } else {
                        /*
@@ -326,6 +380,9 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
 
        trace_api_restart_hw(local);
 
+       wiphy_info(hw->wiphy,
+                  "Hardware restart was requested\n");
+
        /* use this reason, ieee80211_reconfig will unblock it */
        ieee80211_stop_queues_by_reason(hw,
                IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -554,6 +611,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        local->hw.queues = 1;
        local->hw.max_rates = 1;
        local->hw.max_report_rates = 0;
+       local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
        local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
        local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
        local->user_power_level = -1;
@@ -668,6 +726,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                }
                channels += sband->n_channels;
 
+               /*
+                * Since ieee80211_disable_40mhz_24ghz is global, we can
+                * modify the sband's ht data even if the driver uses a
+                * global structure for that.
+                */
+               if (ieee80211_disable_40mhz_24ghz &&
+                   band == IEEE80211_BAND_2GHZ &&
+                   sband->ht_cap.ht_supported) {
+                       sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+               }
+
                if (max_bitrates < sband->n_bitrates)
                        max_bitrates = sband->n_bitrates;
                supp_ht = supp_ht || sband->ht_cap.ht_supported;
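With the module parameter made static, main.c now strips 40 MHz support from the 2.4 GHz band's HT capabilities at registration time. A standalone sketch of the same masking on a simplified stand-in for the supported-band structure; the bit values are local constants mirroring IEEE80211_HT_CAP_SUP_WIDTH_20_40 and IEEE80211_HT_CAP_SGI_40 and should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Bit values mirroring the HT capability flags cleared in the hunk. */
#define HT_CAP_SUP_WIDTH_20_40  0x0002
#define HT_CAP_SGI_40           0x0040

enum band { BAND_2GHZ, BAND_5GHZ };

struct sband {          /* simplified stand-in for ieee80211_supported_band */
        enum band band;
        bool ht_supported;
        uint16_t ht_cap;
};

static void maybe_disable_40mhz_24ghz(struct sband *sband, bool disable)
{
        if (disable && sband->band == BAND_2GHZ && sband->ht_supported)
                sband->ht_cap &= ~(HT_CAP_SUP_WIDTH_20_40 | HT_CAP_SGI_40);
}

int main(void)
{
        struct sband s = { BAND_2GHZ, true, 0x01ff };

        maybe_disable_40mhz_24ghz(&s, true);
        printf("2.4 GHz ht_cap after masking: 0x%04x\n", s.ht_cap); /* 0x01bd */
        return 0;
}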
index ca3af46..2a57cc0 100644 (file)
@@ -574,7 +574,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                               &elems);
 
        if (elems.ds_params && elems.ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
        else
                freq = rx_status->freq;
 
@@ -645,7 +645,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
        if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
                mesh_mpath_table_grow();
 
-       if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
+       if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
                mesh_mpp_table_grow();
 
        if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
index 45fbb9e..64d92d5 100644 (file)
 #include "rate.h"
 #include "led.h"
 
-#define IEEE80211_MAX_NULLFUNC_TRIES 2
-#define IEEE80211_MAX_PROBE_TRIES 5
+static int max_nullfunc_tries = 2;
+module_param(max_nullfunc_tries, int, 0644);
+MODULE_PARM_DESC(max_nullfunc_tries,
+                "Maximum nullfunc tx tries before disconnecting (reason 4).");
+
+static int max_probe_tries = 5;
+module_param(max_probe_tries, int, 0644);
+MODULE_PARM_DESC(max_probe_tries,
+                "Maximum probe tries before disconnecting (reason 4).");
 
 /*
  * Beacon loss timeout is calculated as N frames times the
  * a probe request because of beacon loss or for
  * checking the connection still works.
  */
-#define IEEE80211_PROBE_WAIT           (HZ / 2)
+static int probe_wait_ms = 500;
+module_param(probe_wait_ms, int, 0644);
+MODULE_PARM_DESC(probe_wait_ms,
+                "Maximum time(ms) to wait for probe response"
+                " before disconnecting (reason 4).");
 
 /*
  * Weight given to the latest Beacon frame when calculating average signal
@@ -134,6 +145,9 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       if (unlikely(!sdata->u.mgd.associated))
+               return;
+
        if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
                return;
 
@@ -161,6 +175,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_supported_band *sband;
        struct sta_info *sta;
        u32 changed = 0;
+       int hti_cfreq;
        u16 ht_opmode;
        bool enable_ht = true;
        enum nl80211_channel_type prev_chantype;
@@ -174,10 +189,27 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
        if (!sband->ht_cap.ht_supported)
                enable_ht = false;
 
-       /* check that channel matches the right operating channel */
-       if (local->hw.conf.channel->center_freq !=
-           ieee80211_channel_to_frequency(hti->control_chan))
-               enable_ht = false;
+       if (enable_ht) {
+               hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
+                                                          sband->band);
+               /* check that channel matches the right operating channel */
+               if (local->hw.conf.channel->center_freq != hti_cfreq) {
+                       /* Some APs mess this up, evidently.
+                        * Netgear WNDR3700 sometimes reports 4 higher than
+                        * the actual channel, for instance.
+                        */
+                       printk(KERN_DEBUG
+                              "%s: Wrong control channel in association"
+                              " response: configured center-freq: %d"
+                              " hti-cfreq: %d  hti->control_chan: %d"
+                              " band: %d.  Disabling HT.\n",
+                              sdata->name,
+                              local->hw.conf.channel->center_freq,
+                              hti_cfreq, hti->control_chan,
+                              sband->band);
+                       enable_ht = false;
+               }
+       }
 
        if (enable_ht) {
                channel_type = NL80211_CHAN_HT20;
@@ -429,7 +461,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                container_of((void *)bss, struct cfg80211_bss, priv);
        struct ieee80211_channel *new_ch;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
+       int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num,
+                                                     cbss->channel->band);
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -580,6 +613,37 @@ static void ieee80211_change_ps(struct ieee80211_local *local)
        }
 }
 
+static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_managed *mgd = &sdata->u.mgd;
+       struct sta_info *sta = NULL;
+       u32 sta_flags = 0;
+
+       if (!mgd->powersave)
+               return false;
+
+       if (!mgd->associated)
+               return false;
+
+       if (!mgd->associated->beacon_ies)
+               return false;
+
+       if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
+                         IEEE80211_STA_CONNECTION_POLL))
+               return false;
+
+       rcu_read_lock();
+       sta = sta_info_get(sdata, mgd->bssid);
+       if (sta)
+               sta_flags = get_sta_flags(sta);
+       rcu_read_unlock();
+
+       if (!(sta_flags & WLAN_STA_AUTHORIZED))
+               return false;
+
+       return true;
+}
+
 /* need to hold RTNL or interface lock */
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
 {
@@ -600,17 +664,21 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_AP) {
+                       /* If an AP vif is found, then disable PS
+                        * by setting the count to zero thereby setting
+                        * ps_sdata to NULL.
+                        */
+                       count = 0;
+                       break;
+               }
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
                        continue;
                found = sdata;
                count++;
        }
 
-       if (count == 1 && found->u.mgd.powersave &&
-           found->u.mgd.associated &&
-           found->u.mgd.associated->beacon_ies &&
-           !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
-                                   IEEE80211_STA_CONNECTION_POLL))) {
+       if (count == 1 && ieee80211_powersave_allowed(found)) {
                struct ieee80211_conf *conf = &local->hw.conf;
                s32 beaconint_us;
 
@@ -700,9 +768,19 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
                return;
 
        if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-           (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
+           (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
+               netif_tx_stop_all_queues(sdata->dev);
+               /*
+                * Flush all the frames queued in the driver before
+                * going to power save
+                */
+               drv_flush(local, false);
                ieee80211_send_nullfunc(local, sdata, 1);
 
+               /* Flush once again to get the tx status of nullfunc frame */
+               drv_flush(local, false);
+       }
+
        if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
              (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
            (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
@@ -710,6 +788,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
                local->hw.conf.flags |= IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }
+
+       netif_tx_start_all_queues(sdata->dev);
 }
 
 void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -1089,7 +1169,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        const u8 *ssid;
        u8 *dst = ifmgd->associated->bssid;
-       u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
+       u8 unicast_limit = max(1, max_probe_tries - 3);
 
        /*
         * Try sending broadcast probe requests for the last three
@@ -1115,7 +1195,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
        }
 
        ifmgd->probe_send_count++;
-       ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
+       ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
        run_again(ifmgd, ifmgd->probe_timeout);
 }
 
@@ -1216,7 +1296,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
 
        memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
-       printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+       printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
+              sdata->name, bssid);
 
        ieee80211_set_disassoc(sdata, true, true);
        mutex_unlock(&ifmgd->mtx);
@@ -1519,7 +1600,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        }
 
        if (elems->ds_params && elems->ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+                                                     rx_status->band);
        else
                freq = rx_status->freq;
 
@@ -1960,9 +2042,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
                if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
-                       max_tries = IEEE80211_MAX_NULLFUNC_TRIES;
+                       max_tries = max_nullfunc_tries;
                else
-                       max_tries = IEEE80211_MAX_PROBE_TRIES;
+                       max_tries = max_probe_tries;
 
                /* ACK received for nullfunc probing frame */
                if (!ifmgd->probe_send_count)
@@ -1972,9 +2054,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
                                wiphy_debug(local->hw.wiphy,
                                            "%s: No ack for nullfunc frame to"
-                                           " AP %pM, try %d\n",
+                                           " AP %pM, try %d/%i\n",
                                            sdata->name, bssid,
-                                           ifmgd->probe_send_count);
+                                           ifmgd->probe_send_count, max_tries);
 #endif
                                ieee80211_mgd_probe_ap_send(sdata);
                        } else {
@@ -1994,17 +2076,17 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                    "%s: Failed to send nullfunc to AP %pM"
                                    " after %dms, disconnecting.\n",
                                    sdata->name,
-                                   bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+                                   bssid, probe_wait_ms);
 #endif
                        ieee80211_sta_connection_lost(sdata, bssid);
                } else if (ifmgd->probe_send_count < max_tries) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
                        wiphy_debug(local->hw.wiphy,
                                    "%s: No probe response from AP %pM"
-                                   " after %dms, try %d\n",
+                                   " after %dms, try %d/%i\n",
                                    sdata->name,
-                                   bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ,
-                                   ifmgd->probe_send_count);
+                                   bssid, probe_wait_ms,
+                                   ifmgd->probe_send_count, max_tries);
 #endif
                        ieee80211_mgd_probe_ap_send(sdata);
                } else {
@@ -2016,7 +2098,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                    "%s: No probe response from AP %pM"
                                    " after %dms, disconnecting.\n",
                                    sdata->name,
-                                   bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+                                   bssid, probe_wait_ms);
 
                        ieee80211_sta_connection_lost(sdata, bssid);
                }
@@ -2254,6 +2336,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        else
                wk->type = IEEE80211_WORK_DIRECT_PROBE;
        wk->chan = req->bss->channel;
+       wk->chan_type = NL80211_CHAN_NO_HT;
        wk->sdata = sdata;
        wk->done = ieee80211_probe_auth_done;
 
@@ -2403,6 +2486,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
 
        wk->chan = req->bss->channel;
+       wk->chan_type = NL80211_CHAN_NO_HT;
        wk->sdata = sdata;
        wk->done = ieee80211_assoc_done;
        if (!bss->dtim_period &&
index b4e5267..13427b1 100644 (file)
 #include "driver-trace.h"
 
 /*
- * inform AP that we will go to sleep so that it will buffer the frames
- * while we scan
+ * Tell our hardware to disable PS.
+ * Optionally inform AP that we will go to sleep so that it will buffer
+ * the frames while we are doing off-channel work.  This is optional
+ * because we *may* be doing work on the operating channel, and want our
+ * hardware unconditionally awake, but still let the AP send us normal frames.
  */
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+                                          bool tell_ap)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -41,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }
 
-       if (!(local->offchannel_ps_enabled) ||
-           !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+       if (tell_ap && (!local->offchannel_ps_enabled ||
+                       !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
                /*
                 * If power save was enabled, no need to send a nullfunc
                 * frame because AP knows that we are sleeping. But if the
@@ -77,6 +81,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
                 * we are sleeping, let's just enable power save mode in
                 * hardware.
                 */
+               /* TODO:  Only set hardware if CONF_PS changed?
+                * TODO:  Should we set offchannel_ps_enabled to false?
+                */
                local->hw.conf.flags |= IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        } else if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -95,63 +102,61 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
        ieee80211_sta_reset_conn_monitor(sdata);
 }
 
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+                                   bool offchannel_ps_enable)
 {
        struct ieee80211_sub_if_data *sdata;
 
+       /*
+        * notify the AP about us leaving the channel and stop all
+        * STA interfaces.
+        */
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
-               /* disable beaconing */
+               if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+                       set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+
+               /* Check to see if we should disable beaconing. */
                if (sdata->vif.type == NL80211_IFTYPE_AP ||
                    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
                    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
 
-               /*
-                * only handle non-STA interfaces here, STA interfaces
-                * are handled in ieee80211_offchannel_stop_station(),
-                * e.g., from the background scan state machine.
-                *
-                * In addition, do not stop monitor interface to allow it to be
-                * used from user space controlled off-channel operations.
-                */
-               if (sdata->vif.type != NL80211_IFTYPE_STATION &&
-                   sdata->vif.type != NL80211_IFTYPE_MONITOR) {
-                       set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+               if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
                        netif_tx_stop_all_queues(sdata->dev);
+                       if (offchannel_ps_enable &&
+                           (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+                           sdata->u.mgd.associated)
+                               ieee80211_offchannel_ps_enable(sdata, true);
                }
        }
        mutex_unlock(&local->iflist_mtx);
 }
 
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+                                       bool tell_ap)
 {
        struct ieee80211_sub_if_data *sdata;
 
-       /*
-        * notify the AP about us leaving the channel and stop all STA interfaces
-        */
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
-               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-                       set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
-                       netif_tx_stop_all_queues(sdata->dev);
-                       if (sdata->u.mgd.associated)
-                               ieee80211_offchannel_ps_enable(sdata);
-               }
+               if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+                   sdata->u.mgd.associated)
+                       ieee80211_offchannel_ps_enable(sdata, tell_ap);
        }
        mutex_unlock(&local->iflist_mtx);
 }
 
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing)
+                                bool enable_beaconing,
+                                bool offchannel_ps_disable)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -161,7 +166,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        continue;
 
                /* Tell AP we're back */
-               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+               if (offchannel_ps_disable &&
+                   sdata->vif.type == NL80211_IFTYPE_STATION) {
                        if (sdata->u.mgd.associated)
                                ieee80211_offchannel_ps_disable(sdata);
                }
@@ -181,7 +187,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        netif_tx_wake_all_queues(sdata->dev);
                }
 
-               /* re-enable beaconing */
+               /* Check to see if we should re-enable beaconing */
                if (enable_beaconing &&
                    (sdata->vif.type == NL80211_IFTYPE_AP ||
                     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
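The reworked helpers split "quiesce the vifs" from "tell the AP we are sleeping", so off-channel work can decide whether the AP should buffer for us. A heavily simplified sketch of how a scan-style caller might sequence the new entry points; the call order and the stub bodies are assumptions for illustration, not taken from this diff:

#include <stdio.h>
#include <stdbool.h>

/* Stubs standing in for the mac80211 helpers whose signatures appear in
 * this diff; the bodies just log so the sequencing is visible. */
static void offchannel_stop_vifs(bool offchannel_ps_enable)
{
        printf("stop vifs (tell AP we sleep: %s)\n",
               offchannel_ps_enable ? "yes" : "no");
}

static void offchannel_return(bool enable_beaconing, bool offchannel_ps_disable)
{
        printf("return (beacon: %s, wake AP path: %s)\n",
               enable_beaconing ? "yes" : "no",
               offchannel_ps_disable ? "yes" : "no");
}

static void work_off_channel(const char *what)
{
        printf("  ... %s off-channel ...\n", what);
}

int main(void)
{
        /* Assumed caller pattern: quiesce and enter PS, do the off-channel
         * work, then undo both on the way back. */
        offchannel_stop_vifs(true);
        work_off_channel("probe a foreign channel");
        offchannel_return(true, true);
        return 0;
}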
index 165a451..8212a8b 100644 (file)
@@ -415,10 +415,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                mi->sample_count--;
        }
 
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
                mi->sample_packets += info->status.ampdu_len;
-               minstrel_next_sample_idx(mi);
-       }
 
        for (i = 0; !last; i++) {
                last = (i == IEEE80211_TX_MAX_RATES - 1) ||
@@ -519,9 +517,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                rate->count = mr->retry_count;
 
        rate->flags = IEEE80211_TX_RC_MCS | group->flags;
-       if (txrc->short_preamble)
-               rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
-       if (txrc->rts || rtscts)
+       if (rtscts)
                rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
        rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
 }
@@ -553,13 +549,14 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        sample_idx = sample_table[mg->column][mg->index];
        mr = &mg->rates[sample_idx];
        sample_idx += mi->sample_group * MCS_GROUP_RATES;
+       minstrel_next_sample_idx(mi);
 
        /*
         * When not using MRR, do not sample if the probability is already
         * higher than 95% to avoid wasting airtime
         */
        if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
-               goto next;
+               return -1;
 
        /*
         * Make sure that lower rates get sampled only occasionally,
@@ -568,17 +565,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        if (minstrel_get_duration(sample_idx) >
            minstrel_get_duration(mi->max_tp_rate)) {
                if (mr->sample_skipped < 20)
-                       goto next;
+                       return -1;
 
                if (mi->sample_slow++ > 2)
-                       goto next;
+                       return -1;
        }
 
        return sample_idx;
-
-next:
-       minstrel_next_sample_idx(mi);
-       return -1;
 }
 
 static void
@@ -605,19 +598,46 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                sample = true;
                minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
                        txrc, true, false);
-               minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
-                       txrc, false, false);
                info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
                minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
                        txrc, false, false);
-               minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
-                       txrc, false, true);
        }
-       minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, !sample);
 
-       ar[3].count = 0;
-       ar[3].idx = -1;
+       if (mp->hw->max_rates >= 3) {
+               /*
+                * At least 3 tx rates supported, use
+                * sample_rate -> max_tp_rate -> max_prob_rate for sampling and
+                * max_tp_rate -> max_tp_rate2 -> max_prob_rate by default.
+                */
+               if (sample_idx >= 0)
+                       minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
+                               txrc, false, false);
+               else
+                       minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
+                               txrc, false, true);
+
+               minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
+                                    txrc, false, !sample);
+
+               ar[3].count = 0;
+               ar[3].idx = -1;
+       } else if (mp->hw->max_rates == 2) {
+               /*
+                * Only 2 tx rates supported, use
+                * sample_rate -> max_prob_rate for sampling and
+                * max_tp_rate -> max_prob_rate by default.
+                */
+               minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
+                                    txrc, false, !sample);
+
+               ar[2].count = 0;
+               ar[2].idx = -1;
+       } else {
+               /* Not using MRR, only use the first rate */
+               ar[1].count = 0;
+               ar[1].idx = -1;
+       }
 
        mi->total_packets++;
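The rewritten tail of minstrel_ht_get_rate() picks the multi-rate-retry chain based on how many TX rates the hardware supports, exactly as the new comments spell out. A standalone sketch of just that selection, with string labels standing in for the real MCS group/rate indices:

#include <stdio.h>
#include <stdbool.h>

/* Fill `chain` (up to 4 slots, NULL-terminated here) following the comments
 * in the hunk:
 *   >=3 rates: sample -> max_tp -> max_prob, or max_tp -> max_tp2 -> max_prob
 *    2 rates:  sample -> max_prob,           or max_tp -> max_prob
 *    1 rate:   first rate only (no MRR).
 */
static void build_rate_chain(const char *chain[4], int max_rates, bool sampling)
{
        int n = 0;

        chain[n++] = sampling ? "sample" : "max_tp";
        if (max_rates >= 3) {
                chain[n++] = sampling ? "max_tp" : "max_tp2";
                chain[n++] = "max_prob";
        } else if (max_rates == 2) {
                chain[n++] = "max_prob";
        }
        while (n < 4)
                chain[n++] = NULL;      /* idx = -1 in the real code */
}

static void show(int max_rates, bool sampling)
{
        const char *chain[4];
        int i;

        build_rate_chain(chain, max_rates, sampling);
        printf("max_rates=%d %-7s:", max_rates, sampling ? "sample" : "normal");
        for (i = 0; i < 4 && chain[i]; i++)
                printf(" %s", chain[i]);
        printf("\n");
}

int main(void)
{
        show(4, false);
        show(4, true);
        show(2, false);
        show(1, false);
        return 0;
}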
 
index 1a873f0..6510f8e 100644 (file)
@@ -24,9 +24,6 @@
 /* Fixed point arithmetic shifting amount. */
 #define RC_PID_ARITH_SHIFT             8
 
-/* Fixed point arithmetic factor. */
-#define RC_PID_ARITH_FACTOR            (1 << RC_PID_ARITH_SHIFT)
-
 /* Proportional PID component coefficient. */
 #define RC_PID_COEFF_P                 15
 /* Integral PID component coefficient. */
index a6701ed..5c1930b 100644 (file)
@@ -77,7 +77,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        /* always present fields */
        len = sizeof(struct ieee80211_radiotap_header) + 9;
 
-       if (status->flag & RX_FLAG_TSFT)
+       if (status->flag & RX_FLAG_MACTIME_MPDU)
                len += 8;
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
                len += 1;
@@ -85,6 +85,9 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        if (len & 1) /* padding for RX_FLAGS if necessary */
                len++;
 
+       if (status->flag & RX_FLAG_HT) /* HT info */
+               len += 3;
+
        return len;
 }
 
@@ -120,7 +123,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        /* the order of the following fields is important */
 
        /* IEEE80211_RADIOTAP_TSFT */
-       if (status->flag & RX_FLAG_TSFT) {
+       if (status->flag & RX_FLAG_MACTIME_MPDU) {
                put_unaligned_le64(status->mactime, pos);
                rthdr->it_present |=
                        cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
@@ -139,11 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        /* IEEE80211_RADIOTAP_RATE */
        if (status->flag & RX_FLAG_HT) {
                /*
-                * TODO: add following information into radiotap header once
-                * suitable fields are defined for it:
-                * - MCS index (status->rate_idx)
-                * - HT40 (status->flag & RX_FLAG_40MHZ)
-                * - short-GI (status->flag & RX_FLAG_SHORT_GI)
+                * MCS information is a separate field in radiotap,
+                * added below.
                 */
                *pos = 0;
        } else {
@@ -193,6 +193,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
        put_unaligned_le16(rx_flags, pos);
        pos += 2;
+
+       if (status->flag & RX_FLAG_HT) {
+               rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+               *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+                        IEEE80211_RADIOTAP_MCS_HAVE_GI |
+                        IEEE80211_RADIOTAP_MCS_HAVE_BW;
+               *pos = 0;
+               if (status->flag & RX_FLAG_SHORT_GI)
+                       *pos |= IEEE80211_RADIOTAP_MCS_SGI;
+               if (status->flag & RX_FLAG_40MHZ)
+                       *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+               pos++;
+               *pos++ = status->rate_idx;
+       }
 }
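The three bytes appended above make up the radiotap MCS field. Purely as a reading aid (this struct is not defined by the patch), the layout is:

struct example_radiotap_mcs {
	u8 known;	/* IEEE80211_RADIOTAP_MCS_HAVE_{MCS,GI,BW} */
	u8 flags;	/* IEEE80211_RADIOTAP_MCS_SGI / _BW_40 when detected */
	u8 mcs;		/* HT MCS index, copied from status->rate_idx */
} __packed;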
 
 /*
@@ -392,16 +406,10 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
        if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
                return RX_CONTINUE;
 
-       if (test_bit(SCAN_HW_SCANNING, &local->scanning))
+       if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+           test_bit(SCAN_SW_SCANNING, &local->scanning))
                return ieee80211_scan_rx(rx->sdata, skb);
 
-       if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
-               /* drop all the other packets during a software scan anyway */
-               if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
-                       dev_kfree_skb(skb);
-               return RX_QUEUED;
-       }
-
        /* scanning finished during invoking of handlers */
        I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
        return RX_DROP_UNUSABLE;
@@ -798,7 +806,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
                                rx->local->dot11FrameDuplicateCount++;
                                rx->sta->num_duplicates++;
                        }
-                       return RX_DROP_MONITOR;
+                       return RX_DROP_UNUSABLE;
                } else
                        rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
        }
@@ -824,18 +832,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
                      ieee80211_is_pspoll(hdr->frame_control)) &&
                     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
                     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
-                    (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
-               if ((!ieee80211_has_fromds(hdr->frame_control) &&
-                    !ieee80211_has_tods(hdr->frame_control) &&
-                    ieee80211_is_data(hdr->frame_control)) ||
-                   !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
-                       /* Drop IBSS frames and frames for other hosts
-                        * silently. */
-                       return RX_DROP_MONITOR;
-               }
-
+                    (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
                return RX_DROP_MONITOR;
-       }
 
        return RX_CONTINUE;
 }
@@ -1088,7 +1086,8 @@ static void ap_sta_ps_start(struct sta_info *sta)
 
        atomic_inc(&sdata->bss->num_sta_ps);
        set_sta_flags(sta, WLAN_STA_PS_STA);
-       drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
+       if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+               drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
        printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
               sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1117,6 +1116,27 @@ static void ap_sta_ps_end(struct sta_info *sta)
        ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+{
+       struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+       bool in_ps;
+
+       WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
+
+       /* Don't let the same PS state be set twice */
+       in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+       if ((start && in_ps) || (!start && !in_ps))
+               return -EINVAL;
+
+       if (start)
+               ap_sta_ps_start(sta_inf);
+       else
+               ap_sta_ps_end(sta_inf);
+
+       return 0;
+}
+EXPORT_SYMBOL(ieee80211_sta_ps_transition);
+
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 {
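Drivers that set IEEE80211_HW_AP_LINK_PS report hardware-detected powersave changes through the newly exported ieee80211_sta_ps_transition(). A minimal sketch of a caller, assuming a hypothetical driver event hook:

static void example_drv_ps_event(struct ieee80211_sta *sta, bool sleeping)
{
	/* -EINVAL means the same PS state was reported twice; ignore it. */
	if (ieee80211_sta_ps_transition(sta, sleeping) == -EINVAL)
		pr_debug("duplicate PS transition ignored\n");
}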
@@ -1136,14 +1156,23 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
        if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
                u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
                                                NL80211_IFTYPE_ADHOC);
-               if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
+               if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
                        sta->last_rx = jiffies;
+                       if (ieee80211_is_data(hdr->frame_control)) {
+                               sta->last_rx_rate_idx = status->rate_idx;
+                               sta->last_rx_rate_flag = status->flag;
+                       }
+               }
        } else if (!is_multicast_ether_addr(hdr->addr1)) {
                /*
                 * Mesh beacons will update last_rx when if they are found to
                 * match the current local configuration when processed.
                 */
                sta->last_rx = jiffies;
+               if (ieee80211_is_data(hdr->frame_control)) {
+                       sta->last_rx_rate_idx = status->rate_idx;
+                       sta->last_rx_rate_flag = status->flag;
+               }
        }
 
        if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -1161,7 +1190,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         * Change STA power saving mode only at the end of a frame
         * exchange sequence.
         */
-       if (!ieee80211_has_morefrags(hdr->frame_control) &&
+       if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
+           !ieee80211_has_morefrags(hdr->frame_control) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
            (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
             rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
@@ -1556,17 +1586,36 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+       bool check_port_control = false;
+       struct ethhdr *ehdr;
+       int ret;
 
        if (ieee80211_has_a4(hdr->frame_control) &&
            sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
                return -1;
 
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
+
+               if (!sdata->u.mgd.use_4addr)
+                       return -1;
+               else
+                       check_port_control = true;
+       }
+
        if (is_multicast_ether_addr(hdr->addr1) &&
-           ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
-            (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
                return -1;
 
-       return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+       ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+       if (ret < 0 || !check_port_control)
+               return ret;
+
+       ehdr = (struct ethhdr *) rx->skb->data;
+       if (ehdr->h_proto != rx->sdata->control_port_protocol)
+               return -1;
+
+       return 0;
 }
 
 /*
@@ -1893,7 +1942,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
        dev->stats.rx_bytes += rx->skb->len;
 
        if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
-           !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
+           !is_multicast_ether_addr(
+                   ((struct ethhdr *)rx->skb->data)->h_dest) &&
+           (!local->scanning &&
+            !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
                        mod_timer(&local->dynamic_ps_timer, jiffies +
                         msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
        }
@@ -2590,7 +2642,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                        return 0;
                if (!multicast &&
                    compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
-                       if (!(sdata->dev->flags & IFF_PROMISC))
+                       if (!(sdata->dev->flags & IFF_PROMISC) ||
+                           sdata->u.mgd.use_4addr)
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
@@ -2639,7 +2692,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                                return 0;
                } else if (!ieee80211_bssid_match(bssid,
                                        sdata->vif.addr)) {
-                       if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
+                       if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+                           !ieee80211_is_beacon(hdr->frame_control))
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
@@ -2692,7 +2746,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
                if (!skb) {
                        if (net_ratelimit())
                                wiphy_debug(local->hw.wiphy,
-                                       "failed to copy multicast frame for %s\n",
+                                       "failed to copy skb for %s\n",
                                        sdata->name);
                        return true;
                }
@@ -2730,7 +2784,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
                local->dot11ReceivedFragmentCount++;
 
        if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
-                    test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+                    test_bit(SCAN_SW_SCANNING, &local->scanning)))
                status->rx_flags |= IEEE80211_RX_IN_SCAN;
 
        if (ieee80211_is_mgmt(fc))
index fb274db..489b6ad 100644 (file)
@@ -196,7 +196,8 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
 
        if (elems.ds_params && elems.ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems.ds_params[0],
+                                                     rx_status->band);
        else
                freq = rx_status->freq;
 
@@ -211,6 +212,14 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        if (bss)
                ieee80211_rx_bss_put(sdata->local, bss);
 
+       /* If we are on-operating-channel, and this packet is for the
+        * current channel, pass the pkt on up the stack so that
+        * the rest of the stack can make use of it.
+        */
+       if (ieee80211_cfg_on_oper_channel(sdata->local)
+           && (channel == sdata->local->oper_channel))
+               return RX_CONTINUE;
+
        dev_kfree_skb(skb);
        return RX_QUEUED;
 }
@@ -249,10 +258,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        return true;
 }
 
-static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
+static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
                                       bool was_hw_scan)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       bool on_oper_chan;
+       bool enable_beacons = false;
 
        lockdep_assert_held(&local->mtx);
 
@@ -266,12 +277,12 @@ static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
                aborted = true;
 
        if (WARN_ON(!local->scan_req))
-               return false;
+               return;
 
        if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
                int rc = drv_hw_scan(local, local->scan_sdata, local->hw_scan_req);
                if (rc == 0)
-                       return false;
+                       return;
        }
 
        kfree(local->hw_scan_req);
@@ -285,24 +296,28 @@ static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
        local->scanning = 0;
        local->scan_channel = NULL;
 
-       return true;
-}
+       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
 
-static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
-                                             bool was_hw_scan)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
+       if (was_hw_scan || !on_oper_chan)
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+       else
+               /* Set power back to normal operating levels. */
+               ieee80211_hw_config(local, 0);
 
-       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
        if (!was_hw_scan) {
+               bool on_oper_chan2;
                ieee80211_configure_filter(local);
                drv_sw_scan_complete(local);
-               ieee80211_offchannel_return(local, true);
+               on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+               /* We should always be on-channel at this point. */
+               WARN_ON(!on_oper_chan2);
+               if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
+                       enable_beacons = true;
+
+               ieee80211_offchannel_return(local, enable_beacons, true);
        }
 
-       mutex_lock(&local->mtx);
        ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
 
        ieee80211_mlme_notify_scan_completed(local);
        ieee80211_ibss_notify_scan_completed(local);
@@ -340,16 +355,21 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
         */
        drv_sw_scan_start(local);
 
-       ieee80211_offchannel_stop_beaconing(local);
-
        local->leave_oper_channel_time = 0;
        local->next_scan_state = SCAN_DECISION;
        local->scan_channel_idx = 0;
 
-       drv_flush(local, false);
+       /* We always want to use off-channel PS, even if we
+        * are not really leaving oper-channel.  Don't
+        * tell the AP though, as long as we are on-channel.
+        */
+       ieee80211_offchannel_enable_all_ps(local, false);
 
        ieee80211_configure_filter(local);
 
+       /* We need to set power level at maximum rate for scanning. */
+       ieee80211_hw_config(local, 0);
+
        ieee80211_queue_delayed_work(&local->hw,
                                     &local->scan_work,
                                     IEEE80211_CHANNEL_TIME);
@@ -486,7 +506,20 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
        }
        mutex_unlock(&local->iflist_mtx);
 
-       if (local->scan_channel) {
+       next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+       if (ieee80211_cfg_on_oper_channel(local)) {
+               /* We're currently on operating channel. */
+               if (next_chan == local->oper_channel)
+                       /* We don't need to move off of operating channel. */
+                       local->next_scan_state = SCAN_SET_CHANNEL;
+               else
+                       /*
+                        * We do need to leave operating channel, as next
+                        * scan is somewhere else.
+                        */
+                       local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+       } else {
                /*
                 * we're currently scanning a different channel, let's
                 * see if we can scan another channel without interfering
@@ -502,7 +535,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
                 *
                 * Otherwise switch back to the operating channel.
                 */
-               next_chan = local->scan_req->channels[local->scan_channel_idx];
 
                bad_latency = time_after(jiffies +
                                ieee80211_scan_get_channel_time(next_chan),
@@ -520,12 +552,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
                        local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
                else
                        local->next_scan_state = SCAN_SET_CHANNEL;
-       } else {
-               /*
-                * we're on the operating channel currently, let's
-                * leave that channel now to scan another one
-                */
-               local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
        }
 
        *next_delay = 0;
@@ -534,9 +560,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
 static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
                                                    unsigned long *next_delay)
 {
-       ieee80211_offchannel_stop_station(local);
-
-       __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+       /* PS will already be in off-channel mode,
+        * we do that once at the beginning of scanning.
+        */
+       ieee80211_offchannel_stop_vifs(local, false);
 
        /*
         * What if the nullfunc frames didn't arrive?
@@ -559,15 +586,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
 {
        /* switch back to the operating channel */
        local->scan_channel = NULL;
-       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+       if (!ieee80211_cfg_on_oper_channel(local))
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        /*
-        * Only re-enable station mode interface now; beaconing will be
-        * re-enabled once the full scan has been completed.
+        * Re-enable vifs and beaconing.  Leave PS
+        * in off-channel state; we will put that back
+        * on-channel at the end of scanning.
         */
-       ieee80211_offchannel_return(local, false);
-
-       __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+       ieee80211_offchannel_return(local, true, false);
 
        *next_delay = HZ / 5;
        local->next_scan_state = SCAN_DECISION;
@@ -583,8 +610,11 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
        chan = local->scan_req->channels[local->scan_channel_idx];
 
        local->scan_channel = chan;
-       if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
-               skip = 1;
+
+       /* Only call hw-config if we really need to change channels. */
+       if (chan != local->hw.conf.channel)
+               if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+                       skip = 1;
 
        /* advance state machine to next channel/band */
        local->scan_channel_idx++;
@@ -642,12 +672,14 @@ void ieee80211_scan_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, scan_work.work);
-       struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+       struct ieee80211_sub_if_data *sdata;
        unsigned long next_delay = 0;
-       bool aborted, hw_scan, finish;
+       bool aborted, hw_scan;
 
        mutex_lock(&local->mtx);
 
+       sdata = local->scan_sdata;
+
        if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
                aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
                goto out_complete;
@@ -711,17 +743,11 @@ void ieee80211_scan_work(struct work_struct *work)
        } while (next_delay == 0);
 
        ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
-       mutex_unlock(&local->mtx);
-       return;
+       goto out;
 
 out_complete:
        hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
-       finish = __ieee80211_scan_completed(&local->hw, aborted, hw_scan);
-       mutex_unlock(&local->mtx);
-       if (finish)
-               __ieee80211_scan_completed_finish(&local->hw, hw_scan);
-       return;
-
+       __ieee80211_scan_completed(&local->hw, aborted, hw_scan);
 out:
        mutex_unlock(&local->mtx);
 }
@@ -791,7 +817,6 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
 void ieee80211_scan_cancel(struct ieee80211_local *local)
 {
        bool abortscan;
-       bool finish = false;
 
        /*
         * We are only canceling software scan, or deferred scan that was not
@@ -811,14 +836,17 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
 
        mutex_lock(&local->mtx);
        abortscan = local->scan_req && !test_bit(SCAN_HW_SCANNING, &local->scanning);
-       if (abortscan)
-               finish = __ieee80211_scan_completed(&local->hw, true, false);
-       mutex_unlock(&local->mtx);
-
        if (abortscan) {
-               /* The scan is canceled, but stop work from being pending */
-               cancel_delayed_work_sync(&local->scan_work);
+               /*
+                * The scan is canceled, but stop work from being pending.
+                *
+                * If the work is currently running, it must be blocked on
+                * the mutex, but we'll set scan_sdata = NULL and it'll
+                * simply exit once it acquires the mutex.
+                */
+               cancel_delayed_work(&local->scan_work);
+               /* and clean up */
+               __ieee80211_scan_completed(&local->hw, true, false);
        }
-       if (finish)
-               __ieee80211_scan_completed_finish(&local->hw, false);
+       mutex_unlock(&local->mtx);
 }
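The scan rework above relies on ieee80211_cfg_on_oper_channel(), whose definition is not shown in this excerpt. A hedged sketch of the assumed semantics, using only fields that appear elsewhere in the diff:

static inline bool example_on_oper_channel(struct ieee80211_local *local)
{
	/* Assumed meaning: no temporary or scan channel overrides the
	 * configured operating channel. */
	return !local->tmp_channel && !local->scan_channel &&
	       local->hw.conf.channel == local->oper_channel;
}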
index c426504..5a11078 100644 (file)
@@ -899,7 +899,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        struct ieee80211_local *local = sdata->local;
        int sent, buffered;
 
-       drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
+       if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+               drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
 
        if (!skb_queue_empty(&sta->ps_tx_buf))
                sta_info_clear_tim_bit(sta);
index bbdd2a8..5768114 100644 (file)
@@ -82,6 +82,7 @@ enum ieee80211_sta_info_flags {
  * @state: session state (see above)
  * @stop_initiator: initiator of a session stop
  * @tx_stop: TX DelBA frame when stopping
+ * @buf_size: reorder buffer size at receiver
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -101,6 +102,7 @@ struct tid_ampdu_tx {
        u8 dialog_token;
        u8 stop_initiator;
        bool tx_stop;
+       u8 buf_size;
 };
 
 /**
@@ -207,6 +209,8 @@ enum plink_state {
  * @rate_ctrl_priv: rate control private per-STA pointer
  * @last_tx_rate: rate used for last transmit, to report to userspace as
  *     "the" transmit rate
+ * @last_rx_rate_idx: rx status rate index of the last data packet
+ * @last_rx_rate_flag: rx status flag of the last data packet
  * @lock: used for locking all fields that require locking, see comments
  *     in the header file.
  * @flaglock: spinlock for flags accesses
@@ -309,6 +313,8 @@ struct sta_info {
        unsigned long tx_bytes;
        unsigned long tx_fragments;
        struct ieee80211_tx_rate last_tx_rate;
+       int last_rx_rate_idx;
+       int last_rx_rate_flag;
        u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
 
        /*
index 38a7972..b936dd2 100644 (file)
@@ -98,6 +98,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
         *  (b) always process RX events before TX status events if ordering
         *      can be unknown, for example with different interrupt status
         *      bits.
+        *  (c) if PS mode transitions are manual (i.e. the flag
+        *      %IEEE80211_HW_AP_LINK_PS is set), always process PS state
+        *      changes before calling TX status events if ordering can be
+        *      unknown.
         */
        if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
            skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
@@ -314,8 +318,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                if (info->flags & IEEE80211_TX_STAT_ACK) {
                        local->ps_sdata->u.mgd.flags |=
                                        IEEE80211_STA_NULLFUNC_ACKED;
-                       ieee80211_queue_work(&local->hw,
-                                       &local->dynamic_ps_enable_work);
                } else
                        mod_timer(&local->dynamic_ps_timer, jiffies +
                                        msecs_to_jiffies(10));
@@ -323,6 +325,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                struct ieee80211_work *wk;
+               u64 cookie = (unsigned long)skb;
 
                rcu_read_lock();
                list_for_each_entry_rcu(wk, &local->work_list, list) {
@@ -334,8 +337,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                        break;
                }
                rcu_read_unlock();
+               if (local->hw_roc_skb_for_status == skb) {
+                       cookie = local->hw_roc_cookie ^ 2;
+                       local->hw_roc_skb_for_status = NULL;
+               }
+
+               if (cookie == local->hw_offchan_tx_cookie)
+                       local->hw_offchan_tx_cookie = 0;
+
                cfg80211_mgmt_tx_status(
-                       skb->dev, (unsigned long) skb, skb->data, skb->len,
+                       skb->dev, cookie, skb->data, skb->len,
                        !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
        }
 
index 5950e3a..081dcaf 100644 (file)
 #include "wme.h"
 #include "rate.h"
 
-#define IEEE80211_TX_OK                0
-#define IEEE80211_TX_AGAIN     1
-#define IEEE80211_TX_PENDING   2
-
 /* misc utils */
 
 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
@@ -236,6 +232,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
                ieee80211_stop_queues_by_reason(&local->hw,
                                                IEEE80211_QUEUE_STOP_REASON_PS);
+               ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
                ieee80211_queue_work(&local->hw,
                                     &local->dynamic_ps_disable_work);
        }
@@ -257,7 +254,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
        if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
                return TX_CONTINUE;
 
-       if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+       if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+           test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
            !ieee80211_is_probe_req(hdr->frame_control) &&
            !ieee80211_is_nullfunc(hdr->frame_control))
                /*
@@ -1283,16 +1281,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        return TX_CONTINUE;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local,
-                         struct sk_buff **skbp,
-                         struct sta_info *sta,
-                         bool txpending)
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
+                          struct sta_info *sta, bool txpending)
 {
        struct sk_buff *skb = *skbp, *next;
        struct ieee80211_tx_info *info;
        struct ieee80211_sub_if_data *sdata;
        unsigned long flags;
-       int ret, len;
+       int len;
        bool fragm = false;
 
        while (skb) {
@@ -1300,13 +1299,37 @@ static int __ieee80211_tx(struct ieee80211_local *local,
                __le16 fc;
 
                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-               ret = IEEE80211_TX_OK;
                if (local->queue_stop_reasons[q] ||
-                   (!txpending && !skb_queue_empty(&local->pending[q])))
-                       ret = IEEE80211_TX_PENDING;
+                   (!txpending && !skb_queue_empty(&local->pending[q]))) {
+                       /*
+                        * Since queue is stopped, queue up frames for later
+                        * transmission from the tx-pending tasklet when the
+                        * queue is woken again.
+                        */
+
+                       do {
+                               next = skb->next;
+                               skb->next = NULL;
+                               /*
+                                * NB: If txpending is true, next must already
+                                * be NULL since we must've gone through this
+                                * loop before already; therefore we can just
+                                * queue the frame to the head without worrying
+                                * about reordering of fragments.
+                                */
+                               if (unlikely(txpending))
+                                       __skb_queue_head(&local->pending[q],
+                                                        skb);
+                               else
+                                       __skb_queue_tail(&local->pending[q],
+                                                        skb);
+                       } while ((skb = next));
+
+                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+                                              flags);
+                       return false;
+               }
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-               if (ret != IEEE80211_TX_OK)
-                       return ret;
 
                info = IEEE80211_SKB_CB(skb);
 
@@ -1341,15 +1364,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
                        info->control.sta = NULL;
 
                fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
-               ret = drv_tx(local, skb);
-               if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
-                       dev_kfree_skb(skb);
-                       ret = NETDEV_TX_OK;
-               }
-               if (ret != NETDEV_TX_OK) {
-                       info->control.vif = &sdata->vif;
-                       return IEEE80211_TX_AGAIN;
-               }
+               drv_tx(local, skb);
 
                ieee80211_tpt_led_trig_tx(local, fc, len);
                *skbp = skb = next;
@@ -1357,7 +1372,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
                fragm = true;
        }
 
-       return IEEE80211_TX_OK;
+       return true;
 }
 
 /*
@@ -1394,7 +1409,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
        /* handlers after fragment must be aware of tx info fragmentation! */
        CALL_TXH(ieee80211_tx_h_stats);
        CALL_TXH(ieee80211_tx_h_encrypt);
-       CALL_TXH(ieee80211_tx_h_calculate_duration);
+       if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+               CALL_TXH(ieee80211_tx_h_calculate_duration);
 #undef CALL_TXH
 
  txh_done:
@@ -1416,23 +1432,24 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
        return 0;
 }
 
-static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                         struct sk_buff *skb, bool txpending)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_data tx;
        ieee80211_tx_result res_prepare;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct sk_buff *next;
-       unsigned long flags;
-       int ret, retries;
        u16 queue;
+       bool result = true;
 
        queue = skb_get_queue_mapping(skb);
 
        if (unlikely(skb->len < 10)) {
                dev_kfree_skb(skb);
-               return;
+               return true;
        }
 
        rcu_read_lock();
@@ -1442,85 +1459,19 @@ static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
        if (unlikely(res_prepare == TX_DROP)) {
                dev_kfree_skb(skb);
-               rcu_read_unlock();
-               return;
+               goto out;
        } else if (unlikely(res_prepare == TX_QUEUED)) {
-               rcu_read_unlock();
-               return;
+               goto out;
        }
 
        tx.channel = local->hw.conf.channel;
        info->band = tx.channel->band;
 
-       if (invoke_tx_handlers(&tx))
-               goto out;
-
-       retries = 0;
- retry:
-       ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
-       switch (ret) {
-       case IEEE80211_TX_OK:
-               break;
-       case IEEE80211_TX_AGAIN:
-               /*
-                * Since there are no fragmented frames on A-MPDU
-                * queues, there's no reason for a driver to reject
-                * a frame there, warn and drop it.
-                */
-               if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-                       goto drop;
-               /* fall through */
-       case IEEE80211_TX_PENDING:
-               skb = tx.skb;
-
-               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-
-               if (local->queue_stop_reasons[queue] ||
-                   !skb_queue_empty(&local->pending[queue])) {
-                       /*
-                        * if queue is stopped, queue up frames for later
-                        * transmission from the tasklet
-                        */
-                       do {
-                               next = skb->next;
-                               skb->next = NULL;
-                               if (unlikely(txpending))
-                                       __skb_queue_head(&local->pending[queue],
-                                                        skb);
-                               else
-                                       __skb_queue_tail(&local->pending[queue],
-                                                        skb);
-                       } while ((skb = next));
-
-                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-                                              flags);
-               } else {
-                       /*
-                        * otherwise retry, but this is a race condition or
-                        * a driver bug (which we warn about if it persists)
-                        */
-                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-                                              flags);
-
-                       retries++;
-                       if (WARN(retries > 10, "tx refused but queue active\n"))
-                               goto drop;
-                       goto retry;
-               }
-       }
+       if (!invoke_tx_handlers(&tx))
+               result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
  out:
        rcu_read_unlock();
-       return;
-
- drop:
-       rcu_read_unlock();
-
-       skb = tx.skb;
-       while (skb) {
-               next = skb->next;
-               dev_kfree_skb(skb);
-               skb = next;
-       }
+       return result;
 }
 
 /* device xmit handlers */
@@ -1547,7 +1498,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
                skb_orphan(skb);
        }
 
-       if (skb_header_cloned(skb))
+       if (skb_cloned(skb))
                I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
        else if (head_need || tail_need)
                I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -1750,7 +1701,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        __le16 fc;
        struct ieee80211_hdr hdr;
        struct ieee80211s_hdr mesh_hdr __maybe_unused;
-       struct mesh_path *mppath = NULL;
+       struct mesh_path __maybe_unused *mppath = NULL;
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
@@ -1815,19 +1766,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        mppath = mpp_path_lookup(skb->data, sdata);
 
                /*
-                * Do not use address extension, if it is a packet from
-                * the same interface and the destination is not being
-                * proxied by any other mest point.
+                * Use address extension if it is a packet from
+                * another interface or if we know the destination
+                * is being proxied by a portal (i.e. portal address
+                * differs from proxied address)
                 */
                if (compare_ether_addr(sdata->vif.addr,
                                       skb->data + ETH_ALEN) == 0 &&
-                   (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
+                   !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
                                        skb->data, skb->data + ETH_ALEN);
                        meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
                                        sdata, NULL, NULL);
                } else {
-                       /* packet from other interface */
                        int is_mesh_mcast = 1;
                        const u8 *mesh_da;
 
@@ -2067,6 +2018,11 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
                skb_queue_purge(&local->pending[i]);
 }
 
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead,
+ * which in this case means re-queued -- take as an indication to stop sending
+ * more pending frames.
+ */
 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
                                     struct sk_buff *skb)
 {
@@ -2074,20 +2030,17 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
        struct ieee80211_sub_if_data *sdata;
        struct sta_info *sta;
        struct ieee80211_hdr *hdr;
-       int ret;
-       bool result = true;
+       bool result;
 
        sdata = vif_to_sdata(info->control.vif);
 
        if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
-               ieee80211_tx(sdata, skb, true);
+               result = ieee80211_tx(sdata, skb, true);
        } else {
                hdr = (struct ieee80211_hdr *)skb->data;
                sta = sta_info_get(sdata, hdr->addr1);
 
-               ret = __ieee80211_tx(local, &skb, sta, true);
-               if (ret != IEEE80211_TX_OK)
-                       result = false;
+               result = __ieee80211_tx(local, &skb, sta, true);
        }
 
        return result;
@@ -2129,8 +2082,6 @@ void ieee80211_tx_pending(unsigned long data)
                                                flags);
 
                        txok = ieee80211_tx_pending_skb(local, skb);
-                       if (!txok)
-                               __skb_queue_head(&local->pending[i], skb);
                        spin_lock_irqsave(&local->queue_stop_reason_lock,
                                          flags);
                        if (!txok)
@@ -2178,6 +2129,8 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
        if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
                aid0 = 1;
 
+       bss->dtim_bc_mc = aid0 == 1;
+
        if (have_bits) {
                /* Find largest even number N1 so that bits numbered 1 through
                 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
@@ -2230,6 +2183,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 
        sdata = vif_to_sdata(vif);
 
+       if (!ieee80211_sdata_running(sdata))
+               goto out;
+
        if (tim_offset)
                *tim_offset = 0;
        if (tim_length)
@@ -2238,7 +2194,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                ap = &sdata->u.ap;
                beacon = rcu_dereference(ap->beacon);
-               if (ap && beacon) {
+               if (beacon) {
                        /*
                         * headroom, head length,
                         * tail length and maximum TIM length
@@ -2299,6 +2255,11 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                struct ieee80211_mgmt *mgmt;
                u8 *pos;
 
+#ifdef CONFIG_MAC80211_MESH
+               if (!sdata->u.mesh.mesh_id_len)
+                       goto out;
+#endif
+
                /* headroom, head length, tail length and maximum TIM length */
                skb = dev_alloc_skb(local->tx_headroom + 400 +
                                sdata->u.mesh.vendor_ie_len);
@@ -2540,7 +2501,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
        if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
                goto out;
 
-       if (bss->dtim_count != 0)
+       if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
                goto out; /* send buffered bc/mc only after DTIM beacon */
 
        while (1) {
index cf68700..556647a 100644 (file)
@@ -986,12 +986,6 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                u16 cap = sband->ht_cap.cap;
                __le16 tmp;
 
-               if (ieee80211_disable_40mhz_24ghz &&
-                   sband->band == IEEE80211_BAND_2GHZ) {
-                       cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-                       cap &= ~IEEE80211_HT_CAP_SGI_40;
-               }
-
                *pos++ = WLAN_EID_HT_CAPABILITY;
                *pos++ = sizeof(struct ieee80211_ht_cap);
                memset(pos, 0, sizeof(struct ieee80211_ht_cap));
@@ -1210,7 +1204,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC;
+                       mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
+                       mutex_unlock(&sdata->u.mgd.mtx);
                        break;
                case NL80211_IFTYPE_ADHOC:
                        changed |= BSS_CHANGED_IBSS;
index 36305e0..e73c8ca 100644 (file)
@@ -30,7 +30,6 @@
 #define IEEE80211_AUTH_MAX_TRIES 3
 #define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
 #define IEEE80211_ASSOC_MAX_TRIES 3
-#define IEEE80211_MAX_PROBE_TRIES 5
 
 enum work_action {
        WORK_ACT_MISMATCH,
@@ -126,12 +125,6 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
 
        /* determine capability flags */
 
-       if (ieee80211_disable_40mhz_24ghz &&
-           sband->band == IEEE80211_BAND_2GHZ) {
-               cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-               cap &= ~IEEE80211_HT_CAP_SGI_40;
-       }
-
        switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
@@ -874,6 +867,44 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
        kfree_skb(skb);
 }
 
+static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
+                                      enum nl80211_channel_type oper_ct)
+{
+       switch (wk_ct) {
+       case NL80211_CHAN_NO_HT:
+               return true;
+       case NL80211_CHAN_HT20:
+               if (oper_ct != NL80211_CHAN_NO_HT)
+                       return true;
+               return false;
+       case NL80211_CHAN_HT40MINUS:
+       case NL80211_CHAN_HT40PLUS:
+               return (wk_ct == oper_ct);
+       }
+       WARN_ON(1); /* shouldn't get here */
+       return false;
+}
+
+static enum nl80211_channel_type
+ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
+                 enum nl80211_channel_type oper_ct)
+{
+       switch (wk_ct) {
+       case NL80211_CHAN_NO_HT:
+               return oper_ct;
+       case NL80211_CHAN_HT20:
+               if (oper_ct != NL80211_CHAN_NO_HT)
+                       return oper_ct;
+               return wk_ct;
+       case NL80211_CHAN_HT40MINUS:
+       case NL80211_CHAN_HT40PLUS:
+               return wk_ct;
+       }
+       WARN_ON(1); /* shouldn't get here */
+       return wk_ct;
+}
+
+
 static void ieee80211_work_timer(unsigned long data)
 {
        struct ieee80211_local *local = (void *) data;
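A few worked cases for the two channel-type helpers added above, to make the coexistence rules concrete:

/*
 * wk_ct = NL80211_CHAN_NO_HT,     oper_ct = NL80211_CHAN_HT40PLUS
 *     -> coexists, calc_ct = NL80211_CHAN_HT40PLUS (keep the wider type)
 * wk_ct = NL80211_CHAN_HT20,      oper_ct = NL80211_CHAN_NO_HT
 *     -> does not coexist, calc_ct = NL80211_CHAN_HT20
 * wk_ct = NL80211_CHAN_HT40MINUS, oper_ct = NL80211_CHAN_HT40MINUS
 *     -> coexists, calc_ct = NL80211_CHAN_HT40MINUS
 */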
@@ -924,18 +955,52 @@ static void ieee80211_work_work(struct work_struct *work)
                }
 
                if (!started && !local->tmp_channel) {
+                       bool on_oper_chan;
+                       bool tmp_chan_changed = false;
+                       bool on_oper_chan2;
+                       enum nl80211_channel_type wk_ct;
+                       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+                       /* Work with existing channel type if possible. */
+                       wk_ct = wk->chan_type;
+                       if (wk->chan == local->hw.conf.channel)
+                               wk_ct = ieee80211_calc_ct(wk->chan_type,
+                                               local->hw.conf.channel_type);
+
+                       if (local->tmp_channel)
+                               if ((local->tmp_channel != wk->chan) ||
+                                   (local->tmp_channel_type != wk_ct))
+                                       tmp_chan_changed = true;
+
+                       local->tmp_channel = wk->chan;
+                       local->tmp_channel_type = wk_ct;
                        /*
-                        * TODO: could optimize this by leaving the
-                        *       station vifs in awake mode if they
-                        *       happen to be on the same channel as
-                        *       the requested channel
+                        * Leave the station vifs in awake mode if they
+                        * happen to be on the same channel as
+                        * the requested channel.
                         */
-                       ieee80211_offchannel_stop_beaconing(local);
-                       ieee80211_offchannel_stop_station(local);
+                       on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+                       if (on_oper_chan != on_oper_chan2) {
+                               if (on_oper_chan2) {
+                                       /* going off oper channel, PS too */
+                                       ieee80211_offchannel_stop_vifs(local,
+                                                                      true);
+                                       ieee80211_hw_config(local, 0);
+                               } else {
+                                       /* going on channel, but leave PS
+                                        * off-channel. */
+                                       ieee80211_hw_config(local, 0);
+                                       ieee80211_offchannel_return(local,
+                                                                   true,
+                                                                   false);
+                               }
+                       } else if (tmp_chan_changed)
+                               /* Still off-channel, but on some other
+                                * channel, so update hardware.
+                                * PS should already be off-channel.
+                                */
+                               ieee80211_hw_config(local, 0);
 
-                       local->tmp_channel = wk->chan;
-                       local->tmp_channel_type = wk->chan_type;
-                       ieee80211_hw_config(local, 0);
                        started = true;
                        wk->timeout = jiffies;
                }
@@ -1005,15 +1070,34 @@ static void ieee80211_work_work(struct work_struct *work)
                        continue;
                if (wk->chan != local->tmp_channel)
                        continue;
-               if (wk->chan_type != local->tmp_channel_type)
+               if (ieee80211_work_ct_coexists(wk->chan_type,
+                                              local->tmp_channel_type))
                        continue;
                remain_off_channel = true;
        }
 
        if (!remain_off_channel && local->tmp_channel) {
+               bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
                local->tmp_channel = NULL;
-               ieee80211_hw_config(local, 0);
-               ieee80211_offchannel_return(local, true);
+               /* If tmp_channel wasn't operating channel, then
+                * we need to go back on-channel.
+                * NOTE:  If we can ever be here while scanning,
+                * or if the hw_config() channel config logic changes,
+                * then we may need to do a more thorough check to see if
+                * we still need to do a hardware config.  Currently,
+                * we cannot be here while scanning, however.
+                */
+               if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+                       ieee80211_hw_config(local, 0);
+
+               /* At the least, we need to disable offchannel_ps,
+                * so just go ahead and run the entire offchannel
+                * return logic here.  We *could* skip enabling
+                * beaconing if we were already on-oper-channel
+                * as a future optimization.
+                */
+               ieee80211_offchannel_return(local, true, true);
+
                /* give connection some time to breathe */
                run_again(local, jiffies + HZ/2);
        }
index bee230d..f1765de 100644 (file)
 ieee80211_tx_result
 ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 {
-       u8 *data, *key, *mic, key_offset;
+       u8 *data, *key, *mic;
        size_t data_len;
        unsigned int hdrlen;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = tx->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       int authenticator;
        int tail;
 
        hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,6 +46,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
        data = skb->data + hdrlen;
        data_len = skb->len - hdrlen;
 
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) {
+               /* Need to use software crypto for the test */
+               info->control.hw_key = NULL;
+       }
+
        if (info->control.hw_key &&
            !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
            !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
@@ -62,17 +66,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
                    skb_headroom(skb) < TKIP_IV_LEN))
                return TX_DROP;
 
-#if 0
-       authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
-#else
-       authenticator = 1;
-#endif
-       key_offset = authenticator ?
-               NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY :
-               NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
-       key = &tx->key->conf.key[key_offset];
+       key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
        mic = skb_put(skb, MICHAEL_MIC_LEN);
        michael_mic(key, hdr, data, data_len, mic);
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
+               mic[0]++;
 
        return TX_CONTINUE;
 }
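The IEEE80211_TX_INTFL_TKIP_MIC_FAILURE handling above exists only to exercise TKIP countermeasures: it forces a software MIC and then corrupts it. A hedged sketch of a test-side caller (hypothetical, not part of this patch):

static void example_mark_mic_failure(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Frame will get a deliberately broken Michael MIC. */
	info->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
}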
@@ -81,14 +79,13 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 ieee80211_rx_result
 ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 {
-       u8 *data, *key = NULL, key_offset;
+       u8 *data, *key = NULL;
        size_t data_len;
        unsigned int hdrlen;
        u8 mic[MICHAEL_MIC_LEN];
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       int authenticator = 1, wpa_test = 0;
 
        /* No way to verify the MIC if the hardware stripped it */
        if (status->flag & RX_FLAG_MMIC_STRIPPED)
@@ -106,17 +103,9 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
        data = skb->data + hdrlen;
        data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
 
-#if 0
-       authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */
-#else
-       authenticator = 1;
-#endif
-       key_offset = authenticator ?
-               NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY :
-               NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
-       key = &rx->key->conf.key[key_offset];
+       key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
        michael_mic(key, hdr, data, data_len, mic);
-       if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
+       if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) {
                if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
                        return RX_DROP_UNUSABLE;
 
@@ -208,7 +197,7 @@ ieee80211_rx_result
 ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
-       int hdrlen, res, hwaccel = 0, wpa_test = 0;
+       int hdrlen, res, hwaccel = 0;
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -235,7 +224,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
                                          hdr->addr1, hwaccel, rx->queue,
                                          &rx->tkip_iv32,
                                          &rx->tkip_iv16);
-       if (res != TKIP_DECRYPT_OK || wpa_test)
+       if (res != TKIP_DECRYPT_OK)
                return RX_DROP_UNUSABLE;
 
        /* Trim ICV */
index 1534f2b..c3f988a 100644 (file)
@@ -85,6 +85,17 @@ config NF_CONNTRACK_EVENTS
 
          If unsure, say `N'.
 
+config NF_CONNTRACK_TIMESTAMP
+       bool  'Connection tracking timestamping'
+       depends on NETFILTER_ADVANCED
+       help
+         This option enables support for connection tracking timestamping.
+         This allows you to store the flow start time and to obtain
+         the flow stop time (once the flow has been destroyed) via
+         connection tracking events.
+
+         If unsure, say `N'.
+
 config NF_CT_PROTO_DCCP
        tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
        depends on EXPERIMENTAL
@@ -185,9 +196,13 @@ config NF_CONNTRACK_IRC
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_BROADCAST
+       tristate
+
 config NF_CONNTRACK_NETBIOS_NS
        tristate "NetBIOS name service protocol support"
        depends on NETFILTER_ADVANCED
+       select NF_CONNTRACK_BROADCAST
        help
          NetBIOS name service requests are sent as broadcast messages from an
          unprivileged port and responded to with unicast messages to the
@@ -204,6 +219,21 @@ config NF_CONNTRACK_NETBIOS_NS
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_SNMP
+       tristate "SNMP service protocol support"
+       depends on NETFILTER_ADVANCED
+       select NF_CONNTRACK_BROADCAST
+       help
+         SNMP service requests are sent as broadcast messages from an
+         unprivileged port and responded to with unicast messages to the
+         same port. This makes them hard to firewall properly because connection
+         tracking doesn't deal with broadcasts. This helper tracks locally
+         originating SNMP service requests and the corresponding
+         responses. It relies on correct IP address configuration, specifically
+         netmask and broadcast address.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NF_CONNTRACK_PPTP
        tristate "PPtP protocol support"
        depends on NETFILTER_ADVANCED
@@ -322,10 +352,32 @@ config NETFILTER_XT_CONNMARK
        ctmark), similarly to the packet mark (nfmark). Using this
        target and match, you can set and match on this mark.
 
+config NETFILTER_XT_SET
+       tristate 'set target and match support'
+       depends on IP_SET
+       depends on NETFILTER_ADVANCED
+       help
+         This option adds the "SET" target and "set" match.
+
+         Using this target and match, you can add/delete and match
+         elements in the sets created by ipset(8).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 # alphabetically ordered list of targets
 
 comment "Xtables targets"
 
+config NETFILTER_XT_TARGET_AUDIT
+       tristate "AUDIT target support"
+       depends on AUDIT
+       depends on NETFILTER_ADVANCED
+       ---help---
+         This option adds an 'AUDIT' target, which can be used to create
+         audit records for packets dropped/accepted.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_CHECKSUM
        tristate "CHECKSUM target support"
        depends on IP_NF_MANGLE || IP6_NF_MANGLE
@@ -477,6 +529,7 @@ config NETFILTER_XT_TARGET_NFLOG
 config NETFILTER_XT_TARGET_NFQUEUE
        tristate '"NFQUEUE" target Support'
        depends on NETFILTER_ADVANCED
+       select NETFILTER_NETLINK_QUEUE
        help
          This target replaced the old obsolete QUEUE target.
 
@@ -596,6 +649,17 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP
 
 comment "Xtables matches"
 
+config NETFILTER_XT_MATCH_ADDRTYPE
+       tristate '"addrtype" address type match support'
+       depends on NETFILTER_ADVANCED
+       depends on (IPV6 || IPV6=n)
+       ---help---
+         This option allows you to match what routing thinks of an address,
+         e.g. UNICAST, LOCAL, BROADCAST, ...
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_CLUSTER
        tristate '"cluster" match support'
        depends on NF_CONNTRACK
@@ -685,6 +749,15 @@ config NETFILTER_XT_MATCH_DCCP
          If you want to compile it as a module, say M here and read
          <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_DEVGROUP
+       tristate '"devgroup" match support'
+       depends on NETFILTER_ADVANCED
+       help
+         This option adds a `devgroup' match, which allows you to match on the
+         device group a network device is assigned to.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_DSCP
        tristate '"dscp" and "tos" match support'
        depends on NETFILTER_ADVANCED
@@ -886,7 +959,7 @@ config NETFILTER_XT_MATCH_RATEEST
 config NETFILTER_XT_MATCH_REALM
        tristate  '"realm" match support'
        depends on NETFILTER_ADVANCED
-       select NET_CLS_ROUTE
+       select IP_ROUTE_CLASSID
        help
          This option adds a `realm' match, which allows you to use the realm
          key from the routing subsystem inside iptables.
@@ -1011,4 +1084,6 @@ endif # NETFILTER_XTABLES
 
 endmenu
 
+source "net/netfilter/ipset/Kconfig"
+
 source "net/netfilter/ipvs/Kconfig"
index 441050f..1a02853 100644 (file)
@@ -1,6 +1,7 @@
 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
 
 nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -28,7 +29,9 @@ obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o
 obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
 obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
 obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o
 obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o
 obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
 obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
@@ -43,8 +46,10 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 # combos
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
 
 # targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -65,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
 
 # matches
+obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
@@ -72,6 +78,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
@@ -101,5 +108,8 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
 
+# ipset
+obj-$(CONFIG_IP_SET) += ipset/
+
 # IPVS
 obj-$(CONFIG_IP_VS) += ipvs/
index 32fcbe2..899b71c 100644 (file)
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head,
 
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
+repeat:
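+               /* NF_REPEAT from the hook below jumps back here and re-runs
+                * the same hook entry instead of rewinding the list iterator */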
                verdict = elem->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
 #endif
                        if (verdict != NF_REPEAT)
                                return verdict;
-                       *i = (*i)->prev;
+                       goto repeat;
                }
        }
        return NF_ACCEPT;
@@ -175,13 +176,21 @@ next_hook:
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
-               ret = -(verdict >> NF_VERDICT_BITS);
+               ret = NF_DROP_GETERR(verdict);
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-               if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-                             verdict >> NF_VERDICT_BITS))
-                       goto next_hook;
+               ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+                              verdict >> NF_VERDICT_QBITS);
+               if (ret < 0) {
+                       if (ret == -ECANCELED)
+                               goto next_hook;
+                       if (ret == -ESRCH &&
+                          (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+                               goto next_hook;
+                       kfree_skb(skb);
+               }
+               ret = 0;
        }
        rcu_read_unlock();
        return ret;
@@ -214,7 +223,7 @@ EXPORT_SYMBOL(skb_make_writable);
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
 EXPORT_SYMBOL(ip_ct_attach);
 
 void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
@@ -231,7 +240,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(nf_ct_attach);
 
-void (*nf_ct_destroy)(struct nf_conntrack *);
+void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
 EXPORT_SYMBOL(nf_ct_destroy);
 
 void nf_conntrack_destroy(struct nf_conntrack *nfct)
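
The nf_hook_slow()/nf_iterate() changes above let hooks return richer verdicts: an errno can be folded into NF_DROP and recovered with NF_DROP_GETERR(), and an NF_QUEUE verdict can carry a queue number plus the new NF_VERDICT_FLAG_QUEUE_BYPASS bit so the packet is accepted rather than dropped when no userspace program is bound to the queue. Below is a minimal hook sketch using those encodings; the hook body is hypothetical, and the NF_DROP_ERR()/NF_QUEUE_NR() macro names are assumed from the corresponding include/linux/netfilter.h update.

    #include <linux/ip.h>
    #include <linux/netfilter.h>
    #include <linux/skbuff.h>

    static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
    {
            if (skb->len < sizeof(struct iphdr))
                    /* nf_hook_slow() frees the skb and returns -EINVAL */
                    return NF_DROP_ERR(-EINVAL);

            /* Queue to userspace queue 3, but fall through to the next hook
             * instead of dropping if nobody listens on that queue. */
            return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
    }
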
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
new file mode 100644 (file)
index 0000000..2c5b348
--- /dev/null
@@ -0,0 +1,122 @@
+menuconfig IP_SET
+       tristate "IP set support"
+       depends on INET && NETFILTER
+       depends on NETFILTER_NETLINK
+       help
+         This option adds IP set support to the kernel.
+         In order to define and use the sets, you need the userspace utility
+         ipset(8). You can use the sets in netfilter via the "set" match
+         and "SET" target.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+if IP_SET
+
+config IP_SET_MAX
+       int "Maximum number of IP sets"
+       default 256
+       range 2 65534
+       depends on IP_SET
+       help
+         You can define here the default value of the maximum number
+         of IP sets for the kernel.
+
+         The value can be overridden by the 'max_sets' module
+         parameter of the 'ip_set' module.
+
+config IP_SET_BITMAP_IP
+       tristate "bitmap:ip set support"
+       depends on IP_SET
+       help
+         This option adds the bitmap:ip set type support, by which one
+         can store IPv4 addresses (or network addresses) from a range.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_BITMAP_IPMAC
+       tristate "bitmap:ip,mac set support"
+       depends on IP_SET
+       help
+         This option adds the bitmap:ip,mac set type support, by which one
+         can store IPv4 address and (source) MAC address pairs from a range.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_BITMAP_PORT
+       tristate "bitmap:port set support"
+       depends on IP_SET
+       help
+         This option adds the bitmap:port set type support, by which one
+         can store TCP/UDP port numbers from a range.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IP
+       tristate "hash:ip set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip set type support, by which one
+         can store arbitrary IPv4 or IPv6 addresses (or network addresses)
+         in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORT
+       tristate "hash:ip,port set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,port set type support, by which one
+         can store IPv4/IPv6 address and protocol/port pairs.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORTIP
+       tristate "hash:ip,port,ip set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,port,ip set type support, by which
+         one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+         address triples in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORTNET
+       tristate "hash:ip,port,net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,port,net set type support, by which
+         one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+         network address/prefix triples in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_NET
+       tristate "hash:net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net set type support, by which
+         one can store IPv4/IPv6 network address/prefix elements in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_NETPORT
+       tristate "hash:net,port set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net,port set type support, by which
+         one can store IPv4/IPv6 network address/prefix and
+         protocol/port pairs as elements in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_LIST_SET
+       tristate "list:set set support"
+       depends on IP_SET
+       help
+         This option adds the list:set set type support. In this
+         kind of set one can store the names of other sets and it forms
+         an ordered union of the member sets.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+endif # IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
new file mode 100644 (file)
index 0000000..5adbdab
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Makefile for the ipset modules
+#
+
+ip_set-y := ip_set_core.o ip_set_getport.o pfxlen.o
+
+# ipset core
+obj-$(CONFIG_IP_SET) += ip_set.o
+
+# bitmap types
+obj-$(CONFIG_IP_SET_BITMAP_IP) += ip_set_bitmap_ip.o
+obj-$(CONFIG_IP_SET_BITMAP_IPMAC) += ip_set_bitmap_ipmac.o
+obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
+
+# hash types
+obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
+obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+
+# list types
+obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
new file mode 100644 (file)
index 0000000..bca9699
--- /dev/null
@@ -0,0 +1,587 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip");
+
+/* Type structure */
+struct bitmap_ip {
+       void *members;          /* the set members */
+       u32 first_ip;           /* host byte order, included in range */
+       u32 last_ip;            /* host byte order, included in range */
+       u32 elements;           /* number of max elements in the set */
+       u32 hosts;              /* number of hosts in a subnet */
+       size_t memsize;         /* members size */
+       u8 netmask;             /* subnet netmask */
+       u32 timeout;            /* timeout parameter */
+       struct timer_list gc;   /* garbage collection */
+};
+
+/* Base variant */
+
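+/* With the default netmask of 32, hosts is 1 and an address maps simply to
+ * id = ip - first_ip (e.g. 10.0.0.5 in a 10.0.0.0-10.0.0.255 set is bit 5);
+ * a smaller netmask groups addresses into blocks of 'hosts' addresses. */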
+static inline u32
+ip_to_id(const struct bitmap_ip *m, u32 ip)
+{
+       return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+}
+
+static int
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ip *map = set->data;
+       u16 id = *(u16 *)value;
+
+       return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (test_and_set_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (!test_and_clear_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_ip_list(const struct ip_set *set,
+              struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ip *map = set->data;
+       struct nlattr *atd, *nested;
+       u32 id, first = cb->args[2];
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] < map->elements; cb->args[2]++) {
+               id = cb->args[2];
+               if (!test_bit(id, map->members))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id * map->hosts));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ip *map = set->data;
+       const unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       if (ip_set_timeout_test(members[id]))
+               return -IPSET_ERR_EXIST;
+
+       members[id] = ip_set_timeout_set(timeout);
+
+       return 0;
+}
+
+static int
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+       int ret = -IPSET_ERR_EXIST;
+
+       if (ip_set_timeout_test(members[id]))
+               ret = 0;
+
+       members[id] = IPSET_ELEM_UNSET;
+       return ret;
+}
+
+static int
+bitmap_ip_tlist(const struct ip_set *set,
+               struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ip *map = set->data;
+       struct nlattr *adt, *nested;
+       u32 id, first = cb->args[2];
+       const unsigned long *members = map->members;
+
+       adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!adt)
+               return -EMSGSIZE;
+       for (; cb->args[2] < map->elements; cb->args[2]++) {
+               id = cb->args[2];
+               if (!ip_set_timeout_test(members[id]))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, adt);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id * map->hosts));
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                             htonl(ip_set_timeout_get(members[id])));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, adt);
+
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, adt);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static int
+bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
+              enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct bitmap_ip *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 ip;
+
+       ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+       if (ip < map->first_ip || ip > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       ip = ip_to_id(map, ip);
+
+       return adtfn(set, &ip, map->timeout);
+}
+
+static int
+bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       struct bitmap_ip *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 timeout = map->timeout;
+       u32 ip, ip_to, id;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       if (ip < map->first_ip || ip > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(map->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST) {
+               id = ip_to_id(map, ip);
+               return adtfn(set, &id, timeout);
+       }
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to) {
+                       swap(ip, ip_to);
+                       if (ip < map->first_ip)
+                               return -IPSET_ERR_BITMAP_RANGE;
+               }
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       if (ip_to > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       for (; !before(ip_to, ip); ip += map->hosts) {
+               id = ip_to_id(map, ip);
+               ret = adtfn(set, &id, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static void
+bitmap_ip_destroy(struct ip_set *set)
+{
+       struct bitmap_ip *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+
+       ip_set_free(map->members);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static void
+bitmap_ip_flush(struct ip_set *set)
+{
+       struct bitmap_ip *map = set->data;
+
+       memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct bitmap_ip *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+       if (map->netmask != 32)
+               NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map) + map->memsize));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static bool
+bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct bitmap_ip *x = a->data;
+       const struct bitmap_ip *y = b->data;
+
+       return x->first_ip == y->first_ip &&
+              x->last_ip == y->last_ip &&
+              x->netmask == y->netmask &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ip = {
+       .kadt   = bitmap_ip_kadt,
+       .uadt   = bitmap_ip_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ip_add,
+               [IPSET_DEL] = bitmap_ip_del,
+               [IPSET_TEST] = bitmap_ip_test,
+       },
+       .destroy = bitmap_ip_destroy,
+       .flush  = bitmap_ip_flush,
+       .head   = bitmap_ip_head,
+       .list   = bitmap_ip_list,
+       .same_set = bitmap_ip_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tip = {
+       .kadt   = bitmap_ip_kadt,
+       .uadt   = bitmap_ip_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ip_tadd,
+               [IPSET_DEL] = bitmap_ip_tdel,
+               [IPSET_TEST] = bitmap_ip_ttest,
+       },
+       .destroy = bitmap_ip_destroy,
+       .flush  = bitmap_ip_flush,
+       .head   = bitmap_ip_head,
+       .list   = bitmap_ip_tlist,
+       .same_set = bitmap_ip_same_set,
+};
+
+static void
+bitmap_ip_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct bitmap_ip *map = set->data;
+       unsigned long *table = map->members;
+       u32 id;
+
+       /* We run parallel with other readers (test element)
+        * but adding/deleting new entries is locked out */
+       read_lock_bh(&set->lock);
+       for (id = 0; id < map->elements; id++)
+               if (ip_set_timeout_expired(table[id]))
+                       table[id] = IPSET_ELEM_UNSET;
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+bitmap_ip_gc_init(struct ip_set *set)
+{
+       struct bitmap_ip *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = bitmap_ip_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+           u32 first_ip, u32 last_ip,
+           u32 elements, u32 hosts, u8 netmask)
+{
+       map->members = ip_set_alloc(map->memsize);
+       if (!map->members)
+               return false;
+       map->first_ip = first_ip;
+       map->last_ip = last_ip;
+       map->elements = elements;
+       map->hosts = hosts;
+       map->netmask = netmask;
+       map->timeout = IPSET_NO_TIMEOUT;
+
+       set->data = map;
+       set->family = AF_INET;
+
+       return true;
+}
+
+static int
+bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct bitmap_ip *map;
+       u32 first_ip, last_ip, hosts, elements;
+       u8 netmask = 32;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+               if (ret)
+                       return ret;
+               if (first_ip > last_ip) {
+                       u32 tmp = first_ip;
+
+                       first_ip = last_ip;
+                       last_ip = tmp;
+               }
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr >= 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               last_ip = first_ip | ~ip_set_hostmask(cidr);
+       } else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_NETMASK]) {
+               netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+               if (netmask > 32)
+                       return -IPSET_ERR_INVALID_NETMASK;
+
+               first_ip &= ip_set_hostmask(netmask);
+               last_ip |= ~ip_set_hostmask(netmask);
+       }
+
+       if (netmask == 32) {
+               hosts = 1;
+               elements = last_ip - first_ip + 1;
+       } else {
+               u8 mask_bits;
+               u32 mask;
+
+               mask = range_to_mask(first_ip, last_ip, &mask_bits);
+
+               if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
+                   netmask <= mask_bits)
+                       return -IPSET_ERR_BITMAP_RANGE;
+
+               pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+               hosts = 2 << (32 - netmask - 1);
+               elements = 2 << (netmask - mask_bits - 1);
+       }
+       if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+               return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+       pr_debug("hosts %u, elements %u\n", hosts, elements);
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               map->memsize = elements * sizeof(unsigned long);
+
+               if (!init_map_ip(set, map, first_ip, last_ip,
+                                elements, hosts, netmask)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+               set->variant = &bitmap_tip;
+
+               bitmap_ip_gc_init(set);
+       } else {
+               map->memsize = bitmap_bytes(0, elements - 1);
+
+               if (!init_map_ip(set, map, first_ip, last_ip,
+                                elements, hosts, netmask)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               set->variant = &bitmap_ip;
+       }
+       return 0;
+}
+
+static struct ip_set_type bitmap_ip_type __read_mostly = {
+       .name           = "bitmap:ip",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_INET,
+       .revision       = 0,
+       .create         = bitmap_ip_create,
+       .create_policy  = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_NETMASK]    = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+bitmap_ip_init(void)
+{
+       return ip_set_type_register(&bitmap_ip_type);
+}
+
+static void __exit
+bitmap_ip_fini(void)
+{
+       ip_set_type_unregister(&bitmap_ip_type);
+}
+
+module_init(bitmap_ip_init);
+module_exit(bitmap_ip_fini);
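
bitmap:ip stores one bit per element in the plain variant and one unsigned long (the expiry value) per element in the timeout variant; bitmap_ip_create() derives hosts and elements from the requested range and netmask. The standalone C check below restates that arithmetic outside the kernel (illustrative only; it mirrors the formulas above rather than calling kernel helpers).

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hostmask(uint8_t cidr)
    {
            return cidr ? ~((1u << (32 - cidr)) - 1) : 0;
    }

    int main(void)
    {
            uint32_t first_ip = 0xc0a80000;              /* 192.168.0.0 */
            uint8_t mask_bits = 22, netmask = 30;        /* /22 range, /30 blocks */
            uint32_t hosts = 2u << (32 - netmask - 1);   /* 2^(32-netmask) = 4 */
            uint32_t elements = 2u << (netmask - mask_bits - 1); /* 2^(netmask-mask_bits) = 256 */
            uint32_t ip = 0xc0a80109;                    /* 192.168.1.9 */
            uint32_t id = ((ip & hostmask(netmask)) - first_ip) / hosts;

            printf("hosts=%u elements=%u id=%u\n", hosts, elements, id);
            return 0;                                    /* hosts=4 elements=256 id=66 */
    }
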
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
new file mode 100644 (file)
index 0000000..5e79017
--- /dev/null
@@ -0,0 +1,652 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                        Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip,mac");
+
+enum {
+       MAC_EMPTY,              /* element is not set */
+       MAC_FILLED,             /* element is set with MAC */
+       MAC_UNSET,              /* element is set, without MAC */
+};
+
+/* Type structure */
+struct bitmap_ipmac {
+       void *members;          /* the set members */
+       u32 first_ip;           /* host byte order, included in range */
+       u32 last_ip;            /* host byte order, included in range */
+       u32 timeout;            /* timeout value */
+       struct timer_list gc;   /* garbage collector */
+       size_t dsize;           /* size of element */
+};
+
+/* ADT structure for generic function args */
+struct ipmac {
+       u32 id;                 /* id in array */
+       unsigned char *ether;   /* ethernet address */
+};
+
+/* Member element without and with timeout */
+
+struct ipmac_elem {
+       unsigned char ether[ETH_ALEN];
+       unsigned char match;
+} __attribute__ ((aligned));
+
+struct ipmac_telem {
+       unsigned char ether[ETH_ALEN];
+       unsigned char match;
+       unsigned long timeout;
+} __attribute__ ((aligned));
+
+static inline void *
+bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
+{
+       return (void *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
+{
+       const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+       return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+bitmap_expired(const struct bitmap_ipmac *map, u32 id)
+{
+       const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+       return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+bitmap_ipmac_exist(const struct ipmac_telem *elem)
+{
+       return elem->match == MAC_UNSET ||
+              (elem->match == MAC_FILLED &&
+               !ip_set_timeout_expired(elem->timeout));
+}
+
+/* Base variant */
+
+static int
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               /* Trigger kernel to fill out the ethernet address */
+               return -EAGAIN;
+       case MAC_FILLED:
+               return data->ether == NULL ||
+                      compare_ether_addr(data->ether, elem->ether) == 0;
+       }
+       return 0;
+}
+
+static int
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               if (!data->ether)
+                       /* Already added without ethernet address */
+                       return -IPSET_ERR_EXIST;
+               /* Fill the MAC address */
+               memcpy(elem->ether, data->ether, ETH_ALEN);
+               elem->match = MAC_FILLED;
+               break;
+       case MAC_FILLED:
+               return -IPSET_ERR_EXIST;
+       case MAC_EMPTY:
+               if (data->ether) {
+                       memcpy(elem->ether, data->ether, ETH_ALEN);
+                       elem->match = MAC_FILLED;
+               } else
+                       elem->match = MAC_UNSET;
+       }
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       if (elem->match == MAC_EMPTY)
+               return -IPSET_ERR_EXIST;
+
+       elem->match = MAC_EMPTY;
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_list(const struct ip_set *set,
+                 struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac_elem *elem;
+       struct nlattr *atd, *nested;
+       u32 id, first = cb->args[2];
+       u32 last = map->last_ip - map->first_ip;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               elem = bitmap_ipmac_elem(map, id);
+               if (elem->match == MAC_EMPTY)
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id));
+               if (elem->match == MAC_FILLED)
+                       NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+                               elem->ether);
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               /* Trigger kernel to fill out the ethernet address */
+               return -EAGAIN;
+       case MAC_FILLED:
+               return (data->ether == NULL ||
+                       compare_ether_addr(data->ether, elem->ether) == 0) &&
+                      !bitmap_expired(map, data->id);
+       }
+       return 0;
+}
+
+static int
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               if (!data->ether)
+                       /* Already added without ethernet address */
+                       return -IPSET_ERR_EXIST;
+               /* Fill the MAC address and activate the timer */
+               memcpy(elem->ether, data->ether, ETH_ALEN);
+               elem->match = MAC_FILLED;
+               if (timeout == map->timeout)
+                       /* Timeout was not specified, get stored one */
+                       timeout = elem->timeout;
+               elem->timeout = ip_set_timeout_set(timeout);
+               break;
+       case MAC_FILLED:
+               if (!bitmap_expired(map, data->id))
+                       return -IPSET_ERR_EXIST;
+               /* Fall through */
+       case MAC_EMPTY:
+               if (data->ether) {
+                       memcpy(elem->ether, data->ether, ETH_ALEN);
+                       elem->match = MAC_FILLED;
+               } else
+                       elem->match = MAC_UNSET;
+               /* If the MAC is still unset, store the plain timeout value
+                * because the timer is not activated yet; it can be reused
+                * later when the MAC is filled in, possibly by the kernel */
+               elem->timeout = data->ether ? ip_set_timeout_set(timeout)
+                                           : timeout;
+               break;
+       }
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+       if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
+               return -IPSET_ERR_EXIST;
+
+       elem->match = MAC_EMPTY;
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_tlist(const struct ip_set *set,
+                  struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac_telem *elem;
+       struct nlattr *atd, *nested;
+       u32 id, first = cb->args[2];
+       u32 timeout, last = map->last_ip - map->first_ip;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               elem = bitmap_ipmac_elem(map, id);
+               if (!bitmap_ipmac_exist(elem))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id));
+               if (elem->match == MAC_FILLED)
+                       NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+                               elem->ether);
+               timeout = elem->match == MAC_UNSET ? elem->timeout
+                               : ip_set_timeout_get(elem->timeout);
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       return -EMSGSIZE;
+}
+
+static int
+bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct bitmap_ipmac *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct ipmac data;
+
+       data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+       if (data.id < map->first_ip || data.id > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       /* Backward compatibility: we don't check the second flag */
+       if (skb_mac_header(skb) < skb->head ||
+           (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+               return -EINVAL;
+
+       data.id -= map->first_ip;
+       data.ether = eth_hdr(skb)->h_source;
+
+       return adtfn(set, &data, map->timeout);
+}
+
+static int
+bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct bitmap_ipmac *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct ipmac data;
+       u32 timeout = map->timeout;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
+       if (ret)
+               return ret;
+
+       if (data.id < map->first_ip || data.id > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       if (tb[IPSET_ATTR_ETHER])
+               data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
+       else
+               data.ether = NULL;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(map->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       data.id -= map->first_ip;
+
+       ret = adtfn(set, &data, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+bitmap_ipmac_destroy(struct ip_set *set)
+{
+       struct bitmap_ipmac *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+
+       ip_set_free(map->members);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static void
+bitmap_ipmac_flush(struct ip_set *set)
+{
+       struct bitmap_ipmac *map = set->data;
+
+       memset(map->members, 0,
+              (map->last_ip - map->first_ip + 1) * map->dsize);
+}
+
+static int
+bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct bitmap_ipmac *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map)
+                           + (map->last_ip - map->first_ip + 1) * map->dsize));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static bool
+bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct bitmap_ipmac *x = a->data;
+       const struct bitmap_ipmac *y = b->data;
+
+       return x->first_ip == y->first_ip &&
+              x->last_ip == y->last_ip &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ipmac = {
+       .kadt   = bitmap_ipmac_kadt,
+       .uadt   = bitmap_ipmac_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ipmac_add,
+               [IPSET_DEL] = bitmap_ipmac_del,
+               [IPSET_TEST] = bitmap_ipmac_test,
+       },
+       .destroy = bitmap_ipmac_destroy,
+       .flush  = bitmap_ipmac_flush,
+       .head   = bitmap_ipmac_head,
+       .list   = bitmap_ipmac_list,
+       .same_set = bitmap_ipmac_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tipmac = {
+       .kadt   = bitmap_ipmac_kadt,
+       .uadt   = bitmap_ipmac_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ipmac_tadd,
+               [IPSET_DEL] = bitmap_ipmac_tdel,
+               [IPSET_TEST] = bitmap_ipmac_ttest,
+       },
+       .destroy = bitmap_ipmac_destroy,
+       .flush  = bitmap_ipmac_flush,
+       .head   = bitmap_ipmac_head,
+       .list   = bitmap_ipmac_tlist,
+       .same_set = bitmap_ipmac_same_set,
+};
+
+static void
+bitmap_ipmac_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct bitmap_ipmac *map = set->data;
+       struct ipmac_telem *elem;
+       u32 id, last = map->last_ip - map->first_ip;
+
+       /* We run parallel with other readers (test element)
+        * but adding/deleting new entries is locked out */
+       read_lock_bh(&set->lock);
+       for (id = 0; id <= last; id++) {
+               elem = bitmap_ipmac_elem(map, id);
+               if (elem->match == MAC_FILLED &&
+                   ip_set_timeout_expired(elem->timeout))
+                       elem->match = MAC_EMPTY;
+       }
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+bitmap_ipmac_gc_init(struct ip_set *set)
+{
+       struct bitmap_ipmac *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = bitmap_ipmac_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+/* Create bitmap:ip,mac type of sets */
+
+static bool
+init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+              u32 first_ip, u32 last_ip)
+{
+       map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+       if (!map->members)
+               return false;
+       map->first_ip = first_ip;
+       map->last_ip = last_ip;
+       map->timeout = IPSET_NO_TIMEOUT;
+
+       set->data = map;
+       set->family = AF_INET;
+
+       return true;
+}
+
+static int
+bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+                   u32 flags)
+{
+       u32 first_ip, last_ip, elements;
+       struct bitmap_ipmac *map;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+               if (ret)
+                       return ret;
+               if (first_ip > last_ip) {
+                       u32 tmp = first_ip;
+
+                       first_ip = last_ip;
+                       last_ip = tmp;
+               }
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr >= 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               last_ip = first_ip | ~ip_set_hostmask(cidr);
+       } else
+               return -IPSET_ERR_PROTOCOL;
+
+       elements = last_ip - first_ip + 1;
+
+       if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+               return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               map->dsize = sizeof(struct ipmac_telem);
+
+               if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = &bitmap_tipmac;
+
+               bitmap_ipmac_gc_init(set);
+       } else {
+               map->dsize = sizeof(struct ipmac_elem);
+
+               if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               set->variant = &bitmap_ipmac;
+
+       }
+       return 0;
+}
+
+static struct ip_set_type bitmap_ipmac_type = {
+       .name           = "bitmap:ip,mac",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_MAC,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = AF_INET,
+       .revision       = 0,
+       .create         = bitmap_ipmac_create,
+       .create_policy  = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_ETHER]      = { .type = NLA_BINARY, .len  = ETH_ALEN },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+bitmap_ipmac_init(void)
+{
+       return ip_set_type_register(&bitmap_ipmac_type);
+}
+
+static void __exit
+bitmap_ipmac_fini(void)
+{
+       ip_set_type_unregister(&bitmap_ipmac_type);
+}
+
+module_init(bitmap_ipmac_init);
+module_exit(bitmap_ipmac_fini);
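
Each bitmap:ip,mac slot carries a small state machine: MAC_EMPTY (free), MAC_UNSET (IP added, MAC not learned yet) and MAC_FILLED. Per the comment in bitmap_ipmac_test(), a MAC_UNSET slot makes the test path return -EAGAIN so the kernel side can re-add the entry with the source MAC taken from eth_hdr(skb). Below is a hedged helper restating the non-timeout match rules; the function name is hypothetical and it only reuses types defined in the file above.

    #include <linux/etherdevice.h>

    /* smac may be NULL for userspace tests that specify the IP only. */
    static bool ipmac_slot_matches(const struct ipmac_elem *e, const u8 *smac)
    {
            switch (e->match) {
            case MAC_EMPTY:         /* slot not in the set */
                    return false;
            case MAC_UNSET:         /* IP present, MAC not learned yet */
                    return true;
            case MAC_FILLED:        /* both IP and MAC must agree */
                    return !smac || compare_ether_addr(e->ether, smac) == 0;
            }
            return false;
    }
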
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
new file mode 100644 (file)
index 0000000..165f09b
--- /dev/null
@@ -0,0 +1,515 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:port type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:port type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:port");
+
+/* Type structure */
+struct bitmap_port {
+       void *members;          /* the set members */
+       u16 first_port;         /* host byte order, included in range */
+       u16 last_port;          /* host byte order, included in range */
+       size_t memsize;         /* members size */
+       u32 timeout;            /* timeout parameter */
+       struct timer_list gc;   /* garbage collection */
+};
+
+/* Base variant */
+
+static int
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_port *map = set->data;
+       u16 id = *(u16 *)value;
+
+       return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (test_and_set_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (!test_and_clear_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_port_list(const struct ip_set *set,
+                struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_port *map = set->data;
+       struct nlattr *atd, *nested;
+       u16 id, first = cb->args[2];
+       u16 last = map->last_port - map->first_port;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               if (!test_bit(id, map->members))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+                             htons(map->first_port + id));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_port *map = set->data;
+       const unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       if (ip_set_timeout_test(members[id]))
+               return -IPSET_ERR_EXIST;
+
+       members[id] = ip_set_timeout_set(timeout);
+
+       return 0;
+}
+
+static int
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+       int ret = -IPSET_ERR_EXIST;
+
+       if (ip_set_timeout_test(members[id]))
+               ret = 0;
+
+       members[id] = IPSET_ELEM_UNSET;
+       return ret;
+}
+
+static int
+bitmap_port_tlist(const struct ip_set *set,
+                 struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_port *map = set->data;
+       struct nlattr *adt, *nested;
+       u16 id, first = cb->args[2];
+       u16 last = map->last_port - map->first_port;
+       const unsigned long *members = map->members;
+
+       adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!adt)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               if (!ip_set_timeout_test(members[id]))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, adt);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+                             htons(map->first_port + id));
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                             htonl(ip_set_timeout_get(members[id])));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, adt);
+
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, adt);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static int
+bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
+                enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct bitmap_port *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       __be16 __port;
+       u16 port = 0;
+
+       if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+               return -EINVAL;
+
+       port = ntohs(__port);
+
+       if (port < map->first_port || port > map->last_port)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       port -= map->first_port;
+
+       return adtfn(set, &port, map->timeout);
+}
+
+static int
+bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
+                enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       struct bitmap_port *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 timeout = map->timeout;
+       u32 port;       /* u32, to avoid u16 wraparound in the loop below */
+       u16 id, port_to;
+       int ret = 0;
+
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+       if (port < map->first_port || port > map->last_port)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(map->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST) {
+               id = port - map->first_port;
+               return adtfn(set, &id, timeout);
+       }
+
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to) {
+                       swap(port, port_to);
+                       if (port < map->first_port)
+                               return -IPSET_ERR_BITMAP_RANGE;
+               }
+       } else
+               port_to = port;
+
+       if (port_to > map->last_port)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       for (; port <= port_to; port++) {
+               id = port - map->first_port;
+               ret = adtfn(set, &id, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static void
+bitmap_port_destroy(struct ip_set *set)
+{
+       struct bitmap_port *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+
+       ip_set_free(map->members);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static void
+bitmap_port_flush(struct ip_set *set)
+{
+       struct bitmap_port *map = set->data;
+
+       memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct bitmap_port *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map) + map->memsize));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static bool
+bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct bitmap_port *x = a->data;
+       const struct bitmap_port *y = b->data;
+
+       return x->first_port == y->first_port &&
+              x->last_port == y->last_port &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_port = {
+       .kadt   = bitmap_port_kadt,
+       .uadt   = bitmap_port_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_port_add,
+               [IPSET_DEL] = bitmap_port_del,
+               [IPSET_TEST] = bitmap_port_test,
+       },
+       .destroy = bitmap_port_destroy,
+       .flush  = bitmap_port_flush,
+       .head   = bitmap_port_head,
+       .list   = bitmap_port_list,
+       .same_set = bitmap_port_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tport = {
+       .kadt   = bitmap_port_kadt,
+       .uadt   = bitmap_port_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_port_tadd,
+               [IPSET_DEL] = bitmap_port_tdel,
+               [IPSET_TEST] = bitmap_port_ttest,
+       },
+       .destroy = bitmap_port_destroy,
+       .flush  = bitmap_port_flush,
+       .head   = bitmap_port_head,
+       .list   = bitmap_port_tlist,
+       .same_set = bitmap_port_same_set,
+};
+
+static void
+bitmap_port_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct bitmap_port *map = set->data;
+       unsigned long *table = map->members;
+       u32 id; /* u32, to avoid u16 wraparound in the loop below */
+       u16 last = map->last_port - map->first_port;
+
+       /* We run in parallel with other readers (test element)
+        * but adding/deleting new entries is locked out */
+       read_lock_bh(&set->lock);
+       for (id = 0; id <= last; id++)
+               if (ip_set_timeout_expired(table[id]))
+                       table[id] = IPSET_ELEM_UNSET;
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+bitmap_port_gc_init(struct ip_set *set)
+{
+       struct bitmap_port *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = bitmap_port_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+/* Create bitmap:port type of sets */
+
+static bool
+init_map_port(struct ip_set *set, struct bitmap_port *map,
+             u16 first_port, u16 last_port)
+{
+       map->members = ip_set_alloc(map->memsize);
+       if (!map->members)
+               return false;
+       map->first_port = first_port;
+       map->last_port = last_port;
+       map->timeout = IPSET_NO_TIMEOUT;
+
+       set->data = map;
+       set->family = AF_UNSPEC;
+
+       return true;
+}
+
+static int
+bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
+                u32 flags)
+{
+       struct bitmap_port *map;
+       u16 first_port, last_port;
+
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+       last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (first_port > last_port) {
+               u16 tmp = first_port;
+
+               first_port = last_port;
+               last_port = tmp;
+       }
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               map->memsize = (last_port - first_port + 1)
+                              * sizeof(unsigned long);
+
+               if (!init_map_port(set, map, first_port, last_port)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+               set->variant = &bitmap_tport;
+
+               bitmap_port_gc_init(set);
+       } else {
+               map->memsize = bitmap_bytes(0, last_port - first_port);
+               pr_debug("memsize: %zu\n", map->memsize);
+               if (!init_map_port(set, map, first_port, last_port)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               set->variant = &bitmap_port;
+       }
+       return 0;
+}
+
+static struct ip_set_type bitmap_port_type = {
+       .name           = "bitmap:port",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_PORT,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = bitmap_port_create,
+       .create_policy  = {
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+bitmap_port_init(void)
+{
+       return ip_set_type_register(&bitmap_port_type);
+}
+
+static void __exit
+bitmap_port_fini(void)
+{
+       ip_set_type_unregister(&bitmap_port_type);
+}
+
+module_init(bitmap_port_init);
+module_exit(bitmap_port_fini);
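
The bitmap:port type above maps a port p in the inclusive range [first_port, last_port] to bit index p - first_port, and bitmap_port_create() sizes the member array either as a packed bitmap (plain variant) or as one unsigned long per port (timeout variant). The standalone userspace sketch below reproduces only that arithmetic; plain_memsize() and timeout_memsize() are illustrative helpers, not kernel functions.

#include <limits.h>
#include <stdio.h>

/* One bit per port, rounded up to whole longs (cf. bitmap_bytes()). */
static size_t plain_memsize(unsigned int first, unsigned int last)
{
	size_t bits = (size_t)last - first + 1;
	size_t bits_per_long = sizeof(unsigned long) * CHAR_BIT;

	return ((bits + bits_per_long - 1) / bits_per_long) *
		sizeof(unsigned long);
}

/* One timeout word per port (cf. the timeout variant). */
static size_t timeout_memsize(unsigned int first, unsigned int last)
{
	return ((size_t)last - first + 1) * sizeof(unsigned long);
}

int main(void)
{
	unsigned int first = 1024, last = 65535, port = 8080;

	printf("bit index of port %u: %u\n", port, port - first);
	printf("plain memsize:   %zu bytes\n", plain_memsize(first, last));
	printf("timeout memsize: %zu bytes\n", timeout_memsize(first, last));
	return 0;
}
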
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
new file mode 100644 (file)
index 0000000..618a615
--- /dev/null
@@ -0,0 +1,1671 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+static LIST_HEAD(ip_set_type_list);            /* all registered set types */
+static DEFINE_MUTEX(ip_set_type_mutex);                /* protects ip_set_type_list */
+
+static struct ip_set **ip_set_list;            /* all individual sets */
+static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+
+#define STREQ(a, b)    (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+
+static unsigned int max_sets;
+
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("core IP set support");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+/*
+ * The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+ * serialized by ip_set_type_mutex.
+ */
+
+static inline void
+ip_set_type_lock(void)
+{
+       mutex_lock(&ip_set_type_mutex);
+}
+
+static inline void
+ip_set_type_unlock(void)
+{
+       mutex_unlock(&ip_set_type_mutex);
+}
+
+/* Register and deregister settype */
+
+static struct ip_set_type *
+find_set_type(const char *name, u8 family, u8 revision)
+{
+       struct ip_set_type *type;
+
+       list_for_each_entry_rcu(type, &ip_set_type_list, list)
+               if (STREQ(type->name, name) &&
+                   (type->family == family || type->family == AF_UNSPEC) &&
+                   type->revision == revision)
+                       return type;
+       return NULL;
+}
+
+/* Unlock, try to load a set type module and lock again */
+static int
+try_to_load_type(const char *name)
+{
+       nfnl_unlock();
+       pr_debug("try to load ip_set_%s\n", name);
+       if (request_module("ip_set_%s", name) < 0) {
+               pr_warning("Can't find ip_set type %s\n", name);
+               nfnl_lock();
+               return -IPSET_ERR_FIND_TYPE;
+       }
+       nfnl_lock();
+       return -EAGAIN;
+}
+
+/* Find a set type and reference it */
+static int
+find_set_type_get(const char *name, u8 family, u8 revision,
+                 struct ip_set_type **found)
+{
+       rcu_read_lock();
+       *found = find_set_type(name, family, revision);
+       if (*found) {
+               int err = !try_module_get((*found)->me);
+               rcu_read_unlock();
+               return err ? -EFAULT : 0;
+       }
+       rcu_read_unlock();
+
+       return try_to_load_type(name);
+}
+
+/* Find a given set type by name and family.
+ * If we succeeded, the supported minimum and maximum revisions are
+ * filled out.
+ */
+static int
+find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+{
+       struct ip_set_type *type;
+       bool found = false;
+
+       *min = 255; *max = 0;
+       rcu_read_lock();
+       list_for_each_entry_rcu(type, &ip_set_type_list, list)
+               if (STREQ(type->name, name) &&
+                   (type->family == family || type->family == AF_UNSPEC)) {
+                       found = true;
+                       if (type->revision < *min)
+                               *min = type->revision;
+                       if (type->revision > *max)
+                               *max = type->revision;
+               }
+       rcu_read_unlock();
+       if (found)
+               return 0;
+
+       return try_to_load_type(name);
+}
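/*
 * Worked example (standalone userspace sketch, not part of ip_set_core.c):
 * for the bounds scan above, *min must start at the largest possible u8
 * revision and the two comparisons must be independent, otherwise a type
 * that only registers revision 2 would be reported as min=2, max=0.
 */
#include <stdio.h>

static void minmax_u8(const unsigned char *rev, int n,
		      unsigned char *min, unsigned char *max)
{
	int i;

	*min = 255;
	*max = 0;
	for (i = 0; i < n; i++) {
		if (rev[i] < *min)
			*min = rev[i];
		if (rev[i] > *max)	/* independent check, not "else if" */
			*max = rev[i];
	}
}

int main(void)
{
	unsigned char revs[] = { 2 };	/* a single registered revision */
	unsigned char min, max;

	minmax_u8(revs, 1, &min, &max);
	printf("min=%u max=%u\n", min, max);	/* prints min=2 max=2 */
	return 0;
}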
+
+#define family_name(f) ((f) == AF_INET ? "inet" : \
+                        (f) == AF_INET6 ? "inet6" : "any")
+
+/* Register a set type structure. The type is identified by
+ * the unique triple of name, family and revision.
+ */
+int
+ip_set_type_register(struct ip_set_type *type)
+{
+       int ret = 0;
+
+       if (type->protocol != IPSET_PROTOCOL) {
+               pr_warning("ip_set type %s, family %s, revision %u uses "
+                          "wrong protocol version %u (want %u)\n",
+                          type->name, family_name(type->family),
+                          type->revision, type->protocol, IPSET_PROTOCOL);
+               return -EINVAL;
+       }
+
+       ip_set_type_lock();
+       if (find_set_type(type->name, type->family, type->revision)) {
+               /* Duplicate! */
+               pr_warning("ip_set type %s, family %s, revision %u "
+                          "already registered!\n", type->name,
+                          family_name(type->family), type->revision);
+               ret = -EINVAL;
+               goto unlock;
+       }
+       list_add_rcu(&type->list, &ip_set_type_list);
+       pr_debug("type %s, family %s, revision %u registered.\n",
+                type->name, family_name(type->family), type->revision);
+unlock:
+       ip_set_type_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_type_register);
+
+/* Unregister a set type. There's a small race with ip_set_create */
+void
+ip_set_type_unregister(struct ip_set_type *type)
+{
+       ip_set_type_lock();
+       if (!find_set_type(type->name, type->family, type->revision)) {
+               pr_warning("ip_set type %s, family %s, revision %u "
+                          "not registered\n", type->name,
+                          family_name(type->family), type->revision);
+               goto unlock;
+       }
+       list_del_rcu(&type->list);
+       pr_debug("type %s, family %s, revision %u unregistered.\n",
+                type->name, family_name(type->family), type->revision);
+unlock:
+       ip_set_type_unlock();
+
+       synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(ip_set_type_unregister);
+
+/* Utility functions */
+void *
+ip_set_alloc(size_t size)
+{
+       void *members = NULL;
+
+       if (size < KMALLOC_MAX_SIZE)
+               members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+       if (members) {
+               pr_debug("%p: allocated with kmalloc\n", members);
+               return members;
+       }
+
+       members = vzalloc(size);
+       if (!members)
+               return NULL;
+       pr_debug("%p: allocated with vmalloc\n", members);
+
+       return members;
+}
+EXPORT_SYMBOL_GPL(ip_set_alloc);
+
+void
+ip_set_free(void *members)
+{
+       pr_debug("%p: free with %s\n", members,
+                is_vmalloc_addr(members) ? "vfree" : "kfree");
+       if (is_vmalloc_addr(members))
+               vfree(members);
+       else
+               kfree(members);
+}
+EXPORT_SYMBOL_GPL(ip_set_free);
+
+static inline bool
+flag_nested(const struct nlattr *nla)
+{
+       return nla->nla_type & NLA_F_NESTED;
+}
+
+static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+       [IPSET_ATTR_IPADDR_IPV4]        = { .type = NLA_U32 },
+       [IPSET_ATTR_IPADDR_IPV6]        = { .type = NLA_BINARY,
+                                           .len = sizeof(struct in6_addr) },
+};
+
+int
+ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr)
+{
+       struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+       if (unlikely(!flag_nested(nla)))
+               return -IPSET_ERR_PROTOCOL;
+       if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
+               return -IPSET_ERR_PROTOCOL;
+
+       *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
+
+int
+ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
+{
+       struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+       if (unlikely(!flag_nested(nla)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
+               return -IPSET_ERR_PROTOCOL;
+
+       memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
+               sizeof(struct in6_addr));
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+
+/*
+ * Creating/destroying/renaming/swapping affect the existence and
+ * the properties of a set. All of these can be executed from userspace
+ * only and serialized by the nfnl mutex indirectly from nfnetlink.
+ *
+ * Sets are identified by their index in ip_set_list and the index
+ * is used by the external references (set/SET netfilter modules).
+ *
+ * The set behind an index may change by swapping only, from userspace.
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+       atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+       atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Add, del and test set entries from kernel.
+ *
+ * The set behind the index must exist and must be referenced
+ * so it can't be destroyed (or changed) under our foot.
+ */
+
+int
+ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
+           u8 family, u8 dim, u8 flags)
+{
+       struct ip_set *set = ip_set_list[index];
+       int ret = 0;
+
+       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       pr_debug("set %s, index %u\n", set->name, index);
+
+       if (dim < set->type->dimension ||
+           !(family == set->family || set->family == AF_UNSPEC))
+               return 0;
+
+       read_lock_bh(&set->lock);
+       ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+       read_unlock_bh(&set->lock);
+
+       if (ret == -EAGAIN) {
+               /* Type requests element to be completed */
+               pr_debug("element must be competed, ADD is triggered\n");
+               write_lock_bh(&set->lock);
+               set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+               write_unlock_bh(&set->lock);
+               ret = 1;
+       }
+
+       /* Convert error codes to nomatch */
+       return (ret < 0 ? 0 : ret);
+}
+EXPORT_SYMBOL_GPL(ip_set_test);
+
+int
+ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
+          u8 family, u8 dim, u8 flags)
+{
+       struct ip_set *set = ip_set_list[index];
+       int ret;
+
+       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       pr_debug("set %s, index %u\n", set->name, index);
+
+       if (dim < set->type->dimension ||
+           !(family == set->family || set->family == AF_UNSPEC))
+               return 0;
+
+       write_lock_bh(&set->lock);
+       ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+       write_unlock_bh(&set->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_add);
+
+int
+ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
+          u8 family, u8 dim, u8 flags)
+{
+       struct ip_set *set = ip_set_list[index];
+       int ret = 0;
+
+       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       pr_debug("set %s, index %u\n", set->name, index);
+
+       if (dim < set->type->dimension ||
+           !(family == set->family || set->family == AF_UNSPEC))
+               return 0;
+
+       write_lock_bh(&set->lock);
+       ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+       write_unlock_bh(&set->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_del);
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex must already be held.
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name, struct ip_set **set)
+{
+       ip_set_id_t i, index = IPSET_INVALID_ID;
+       struct ip_set *s;
+
+       for (i = 0; i < ip_set_max; i++) {
+               s = ip_set_list[i];
+               if (s != NULL && STREQ(s->name, name)) {
+                       __ip_set_get(i);
+                       index = i;
+                       *set = s;
+               }
+       }
+
+       return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_byname);
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex must already be held.
+ */
+void
+ip_set_put_byindex(ip_set_id_t index)
+{
+       if (ip_set_list[index] != NULL) {
+               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+               __ip_set_put(index);
+       }
+}
+EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+/*
+ * Get the name of a set behind a set index.
+ * We assume the set is referenced, so it does exist and
+ * can't be destroyed. Nor can the set be renamed while it is
+ * referenced.
+ *
+ * The nfnl mutex must already be held.
+ */
+const char *
+ip_set_name_byindex(ip_set_id_t index)
+{
+       const struct ip_set *set = ip_set_list[index];
+
+       BUG_ON(set == NULL);
+       BUG_ON(atomic_read(&set->ref) == 0);
+
+       /* Referenced, so it's safe */
+       return set->name;
+}
+EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+/*
+ * Routines to call by external subsystems, which do not
+ * call nfnl_lock for us.
+ */
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is used in the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get(const char *name)
+{
+       struct ip_set *s;
+       ip_set_id_t index;
+
+       nfnl_lock();
+       index = ip_set_get_byname(name, &s);
+       nfnl_unlock();
+
+       return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
+
+/*
+ * Find set by index, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is used in the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get_byindex(ip_set_id_t index)
+{
+       if (index > ip_set_max)
+               return IPSET_INVALID_ID;
+
+       nfnl_lock();
+       if (ip_set_list[index])
+               __ip_set_get(index);
+       else
+               index = IPSET_INVALID_ID;
+       nfnl_unlock();
+
+       return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex is used in the function.
+ */
+void
+ip_set_nfnl_put(ip_set_id_t index)
+{
+       nfnl_lock();
+       if (ip_set_list[index] != NULL) {
+               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+               __ip_set_put(index);
+       }
+       nfnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
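/*
 * Sketch of the intended calling pattern for the three routines above, as
 * seen from a hypothetical external kernel module (e.g. a match extension).
 * It is built only from the signatures exported in this file and is an
 * illustration, not code from this patch.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/netfilter/ipset/ip_set.h>

static ip_set_id_t example_index = IPSET_INVALID_ID;	/* illustrative */

static int example_checkentry(const char *setname)
{
	/* Resolve the set by name and take a reference (nfnl_lock inside). */
	example_index = ip_set_nfnl_get(setname);
	return example_index == IPSET_INVALID_ID ? -ENOENT : 0;
}

static int example_match(const struct sk_buff *skb)
{
	/* The index is referenced, so the set cannot be destroyed here. */
	return ip_set_test(example_index, skb, AF_INET, IPSET_DIM_ONE, 0);
}

static void example_destroy(void)
{
	/* Drop the reference taken in example_checkentry(). */
	ip_set_nfnl_put(example_index);
}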
+
+/*
+ * Communication protocol with userspace over netlink.
+ *
+ * We are already protected by nfnl_lock.
+ */
+
+static inline bool
+protocol_failed(const struct nlattr * const tb[])
+{
+       return !tb[IPSET_ATTR_PROTOCOL] ||
+              nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+}
+
+static inline u32
+flag_exist(const struct nlmsghdr *nlh)
+{
+       return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
+}
+
+static struct nlmsghdr *
+start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+         enum ipset_cmd cmd)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+
+       nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+                       sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               return NULL;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = AF_INET;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       return nlh;
+}
+
+/* Create a set */
+
+static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_TYPENAME]   = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1},
+       [IPSET_ATTR_REVISION]   = { .type = NLA_U8 },
+       [IPSET_ATTR_FAMILY]     = { .type = NLA_U8 },
+       [IPSET_ATTR_DATA]       = { .type = NLA_NESTED },
+};
+
+static ip_set_id_t
+find_set_id(const char *name)
+{
+       ip_set_id_t i, index = IPSET_INVALID_ID;
+       const struct ip_set *set;
+
+       for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
+               set = ip_set_list[i];
+               if (set != NULL && STREQ(set->name, name))
+                       index = i;
+       }
+       return index;
+}
+
+static inline struct ip_set *
+find_set(const char *name)
+{
+       ip_set_id_t index = find_set_id(name);
+
+       return index == IPSET_INVALID_ID ? NULL : ip_set_list[index];
+}
+
+static int
+find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+{
+       ip_set_id_t i;
+
+       *index = IPSET_INVALID_ID;
+       for (i = 0;  i < ip_set_max; i++) {
+               if (ip_set_list[i] == NULL) {
+                       if (*index == IPSET_INVALID_ID)
+                               *index = i;
+               } else if (STREQ(name, ip_set_list[i]->name)) {
+                       /* Name clash */
+                       *set = ip_set_list[i];
+                       return -EEXIST;
+               }
+       }
+       if (*index == IPSET_INVALID_ID)
+               /* No free slot remained */
+               return -IPSET_ERR_MAX_SETS;
+       return 0;
+}
+
+static int
+ip_set_create(struct sock *ctnl, struct sk_buff *skb,
+             const struct nlmsghdr *nlh,
+             const struct nlattr * const attr[])
+{
+       struct ip_set *set, *clash = NULL;
+       ip_set_id_t index = IPSET_INVALID_ID;
+       struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+       const char *name, *typename;
+       u8 family, revision;
+       u32 flags = flag_exist(nlh);
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_TYPENAME] == NULL ||
+                    attr[IPSET_ATTR_REVISION] == NULL ||
+                    attr[IPSET_ATTR_FAMILY] == NULL ||
+                    (attr[IPSET_ATTR_DATA] != NULL &&
+                     !flag_nested(attr[IPSET_ATTR_DATA]))))
+               return -IPSET_ERR_PROTOCOL;
+
+       name = nla_data(attr[IPSET_ATTR_SETNAME]);
+       typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+       family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+       revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
+       pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
+                name, typename, family_name(family), revision);
+
+       /*
+        * First, and without any locks, allocate and initialize
+        * a normal base set structure.
+        */
+       set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+       if (!set)
+               return -ENOMEM;
+       rwlock_init(&set->lock);
+       strlcpy(set->name, name, IPSET_MAXNAMELEN);
+       atomic_set(&set->ref, 0);
+       set->family = family;
+
+       /*
+        * Next, check that we know the type, and take
+        * a reference on the type, to make sure it stays available
+        * while constructing our new set.
+        *
+        * After referencing the type, we try to create the type
+        * specific part of the set without holding any locks.
+        */
+       ret = find_set_type_get(typename, family, revision, &(set->type));
+       if (ret)
+               goto out;
+
+       /*
+        * Without holding any locks, create private part.
+        */
+       if (attr[IPSET_ATTR_DATA] &&
+           nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+                            set->type->create_policy)) {
+               ret = -IPSET_ERR_PROTOCOL;
+               goto put_out;
+       }
+
+       ret = set->type->create(set, tb, flags);
+       if (ret != 0)
+               goto put_out;
+
+       /* BTW, ret==0 here. */
+
+       /*
+        * Here, we have a valid, constructed set and we are protected
+        * by nfnl_lock. Find the first free index in ip_set_list and
+        * check clashing.
+        */
+       if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
+               /* If this is the same set and requested, ignore error */
+               if (ret == -EEXIST &&
+                   (flags & IPSET_FLAG_EXIST) &&
+                   STREQ(set->type->name, clash->type->name) &&
+                   set->type->family == clash->type->family &&
+                   set->type->revision == clash->type->revision &&
+                   set->variant->same_set(set, clash))
+                       ret = 0;
+               goto cleanup;
+       }
+
+       /*
+        * Finally! Add our shiny new set to the list, and be done.
+        */
+       pr_debug("create: '%s' created with index %u!\n", set->name, index);
+       ip_set_list[index] = set;
+
+       return ret;
+
+cleanup:
+       set->variant->destroy(set);
+put_out:
+       module_put(set->type->me);
+out:
+       kfree(set);
+       return ret;
+}
+
+/* Destroy sets */
+
+static const struct nla_policy
+ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+       struct ip_set *set = ip_set_list[index];
+
+       pr_debug("set: %s\n",  set->name);
+       ip_set_list[index] = NULL;
+
+       /* Must call it without holding any lock */
+       set->variant->destroy(set);
+       module_put(set->type->me);
+       kfree(set);
+}
+
+static int
+ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
+              const struct nlmsghdr *nlh,
+              const struct nlattr * const attr[])
+{
+       ip_set_id_t i;
+
+       if (unlikely(protocol_failed(attr)))
+               return -IPSET_ERR_PROTOCOL;
+
+       /* References are protected by the nfnl mutex */
+       if (!attr[IPSET_ATTR_SETNAME]) {
+               for (i = 0; i < ip_set_max; i++) {
+                       if (ip_set_list[i] != NULL &&
+                           (atomic_read(&ip_set_list[i]->ref)))
+                               return -IPSET_ERR_BUSY;
+               }
+               for (i = 0; i < ip_set_max; i++) {
+                       if (ip_set_list[i] != NULL)
+                               ip_set_destroy_set(i);
+               }
+       } else {
+               i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+               if (i == IPSET_INVALID_ID)
+                       return -ENOENT;
+               else if (atomic_read(&ip_set_list[i]->ref))
+                       return -IPSET_ERR_BUSY;
+
+               ip_set_destroy_set(i);
+       }
+       return 0;
+}
+
+/* Flush sets */
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+       pr_debug("set: %s\n",  set->name);
+
+       write_lock_bh(&set->lock);
+       set->variant->flush(set);
+       write_unlock_bh(&set->lock);
+}
+
+static int
+ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh,
+            const struct nlattr * const attr[])
+{
+       ip_set_id_t i;
+
+       if (unlikely(protocol_failed(attr)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (!attr[IPSET_ATTR_SETNAME]) {
+               for (i = 0; i < ip_set_max; i++)
+                       if (ip_set_list[i] != NULL)
+                               ip_set_flush_set(ip_set_list[i]);
+       } else {
+               i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+               if (i == IPSET_INVALID_ID)
+                       return -ENOENT;
+
+               ip_set_flush_set(ip_set_list[i]);
+       }
+
+       return 0;
+}
+
+/* Rename a set */
+
+static const struct nla_policy
+ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_SETNAME2]   = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static int
+ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
+             const struct nlmsghdr *nlh,
+             const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       const char *name2;
+       ip_set_id_t i;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_SETNAME2] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+       if (atomic_read(&set->ref) != 0)
+               return -IPSET_ERR_REFERENCED;
+
+       name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
+       for (i = 0; i < ip_set_max; i++) {
+               if (ip_set_list[i] != NULL &&
+                   STREQ(ip_set_list[i]->name, name2))
+                       return -IPSET_ERR_EXIST_SETNAME2;
+       }
+       strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+       return 0;
+}
+
+/* Swap two sets so that name/index points to the other.
+ * References and set names are also swapped.
+ *
+ * We are protected by the nfnl mutex and references are
+ * manipulated only by holding the mutex. The kernel interfaces
+ * do not hold the mutex but the pointer settings are atomic
+ * so the ip_set_list always contains valid pointers to the sets.
+ */
+
+static int
+ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct ip_set *from, *to;
+       ip_set_id_t from_id, to_id;
+       char from_name[IPSET_MAXNAMELEN];
+       u32 from_ref;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_SETNAME2] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (from_id == IPSET_INVALID_ID)
+               return -ENOENT;
+
+       to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2]));
+       if (to_id == IPSET_INVALID_ID)
+               return -IPSET_ERR_EXIST_SETNAME2;
+
+       from = ip_set_list[from_id];
+       to = ip_set_list[to_id];
+
+       /* Features must not change.
+        * Not an artificial restriction anymore, as we must prevent
+        * possible loops created by swapping in setlist type of sets. */
+       if (!(from->type->features == to->type->features &&
+             from->type->family == to->type->family))
+               return -IPSET_ERR_TYPE_MISMATCH;
+
+       /* No magic here: ref munging protected by the nfnl_lock */
+       strncpy(from_name, from->name, IPSET_MAXNAMELEN);
+       from_ref = atomic_read(&from->ref);
+
+       strncpy(from->name, to->name, IPSET_MAXNAMELEN);
+       atomic_set(&from->ref, atomic_read(&to->ref));
+       strncpy(to->name, from_name, IPSET_MAXNAMELEN);
+       atomic_set(&to->ref, from_ref);
+
+       ip_set_list[from_id] = to;
+       ip_set_list[to_id] = from;
+
+       return 0;
+}
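/*
 * Minimal userspace illustration (not patch code) of the swap semantics
 * documented above: external users hold indices, so exchanging the two
 * slots together with their names and reference counts retargets both
 * indices in one step from the users' point of view.
 */
#include <stdio.h>
#include <string.h>

struct toy_set {
	char name[32];
	int ref;
	const char *members;
};

static struct toy_set *list[2];

static void toy_swap(int a, int b)
{
	char name[32];
	int ref;
	struct toy_set *tmp;

	/* Swap the identity (name + refcount) ... */
	strcpy(name, list[a]->name);
	ref = list[a]->ref;
	strcpy(list[a]->name, list[b]->name);
	list[a]->ref = list[b]->ref;
	strcpy(list[b]->name, name);
	list[b]->ref = ref;

	/* ... then exchange the slots themselves. */
	tmp = list[a];
	list[a] = list[b];
	list[b] = tmp;
}

int main(void)
{
	struct toy_set x = { "allowed", 1, "old members" };
	struct toy_set y = { "staging", 0, "new members" };

	list[0] = &x;
	list[1] = &y;
	toy_swap(0, 1);
	/* Index 0 still answers to "allowed" but now serves the new members. */
	printf("%s -> %s\n", list[0]->name, list[0]->members);
	return 0;
}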
+
+/* List/save set data */
+
+#define DUMP_INIT      0L
+#define DUMP_ALL       1L
+#define DUMP_ONE       2L
+#define DUMP_LAST      3L
+
+static int
+ip_set_dump_done(struct netlink_callback *cb)
+{
+       if (cb->args[2]) {
+               pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
+               __ip_set_put((ip_set_id_t) cb->args[1]);
+       }
+       return 0;
+}
+
+static inline void
+dump_attrs(struct nlmsghdr *nlh)
+{
+       const struct nlattr *attr;
+       int rem;
+
+       pr_debug("dump nlmsg\n");
+       nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
+               pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
+       }
+}
+
+static int
+dump_init(struct netlink_callback *cb)
+{
+       struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
+       int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+       struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+       struct nlattr *attr = (void *)nlh + min_len;
+       ip_set_id_t index;
+
+       /* Second pass, so parser can't fail */
+       nla_parse(cda, IPSET_ATTR_CMD_MAX,
+                 attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
+
+       /* cb->args[0] : dump single set/all sets
+        *         [1] : set index
+        *         [..]: type specific
+        */
+
+       if (!cda[IPSET_ATTR_SETNAME]) {
+               cb->args[0] = DUMP_ALL;
+               return 0;
+       }
+
+       index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+       if (index == IPSET_INVALID_ID)
+               return -ENOENT;
+
+       cb->args[0] = DUMP_ONE;
+       cb->args[1] = index;
+       return 0;
+}
+
+static int
+ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       ip_set_id_t index = IPSET_INVALID_ID, max;
+       struct ip_set *set = NULL;
+       struct nlmsghdr *nlh = NULL;
+       unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+       int ret = 0;
+
+       if (cb->args[0] == DUMP_INIT) {
+               ret = dump_init(cb);
+               if (ret < 0) {
+                       nlh = nlmsg_hdr(cb->skb);
+                       /* We have to create and send the error message
+                        * manually :-( */
+                       if (nlh->nlmsg_flags & NLM_F_ACK)
+                               netlink_ack(cb->skb, nlh, ret);
+                       return ret;
+               }
+       }
+
+       if (cb->args[1] >= ip_set_max)
+               goto out;
+
+       pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+       max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+       for (; cb->args[1] < max; cb->args[1]++) {
+               index = (ip_set_id_t) cb->args[1];
+               set = ip_set_list[index];
+               if (set == NULL) {
+                       if (cb->args[0] == DUMP_ONE) {
+                               ret = -ENOENT;
+                               goto out;
+                       }
+                       continue;
+               }
+               /* When dumping all sets, we must dump "sorted"
+                * so that lists (unions of sets) are dumped last.
+                */
+               if (cb->args[0] != DUMP_ONE &&
+                   !((cb->args[0] == DUMP_ALL) ^
+                     (set->type->features & IPSET_DUMP_LAST)))
+                       continue;
+               pr_debug("List set: %s\n", set->name);
+               if (!cb->args[2]) {
+                       /* Start listing: make sure set won't be destroyed */
+                       pr_debug("reference set\n");
+                       __ip_set_get(index);
+               }
+               nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+                               cb->nlh->nlmsg_seq, flags,
+                               IPSET_CMD_LIST);
+               if (!nlh) {
+                       ret = -EMSGSIZE;
+                       goto release_refcount;
+               }
+               NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+               NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+               switch (cb->args[2]) {
+               case 0:
+                       /* Core header data */
+                       NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
+                                      set->type->name);
+                       NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
+                                  set->family);
+                       NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
+                                  set->type->revision);
+                       ret = set->variant->head(set, skb);
+                       if (ret < 0)
+                               goto release_refcount;
+                       /* Fall through and add elements */
+               default:
+                       read_lock_bh(&set->lock);
+                       ret = set->variant->list(set, skb, cb);
+                       read_unlock_bh(&set->lock);
+                       if (!cb->args[2]) {
+                               /* Set is done, proceed with next one */
+                               if (cb->args[0] == DUMP_ONE)
+                                       cb->args[1] = IPSET_INVALID_ID;
+                               else
+                                       cb->args[1]++;
+                       }
+                       goto release_refcount;
+               }
+       }
+       goto out;
+
+nla_put_failure:
+       ret = -EFAULT;
+release_refcount:
+       /* If there was an error or set is done, release set */
+       if (ret || !cb->args[2]) {
+               pr_debug("release set %s\n", ip_set_list[index]->name);
+               __ip_set_put(index);
+       }
+
+       /* If we dump all sets, continue with dumping last ones */
+       if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
+               cb->args[0] = DUMP_LAST;
+
+out:
+       if (nlh) {
+               nlmsg_end(skb, nlh);
+               pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
+               dump_attrs(nlh);
+       }
+
+       return ret < 0 ? ret : skb->len;
+}
+
+static int
+ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       if (unlikely(protocol_failed(attr)))
+               return -IPSET_ERR_PROTOCOL;
+
+       return netlink_dump_start(ctnl, skb, nlh,
+                                 ip_set_dump_start,
+                                 ip_set_dump_done);
+}
+
+/* Add, del and test */
+
+static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       [IPSET_ATTR_DATA]       = { .type = NLA_NESTED },
+       [IPSET_ATTR_ADT]        = { .type = NLA_NESTED },
+};
+
+static int
+call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+       struct nlattr *tb[], enum ipset_adt adt,
+       u32 flags, bool use_lineno)
+{
+       int ret, retried = 0;
+       u32 lineno = 0;
+       bool eexist = flags & IPSET_FLAG_EXIST;
+
+       do {
+               write_lock_bh(&set->lock);
+               ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+               write_unlock_bh(&set->lock);
+       } while (ret == -EAGAIN &&
+                set->variant->resize &&
+                (ret = set->variant->resize(set, retried++)) == 0);
+
+       if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+               return 0;
+       if (lineno && use_lineno) {
+               /* Error in restore/batch mode: send back lineno */
+               struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
+               struct sk_buff *skb2;
+               struct nlmsgerr *errmsg;
+               size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+               int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+               struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+               struct nlattr *cmdattr;
+               u32 *errline;
+
+               skb2 = nlmsg_new(payload, GFP_KERNEL);
+               if (skb2 == NULL)
+                       return -ENOMEM;
+               rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+                                 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
+               errmsg = nlmsg_data(rep);
+               errmsg->error = ret;
+               memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+               cmdattr = (void *)&errmsg->msg + min_len;
+
+               nla_parse(cda, IPSET_ATTR_CMD_MAX,
+                         cmdattr, nlh->nlmsg_len - min_len,
+                         ip_set_adt_policy);
+
+               errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+               *errline = lineno;
+
+               netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+               /* Signal netlink not to send its ACK/errmsg.  */
+               return -EINTR;
+       }
+
+       return ret;
+}
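/*
 * Userspace sketch (illustrative only) of the retry loop used in call_ad()
 * above: the add/del handler may report "try again" while a resize helper
 * grows the backing store between attempts.
 */
#include <stdio.h>

#define ERR_AGAIN	(-11)	/* stand-in for -EAGAIN */

static int capacity = 4;

static int do_add(int n)
{
	return n < capacity ? 0 : ERR_AGAIN;	/* full: ask for a resize */
}

static int do_resize(int retried)
{
	capacity *= 2;				/* pretend the table grew */
	return retried > 8 ? -1 : 0;		/* give up eventually */
}

int main(void)
{
	int n = 17, ret, retried = 0;

	do {
		ret = do_add(n);
	} while (ret == ERR_AGAIN && do_resize(retried++) == 0);

	printf("add %d -> %d (capacity %d)\n", n, ret, capacity);
	return 0;
}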
+
+static int
+ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+       const struct nlattr *nla;
+       u32 flags = flag_exist(nlh);
+       bool use_lineno;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    !((attr[IPSET_ATTR_DATA] != NULL) ^
+                      (attr[IPSET_ATTR_ADT] != NULL)) ||
+                    (attr[IPSET_ATTR_DATA] != NULL &&
+                     !flag_nested(attr[IPSET_ATTR_DATA])) ||
+                    (attr[IPSET_ATTR_ADT] != NULL &&
+                     (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+                      attr[IPSET_ATTR_LINENO] == NULL))))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+
+       use_lineno = !!attr[IPSET_ATTR_LINENO];
+       if (attr[IPSET_ATTR_DATA]) {
+               if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+                                    attr[IPSET_ATTR_DATA],
+                                    set->type->adt_policy))
+                       return -IPSET_ERR_PROTOCOL;
+               ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
+                             use_lineno);
+       } else {
+               int nla_rem;
+
+               nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+                       memset(tb, 0, sizeof(tb));
+                       if (nla_type(nla) != IPSET_ATTR_DATA ||
+                           !flag_nested(nla) ||
+                           nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+                                            set->type->adt_policy))
+                               return -IPSET_ERR_PROTOCOL;
+                       ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
+                                     flags, use_lineno);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return ret;
+}
+
+static int
+ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+       const struct nlattr *nla;
+       u32 flags = flag_exist(nlh);
+       bool use_lineno;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    !((attr[IPSET_ATTR_DATA] != NULL) ^
+                      (attr[IPSET_ATTR_ADT] != NULL)) ||
+                    (attr[IPSET_ATTR_DATA] != NULL &&
+                     !flag_nested(attr[IPSET_ATTR_DATA])) ||
+                    (attr[IPSET_ATTR_ADT] != NULL &&
+                     (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+                      attr[IPSET_ATTR_LINENO] == NULL))))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+
+       use_lineno = !!attr[IPSET_ATTR_LINENO];
+       if (attr[IPSET_ATTR_DATA]) {
+               if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+                                    attr[IPSET_ATTR_DATA],
+                                    set->type->adt_policy))
+                       return -IPSET_ERR_PROTOCOL;
+               ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
+                             use_lineno);
+       } else {
+               int nla_rem;
+
+               nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+                       memset(tb, 0, sizeof(tb));
+                       if (nla_type(nla) != IPSET_ATTR_DATA ||
+                           !flag_nested(nla) ||
+                           nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+                                            set->type->adt_policy))
+                               return -IPSET_ERR_PROTOCOL;
+                       ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
+                                     flags, use_lineno);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return ret;
+}
+
+static int
+ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh,
+            const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+       u32 lineno = 0;         /* uadt() may store here even for TEST */
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_DATA] == NULL ||
+                    !flag_nested(attr[IPSET_ATTR_DATA])))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+
+       if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+                            set->type->adt_policy))
+               return -IPSET_ERR_PROTOCOL;
+
+       read_lock_bh(&set->lock);
+       ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+       read_unlock_bh(&set->lock);
+       /* Userspace can't trigger element to be re-added */
+       if (ret == -EAGAIN)
+               ret = 1;
+
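+       /* Map the result: positive means the element is in the set (report
+        * success), zero that it is not, negative is a real error. */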
+       return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+}
+
+/* Get the header data of a set */
+
+static int
+ip_set_header(struct sock *ctnl, struct sk_buff *skb,
+             const struct nlmsghdr *nlh,
+             const struct nlattr * const attr[])
+{
+       const struct ip_set *set;
+       struct sk_buff *skb2;
+       struct nlmsghdr *nlh2;
+       ip_set_id_t index;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (index == IPSET_INVALID_ID)
+               return -ENOENT;
+       set = ip_set_list[index];
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+                        IPSET_CMD_HEADER);
+       if (!nlh2)
+               goto nlmsg_failure;
+       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
+       NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
+       NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
+       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+       nlmsg_end(skb2, nlh2);
+
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+       kfree_skb(skb2);
+       return -EMSGSIZE;
+}
+
+/* Get type data */
+
+static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_TYPENAME]   = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_FAMILY]     = { .type = NLA_U8 },
+};
+
+static int
+ip_set_type(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct sk_buff *skb2;
+       struct nlmsghdr *nlh2;
+       u8 family, min, max;
+       const char *typename;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_TYPENAME] == NULL ||
+                    attr[IPSET_ATTR_FAMILY] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+       typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+       ret = find_set_type_minmax(typename, family, &min, &max);
+       if (ret)
+               return ret;
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+                        IPSET_CMD_TYPE);
+       if (!nlh2)
+               goto nlmsg_failure;
+       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
+       NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
+       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
+       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+       nlmsg_end(skb2, nlh2);
+
+       pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+       kfree_skb(skb2);
+       return -EMSGSIZE;
+}
+
+/* Get protocol version */
+
+static const struct nla_policy
+ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+};
+
+static int
+ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
+               const struct nlmsghdr *nlh,
+               const struct nlattr * const attr[])
+{
+       struct sk_buff *skb2;
+       struct nlmsghdr *nlh2;
+       int ret = 0;
+
+       if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+                        IPSET_CMD_PROTOCOL);
+       if (!nlh2)
+               goto nlmsg_failure;
+       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       nlmsg_end(skb2, nlh2);
+
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+       kfree_skb(skb2);
+       return -EMSGSIZE;
+}
+
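+/* Dispatch table of the nfnetlink commands handled by this subsystem */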
+static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_CREATE]      = {
+               .call           = ip_set_create,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_create_policy,
+       },
+       [IPSET_CMD_DESTROY]     = {
+               .call           = ip_set_destroy,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_FLUSH]       = {
+               .call           = ip_set_flush,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_RENAME]      = {
+               .call           = ip_set_rename,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname2_policy,
+       },
+       [IPSET_CMD_SWAP]        = {
+               .call           = ip_set_swap,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname2_policy,
+       },
+       [IPSET_CMD_LIST]        = {
+               .call           = ip_set_dump,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_SAVE]        = {
+               .call           = ip_set_dump,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_ADD] = {
+               .call           = ip_set_uadd,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_adt_policy,
+       },
+       [IPSET_CMD_DEL] = {
+               .call           = ip_set_udel,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_adt_policy,
+       },
+       [IPSET_CMD_TEST]        = {
+               .call           = ip_set_utest,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_adt_policy,
+       },
+       [IPSET_CMD_HEADER]      = {
+               .call           = ip_set_header,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_TYPE]        = {
+               .call           = ip_set_type,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_type_policy,
+       },
+       [IPSET_CMD_PROTOCOL]    = {
+               .call           = ip_set_protocol,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_protocol_policy,
+       },
+};
+
+static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
+       .name           = "ip_set",
+       .subsys_id      = NFNL_SUBSYS_IPSET,
+       .cb_count       = IPSET_MSG_MAX,
+       .cb             = ip_set_netlink_subsys_cb,
+};
+
+/* Interface to iptables/ip6tables */
+
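+/*
+ * ip_set_sockfn_get() backs getsockopt(SO_IP_SET): it answers protocol
+ * version queries and translates set names to indices and back, which is
+ * how the iptables/ip6tables set match and target resolve set references.
+ */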
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+{
+       unsigned *op;
+       void *data;
+       int copylen = *len, ret = 0;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+       if (optval != SO_IP_SET)
+               return -EBADF;
+       if (*len < sizeof(unsigned))
+               return -EINVAL;
+
+       data = vmalloc(*len);
+       if (!data)
+               return -ENOMEM;
+       if (copy_from_user(data, user, *len) != 0) {
+               ret = -EFAULT;
+               goto done;
+       }
+       op = (unsigned *) data;
+
+       if (*op < IP_SET_OP_VERSION) {
+               /* Check the version at the beginning of operations */
+               struct ip_set_req_version *req_version = data;
+               if (req_version->version != IPSET_PROTOCOL) {
+                       ret = -EPROTO;
+                       goto done;
+               }
+       }
+
+       switch (*op) {
+       case IP_SET_OP_VERSION: {
+               struct ip_set_req_version *req_version = data;
+
+               if (*len != sizeof(struct ip_set_req_version)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
+               req_version->version = IPSET_PROTOCOL;
+               ret = copy_to_user(user, req_version,
+                                  sizeof(struct ip_set_req_version));
+               goto done;
+       }
+       case IP_SET_OP_GET_BYNAME: {
+               struct ip_set_req_get_set *req_get = data;
+
+               if (*len != sizeof(struct ip_set_req_get_set)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+               nfnl_lock();
+               req_get->set.index = find_set_id(req_get->set.name);
+               nfnl_unlock();
+               goto copy;
+       }
+       case IP_SET_OP_GET_BYINDEX: {
+               struct ip_set_req_get_set *req_get = data;
+
+               if (*len != sizeof(struct ip_set_req_get_set) ||
+                   req_get->set.index >= ip_set_max) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               nfnl_lock();
+               strncpy(req_get->set.name,
+                       ip_set_list[req_get->set.index]
+                               ? ip_set_list[req_get->set.index]->name : "",
+                       IPSET_MAXNAMELEN);
+               nfnl_unlock();
+               goto copy;
+       }
+       default:
+               ret = -EBADMSG;
+               goto done;
+       }       /* end of switch(op) */
+
+copy:
+       ret = copy_to_user(user, data, copylen);
+
+done:
+       vfree(data);
+       if (ret > 0)
+               ret = 0;
+       return ret;
+}
+
+static struct nf_sockopt_ops so_set __read_mostly = {
+       .pf             = PF_INET,
+       .get_optmin     = SO_IP_SET,
+       .get_optmax     = SO_IP_SET + 1,
+       .get            = &ip_set_sockfn_get,
+       .owner          = THIS_MODULE,
+};
+
+static int __init
+ip_set_init(void)
+{
+       int ret;
+
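+       /* max_sets, if given, overrides the default table size; cap it
+        * just below IPSET_INVALID_ID. */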
+       if (max_sets)
+               ip_set_max = max_sets;
+       if (ip_set_max >= IPSET_INVALID_ID)
+               ip_set_max = IPSET_INVALID_ID - 1;
+
+       ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
+                             GFP_KERNEL);
+       if (!ip_set_list) {
+               pr_err("ip_set: Unable to create ip_set_list\n");
+               return -ENOMEM;
+       }
+
+       ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+       if (ret != 0) {
+               pr_err("ip_set: cannot register with nfnetlink.\n");
+               kfree(ip_set_list);
+               return ret;
+       }
+       ret = nf_register_sockopt(&so_set);
+       if (ret != 0) {
+               pr_err("SO_SET registry failed: %d\n", ret);
+               nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+               kfree(ip_set_list);
+               return ret;
+       }
+
+       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+       return 0;
+}
+
+static void __exit
+ip_set_fini(void)
+{
+       /* There can't be any existing set */
+       nf_unregister_sockopt(&so_set);
+       nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+       kfree(ip_set_list);
+       pr_debug("these are the famous last words\n");
+}
+
+module_init(ip_set_init);
+module_exit(ip_set_fini);
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
new file mode 100644 (file)
index 0000000..8d52272
--- /dev/null
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Get Layer-4 data from the packets */
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter/ipset/ip_set_getport.h>
+
+/* We must handle non-linear skbs */
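+/* Extract the layer-4 "port": the real port for TCP and UDP, the type and
+ * code packed into 16 bits for ICMP and ICMPv6. For any other protocol the
+ * port is left untouched and only the protocol number is reported.
+ */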
+static bool
+get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
+        bool src, __be16 *port, u8 *proto)
+{
+       switch (protocol) {
+       case IPPROTO_TCP: {
+               struct tcphdr _tcph;
+               const struct tcphdr *th;
+
+               th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
+               if (th == NULL)
+                       /* No choice either */
+                       return false;
+
+               *port = src ? th->source : th->dest;
+               break;
+       }
+       case IPPROTO_UDP: {
+               struct udphdr _udph;
+               const struct udphdr *uh;
+
+               uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
+               if (uh == NULL)
+                       /* No choice either */
+                       return false;
+
+               *port = src ? uh->source : uh->dest;
+               break;
+       }
+       case IPPROTO_ICMP: {
+               struct icmphdr _ich;
+               const struct icmphdr *ic;
+
+               ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+               if (ic == NULL)
+                       return false;
+
+               *port = (__force __be16)htons((ic->type << 8) | ic->code);
+               break;
+       }
+       case IPPROTO_ICMPV6: {
+               struct icmp6hdr _ich;
+               const struct icmp6hdr *ic;
+
+               ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+               if (ic == NULL)
+                       return false;
+
+               *port = (__force __be16)
+                       htons((ic->icmp6_type << 8) | ic->icmp6_code);
+               break;
+       }
+       default:
+               break;
+       }
+       *proto = protocol;
+
+       return true;
+}
+
+bool
+ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+                   __be16 *port, u8 *proto)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       unsigned int protooff = ip_hdrlen(skb);
+       int protocol = iph->protocol;
+
+       /* See comments at tcp_match in ip_tables.c */
+       if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+               return false;
+
+       return get_port(skb, protocol, protooff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+bool
+ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+                   __be16 *port, u8 *proto)
+{
+       int protoff;
+       u8 nexthdr;
+
+       nexthdr = ipv6_hdr(skb)->nexthdr;
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+       if (protoff < 0)
+               return false;
+
+       return get_port(skb, nexthdr, protoff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
+#endif
+
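+/* Convenience wrapper: succeed only for TCP and UDP, where the extracted
+ * value is a real port number.
+ */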
+bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+       bool ret;
+       u8 proto;
+
+       switch (pf) {
+       case AF_INET:
+               ret = ip_set_get_ip4_port(skb, src, port, &proto);
+               break;
+       case AF_INET6:
+               ret = ip_set_get_ip6_port(skb, src, port, &proto);
+               break;
+       default:
+               return false;
+       }
+       if (!ret)
+               return ret;
+       switch (proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               return true;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
new file mode 100644 (file)
index 0000000..43bcce2
--- /dev/null
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip");
+
+/* Type specific function prefix */
+#define TYPE           hash_ip
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ip4_same_set      hash_ip_same_set
+#define hash_ip6_same_set      hash_ip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ip4_elem {
+       __be32 ip;
+};
+
+/* Member elements with timeout support */
+struct hash_ip4_telem {
+       __be32 ip;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
+                   const struct hash_ip4_elem *ip2)
+{
+       return ip1->ip == ip2->ip;
+}
+
+static inline bool
+hash_ip4_data_isnull(const struct hash_ip4_elem *elem)
+{
+       return elem->ip == 0;
+}
+
+static inline void
+hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
+{
+       dst->ip = src->ip;
+}
+
+/* Zero valued IP addresses cannot be stored */
+static inline void
+hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
+{
+       elem->ip = 0;
+}
+
+static bool
+hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+       const struct hash_ip4_telem *tdata =
+               (const struct hash_ip4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
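+/* Including ip_set_ahash.h with TYPE, PF and HOST_MASK defined expands the
+ * data_* helpers above into the resizable hash table implementation and the
+ * hash_ip4_variant/hash_ip4_tvariant method tables; IP_SET_HASH_WITH_NETMASK
+ * enables the netmask option of this set type.
+ */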
+#define IP_SET_HASH_WITH_NETMASK
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+             enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       __be32 ip;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+       ip &= ip_set_netmask(h->netmask);
+       if (ip == 0)
+               return -EINVAL;
+
+       return adtfn(set, &ip, h->timeout);
+}
+
+static int
+hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+             enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 ip, ip_to, hosts, timeout = h->timeout;
+       __be32 nip;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ip &= ip_set_hostmask(h->netmask);
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST) {
+               nip = htonl(ip);
+               if (nip == 0)
+                       return -IPSET_ERR_HASH_ELEM;
+               return adtfn(set, &nip, timeout);
+       }
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
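+       /* With a netmask shorter than 32 a masked address covers
+        * 2^(32 - netmask) hosts, so step the range by that many at once. */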
+       hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+
+       for (; !before(ip_to, ip); ip += hosts) {
+               nip = htonl(ip);
+               if (nip == 0)
+                       return -IPSET_ERR_HASH_ELEM;
+               ret = adtfn(set, &nip, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout &&
+              x->netmask == y->netmask;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ip6_elem {
+       union nf_inet_addr ip;
+};
+
+struct hash_ip6_telem {
+       union nf_inet_addr ip;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
+                   const struct hash_ip6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
+}
+
+static inline bool
+hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
+{
+       return ipv6_addr_any(&elem->ip.in6);
+}
+
+static inline void
+hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
+{
+       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+}
+
+static inline void
+hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
+{
+       ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static bool
+hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+       const struct hash_ip6_telem *e =
+               (const struct hash_ip6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+             enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       union nf_inet_addr ip;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+       ip6_netmask(&ip, h->netmask);
+       if (ipv6_addr_any(&ip.in6))
+               return -EINVAL;
+
+       return adtfn(set, &ip, h->timeout);
+}
+
+static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
+       [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+       [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+};
+
+static int
+hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
+             enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       union nf_inet_addr ip;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ip6_netmask(&ip, h->netmask);
+       if (ipv6_addr_any(&ip.in6))
+               return -IPSET_ERR_HASH_ELEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       ret = adtfn(set, &ip, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 netmask, hbits;
+       struct ip_set_hash *h;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+       netmask = set->family == AF_INET ? 32 : 128;
+       pr_debug("Create set %s with family %s\n",
+                set->name, set->family == AF_INET ? "inet" : "inet6");
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       if (tb[IPSET_ATTR_NETMASK]) {
+               netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+               if ((set->family == AF_INET && netmask > 32) ||
+                   (set->family == AF_INET6 && netmask > 128) ||
+                   netmask == 0)
+                       return -IPSET_ERR_INVALID_NETMASK;
+       }
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       h->netmask = netmask;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
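+       /* A timeout given at create time selects the timeout-capable
+        * variant and starts garbage collection of expired elements. */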
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ip4_tvariant : &hash_ip6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ip4_gc_init(set);
+               else
+                       hash_ip6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ip4_variant : &hash_ip6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ip_type __read_mostly = {
+       .name           = "hash:ip",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ip_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_NETMASK]    = { .type = NLA_U8  },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ip_init(void)
+{
+       return ip_set_type_register(&hash_ip_type);
+}
+
+static void __exit
+hash_ip_fini(void)
+{
+       ip_set_type_unregister(&hash_ip_type);
+}
+
+module_init(hash_ip_init);
+module_exit(hash_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
new file mode 100644 (file)
index 0000000..adbe787
--- /dev/null
@@ -0,0 +1,544 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port");
+
+/* Type specific function prefix */
+#define TYPE           hash_ipport
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipport4_same_set  hash_ipport_same_set
+#define hash_ipport6_same_set  hash_ipport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipport4_elem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipport4_telem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
+                       const struct hash_ipport4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
+                      const struct hash_ipport4_elem *src)
+{
+       dst->ip = src->ip;
+       dst->port = src->port;
+       dst->proto = src->proto;
+}
+
+static inline void
+hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipport4_data_list(struct sk_buff *skb,
+                      const struct hash_ipport4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipport4_data_tlist(struct sk_buff *skb,
+                       const struct hash_ipport4_elem *data)
+{
+       const struct hash_ipport4_telem *tdata =
+               (const struct hash_ipport4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport4_elem data = { };
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport4_elem data = { };
+       u32 ip, ip_to, p, port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
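+       /* Only TCP, UDP and ICMP carry a meaningful port here; for any
+        * other protocol the port is forced to zero. */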
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+             tb[IPSET_ATTR_PORT_TO])) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip = ntohl(data.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       port = ntohs(data.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       } else
+               port_to = port;
+
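+       /* Expand the requested address and port ranges element by element. */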
+       for (; !before(ip_to, ip); ip++)
+               for (p = port; p <= port_to; p++) {
+                       data.ip = htonl(ip);
+                       data.port = htons(p);
+                       ret = adtfn(set, &data, timeout);
+
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+               }
+       return ret;
+}
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipport6_elem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+struct hash_ipport6_telem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
+                       const struct hash_ipport6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
+                      const struct hash_ipport6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipport6_data_list(struct sk_buff *skb,
+                      const struct hash_ipport6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipport6_data_tlist(struct sk_buff *skb,
+                       const struct hash_ipport6_elem *data)
+{
+       const struct hash_ipport6_telem *e =
+               (const struct hash_ipport6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport6_elem data = { };
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport6_elem data = { };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:ip,port type of sets */
+
+static int
+hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ipport4_gc_init(set);
+               else
+                       hash_ipport6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ipport4_variant : &hash_ipport6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ipport_type __read_mostly = {
+       .name           = "hash:ip,port",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ipport_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipport_init(void)
+{
+       return ip_set_type_register(&hash_ipport_type);
+}
+
+static void __exit
+hash_ipport_fini(void)
+{
+       ip_set_type_unregister(&hash_ipport_type);
+}
+
+module_init(hash_ipport_init);
+module_exit(hash_ipport_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
new file mode 100644 (file)
index 0000000..22e23ab
--- /dev/null
@@ -0,0 +1,562 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,ip");
+
+/* Type specific function prefix */
+#define TYPE           hash_ipportip
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportip4_same_set        hash_ipportip_same_set
+#define hash_ipportip6_same_set        hash_ipportip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportip4_elem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportip4_telem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
+                         const struct hash_ipportip4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->ip2 == ip2->ip2 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
+                        const struct hash_ipportip4_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipportip4_data_list(struct sk_buff *skb,
+                      const struct hash_ipportip4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportip4_data_tlist(struct sk_buff *skb,
+                       const struct hash_ipportip4_elem *data)
+{
+       const struct hash_ipportip4_telem *tdata =
+               (const struct hash_ipportip4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip4_elem data = { };
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+                   enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip4_elem data = { };
+       u32 ip, ip_to, p, port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+             tb[IPSET_ATTR_PORT_TO])) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip = ntohl(data.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       port = ntohs(data.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       } else
+               port_to = port;
+
+       for (; !before(ip_to, ip); ip++)
+               for (p = port; p <= port_to; p++) {
+                       data.ip = htonl(ip);
+                       data.port = htons(p);
+                       ret = adtfn(set, &data, timeout);
+
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+               }
+       return ret;
+}
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportip6_elem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+struct hash_ipportip6_telem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
+                         const struct hash_ipportip6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
+                        const struct hash_ipportip6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipportip6_data_list(struct sk_buff *skb,
+                        const struct hash_ipportip6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
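+
+/* Note on the listing helpers: the NLA_PUT_* macros jump to nla_put_failure
+ * when an attribute does not fit into the skb, so these functions return 0
+ * on success and 1 when the dump ran out of room (despite the bool return
+ * type). */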
+
+static bool
+hash_ipportip6_data_tlist(struct sk_buff *skb,
+                         const struct hash_ipportip6_elem *data)
+{
+       const struct hash_ipportip6_telem *e =
+               (const struct hash_ipportip6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip6_elem data = { };
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
+                   enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip6_elem data = { };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:ip,port,ip type of sets */
+
+static int
+hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ipportip4_gc_init(set);
+               else
+                       hash_ipportip6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportip4_variant : &hash_ipportip6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ipportip_type __read_mostly = {
+       .name           = "hash:ip,port,ip",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+       .dimension      = IPSET_DIM_THREE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ipportip_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipportip_init(void)
+{
+       return ip_set_type_register(&hash_ipportip_type);
+}
+
+static void __exit
+hash_ipportip_fini(void)
+{
+       ip_set_type_unregister(&hash_ipportip_type);
+}
+
+module_init(hash_ipportip_init);
+module_exit(hash_ipportip_fini);
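
A minimal userspace sketch of the range expansion done by hash_ipportip4_uadt()
above, for illustration only: the kernel's before() comparison and the adtfn()
callback are replaced by plain arithmetic and a printout, and the sample
addresses are arbitrary.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Walk every (ip, port) pair of the inclusive ranges in host byte order,
     * the way hash_ipportip4_uadt() does before handing each one to adtfn(). */
    static void expand(uint32_t ip_from, uint32_t ip_to,
                       uint16_t port_from, uint16_t port_to)
    {
            uint32_t ip;
            uint32_t port;          /* wider than u16 so port_to + 1 cannot wrap */

            for (ip = ip_from; ip <= ip_to; ip++) {
                    for (port = port_from; port <= port_to; port++) {
                            struct in_addr a = { .s_addr = htonl(ip) };

                            printf("%s,%u\n", inet_ntoa(a), (unsigned int)port);
                    }
                    if (ip == ip_to)        /* avoid wrap at 255.255.255.255 */
                            break;
            }
    }

    int main(void)
    {
            /* 192.168.1.0-192.168.1.3 with ports 80-81: eight elements */
            expand(0xC0A80100, 0xC0A80103, 80, 81);
            return 0;
    }
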
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
new file mode 100644
index 0000000..6033e8b
--- /dev/null
@@ -0,0 +1,628 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,net");
+
+/* Type specific function prefix */
+#define TYPE           hash_ipportnet
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportnet4_same_set       hash_ipportnet_same_set
+#define hash_ipportnet6_same_set       hash_ipportnet_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportnet4_elem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportnet4_telem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
+                          const struct hash_ipportnet4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->ip2 == ip2->ip2 &&
+              ip1->cidr == ip2->cidr &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
+                         const struct hash_ipportnet4_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
+{
+       elem->ip2 &= ip_set_netmask(cidr);
+       elem->cidr = cidr;
+}
+
+static inline void
+hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipportnet4_data_list(struct sk_buff *skb,
+                         const struct hash_ipportnet4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportnet4_data_tlist(struct sk_buff *skb,
+                          const struct hash_ipportnet4_elem *data)
+{
+       const struct hash_ipportnet4_telem *tdata =
+               (const struct hash_ipportnet4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
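+
+/* IP_SET_HASH_WITH_PROTO and IP_SET_HASH_WITH_NETS are assumed to switch on
+ * optional code paths in the generic ip_set_ahash.h template included below:
+ * protocol-aware element handling and the per-prefix-length bookkeeping that
+ * network-valued (cidr) elements require. */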
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet4_elem data =
+               { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+       data.ip2 &= ip_set_netmask(data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+       u32 ip, ip_to, p, port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR2])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       data.ip2 &= ip_set_netmask(data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+             tb[IPSET_ATTR_PORT_TO])) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip = ntohl(data.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       port = ntohs(data.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       } else
+               port_to = port;
+
+       for (; !before(ip_to, ip); ip++)
+               for (p = port; p <= port_to; p++) {
+                       data.ip = htonl(ip);
+                       data.port = htons(p);
+                       ret = adtfn(set, &data, timeout);
+
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+               }
+       return ret;
+}
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportnet6_elem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+};
+
+struct hash_ipportnet6_telem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
+                          const struct hash_ipportnet6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+              ip1->cidr == ip2->cidr &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
+                         const struct hash_ipportnet6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
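+
+/* Worked example (assuming ip_set_netmask6(prefix) yields the four 32-bit
+ * words of the /prefix mask): masking 2001:db8::1 with prefix 64 clears the
+ * low 64 bits, so the element stores the network address 2001:db8::. */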
+
+static inline void
+hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
+{
+       ip6_netmask(&elem->ip2, cidr);
+       elem->cidr = cidr;
+}
+
+static bool
+hash_ipportnet6_data_list(struct sk_buff *skb,
+                         const struct hash_ipportnet6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportnet6_data_tlist(struct sk_buff *skb,
+                          const struct hash_ipportnet6_elem *data)
+{
+       const struct hash_ipportnet6_telem *e =
+               (const struct hash_ipportnet6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet6_elem data =
+               { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+       ip6_netmask(&data.ip2, data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR2])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&data.ip2, data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:ip,port,net type of sets */
+
+static int
+hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
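+       /* The trailing array (one slot per possible prefix length) is assumed
+        * to hold the per-cidr bookkeeping that IP_SET_HASH_WITH_NETS needs
+        * when packets are matched against the stored networks. */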
+       h = kzalloc(sizeof(*h)
+                   + sizeof(struct ip_set_hash_nets)
+                     * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportnet4_tvariant
+                       : &hash_ipportnet6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ipportnet4_gc_init(set);
+               else
+                       hash_ipportnet6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ipportnet_type __read_mostly = {
+       .name           = "hash:ip,port,net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+       .dimension      = IPSET_DIM_THREE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ipportnet_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipportnet_init(void)
+{
+       return ip_set_type_register(&hash_ipportnet_type);
+}
+
+static void __exit
+hash_ipportnet_fini(void)
+{
+       ip_set_type_unregister(&hash_ipportnet_type);
+}
+
+module_init(hash_ipportnet_init);
+module_exit(hash_ipportnet_fini);
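
The uadt paths in this file and the previous one derive an address interval
from a prefix length via ip_set_hostmask() and mask network addresses with
ip_set_netmask(). The host-order sketch below shows the intended arithmetic;
treating the helpers as "all ones shifted left by 32 - cidr" is an assumption
here, and the pfxlen tables shipped with the patch remain the authoritative
definition.

    #include <stdio.h>
    #include <stdint.h>

    /* Host-order /cidr mask: /24 -> 0xffffff00; /0 -> 0 (a shift by 32 on a
     * 32-bit value would be undefined, hence the explicit special case). */
    static uint32_t hostmask(uint8_t cidr)
    {
            return cidr ? 0xffffffffu << (32 - cidr) : 0;
    }

    int main(void)
    {
            uint32_t ip = 0xC0A8014D;       /* 192.168.1.77 */
            uint8_t cidr = 24;
            uint32_t first = ip & hostmask(cidr);
            uint32_t last = first | ~hostmask(cidr);

            /* Mirrors "ip &= ip_set_hostmask(cidr);
             *          ip_to = ip | ~ip_set_hostmask(cidr);" in the uadt code. */
            printf("/%u of 0x%08x -> [0x%08x, 0x%08x]\n",
                   (unsigned int)cidr, ip, first, last);
            return 0;
    }
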
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
new file mode 100644
index 0000000..c4db202
--- /dev/null
@@ -0,0 +1,458 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net type of IP sets");
+MODULE_ALIAS("ip_set_hash:net");
+
+/* Type specific function prefix */
+#define TYPE           hash_net
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_net4_same_set     hash_net_same_set
+#define hash_net6_same_set     hash_net_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_net4_elem {
+       __be32 ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_net4_telem {
+       __be32 ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_net4_data_equal(const struct hash_net4_elem *ip1,
+                   const struct hash_net4_elem *ip2)
+{
+       return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net4_data_isnull(const struct hash_net4_elem *elem)
+{
+       return elem->cidr == 0;
+}
+
+static inline void
+hash_net4_data_copy(struct hash_net4_elem *dst,
+                   const struct hash_net4_elem *src)
+{
+       dst->ip = src->ip;
+       dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
+{
+       elem->ip &= ip_set_netmask(cidr);
+       elem->cidr = cidr;
+}
+
+/* Zero CIDR values cannot be stored */
+static inline void
+hash_net4_data_zero_out(struct hash_net4_elem *elem)
+{
+       elem->cidr = 0;
+}
+
+static bool
+hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+       const struct hash_net4_telem *tdata =
+               (const struct hash_net4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
+              enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net4_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       data.ip &= ip_set_netmask(data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net4_elem data = { .cidr = HOST_MASK };
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       data.ip &= ip_set_netmask(data.cidr);
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       ret = adtfn(set, &data, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_net6_elem {
+       union nf_inet_addr ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+};
+
+struct hash_net6_telem {
+       union nf_inet_addr ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_net6_data_equal(const struct hash_net6_elem *ip1,
+                    const struct hash_net6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net6_data_isnull(const struct hash_net6_elem *elem)
+{
+       return elem->cidr == 0;
+}
+
+static inline void
+hash_net6_data_copy(struct hash_net6_elem *dst,
+                   const struct hash_net6_elem *src)
+{
+       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+       dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net6_data_zero_out(struct hash_net6_elem *elem)
+{
+       elem->cidr = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
+{
+       ip6_netmask(&elem->ip, cidr);
+       elem->cidr = cidr;
+}
+
+static bool
+hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+       const struct hash_net6_telem *e =
+               (const struct hash_net6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
+              enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net6_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6_netmask(&data.ip, data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net6_elem data = { .cidr = HOST_MASK };
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&data.ip, data.cidr);
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       ret = adtfn(set, &data, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net type of sets */
+
+static int
+hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       struct ip_set_hash *h;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h)
+                   + sizeof(struct ip_set_hash_nets)
+                     * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_net4_tvariant : &hash_net6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_net4_gc_init(set);
+               else
+                       hash_net6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_net4_variant : &hash_net6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_net_type __read_mostly = {
+       .name           = "hash:net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_net_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_net_init(void)
+{
+       return ip_set_type_register(&hash_net_type);
+}
+
+static void __exit
+hash_net_fini(void)
+{
+       ip_set_type_unregister(&hash_net_type);
+}
+
+module_init(hash_net_init);
+module_exit(hash_net_fini);
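
A hash:net element stores the already-masked network address together with its
prefix length, so a kernel-side lookup has to repeat the probe for every prefix
length present in the set. The sketch below illustrates that idea with a flat
array in place of the real hash table; that this is what the
IP_SET_HASH_WITH_NETS bookkeeping in ip_set_ahash.h enables is an assumption,
since the template itself is not part of this patch.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct net_elem {
            uint32_t net;   /* host order, already masked down to cidr bits */
            uint8_t cidr;
    };

    static uint32_t hostmask(uint8_t cidr)
    {
            return cidr ? 0xffffffffu << (32 - cidr) : 0;
    }

    /* Linear scan for clarity; the real set probes one hash bucket per
     * distinct prefix length that is currently stored. */
    static bool match(const struct net_elem *set, size_t n, uint32_t ip)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if ((ip & hostmask(set[i].cidr)) == set[i].net)
                            return true;
            return false;
    }

    int main(void)
    {
            const struct net_elem set[] = {
                    { 0x0A000000, 8 },      /* 10.0.0.0/8     */
                    { 0xC0A80100, 24 },     /* 192.168.1.0/24 */
            };

            printf("%d %d\n",
                   match(set, 2, 0x0A0102FF),       /* 10.1.2.255  -> 1 */
                   match(set, 2, 0xC0A80201));      /* 192.168.2.1 -> 0 */
            return 0;
    }
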
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
new file mode 100644
index 0000000..34a1656
--- /dev/null
@@ -0,0 +1,578 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,port");
+
+/* Type specific function prefix */
+#define TYPE           hash_netport
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netport4_same_set hash_netport_same_set
+#define hash_netport6_same_set hash_netport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netport4_elem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_netport4_telem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
+                        const struct hash_netport4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto &&
+              ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport4_data_isnull(const struct hash_netport4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_netport4_data_copy(struct hash_netport4_elem *dst,
+                       const struct hash_netport4_elem *src)
+{
+       dst->ip = src->ip;
+       dst->port = src->port;
+       dst->proto = src->proto;
+       dst->cidr = src->cidr;
+}
+
+static inline void
+hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
+{
+       elem->ip &= ip_set_netmask(cidr);
+       elem->cidr = cidr;
+}
+
+static inline void
+hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_netport4_data_list(struct sk_buff *skb,
+                       const struct hash_netport4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_netport4_data_tlist(struct sk_buff *skb,
+                        const struct hash_netport4_elem *data)
+{
+       const struct hash_netport4_telem *tdata =
+               (const struct hash_netport4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport4_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       data.ip &= ip_set_netmask(data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+                  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport4_elem data = { .cidr = HOST_MASK };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+       data.ip &= ip_set_netmask(data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netport6_elem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+};
+
+struct hash_netport6_telem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
+                        const struct hash_netport6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto &&
+              ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_netport6_data_copy(struct hash_netport6_elem *dst,
+                       const struct hash_netport6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
+{
+       ip6_netmask(&elem->ip, cidr);
+       elem->cidr = cidr;
+}
+
+static bool
+hash_netport6_data_list(struct sk_buff *skb,
+                       const struct hash_netport6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_netport6_data_tlist(struct sk_buff *skb,
+                        const struct hash_netport6_elem *data)
+{
+       const struct hash_netport6_telem *e =
+               (const struct hash_netport6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport6_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6_netmask(&data.ip, data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
+                  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport6_elem data = { .cidr = HOST_MASK };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+       ip6_netmask(&data.ip, data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:net,port type of sets */
+
+static int
+hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h)
+                   + sizeof(struct ip_set_hash_nets)
+                     * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_netport4_tvariant : &hash_netport6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_netport4_gc_init(set);
+               else
+                       hash_netport6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_netport4_variant : &hash_netport6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_netport_type __read_mostly = {
+       .name           = "hash:net,port",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_netport_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_netport_init(void)
+{
+       return ip_set_type_register(&hash_netport_type);
+}
+
+static void __exit
+hash_netport_fini(void)
+{
+       ip_set_type_unregister(&hash_netport_type);
+}
+
+module_init(hash_netport_init);
+module_exit(hash_netport_fini);
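
As an aside on how the hash:net,port type normalises its elements: before an element is hashed, its address is masked down to the stored cidr (the ip6_netmask() call above), so every host of a given network maps onto the same element. Below is a minimal userspace sketch of that masking, assuming an invented helper name mask_v6_prefix(); the kernel itself uses the precomputed prefix tables from pfxlen.c instead of shifting per packet.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Keep only the first 'cidr' bits of an IPv6 address -- the same
     * normalisation ip6_netmask() applies to hash:net,port elements. */
    static void mask_v6_prefix(struct in6_addr *ip, unsigned int cidr)
    {
            unsigned int i;

            for (i = 0; i < 16; i++) {
                    if (cidr >= 8) {
                            cidr -= 8;      /* whole byte is network part */
                            continue;
                    }
                    ip->s6_addr[i] &= cidr ? (uint8_t)(0xffu << (8 - cidr)) : 0;
                    cidr = 0;               /* everything after this is host bits */
            }
    }

    int main(void)
    {
            struct in6_addr ip;
            char buf[INET6_ADDRSTRLEN];

            inet_pton(AF_INET6, "2001:db8:abcd:1234::42", &ip);
            mask_v6_prefix(&ip, 48);
            inet_ntop(AF_INET6, &ip, buf, sizeof(buf));
            printf("network part: %s/48\n", buf);   /* 2001:db8:abcd::/48 */
            return 0;
    }

With the element normalised this way, hash_netport6_uadt() handles an optional port range simply by inserting one such element per port between IPSET_ATTR_PORT and IPSET_ATTR_PORT_TO.
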
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
new file mode 100644 (file)
index 0000000..a47c329
--- /dev/null
@@ -0,0 +1,584 @@
+/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the list:set type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_list.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("list:set type of IP sets");
+MODULE_ALIAS("ip_set_list:set");
+
+/* Member elements without and with timeout */
+struct set_elem {
+       ip_set_id_t id;
+};
+
+struct set_telem {
+       ip_set_id_t id;
+       unsigned long timeout;
+};
+
+/* Type structure */
+struct list_set {
+       size_t dsize;           /* element size */
+       u32 size;               /* size of set list array */
+       u32 timeout;            /* timeout value */
+       struct timer_list gc;   /* garbage collection */
+       struct set_elem members[0]; /* the set members */
+};
+
+static inline struct set_elem *
+list_set_elem(const struct list_set *map, u32 id)
+{
+       return (struct set_elem *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+list_set_timeout(const struct list_set *map, u32 id)
+{
+       const struct set_telem *elem =
+               (const struct set_telem *) list_set_elem(map, id);
+
+       return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+list_set_expired(const struct list_set *map, u32 id)
+{
+       const struct set_telem *elem =
+               (const struct set_telem *) list_set_elem(map, id);
+
+       return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+list_set_exist(const struct set_telem *elem)
+{
+       return elem->id != IPSET_INVALID_ID &&
+              !ip_set_timeout_expired(elem->timeout);
+}
+
+/* Set list without and with timeout */
+
+static int
+list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
+             enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct list_set *map = set->data;
+       struct set_elem *elem;
+       u32 i;
+       int ret;
+
+       for (i = 0; i < map->size; i++) {
+               elem = list_set_elem(map, i);
+               if (elem->id == IPSET_INVALID_ID)
+                       return 0;
+               if (with_timeout(map->timeout) && list_set_expired(map, i))
+                       continue;
+               switch (adt) {
+               case IPSET_TEST:
+                       ret = ip_set_test(elem->id, skb, pf, dim, flags);
+                       if (ret > 0)
+                               return ret;
+                       break;
+               case IPSET_ADD:
+                       ret = ip_set_add(elem->id, skb, pf, dim, flags);
+                       if (ret == 0)
+                               return ret;
+                       break;
+               case IPSET_DEL:
+                       ret = ip_set_del(elem->id, skb, pf, dim, flags);
+                       if (ret == 0)
+                               return ret;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return -EINVAL;
+}
+
+static bool
+next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+       const struct set_elem *elem;
+
+       if (i + 1 < map->size) {
+               elem = list_set_elem(map, i + 1);
+               return !!(elem->id == id &&
+                         !(with_timeout(map->timeout) &&
+                           list_set_expired(map, i + 1)));
+       }
+
+       return 0;
+}
+
+static void
+list_elem_add(struct list_set *map, u32 i, ip_set_id_t id)
+{
+       struct set_elem *e;
+
+       for (; i < map->size; i++) {
+               e = list_set_elem(map, i);
+               swap(e->id, id);
+               if (e->id == IPSET_INVALID_ID)
+                       break;
+       }
+}
+
+static void
+list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
+              unsigned long timeout)
+{
+       struct set_telem *e;
+
+       for (; i < map->size; i++) {
+               e = (struct set_telem *)list_set_elem(map, i);
+               swap(e->id, id);
+               if (e->id == IPSET_INVALID_ID)
+                       break;
+               swap(e->timeout, timeout);
+       }
+}
+
+static int
+list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
+            unsigned long timeout)
+{
+       const struct set_elem *e = list_set_elem(map, i);
+
+       if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+               /* Last element replaced: e.g. add new,before,last */
+               ip_set_put_byindex(e->id);
+       if (with_timeout(map->timeout))
+               list_elem_tadd(map, i, id, timeout);
+       else
+               list_elem_add(map, i, id);
+
+       return 0;
+}
+
+static int
+list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+{
+       struct set_elem *a = list_set_elem(map, i), *b;
+
+       ip_set_put_byindex(id);
+
+       for (; i < map->size - 1; i++) {
+               b = list_set_elem(map, i + 1);
+               a->id = b->id;
+               if (with_timeout(map->timeout))
+                       ((struct set_telem *)a)->timeout =
+                               ((struct set_telem *)b)->timeout;
+               a = b;
+               if (a->id == IPSET_INVALID_ID)
+                       break;
+       }
+       /* Last element */
+       a->id = IPSET_INVALID_ID;
+       return 0;
+}
+
+static int
+list_set_uadt(struct ip_set *set, struct nlattr *tb[],
+             enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       struct list_set *map = set->data;
+       bool with_timeout = with_timeout(map->timeout);
+       int before = 0;
+       u32 timeout = map->timeout;
+       ip_set_id_t id, refid = IPSET_INVALID_ID;
+       const struct set_elem *elem;
+       struct ip_set *s;
+       u32 i;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_NAME] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+       if (id == IPSET_INVALID_ID)
+               return -IPSET_ERR_NAME;
+       /* "Loop detection" */
+       if (s->type->features & IPSET_TYPE_NAME) {
+               ret = -IPSET_ERR_LOOP;
+               goto finish;
+       }
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               before = f & IPSET_FLAG_BEFORE;
+       }
+
+       if (before && !tb[IPSET_ATTR_NAMEREF]) {
+               ret = -IPSET_ERR_BEFORE;
+               goto finish;
+       }
+
+       if (tb[IPSET_ATTR_NAMEREF]) {
+               refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+                                         &s);
+               if (refid == IPSET_INVALID_ID) {
+                       ret = -IPSET_ERR_NAMEREF;
+                       goto finish;
+               }
+               if (!before)
+                       before = -1;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout) {
+                       ret = -IPSET_ERR_TIMEOUT;
+                       goto finish;
+               }
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       switch (adt) {
+       case IPSET_TEST:
+               for (i = 0; i < map->size && !ret; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == IPSET_INVALID_ID ||
+                           (before != 0 && i + 1 >= map->size))
+                               break;
+                       else if (with_timeout && list_set_expired(map, i))
+                               continue;
+                       else if (before > 0 && elem->id == id)
+                               ret = next_id_eq(map, i, refid);
+                       else if (before < 0 && elem->id == refid)
+                               ret = next_id_eq(map, i, id);
+                       else if (before == 0 && elem->id == id)
+                               ret = 1;
+               }
+               break;
+       case IPSET_ADD:
+               for (i = 0; i < map->size && !ret; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == id &&
+                           !(with_timeout && list_set_expired(map, i)))
+                               ret = -IPSET_ERR_EXIST;
+               }
+               if (ret == -IPSET_ERR_EXIST)
+                       break;
+               ret = -IPSET_ERR_LIST_FULL;
+               for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == IPSET_INVALID_ID)
+                               ret = before != 0 ? -IPSET_ERR_REF_EXIST
+                                       : list_set_add(map, i, id, timeout);
+                       else if (elem->id != refid)
+                               continue;
+                       else if (with_timeout && list_set_expired(map, i))
+                               ret = -IPSET_ERR_REF_EXIST;
+                       else if (before)
+                               ret = list_set_add(map, i, id, timeout);
+                       else if (i + 1 < map->size)
+                               ret = list_set_add(map, i + 1, id, timeout);
+               }
+               break;
+       case IPSET_DEL:
+               ret = -IPSET_ERR_EXIST;
+               for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == IPSET_INVALID_ID) {
+                               ret = before != 0 ? -IPSET_ERR_REF_EXIST
+                                                 : -IPSET_ERR_EXIST;
+                               break;
+                       } else if (with_timeout && list_set_expired(map, i))
+                               continue;
+                       else if (elem->id == id &&
+                                (before == 0 ||
+                                 (before > 0 &&
+                                  next_id_eq(map, i, refid))))
+                               ret = list_set_del(map, id, i);
+                       else if (before < 0 &&
+                                elem->id == refid &&
+                                next_id_eq(map, i, id))
+                               ret = list_set_del(map, id, i + 1);
+               }
+               break;
+       default:
+               break;
+       }
+
+finish:
+       if (refid != IPSET_INVALID_ID)
+               ip_set_put_byindex(refid);
+       if (adt != IPSET_ADD || ret)
+               ip_set_put_byindex(id);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+list_set_flush(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+       struct set_elem *elem;
+       u32 i;
+
+       for (i = 0; i < map->size; i++) {
+               elem = list_set_elem(map, i);
+               if (elem->id != IPSET_INVALID_ID) {
+                       ip_set_put_byindex(elem->id);
+                       elem->id = IPSET_INVALID_ID;
+               }
+       }
+}
+
+static void
+list_set_destroy(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+       list_set_flush(set);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static int
+list_set_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct list_set *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map) + map->size * map->dsize));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int
+list_set_list(const struct ip_set *set,
+             struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct list_set *map = set->data;
+       struct nlattr *atd, *nested;
+       u32 i, first = cb->args[2];
+       const struct set_elem *e;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] < map->size; cb->args[2]++) {
+               i = cb->args[2];
+               e = list_set_elem(map, i);
+               if (e->id == IPSET_INVALID_ID)
+                       goto finish;
+               if (with_timeout(map->timeout) && list_set_expired(map, i))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (i == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
+                              ip_set_name_byindex(e->id));
+               if (with_timeout(map->timeout)) {
+                       const struct set_telem *te =
+                               (const struct set_telem *) e;
+                       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                                     htonl(ip_set_timeout_get(te->timeout)));
+               }
+               ipset_nest_end(skb, nested);
+       }
+finish:
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(i == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static bool
+list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct list_set *x = a->data;
+       const struct list_set *y = b->data;
+
+       return x->size == y->size &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant list_set = {
+       .kadt   = list_set_kadt,
+       .uadt   = list_set_uadt,
+       .destroy = list_set_destroy,
+       .flush  = list_set_flush,
+       .head   = list_set_head,
+       .list   = list_set_list,
+       .same_set = list_set_same_set,
+};
+
+static void
+list_set_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct list_set *map = set->data;
+       struct set_telem *e;
+       u32 i;
+
+       /* We run parallel with other readers (test element)
+        * but adding/deleting new entries is locked out */
+       read_lock_bh(&set->lock);
+       for (i = map->size; i > 0; i--) {
+               e = (struct set_telem *) list_set_elem(map, i - 1);
+               if (e->id != IPSET_INVALID_ID &&
+                   list_set_expired(map, i - 1))
+                       list_set_del(map, e->id, i - 1);
+       }
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+list_set_gc_init(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = list_set_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+/* Create list:set type of sets */
+
+static bool
+init_list_set(struct ip_set *set, u32 size, size_t dsize,
+             unsigned long timeout)
+{
+       struct list_set *map;
+       struct set_elem *e;
+       u32 i;
+
+       map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+       if (!map)
+               return false;
+
+       map->size = size;
+       map->dsize = dsize;
+       map->timeout = timeout;
+       set->data = map;
+
+       for (i = 0; i < size; i++) {
+               e = list_set_elem(map, i);
+               e->id = IPSET_INVALID_ID;
+       }
+
+       return true;
+}
+
+static int
+list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       u32 size = IP_SET_LIST_DEFAULT_SIZE;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_SIZE])
+               size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
+       if (size < IP_SET_LIST_MIN_SIZE)
+               size = IP_SET_LIST_MIN_SIZE;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!init_list_set(set, size, sizeof(struct set_telem),
+                                  ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
+                       return -ENOMEM;
+
+               list_set_gc_init(set);
+       } else {
+               if (!init_list_set(set, size, sizeof(struct set_elem),
+                                  IPSET_NO_TIMEOUT))
+                       return -ENOMEM;
+       }
+       set->variant = &list_set;
+       return 0;
+}
+
+static struct ip_set_type list_set_type __read_mostly = {
+       .name           = "list:set",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = list_set_create,
+       .create_policy  = {
+               [IPSET_ATTR_SIZE]       = { .type = NLA_U32 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_NAME]       = { .type = NLA_STRING,
+                                           .len = IPSET_MAXNAMELEN },
+               [IPSET_ATTR_NAMEREF]    = { .type = NLA_STRING,
+                                           .len = IPSET_MAXNAMELEN },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+list_set_init(void)
+{
+       return ip_set_type_register(&list_set_type);
+}
+
+static void __exit
+list_set_fini(void)
+{
+       ip_set_type_unregister(&list_set_type);
+}
+
+module_init(list_set_init);
+module_exit(list_set_fini);
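
The list:set type keeps its members in one flat allocation whose per-element stride is fixed at create time (sizeof(struct set_elem) or sizeof(struct set_telem)), and list_set_elem() reaches slot i by plain byte arithmetic, so the same add/del/shift code serves both layouts. A standalone sketch of that addressing scheme follows; the demo_* names are invented and make no claim to match the kernel structures.

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy counterparts of the two element layouts, with and without timeout. */
    struct demo_elem  { unsigned short id; };
    struct demo_telem { unsigned short id; unsigned long timeout; };

    struct demo_list {
            size_t dsize;            /* stride picked at create time */
            unsigned int size;       /* number of slots */
            unsigned char members[]; /* flat array of 'size' elements */
    };

    /* Same idea as list_set_elem(): index a byte array with a runtime stride. */
    static void *demo_list_elem(struct demo_list *map, unsigned int i)
    {
            return map->members + i * map->dsize;
    }

    int main(void)
    {
            unsigned int i, n = 4;
            struct demo_list *map;

            map = calloc(1, sizeof(*map) + n * sizeof(struct demo_telem));
            if (!map)
                    return 1;
            map->size = n;
            map->dsize = sizeof(struct demo_telem);

            for (i = 0; i < n; i++)
                    ((struct demo_telem *)demo_list_elem(map, i))->id = i + 1;
            for (i = 0; i < n; i++)
                    printf("slot %u -> id %hu\n", i,
                           ((struct demo_telem *)demo_list_elem(map, i))->id);
            free(map);
            return 0;
    }

The timeout-aware paths then only need to cast the returned pointer to the wider element, exactly as list_set_timeout() and list_set_expired() do above.
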
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
new file mode 100644 (file)
index 0000000..23f8c81
--- /dev/null
@@ -0,0 +1,291 @@
+#include <linux/netfilter/ipset/pfxlen.h>
+
+/*
+ * Prefixlen maps for fast conversions, by Jan Engelhardt.
+ */
+
+#define E(a, b, c, d) \
+       {.ip6 = { \
+               __constant_htonl(a), __constant_htonl(b), \
+               __constant_htonl(c), __constant_htonl(d), \
+       } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_netmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_netmask_map[] = {
+       E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_netmask_map);
+
+#undef  E
+#define E(a, b, c, d)                                          \
+       {.ip6 = { (__force __be32) a, (__force __be32) b,       \
+                 (__force __be32) c, (__force __be32) d,       \
+       } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_hostmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_hostmask_map[] = {
+       E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
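
Both lookup tables above simply precompute, for every prefix length from 0 to 128, the mask words that the usual shift expression would produce, so callers such as ip6_netmask() avoid per-packet shifts and the cidr == 0 corner case. A tiny userspace illustration for one 32-bit word, matching the ip_set_netmask_map entries (the hostmask table lists the same constants without the byte-order conversion):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* What each 32-bit word of the netmask table encodes for prefix 'cidr'. */
    static uint32_t prefix_to_mask32(unsigned int cidr)
    {
            return cidr ? htonl(~0u << (32 - cidr)) : 0;
    }

    int main(void)
    {
            unsigned int cidr;

            for (cidr = 0; cidr <= 32; cidr += 8) {
                    struct in_addr a = { .s_addr = prefix_to_mask32(cidr) };
                    printf("/%-2u -> %s\n", cidr, inet_ntoa(a));
            }
            return 0;
    }

Indexing a 129-entry table by prefix length yields the same result for IPv6 as well, one word at a time, which is why a single array serves both address families.
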
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index a475ede..5c48ffb 100644 (file)
@@ -43,11 +43,6 @@ EXPORT_SYMBOL(register_ip_vs_app);
 EXPORT_SYMBOL(unregister_ip_vs_app);
 EXPORT_SYMBOL(register_ip_vs_app_inc);
 
-/* ipvs application list head */
-static LIST_HEAD(ip_vs_app_list);
-static DEFINE_MUTEX(__ip_vs_app_mutex);
-
-
 /*
  *     Get an ip_vs_app object
  */
@@ -67,7 +62,8 @@ static inline void ip_vs_app_put(struct ip_vs_app *app)
  *     Allocate/initialize app incarnation and register it in proto apps.
  */
 static int
-ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
+ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+                 __u16 port)
 {
        struct ip_vs_protocol *pp;
        struct ip_vs_app *inc;
@@ -98,7 +94,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
                }
        }
 
-       ret = pp->register_app(inc);
+       ret = pp->register_app(net, inc);
        if (ret)
                goto out;
 
@@ -119,7 +115,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
  *     Release app incarnation
  */
 static void
-ip_vs_app_inc_release(struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_protocol *pp;
 
@@ -127,7 +123,7 @@ ip_vs_app_inc_release(struct ip_vs_app *inc)
                return;
 
        if (pp->unregister_app)
-               pp->unregister_app(inc);
+               pp->unregister_app(net, inc);
 
        IP_VS_DBG(9, "%s App %s:%u unregistered\n",
                  pp->name, inc->name, ntohs(inc->port));
@@ -168,15 +164,17 @@ void ip_vs_app_inc_put(struct ip_vs_app *inc)
  *     Register an application incarnation in protocol applications
  */
 int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
+register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+                      __u16 port)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        int result;
 
-       mutex_lock(&__ip_vs_app_mutex);
+       mutex_lock(&ipvs->app_mutex);
 
-       result = ip_vs_app_inc_new(app, proto, port);
+       result = ip_vs_app_inc_new(net, app, proto, port);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+       mutex_unlock(&ipvs->app_mutex);
 
        return result;
 }
@@ -185,16 +183,17 @@ register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
 /*
  *     ip_vs_app registration routine
  */
-int register_ip_vs_app(struct ip_vs_app *app)
+int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        /* increase the module use count */
        ip_vs_use_count_inc();
 
-       mutex_lock(&__ip_vs_app_mutex);
+       mutex_lock(&ipvs->app_mutex);
 
-       list_add(&app->a_list, &ip_vs_app_list);
+       list_add(&app->a_list, &ipvs->app_list);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+       mutex_unlock(&ipvs->app_mutex);
 
        return 0;
 }
@@ -204,19 +203,20 @@ int register_ip_vs_app(struct ip_vs_app *app)
  *     ip_vs_app unregistration routine
  *     We are sure there are no app incarnations attached to services
  */
-void unregister_ip_vs_app(struct ip_vs_app *app)
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_app *inc, *nxt;
 
-       mutex_lock(&__ip_vs_app_mutex);
+       mutex_lock(&ipvs->app_mutex);
 
        list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-               ip_vs_app_inc_release(inc);
+               ip_vs_app_inc_release(net, inc);
        }
 
        list_del(&app->a_list);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+       mutex_unlock(&ipvs->app_mutex);
 
        /* decrease the module use count */
        ip_vs_use_count_dec();
@@ -226,7 +226,8 @@ void unregister_ip_vs_app(struct ip_vs_app *app)
 /*
  *     Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
  */
-int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
+int ip_vs_bind_app(struct ip_vs_conn *cp,
+                  struct ip_vs_protocol *pp)
 {
        return pp->app_conn_bind(cp);
 }
@@ -481,11 +482,11 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
  *     /proc/net/ip_vs_app entry function
  */
 
-static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
+static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
 {
        struct ip_vs_app *app, *inc;
 
-       list_for_each_entry(app, &ip_vs_app_list, a_list) {
+       list_for_each_entry(app, &ipvs->app_list, a_list) {
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        if (pos-- == 0)
                                return inc;
@@ -497,19 +498,24 @@ static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
 
 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       mutex_lock(&__ip_vs_app_mutex);
+       struct net *net = seq_file_net(seq);
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
+       mutex_lock(&ipvs->app_mutex);
+
+       return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct ip_vs_app *inc, *app;
        struct list_head *e;
+       struct net *net = seq_file_net(seq);
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
        ++*pos;
        if (v == SEQ_START_TOKEN)
-               return ip_vs_app_idx(0);
+               return ip_vs_app_idx(ipvs, 0);
 
        inc = v;
        app = inc->app;
@@ -518,7 +524,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                return list_entry(e, struct ip_vs_app, a_list);
 
        /* go on to next application */
-       for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
+       for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
                app = list_entry(e, struct ip_vs_app, a_list);
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        return inc;
@@ -529,7 +535,9 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
 {
-       mutex_unlock(&__ip_vs_app_mutex);
+       struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq));
+
+       mutex_unlock(&ipvs->app_mutex);
 }
 
 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -557,7 +565,8 @@ static const struct seq_operations ip_vs_app_seq_ops = {
 
 static int ip_vs_app_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ip_vs_app_seq_ops);
+       return seq_open_net(inode, file, &ip_vs_app_seq_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations ip_vs_app_fops = {
@@ -569,15 +578,36 @@ static const struct file_operations ip_vs_app_fops = {
 };
 #endif
 
-int __init ip_vs_app_init(void)
+static int __net_init __ip_vs_app_init(struct net *net)
 {
-       /* we will replace it with proc_net_ipvs_create() soon */
-       proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       INIT_LIST_HEAD(&ipvs->app_list);
+       __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
+       proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
        return 0;
 }
 
+static void __net_exit __ip_vs_app_cleanup(struct net *net)
+{
+       proc_net_remove(net, "ip_vs_app");
+}
+
+static struct pernet_operations ip_vs_app_ops = {
+       .init = __ip_vs_app_init,
+       .exit = __ip_vs_app_cleanup,
+};
+
+int __init ip_vs_app_init(void)
+{
+       return register_pernet_subsys(&ip_vs_app_ops);
+}
+
 
 void ip_vs_app_cleanup(void)
 {
-       proc_net_remove(&init_net, "ip_vs_app");
+       unregister_pernet_subsys(&ip_vs_app_ops);
 }
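
This ip_vs_app conversion follows the standard netns pattern: the global application list and mutex move into struct netns_ipvs, the /proc/net/ip_vs_app entry becomes per-net, and module init/cleanup go through register_pernet_subsys(). For comparison, a minimal sketch of the same pattern built on the generic per-net storage helpers; the demo_* names are invented and this is not IPVS code, which keeps its fields in netns_ipvs (reached via net_ipvs(net)) rather than using net_generic().

    #include <linux/module.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct demo_pernet {
            struct list_head app_list;   /* per-namespace registrations */
            struct mutex app_mutex;
    };

    static int demo_net_id __read_mostly;

    static int __net_init demo_net_init(struct net *net)
    {
            struct demo_pernet *dp = net_generic(net, demo_net_id);

            INIT_LIST_HEAD(&dp->app_list);
            mutex_init(&dp->app_mutex);
            return 0;
    }

    static void __net_exit demo_net_exit(struct net *net)
    {
            /* per-net teardown (e.g. proc_net_remove()) would go here */
    }

    static struct pernet_operations demo_net_ops = {
            .init = demo_net_init,
            .exit = demo_net_exit,
            .id   = &demo_net_id,
            .size = sizeof(struct demo_pernet),
    };

    static int __init demo_init(void)
    {
            return register_pernet_subsys(&demo_net_ops);
    }

    static void __exit demo_exit(void)
    {
            unregister_pernet_subsys(&demo_net_ops);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Every namespace that already exists gets demo_net_init() called at registration time, and namespaces created later run it automatically, which is what lets the per-net IPVS state above be initialised without touching init_net directly.
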
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e9adecd..f289306 100644 (file)
 /*
  * Connection hash size. Default is what was selected at compile time.
 */
-int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
 module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
 MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
 
 /* size and mask values */
-int ip_vs_conn_tab_size;
-int ip_vs_conn_tab_mask;
+int ip_vs_conn_tab_size __read_mostly;
+static int ip_vs_conn_tab_mask __read_mostly;
 
 /*
  *  Connection hash table: for input and output packets lookups of IPVS
  */
-static struct list_head *ip_vs_conn_tab;
+static struct hlist_head *ip_vs_conn_tab __read_mostly;
 
 /*  SLAB cache for IPVS connections */
 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
-/*  counter for current IPVS connections */
-static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
-
 /*  counter for no client port connections */
 static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
 
 /* random value for IPVS connection hash */
-static unsigned int ip_vs_conn_rnd;
+static unsigned int ip_vs_conn_rnd __read_mostly;
 
 /*
  *  Fine locking granularity for big connection hash table
  */
-#define CT_LOCKARRAY_BITS  4
+#define CT_LOCKARRAY_BITS  5
 #define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
 #define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)
 
@@ -133,19 +130,19 @@ static inline void ct_write_unlock_bh(unsigned key)
 /*
  *     Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
                                       const union nf_inet_addr *addr,
                                       __be16 port)
 {
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
-               return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
-                                   (__force u32)port, proto, ip_vs_conn_rnd)
-                       & ip_vs_conn_tab_mask;
+               return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+                                   (__force u32)port, proto, ip_vs_conn_rnd) ^
+                       ((size_t)net>>8)) & ip_vs_conn_tab_mask;
 #endif
-       return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
-                           ip_vs_conn_rnd)
-               & ip_vs_conn_tab_mask;
+       return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+                           ip_vs_conn_rnd) ^
+               ((size_t)net>>8)) & ip_vs_conn_tab_mask;
 }
 
 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -166,18 +163,18 @@ static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
                port = p->vport;
        }
 
-       return ip_vs_conn_hashkey(p->af, p->protocol, addr, port);
+       return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
 }
 
 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 {
        struct ip_vs_conn_param p;
 
-       ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport,
-                             NULL, 0, &p);
+       ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+                             &cp->caddr, cp->cport, NULL, 0, &p);
 
-       if (cp->dest && cp->dest->svc->pe) {
-               p.pe = cp->dest->svc->pe;
+       if (cp->pe) {
+               p.pe = cp->pe;
                p.pe_data = cp->pe_data;
                p.pe_data_len = cp->pe_data_len;
        }
@@ -186,7 +183,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 }
 
 /*
- *     Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
+ *     Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
  *     returns bool success.
  */
 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
@@ -204,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
        spin_lock(&cp->lock);
 
        if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
-               list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+               hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
                cp->flags |= IP_VS_CONN_F_HASHED;
                atomic_inc(&cp->refcnt);
                ret = 1;
@@ -237,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
        spin_lock(&cp->lock);
 
        if (cp->flags & IP_VS_CONN_F_HASHED) {
-               list_del(&cp->c_list);
+               hlist_del(&cp->c_list);
                cp->flags &= ~IP_VS_CONN_F_HASHED;
                atomic_dec(&cp->refcnt);
                ret = 1;
@@ -262,18 +259,20 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
        unsigned hash;
        struct ip_vs_conn *cp;
+       struct hlist_node *n;
 
        hash = ip_vs_conn_hashkey_param(p, false);
 
        ct_read_lock(hash);
 
-       list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
                if (cp->af == p->af &&
+                   p->cport == cp->cport && p->vport == cp->vport &&
                    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
                    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
-                   p->cport == cp->cport && p->vport == cp->vport &&
                    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
-                   p->protocol == cp->protocol) {
+                   p->protocol == cp->protocol &&
+                   ip_vs_conn_net_eq(cp, p->net)) {
                        /* HIT */
                        atomic_inc(&cp->refcnt);
                        ct_read_unlock(hash);
@@ -313,23 +312,23 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
                            struct ip_vs_conn_param *p)
 {
        __be16 _ports[2], *pptr;
+       struct net *net = skb_net(skb);
 
        pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
        if (pptr == NULL)
                return 1;
 
        if (likely(!inverse))
-               ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0],
-                                     &iph->daddr, pptr[1], p);
+               ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+                                     pptr[0], &iph->daddr, pptr[1], p);
        else
-               ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1],
-                                     &iph->saddr, pptr[0], p);
+               ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+                                     pptr[1], &iph->saddr, pptr[0], p);
        return 0;
 }
 
 struct ip_vs_conn *
 ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-                       struct ip_vs_protocol *pp,
                        const struct ip_vs_iphdr *iph,
                        unsigned int proto_off, int inverse)
 {
@@ -347,14 +346,17 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
        unsigned hash;
        struct ip_vs_conn *cp;
+       struct hlist_node *n;
 
        hash = ip_vs_conn_hashkey_param(p, false);
 
        ct_read_lock(hash);
 
-       list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+               if (!ip_vs_conn_net_eq(cp, p->net))
+                       continue;
                if (p->pe_data && p->pe->ct_match) {
-                       if (p->pe->ct_match(p, cp))
+                       if (p->pe == cp->pe && p->pe->ct_match(p, cp))
                                goto out;
                        continue;
                }
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
        unsigned hash;
        struct ip_vs_conn *cp, *ret=NULL;
+       struct hlist_node *n;
 
        /*
         *      Check for "full" addressed entries
@@ -402,12 +405,13 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
        ct_read_lock(hash);
 
-       list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
                if (cp->af == p->af &&
+                   p->vport == cp->cport && p->cport == cp->dport &&
                    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
                    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
-                   p->vport == cp->cport && p->cport == cp->dport &&
-                   p->protocol == cp->protocol) {
+                   p->protocol == cp->protocol &&
+                   ip_vs_conn_net_eq(cp, p->net)) {
                        /* HIT */
                        atomic_inc(&cp->refcnt);
                        ret = cp;
@@ -428,7 +432,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
 struct ip_vs_conn *
 ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-                        struct ip_vs_protocol *pp,
                         const struct ip_vs_iphdr *iph,
                         unsigned int proto_off, int inverse)
 {
@@ -611,9 +614,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
        struct ip_vs_dest *dest;
 
        if ((cp) && (!cp->dest)) {
-               dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
-                                      &cp->vaddr, cp->vport,
-                                      cp->protocol);
+               dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+                                      cp->dport, &cp->vaddr, cp->vport,
+                                      cp->protocol, cp->fwmark);
                ip_vs_bind_dest(cp, dest);
                return dest;
        } else
@@ -677,6 +680,16 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
        atomic_dec(&dest->refcnt);
 }
 
+static int expire_quiescent_template(struct netns_ipvs *ipvs,
+                                    struct ip_vs_dest *dest)
+{
+#ifdef CONFIG_SYSCTL
+       return ipvs->sysctl_expire_quiescent_template &&
+               (atomic_read(&dest->weight) == 0);
+#else
+       return 0;
+#endif
+}
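
The new expire_quiescent_template() helper keeps the CONFIG_SYSCTL #ifdef in one place so that ip_vs_check_template() below stays free of preprocessor conditionals; with sysctl support compiled out it degenerates to a constant 0 that the compiler can fold away. A tiny stand-alone illustration of the same gating pattern (hypothetical HAVE_SYSCTL macro and struct cfg, not kernel code):

#include <stdio.h>

/* toggle at build time, e.g. cc -DHAVE_SYSCTL */
struct cfg { int expire_quiescent; };

#ifdef HAVE_SYSCTL
static int expire_quiescent(const struct cfg *c, int weight)
{
        return c->expire_quiescent && weight == 0;
}
#else
/* stub keeps the caller free of #ifdefs; always compiles to 0 */
static int expire_quiescent(const struct cfg *c, int weight)
{
        (void)c; (void)weight;
        return 0;
}
#endif

int main(void)
{
        struct cfg c = { .expire_quiescent = 1 };

        printf("%d\n", expire_quiescent(&c, 0));
        return 0;
}
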
 
 /*
  *     Checking if the destination of a connection template is available.
@@ -686,14 +699,14 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
 int ip_vs_check_template(struct ip_vs_conn *ct)
 {
        struct ip_vs_dest *dest = ct->dest;
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
 
        /*
         * Checking the dest server status.
         */
        if ((dest == NULL) ||
            !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-           (sysctl_ip_vs_expire_quiescent_template &&
-            (atomic_read(&dest->weight) == 0))) {
+           expire_quiescent_template(ipvs, dest)) {
                IP_VS_DBG_BUF(9, "check_template: dest not available for "
                              "protocol %s s:%s:%d v:%s:%d "
                              "-> d:%s:%d\n",
@@ -730,6 +743,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
 static void ip_vs_conn_expire(unsigned long data)
 {
        struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
 
        cp->timeout = 60*HZ;
 
@@ -765,13 +779,14 @@ static void ip_vs_conn_expire(unsigned long data)
                if (cp->flags & IP_VS_CONN_F_NFCT)
                        ip_vs_conn_drop_conntrack(cp);
 
+               ip_vs_pe_put(cp->pe);
                kfree(cp->pe_data);
                if (unlikely(cp->app != NULL))
                        ip_vs_unbind_app(cp);
                ip_vs_unbind_dest(cp);
                if (cp->flags & IP_VS_CONN_F_NO_CPORT)
                        atomic_dec(&ip_vs_conn_no_cport_cnt);
-               atomic_dec(&ip_vs_conn_count);
+               atomic_dec(&ipvs->conn_count);
 
                kmem_cache_free(ip_vs_conn_cachep, cp);
                return;
@@ -802,10 +817,12 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
 struct ip_vs_conn *
 ip_vs_conn_new(const struct ip_vs_conn_param *p,
               const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
-              struct ip_vs_dest *dest)
+              struct ip_vs_dest *dest, __u32 fwmark)
 {
        struct ip_vs_conn *cp;
-       struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol);
+       struct netns_ipvs *ipvs = net_ipvs(p->net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+                                                          p->protocol);
 
        cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
        if (cp == NULL) {
@@ -813,8 +830,9 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
                return NULL;
        }
 
-       INIT_LIST_HEAD(&cp->c_list);
+       INIT_HLIST_NODE(&cp->c_list);
        setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+       ip_vs_conn_net_set(cp, p->net);
        cp->af             = p->af;
        cp->protocol       = p->protocol;
        ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
@@ -826,7 +844,10 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
                        &cp->daddr, daddr);
        cp->dport          = dport;
        cp->flags          = flags;
-       if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) {
+       cp->fwmark         = fwmark;
+       if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
+               ip_vs_pe_get(p->pe);
+               cp->pe = p->pe;
                cp->pe_data = p->pe_data;
                cp->pe_data_len = p->pe_data_len;
        }
@@ -842,7 +863,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
        atomic_set(&cp->n_control, 0);
        atomic_set(&cp->in_pkts, 0);
 
-       atomic_inc(&ip_vs_conn_count);
+       atomic_inc(&ipvs->conn_count);
        if (flags & IP_VS_CONN_F_NO_CPORT)
                atomic_inc(&ip_vs_conn_no_cport_cnt);
 
@@ -861,8 +882,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
 #endif
                ip_vs_bind_xmit(cp);
 
-       if (unlikely(pp && atomic_read(&pp->appcnt)))
-               ip_vs_bind_app(cp, pp);
+       if (unlikely(pd && atomic_read(&pd->appcnt)))
+               ip_vs_bind_app(cp, pd->pp);
 
        /*
         * Allow conntrack to be preserved. By default, conntrack
@@ -871,7 +892,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
         * IP_VS_CONN_F_ONE_PACKET too.
         */
 
-       if (ip_vs_conntrack_enabled())
+       if (ip_vs_conntrack_enabled(ipvs))
                cp->flags |= IP_VS_CONN_F_NFCT;
 
        /* Hash it in the ip_vs_conn_tab finally */
@@ -884,18 +905,24 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
  *     /proc/net/ip_vs_conn entries
  */
 #ifdef CONFIG_PROC_FS
+struct ip_vs_iter_state {
+       struct seq_net_private  p;
+       struct hlist_head       *l;
+};
 
 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 {
        int idx;
        struct ip_vs_conn *cp;
+       struct ip_vs_iter_state *iter = seq->private;
+       struct hlist_node *n;
 
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
                ct_read_lock_bh(idx);
-               list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
                        if (pos-- == 0) {
-                               seq->private = &ip_vs_conn_tab[idx];
-                       return cp;
+                               iter->l = &ip_vs_conn_tab[idx];
+                               return cp;
                        }
                }
                ct_read_unlock_bh(idx);
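
The iterator state gains a struct ip_vs_iter_state whose first member is struct seq_net_private; seq_open_net() allocates that state per reader and seq_file_net() later recovers the owning struct net from the leading member, so the ordering matters. A minimal sketch of the same per-netns seq_file wiring, with hypothetical foo_* names and assuming only the seq_open_net()/seq_release_net() interfaces this patch already uses (the seq_operations body is elided):

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

struct foo_iter_state {
        struct seq_net_private p;       /* must stay first for seq_file_net() */
        struct hlist_head *bucket;      /* private cursor for the table walk */
};

extern const struct seq_operations foo_seq_ops; /* .start/.next/.stop/.show elided */

static int foo_seq_open(struct inode *inode, struct file *file)
{
        /* allocates a zeroed foo_iter_state and records the netns */
        return seq_open_net(inode, file, &foo_seq_ops,
                            sizeof(struct foo_iter_state));
}

static const struct file_operations foo_seq_fops = {
        .open    = foo_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
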
@@ -906,14 +933,18 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 
 static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       seq->private = NULL;
+       struct ip_vs_iter_state *iter = seq->private;
+
+       iter->l = NULL;
        return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
        return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct ip_vs_conn *cp = v;
-       struct list_head *e, *l = seq->private;
+       struct ip_vs_iter_state *iter = seq->private;
+       struct hlist_node *e;
+       struct hlist_head *l = iter->l;
        int idx;
 
        ++*pos;
@@ -921,27 +952,28 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                return ip_vs_conn_array(seq, 0);
 
        /* more on same hash chain? */
-       if ((e = cp->c_list.next) != l)
-               return list_entry(e, struct ip_vs_conn, c_list);
+       if ((e = cp->c_list.next))
+               return hlist_entry(e, struct ip_vs_conn, c_list);
 
        idx = l - ip_vs_conn_tab;
        ct_read_unlock_bh(idx);
 
        while (++idx < ip_vs_conn_tab_size) {
                ct_read_lock_bh(idx);
-               list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-                       seq->private = &ip_vs_conn_tab[idx];
+               hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+                       iter->l = &ip_vs_conn_tab[idx];
                        return cp;
                }
                ct_read_unlock_bh(idx);
        }
-       seq->private = NULL;
+       iter->l = NULL;
        return NULL;
 }
 
 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
 {
-       struct list_head *l = seq->private;
+       struct ip_vs_iter_state *iter = seq->private;
+       struct hlist_head *l = iter->l;
 
        if (l)
                ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -955,18 +987,19 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData\n");
        else {
                const struct ip_vs_conn *cp = v;
+               struct net *net = seq_file_net(seq);
                char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
                size_t len = 0;
 
-               if (cp->dest && cp->pe_data &&
-                   cp->dest->svc->pe->show_pe_data) {
+               if (!ip_vs_conn_net_eq(cp, net))
+                       return 0;
+               if (cp->pe_data) {
                        pe_data[0] = ' ';
-                       len = strlen(cp->dest->svc->pe->name);
-                       memcpy(pe_data + 1, cp->dest->svc->pe->name, len);
+                       len = strlen(cp->pe->name);
+                       memcpy(pe_data + 1, cp->pe->name, len);
                        pe_data[len + 1] = ' ';
                        len += 2;
-                       len += cp->dest->svc->pe->show_pe_data(cp,
-                                                              pe_data + len);
+                       len += cp->pe->show_pe_data(cp, pe_data + len);
                }
                pe_data[len] = '\0';
 
@@ -1004,7 +1037,8 @@ static const struct seq_operations ip_vs_conn_seq_ops = {
 
 static int ip_vs_conn_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ip_vs_conn_seq_ops);
+       return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
+                           sizeof(struct ip_vs_iter_state));
 }
 
 static const struct file_operations ip_vs_conn_fops = {
@@ -1031,6 +1065,10 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
        else {
                const struct ip_vs_conn *cp = v;
+               struct net *net = seq_file_net(seq);
+
+               if (!ip_vs_conn_net_eq(cp, net))
+                       return 0;
 
 #ifdef CONFIG_IP_VS_IPV6
                if (cp->af == AF_INET6)
@@ -1067,7 +1105,8 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
 
 static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ip_vs_conn_sync_seq_ops);
+       return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
+                           sizeof(struct ip_vs_iter_state));
 }
 
 static const struct file_operations ip_vs_conn_sync_fops = {
@@ -1113,7 +1152,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
 }
 
 /* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(void)
+void ip_vs_random_dropentry(struct net *net)
 {
        int idx;
        struct ip_vs_conn *cp;
@@ -1123,17 +1162,19 @@ void ip_vs_random_dropentry(void)
         */
        for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
                unsigned hash = net_random() & ip_vs_conn_tab_mask;
+               struct hlist_node *n;
 
                /*
                 *  Lock is actually needed in this loop.
                 */
                ct_write_lock_bh(hash);
 
-               list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
                        if (cp->flags & IP_VS_CONN_F_TEMPLATE)
                                /* connection template */
                                continue;
-
+                       if (!ip_vs_conn_net_eq(cp, net))
+                               continue;
                        if (cp->protocol == IPPROTO_TCP) {
                                switch(cp->state) {
                                case IP_VS_TCP_S_SYN_RECV:
@@ -1168,20 +1209,24 @@ void ip_vs_random_dropentry(void)
 /*
  *      Flush all the connection entries in the ip_vs_conn_tab
  */
-static void ip_vs_conn_flush(void)
+static void ip_vs_conn_flush(struct net *net)
 {
        int idx;
        struct ip_vs_conn *cp;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-  flush_again:
+flush_again:
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+               struct hlist_node *n;
+
                /*
                 *  Lock is actually needed in this loop.
                 */
                ct_write_lock_bh(idx);
 
-               list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-
+               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+                       if (!ip_vs_conn_net_eq(cp, net))
+                               continue;
                        IP_VS_DBG(4, "del connection\n");
                        ip_vs_conn_expire_now(cp);
                        if (cp->control) {
@@ -1194,16 +1239,41 @@ static void ip_vs_conn_flush(void)
 
        /* the counter may be not NULL, because maybe some conn entries
           are run by slow timer handler or unhashed but still referred */
-       if (atomic_read(&ip_vs_conn_count) != 0) {
+       if (atomic_read(&ipvs->conn_count) != 0) {
                schedule();
                goto flush_again;
        }
 }
+/*
+ * per netns init and exit
+ */
+int __net_init __ip_vs_conn_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       atomic_set(&ipvs->conn_count, 0);
 
+       proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
+       proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+       return 0;
+}
+
+static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+{
+       /* flush all the connection entries first */
+       ip_vs_conn_flush(net);
+       proc_net_remove(net, "ip_vs_conn");
+       proc_net_remove(net, "ip_vs_conn_sync");
+}
+static struct pernet_operations ipvs_conn_ops = {
+       .init = __ip_vs_conn_init,
+       .exit = __ip_vs_conn_cleanup,
+};
 
 int __init ip_vs_conn_init(void)
 {
        int idx;
+       int retc;
 
        /* Compute size and mask */
        ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1212,8 +1282,7 @@ int __init ip_vs_conn_init(void)
        /*
         * Allocate the connection hash table and initialize its list heads
         */
-       ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
-                                sizeof(struct list_head));
+       ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
        if (!ip_vs_conn_tab)
                return -ENOMEM;
 
@@ -1233,32 +1302,25 @@ int __init ip_vs_conn_init(void)
        IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
                  sizeof(struct ip_vs_conn));
 
-       for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-               INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
-       }
+       for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
+               INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
 
        for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)  {
                rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
        }
 
-       proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
-       proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+       retc = register_pernet_subsys(&ipvs_conn_ops);
 
        /* calculate the random value for connection hash */
        get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
 
-       return 0;
+       return retc;
 }
 
-
 void ip_vs_conn_cleanup(void)
 {
-       /* flush all the connection entries first */
-       ip_vs_conn_flush();
-
+       unregister_pernet_subsys(&ipvs_conn_ops);
        /* Release the empty cache */
        kmem_cache_destroy(ip_vs_conn_cachep);
-       proc_net_remove(&init_net, "ip_vs_conn");
-       proc_net_remove(&init_net, "ip_vs_conn_sync");
        vfree(ip_vs_conn_tab);
 }
index b4e51e9..07accf6 100644 (file)
@@ -41,6 +41,7 @@
 #include <net/icmp.h>                   /* for icmp_send */
 #include <net/route.h>
 #include <net/ip6_checksum.h>
+#include <net/netns/generic.h>         /* net_generic() */
 
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
@@ -68,6 +69,12 @@ EXPORT_SYMBOL(ip_vs_conn_put);
 EXPORT_SYMBOL(ip_vs_get_debug_level);
 #endif
 
+int ip_vs_net_id __read_mostly;
+#ifdef IP_VS_GENERIC_NETNS
+EXPORT_SYMBOL(ip_vs_net_id);
+#endif
+/* netns cnt used for uniqueness */
+static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
 
 /* ID used in ICMP lookups */
 #define icmp_id(icmph)          (((icmph)->un).echo.id)
@@ -108,21 +115,28 @@ static inline void
 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
        struct ip_vs_dest *dest = cp->dest;
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-               spin_lock(&dest->stats.lock);
-               dest->stats.ustats.inpkts++;
-               dest->stats.ustats.inbytes += skb->len;
-               spin_unlock(&dest->stats.lock);
-
-               spin_lock(&dest->svc->stats.lock);
-               dest->svc->stats.ustats.inpkts++;
-               dest->svc->stats.ustats.inbytes += skb->len;
-               spin_unlock(&dest->svc->stats.lock);
-
-               spin_lock(&ip_vs_stats.lock);
-               ip_vs_stats.ustats.inpkts++;
-               ip_vs_stats.ustats.inbytes += skb->len;
-               spin_unlock(&ip_vs_stats.lock);
+               struct ip_vs_cpu_stats *s;
+
+               s = this_cpu_ptr(dest->stats.cpustats);
+               s->ustats.inpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.inbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               s->ustats.inpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.inbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+               s->ustats.inpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.inbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
        }
 }
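
Here the per-packet accounting drops the three spin_lock()ed shared counters in favour of per-CPU counters reached through this_cpu_ptr(), with u64_stats_update_begin()/end() keeping the 64-bit byte counters consistent for 32-bit readers; totals are summed across CPUs when reported. The following self-contained userspace sketch shows only the aggregation idea — per-thread slots bumped without a shared lock and summed at read time — not the kernel's percpu or u64_stats API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4                /* stand-in for "one slot per CPU" */

struct pcpu_stats {
        _Atomic uint64_t inpkts;
        _Atomic uint64_t inbytes;
};

static struct pcpu_stats stats[NSLOTS];

/* writer side: each thread touches only its own slot, no shared lock */
static void *worker(void *arg)
{
        struct pcpu_stats *s = &stats[(long)arg % NSLOTS];

        for (int i = 0; i < 100000; i++) {
                atomic_fetch_add_explicit(&s->inpkts, 1, memory_order_relaxed);
                atomic_fetch_add_explicit(&s->inbytes, 1500, memory_order_relaxed);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid[NSLOTS];
        uint64_t pkts = 0, bytes = 0;

        for (long i = 0; i < NSLOTS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);
        for (int i = 0; i < NSLOTS; i++)
                pthread_join(tid[i], NULL);

        /* reader side: aggregate all slots into one total */
        for (int i = 0; i < NSLOTS; i++) {
                pkts  += atomic_load_explicit(&stats[i].inpkts, memory_order_relaxed);
                bytes += atomic_load_explicit(&stats[i].inbytes, memory_order_relaxed);
        }
        printf("inpkts=%llu inbytes=%llu\n",
               (unsigned long long)pkts, (unsigned long long)bytes);
        return 0;
}

Built with cc -pthread, it reports exact totals even though no lock is taken on the update path, because each writer owns its slot and the reader only aggregates after the writers are done.
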
 
@@ -131,21 +145,28 @@ static inline void
 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
        struct ip_vs_dest *dest = cp->dest;
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-               spin_lock(&dest->stats.lock);
-               dest->stats.ustats.outpkts++;
-               dest->stats.ustats.outbytes += skb->len;
-               spin_unlock(&dest->stats.lock);
-
-               spin_lock(&dest->svc->stats.lock);
-               dest->svc->stats.ustats.outpkts++;
-               dest->svc->stats.ustats.outbytes += skb->len;
-               spin_unlock(&dest->svc->stats.lock);
-
-               spin_lock(&ip_vs_stats.lock);
-               ip_vs_stats.ustats.outpkts++;
-               ip_vs_stats.ustats.outbytes += skb->len;
-               spin_unlock(&ip_vs_stats.lock);
+               struct ip_vs_cpu_stats *s;
+
+               s = this_cpu_ptr(dest->stats.cpustats);
+               s->ustats.outpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.outbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               s->ustats.outpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.outbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+               s->ustats.outpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.outbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
        }
 }
 
@@ -153,41 +174,44 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 static inline void
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
-       spin_lock(&cp->dest->stats.lock);
-       cp->dest->stats.ustats.conns++;
-       spin_unlock(&cp->dest->stats.lock);
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
+       struct ip_vs_cpu_stats *s;
+
+       s = this_cpu_ptr(cp->dest->stats.cpustats);
+       s->ustats.conns++;
 
-       spin_lock(&svc->stats.lock);
-       svc->stats.ustats.conns++;
-       spin_unlock(&svc->stats.lock);
+       s = this_cpu_ptr(svc->stats.cpustats);
+       s->ustats.conns++;
 
-       spin_lock(&ip_vs_stats.lock);
-       ip_vs_stats.ustats.conns++;
-       spin_unlock(&ip_vs_stats.lock);
+       s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+       s->ustats.conns++;
 }
 
 
 static inline int
 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
                const struct sk_buff *skb,
-               struct ip_vs_protocol *pp)
+               struct ip_vs_proto_data *pd)
 {
-       if (unlikely(!pp->state_transition))
+       if (unlikely(!pd->pp->state_transition))
                return 0;
-       return pp->state_transition(cp, direction, skb, pp);
+       return pd->pp->state_transition(cp, direction, skb, pd);
 }
 
-static inline void
+static inline int
 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
                              struct sk_buff *skb, int protocol,
                              const union nf_inet_addr *caddr, __be16 cport,
                              const union nf_inet_addr *vaddr, __be16 vport,
                              struct ip_vs_conn_param *p)
 {
-       ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
+       ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+                             vport, p);
        p->pe = svc->pe;
        if (p->pe && p->pe->fill_param)
-               p->pe->fill_param(p, skb);
+               return p->pe->fill_param(p, skb);
+
+       return 0;
 }
 
 /*
@@ -200,7 +224,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
 static struct ip_vs_conn *
 ip_vs_sched_persist(struct ip_vs_service *svc,
                    struct sk_buff *skb,
-                   __be16 ports[2])
+                   __be16 src_port, __be16 dst_port, int *ignored)
 {
        struct ip_vs_conn *cp = NULL;
        struct ip_vs_iphdr iph;
@@ -224,8 +248,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 
        IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
                      "mnet %s\n",
-                     IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
-                     IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+                     IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
+                     IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
                      IP_VS_DBG_ADDR(svc->af, &snet));
 
        /*
@@ -247,14 +271,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
                __be16 vport = 0;
 
-               if (ports[1] == svc->port) {
+               if (dst_port == svc->port) {
                        /* non-FTP template:
                         * <protocol, caddr, 0, vaddr, vport, daddr, dport>
                         * FTP template:
                         * <protocol, caddr, 0, vaddr, 0, daddr, 0>
                         */
                        if (svc->port != FTPPORT)
-                               vport = ports[1];
+                               vport = dst_port;
                } else {
                        /* Note: persistent fwmark-based services and
                         * persistent port zero service are handled here.
@@ -268,24 +292,31 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                                vaddr = &fwmark;
                        }
                }
-               ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
-                                             vaddr, vport, &param);
+               /* return *ignored = -1 so NF_DROP can be used */
+               if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
+                                                 vaddr, vport, &param) < 0) {
+                       *ignored = -1;
+                       return NULL;
+               }
        }
 
        /* Check if a template already exists */
        ct = ip_vs_ct_in_get(&param);
        if (!ct || !ip_vs_check_template(ct)) {
-               /* No template found or the dest of the connection
+               /*
+                * No template found or the dest of the connection
                 * template is not available.
+                * return *ignored=0 i.e. ICMP and NF_DROP
                 */
                dest = svc->scheduler->schedule(svc, skb);
                if (!dest) {
                        IP_VS_DBG(1, "p-schedule: no dest found.\n");
                        kfree(param.pe_data);
+                       *ignored = 0;
                        return NULL;
                }
 
-               if (ports[1] == svc->port && svc->port != FTPPORT)
+               if (dst_port == svc->port && svc->port != FTPPORT)
                        dport = dest->port;
 
                /* Create a template
@@ -293,9 +324,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                 * and thus param.pe_data will be destroyed
                 * when the template expires */
                ct = ip_vs_conn_new(&param, &dest->addr, dport,
-                                   IP_VS_CONN_F_TEMPLATE, dest);
+                                   IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
                if (ct == NULL) {
                        kfree(param.pe_data);
+                       *ignored = -1;
                        return NULL;
                }
 
@@ -306,7 +338,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                kfree(param.pe_data);
        }
 
-       dport = ports[1];
+       dport = dst_port;
        if (dport == svc->port && dest->port)
                dport = dest->port;
 
@@ -317,11 +349,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
        /*
         *    Create a new connection according to the template
         */
-       ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
-                             &iph.daddr, ports[1], &param);
-       cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest);
+       ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
+                             src_port, &iph.daddr, dst_port, &param);
+
+       cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
        if (cp == NULL) {
                ip_vs_conn_put(ct);
+               *ignored = -1;
                return NULL;
        }
 
@@ -341,11 +375,27 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
  *  It selects a server according to the virtual service, and
  *  creates a connection entry.
  *  Protocols supported: TCP, UDP
+ *
+ *  Usage of *ignored
+ *
+ * 1 :   protocol tried to schedule (e.g. on SYN), found svc but the
+ *       svc/scheduler decides that this packet should be accepted with
+ *       NF_ACCEPT because it must not be scheduled.
+ *
+ * 0 :   scheduler cannot find a destination, so try bypass or
+ *       return ICMP and then NF_DROP (ip_vs_leave).
+ *
+ * -1 :  scheduler tried to schedule but a fatal error occurred, e.g.
+ *       ip_vs_conn_new() failure (ENOMEM) or ip_vs_sip_fill_param()
+ *       failure such as a missing Call-ID or ENOMEM from skb_linearize
+ *       or pe_data allocation. In this case we should return NF_DROP
+ *       without any attempt to send ICMP via ip_vs_leave.
  */
 struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-              struct ip_vs_protocol *pp, int *ignored)
+              struct ip_vs_proto_data *pd, int *ignored)
 {
+       struct ip_vs_protocol *pp = pd->pp;
        struct ip_vs_conn *cp = NULL;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest;
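
The comment block added in this hunk pins down the *ignored contract that ip_vs_schedule() and ip_vs_sched_persist() now share: 1 means "accept, do not schedule", 0 means "no destination, let ip_vs_leave() try bypass/ICMP before dropping", and -1 means "fatal error, NF_DROP with no ICMP". A stand-alone sketch of that NULL-plus-tri-state-out-parameter pattern, with hypothetical names (not IPVS code):

#include <stdio.h>

#define VERDICT_ACCEPT 1
#define VERDICT_DROP   0

struct conn { int dummy; };

/* returns a connection or NULL; *ignored tells the caller what NULL means */
static struct conn *schedule_conn(int have_dest, int alloc_ok, int *ignored)
{
        static struct conn c;

        *ignored = 1;                   /* default: accept, nothing scheduled */
        if (!have_dest) {
                *ignored = 0;           /* let the caller try a bypass path */
                return NULL;
        }
        if (!alloc_ok) {
                *ignored = -1;          /* fatal: drop without further work */
                return NULL;
        }
        return &c;
}

static int handle_packet(int have_dest, int alloc_ok)
{
        int ignored;
        struct conn *cp = schedule_conn(have_dest, alloc_ok, &ignored);

        if (!cp) {
                if (ignored <= 0)       /* 0: bypass/ICMP path; -1: plain drop */
                        return VERDICT_DROP;
                return VERDICT_ACCEPT;  /* 1: accept untouched */
        }
        return VERDICT_ACCEPT;          /* scheduled: forward via cp */
}

int main(void)
{
        printf("%d %d %d\n",
               handle_packet(1, 1), handle_packet(0, 1), handle_packet(1, 0));
        return 0;
}
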
@@ -371,12 +421,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
        }
 
        /*
-        * Do not schedule replies from local real server. It is risky
-        * for fwmark services but mostly for persistent services.
+        *    Do not schedule replies from local real server.
         */
        if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
-           (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) &&
-           (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) {
+           (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
                IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
                              "Not scheduling reply for existing connection");
                __ip_vs_conn_put(cp);
@@ -386,10 +434,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
        /*
         *    Persistent service
         */
-       if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
-               *ignored = 0;
-               return ip_vs_sched_persist(svc, skb, pptr);
-       }
+       if (svc->flags & IP_VS_SVC_F_PERSISTENT)
+               return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+
+       *ignored = 0;
 
        /*
         *    Non-persistent service
@@ -402,8 +450,6 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
                return NULL;
        }
 
-       *ignored = 0;
-
        dest = svc->scheduler->schedule(svc, skb);
        if (dest == NULL) {
                IP_VS_DBG(1, "Schedule: no dest found.\n");
@@ -419,13 +465,17 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
         */
        {
                struct ip_vs_conn_param p;
-               ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
-                                     pptr[0], &iph.daddr, pptr[1], &p);
+
+               ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
+                                     &iph.saddr, pptr[0], &iph.daddr, pptr[1],
+                                     &p);
                cp = ip_vs_conn_new(&p, &dest->addr,
                                    dest->port ? dest->port : pptr[1],
-                                   flags, dest);
-               if (!cp)
+                                   flags, dest, skb->mark);
+               if (!cp) {
+                       *ignored = -1;
                        return NULL;
+               }
        }
 
        IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
@@ -447,11 +497,16 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
  *  no destination is available for a new connection.
  */
 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-               struct ip_vs_protocol *pp)
+               struct ip_vs_proto_data *pd)
 {
        __be16 _ports[2], *pptr;
        struct ip_vs_iphdr iph;
+#ifdef CONFIG_SYSCTL
+       struct net *net;
+       struct netns_ipvs *ipvs;
        int unicast;
+#endif
+
        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
 
        pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -460,17 +515,21 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                return NF_DROP;
        }
 
+#ifdef CONFIG_SYSCTL
+       net = skb_net(skb);
+
 #ifdef CONFIG_IP_VS_IPV6
        if (svc->af == AF_INET6)
                unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
        else
 #endif
-               unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+               unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
 
        /* if it is fwmark-based service, the cache_bypass sysctl is up
           and the destination is a non-local unicast, then create
           a cache_bypass connection entry */
-       if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
+       ipvs = net_ipvs(net);
+       if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
                int ret, cs;
                struct ip_vs_conn *cp;
                unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -484,12 +543,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
                {
                        struct ip_vs_conn_param p;
-                       ip_vs_conn_fill_param(svc->af, iph.protocol,
+                       ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
                                              &iph.saddr, pptr[0],
                                              &iph.daddr, pptr[1], &p);
                        cp = ip_vs_conn_new(&p, &daddr, 0,
                                            IP_VS_CONN_F_BYPASS | flags,
-                                           NULL);
+                                           NULL, skb->mark);
                        if (!cp)
                                return NF_DROP;
                }
@@ -498,16 +557,17 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                ip_vs_in_stats(cp, skb);
 
                /* set state */
-               cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+               cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
 
                /* transmit the first SYN packet */
-               ret = cp->packet_xmit(skb, cp, pp);
+               ret = cp->packet_xmit(skb, cp, pd->pp);
                /* do not touch skb anymore */
 
                atomic_inc(&cp->in_pkts);
                ip_vs_conn_put(cp);
                return ret;
        }
+#endif
 
        /*
         * When the virtual ftp service is presented, packets destined
@@ -544,6 +604,33 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
        return NF_DROP;
 }
 
+#ifdef CONFIG_SYSCTL
+
+static int sysctl_snat_reroute(struct sk_buff *skb)
+{
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+       return ipvs->sysctl_snat_reroute;
+}
+
+static int sysctl_nat_icmp_send(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       return ipvs->sysctl_nat_icmp_send;
+}
+
+static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_expire_nodest_conn;
+}
+
+#else
+
+static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
+static int sysctl_nat_icmp_send(struct net *net) { return 0; }
+static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
+
+#endif
+
 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
 {
        return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
@@ -576,6 +663,22 @@ static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
 }
 #endif
 
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_VS_IPV6
+       if (af == AF_INET6) {
+               if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+                       return 1;
+       } else
+#endif
+               if ((sysctl_snat_reroute(skb) ||
+                    skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+                   ip_route_me_harder(skb, RTN_LOCAL) != 0)
+                       return 1;
+
+       return 0;
+}
+
 /*
  * Packet has been made sufficiently writable in caller
  * - inout: 1=in->out, 0=out->in
@@ -674,7 +777,7 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
 #endif
 
 /* Handle relevant response ICMP messages - forward to the right
- * destination host. Used for NAT and local client.
+ * destination host.
  */
 static int handle_response_icmp(int af, struct sk_buff *skb,
                                union nf_inet_addr *snet,
@@ -710,16 +813,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 #endif
                ip_vs_nat_icmp(skb, pp, cp, 1);
 
-#ifdef CONFIG_IP_VS_IPV6
-       if (af == AF_INET6) {
-               if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
-                       goto out;
-       } else
-#endif
-               if ((sysctl_ip_vs_snat_reroute ||
-                    skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
-                   ip_route_me_harder(skb, RTN_LOCAL) != 0)
-                       goto out;
+       if (ip_vs_route_me_harder(af, skb))
+               goto out;
 
        /* do the statistics and put it back */
        ip_vs_out_stats(cp, skb);
@@ -808,7 +903,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+       cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -885,7 +980,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
 
        ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+       cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -921,12 +1016,13 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
 }
 
 /* Handle response packets: rewrite addresses and send away...
- * Used for NAT and local client.
  */
 static unsigned int
-handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                struct ip_vs_conn *cp, int ihl)
 {
+       struct ip_vs_protocol *pp = pd->pp;
+
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
 
        if (!skb_make_writable(skb, ihl))
@@ -961,21 +1057,13 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
         * if it came from this machine itself.  So re-compute
         * the routing information.
         */
-#ifdef CONFIG_IP_VS_IPV6
-       if (af == AF_INET6) {
-               if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
-                       goto drop;
-       } else
-#endif
-               if ((sysctl_ip_vs_snat_reroute ||
-                    skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
-                   ip_route_me_harder(skb, RTN_LOCAL) != 0)
-                       goto drop;
+       if (ip_vs_route_me_harder(af, skb))
+               goto drop;
 
        IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
 
        ip_vs_out_stats(cp, skb);
-       ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+       ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
        skb->ipvs_property = 1;
        if (!(cp->flags & IP_VS_CONN_F_NFCT))
                ip_vs_notrack(skb);
@@ -999,8 +1087,10 @@ drop:
 static unsigned int
 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
 {
+       struct net *net = NULL;
        struct ip_vs_iphdr iph;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        struct ip_vs_conn *cp;
 
        EnterFunction(11);
@@ -1022,6 +1112,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        if (unlikely(!skb_dst(skb)))
                return NF_ACCEPT;
 
+       net = skb_net(skb);
        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
@@ -1045,9 +1136,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
                        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
                }
 
-       pp = ip_vs_proto_get(iph.protocol);
-       if (unlikely(!pp))
+       pd = ip_vs_proto_data_get(net, iph.protocol);
+       if (unlikely(!pd))
                return NF_ACCEPT;
+       pp = pd->pp;
 
        /* reassemble IP fragments */
 #ifdef CONFIG_IP_VS_IPV6
@@ -1073,11 +1165,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        /*
         * Check if the packet belongs to an existing entry
         */
-       cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+       cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
 
        if (likely(cp))
-               return handle_response(af, skb, pp, cp, iph.len);
-       if (sysctl_ip_vs_nat_icmp_send &&
+               return handle_response(af, skb, pd, cp, iph.len);
+       if (sysctl_nat_icmp_send(net) &&
            (pp->protocol == IPPROTO_TCP ||
             pp->protocol == IPPROTO_UDP ||
             pp->protocol == IPPROTO_SCTP)) {
@@ -1087,7 +1179,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
                                          sizeof(_ports), _ports);
                if (pptr == NULL)
                        return NF_ACCEPT;       /* Not for me */
-               if (ip_vs_lookup_real_service(af, iph.protocol,
+               if (ip_vs_lookup_real_service(net, af, iph.protocol,
                                              &iph.saddr,
                                              pptr[0])) {
                        /*
@@ -1202,14 +1294,15 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
 static int
 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 {
+       struct net *net = NULL;
        struct iphdr *iph;
        struct icmphdr  _icmph, *ic;
        struct iphdr    _ciph, *cih;    /* The ip header contained within the ICMP */
        struct ip_vs_iphdr ciph;
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        unsigned int offset, ihl, verdict;
-       union nf_inet_addr snet;
 
        *related = 1;
 
@@ -1249,9 +1342,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        if (cih == NULL)
                return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-       pp = ip_vs_proto_get(cih->protocol);
-       if (!pp)
+       net = skb_net(skb);
+       pd = ip_vs_proto_data_get(net, cih->protocol);
+       if (!pd)
                return NF_ACCEPT;
+       pp = pd->pp;
 
        /* Is the embedded protocol header present? */
        if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
@@ -1265,18 +1360,9 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
-       if (!cp) {
-               /* The packet could also belong to a local client */
-               cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
-               if (cp) {
-                       snet.ip = iph->saddr;
-                       return handle_response_icmp(AF_INET, skb, &snet,
-                                                   cih->protocol, cp, pp,
-                                                   offset, ihl);
-               }
+       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+       if (!cp)
                return NF_ACCEPT;
-       }
 
        verdict = NF_DROP;
 
@@ -1312,6 +1398,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 static int
 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 {
+       struct net *net = NULL;
        struct ipv6hdr *iph;
        struct icmp6hdr _icmph, *ic;
        struct ipv6hdr  _ciph, *cih;    /* The ip header contained
@@ -1319,8 +1406,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
        struct ip_vs_iphdr ciph;
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        unsigned int offset, verdict;
-       union nf_inet_addr snet;
        struct rt6_info *rt;
 
        *related = 1;
@@ -1361,9 +1448,11 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
        if (cih == NULL)
                return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-       pp = ip_vs_proto_get(cih->nexthdr);
-       if (!pp)
+       net = skb_net(skb);
+       pd = ip_vs_proto_data_get(net, cih->nexthdr);
+       if (!pd)
                return NF_ACCEPT;
+       pp = pd->pp;
 
        /* Is the embedded protocol header present? */
        /* TODO: we don't support fragmentation at the moment anyways */
@@ -1377,19 +1466,9 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
-       if (!cp) {
-               /* The packet could also belong to a local client */
-               cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
-               if (cp) {
-                       ipv6_addr_copy(&snet.in6, &iph->saddr);
-                       return handle_response_icmp(AF_INET6, skb, &snet,
-                                                   cih->nexthdr,
-                                                   cp, pp, offset,
-                                                   sizeof(struct ipv6hdr));
-               }
+       cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
+       if (!cp)
                return NF_ACCEPT;
-       }
 
        verdict = NF_DROP;
 
@@ -1423,10 +1502,13 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 static unsigned int
 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 {
+       struct net *net;
        struct ip_vs_iphdr iph;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        struct ip_vs_conn *cp;
        int ret, restart, pkts;
+       struct netns_ipvs *ipvs;
 
        /* Already marked as IPVS request or reply? */
        if (skb->ipvs_property)
@@ -1480,20 +1562,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
                        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
                }
 
+       net = skb_net(skb);
        /* Protocol supported? */
-       pp = ip_vs_proto_get(iph.protocol);
-       if (unlikely(!pp))
+       pd = ip_vs_proto_data_get(net, iph.protocol);
+       if (unlikely(!pd))
                return NF_ACCEPT;
-
+       pp = pd->pp;
        /*
         * Check if the packet belongs to an existing connection entry
         */
-       cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
+       cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
 
        if (unlikely(!cp)) {
                int v;
 
-               if (!pp->conn_schedule(af, skb, pp, &v, &cp))
+               if (!pp->conn_schedule(af, skb, pd, &v, &cp))
                        return v;
        }
 
@@ -1505,12 +1588,13 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-
+       net = skb_net(skb);
+       ipvs = net_ipvs(net);
        /* Check the server status */
        if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                /* the destination server is not available */
 
-               if (sysctl_ip_vs_expire_nodest_conn) {
+               if (sysctl_expire_nodest_conn(ipvs)) {
                        /* try to expire the connection immediately */
                        ip_vs_conn_expire_now(cp);
                }
@@ -1521,7 +1605,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        ip_vs_in_stats(cp, skb);
-       restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+       restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
        if (cp->packet_xmit)
                ret = cp->packet_xmit(skb, cp, pp);
                /* do not touch skb anymore */
@@ -1535,35 +1619,41 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
         *
         * Sync connection if it is about to close to
         * encourage the standby servers to update the connection timeouts
+        *
+        * For ONE_PKT let ip_vs_sync_conn() do the filter work.
         */
-       pkts = atomic_add_return(1, &cp->in_pkts);
-       if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               pkts = sysctl_sync_threshold(ipvs);
+       else
+               pkts = atomic_add_return(1, &cp->in_pkts);
+
+       if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
            cp->protocol == IPPROTO_SCTP) {
                if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
-                       (pkts % sysctl_ip_vs_sync_threshold[1]
-                        == sysctl_ip_vs_sync_threshold[0])) ||
+                       (pkts % sysctl_sync_period(ipvs)
+                        == sysctl_sync_threshold(ipvs))) ||
                                (cp->old_state != cp->state &&
                                 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
                                  (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
                                  (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
-                       ip_vs_sync_conn(cp);
+                       ip_vs_sync_conn(net, cp);
                        goto out;
                }
        }
 
        /* Keep this block last: TCP and others with pp->num_states <= 1 */
-       else if (af == AF_INET &&
-           (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+       else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
            (((cp->protocol != IPPROTO_TCP ||
               cp->state == IP_VS_TCP_S_ESTABLISHED) &&
-             (pkts % sysctl_ip_vs_sync_threshold[1]
-              == sysctl_ip_vs_sync_threshold[0])) ||
+             (pkts % sysctl_sync_period(ipvs)
+              == sysctl_sync_threshold(ipvs))) ||
             ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
              ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
               (cp->state == IP_VS_TCP_S_CLOSE) ||
               (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
               (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
-               ip_vs_sync_conn(cp);
+               ip_vs_sync_conn(net, cp);
 out:
        cp->old_state = cp->state;
 
@@ -1782,7 +1872,39 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        },
 #endif
 };
+/*
+ *     Initialize IP Virtual Server netns mem.
+ */
+static int __net_init __ip_vs_init(struct net *net)
+{
+       struct netns_ipvs *ipvs;
+
+       ipvs = net_generic(net, ip_vs_net_id);
+       if (ipvs == NULL) {
+               pr_err("%s(): no memory.\n", __func__);
+               return -ENOMEM;
+       }
+       ipvs->net = net;
+       /* Counters used for creating unique names */
+       ipvs->gen = atomic_read(&ipvs_netns_cnt);
+       atomic_inc(&ipvs_netns_cnt);
+       net->ipvs = ipvs;
+       printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
+                        sizeof(struct netns_ipvs), ipvs->gen);
+       return 0;
+}
 
+static void __net_exit __ip_vs_cleanup(struct net *net)
+{
+       IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static struct pernet_operations ipvs_core_ops = {
+       .init = __ip_vs_init,
+       .exit = __ip_vs_cleanup,
+       .id   = &ip_vs_net_id,
+       .size = sizeof(struct netns_ipvs),
+};
 
 /*
  *     Initialize IP Virtual Server
@@ -1791,8 +1913,11 @@ static int __init ip_vs_init(void)
 {
        int ret;
 
-       ip_vs_estimator_init();
+       ret = register_pernet_subsys(&ipvs_core_ops);   /* Alloc ip_vs struct */
+       if (ret < 0)
+               return ret;
 
+       ip_vs_estimator_init();
        ret = ip_vs_control_init();
        if (ret < 0) {
                pr_err("can't setup control.\n");
@@ -1813,15 +1938,23 @@ static int __init ip_vs_init(void)
                goto cleanup_app;
        }
 
+       ret = ip_vs_sync_init();
+       if (ret < 0) {
+               pr_err("can't setup sync data.\n");
+               goto cleanup_conn;
+       }
+
        ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
        if (ret < 0) {
                pr_err("can't register hooks.\n");
-               goto cleanup_conn;
+               goto cleanup_sync;
        }
 
        pr_info("ipvs loaded.\n");
        return ret;
 
+cleanup_sync:
+       ip_vs_sync_cleanup();
   cleanup_conn:
        ip_vs_conn_cleanup();
   cleanup_app:
@@ -1831,17 +1964,20 @@ static int __init ip_vs_init(void)
        ip_vs_control_cleanup();
   cleanup_estimator:
        ip_vs_estimator_cleanup();
+       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        return ret;
 }
 
 static void __exit ip_vs_cleanup(void)
 {
        nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+       ip_vs_sync_cleanup();
        ip_vs_conn_cleanup();
        ip_vs_app_cleanup();
        ip_vs_protocol_cleanup();
        ip_vs_control_cleanup();
        ip_vs_estimator_cleanup();
+       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        pr_info("ipvs unloaded.\n");
 }
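
The new ipvs_core_ops ties the whole subsystem to the netns lifecycle: because .id and .size are set, the pernet core allocates a zeroed struct netns_ipvs for every namespace and net_generic(net, ip_vs_net_id) hands it back in .init/.exit. A minimal sketch of that registration pattern as its own module, with hypothetical foo_* names and assuming only the register_pernet_subsys()/net_generic() calls used in the hunks above:

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int foo_net_id __read_mostly;

struct foo_net {                        /* per-namespace private state */
        int gen;                        /* anything the subsystem needs per netns */
};

static int __net_init foo_net_init(struct net *net)
{
        /* the area was allocated and zeroed by the pernet core (.size below) */
        struct foo_net *fn = net_generic(net, foo_net_id);

        fn->gen = 1;
        pr_info("foo: netns state ready, %zu bytes\n", sizeof(*fn));
        return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
        struct foo_net *fn = net_generic(net, foo_net_id);

        pr_info("foo: netns state released, gen=%d\n", fn->gen);
}

static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .exit = foo_net_exit,
        .id   = &foo_net_id,
        .size = sizeof(struct foo_net),
};

static int __init foo_init(void)
{
        return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
        unregister_pernet_subsys(&foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The patch above follows the same shape, and additionally exports ip_vs_net_id (conditionally) so that other IPVS objects can reach the per-namespace block.
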
 
index 22f7ad5..b799cea 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/mutex.h>
 
 #include <net/net_namespace.h>
+#include <linux/nsproxy.h>
 #include <net/ip.h>
 #ifdef CONFIG_IP_VS_IPV6
 #include <net/ipv6.h>
@@ -57,42 +58,7 @@ static DEFINE_MUTEX(__ip_vs_mutex);
 /* lock for service table */
 static DEFINE_RWLOCK(__ip_vs_svc_lock);
 
-/* lock for table with the real services */
-static DEFINE_RWLOCK(__ip_vs_rs_lock);
-
-/* lock for state and timeout tables */
-static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
-
-/* lock for drop entry handling */
-static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
-
-/* lock for drop packet handling */
-static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
-
-/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
-static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
-
-/* number of virtual services */
-static int ip_vs_num_services = 0;
-
 /* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
-static int sysctl_ip_vs_amemthresh = 1024;
-static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
-int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
-#ifdef CONFIG_IP_VS_NFCT
-int sysctl_ip_vs_conntrack;
-#endif
-int sysctl_ip_vs_snat_reroute = 1;
-
 
 #ifdef CONFIG_IP_VS_DEBUG
 static int sysctl_ip_vs_debug_level = 0;
@@ -105,27 +71,28 @@ int ip_vs_get_debug_level(void)
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+static int __ip_vs_addr_is_local_v6(struct net *net,
+                                   const struct in6_addr *addr)
 {
        struct rt6_info *rt;
-       struct flowi fl = {
-               .oif = 0,
-               .fl6_dst = *addr,
-               .fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
+       struct flowi6 fl6 = {
+               .daddr = *addr,
        };
 
-       rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
        if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
-                       return 1;
+               return 1;
 
        return 0;
 }
 #endif
+
+#ifdef CONFIG_SYSCTL
 /*
  *     update_defense_level is called from keventd and from sysctl,
  *     so it needs to protect itself from softirqs
  */
-static void update_defense_level(void)
+static void update_defense_level(struct netns_ipvs *ipvs)
 {
        struct sysinfo i;
        static int old_secure_tcp = 0;
@@ -141,73 +108,73 @@ static void update_defense_level(void)
        /* si_swapinfo(&i); */
        /* availmem = availmem - (i.totalswap - i.freeswap); */
 
-       nomem = (availmem < sysctl_ip_vs_amemthresh);
+       nomem = (availmem < ipvs->sysctl_amemthresh);
 
        local_bh_disable();
 
        /* drop_entry */
-       spin_lock(&__ip_vs_dropentry_lock);
-       switch (sysctl_ip_vs_drop_entry) {
+       spin_lock(&ipvs->dropentry_lock);
+       switch (ipvs->sysctl_drop_entry) {
        case 0:
-               atomic_set(&ip_vs_dropentry, 0);
+               atomic_set(&ipvs->dropentry, 0);
                break;
        case 1:
                if (nomem) {
-                       atomic_set(&ip_vs_dropentry, 1);
-                       sysctl_ip_vs_drop_entry = 2;
+                       atomic_set(&ipvs->dropentry, 1);
+                       ipvs->sysctl_drop_entry = 2;
                } else {
-                       atomic_set(&ip_vs_dropentry, 0);
+                       atomic_set(&ipvs->dropentry, 0);
                }
                break;
        case 2:
                if (nomem) {
-                       atomic_set(&ip_vs_dropentry, 1);
+                       atomic_set(&ipvs->dropentry, 1);
                } else {
-                       atomic_set(&ip_vs_dropentry, 0);
-                       sysctl_ip_vs_drop_entry = 1;
+                       atomic_set(&ipvs->dropentry, 0);
+                       ipvs->sysctl_drop_entry = 1;
                };
                break;
        case 3:
-               atomic_set(&ip_vs_dropentry, 1);
+               atomic_set(&ipvs->dropentry, 1);
                break;
        }
-       spin_unlock(&__ip_vs_dropentry_lock);
+       spin_unlock(&ipvs->dropentry_lock);
 
        /* drop_packet */
-       spin_lock(&__ip_vs_droppacket_lock);
-       switch (sysctl_ip_vs_drop_packet) {
+       spin_lock(&ipvs->droppacket_lock);
+       switch (ipvs->sysctl_drop_packet) {
        case 0:
-               ip_vs_drop_rate = 0;
+               ipvs->drop_rate = 0;
                break;
        case 1:
                if (nomem) {
-                       ip_vs_drop_rate = ip_vs_drop_counter
-                               = sysctl_ip_vs_amemthresh /
-                               (sysctl_ip_vs_amemthresh-availmem);
-                       sysctl_ip_vs_drop_packet = 2;
+                       ipvs->drop_rate = ipvs->drop_counter
+                               = ipvs->sysctl_amemthresh /
+                               (ipvs->sysctl_amemthresh-availmem);
+                       ipvs->sysctl_drop_packet = 2;
                } else {
-                       ip_vs_drop_rate = 0;
+                       ipvs->drop_rate = 0;
                }
                break;
        case 2:
                if (nomem) {
-                       ip_vs_drop_rate = ip_vs_drop_counter
-                               = sysctl_ip_vs_amemthresh /
-                               (sysctl_ip_vs_amemthresh-availmem);
+                       ipvs->drop_rate = ipvs->drop_counter
+                               = ipvs->sysctl_amemthresh /
+                               (ipvs->sysctl_amemthresh-availmem);
                } else {
-                       ip_vs_drop_rate = 0;
-                       sysctl_ip_vs_drop_packet = 1;
+                       ipvs->drop_rate = 0;
+                       ipvs->sysctl_drop_packet = 1;
                }
                break;
        case 3:
-               ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
+               ipvs->drop_rate = ipvs->sysctl_am_droprate;
                break;
        }
-       spin_unlock(&__ip_vs_droppacket_lock);
+       spin_unlock(&ipvs->droppacket_lock);
 
        /* secure_tcp */
-       spin_lock(&ip_vs_securetcp_lock);
-       switch (sysctl_ip_vs_secure_tcp) {
+       spin_lock(&ipvs->securetcp_lock);
+       switch (ipvs->sysctl_secure_tcp) {
        case 0:
                if (old_secure_tcp >= 2)
                        to_change = 0;
@@ -216,7 +183,7 @@ static void update_defense_level(void)
                if (nomem) {
                        if (old_secure_tcp < 2)
                                to_change = 1;
-                       sysctl_ip_vs_secure_tcp = 2;
+                       ipvs->sysctl_secure_tcp = 2;
                } else {
                        if (old_secure_tcp >= 2)
                                to_change = 0;
@@ -229,7 +196,7 @@ static void update_defense_level(void)
                } else {
                        if (old_secure_tcp >= 2)
                                to_change = 0;
-                       sysctl_ip_vs_secure_tcp = 1;
+                       ipvs->sysctl_secure_tcp = 1;
                }
                break;
        case 3:
@@ -237,10 +204,11 @@ static void update_defense_level(void)
                        to_change = 1;
                break;
        }
-       old_secure_tcp = sysctl_ip_vs_secure_tcp;
+       old_secure_tcp = ipvs->sysctl_secure_tcp;
        if (to_change >= 0)
-               ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
-       spin_unlock(&ip_vs_securetcp_lock);
+               ip_vs_protocol_timeout_change(ipvs,
+                                             ipvs->sysctl_secure_tcp > 1);
+       spin_unlock(&ipvs->securetcp_lock);
 
        local_bh_enable();
 }
@@ -250,17 +218,18 @@ static void update_defense_level(void)
  *     Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD   1*HZ
-static void defense_work_handler(struct work_struct *work);
-static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
 static void defense_work_handler(struct work_struct *work)
 {
-       update_defense_level();
-       if (atomic_read(&ip_vs_dropentry))
-               ip_vs_random_dropentry();
+       struct netns_ipvs *ipvs =
+               container_of(work, struct netns_ipvs, defense_work.work);
 
-       schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
+       update_defense_level(ipvs);
+       if (atomic_read(&ipvs->dropentry))
+               ip_vs_random_dropentry(ipvs->net);
+       schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
 }
+#endif
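With the static DECLARE_DELAYED_WORK gone, defense_work is now embedded in struct netns_ipvs and the handler recovers its owning namespace via container_of(), so each namespace must arm and cancel its own work item. A minimal sketch of how a pernet init/exit pair could do that (function names hypothetical; the patch does this elsewhere in the netns setup code):

static int __net_init example_defense_init(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
        schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
        return 0;
}

static void __net_exit example_defense_exit(struct net *net)
{
        cancel_delayed_work_sync(&net_ipvs(net)->defense_work);
}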
 
 int
 ip_vs_use_count_inc(void)
@@ -287,33 +256,13 @@ static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
 /* the service table hashed by fwmark */
 static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
 
-/*
- *     Hash table: for real service lookups
- */
-#define IP_VS_RTAB_BITS 4
-#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
-#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
-static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
-
-/*
- *     Trash for destinations
- */
-static LIST_HEAD(ip_vs_dest_trash);
-
-/*
- *     FTP & NULL virtual service counters
- */
-static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
-static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
-
 
 /*
  *     Returns hash value for virtual service
  */
-static __inline__ unsigned
-ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
-                 __be16 port)
+static inline unsigned
+ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+                 const union nf_inet_addr *addr, __be16 port)
 {
        register unsigned porth = ntohs(port);
        __be32 addr_fold = addr->ip;
@@ -323,6 +272,7 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
 #endif
+       addr_fold ^= ((size_t)net>>8);
 
        return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
                & IP_VS_SVC_TAB_MASK;
@@ -331,13 +281,13 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
 /*
  *     Returns hash value of fwmark for virtual service lookup
  */
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
 {
-       return fwmark & IP_VS_SVC_TAB_MASK;
+       return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
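Both key functions above now fold the owning struct net pointer into an otherwise shared hash table, presumably shifting by 8 to discard low pointer bits that are constant from allocator alignment; the hash only spreads entries, and the lookup side still needs net_eq() to actually isolate namespaces. A condensed sketch of the fwmark case (af check omitted, function name hypothetical):

static struct ip_vs_service *example_fwm_lookup(struct net *net, __u32 fwmark)
{
        unsigned hash = (((size_t)net >> 8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
        struct ip_vs_service *svc;

        list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
                /* net_eq() does the real isolation; the hash is only a spreader */
                if (svc->fwmark == fwmark && net_eq(svc->net, net))
                        return svc;
        }
        return NULL;
}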
 
 /*
- *     Hashes a service in the ip_vs_svc_table by <proto,addr,port>
+ *     Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
  *     or in the ip_vs_svc_fwm_table by fwmark.
  *     Should be called with locked tables.
  */
@@ -353,16 +303,16 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
 
        if (svc->fwmark == 0) {
                /*
-                *  Hash it by <protocol,addr,port> in ip_vs_svc_table
+                *  Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
                 */
-               hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
-                                        svc->port);
+               hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+                                        &svc->addr, svc->port);
                list_add(&svc->s_list, &ip_vs_svc_table[hash]);
        } else {
                /*
-                *  Hash it by fwmark in ip_vs_svc_fwm_table
+                *  Hash it by fwmark in svc_fwm_table
                 */
-               hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
+               hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
                list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
        }
 
@@ -374,7 +324,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
 
 
 /*
- *     Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
+ *     Unhashes a service from svc_table / svc_fwm_table.
  *     Should be called with locked tables.
  */
 static int ip_vs_svc_unhash(struct ip_vs_service *svc)
@@ -386,10 +336,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
        }
 
        if (svc->fwmark == 0) {
-               /* Remove it from the ip_vs_svc_table table */
+               /* Remove it from the svc_table */
                list_del(&svc->s_list);
        } else {
-               /* Remove it from the ip_vs_svc_fwm_table table */
+               /* Remove it from the svc_fwm_table */
                list_del(&svc->f_list);
        }
 
@@ -400,23 +350,24 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
 
 
 /*
- *     Get service by {proto,addr,port} in the service table.
+ *     Get service by {netns, proto,addr,port} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
-                   __be16 vport)
+__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+                    const union nf_inet_addr *vaddr, __be16 vport)
 {
        unsigned hash;
        struct ip_vs_service *svc;
 
        /* Check for "full" addressed entries */
-       hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
+       hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
 
        list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
                if ((svc->af == af)
                    && ip_vs_addr_equal(af, &svc->addr, vaddr)
                    && (svc->port == vport)
-                   && (svc->protocol == protocol)) {
+                   && (svc->protocol == protocol)
+                   && net_eq(svc->net, net)) {
                        /* HIT */
                        return svc;
                }
@@ -430,16 +381,17 @@ __ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
  *     Get service by {fwmark} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
 {
        unsigned hash;
        struct ip_vs_service *svc;
 
        /* Check for fwmark addressed entries */
-       hash = ip_vs_svc_fwm_hashkey(fwmark);
+       hash = ip_vs_svc_fwm_hashkey(net, fwmark);
 
        list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
-               if (svc->fwmark == fwmark && svc->af == af) {
+               if (svc->fwmark == fwmark && svc->af == af
+                   && net_eq(svc->net, net)) {
                        /* HIT */
                        return svc;
                }
@@ -449,42 +401,46 @@ __ip_vs_svc_fwm_find(int af, __u32 fwmark)
 }
 
 struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
                  const union nf_inet_addr *vaddr, __be16 vport)
 {
        struct ip_vs_service *svc;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
        read_lock(&__ip_vs_svc_lock);
 
        /*
         *      Check the table hashed by fwmark first
         */
-       if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark)))
-               goto out;
+       if (fwmark) {
+               svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+               if (svc)
+                       goto out;
+       }
 
        /*
         *      Check the table hashed by <protocol,addr,port>
         *      for "full" addressed entries
         */
-       svc = __ip_vs_service_find(af, protocol, vaddr, vport);
+       svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
 
        if (svc == NULL
            && protocol == IPPROTO_TCP
-           && atomic_read(&ip_vs_ftpsvc_counter)
+           && atomic_read(&ipvs->ftpsvc_counter)
            && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
                /*
                 * Check if ftp service entry exists, the packet
                 * might belong to FTP data connections.
                 */
-               svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT);
+               svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
        }
 
        if (svc == NULL
-           && atomic_read(&ip_vs_nullsvc_counter)) {
+           && atomic_read(&ipvs->nullsvc_counter)) {
                /*
                 * Check if the catch-all port (port zero) exists
                 */
-               svc = __ip_vs_service_find(af, protocol, vaddr, 0);
+               svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
        }
 
   out:
@@ -519,6 +475,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
                              svc->fwmark,
                              IP_VS_DBG_ADDR(svc->af, &svc->addr),
                              ntohs(svc->port), atomic_read(&svc->usecnt));
+               free_percpu(svc->stats.cpustats);
                kfree(svc);
        }
 }
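The free_percpu() added before kfree(svc) is the first of several: service and destination stats now carry an alloc_percpu()'d counter array, so every release path below (trash handling, __ip_vs_del_dest(), __ip_vs_del_service()) gains the same call. A minimal sketch of the alloc/free pairing, with example_obj and its helpers hypothetical:

struct example_obj {
        struct ip_vs_stats stats;
        /* ... other fields ... */
};

static struct example_obj *example_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        obj->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
        if (!obj->stats.cpustats) {
                kfree(obj);
                return NULL;
        }
        return obj;
}

static void example_free(struct example_obj *obj)
{
        free_percpu(obj->stats.cpustats);       /* must come before kfree() */
        kfree(obj);
}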
@@ -545,10 +502,10 @@ static inline unsigned ip_vs_rs_hashkey(int af,
 }
 
 /*
- *     Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
+ *     Hashes ip_vs_dest in rs_table by <proto,addr,port>.
  *     should be called with locked tables.
  */
-static int ip_vs_rs_hash(struct ip_vs_dest *dest)
+static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
 {
        unsigned hash;
 
@@ -562,19 +519,19 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
         */
        hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
 
-       list_add(&dest->d_list, &ip_vs_rtable[hash]);
+       list_add(&dest->d_list, &ipvs->rs_table[hash]);
 
        return 1;
 }
 
 /*
- *     UNhashes ip_vs_dest from ip_vs_rtable.
+ *     UNhashes ip_vs_dest from rs_table.
  *     should be called with locked tables.
  */
 static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
 {
        /*
-        * Remove it from the ip_vs_rtable table.
+        * Remove it from the rs_table.
         */
        if (!list_empty(&dest->d_list)) {
                list_del(&dest->d_list);
@@ -588,10 +545,11 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
  *     Lookup real service by <proto,addr,port> in the real service table.
  */
 struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
                          const union nf_inet_addr *daddr,
                          __be16 dport)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        unsigned hash;
        struct ip_vs_dest *dest;
 
@@ -601,19 +559,19 @@ ip_vs_lookup_real_service(int af, __u16 protocol,
         */
        hash = ip_vs_rs_hashkey(af, daddr, dport);
 
-       read_lock(&__ip_vs_rs_lock);
-       list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
+       read_lock(&ipvs->rs_lock);
+       list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
                if ((dest->af == af)
                    && ip_vs_addr_equal(af, &dest->addr, daddr)
                    && (dest->port == dport)
                    && ((dest->protocol == protocol) ||
                        dest->vfwmark)) {
                        /* HIT */
-                       read_unlock(&__ip_vs_rs_lock);
+                       read_unlock(&ipvs->rs_lock);
                        return dest;
                }
        }
-       read_unlock(&__ip_vs_rs_lock);
+       read_unlock(&ipvs->rs_lock);
 
        return NULL;
 }
@@ -652,15 +610,16 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
  * ip_vs_lookup_real_service() looked promising, but
  * seems not working as expected.
  */
-struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int af,
+                                  const union nf_inet_addr *daddr,
                                   __be16 dport,
                                   const union nf_inet_addr *vaddr,
-                                  __be16 vport, __u16 protocol)
+                                  __be16 vport, __u16 protocol, __u32 fwmark)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_service *svc;
 
-       svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
+       svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
        if (!svc)
                return NULL;
        dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -685,11 +644,12 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
                     __be16 dport)
 {
        struct ip_vs_dest *dest, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
        /*
         * Find the destination in trash
         */
-       list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+       list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
                IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
                              "dest->refcnt=%d\n",
                              dest->vfwmark,
@@ -720,6 +680,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
                        list_del(&dest->n_list);
                        ip_vs_dst_reset(dest);
                        __ip_vs_unbind_svc(dest);
+                       free_percpu(dest->stats.cpustats);
                        kfree(dest);
                }
        }
@@ -737,25 +698,53 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
  *  are expired, and the refcnt of each destination in the trash must
  *  be 1, so we simply release them here.
  */
-static void ip_vs_trash_cleanup(void)
+static void ip_vs_trash_cleanup(struct net *net)
 {
        struct ip_vs_dest *dest, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+       list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
                list_del(&dest->n_list);
                ip_vs_dst_reset(dest);
                __ip_vs_unbind_svc(dest);
+               free_percpu(dest->stats.cpustats);
                kfree(dest);
        }
 }
 
+static void
+ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
+{
+#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
+
+       spin_lock_bh(&src->lock);
+
+       IP_VS_SHOW_STATS_COUNTER(conns);
+       IP_VS_SHOW_STATS_COUNTER(inpkts);
+       IP_VS_SHOW_STATS_COUNTER(outpkts);
+       IP_VS_SHOW_STATS_COUNTER(inbytes);
+       IP_VS_SHOW_STATS_COUNTER(outbytes);
+
+       ip_vs_read_estimator(dst, src);
+
+       spin_unlock_bh(&src->lock);
+}
 
 static void
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
        spin_lock_bh(&stats->lock);
 
-       memset(&stats->ustats, 0, sizeof(stats->ustats));
+       /* get current counters as zero point, rates are zeroed */
+
+#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
+
+       IP_VS_ZERO_STATS_COUNTER(conns);
+       IP_VS_ZERO_STATS_COUNTER(inpkts);
+       IP_VS_ZERO_STATS_COUNTER(outpkts);
+       IP_VS_ZERO_STATS_COUNTER(inbytes);
+       IP_VS_ZERO_STATS_COUNTER(outbytes);
+
        ip_vs_zero_estimator(stats);
 
        spin_unlock_bh(&stats->lock);
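ip_vs_zero_stats() no longer wipes ustats, since the per-cpu packet path keeps feeding those counters; instead it records the current values in ustats0 as a new origin, and ip_vs_copy_stats() above reports the growth since that origin. Expanding the conns case of the two macros:

/* IP_VS_ZERO_STATS_COUNTER(conns): remember the current value as the origin */
stats->ustats0.conns = stats->ustats.conns;

/* IP_VS_SHOW_STATS_COUNTER(conns): report only the growth since the last zero */
dst->conns = src->ustats.conns - src->ustats0.conns;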
@@ -768,6 +757,7 @@ static void
 __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                    struct ip_vs_dest_user_kern *udest, int add)
 {
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
        int conn_flags;
 
        /* set the weight and the flags */
@@ -780,12 +770,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                conn_flags |= IP_VS_CONN_F_NOOUTPUT;
        } else {
                /*
-                *    Put the real service in ip_vs_rtable if not present.
+                *    Put the real service in rs_table if not present.
                 *    For now only for NAT!
                 */
-               write_lock_bh(&__ip_vs_rs_lock);
-               ip_vs_rs_hash(dest);
-               write_unlock_bh(&__ip_vs_rs_lock);
+               write_lock_bh(&ipvs->rs_lock);
+               ip_vs_rs_hash(ipvs, dest);
+               write_unlock_bh(&ipvs->rs_lock);
        }
        atomic_set(&dest->conn_flags, conn_flags);
 
@@ -808,12 +798,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        dest->u_threshold = udest->u_threshold;
        dest->l_threshold = udest->l_threshold;
 
-       spin_lock(&dest->dst_lock);
+       spin_lock_bh(&dest->dst_lock);
        ip_vs_dst_reset(dest);
-       spin_unlock(&dest->dst_lock);
+       spin_unlock_bh(&dest->dst_lock);
 
        if (add)
-               ip_vs_new_estimator(&dest->stats);
+               ip_vs_start_estimator(svc->net, &dest->stats);
 
        write_lock_bh(&__ip_vs_svc_lock);
 
@@ -850,12 +840,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
                atype = ipv6_addr_type(&udest->addr.in6);
                if ((!(atype & IPV6_ADDR_UNICAST) ||
                        atype & IPV6_ADDR_LINKLOCAL) &&
-                       !__ip_vs_addr_is_local_v6(&udest->addr.in6))
+                       !__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
                        return -EINVAL;
        } else
 #endif
        {
-               atype = inet_addr_type(&init_net, udest->addr.ip);
+               atype = inet_addr_type(svc->net, udest->addr.ip);
                if (atype != RTN_LOCAL && atype != RTN_UNICAST)
                        return -EINVAL;
        }
@@ -865,6 +855,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
                pr_err("%s(): no memory.\n", __func__);
                return -ENOMEM;
        }
+       dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+       if (!dest->stats.cpustats) {
+               pr_err("%s() alloc_percpu failed\n", __func__);
+               goto err_alloc;
+       }
 
        dest->af = svc->af;
        dest->protocol = svc->protocol;
@@ -888,6 +883,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 
        LeaveFunction(2);
        return 0;
+
+err_alloc:
+       kfree(dest);
+       return -ENOMEM;
 }
 
 
@@ -1006,16 +1005,18 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
 /*
  *     Delete a destination (must be already unlinked from the service)
  */
-static void __ip_vs_del_dest(struct ip_vs_dest *dest)
+static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
 {
-       ip_vs_kill_estimator(&dest->stats);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ip_vs_stop_estimator(net, &dest->stats);
 
        /*
         *  Remove it from the d-linked list with the real services.
         */
-       write_lock_bh(&__ip_vs_rs_lock);
+       write_lock_bh(&ipvs->rs_lock);
        ip_vs_rs_unhash(dest);
-       write_unlock_bh(&__ip_vs_rs_lock);
+       write_unlock_bh(&ipvs->rs_lock);
 
        /*
         *  Decrease the refcnt of the dest, and free the dest
@@ -1034,6 +1035,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
                   and only one user context can update virtual service at a
                   time, so the operation here is OK */
                atomic_dec(&dest->svc->refcnt);
+               free_percpu(dest->stats.cpustats);
                kfree(dest);
        } else {
                IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1041,7 +1043,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
                              IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port),
                              atomic_read(&dest->refcnt));
-               list_add(&dest->n_list, &ip_vs_dest_trash);
+               list_add(&dest->n_list, &ipvs->dest_trash);
                atomic_inc(&dest->refcnt);
        }
 }
@@ -1105,7 +1107,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
        /*
         *      Delete the destination
         */
-       __ip_vs_del_dest(dest);
+       __ip_vs_del_dest(svc->net, dest);
 
        LeaveFunction(2);
 
@@ -1117,13 +1119,14 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
  *     Add a service into the service hash table
  */
 static int
-ip_vs_add_service(struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                  struct ip_vs_service **svc_p)
 {
        int ret = 0;
        struct ip_vs_scheduler *sched = NULL;
        struct ip_vs_pe *pe = NULL;
        struct ip_vs_service *svc = NULL;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
        /* increase the module use count */
        ip_vs_use_count_inc();
@@ -1137,7 +1140,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
        }
 
        if (u->pe_name && *u->pe_name) {
-               pe = ip_vs_pe_get(u->pe_name);
+               pe = ip_vs_pe_getbyname(u->pe_name);
                if (pe == NULL) {
                        pr_info("persistence engine module ip_vs_pe_%s "
                                "not found\n", u->pe_name);
@@ -1159,6 +1162,11 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
                ret = -ENOMEM;
                goto out_err;
        }
+       svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+       if (!svc->stats.cpustats) {
+               pr_err("%s() alloc_percpu failed\n", __func__);
+               goto out_err;
+       }
 
        /* I'm the first user of the service */
        atomic_set(&svc->usecnt, 0);
@@ -1172,6 +1180,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
        svc->flags = u->flags;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;
+       svc->net = net;
 
        INIT_LIST_HEAD(&svc->destinations);
        rwlock_init(&svc->sched_lock);
@@ -1189,15 +1198,15 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
 
        /* Update the virtual service counters */
        if (svc->port == FTPPORT)
-               atomic_inc(&ip_vs_ftpsvc_counter);
+               atomic_inc(&ipvs->ftpsvc_counter);
        else if (svc->port == 0)
-               atomic_inc(&ip_vs_nullsvc_counter);
+               atomic_inc(&ipvs->nullsvc_counter);
 
-       ip_vs_new_estimator(&svc->stats);
+       ip_vs_start_estimator(net, &svc->stats);
 
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
-               ip_vs_num_services++;
+               ipvs->num_services++;
 
        /* Hash the service into the service table */
        write_lock_bh(&__ip_vs_svc_lock);
@@ -1207,6 +1216,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
        *svc_p = svc;
        return 0;
 
+
  out_err:
        if (svc != NULL) {
                ip_vs_unbind_scheduler(svc);
@@ -1215,6 +1225,8 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
                        ip_vs_app_inc_put(svc->inc);
                        local_bh_enable();
                }
+               if (svc->stats.cpustats)
+                       free_percpu(svc->stats.cpustats);
                kfree(svc);
        }
        ip_vs_scheduler_put(sched);
@@ -1248,7 +1260,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
        old_sched = sched;
 
        if (u->pe_name && *u->pe_name) {
-               pe = ip_vs_pe_get(u->pe_name);
+               pe = ip_vs_pe_getbyname(u->pe_name);
                if (pe == NULL) {
                        pr_info("persistence engine module ip_vs_pe_%s "
                                "not found\n", u->pe_name);
@@ -1334,14 +1346,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
        struct ip_vs_dest *dest, *nxt;
        struct ip_vs_scheduler *old_sched;
        struct ip_vs_pe *old_pe;
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
        pr_info("%s: enter\n", __func__);
 
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
-               ip_vs_num_services--;
+               ipvs->num_services--;
 
-       ip_vs_kill_estimator(&svc->stats);
+       ip_vs_stop_estimator(svc->net, &svc->stats);
 
        /* Unbind scheduler */
        old_sched = svc->scheduler;
@@ -1364,16 +1377,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
         */
        list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
                __ip_vs_unlink_dest(svc, dest, 0);
-               __ip_vs_del_dest(dest);
+               __ip_vs_del_dest(svc->net, dest);
        }
 
        /*
         *    Update the virtual service counters
         */
        if (svc->port == FTPPORT)
-               atomic_dec(&ip_vs_ftpsvc_counter);
+               atomic_dec(&ipvs->ftpsvc_counter);
        else if (svc->port == 0)
-               atomic_dec(&ip_vs_nullsvc_counter);
+               atomic_dec(&ipvs->nullsvc_counter);
 
        /*
         *    Free the service if nobody refers to it
@@ -1383,6 +1396,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
                              svc->fwmark,
                              IP_VS_DBG_ADDR(svc->af, &svc->addr),
                              ntohs(svc->port), atomic_read(&svc->usecnt));
+               free_percpu(svc->stats.cpustats);
                kfree(svc);
        }
 
@@ -1428,17 +1442,19 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
 /*
  *     Flush all the virtual services
  */
-static int ip_vs_flush(void)
+static int ip_vs_flush(struct net *net)
 {
        int idx;
        struct ip_vs_service *svc, *nxt;
 
        /*
-        * Flush the service table hashed by <protocol,addr,port>
+        * Flush the service table hashed by <netns,protocol,addr,port>
         */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
-               list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
-                       ip_vs_unlink_service(svc);
+               list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+                                        s_list) {
+                       if (net_eq(svc->net, net))
+                               ip_vs_unlink_service(svc);
                }
        }
 
@@ -1448,7 +1464,8 @@ static int ip_vs_flush(void)
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry_safe(svc, nxt,
                                         &ip_vs_svc_fwm_table[idx], f_list) {
-                       ip_vs_unlink_service(svc);
+                       if (net_eq(svc->net, net))
+                               ip_vs_unlink_service(svc);
                }
        }
 
@@ -1472,32 +1489,35 @@ static int ip_vs_zero_service(struct ip_vs_service *svc)
        return 0;
 }
 
-static int ip_vs_zero_all(void)
+static int ip_vs_zero_all(struct net *net)
 {
        int idx;
        struct ip_vs_service *svc;
 
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-                       ip_vs_zero_service(svc);
+                       if (net_eq(svc->net, net))
+                               ip_vs_zero_service(svc);
                }
        }
 
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-                       ip_vs_zero_service(svc);
+                       if (net_eq(svc->net, net))
+                               ip_vs_zero_service(svc);
                }
        }
 
-       ip_vs_zero_stats(&ip_vs_stats);
+       ip_vs_zero_stats(&net_ipvs(net)->tot_stats);
        return 0;
 }
 
-
+#ifdef CONFIG_SYSCTL
 static int
 proc_do_defense_mode(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
+       struct net *net = current->nsproxy->net_ns;
        int *valp = table->data;
        int val = *valp;
        int rc;
@@ -1508,13 +1528,12 @@ proc_do_defense_mode(ctl_table *table, int write,
                        /* Restore the correct value */
                        *valp = val;
                } else {
-                       update_defense_level();
+                       update_defense_level(net_ipvs(net));
                }
        }
        return rc;
 }
 
-
 static int
 proc_do_sync_threshold(ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1534,45 +1553,54 @@ proc_do_sync_threshold(ctl_table *table, int write,
        return rc;
 }
 
+static int
+proc_do_sync_mode(ctl_table *table, int write,
+                    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int *valp = table->data;
+       int val = *valp;
+       int rc;
+
+       rc = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (write && (*valp != val)) {
+               if ((*valp < 0) || (*valp > 1)) {
+                       /* Restore the correct value */
+                       *valp = val;
+               } else {
+                       struct net *net = current->nsproxy->net_ns;
+                       ip_vs_sync_switch_mode(net, val);
+               }
+       }
+       return rc;
+}
 
 /*
  *     IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
+ *     Do not change the order or insert new entries without
+ *     aligning with the netns init in __ip_vs_control_init()
  */
 
 static struct ctl_table vs_vars[] = {
        {
                .procname       = "amemthresh",
-               .data           = &sysctl_ip_vs_amemthresh,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-#ifdef CONFIG_IP_VS_DEBUG
-       {
-               .procname       = "debug_level",
-               .data           = &sysctl_ip_vs_debug_level,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-#endif
        {
                .procname       = "am_droprate",
-               .data           = &sysctl_ip_vs_am_droprate,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "drop_entry",
-               .data           = &sysctl_ip_vs_drop_entry,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_defense_mode,
        },
        {
                .procname       = "drop_packet",
-               .data           = &sysctl_ip_vs_drop_packet,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_defense_mode,
@@ -1580,7 +1608,6 @@ static struct ctl_table vs_vars[] = {
 #ifdef CONFIG_IP_VS_NFCT
        {
                .procname       = "conntrack",
-               .data           = &sysctl_ip_vs_conntrack,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
@@ -1588,18 +1615,62 @@ static struct ctl_table vs_vars[] = {
 #endif
        {
                .procname       = "secure_tcp",
-               .data           = &sysctl_ip_vs_secure_tcp,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_defense_mode,
        },
        {
                .procname       = "snat_reroute",
-               .data           = &sysctl_ip_vs_snat_reroute,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
+       {
+               .procname       = "sync_version",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_do_sync_mode,
+       },
+       {
+               .procname       = "cache_bypass",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "expire_nodest_conn",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "expire_quiescent_template",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sync_threshold",
+               .maxlen         =
+                       sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
+               .mode           = 0644,
+               .proc_handler   = proc_do_sync_threshold,
+       },
+       {
+               .procname       = "nat_icmp_send",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#ifdef CONFIG_IP_VS_DEBUG
+       {
+               .procname       = "debug_level",
+               .data           = &sysctl_ip_vs_debug_level,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#endif
 #if 0
        {
                .procname       = "timeout_established",
@@ -1686,41 +1757,6 @@ static struct ctl_table vs_vars[] = {
                .proc_handler   = proc_dointvec_jiffies,
        },
 #endif
-       {
-               .procname       = "cache_bypass",
-               .data           = &sysctl_ip_vs_cache_bypass,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "expire_nodest_conn",
-               .data           = &sysctl_ip_vs_expire_nodest_conn,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "expire_quiescent_template",
-               .data           = &sysctl_ip_vs_expire_quiescent_template,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "sync_threshold",
-               .data           = &sysctl_ip_vs_sync_threshold,
-               .maxlen         = sizeof(sysctl_ip_vs_sync_threshold),
-               .mode           = 0644,
-               .proc_handler   = proc_do_sync_threshold,
-       },
-       {
-               .procname       = "nat_icmp_send",
-               .data           = &sysctl_ip_vs_nat_icmp_send,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
        { }
 };
 
@@ -1731,12 +1767,12 @@ const struct ctl_path net_vs_ctl_path[] = {
        { }
 };
 EXPORT_SYMBOL_GPL(net_vs_ctl_path);
-
-static struct ctl_table_header * sysctl_header;
+#endif
 
 #ifdef CONFIG_PROC_FS
 
 struct ip_vs_iter {
+       struct seq_net_private p;  /* Do not move this, netns depends upon it */
        struct list_head *table;
        int bucket;
 };
@@ -1763,6 +1799,7 @@ static inline const char *ip_vs_fwd_name(unsigned flags)
 /* Get the Nth entry in the two lists */
 static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
 {
+       struct net *net = seq_file_net(seq);
        struct ip_vs_iter *iter = seq->private;
        int idx;
        struct ip_vs_service *svc;
@@ -1770,7 +1807,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
        /* look in hash by protocol */
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-                       if (pos-- == 0){
+                       if (net_eq(svc->net, net) && pos-- == 0) {
                                iter->table = ip_vs_svc_table;
                                iter->bucket = idx;
                                return svc;
@@ -1781,7 +1818,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
        /* keep looking in fwmark */
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-                       if (pos-- == 0) {
+                       if (net_eq(svc->net, net) && pos-- == 0) {
                                iter->table = ip_vs_svc_fwm_table;
                                iter->bucket = idx;
                                return svc;
@@ -1935,7 +1972,7 @@ static const struct seq_operations ip_vs_info_seq_ops = {
 
 static int ip_vs_info_open(struct inode *inode, struct file *file)
 {
-       return seq_open_private(file, &ip_vs_info_seq_ops,
+       return seq_open_net(inode, file, &ip_vs_info_seq_ops,
                        sizeof(struct ip_vs_iter));
 }
 
@@ -1949,13 +1986,11 @@ static const struct file_operations ip_vs_info_fops = {
 
 #endif
 
-struct ip_vs_stats ip_vs_stats = {
-       .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
+       struct net *net = seq_file_single_net(seq);
+       struct ip_vs_stats_user show;
 
 /*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
@@ -1963,29 +1998,25 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
-       spin_lock_bh(&ip_vs_stats.lock);
-       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
-                  ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
-                  (unsigned long long) ip_vs_stats.ustats.inbytes,
-                  (unsigned long long) ip_vs_stats.ustats.outbytes);
+       ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
+       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
+                  show.inpkts, show.outpkts,
+                  (unsigned long long) show.inbytes,
+                  (unsigned long long) show.outbytes);
 
 /*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
                   " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
-       seq_printf(seq,"%8X %8X %8X %16X %16X\n",
-                       ip_vs_stats.ustats.cps,
-                       ip_vs_stats.ustats.inpps,
-                       ip_vs_stats.ustats.outpps,
-                       ip_vs_stats.ustats.inbps,
-                       ip_vs_stats.ustats.outbps);
-       spin_unlock_bh(&ip_vs_stats.lock);
+       seq_printf(seq, "%8X %8X %8X %16X %16X\n",
+                       show.cps, show.inpps, show.outpps,
+                       show.inbps, show.outbps);
 
        return 0;
 }
 
 static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ip_vs_stats_show, NULL);
+       return single_open_net(inode, file, ip_vs_stats_show);
 }
 
 static const struct file_operations ip_vs_stats_fops = {
@@ -1996,13 +2027,85 @@ static const struct file_operations ip_vs_stats_fops = {
        .release = single_release,
 };
 
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+       struct net *net = seq_file_single_net(seq);
+       struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
+       struct ip_vs_cpu_stats *cpustats = tot_stats->cpustats;
+       struct ip_vs_stats_user rates;
+       int i;
+
+/*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
+       seq_puts(seq,
+                "       Total Incoming Outgoing         Incoming         Outgoing\n");
+       seq_printf(seq,
+                  "CPU    Conns  Packets  Packets            Bytes            Bytes\n");
+
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
+               unsigned int start;
+               __u64 inbytes, outbytes;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&u->syncp);
+                       inbytes = u->ustats.inbytes;
+                       outbytes = u->ustats.outbytes;
+               } while (u64_stats_fetch_retry_bh(&u->syncp, start));
+
+               seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+                          i, u->ustats.conns, u->ustats.inpkts,
+                          u->ustats.outpkts, (__u64)inbytes,
+                          (__u64)outbytes);
+       }
+
+       spin_lock_bh(&tot_stats->lock);
+
+       seq_printf(seq, "  ~ %8X %8X %8X %16LX %16LX\n\n",
+                  tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+                  tot_stats->ustats.outpkts,
+                  (unsigned long long) tot_stats->ustats.inbytes,
+                  (unsigned long long) tot_stats->ustats.outbytes);
+
+       ip_vs_read_estimator(&rates, tot_stats);
+
+       spin_unlock_bh(&tot_stats->lock);
+
+/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+       seq_puts(seq,
+                  "     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
+       seq_printf(seq, "    %8X %8X %8X %16X %16X\n",
+                       rates.cps,
+                       rates.inpps,
+                       rates.outpps,
+                       rates.inbps,
+                       rates.outbps);
+
+       return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+       .owner = THIS_MODULE,
+       .open = ip_vs_stats_percpu_seq_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
 #endif
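The fetch/retry loop in ip_vs_stats_percpu_show() exists because inbytes/outbytes are 64-bit and could otherwise be read half-updated on 32-bit SMP; it pairs with writers that wrap their increments in u64_stats_update_begin()/u64_stats_update_end() on the same syncp. A minimal sketch of the per-cpu increment side (function name hypothetical, field names as in the patch):

static void example_count_incoming(struct ip_vs_cpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->ustats.inpkts++;
        s->ustats.inbytes += len;
        u64_stats_update_end(&s->syncp);
}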
 
 /*
  *     Set timeout values for tcp tcpfin udp in the timeout_table.
  */
-static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
 {
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+       struct ip_vs_proto_data *pd;
+#endif
+
        IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
                  u->tcp_timeout,
                  u->tcp_fin_timeout,
@@ -2010,19 +2113,22 @@ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
        if (u->tcp_timeout) {
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
+               pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+               pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
                        = u->tcp_timeout * HZ;
        }
 
        if (u->tcp_fin_timeout) {
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
+               pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+               pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
                        = u->tcp_fin_timeout * HZ;
        }
 #endif
 
 #ifdef CONFIG_IP_VS_PROTO_UDP
        if (u->udp_timeout) {
-               ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
+               pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+               pd->timeout_table[IP_VS_UDP_S_NORMAL]
                        = u->udp_timeout * HZ;
        }
 #endif
@@ -2087,6 +2193,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
 static int
 do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 {
+       struct net *net = sock_net(sk);
        int ret;
        unsigned char arg[MAX_ARG_LEN];
        struct ip_vs_service_user *usvc_compat;
@@ -2121,19 +2228,20 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
        if (cmd == IP_VS_SO_SET_FLUSH) {
                /* Flush the virtual service */
-               ret = ip_vs_flush();
+               ret = ip_vs_flush(net);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_TIMEOUT) {
                /* Set timeout values for (tcp tcpfin udp) */
-               ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
+               ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
                struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-               ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
+               ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+                                       dm->syncid);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
                struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-               ret = stop_sync_thread(dm->state);
+               ret = stop_sync_thread(net, dm->state);
                goto out_unlock;
        }
 
@@ -2148,7 +2256,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
        if (cmd == IP_VS_SO_SET_ZERO) {
                /* if no service address is set, zero counters in all */
                if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
-                       ret = ip_vs_zero_all();
+                       ret = ip_vs_zero_all(net);
                        goto out_unlock;
                }
        }
@@ -2165,10 +2273,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
        /* Lookup the exact service by <protocol, addr, port> or fwmark */
        if (usvc.fwmark == 0)
-               svc = __ip_vs_service_find(usvc.af, usvc.protocol,
+               svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
                                           &usvc.addr, usvc.port);
        else
-               svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark);
+               svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
 
        if (cmd != IP_VS_SO_SET_ADD
            && (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2181,7 +2289,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                if (svc != NULL)
                        ret = -EEXIST;
                else
-                       ret = ip_vs_add_service(&usvc, &svc);
+                       ret = ip_vs_add_service(net, &usvc, &svc);
                break;
        case IP_VS_SO_SET_EDIT:
                ret = ip_vs_edit_service(svc, &usvc);
@@ -2217,14 +2325,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 }
 
 
-static void
-ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
-{
-       spin_lock_bh(&src->lock);
-       memcpy(dst, &src->ustats, sizeof(*dst));
-       spin_unlock_bh(&src->lock);
-}
-
 static void
 ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
@@ -2241,7 +2341,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 }
 
 static inline int
-__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
+__ip_vs_get_service_entries(struct net *net,
+                           const struct ip_vs_get_services *get,
                            struct ip_vs_get_services __user *uptr)
 {
        int idx, count=0;
@@ -2252,7 +2353,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
                        /* Only expose IPv4 entries to old interface */
-                       if (svc->af != AF_INET)
+                       if (svc->af != AF_INET || !net_eq(svc->net, net))
                                continue;
 
                        if (count >= get->num_services)
@@ -2271,7 +2372,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
                        /* Only expose IPv4 entries to old interface */
-                       if (svc->af != AF_INET)
+                       if (svc->af != AF_INET || !net_eq(svc->net, net))
                                continue;
 
                        if (count >= get->num_services)
@@ -2291,7 +2392,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
 }
 
 static inline int
-__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
                         struct ip_vs_get_dests __user *uptr)
 {
        struct ip_vs_service *svc;
@@ -2299,9 +2400,9 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
        int ret = 0;
 
        if (get->fwmark)
-               svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark);
+               svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
        else
-               svc = __ip_vs_service_find(AF_INET, get->protocol, &addr,
+               svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
                                           get->port);
 
        if (svc) {
@@ -2336,17 +2437,21 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
 }
 
 static inline void
-__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
 {
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+       struct ip_vs_proto_data *pd;
+#endif
+
 #ifdef CONFIG_IP_VS_PROTO_TCP
-       u->tcp_timeout =
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
-       u->tcp_fin_timeout =
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
+       pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+       u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
+       u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
+       pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
        u->udp_timeout =
-               ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
+                       pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
 #endif
 }
 
@@ -2375,7 +2480,10 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        unsigned char arg[128];
        int ret = 0;
        unsigned int copylen;
+       struct net *net = sock_net(sk);
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
+       BUG_ON(!net);
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
@@ -2418,7 +2526,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                struct ip_vs_getinfo info;
                info.version = IP_VS_VERSION_CODE;
                info.size = ip_vs_conn_tab_size;
-               info.num_services = ip_vs_num_services;
+               info.num_services = ipvs->num_services;
                if (copy_to_user(user, &info, sizeof(info)) != 0)
                        ret = -EFAULT;
        }
@@ -2437,7 +2545,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        ret = -EINVAL;
                        goto out;
                }
-               ret = __ip_vs_get_service_entries(get, user);
+               ret = __ip_vs_get_service_entries(net, get, user);
        }
        break;
 
@@ -2450,10 +2558,11 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                entry = (struct ip_vs_service_entry *)arg;
                addr.ip = entry->addr;
                if (entry->fwmark)
-                       svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark);
+                       svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
                else
-                       svc = __ip_vs_service_find(AF_INET, entry->protocol,
-                                                  &addr, entry->port);
+                       svc = __ip_vs_service_find(net, AF_INET,
+                                                  entry->protocol, &addr,
+                                                  entry->port);
                if (svc) {
                        ip_vs_copy_service(entry, svc);
                        if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2476,7 +2585,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        ret = -EINVAL;
                        goto out;
                }
-               ret = __ip_vs_get_dest_entries(get, user);
+               ret = __ip_vs_get_dest_entries(net, get, user);
        }
        break;
 
@@ -2484,7 +2593,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        {
                struct ip_vs_timeout_user t;
 
-               __ip_vs_get_timeouts(&t);
+               __ip_vs_get_timeouts(net, &t);
                if (copy_to_user(user, &t, sizeof(t)) != 0)
                        ret = -EFAULT;
        }
@@ -2495,15 +2604,17 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                struct ip_vs_daemon_user d[2];
 
                memset(&d, 0, sizeof(d));
-               if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
+               if (ipvs->sync_state & IP_VS_STATE_MASTER) {
                        d[0].state = IP_VS_STATE_MASTER;
-                       strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
-                       d[0].syncid = ip_vs_master_syncid;
+                       strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+                               sizeof(d[0].mcast_ifn));
+                       d[0].syncid = ipvs->master_syncid;
                }
-               if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
+               if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
                        d[1].state = IP_VS_STATE_BACKUP;
-                       strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
-                       d[1].syncid = ip_vs_backup_syncid;
+                       strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+                               sizeof(d[1].mcast_ifn));
+                       d[1].syncid = ipvs->backup_syncid;
                }
                if (copy_to_user(user, &d, sizeof(d)) != 0)
                        ret = -EFAULT;
@@ -2542,6 +2653,7 @@ static struct genl_family ip_vs_genl_family = {
        .name           = IPVS_GENL_NAME,
        .version        = IPVS_GENL_VERSION,
        .maxattr        = IPVS_CMD_MAX,
+       .netnsok        = true,         /* Make ipvsadm work with netns */
 };
 
 /* Policy used for first-level command attributes */
@@ -2599,31 +2711,29 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
 static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
                                 struct ip_vs_stats *stats)
 {
+       struct ip_vs_stats_user ustats;
        struct nlattr *nl_stats = nla_nest_start(skb, container_type);
        if (!nl_stats)
                return -EMSGSIZE;
 
-       spin_lock_bh(&stats->lock);
-
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts);
-       NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes);
-       NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps);
+       ip_vs_copy_stats(&ustats, stats);
 
-       spin_unlock_bh(&stats->lock);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts);
+       NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes);
+       NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps);
 
        nla_nest_end(skb, nl_stats);
 
        return 0;
 
 nla_put_failure:
-       spin_unlock_bh(&stats->lock);
        nla_nest_cancel(skb, nl_stats);
        return -EMSGSIZE;
 }
@@ -2696,11 +2806,12 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
        int idx = 0, i;
        int start = cb->args[0];
        struct ip_vs_service *svc;
+       struct net *net = skb_sknet(skb);
 
        mutex_lock(&__ip_vs_mutex);
        for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
                list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
-                       if (++idx <= start)
+                       if (++idx <= start || !net_eq(svc->net, net))
                                continue;
                        if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
                                idx--;
@@ -2711,7 +2822,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
 
        for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
-                       if (++idx <= start)
+                       if (++idx <= start || !net_eq(svc->net, net))
                                continue;
                        if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
                                idx--;
@@ -2727,7 +2838,8 @@ nla_put_failure:
        return skb->len;
 }
 
-static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+static int ip_vs_genl_parse_service(struct net *net,
+                                   struct ip_vs_service_user_kern *usvc,
                                    struct nlattr *nla, int full_entry,
                                    struct ip_vs_service **ret_svc)
 {
@@ -2770,9 +2882,9 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
        }
 
        if (usvc->fwmark)
-               svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark);
+               svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
        else
-               svc = __ip_vs_service_find(usvc->af, usvc->protocol,
+               svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
                                           &usvc->addr, usvc->port);
        *ret_svc = svc;
 
@@ -2809,13 +2921,14 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
        return 0;
 }
 
-static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+                                                    struct nlattr *nla)
 {
        struct ip_vs_service_user_kern usvc;
        struct ip_vs_service *svc;
        int ret;
 
-       ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc);
+       ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
        return ret ? ERR_PTR(ret) : svc;
 }
 
@@ -2883,6 +2996,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
        struct ip_vs_service *svc;
        struct ip_vs_dest *dest;
        struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+       struct net *net = skb_sknet(skb);
 
        mutex_lock(&__ip_vs_mutex);
 
@@ -2891,7 +3005,8 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
                        IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
                goto out_err;
 
-       svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+
+       svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
        if (IS_ERR(svc) || svc == NULL)
                goto out_err;
 
@@ -3005,20 +3120,23 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
                                   struct netlink_callback *cb)
 {
+       struct net *net = skb_net(skb);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
        mutex_lock(&__ip_vs_mutex);
-       if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+       if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
                if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
-                                          ip_vs_master_mcast_ifn,
-                                          ip_vs_master_syncid, cb) < 0)
+                                          ipvs->master_mcast_ifn,
+                                          ipvs->master_syncid, cb) < 0)
                        goto nla_put_failure;
 
                cb->args[0] = 1;
        }
 
-       if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+       if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
                if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
-                                          ip_vs_backup_mcast_ifn,
-                                          ip_vs_backup_syncid, cb) < 0)
+                                          ipvs->backup_mcast_ifn,
+                                          ipvs->backup_syncid, cb) < 0)
                        goto nla_put_failure;
 
                cb->args[1] = 1;
@@ -3030,31 +3148,33 @@ nla_put_failure:
        return skb->len;
 }
 
-static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
 {
        if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
              attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
              attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
                return -EINVAL;
 
-       return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+       return start_sync_thread(net,
+                                nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
                                 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
                                 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
 }
 
-static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
 {
        if (!attrs[IPVS_DAEMON_ATTR_STATE])
                return -EINVAL;
 
-       return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+       return stop_sync_thread(net,
+                               nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 }
 
-static int ip_vs_genl_set_config(struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
 {
        struct ip_vs_timeout_user t;
 
-       __ip_vs_get_timeouts(&t);
+       __ip_vs_get_timeouts(net, &t);
 
        if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
                t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3066,7 +3186,7 @@ static int ip_vs_genl_set_config(struct nlattr **attrs)
        if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
                t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
 
-       return ip_vs_set_timeout(&t);
+       return ip_vs_set_timeout(net, &t);
 }
 
 static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -3076,16 +3196,20 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
        struct ip_vs_dest_user_kern udest;
        int ret = 0, cmd;
        int need_full_svc = 0, need_full_dest = 0;
+       struct net *net;
+       struct netns_ipvs *ipvs;
 
+       net = skb_sknet(skb);
+       ipvs = net_ipvs(net);
        cmd = info->genlhdr->cmd;
 
        mutex_lock(&__ip_vs_mutex);
 
        if (cmd == IPVS_CMD_FLUSH) {
-               ret = ip_vs_flush();
+               ret = ip_vs_flush(net);
                goto out;
        } else if (cmd == IPVS_CMD_SET_CONFIG) {
-               ret = ip_vs_genl_set_config(info->attrs);
+               ret = ip_vs_genl_set_config(net, info->attrs);
                goto out;
        } else if (cmd == IPVS_CMD_NEW_DAEMON ||
                   cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3101,13 +3225,13 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
                }
 
                if (cmd == IPVS_CMD_NEW_DAEMON)
-                       ret = ip_vs_genl_new_daemon(daemon_attrs);
+                       ret = ip_vs_genl_new_daemon(net, daemon_attrs);
                else
-                       ret = ip_vs_genl_del_daemon(daemon_attrs);
+                       ret = ip_vs_genl_del_daemon(net, daemon_attrs);
                goto out;
        } else if (cmd == IPVS_CMD_ZERO &&
                   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
-               ret = ip_vs_zero_all();
+               ret = ip_vs_zero_all(net);
                goto out;
        }
 
@@ -3117,7 +3241,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
        if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
                need_full_svc = 1;
 
-       ret = ip_vs_genl_parse_service(&usvc,
+       ret = ip_vs_genl_parse_service(net, &usvc,
                                       info->attrs[IPVS_CMD_ATTR_SERVICE],
                                       need_full_svc, &svc);
        if (ret)
@@ -3147,7 +3271,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
        switch (cmd) {
        case IPVS_CMD_NEW_SERVICE:
                if (svc == NULL)
-                       ret = ip_vs_add_service(&usvc, &svc);
+                       ret = ip_vs_add_service(net, &usvc, &svc);
                else
                        ret = -EEXIST;
                break;
@@ -3185,7 +3309,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg;
        void *reply;
        int ret, cmd, reply_cmd;
+       struct net *net;
+       struct netns_ipvs *ipvs;
 
+       net = skb_sknet(skb);
+       ipvs = net_ipvs(net);
        cmd = info->genlhdr->cmd;
 
        if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3214,7 +3342,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
        {
                struct ip_vs_service *svc;
 
-               svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+               svc = ip_vs_genl_find_service(net,
+                                             info->attrs[IPVS_CMD_ATTR_SERVICE]);
                if (IS_ERR(svc)) {
                        ret = PTR_ERR(svc);
                        goto out_err;
@@ -3234,7 +3363,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
        {
                struct ip_vs_timeout_user t;
 
-               __ip_vs_get_timeouts(&t);
+               __ip_vs_get_timeouts(net, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
                NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
                NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
@@ -3380,62 +3509,189 @@ static void ip_vs_genl_unregister(void)
 
 /* End of Generic Netlink interface definitions */
 
+/*
+ * per netns init/exit functions.
+ */
+#ifdef CONFIG_SYSCTL
+int __net_init __ip_vs_control_init_sysctl(struct net *net)
+{
+       int idx;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ctl_table *tbl;
+
+       atomic_set(&ipvs->dropentry, 0);
+       spin_lock_init(&ipvs->dropentry_lock);
+       spin_lock_init(&ipvs->droppacket_lock);
+       spin_lock_init(&ipvs->securetcp_lock);
+
+       if (!net_eq(net, &init_net)) {
+               tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
+               if (tbl == NULL)
+                       return -ENOMEM;
+       } else
+               tbl = vs_vars;
+       /* Initialize sysctl defaults */
+       idx = 0;
+       ipvs->sysctl_amemthresh = 1024;
+       tbl[idx++].data = &ipvs->sysctl_amemthresh;
+       ipvs->sysctl_am_droprate = 10;
+       tbl[idx++].data = &ipvs->sysctl_am_droprate;
+       tbl[idx++].data = &ipvs->sysctl_drop_entry;
+       tbl[idx++].data = &ipvs->sysctl_drop_packet;
+#ifdef CONFIG_IP_VS_NFCT
+       tbl[idx++].data = &ipvs->sysctl_conntrack;
+#endif
+       tbl[idx++].data = &ipvs->sysctl_secure_tcp;
+       ipvs->sysctl_snat_reroute = 1;
+       tbl[idx++].data = &ipvs->sysctl_snat_reroute;
+       ipvs->sysctl_sync_ver = 1;
+       tbl[idx++].data = &ipvs->sysctl_sync_ver;
+       tbl[idx++].data = &ipvs->sysctl_cache_bypass;
+       tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
+       tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
+       ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+       ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
+       tbl[idx].data = &ipvs->sysctl_sync_threshold;
+       tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+       tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+
+
+       ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
+                                                    tbl);
+       if (ipvs->sysctl_hdr == NULL) {
+               if (!net_eq(net, &init_net))
+                       kfree(tbl);
+               return -ENOMEM;
+       }
+       ip_vs_start_estimator(net, &ipvs->tot_stats);
+       ipvs->sysctl_tbl = tbl;
+       /* Schedule defense work */
+       INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
+       schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+
+       return 0;
+}
+
+void __net_exit __ip_vs_control_cleanup_sysctl(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       cancel_delayed_work_sync(&ipvs->defense_work);
+       cancel_work_sync(&ipvs->defense_work.work);
+       unregister_net_sysctl_table(ipvs->sysctl_hdr);
+}
+
+#else
+
+int __net_init __ip_vs_control_init_sysctl(struct net *net) { return 0; }
+void __net_exit __ip_vs_control_cleanup_sysctl(struct net *net) { }
+
+#endif
+
+int __net_init __ip_vs_control_init(struct net *net)
+{
+       int idx;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+       /* Initialize rs_table */
+       for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+               INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+       INIT_LIST_HEAD(&ipvs->dest_trash);
+       atomic_set(&ipvs->ftpsvc_counter, 0);
+       atomic_set(&ipvs->nullsvc_counter, 0);
+
+       /* procfs stats */
+       ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+       if (!ipvs->tot_stats.cpustats) {
+               pr_err("%s(): alloc_percpu.\n", __func__);
+               return -ENOMEM;
+       }
+       spin_lock_init(&ipvs->tot_stats.lock);
+
+       proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+       proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+       proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+                            &ip_vs_stats_percpu_fops);
+
+       if (__ip_vs_control_init_sysctl(net))
+               goto err;
+
+       return 0;
+
+err:
+       free_percpu(ipvs->tot_stats.cpustats);
+       return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ip_vs_trash_cleanup(net);
+       ip_vs_stop_estimator(net, &ipvs->tot_stats);
+       __ip_vs_control_cleanup_sysctl(net);
+       proc_net_remove(net, "ip_vs_stats_percpu");
+       proc_net_remove(net, "ip_vs_stats");
+       proc_net_remove(net, "ip_vs");
+       free_percpu(ipvs->tot_stats.cpustats);
+}
+
+static struct pernet_operations ipvs_control_ops = {
+       .init = __ip_vs_control_init,
+       .exit = __ip_vs_control_cleanup,
+};
 
 int __init ip_vs_control_init(void)
 {
-       int ret;
        int idx;
+       int ret;
 
        EnterFunction(2);
 
-       /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
+       /* Initialize ip_vs_svc_table and ip_vs_svc_fwm_table */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++)  {
                INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
                INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
        }
-       for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++)  {
-               INIT_LIST_HEAD(&ip_vs_rtable[idx]);
+
+       ret = register_pernet_subsys(&ipvs_control_ops);
+       if (ret) {
+               pr_err("cannot register namespace.\n");
+               goto err;
        }
-       smp_wmb();
+
+       smp_wmb();      /* Do we really need it now ? */
 
        ret = nf_register_sockopt(&ip_vs_sockopts);
        if (ret) {
                pr_err("cannot register sockopt.\n");
-               return ret;
+               goto err_net;
        }
 
        ret = ip_vs_genl_register();
        if (ret) {
                pr_err("cannot register Generic Netlink interface.\n");
                nf_unregister_sockopt(&ip_vs_sockopts);
-               return ret;
+               goto err_net;
        }
 
-       proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
-       proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
-
-       sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
-
-       ip_vs_new_estimator(&ip_vs_stats);
-
-       /* Hook the defense timer */
-       schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
-
        LeaveFunction(2);
        return 0;
+
+err_net:
+       unregister_pernet_subsys(&ipvs_control_ops);
+err:
+       return ret;
 }
 
 
 void ip_vs_control_cleanup(void)
 {
        EnterFunction(2);
-       ip_vs_trash_cleanup();
-       cancel_delayed_work_sync(&defense_work);
-       cancel_work_sync(&defense_work.work);
-       ip_vs_kill_estimator(&ip_vs_stats);
-       unregister_sysctl_table(sysctl_header);
-       proc_net_remove(&init_net, "ip_vs_stats");
-       proc_net_remove(&init_net, "ip_vs");
+       unregister_pernet_subsys(&ipvs_control_ops);
        ip_vs_genl_unregister();
        nf_unregister_sockopt(&ip_vs_sockopts);
        LeaveFunction(2);
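
The control-plane hunks above convert module-wide setup into a register_pernet_subsys() pair, so the init/exit callbacks run once per network namespace. For readers unfamiliar with that API, a minimal self-contained sketch of the pattern might look roughly like this; my_subsys_*, my_net_id and struct my_subsys_data are illustrative names only, not anything defined by this patch, which keeps its per-namespace state in struct netns_ipvs instead:

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct my_subsys_data {
        int counter;                    /* example per-namespace state */
};

static int my_net_id __read_mostly;

static int __net_init my_subsys_net_init(struct net *net)
{
        struct my_subsys_data *d = net_generic(net, my_net_id);

        d->counter = 0;                 /* runs once per namespace, init_net included */
        return 0;
}

static void __net_exit my_subsys_net_exit(struct net *net)
{
        /* release anything allocated in my_subsys_net_init() */
}

static struct pernet_operations my_subsys_net_ops = {
        .init = my_subsys_net_init,
        .exit = my_subsys_net_exit,
        .id   = &my_net_id,
        .size = sizeof(struct my_subsys_data),
};

static int __init my_subsys_init(void)
{
        return register_pernet_subsys(&my_subsys_net_ops);
}

static void __exit my_subsys_exit(void)
{
        unregister_pernet_subsys(&my_subsys_net_ops);
}

module_init(my_subsys_init);
module_exit(my_subsys_exit);
MODULE_LICENSE("GPL");

Namespaces created later trigger .init automatically and .exit runs when a namespace is torn down, which is why formerly global state such as the defense work, estimator list and sysctl table has to live in per-netns storage.
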
index ff28801..8c8766c 100644 (file)
@@ -8,8 +8,12 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
- *
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
+ *              Network name space (netns) aware.
+ *              Global data moved to netns, i.e. struct netns_ipvs.
+ *              Affected data: est_list and est_lock.
+ *              estimation_timer() runs with a per-netns timer.
+ *              get_stats() does the per-CPU summing.
  */
 
 #define KMSG_COMPONENT "IPVS"
  */
 
 
-static void estimation_timer(unsigned long arg);
+/*
+ * Make a summary from each cpu
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+                                struct ip_vs_cpu_stats *stats)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+               unsigned int start;
+               __u64 inbytes, outbytes;
+               if (i) {
+                       sum->conns += s->ustats.conns;
+                       sum->inpkts += s->ustats.inpkts;
+                       sum->outpkts += s->ustats.outpkts;
+                       do {
+                               start = u64_stats_fetch_begin(&s->syncp);
+                               inbytes = s->ustats.inbytes;
+                               outbytes = s->ustats.outbytes;
+                       } while (u64_stats_fetch_retry(&s->syncp, start));
+                       sum->inbytes += inbytes;
+                       sum->outbytes += outbytes;
+               } else {
+                       sum->conns = s->ustats.conns;
+                       sum->inpkts = s->ustats.inpkts;
+                       sum->outpkts = s->ustats.outpkts;
+                       do {
+                               start = u64_stats_fetch_begin(&s->syncp);
+                               sum->inbytes = s->ustats.inbytes;
+                               sum->outbytes = s->ustats.outbytes;
+                       } while (u64_stats_fetch_retry(&s->syncp, start));
+               }
+       }
+}
 
-static LIST_HEAD(est_list);
-static DEFINE_SPINLOCK(est_lock);
-static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
 
 static void estimation_timer(unsigned long arg)
 {
@@ -62,12 +97,16 @@ static void estimation_timer(unsigned long arg)
        u32 n_inpkts, n_outpkts;
        u64 n_inbytes, n_outbytes;
        u32 rate;
+       struct net *net = (struct net *)arg;
+       struct netns_ipvs *ipvs;
 
-       spin_lock(&est_lock);
-       list_for_each_entry(e, &est_list, list) {
+       ipvs = net_ipvs(net);
+       spin_lock(&ipvs->est_lock);
+       list_for_each_entry(e, &ipvs->est_list, list) {
                s = container_of(e, struct ip_vs_stats, est);
 
                spin_lock(&s->lock);
+               ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
                n_conns = s->ustats.conns;
                n_inpkts = s->ustats.inpkts;
                n_outpkts = s->ustats.outpkts;
@@ -75,81 +114,64 @@ static void estimation_timer(unsigned long arg)
                n_outbytes = s->ustats.outbytes;
 
                /* scaled by 2^10, but divided 2 seconds */
-               rate = (n_conns - e->last_conns)<<9;
+               rate = (n_conns - e->last_conns) << 9;
                e->last_conns = n_conns;
-               e->cps += ((long)rate - (long)e->cps)>>2;
-               s->ustats.cps = (e->cps+0x1FF)>>10;
+               e->cps += ((long)rate - (long)e->cps) >> 2;
 
-               rate = (n_inpkts - e->last_inpkts)<<9;
+               rate = (n_inpkts - e->last_inpkts) << 9;
                e->last_inpkts = n_inpkts;
-               e->inpps += ((long)rate - (long)e->inpps)>>2;
-               s->ustats.inpps = (e->inpps+0x1FF)>>10;
+               e->inpps += ((long)rate - (long)e->inpps) >> 2;
 
-               rate = (n_outpkts - e->last_outpkts)<<9;
+               rate = (n_outpkts - e->last_outpkts) << 9;
                e->last_outpkts = n_outpkts;
-               e->outpps += ((long)rate - (long)e->outpps)>>2;
-               s->ustats.outpps = (e->outpps+0x1FF)>>10;
+               e->outpps += ((long)rate - (long)e->outpps) >> 2;
 
-               rate = (n_inbytes - e->last_inbytes)<<4;
+               rate = (n_inbytes - e->last_inbytes) << 4;
                e->last_inbytes = n_inbytes;
-               e->inbps += ((long)rate - (long)e->inbps)>>2;
-               s->ustats.inbps = (e->inbps+0xF)>>5;
+               e->inbps += ((long)rate - (long)e->inbps) >> 2;
 
-               rate = (n_outbytes - e->last_outbytes)<<4;
+               rate = (n_outbytes - e->last_outbytes) << 4;
                e->last_outbytes = n_outbytes;
-               e->outbps += ((long)rate - (long)e->outbps)>>2;
-               s->ustats.outbps = (e->outbps+0xF)>>5;
+               e->outbps += ((long)rate - (long)e->outbps) >> 2;
                spin_unlock(&s->lock);
        }
-       spin_unlock(&est_lock);
-       mod_timer(&est_timer, jiffies + 2*HZ);
+       spin_unlock(&ipvs->est_lock);
+       mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
 }
 
-void ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_estimator *est = &stats->est;
 
        INIT_LIST_HEAD(&est->list);
 
-       est->last_conns = stats->ustats.conns;
-       est->cps = stats->ustats.cps<<10;
-
-       est->last_inpkts = stats->ustats.inpkts;
-       est->inpps = stats->ustats.inpps<<10;
-
-       est->last_outpkts = stats->ustats.outpkts;
-       est->outpps = stats->ustats.outpps<<10;
-
-       est->last_inbytes = stats->ustats.inbytes;
-       est->inbps = stats->ustats.inbps<<5;
-
-       est->last_outbytes = stats->ustats.outbytes;
-       est->outbps = stats->ustats.outbps<<5;
-
-       spin_lock_bh(&est_lock);
-       list_add(&est->list, &est_list);
-       spin_unlock_bh(&est_lock);
+       spin_lock_bh(&ipvs->est_lock);
+       list_add(&est->list, &ipvs->est_list);
+       spin_unlock_bh(&ipvs->est_lock);
 }
 
-void ip_vs_kill_estimator(struct ip_vs_stats *stats)
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_estimator *est = &stats->est;
 
-       spin_lock_bh(&est_lock);
+       spin_lock_bh(&ipvs->est_lock);
        list_del(&est->list);
-       spin_unlock_bh(&est_lock);
+       spin_unlock_bh(&ipvs->est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
        struct ip_vs_estimator *est = &stats->est;
-
-       /* set counters zero, caller must hold the stats->lock lock */
-       est->last_inbytes = 0;
-       est->last_outbytes = 0;
-       est->last_conns = 0;
-       est->last_inpkts = 0;
-       est->last_outpkts = 0;
+       struct ip_vs_stats_user *u = &stats->ustats;
+
+       /* reset counters, caller must hold stats->lock */
+       est->last_inbytes = u->inbytes;
+       est->last_outbytes = u->outbytes;
+       est->last_conns = u->conns;
+       est->last_inpkts = u->inpkts;
+       est->last_outpkts = u->outpkts;
        est->cps = 0;
        est->inpps = 0;
        est->outpps = 0;
@@ -157,13 +179,48 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
        est->outbps = 0;
 }
 
-int __init ip_vs_estimator_init(void)
+/* Get decoded rates */
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+                         struct ip_vs_stats *stats)
 {
-       mod_timer(&est_timer, jiffies + 2 * HZ);
+       struct ip_vs_estimator *e = &stats->est;
+
+       dst->cps = (e->cps + 0x1FF) >> 10;
+       dst->inpps = (e->inpps + 0x1FF) >> 10;
+       dst->outpps = (e->outpps + 0x1FF) >> 10;
+       dst->inbps = (e->inbps + 0xF) >> 5;
+       dst->outbps = (e->outbps + 0xF) >> 5;
+}
+
+static int __net_init __ip_vs_estimator_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       INIT_LIST_HEAD(&ipvs->est_list);
+       spin_lock_init(&ipvs->est_lock);
+       setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+       mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
        return 0;
 }
 
+static void __net_exit __ip_vs_estimator_exit(struct net *net)
+{
+       del_timer_sync(&net_ipvs(net)->est_timer);
+}
+static struct pernet_operations ip_vs_app_ops = {
+       .init = __ip_vs_estimator_init,
+       .exit = __ip_vs_estimator_exit,
+};
+
+int __init ip_vs_estimator_init(void)
+{
+       int rv;
+
+       rv = register_pernet_subsys(&ip_vs_app_ops);
+       return rv;
+}
+
 void ip_vs_estimator_cleanup(void)
 {
-       del_timer_sync(&est_timer);
+       unregister_pernet_subsys(&ip_vs_app_ops);
 }
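
The estimation_timer() math above is plain fixed-point smoothing: every 2 seconds the counter delta is scaled (<<9 for connections/packets, i.e. 2^10 per second over a 2 s interval; <<4 for bytes), blended into the running value with a weight of 1/4, and decoded by ip_vs_read_estimator() with the matching rounding shift. A stand-alone user-space sketch of the same arithmetic, with made-up traffic numbers, may make the scaling easier to follow:

#include <stdio.h>

/* one smoothed rate in the same fixed-point format as ip_vs_estimator */
struct rate_est {
        long scaled;            /* rate << 10 (connections or packets per second) */
        unsigned int last;      /* counter value at the previous tick */
};

/* called every 2 seconds with the current cumulative counter */
static void rate_tick(struct rate_est *e, unsigned int counter)
{
        /* delta over 2 s, scaled by 2^10 but divided by 2 seconds => <<9 */
        long rate = (long)(counter - e->last) << 9;

        e->last = counter;
        /* exponential smoothing: new = old + (sample - old)/4 */
        e->scaled += (rate - e->scaled) >> 2;
}

/* decode to an ordinary per-second value, same rounding as the kernel */
static unsigned int rate_read(const struct rate_est *e)
{
        return (e->scaled + 0x1FF) >> 10;
}

int main(void)
{
        struct rate_est e = { 0, 0 };
        unsigned int conns = 0;
        int tick;

        for (tick = 1; tick <= 10; tick++) {
                conns += 200;   /* pretend 100 new connections per second */
                rate_tick(&e, conns);
                printf("tick %2d: ~%u conns/s\n", tick, rate_read(&e));
        }
        return 0;
}

The (scaled + 0x1FF) >> 10 read-back matches the decode now done centrally in ip_vs_read_estimator(), while the raw per-CPU counters are summed separately by ip_vs_read_cpu_stats().
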
index 7545500..6b5dd6d 100644 (file)
@@ -157,6 +157,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        int ret = 0;
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
+       struct net *net;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -197,18 +198,20 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                 */
                {
                        struct ip_vs_conn_param p;
-                       ip_vs_conn_fill_param(AF_INET, iph->protocol,
-                                             &from, port, &cp->caddr, 0, &p);
+                       ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+                                             iph->protocol, &from, port,
+                                             &cp->caddr, 0, &p);
                        n_cp = ip_vs_conn_out_get(&p);
                }
                if (!n_cp) {
                        struct ip_vs_conn_param p;
-                       ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr,
+                       ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+                                             AF_INET, IPPROTO_TCP, &cp->caddr,
                                              0, &cp->vaddr, port, &p);
                        n_cp = ip_vs_conn_new(&p, &from, port,
                                              IP_VS_CONN_F_NO_CPORT |
                                              IP_VS_CONN_F_NFCT,
-                                             cp->dest);
+                                             cp->dest, skb->mark);
                        if (!n_cp)
                                return 0;
 
@@ -257,8 +260,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                 * would be adjusted twice.
                 */
 
+               net = skb_net(skb);
                cp->app_data = NULL;
-               ip_vs_tcp_conn_listen(n_cp);
+               ip_vs_tcp_conn_listen(net, n_cp);
                ip_vs_conn_put(n_cp);
                return ret;
        }
@@ -287,6 +291,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        union nf_inet_addr to;
        __be16 port;
        struct ip_vs_conn *n_cp;
+       struct net *net;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -358,14 +363,15 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 
        {
                struct ip_vs_conn_param p;
-               ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port,
-                                     &cp->vaddr, htons(ntohs(cp->vport)-1),
-                                     &p);
+               ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+                                     iph->protocol, &to, port, &cp->vaddr,
+                                     htons(ntohs(cp->vport)-1), &p);
                n_cp = ip_vs_conn_in_get(&p);
                if (!n_cp) {
                        n_cp = ip_vs_conn_new(&p, &cp->daddr,
                                              htons(ntohs(cp->dport)-1),
-                                             IP_VS_CONN_F_NFCT, cp->dest);
+                                             IP_VS_CONN_F_NFCT, cp->dest,
+                                             skb->mark);
                        if (!n_cp)
                                return 0;
 
@@ -377,7 +383,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        /*
         *      Move tunnel to listen state
         */
-       ip_vs_tcp_conn_listen(n_cp);
+       net = skb_net(skb);
+       ip_vs_tcp_conn_listen(net, n_cp);
        ip_vs_conn_put(n_cp);
 
        return 1;
@@ -398,23 +405,22 @@ static struct ip_vs_app ip_vs_ftp = {
        .pkt_in =       ip_vs_ftp_in,
 };
 
-
 /*
- *     ip_vs_ftp initialization
+ *     per netns ip_vs_ftp initialization
  */
-static int __init ip_vs_ftp_init(void)
+static int __net_init __ip_vs_ftp_init(struct net *net)
 {
        int i, ret;
        struct ip_vs_app *app = &ip_vs_ftp;
 
-       ret = register_ip_vs_app(app);
+       ret = register_ip_vs_app(net, app);
        if (ret)
                return ret;
 
        for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
                if (!ports[i])
                        continue;
-               ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
+               ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
                if (ret)
                        break;
                pr_info("%s: loaded support on port[%d] = %d\n",
@@ -422,18 +428,39 @@ static int __init ip_vs_ftp_init(void)
        }
 
        if (ret)
-               unregister_ip_vs_app(app);
+               unregister_ip_vs_app(net, app);
 
        return ret;
 }
+/*
+ *     netns exit
+ */
+static void __ip_vs_ftp_exit(struct net *net)
+{
+       struct ip_vs_app *app = &ip_vs_ftp;
+
+       unregister_ip_vs_app(net, app);
+}
+
+static struct pernet_operations ip_vs_ftp_ops = {
+       .init = __ip_vs_ftp_init,
+       .exit = __ip_vs_ftp_exit,
+};
 
+int __init ip_vs_ftp_init(void)
+{
+       int rv;
+
+       rv = register_pernet_subsys(&ip_vs_ftp_ops);
+       return rv;
+}
 
 /*
  *     ip_vs_ftp finish.
  */
 static void __exit ip_vs_ftp_exit(void)
 {
-       unregister_ip_vs_app(&ip_vs_ftp);
+       unregister_pernet_subsys(&ip_vs_ftp_ops);
 }
 
 
index 9323f89..f276df9 100644 (file)
@@ -63,6 +63,8 @@
 #define CHECK_EXPIRE_INTERVAL   (60*HZ)
 #define ENTRY_TIMEOUT           (6*60*HZ)
 
+#define DEFAULT_EXPIRATION     (24*60*60*HZ)
+
 /*
  *    It is for full expiration check.
  *    When there is no partial expiration check (garbage collection)
@@ -70,7 +72,6 @@
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
 
 
 /*
@@ -113,19 +114,18 @@ struct ip_vs_lblc_table {
 /*
  *      IPVS LBLC sysctl table
  */
-
+#ifdef CONFIG_SYSCTL
 static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
-               .data           = &sysctl_ip_vs_lblc_expiration,
+               .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
 };
-
-static struct ctl_table_header * sysctl_header;
+#endif
 
 static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
 {
@@ -241,6 +241,15 @@ static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
        }
 }
 
+static int sysctl_lblc_expiration(struct ip_vs_service *svc)
+{
+#ifdef CONFIG_SYSCTL
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
+       return ipvs->sysctl_lblc_expiration;
+#else
+       return DEFAULT_EXPIRATION;
+#endif
+}
 
 static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 {
@@ -255,7 +264,8 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now,
-                                       en->lastuse + sysctl_ip_vs_lblc_expiration))
+                                       en->lastuse +
+                                       sysctl_lblc_expiration(svc)))
                                continue;
 
                        ip_vs_lblc_free(en);
@@ -390,12 +400,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
        int loh, doh;
 
        /*
-        * We think the overhead of processing active connections is fifty
-        * times higher than that of inactive connections in average. (This
-        * fifty times might not be accurate, we will change it later.) We
-        * use the following formula to estimate the overhead:
-        *                dest->activeconns*50 + dest->inactconns
-        * and the load:
+        * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
@@ -411,8 +416,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
-                       loh = atomic_read(&least->activeconns) * 50
-                               + atomic_read(&least->inactconns);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
@@ -426,8 +430,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
@@ -511,7 +514,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        /* No cache entry or it is invalid, time to schedule */
        dest = __ip_vs_lblc_schedule(svc);
        if (!dest) {
-               IP_VS_ERR_RL("LBLC: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }
 
@@ -543,23 +546,77 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
        .schedule =             ip_vs_lblc_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+#ifdef CONFIG_SYSCTL
+static int __net_init __ip_vs_lblc_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       if (!net_eq(net, &init_net)) {
+               ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
+                                               sizeof(vs_vars_table),
+                                               GFP_KERNEL);
+               if (ipvs->lblc_ctl_table == NULL)
+                       return -ENOMEM;
+       } else
+               ipvs->lblc_ctl_table = vs_vars_table;
+       ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
+       ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
+
+       ipvs->lblc_ctl_header =
+               register_net_sysctl_table(net, net_vs_ctl_path,
+                                         ipvs->lblc_ctl_table);
+       if (!ipvs->lblc_ctl_header) {
+               if (!net_eq(net, &init_net))
+                       kfree(ipvs->lblc_ctl_table);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void __net_exit __ip_vs_lblc_exit(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       unregister_net_sysctl_table(ipvs->lblc_ctl_header);
+
+       if (!net_eq(net, &init_net))
+               kfree(ipvs->lblc_ctl_table);
+}
+
+#else
+
+static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
+static void __net_exit __ip_vs_lblc_exit(struct net *net) { }
+
+#endif
+
+static struct pernet_operations ip_vs_lblc_ops = {
+       .init = __ip_vs_lblc_init,
+       .exit = __ip_vs_lblc_exit,
+};
 
 static int __init ip_vs_lblc_init(void)
 {
        int ret;
 
-       sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+       ret = register_pernet_subsys(&ip_vs_lblc_ops);
+       if (ret)
+               return ret;
+
        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
-               unregister_sysctl_table(sysctl_header);
+               unregister_pernet_subsys(&ip_vs_lblc_ops);
        return ret;
 }
 
-
 static void __exit ip_vs_lblc_cleanup(void)
 {
-       unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+       unregister_pernet_subsys(&ip_vs_lblc_ops);
 }
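
The expiration tests in the LBLC/LBLCR garbage collectors compare jiffies through time_before()/time_after() rather than with plain < or >, so the check stays correct when the jiffies counter wraps around. A tiny user-space illustration of why that matters (the 32-bit counter type and the concrete values are just for the demo):

#include <stdio.h>
#include <stdint.h>

/* same trick as the kernel's time_after()/time_before() macros:
 * look at the signed difference, so values that wrapped around the
 * 32-bit counter still compare correctly.
 */
#define time_after(a, b)   ((int32_t)((b) - (a)) < 0)
#define time_before(a, b)  time_after(b, a)

int main(void)
{
        uint32_t lastuse = 0xFFFFFE00u;         /* entry touched just before wrap */
        uint32_t expire  = 0x100;               /* expiration interval in "jiffies" */
        uint32_t now     = 0x00000080u;         /* counter has already wrapped */

        /* naive comparison thinks the entry is still fresh ... */
        printf("naive:       %s\n",
               now < lastuse + expire ? "still fresh" : "expired");
        /* ... time_before() correctly reports it as long expired */
        printf("time_before: %s\n",
               time_before(now, lastuse + expire) ? "still fresh" : "expired");
        return 0;
}
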
 
 
index dbeed8e..cb1c991 100644 (file)
@@ -63,6 +63,8 @@
 #define CHECK_EXPIRE_INTERVAL   (60*HZ)
 #define ENTRY_TIMEOUT           (6*60*HZ)
 
+#define DEFAULT_EXPIRATION     (24*60*60*HZ)
+
 /*
  *    It is for full expiration check.
  *    When there is no partial expiration check (garbage collection)
@@ -70,8 +72,6 @@
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
 
 /*
  *     for IPVS lblcr entry hash table
@@ -180,8 +180,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 
                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-                       loh = atomic_read(&least->activeconns) * 50
-                               + atomic_read(&least->inactconns);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
@@ -194,8 +193,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -230,8 +228,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
        list_for_each_entry(e, &set->list, list) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
-                       moh = atomic_read(&most->activeconns) * 50
-                               + atomic_read(&most->inactconns);
+                       moh = ip_vs_dest_conn_overhead(most);
                        goto nextstage;
                }
        }
@@ -241,8 +238,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
   nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
@@ -289,6 +285,7 @@ struct ip_vs_lblcr_table {
 };
 
 
+#ifdef CONFIG_SYSCTL
 /*
  *      IPVS LBLCR sysctl table
  */
@@ -296,15 +293,14 @@ struct ip_vs_lblcr_table {
 static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
-               .data           = &sysctl_ip_vs_lblcr_expiration,
+               .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
 };
-
-static struct ctl_table_header * sysctl_header;
+#endif
 
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
 {
@@ -418,6 +414,15 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
        }
 }
 
+static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
+{
+#ifdef CONFIG_SYSCTL
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
+       return ipvs->sysctl_lblcr_expiration;
+#else
+       return DEFAULT_EXPIRATION;
+#endif
+}
 
 static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 {
@@ -431,8 +436,8 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 
                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
-                       if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
-                                      now))
+                       if (time_after(en->lastuse +
+                                      sysctl_lblcr_expiration(svc), now))
                                continue;
 
                        ip_vs_lblcr_free(en);
@@ -566,12 +571,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
        int loh, doh;
 
        /*
-        * We think the overhead of processing active connections is fifty
-        * times higher than that of inactive connections in average. (This
-        * fifty times might not be accurate, we will change it later.) We
-        * use the following formula to estimate the overhead:
-        *                dest->activeconns*50 + dest->inactconns
-        * and the load:
+        * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
@@ -588,8 +588,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
-                       loh = atomic_read(&least->activeconns) * 50
-                               + atomic_read(&least->inactconns);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
@@ -603,8 +602,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
@@ -675,7 +673,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                                time_after(jiffies, en->set.lastmod +
-                               sysctl_ip_vs_lblcr_expiration)) {
+                               sysctl_lblcr_expiration(svc))) {
                        struct ip_vs_dest *m;
 
                        write_lock(&en->set.lock);
@@ -694,7 +692,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
-                       IP_VS_ERR_RL("LBLCR: no destination available\n");
+                       ip_vs_scheduler_err(svc, "no destination available");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }
@@ -744,23 +742,77 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
        .schedule =             ip_vs_lblcr_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+#ifdef CONFIG_SYSCTL
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       if (!net_eq(net, &init_net)) {
+               ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+                                               sizeof(vs_vars_table),
+                                               GFP_KERNEL);
+               if (ipvs->lblcr_ctl_table == NULL)
+                       return -ENOMEM;
+       } else
+               ipvs->lblcr_ctl_table = vs_vars_table;
+       ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
+       ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+       ipvs->lblcr_ctl_header =
+               register_net_sysctl_table(net, net_vs_ctl_path,
+                                         ipvs->lblcr_ctl_table);
+       if (!ipvs->lblcr_ctl_header) {
+               if (!net_eq(net, &init_net))
+                       kfree(ipvs->lblcr_ctl_table);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+
+       if (!net_eq(net, &init_net))
+               kfree(ipvs->lblcr_ctl_table);
+}
+
+#else
+
+static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
+static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }
+
+#endif
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+       .init = __ip_vs_lblcr_init,
+       .exit = __ip_vs_lblcr_exit,
+};
 
 static int __init ip_vs_lblcr_init(void)
 {
        int ret;
 
-       sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+       ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+       if (ret)
+               return ret;
+
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
-               unregister_sysctl_table(sysctl_header);
+               unregister_pernet_subsys(&ip_vs_lblcr_ops);
        return ret;
 }
 
-
 static void __exit ip_vs_lblcr_cleanup(void)
 {
-       unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+       unregister_pernet_subsys(&ip_vs_lblcr_ops);
 }
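
Both schedulers above pick the least-loaded real server by comparing overhead/weight ratios with a cross-multiplication (loh/lw < doh/dw becomes loh*dw < doh*lw), which is what the "no floats in kernel mode" comment refers to. A user-space sketch of that selection follows; the 256:1 active/inactive weighting mirrors the per-scheduler helper that the ip_vs_lc.c hunk further down drops in favour of the shared ip_vs_dest_conn_overhead(), whose exact formula is assumed here:

#include <stdio.h>

struct dest {
        const char *name;
        int activeconns;
        int inactconns;
        int weight;
};

/* same idea as the removed ip_vs_lc_dest_overhead(): an active
 * connection is treated as roughly 256 times as expensive as an
 * inactive one.
 */
static unsigned int conn_overhead(const struct dest *d)
{
        return ((unsigned int)d->activeconns << 8) + d->inactconns;
}

/* pick the destination with the smaller overhead/weight ratio,
 * comparing loh/lw against doh/dw by cross-multiplication so no
 * floating point is needed.
 */
static const struct dest *pick(const struct dest *a, const struct dest *b)
{
        unsigned long long loh = conn_overhead(a), doh = conn_overhead(b);

        if (loh * (unsigned long long)b->weight >
            doh * (unsigned long long)a->weight)
                return b;
        return a;
}

int main(void)
{
        struct dest d1 = { "rs1", 10, 500, 1 };
        struct dest d2 = { "rs2", 30, 100, 4 };

        printf("chosen: %s\n", pick(&d1, &d2)->name);
        return 0;
}

With these numbers rs2 wins: its raw overhead is higher, but divided by a weight of 4 it is the lighter-loaded server.
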
 
 
index 4f69db1..f391819 100644 (file)
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
-       /*
-        * We think the overhead of processing active connections is 256
-        * times higher than that of inactive connections in average. (This
-        * 256 times might not be accurate, we will change it later) We
-        * use the following formula to estimate the overhead now:
-        *                dest->activeconns*256 + dest->inactconns
-        */
-       return (atomic_read(&dest->activeconns) << 8) +
-               atomic_read(&dest->inactconns);
-}
-
-
 /*
  *     Least Connection scheduling
  */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
                    atomic_read(&dest->weight) == 0)
                        continue;
-               doh = ip_vs_lc_dest_overhead(dest);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (!least || doh < loh) {
                        least = dest;
                        loh = doh;
@@ -70,7 +54,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        }
 
        if (!least)
-               IP_VS_ERR_RL("LC: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
        else
                IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
                              "inactconns %d\n",
index 4680647..f454c80 100644 (file)
@@ -141,6 +141,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
        struct nf_conntrack_tuple *orig, new_reply;
        struct ip_vs_conn *cp;
        struct ip_vs_conn_param p;
+       struct net *net = nf_ct_net(ct);
 
        if (exp->tuple.src.l3num != PF_INET)
                return;
@@ -155,7 +156,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
 
        /* RS->CLIENT */
        orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
-       ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum,
+       ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
                              &orig->src.u3, orig->src.u.tcp.port,
                              &orig->dst.u3, orig->dst.u.tcp.port, &p);
        cp = ip_vs_conn_out_get(&p);
@@ -268,7 +269,8 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
                " for conn " FMT_CONN "\n",
                __func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-       h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
+       h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+                                 &tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                /* Show what happens instead of calling nf_ct_kill() */
index c413e18..984d9c1 100644 (file)
@@ -99,7 +99,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        }
 
        if (!least) {
-               IP_VS_ERR_RL("NQ: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }
 
index 3414af7..5cf859c 100644 (file)
@@ -29,12 +29,11 @@ void ip_vs_unbind_pe(struct ip_vs_service *svc)
 }
 
 /* Get pe in the pe list by name */
-static struct ip_vs_pe *
-ip_vs_pe_getbyname(const char *pe_name)
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
 {
        struct ip_vs_pe *pe;
 
-       IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__,
+       IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
                  pe_name);
 
        spin_lock_bh(&ip_vs_pe_lock);
@@ -60,28 +59,22 @@ ip_vs_pe_getbyname(const char *pe_name)
 }
 
 /* Lookup pe and try to load it if it doesn't exist */
-struct ip_vs_pe *ip_vs_pe_get(const char *name)
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
 {
        struct ip_vs_pe *pe;
 
        /* Search for the pe by name */
-       pe = ip_vs_pe_getbyname(name);
+       pe = __ip_vs_pe_getbyname(name);
 
        /* If pe not found, load the module and search again */
        if (!pe) {
                request_module("ip_vs_pe_%s", name);
-               pe = ip_vs_pe_getbyname(name);
+               pe = __ip_vs_pe_getbyname(name);
        }
 
        return pe;
 }
 
-void ip_vs_pe_put(struct ip_vs_pe *pe)
-{
-       if (pe && pe->module)
-               module_put(pe->module);
-}
-
 /* Register a pe in the pe list */
 int register_ip_vs_pe(struct ip_vs_pe *pe)
 {
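For reference, the renamed lookup keeps the request_module() fallback, so a caller can still resolve a persistence engine by name and have ip_vs_pe_<name>.ko loaded on demand. A hedged usage sketch follows (the caller and the explicit module_put() are illustrative only, since ip_vs_pe_put() is no longer defined in this file):

/* Illustrative caller, not from the patch. */
static int pe_lookup_sketch(const char *name)
{
	struct ip_vs_pe *pe = ip_vs_pe_getbyname(name);

	if (!pe)
		return -ENOENT;
	/* ... use pe (e.g. pe->fill_param) while holding the module ref ... */
	if (pe->module)
		module_put(pe->module);
	return 0;
}
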
index b8b4e96..13d607a 100644 (file)
@@ -71,6 +71,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
        struct ip_vs_iphdr iph;
        unsigned int dataoff, datalen, matchoff, matchlen;
        const char *dptr;
+       int retc;
 
        ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
 
@@ -83,20 +84,21 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
        if (dataoff >= skb->len)
                return -EINVAL;
 
+       if ((retc = skb_linearize(skb)) < 0)
+               return retc;
        dptr = skb->data + dataoff;
        datalen = skb->len - dataoff;
 
        if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
                return -EINVAL;
 
-       p->pe_data = kmalloc(matchlen, GFP_ATOMIC);
-       if (!p->pe_data)
-               return -ENOMEM;
-
        /* N.B: pe_data is only set on success,
         * this allows fallback to the default persistence logic on failure
         */
-       memcpy(p->pe_data, dptr + matchoff, matchlen);
+       p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
+       if (!p->pe_data)
+               return -ENOMEM;
+
        p->pe_data_len = matchlen;
 
        return 0;
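The kmalloc()+memcpy() pair becomes a single kmemdup(), and the allocation now happens only after get_callid() has succeeded, which is what keeps pe_data unset on every failure path. A minimal illustration of the equivalence (function names are illustrative only):

/* Illustrative only: the two ways of duplicating a small blob. */
static void *dup_old_way(const void *src, size_t len)
{
	void *dst = kmalloc(len, GFP_ATOMIC);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}

static void *dup_new_way(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_ATOMIC);	/* kmalloc + memcpy in one call */
}
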
index c539983..17484a4 100644 (file)
@@ -60,6 +60,35 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
        return 0;
 }
 
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
+    defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
+    defined(CONFIG_IP_VS_PROTO_ESP)
+/*
+ *     register an ipvs protocols netns related data
+ */
+static int
+register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+       struct ip_vs_proto_data *pd =
+                       kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+
+       if (!pd) {
+               pr_err("%s(): no memory.\n", __func__);
+               return -ENOMEM;
+       }
+       pd->pp = pp;    /* For speed issues */
+       pd->next = ipvs->proto_data_table[hash];
+       ipvs->proto_data_table[hash] = pd;
+       atomic_set(&pd->appcnt, 0);     /* Init app counter */
+
+       if (pp->init_netns != NULL)
+               pp->init_netns(net, pd);
+
+       return 0;
+}
+#endif
 
 /*
  *     unregister an ipvs protocol
@@ -82,6 +111,29 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
        return -ESRCH;
 }
 
+/*
+ *     unregister an ipvs protocol's netns data
+ */
+static int
+unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data **pd_p;
+       unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+
+       pd_p = &ipvs->proto_data_table[hash];
+       for (; *pd_p; pd_p = &(*pd_p)->next) {
+               if (*pd_p == pd) {
+                       *pd_p = pd->next;
+                       if (pd->pp->exit_netns != NULL)
+                               pd->pp->exit_netns(net, pd);
+                       kfree(pd);
+                       return 0;
+               }
+       }
+
+       return -ESRCH;
+}
 
 /*
  *     get ip_vs_protocol object by its proto.
@@ -100,19 +152,44 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
 }
 EXPORT_SYMBOL(ip_vs_proto_get);
 
+/*
+ *     get ip_vs_protocol object data by netns and proto
+ */
+struct ip_vs_proto_data *
+__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+{
+       struct ip_vs_proto_data *pd;
+       unsigned hash = IP_VS_PROTO_HASH(proto);
+
+       for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
+               if (pd->pp->protocol == proto)
+                       return pd;
+       }
+
+       return NULL;
+}
+
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct net *net, unsigned short proto)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       return __ipvs_proto_data_get(ipvs, proto);
+}
+EXPORT_SYMBOL(ip_vs_proto_data_get);
 
 /*
  *     Propagate event for state change to all protocols
  */
-void ip_vs_protocol_timeout_change(int flags)
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
 {
-       struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        int i;
 
        for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
-               for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) {
-                       if (pp->timeout_change)
-                               pp->timeout_change(pp, flags);
+               for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
+                       if (pd->pp->timeout_change)
+                               pd->pp->timeout_change(pd, flags);
                }
        }
 }
@@ -236,6 +313,46 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
                ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
 }
 
+/*
+ * per network name-space init
+ */
+static int __net_init __ip_vs_protocol_init(struct net *net)
+{
+#ifdef CONFIG_IP_VS_PROTO_TCP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_AH
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_ESP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+#endif
+       return 0;
+}
+
+static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd;
+       int i;
+
+       /* unregister all the ipvs proto data for this netns */
+       for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
+               while ((pd = ipvs->proto_data_table[i]) != NULL)
+                       unregister_ip_vs_proto_netns(net, pd);
+       }
+}
+
+static struct pernet_operations ipvs_proto_ops = {
+       .init = __ip_vs_protocol_init,
+       .exit = __ip_vs_protocol_cleanup,
+};
 
 int __init ip_vs_protocol_init(void)
 {
@@ -265,6 +382,7 @@ int __init ip_vs_protocol_init(void)
        REGISTER_PROTOCOL(&ip_vs_protocol_esp);
 #endif
        pr_info("Registered protocols (%s)\n", &protocols[2]);
+       return register_pernet_subsys(&ipvs_proto_ops);
 
        return 0;
 }
@@ -275,6 +393,7 @@ void ip_vs_protocol_cleanup(void)
        struct ip_vs_protocol *pp;
        int i;
 
+       unregister_pernet_subsys(&ipvs_proto_ops);
        /* unregister all the ipvs protocols */
        for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
                while ((pp = ip_vs_proto_table[i]) != NULL)
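With protocol data now hashed per namespace, callers derive per-netns state (timeout tables, the app counter) from a struct net instead of the global ip_vs_protocol. A hedged sketch of the lookup pattern (the wrapper function is hypothetical):

/* Hypothetical caller: fetch a per-netns TCP timeout for a given state.
 * Assumes the ip_vs_proto_data definitions added by this series.
 */
static int tcp_timeout_sketch(struct net *net, int state)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);

	if (!pd)	/* IPVS not initialised in this netns */
		return -ENOENT;
	return pd->timeout_table[state];
}
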
index 3a04611..5b8eb8b 100644 (file)
@@ -41,28 +41,30 @@ struct isakmp_hdr {
 #define PORT_ISAKMP    500
 
 static void
-ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph,
-                            int inverse, struct ip_vs_conn_param *p)
+ah_esp_conn_fill_param_proto(struct net *net, int af,
+                            const struct ip_vs_iphdr *iph, int inverse,
+                            struct ip_vs_conn_param *p)
 {
        if (likely(!inverse))
-               ip_vs_conn_fill_param(af, IPPROTO_UDP,
+               ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
                                      &iph->saddr, htons(PORT_ISAKMP),
                                      &iph->daddr, htons(PORT_ISAKMP), p);
        else
-               ip_vs_conn_fill_param(af, IPPROTO_UDP,
+               ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
                                      &iph->daddr, htons(PORT_ISAKMP),
                                      &iph->saddr, htons(PORT_ISAKMP), p);
 }
 
 static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_in_get(int af, const struct sk_buff *skb,
                   const struct ip_vs_iphdr *iph, unsigned int proto_off,
                   int inverse)
 {
        struct ip_vs_conn *cp;
        struct ip_vs_conn_param p;
+       struct net *net = skb_net(skb);
 
-       ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+       ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
        cp = ip_vs_conn_in_get(&p);
        if (!cp) {
                /*
@@ -72,7 +74,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
                IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
                              "%s%s %s->%s\n",
                              inverse ? "ICMP+" : "",
-                             pp->name,
+                             ip_vs_proto_get(iph->protocol)->name,
                              IP_VS_DBG_ADDR(af, &iph->saddr),
                              IP_VS_DBG_ADDR(af, &iph->daddr));
        }
@@ -83,21 +85,21 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
 
 static struct ip_vs_conn *
 ah_esp_conn_out_get(int af, const struct sk_buff *skb,
-                   struct ip_vs_protocol *pp,
                    const struct ip_vs_iphdr *iph,
                    unsigned int proto_off,
                    int inverse)
 {
        struct ip_vs_conn *cp;
        struct ip_vs_conn_param p;
+       struct net *net = skb_net(skb);
 
-       ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+       ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
        cp = ip_vs_conn_out_get(&p);
        if (!cp) {
                IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
                              "%s%s %s->%s\n",
                              inverse ? "ICMP+" : "",
-                             pp->name,
+                             ip_vs_proto_get(iph->protocol)->name,
                              IP_VS_DBG_ADDR(af, &iph->saddr),
                              IP_VS_DBG_ADDR(af, &iph->daddr));
        }
@@ -107,7 +109,7 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb,
 
 
 static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                     int *verdict, struct ip_vs_conn **cpp)
 {
        /*
@@ -117,26 +119,14 @@ ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
        return 0;
 }
 
-static void ah_esp_init(struct ip_vs_protocol *pp)
-{
-       /* nothing to do now */
-}
-
-
-static void ah_esp_exit(struct ip_vs_protocol *pp)
-{
-       /* nothing to do now */
-}
-
-
 #ifdef CONFIG_IP_VS_PROTO_AH
 struct ip_vs_protocol ip_vs_protocol_ah = {
        .name =                 "AH",
        .protocol =             IPPROTO_AH,
        .num_states =           1,
        .dont_defrag =          1,
-       .init =                 ah_esp_init,
-       .exit =                 ah_esp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
        .conn_schedule =        ah_esp_conn_schedule,
        .conn_in_get =          ah_esp_conn_in_get,
        .conn_out_get =         ah_esp_conn_out_get,
@@ -149,7 +139,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
        .app_conn_bind =        NULL,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       NULL,           /* ISAKMP */
-       .set_state_timeout =    NULL,
 };
 #endif
 
@@ -159,8 +148,8 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
        .protocol =             IPPROTO_ESP,
        .num_states =           1,
        .dont_defrag =          1,
-       .init =                 ah_esp_init,
-       .exit =                 ah_esp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
        .conn_schedule =        ah_esp_conn_schedule,
        .conn_in_get =          ah_esp_conn_in_get,
        .conn_out_get =         ah_esp_conn_out_get,
index 1ea96bc..b027ccc 100644 (file)
@@ -9,9 +9,10 @@
 #include <net/ip_vs.h>
 
 static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                   int *verdict, struct ip_vs_conn **cpp)
 {
+       struct net *net;
        struct ip_vs_service *svc;
        sctp_chunkhdr_t _schunkh, *sch;
        sctp_sctphdr_t *sh, _sctph;
@@ -27,13 +28,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                                 sizeof(_schunkh), &_schunkh);
        if (sch == NULL)
                return 0;
-
+       net = skb_net(skb);
        if ((sch->type == SCTP_CID_INIT) &&
-           (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+           (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
                                     &iph.daddr, sh->dest))) {
                int ignored;
 
-               if (ip_vs_todrop()) {
+               if (ip_vs_todrop(net_ipvs(net))) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
@@ -46,14 +47,19 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
-               *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-               if (!*cpp && !ignored) {
-                       *verdict = ip_vs_leave(svc, skb, pp);
+               *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+               if (!*cpp && ignored <= 0) {
+                       if (!ignored)
+                               *verdict = ip_vs_leave(svc, skb, pd);
+                       else {
+                               ip_vs_service_put(svc);
+                               *verdict = NF_DROP;
+                       }
                        return 0;
                }
                ip_vs_service_put(svc);
        }
-
+       /* NF_ACCEPT */
        return 1;
 }
 
@@ -856,7 +862,7 @@ static struct ipvs_sctp_nextstate
 /*
  *      Timeout table[state]
  */
-static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
        [IP_VS_SCTP_S_NONE]         =     2 * HZ,
        [IP_VS_SCTP_S_INIT_CLI]     =     1 * 60 * HZ,
        [IP_VS_SCTP_S_INIT_SER]     =     1 * 60 * HZ,
@@ -900,20 +906,8 @@ static const char *sctp_state_name(int state)
        return "?";
 }
 
-static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
-{
-}
-
-static int
-sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-
-return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
-                               sctp_state_name_table, sname, to);
-}
-
 static inline int
-set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
                int direction, const struct sk_buff *skb)
 {
        sctp_chunkhdr_t _sctpch, *sch;
@@ -971,7 +965,7 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
 
                IP_VS_DBG_BUF(8, "%s %s  %s:%d->"
                                "%s:%d state: %s->%s conn->refcnt:%d\n",
-                               pp->name,
+                               pd->pp->name,
                                ((direction == IP_VS_DIR_OUTPUT) ?
                                 "output " : "input "),
                                IP_VS_DBG_ADDR(cp->af, &cp->daddr),
@@ -995,75 +989,73 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
                        }
                }
        }
+       if (likely(pd))
+               cp->timeout = pd->timeout_table[cp->state = next_state];
+       else    /* What to do ? */
+               cp->timeout = sctp_timeouts[cp->state = next_state];
 
-        cp->timeout = pp->timeout_table[cp->state = next_state];
-
-        return 1;
+       return 1;
 }
 
 static int
 sctp_state_transition(struct ip_vs_conn *cp, int direction,
-               const struct sk_buff *skb, struct ip_vs_protocol *pp)
+               const struct sk_buff *skb, struct ip_vs_proto_data *pd)
 {
        int ret = 0;
 
        spin_lock(&cp->lock);
-       ret = set_sctp_state(pp, cp, direction, skb);
+       ret = set_sctp_state(pd, cp, direction, skb);
        spin_unlock(&cp->lock);
 
        return ret;
 }
 
-/*
- *      Hash table for SCTP application incarnations
- */
-#define SCTP_APP_TAB_BITS        4
-#define SCTP_APP_TAB_SIZE        (1 << SCTP_APP_TAB_BITS)
-#define SCTP_APP_TAB_MASK        (SCTP_APP_TAB_SIZE - 1)
-
-static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(sctp_app_lock);
-
 static inline __u16 sctp_app_hashkey(__be16 port)
 {
        return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
                & SCTP_APP_TAB_MASK;
 }
 
-static int sctp_register_app(struct ip_vs_app *inc)
+static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
 
        hash = sctp_app_hashkey(port);
 
-       spin_lock_bh(&sctp_app_lock);
-       list_for_each_entry(i, &sctp_apps[hash], p_list) {
+       spin_lock_bh(&ipvs->sctp_app_lock);
+       list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
-       list_add(&inc->p_list, &sctp_apps[hash]);
-       atomic_inc(&ip_vs_protocol_sctp.appcnt);
+       list_add(&inc->p_list, &ipvs->sctp_apps[hash]);
+       atomic_inc(&pd->appcnt);
 out:
-       spin_unlock_bh(&sctp_app_lock);
+       spin_unlock_bh(&ipvs->sctp_app_lock);
 
        return ret;
 }
 
-static void sctp_unregister_app(struct ip_vs_app *inc)
+static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-       spin_lock_bh(&sctp_app_lock);
-       atomic_dec(&ip_vs_protocol_sctp.appcnt);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+
+       spin_lock_bh(&ipvs->sctp_app_lock);
+       atomic_dec(&pd->appcnt);
        list_del(&inc->p_list);
-       spin_unlock_bh(&sctp_app_lock);
+       spin_unlock_bh(&ipvs->sctp_app_lock);
 }
 
 static int sctp_app_conn_bind(struct ip_vs_conn *cp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
        int hash;
        struct ip_vs_app *inc;
        int result = 0;
@@ -1074,12 +1066,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
        /* Lookup application incarnations and bind the right one */
        hash = sctp_app_hashkey(cp->vport);
 
-       spin_lock(&sctp_app_lock);
-       list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+       spin_lock(&ipvs->sctp_app_lock);
+       list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
-                       spin_unlock(&sctp_app_lock);
+                       spin_unlock(&ipvs->sctp_app_lock);
 
                        IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
                                        "%s:%u to app %s on port %u\n",
@@ -1095,43 +1087,50 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
                        goto out;
                }
        }
-       spin_unlock(&sctp_app_lock);
+       spin_unlock(&ipvs->sctp_app_lock);
 out:
        return result;
 }
 
-static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ *   timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-       IP_VS_INIT_HASH_TABLE(sctp_apps);
-       pp->timeout_table = sctp_timeouts;
-}
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
+       ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
+       spin_lock_init(&ipvs->sctp_app_lock);
+       pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
+                                                       sizeof(sctp_timeouts));
+}
 
-static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
-
+       kfree(pd->timeout_table);
 }
 
 struct ip_vs_protocol ip_vs_protocol_sctp = {
-       .name = "SCTP",
-       .protocol = IPPROTO_SCTP,
-       .num_states = IP_VS_SCTP_S_LAST,
-       .dont_defrag = 0,
-       .appcnt = ATOMIC_INIT(0),
-       .init = ip_vs_sctp_init,
-       .exit = ip_vs_sctp_exit,
-       .register_app = sctp_register_app,
+       .name           = "SCTP",
+       .protocol       = IPPROTO_SCTP,
+       .num_states     = IP_VS_SCTP_S_LAST,
+       .dont_defrag    = 0,
+       .init           = NULL,
+       .exit           = NULL,
+       .init_netns     = __ip_vs_sctp_init,
+       .exit_netns     = __ip_vs_sctp_exit,
+       .register_app   = sctp_register_app,
        .unregister_app = sctp_unregister_app,
-       .conn_schedule = sctp_conn_schedule,
-       .conn_in_get = ip_vs_conn_in_get_proto,
-       .conn_out_get = ip_vs_conn_out_get_proto,
-       .snat_handler = sctp_snat_handler,
-       .dnat_handler = sctp_dnat_handler,
-       .csum_check = sctp_csum_check,
-       .state_name = sctp_state_name,
+       .conn_schedule  = sctp_conn_schedule,
+       .conn_in_get    = ip_vs_conn_in_get_proto,
+       .conn_out_get   = ip_vs_conn_out_get_proto,
+       .snat_handler   = sctp_snat_handler,
+       .dnat_handler   = sctp_dnat_handler,
+       .csum_check     = sctp_csum_check,
+       .state_name     = sctp_state_name,
        .state_transition = sctp_state_transition,
-       .app_conn_bind = sctp_app_conn_bind,
-       .debug_packet = ip_vs_tcpudp_debug_packet,
-       .timeout_change = sctp_timeout_change,
-       .set_state_timeout = sctp_set_state_timeout,
+       .app_conn_bind  = sctp_app_conn_bind,
+       .debug_packet   = ip_vs_tcpudp_debug_packet,
+       .timeout_change = NULL,
 };
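__ip_vs_sctp_init() duplicates the now-const sctp_timeouts[] into a per-netns table via ip_vs_create_timeout_table(), and __ip_vs_sctp_exit() frees it. The helper itself is not shown in these hunks; a plausible minimal form is a kmemdup() of the template (an assumption, not code from this series):

/* Assumed shape of the helper: copy the read-only template so each
 * netns can later adjust its own timeouts without affecting others.
 */
int *ip_vs_create_timeout_table(int *table, int size)
{
	return kmemdup(table, size, GFP_ATOMIC);
}
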
index f6c5200..c0cc341 100644 (file)
@@ -9,8 +9,12 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
  *
+ *              Network name space (netns) aware.
+ *              Global data moved to netns, i.e. struct netns_ipvs.
+ *              The tcp_timeouts table now has a per-netns copy, kept in the
+ *              per-protocol ip_vs_proto_data hash table and handled per netns.
  */
 
 #define KMSG_COMPONENT "IPVS"
 #include <net/ip_vs.h>
 
 static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                  int *verdict, struct ip_vs_conn **cpp)
 {
+       struct net *net;
        struct ip_vs_service *svc;
        struct tcphdr _tcph, *th;
        struct ip_vs_iphdr iph;
@@ -42,14 +47,14 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                *verdict = NF_DROP;
                return 0;
        }
-
+       net = skb_net(skb);
        /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
        if (th->syn &&
-           (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
-                                    th->dest))) {
+           (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
+                                    &iph.daddr, th->dest))) {
                int ignored;
 
-               if (ip_vs_todrop()) {
+               if (ip_vs_todrop(net_ipvs(net))) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
@@ -63,13 +68,19 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
-               *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-               if (!*cpp && !ignored) {
-                       *verdict = ip_vs_leave(svc, skb, pp);
+               *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+               if (!*cpp && ignored <= 0) {
+                       if (!ignored)
+                               *verdict = ip_vs_leave(svc, skb, pd);
+                       else {
+                               ip_vs_service_put(svc);
+                               *verdict = NF_DROP;
+                       }
                        return 0;
                }
                ip_vs_service_put(svc);
        }
+       /* NF_ACCEPT */
        return 1;
 }
 
@@ -338,7 +349,7 @@ static const int tcp_state_off[IP_VS_DIR_LAST] = {
 /*
  *     Timeout table[state]
  */
-static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
+static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
        [IP_VS_TCP_S_NONE]              =       2*HZ,
        [IP_VS_TCP_S_ESTABLISHED]       =       15*60*HZ,
        [IP_VS_TCP_S_SYN_SENT]          =       2*60*HZ,
@@ -437,10 +448,7 @@ static struct tcp_states_t tcp_states_dos [] = {
 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
 };
 
-static struct tcp_states_t *tcp_state_table = tcp_states;
-
-
-static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
+static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
 {
        int on = (flags & 1);           /* secure_tcp */
 
@@ -450,14 +458,7 @@ static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
        ** for most if not for all of the applications. Something
        ** like "capabilities" (flags) for each object.
        */
-       tcp_state_table = (on? tcp_states_dos : tcp_states);
-}
-
-static int
-tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-       return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
-                                      tcp_state_name_table, sname, to);
+       pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
 }
 
 static inline int tcp_state_idx(struct tcphdr *th)
@@ -474,7 +475,7 @@ static inline int tcp_state_idx(struct tcphdr *th)
 }
 
 static inline void
-set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
              int direction, struct tcphdr *th)
 {
        int state_idx;
@@ -497,7 +498,8 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
                goto tcp_state_out;
        }
 
-       new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
+       new_state =
+               pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
 
   tcp_state_out:
        if (new_state != cp->state) {
@@ -505,7 +507,7 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
 
                IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
                              "%s:%d state: %s->%s conn->refcnt:%d\n",
-                             pp->name,
+                             pd->pp->name,
                              ((state_off == TCP_DIR_OUTPUT) ?
                               "output " : "input "),
                              th->syn ? 'S' : '.',
@@ -535,17 +537,19 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
                }
        }
 
-       cp->timeout = pp->timeout_table[cp->state = new_state];
+       if (likely(pd))
+               cp->timeout = pd->timeout_table[cp->state = new_state];
+       else    /* What to do ? */
+               cp->timeout = tcp_timeouts[cp->state = new_state];
 }
 
-
 /*
  *     Handle state transitions
  */
 static int
 tcp_state_transition(struct ip_vs_conn *cp, int direction,
                     const struct sk_buff *skb,
-                    struct ip_vs_protocol *pp)
+                    struct ip_vs_proto_data *pd)
 {
        struct tcphdr _tcph, *th;
 
@@ -560,23 +564,12 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
                return 0;
 
        spin_lock(&cp->lock);
-       set_tcp_state(pp, cp, direction, th);
+       set_tcp_state(pd, cp, direction, th);
        spin_unlock(&cp->lock);
 
        return 1;
 }
 
-
-/*
- *     Hash table for TCP application incarnations
- */
-#define        TCP_APP_TAB_BITS        4
-#define        TCP_APP_TAB_SIZE        (1 << TCP_APP_TAB_BITS)
-#define        TCP_APP_TAB_MASK        (TCP_APP_TAB_SIZE - 1)
-
-static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(tcp_app_lock);
-
 static inline __u16 tcp_app_hashkey(__be16 port)
 {
        return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
@@ -584,44 +577,50 @@ static inline __u16 tcp_app_hashkey(__be16 port)
 }
 
 
-static int tcp_register_app(struct ip_vs_app *inc)
+static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
 
        hash = tcp_app_hashkey(port);
 
-       spin_lock_bh(&tcp_app_lock);
-       list_for_each_entry(i, &tcp_apps[hash], p_list) {
+       spin_lock_bh(&ipvs->tcp_app_lock);
+       list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
-       list_add(&inc->p_list, &tcp_apps[hash]);
-       atomic_inc(&ip_vs_protocol_tcp.appcnt);
+       list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
+       atomic_inc(&pd->appcnt);
 
   out:
-       spin_unlock_bh(&tcp_app_lock);
+       spin_unlock_bh(&ipvs->tcp_app_lock);
        return ret;
 }
 
 
 static void
-tcp_unregister_app(struct ip_vs_app *inc)
+tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-       spin_lock_bh(&tcp_app_lock);
-       atomic_dec(&ip_vs_protocol_tcp.appcnt);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
+       spin_lock_bh(&ipvs->tcp_app_lock);
+       atomic_dec(&pd->appcnt);
        list_del(&inc->p_list);
-       spin_unlock_bh(&tcp_app_lock);
+       spin_unlock_bh(&ipvs->tcp_app_lock);
 }
 
 
 static int
 tcp_app_conn_bind(struct ip_vs_conn *cp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
        int hash;
        struct ip_vs_app *inc;
        int result = 0;
@@ -633,12 +632,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
        /* Lookup application incarnations and bind the right one */
        hash = tcp_app_hashkey(cp->vport);
 
-       spin_lock(&tcp_app_lock);
-       list_for_each_entry(inc, &tcp_apps[hash], p_list) {
+       spin_lock(&ipvs->tcp_app_lock);
+       list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
-                       spin_unlock(&tcp_app_lock);
+                       spin_unlock(&ipvs->tcp_app_lock);
 
                        IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
                                      "%s:%u to app %s on port %u\n",
@@ -655,7 +654,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
                        goto out;
                }
        }
-       spin_unlock(&tcp_app_lock);
+       spin_unlock(&ipvs->tcp_app_lock);
 
   out:
        return result;
@@ -665,24 +664,35 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
 /*
  *     Set LISTEN timeout. (ip_vs_conn_put will setup timer)
  */
-void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
 {
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
        spin_lock(&cp->lock);
        cp->state = IP_VS_TCP_S_LISTEN;
-       cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
+       cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
+                          : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
        spin_unlock(&cp->lock);
 }
 
-
-static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ *   timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-       IP_VS_INIT_HASH_TABLE(tcp_apps);
-       pp->timeout_table = tcp_timeouts;
-}
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
+       ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
+       spin_lock_init(&ipvs->tcp_app_lock);
+       pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
+                                                       sizeof(tcp_timeouts));
+       pd->tcp_state_table =  tcp_states;
+}
 
-static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
+       kfree(pd->timeout_table);
 }
 
 
@@ -691,9 +701,10 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
        .protocol =             IPPROTO_TCP,
        .num_states =           IP_VS_TCP_S_LAST,
        .dont_defrag =          0,
-       .appcnt =               ATOMIC_INIT(0),
-       .init =                 ip_vs_tcp_init,
-       .exit =                 ip_vs_tcp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
+       .init_netns =           __ip_vs_tcp_init,
+       .exit_netns =           __ip_vs_tcp_exit,
        .register_app =         tcp_register_app,
        .unregister_app =       tcp_unregister_app,
        .conn_schedule =        tcp_conn_schedule,
@@ -707,5 +718,4 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
        .app_conn_bind =        tcp_app_conn_bind,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       tcp_timeout_change,
-       .set_state_timeout =    tcp_set_state_timeout,
 };
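Since tcp_timeout_change() now flips pd->tcp_state_table instead of a file-static pointer, turning the secure_tcp defence on in one namespace no longer changes TCP state handling anywhere else. A minimal sketch of the per-netns toggle (the sysctl plumbing that would call it is outside this hunk, and the helper name is hypothetical):

/* Hypothetical helper: switch one netns between the normal and DoS tables. */
static void set_secure_tcp_sketch(struct net *net, bool on)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);

	if (pd && pd->pp->timeout_change)
		pd->pp->timeout_change(pd, on ? 1 : 0);	/* bit 0 = secure_tcp */
}
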
index 9d106a0..f1282cb 100644 (file)
@@ -9,7 +9,8 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
+ *              Network name space (netns) aware.
  *
  */
 
 #include <net/ip6_checksum.h>
 
 static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                  int *verdict, struct ip_vs_conn **cpp)
 {
+       struct net *net;
        struct ip_vs_service *svc;
        struct udphdr _udph, *uh;
        struct ip_vs_iphdr iph;
@@ -42,13 +44,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                *verdict = NF_DROP;
                return 0;
        }
-
-       svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+       net = skb_net(skb);
+       svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
                                &iph.daddr, uh->dest);
        if (svc) {
                int ignored;
 
-               if (ip_vs_todrop()) {
+               if (ip_vs_todrop(net_ipvs(net))) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
@@ -62,13 +64,19 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
-               *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-               if (!*cpp && !ignored) {
-                       *verdict = ip_vs_leave(svc, skb, pp);
+               *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+               if (!*cpp && ignored <= 0) {
+                       if (!ignored)
+                               *verdict = ip_vs_leave(svc, skb, pd);
+                       else {
+                               ip_vs_service_put(svc);
+                               *verdict = NF_DROP;
+                       }
                        return 0;
                }
                ip_vs_service_put(svc);
        }
+       /* NF_ACCEPT */
        return 1;
 }
 
@@ -338,19 +346,6 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
        return 1;
 }
 
-
-/*
- *     Note: the caller guarantees that only one of register_app,
- *     unregister_app or app_conn_bind is called each time.
- */
-
-#define        UDP_APP_TAB_BITS        4
-#define        UDP_APP_TAB_SIZE        (1 << UDP_APP_TAB_BITS)
-#define        UDP_APP_TAB_MASK        (UDP_APP_TAB_SIZE - 1)
-
-static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(udp_app_lock);
-
 static inline __u16 udp_app_hashkey(__be16 port)
 {
        return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
@@ -358,44 +353,50 @@ static inline __u16 udp_app_hashkey(__be16 port)
 }
 
 
-static int udp_register_app(struct ip_vs_app *inc)
+static int udp_register_app(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
 
        hash = udp_app_hashkey(port);
 
 
-       spin_lock_bh(&udp_app_lock);
-       list_for_each_entry(i, &udp_apps[hash], p_list) {
+       spin_lock_bh(&ipvs->udp_app_lock);
+       list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
-       list_add(&inc->p_list, &udp_apps[hash]);
-       atomic_inc(&ip_vs_protocol_udp.appcnt);
+       list_add(&inc->p_list, &ipvs->udp_apps[hash]);
+       atomic_inc(&pd->appcnt);
 
   out:
-       spin_unlock_bh(&udp_app_lock);
+       spin_unlock_bh(&ipvs->udp_app_lock);
        return ret;
 }
 
 
 static void
-udp_unregister_app(struct ip_vs_app *inc)
+udp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-       spin_lock_bh(&udp_app_lock);
-       atomic_dec(&ip_vs_protocol_udp.appcnt);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       spin_lock_bh(&ipvs->udp_app_lock);
+       atomic_dec(&pd->appcnt);
        list_del(&inc->p_list);
-       spin_unlock_bh(&udp_app_lock);
+       spin_unlock_bh(&ipvs->udp_app_lock);
 }
 
 
 static int udp_app_conn_bind(struct ip_vs_conn *cp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
        int hash;
        struct ip_vs_app *inc;
        int result = 0;
@@ -407,12 +408,12 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
        /* Lookup application incarnations and bind the right one */
        hash = udp_app_hashkey(cp->vport);
 
-       spin_lock(&udp_app_lock);
-       list_for_each_entry(inc, &udp_apps[hash], p_list) {
+       spin_lock(&ipvs->udp_app_lock);
+       list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
-                       spin_unlock(&udp_app_lock);
+                       spin_unlock(&ipvs->udp_app_lock);
 
                        IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
                                      "%s:%u to app %s on port %u\n",
@@ -429,14 +430,14 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
                        goto out;
                }
        }
-       spin_unlock(&udp_app_lock);
+       spin_unlock(&ipvs->udp_app_lock);
 
   out:
        return result;
 }
 
 
-static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
+static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
        [IP_VS_UDP_S_NORMAL]            =       5*60*HZ,
        [IP_VS_UDP_S_LAST]              =       2*HZ,
 };
@@ -446,14 +447,6 @@ static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
        [IP_VS_UDP_S_LAST]              =       "BUG!",
 };
 
-
-static int
-udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-       return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
-                                      udp_state_name_table, sname, to);
-}
-
 static const char * udp_state_name(int state)
 {
        if (state >= IP_VS_UDP_S_LAST)
@@ -464,20 +457,30 @@ static const char * udp_state_name(int state)
 static int
 udp_state_transition(struct ip_vs_conn *cp, int direction,
                     const struct sk_buff *skb,
-                    struct ip_vs_protocol *pp)
+                    struct ip_vs_proto_data *pd)
 {
-       cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
+       if (unlikely(!pd)) {
+               pr_err("UDP no ns data\n");
+               return 0;
+       }
+
+       cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
        return 1;
 }
 
-static void udp_init(struct ip_vs_protocol *pp)
+static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-       IP_VS_INIT_HASH_TABLE(udp_apps);
-       pp->timeout_table = udp_timeouts;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
+       spin_lock_init(&ipvs->udp_app_lock);
+       pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
+                                                       sizeof(udp_timeouts));
 }
 
-static void udp_exit(struct ip_vs_protocol *pp)
+static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
+       kfree(pd->timeout_table);
 }
 
 
@@ -486,8 +489,10 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
        .protocol =             IPPROTO_UDP,
        .num_states =           IP_VS_UDP_S_LAST,
        .dont_defrag =          0,
-       .init =                 udp_init,
-       .exit =                 udp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
+       .init_netns =           __udp_init,
+       .exit_netns =           __udp_exit,
        .conn_schedule =        udp_conn_schedule,
        .conn_in_get =          ip_vs_conn_in_get_proto,
        .conn_out_get =         ip_vs_conn_out_get_proto,
@@ -501,5 +506,4 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
        .app_conn_bind =        udp_app_conn_bind,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       NULL,
-       .set_state_timeout =    udp_set_state_timeout,
 };
index e210f37..c49b388 100644 (file)
@@ -72,7 +72,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                q = q->next;
        } while (q != p);
        write_unlock(&svc->sched_lock);
-       IP_VS_ERR_RL("RR: no destination available\n");
+       ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
   out:
index 076ebe0..08dbdd5 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <net/ip_vs.h>
 
+EXPORT_SYMBOL(ip_vs_scheduler_err);
 /*
  *  IPVS scheduler list
  */
@@ -146,6 +147,30 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
                module_put(scheduler->module);
 }
 
+/*
+ * Common error output helper for schedulers
+ */
+
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
+{
+       if (svc->fwmark) {
+               IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
+                            svc->scheduler->name, svc->fwmark,
+                            svc->fwmark, msg);
+#ifdef CONFIG_IP_VS_IPV6
+       } else if (svc->af == AF_INET6) {
+               IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
+                            svc->scheduler->name,
+                            ip_vs_proto_name(svc->protocol),
+                            &svc->addr.in6, ntohs(svc->port), msg);
+#endif
+       } else {
+               IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
+                            svc->scheduler->name,
+                            ip_vs_proto_name(svc->protocol),
+                            &svc->addr.ip, ntohs(svc->port), msg);
+       }
+}
 
 /*
  *  Register a scheduler in the scheduler list
index 1ab75a9..89ead24 100644 (file)
@@ -87,7 +87,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                        goto nextstage;
                }
        }
-       IP_VS_ERR_RL("SED: no destination available\n");
+       ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
        /*
index e6cc174..b5e2556 100644 (file)
@@ -223,7 +223,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
            || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
            || atomic_read(&dest->weight) <= 0
            || is_overloaded(dest)) {
-               IP_VS_ERR_RL("SH: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }
 
index ab85aed..3e7961e 100644 (file)
@@ -5,6 +5,18 @@
  *              high-performance and highly available server based on a
  *              cluster of servers.
  *
+ * Version 1    is capable of handling both version 0 and 1 messages.
+ *              Version 0 is the plain old format.
+ *              Note: Version 0 receivers will just drop Version 1 messages.
+ *              Version 1 is capable of handling IPv6, persistence data,
+ *              time-outs, and firewall marks.
+ *              In ver. 1, "ip_vs_sync_conn_options" is sent in network order.
+ *              Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
+ *
+ * Definitions: Message: a complete datagram.
+ *              Sync_conn: a part of a Message.
+ *              Param Data: an option to a Sync_conn.
+ *
  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * ip_vs_sync:  sync connection info from master load balancer to backups
@@ -15,6 +27,8 @@
  *     Alexandre Cassen        :       Added SyncID support for incoming sync
  *                                     messages filtering.
  *     Justin Ossevoort        :       Fix endian problem on sync message size.
+ *     Hans Schillstrom        :       Added Version 1: i.e. IPv6,
+ *                                     Persistence support, fwmark and time-out.
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -35,6 +49,8 @@
 #include <linux/wait.h>
 #include <linux/kernel.h>
 
+#include <asm/unaligned.h>             /* Used for ntoh_seq and hton_seq */
+
 #include <net/ip.h>
 #include <net/sock.h>
 
 #define IP_VS_SYNC_GROUP 0xe0000051    /* multicast addr - 224.0.0.81 */
 #define IP_VS_SYNC_PORT  8848          /* multicast port */
 
+#define SYNC_PROTO_VER  1              /* Protocol version in header */
 
 /*
  *     IPVS sync connection entry
+ *     Version 0, i.e. original version.
  */
-struct ip_vs_sync_conn {
+struct ip_vs_sync_conn_v0 {
        __u8                    reserved;
 
        /* Protocol, addresses and port numbers */
@@ -71,41 +89,159 @@ struct ip_vs_sync_conn_options {
        struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+/*
+     Sync Connection format (sync_conn)
+
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |    Type       |    Protocol   | Ver.  |        Size           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             Flags                             |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |            State              |         cport                 |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |            vport              |         dport                 |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             fwmark                            |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             timeout  (in sec.)                |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                              ...                              |
+      |                        IP-Addresses  (v4 or v6)               |
+      |                              ...                              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Optional Parameters.
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      | Param. Type    | Param. Length |   Param. data                |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               |
+      |                              ...                              |
+      |                               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                               | Param Type    | Param. Length |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                           Param  data                         |
+      |         Last Param data should be padded for 32 bit alignment |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ *  Type 0, IPv4 sync connection format
+ */
+struct ip_vs_sync_v4 {
+       __u8                    type;
+       __u8                    protocol;       /* Which protocol (TCP/UDP) */
+       __be16                  ver_size;       /* Version msb 4 bits */
+       /* Flags and state transition */
+       __be32                  flags;          /* status flags */
+       __be16                  state;          /* state info   */
+       /* Protocol, addresses and port numbers */
+       __be16                  cport;
+       __be16                  vport;
+       __be16                  dport;
+       __be32                  fwmark;         /* Firewall mark from skb */
+       __be32                  timeout;        /* cp timeout */
+       __be32                  caddr;          /* client address */
+       __be32                  vaddr;          /* virtual address */
+       __be32                  daddr;          /* destination address */
+       /* The sequence options start here */
+       /* PE data padded to 32bit alignment after seq. options */
+};
+/*
+ *  Type 2, IPv6 sync connection format
+ */
+struct ip_vs_sync_v6 {
+       __u8                    type;
+       __u8                    protocol;       /* Which protocol (TCP/UDP) */
+       __be16                  ver_size;       /* Version msb 4 bits */
+       /* Flags and state transition */
+       __be32                  flags;          /* status flags */
+       __be16                  state;          /* state info   */
+       /* Protocol, addresses and port numbers */
+       __be16                  cport;
+       __be16                  vport;
+       __be16                  dport;
+       __be32                  fwmark;         /* Firewall mark from skb */
+       __be32                  timeout;        /* cp timeout */
+       struct in6_addr         caddr;          /* client address */
+       struct in6_addr         vaddr;          /* virtual address */
+       struct in6_addr         daddr;          /* destination address */
+       /* The sequence options start here */
+       /* PE data padded to 32bit alignment after seq. options */
+};
+
+union ip_vs_sync_conn {
+       struct ip_vs_sync_v4    v4;
+       struct ip_vs_sync_v6    v6;
+};
+
+/* Bits in Type field in above */
+#define STYPE_INET6            0
+#define STYPE_F_INET6          (1 << STYPE_INET6)
+
+#define SVER_SHIFT             12              /* Shift to get version */
+#define SVER_MASK              0x0fff          /* Mask to strip version */
+
+#define IPVS_OPT_SEQ_DATA      1
+#define IPVS_OPT_PE_DATA       2
+#define IPVS_OPT_PE_NAME       3
+#define IPVS_OPT_PARAM         7
+
+#define IPVS_OPT_F_SEQ_DATA    (1 << (IPVS_OPT_SEQ_DATA-1))
+#define IPVS_OPT_F_PE_DATA     (1 << (IPVS_OPT_PE_DATA-1))
+#define IPVS_OPT_F_PE_NAME     (1 << (IPVS_OPT_PE_NAME-1))
+#define IPVS_OPT_F_PARAM       (1 << (IPVS_OPT_PARAM-1))
+
 struct ip_vs_sync_thread_data {
+       struct net *net;
        struct socket *sock;
        char *buf;
 };
 
-#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
+/* Version 0 definition of packet sizes */
+#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn_v0))
 #define FULL_CONN_SIZE  \
-(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
+(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
 
 
 /*
-  The master mulitcasts messages to the backup load balancers in the
-  following format.
+  The master multicasts messages (datagrams) to the backup load balancers
+  in the following format.
+
+ Version 1:
+  Note: the first byte must be zero, so ver. 0 receivers will drop the packet.
 
        0                   1                   2                   3
        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-      |  Count Conns  |    SyncID     |            Size               |
+      |      0        |    SyncID     |            Size               |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |  Count Conns  |    Version    |    Reserved, set to Zero      |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               |
       |                    IPVS Sync Connection (1)                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                            .                                  |
-      |                            .                                  |
+      ~                            .                                  ~
       |                            .                                  |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               |
       |                    IPVS Sync Connection (n)                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Version 0 Header
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |  Count Conns  |    SyncID     |            Size               |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                    IPVS Sync Connection (1)                   |
 */
 
 #define SYNC_MESG_HEADER_LEN   4
 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
 
-struct ip_vs_sync_mesg {
+/* Version 0 header */
+struct ip_vs_sync_mesg_v0 {
        __u8                    nr_conns;
        __u8                    syncid;
        __u16                   size;
@@ -113,9 +249,16 @@ struct ip_vs_sync_mesg {
        /* ip_vs_sync_conn entries start here */
 };
 
-/* the maximum length of sync (sending/receiving) message */
-static int sync_send_mesg_maxlen;
-static int sync_recv_mesg_maxlen;
+/* Version 1 header */
+struct ip_vs_sync_mesg {
+       __u8                    reserved;       /* must be zero */
+       __u8                    syncid;
+       __u16                   size;
+       __u8                    nr_conns;
+       __s8                    version;        /* SYNC_PROTO_VER  */
+       __u16                   spare;
+       /* ip_vs_sync_conn entries start here */
+};
 
 struct ip_vs_sync_buff {
        struct list_head        list;
@@ -127,28 +270,6 @@ struct ip_vs_sync_buff {
        unsigned char           *end;
 };
 
-
-/* the sync_buff list head and the lock */
-static LIST_HEAD(ip_vs_sync_queue);
-static DEFINE_SPINLOCK(ip_vs_sync_lock);
-
-/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff   *curr_sb = NULL;
-static DEFINE_SPINLOCK(curr_sb_lock);
-
-/* ipvs sync daemon state */
-volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
-
-/* multicast interface name */
-char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-
-/* sync daemon tasks */
-static struct task_struct *sync_master_thread;
-static struct task_struct *sync_backup_thread;
-
 /* multicast addr */
 static struct sockaddr_in mcast_addr = {
        .sin_family             = AF_INET,
@@ -156,41 +277,71 @@ static struct sockaddr_in mcast_addr = {
        .sin_addr.s_addr        = cpu_to_be32(IP_VS_SYNC_GROUP),
 };
 
+/*
+ * Copy of struct ip_vs_seq
+ * From unaligned network order to aligned host order
+ */
+static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
+{
+       ho->init_seq       = get_unaligned_be32(&no->init_seq);
+       ho->delta          = get_unaligned_be32(&no->delta);
+       ho->previous_delta = get_unaligned_be32(&no->previous_delta);
+}
+
+/*
+ * Copy of struct ip_vs_seq
+ * From aligned host order to unaligned network order
+ */
+static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
+{
+       put_unaligned_be32(ho->init_seq, &no->init_seq);
+       put_unaligned_be32(ho->delta, &no->delta);
+       put_unaligned_be32(ho->previous_delta, &no->previous_delta);
+}
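
These helpers copy the sequence state between the packed, possibly unaligned wire format and an aligned in-memory copy. Outside the kernel the same effect is usually obtained with memcpy() plus an explicit byte-order conversion; a rough userspace equivalent of get/put_unaligned_be32(), shown only as a sketch:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t load_be32_unaligned(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* legal for any alignment */
        return ntohl(v);
}

static void store_be32_unaligned(uint32_t v, void *p)
{
        v = htonl(v);
        memcpy(p, &v, sizeof(v));
}
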
 
-static inline struct ip_vs_sync_buff *sb_dequeue(void)
+static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
 {
        struct ip_vs_sync_buff *sb;
 
-       spin_lock_bh(&ip_vs_sync_lock);
-       if (list_empty(&ip_vs_sync_queue)) {
+       spin_lock_bh(&ipvs->sync_lock);
+       if (list_empty(&ipvs->sync_queue)) {
                sb = NULL;
        } else {
-               sb = list_entry(ip_vs_sync_queue.next,
+               sb = list_entry(ipvs->sync_queue.next,
                                struct ip_vs_sync_buff,
                                list);
                list_del(&sb->list);
        }
-       spin_unlock_bh(&ip_vs_sync_lock);
+       spin_unlock_bh(&ipvs->sync_lock);
 
        return sb;
 }
 
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+/*
+ * Create a new sync buffer for Version 1 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
 {
        struct ip_vs_sync_buff *sb;
 
        if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
                return NULL;
 
-       if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+       sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+       if (!sb->mesg) {
                kfree(sb);
                return NULL;
        }
+       sb->mesg->reserved = 0;  /* old nr_conns, i.e. must be zero now */
+       sb->mesg->version = SYNC_PROTO_VER;
+       sb->mesg->syncid = ipvs->master_syncid;
+       sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
        sb->mesg->nr_conns = 0;
-       sb->mesg->syncid = ip_vs_master_syncid;
-       sb->mesg->size = 4;
-       sb->head = (unsigned char *)sb->mesg + 4;
-       sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
+       sb->mesg->spare = 0;
+       sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
+       sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+
        sb->firstuse = jiffies;
        return sb;
 }
@@ -201,14 +352,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
        kfree(sb);
 }
 
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs)
 {
-       spin_lock(&ip_vs_sync_lock);
-       if (ip_vs_sync_state & IP_VS_STATE_MASTER)
-               list_add_tail(&sb->list, &ip_vs_sync_queue);
+       struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+
+       spin_lock(&ipvs->sync_lock);
+       if (ipvs->sync_state & IP_VS_STATE_MASTER)
+               list_add_tail(&sb->list, &ipvs->sync_queue);
        else
                ip_vs_sync_buff_release(sb);
-       spin_unlock(&ip_vs_sync_lock);
+       spin_unlock(&ipvs->sync_lock);
 }
 
 /*
@@ -216,36 +369,101 @@ static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
  *     than the specified time or the specified time is zero.
  */
 static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
 {
        struct ip_vs_sync_buff *sb;
 
-       spin_lock_bh(&curr_sb_lock);
-       if (curr_sb && (time == 0 ||
-                       time_before(jiffies - curr_sb->firstuse, time))) {
-               sb = curr_sb;
-               curr_sb = NULL;
+       spin_lock_bh(&ipvs->sync_buff_lock);
+       if (ipvs->sync_buff &&
+           time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
+               sb = ipvs->sync_buff;
+               ipvs->sync_buff = NULL;
        } else
                sb = NULL;
-       spin_unlock_bh(&curr_sb_lock);
+       spin_unlock_bh(&ipvs->sync_buff_lock);
        return sb;
 }
 
+/*
+ * Switch the sending mode between version 0 and version 1
+ *  - the pending sync_buff must be handled (queued or freed)
+ */
+void ip_vs_sync_switch_mode(struct net *net, int mode)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
+               return;
+       if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff)
+               return;
+
+       spin_lock_bh(&ipvs->sync_buff_lock);
+       /* Buffer empty? Then let buf_create do the job. */
+       if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
+               kfree(ipvs->sync_buff);
+               ipvs->sync_buff = NULL;
+       } else {
+               spin_lock_bh(&ipvs->sync_lock);
+               if (ipvs->sync_state & IP_VS_STATE_MASTER)
+                       list_add_tail(&ipvs->sync_buff->list,
+                                     &ipvs->sync_queue);
+               else
+                       ip_vs_sync_buff_release(ipvs->sync_buff);
+               spin_unlock_bh(&ipvs->sync_lock);
+       }
+       spin_unlock_bh(&ipvs->sync_buff_lock);
+}
 
 /*
+ * Create a new sync buffer for Version 0 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+{
+       struct ip_vs_sync_buff *sb;
+       struct ip_vs_sync_mesg_v0 *mesg;
+
+       if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+               return NULL;
+
+       sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+       if (!sb->mesg) {
+               kfree(sb);
+               return NULL;
+       }
+       mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
+       mesg->nr_conns = 0;
+       mesg->syncid = ipvs->master_syncid;
+       mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
+       sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
+       sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+       sb->firstuse = jiffies;
+       return sb;
+}
+
+/*
+ *      Version 0, can be selected via sysctl.
  *      Add an ip_vs_conn information into the current sync_buff.
- *      Called by ip_vs_in.
  */
-void ip_vs_sync_conn(struct ip_vs_conn *cp)
+void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
 {
-       struct ip_vs_sync_mesg *m;
-       struct ip_vs_sync_conn *s;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_sync_mesg_v0 *m;
+       struct ip_vs_sync_conn_v0 *s;
        int len;
 
-       spin_lock(&curr_sb_lock);
-       if (!curr_sb) {
-               if (!(curr_sb=ip_vs_sync_buff_create())) {
-                       spin_unlock(&curr_sb_lock);
+       if (unlikely(cp->af != AF_INET))
+               return;
+       /* Do not sync ONE PACKET */
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               return;
+
+       spin_lock(&ipvs->sync_buff_lock);
+       if (!ipvs->sync_buff) {
+               ipvs->sync_buff =
+                       ip_vs_sync_buff_create_v0(ipvs);
+               if (!ipvs->sync_buff) {
+                       spin_unlock(&ipvs->sync_buff_lock);
                        pr_err("ip_vs_sync_buff_create failed.\n");
                        return;
                }
@@ -253,10 +471,11 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
 
        len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
                SIMPLE_CONN_SIZE;
-       m = curr_sb->mesg;
-       s = (struct ip_vs_sync_conn *)curr_sb->head;
+       m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
+       s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
 
        /* copy members */
+       s->reserved = 0;
        s->protocol = cp->protocol;
        s->cport = cp->cport;
        s->vport = cp->vport;
@@ -274,83 +493,365 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
 
        m->nr_conns++;
        m->size += len;
-       curr_sb->head += len;
+       ipvs->sync_buff->head += len;
 
        /* check if there is a space for next one */
-       if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
-               sb_queue_tail(curr_sb);
-               curr_sb = NULL;
+       if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
+               sb_queue_tail(ipvs);
+               ipvs->sync_buff = NULL;
        }
-       spin_unlock(&curr_sb_lock);
+       spin_unlock(&ipvs->sync_buff_lock);
 
        /* synchronize its controller if it has */
        if (cp->control)
-               ip_vs_sync_conn(cp->control);
+               ip_vs_sync_conn(net, cp->control);
+}
+
+/*
+ *      Add ip_vs_conn information into the current sync_buff.
+ *      Called by ip_vs_in.
+ *      Sends Version 1 messages.
+ */
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_sync_mesg *m;
+       union ip_vs_sync_conn *s;
+       __u8 *p;
+       unsigned int len, pe_name_len, pad;
+
+       /* Handle old version of the protocol */
+       if (sysctl_sync_ver(ipvs) == 0) {
+               ip_vs_sync_conn_v0(net, cp);
+               return;
+       }
+       /* Do not sync ONE PACKET */
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               goto control;
+sloop:
+       /* Sanity checks */
+       pe_name_len = 0;
+       if (cp->pe_data_len) {
+               if (!cp->pe_data || !cp->dest) {
+                       IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
+                       return;
+               }
+               pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
+       }
+
+       spin_lock(&ipvs->sync_buff_lock);
+
+#ifdef CONFIG_IP_VS_IPV6
+       if (cp->af == AF_INET6)
+               len = sizeof(struct ip_vs_sync_v6);
+       else
+#endif
+               len = sizeof(struct ip_vs_sync_v4);
+
+       if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
+               len += sizeof(struct ip_vs_sync_conn_options) + 2;
+
+       if (cp->pe_data_len)
+               len += cp->pe_data_len + 2;     /* + Param hdr field */
+       if (pe_name_len)
+               len += pe_name_len + 2;
+
+       /* check if there is a space for this one  */
+       pad = 0;
+       if (ipvs->sync_buff) {
+               pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
+               if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
+                       sb_queue_tail(ipvs);
+                       ipvs->sync_buff = NULL;
+                       pad = 0;
+               }
+       }
+
+       if (!ipvs->sync_buff) {
+               ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
+               if (!ipvs->sync_buff) {
+                       spin_unlock(&ipvs->sync_buff_lock);
+                       pr_err("ip_vs_sync_buff_create failed.\n");
+                       return;
+               }
+       }
+
+       m = ipvs->sync_buff->mesg;
+       p = ipvs->sync_buff->head;
+       ipvs->sync_buff->head += pad + len;
+       m->size += pad + len;
+       /* Add any padding needed after the previous sync_conn */
+       while (pad--)
+               *(p++) = 0;
+
+       s = (union ip_vs_sync_conn *)p;
+
+       /* Set message type  & copy members */
+       s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
+       s->v4.ver_size = htons(len & SVER_MASK);        /* Version is 0 */
+       s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
+       s->v4.state = htons(cp->state);
+       s->v4.protocol = cp->protocol;
+       s->v4.cport = cp->cport;
+       s->v4.vport = cp->vport;
+       s->v4.dport = cp->dport;
+       s->v4.fwmark = htonl(cp->fwmark);
+       s->v4.timeout = htonl(cp->timeout / HZ);
+       m->nr_conns++;
+
+#ifdef CONFIG_IP_VS_IPV6
+       if (cp->af == AF_INET6) {
+               p += sizeof(struct ip_vs_sync_v6);
+               ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
+               ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
+               ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+       } else
+#endif
+       {
+               p += sizeof(struct ip_vs_sync_v4);      /* options ptr */
+               s->v4.caddr = cp->caddr.ip;
+               s->v4.vaddr = cp->vaddr.ip;
+               s->v4.daddr = cp->daddr.ip;
+       }
+       if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+               *(p++) = IPVS_OPT_SEQ_DATA;
+               *(p++) = sizeof(struct ip_vs_sync_conn_options);
+               hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
+               p += sizeof(struct ip_vs_seq);
+               hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
+               p += sizeof(struct ip_vs_seq);
+       }
+       /* Handle pe data */
+       if (cp->pe_data_len && cp->pe_data) {
+               *(p++) = IPVS_OPT_PE_DATA;
+               *(p++) = cp->pe_data_len;
+               memcpy(p, cp->pe_data, cp->pe_data_len);
+               p += cp->pe_data_len;
+               if (pe_name_len) {
+                       /* Add PE_NAME */
+                       *(p++) = IPVS_OPT_PE_NAME;
+                       *(p++) = pe_name_len;
+                       memcpy(p, cp->pe->name, pe_name_len);
+                       p += pe_name_len;
+               }
+       }
+
+       spin_unlock(&ipvs->sync_buff_lock);
+
+control:
+       /* synchronize its controller if it has */
+       cp = cp->control;
+       if (!cp)
+               return;
+       /*
+        * Reduce sync rate for templates
+        * i.e. only increment in_pkts for templates.
+        */
+       if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+               int pkts = atomic_add_return(1, &cp->in_pkts);
+
+               if (pkts % sysctl_sync_period(ipvs) != 1)
+                       return;
+       }
+       goto sloop;
 }
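
Two bits of arithmetic above are worth spelling out: each version 1 entry is started on a 4-byte boundary, so the sender computes pad = (4 - (size_t)head) & 3 before writing, and the receiver later rounds each declared size up with (size + 3) & ~3. A standalone sketch of the same calculations, using a generic pointer rather than the sync_buff fields:

#include <stdint.h>

/* Padding bytes needed so the next entry starts 4-byte aligned.
 * Unsigned wrap-around makes (4 - addr) & 3 equivalent to (-addr) & 3. */
static inline unsigned int pad_to_4(const unsigned char *p)
{
        return (4 - (uintptr_t)p) & 3;
}

/* Receiver side: round an entry's declared size up to a multiple of 4. */
static inline unsigned int round_up_4(unsigned int len)
{
        return (len + 3) & ~3u;
}
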
 
+/*
+ *  fill_param used by version 1
+ */
 static inline int
-ip_vs_conn_fill_param_sync(int af, int protocol,
-                          const union nf_inet_addr *caddr, __be16 cport,
-                          const union nf_inet_addr *vaddr, __be16 vport,
-                          struct ip_vs_conn_param *p)
+ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+                          struct ip_vs_conn_param *p,
+                          __u8 *pe_data, unsigned int pe_data_len,
+                          __u8 *pe_name, unsigned int pe_name_len)
 {
-       /* XXX: Need to take into account persistence engine */
-       ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p);
+#ifdef CONFIG_IP_VS_IPV6
+       if (af == AF_INET6)
+               ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+                                     (const union nf_inet_addr *)&sc->v6.caddr,
+                                     sc->v6.cport,
+                                     (const union nf_inet_addr *)&sc->v6.vaddr,
+                                     sc->v6.vport, p);
+       else
+#endif
+               ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+                                     (const union nf_inet_addr *)&sc->v4.caddr,
+                                     sc->v4.cport,
+                                     (const union nf_inet_addr *)&sc->v4.vaddr,
+                                     sc->v4.vport, p);
+       /* Handle pe data */
+       if (pe_data_len) {
+               if (pe_name_len) {
+                       char buff[IP_VS_PENAME_MAXLEN+1];
+
+                       memcpy(buff, pe_name, pe_name_len);
+                       buff[pe_name_len] = 0;
+                       p->pe = __ip_vs_pe_getbyname(buff);
+                       if (!p->pe) {
+                               IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
+                                            buff);
+                               return 1;
+                       }
+               } else {
+                       IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
+                       return 1;
+               }
+
+               p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC);
+               if (!p->pe_data) {
+                       if (p->pe->module)
+                               module_put(p->pe->module);
+                       return -ENOMEM;
+               }
+               p->pe_data_len = pe_data_len;
+       }
        return 0;
 }
 
 /*
- *      Process received multicast message and create the corresponding
- *      ip_vs_conn entries.
+ *  Connection Add / Update.
+ *  Common for version 0 and 1 reception of backup sync_conns.
+ *  Param: ...
+ *         timeout is in sec.
  */
-static void ip_vs_process_message(const char *buffer, const size_t buflen)
+static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+                           unsigned int flags, unsigned int state,
+                           unsigned int protocol, unsigned int type,
+                           const union nf_inet_addr *daddr, __be16 dport,
+                           unsigned long timeout, __u32 fwmark,
+                           struct ip_vs_sync_conn_options *opt)
 {
-       struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
-       struct ip_vs_sync_conn *s;
-       struct ip_vs_sync_conn_options *opt;
-       struct ip_vs_conn *cp;
-       struct ip_vs_protocol *pp;
        struct ip_vs_dest *dest;
-       struct ip_vs_conn_param param;
-       char *p;
-       int i;
+       struct ip_vs_conn *cp;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       if (buflen < sizeof(struct ip_vs_sync_mesg)) {
-               IP_VS_ERR_RL("sync message header too short\n");
-               return;
-       }
+       if (!(flags & IP_VS_CONN_F_TEMPLATE))
+               cp = ip_vs_conn_in_get(param);
+       else
+               cp = ip_vs_ct_in_get(param);
 
-       /* Convert size back to host byte order */
-       m->size = ntohs(m->size);
+       if (cp && param->pe_data)       /* Free pe_data */
+               kfree(param->pe_data);
+       if (!cp) {
+               /*
+                * Find the appropriate destination for the connection.
+                * If it is not found the connection will remain unbound
+                * but still handled.
+                */
+               dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
+                                      param->vport, protocol, fwmark);
 
-       if (buflen != m->size) {
-               IP_VS_ERR_RL("bogus sync message size\n");
-               return;
+               /* Set the appropriate activity flag */
+               if (protocol == IPPROTO_TCP) {
+                       if (state != IP_VS_TCP_S_ESTABLISHED)
+                               flags |= IP_VS_CONN_F_INACTIVE;
+                       else
+                               flags &= ~IP_VS_CONN_F_INACTIVE;
+               } else if (protocol == IPPROTO_SCTP) {
+                       if (state != IP_VS_SCTP_S_ESTABLISHED)
+                               flags |= IP_VS_CONN_F_INACTIVE;
+                       else
+                               flags &= ~IP_VS_CONN_F_INACTIVE;
+               }
+               cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+               if (dest)
+                       atomic_dec(&dest->refcnt);
+               if (!cp) {
+                       if (param->pe_data)
+                               kfree(param->pe_data);
+                       IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+                       return;
+               }
+       } else if (!cp->dest) {
+               dest = ip_vs_try_bind_dest(cp);
+               if (dest)
+                       atomic_dec(&dest->refcnt);
+       } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
+               (cp->state != state)) {
+               /* update active/inactive flag for the connection */
+               dest = cp->dest;
+               if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+                       (state != IP_VS_TCP_S_ESTABLISHED)) {
+                       atomic_dec(&dest->activeconns);
+                       atomic_inc(&dest->inactconns);
+                       cp->flags |= IP_VS_CONN_F_INACTIVE;
+               } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+                       (state == IP_VS_TCP_S_ESTABLISHED)) {
+                       atomic_inc(&dest->activeconns);
+                       atomic_dec(&dest->inactconns);
+                       cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+               }
+       } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+               (cp->state != state)) {
+               dest = cp->dest;
+               if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+               (state != IP_VS_SCTP_S_ESTABLISHED)) {
+                       atomic_dec(&dest->activeconns);
+                       atomic_inc(&dest->inactconns);
+                       cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+               }
        }
 
-       /* SyncID sanity check */
-       if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
-               IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
-                         m->syncid);
-               return;
+       if (opt)
+               memcpy(&cp->in_seq, opt, sizeof(*opt));
+       atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+       cp->state = state;
+       cp->old_state = cp->state;
+       /*
+        * For version 0 messages:
+        *  - it is not possible to recover the right timeout for templates
+        *  - the right fwmark virtual service cannot be found; if needed,
+        *    we could do it for non-fwmark persistent services.
+        * Version 1 messages carry the timeout, so there is no problem.
+        */
+       if (timeout) {
+               if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
+                       timeout = MAX_SCHEDULE_TIMEOUT / HZ;
+               cp->timeout = timeout*HZ;
+       } else {
+               struct ip_vs_proto_data *pd;
+
+               pd = ip_vs_proto_data_get(net, protocol);
+               if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
+                       cp->timeout = pd->timeout_table[state];
+               else
+                       cp->timeout = (3*60*HZ);
        }
+       ip_vs_conn_put(cp);
+}
 
-       p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
+/*
+ *  Process received multicast message for Version 0
+ */
+static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+                                    const size_t buflen)
+{
+       struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
+       struct ip_vs_sync_conn_v0 *s;
+       struct ip_vs_sync_conn_options *opt;
+       struct ip_vs_protocol *pp;
+       struct ip_vs_conn_param param;
+       char *p;
+       int i;
+
+       p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
        for (i=0; i<m->nr_conns; i++) {
                unsigned flags, state;
 
                if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
-                       IP_VS_ERR_RL("bogus conn in sync message\n");
+                       IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
                        return;
                }
-               s = (struct ip_vs_sync_conn *) p;
+               s = (struct ip_vs_sync_conn_v0 *) p;
                flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
                flags &= ~IP_VS_CONN_F_HASHED;
                if (flags & IP_VS_CONN_F_SEQ_MASK) {
                        opt = (struct ip_vs_sync_conn_options *)&s[1];
                        p += FULL_CONN_SIZE;
                        if (p > buffer+buflen) {
-                               IP_VS_ERR_RL("bogus conn options in sync message\n");
+                               IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n");
                                return;
                        }
                } else {
@@ -362,118 +863,286 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
                if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
                        pp = ip_vs_proto_get(s->protocol);
                        if (!pp) {
-                               IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+                               IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
                                        s->protocol);
                                continue;
                        }
                        if (state >= pp->num_states) {
-                               IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+                               IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
                                        pp->name, state);
                                continue;
                        }
                } else {
                        /* protocol in templates is not used for state/timeout */
-                       pp = NULL;
                        if (state > 0) {
-                               IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+                               IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
                                        state);
                                state = 0;
                        }
                }
 
-               {
-                       if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol,
-                                             (union nf_inet_addr *)&s->caddr,
-                                             s->cport,
-                                             (union nf_inet_addr *)&s->vaddr,
-                                             s->vport, &param)) {
-                               pr_err("ip_vs_conn_fill_param_sync failed");
-                               return;
+               ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+                                     (const union nf_inet_addr *)&s->caddr,
+                                     s->cport,
+                                     (const union nf_inet_addr *)&s->vaddr,
+                                     s->vport, &param);
+
+               /* Send timeout as Zero */
+               ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+                               (union nf_inet_addr *)&s->daddr, s->dport,
+                               0, 0, opt);
+       }
+}
+
+/*
+ * Handle options
+ */
+static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
+                                   __u32 *opt_flags,
+                                   struct ip_vs_sync_conn_options *opt)
+{
+       struct ip_vs_sync_conn_options *topt;
+
+       topt = (struct ip_vs_sync_conn_options *)p;
+
+       if (plen != sizeof(struct ip_vs_sync_conn_options)) {
+               IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
+               return -EINVAL;
+       }
+       if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
+               IP_VS_DBG(2, "BACKUP, conn options found twice\n");
+               return -EINVAL;
+       }
+       ntoh_seq(&topt->in_seq, &opt->in_seq);
+       ntoh_seq(&topt->out_seq, &opt->out_seq);
+       *opt_flags |= IPVS_OPT_F_SEQ_DATA;
+       return 0;
+}
+
+static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
+                         __u8 **data, unsigned int maxlen,
+                         __u32 *opt_flags, __u32 flag)
+{
+       if (plen > maxlen) {
+               IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
+               return -EINVAL;
+       }
+       if (*opt_flags & flag) {
+               IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
+               return -EINVAL;
+       }
+       *data_len = plen;
+       *data = p;
+       *opt_flags |= flag;
+       return 0;
+}
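
The option area that follows the fixed part of a version 1 entry is a plain type/length/value list: a type byte, a length byte, then the payload, repeated until the entry's declared size is used up, which is what the loop in the next function implements. As a self-contained illustration (with a hypothetical handle() callback, not a kernel API), such a walk looks like:

#include <stdint.h>

/* Walk a type/length/value option area; handle() receives each option.
 * Returns 0 on success, -1 if the buffer is malformed. */
static int walk_tlv(const uint8_t *p, const uint8_t *end,
                    int (*handle)(unsigned int type, unsigned int len,
                                  const uint8_t *data))
{
        while (p < end) {
                unsigned int type, len;

                if (p + 2 > end)        /* need room for type + length */
                        return -1;
                type = *p++;
                len  = *p++;
                if (!len || p + len > end)
                        return -1;      /* zero-length or overflowing */
                if (handle(type, len, p))
                        return -1;
                p += len;               /* advance to the next option */
        }
        return 0;
}
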
+/*
+ *   Process a Version 1 sync connection
+ */
+static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+{
+       struct ip_vs_sync_conn_options opt;
+       union  ip_vs_sync_conn *s;
+       struct ip_vs_protocol *pp;
+       struct ip_vs_conn_param param;
+       __u32 flags;
+       unsigned int af, state, pe_data_len = 0, pe_name_len = 0;
+       __u8 *pe_data = NULL, *pe_name = NULL;
+       __u32 opt_flags = 0;
+       int retc = 0;
+
+       s = (union ip_vs_sync_conn *) p;
+
+       if (s->v6.type & STYPE_F_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+               af = AF_INET6;
+               p += sizeof(struct ip_vs_sync_v6);
+#else
+               IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
+               retc = 10;
+               goto out;
+#endif
+       } else if (!s->v4.type) {
+               af = AF_INET;
+               p += sizeof(struct ip_vs_sync_v4);
+       } else {
+               return -10;
+       }
+       if (p > msg_end)
+               return -20;
+
+       /* Process optional params, checking Type & Len. */
+       while (p < msg_end) {
+               int ptype;
+               int plen;
+
+               if (p+2 > msg_end)
+                       return -30;
+               ptype = *(p++);
+               plen  = *(p++);
+
+               if (!plen || ((p + plen) > msg_end))
+                       return -40;
+               /* Handle seq option; p = param data */
+               switch (ptype & ~IPVS_OPT_F_PARAM) {
+               case IPVS_OPT_SEQ_DATA:
+                       if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
+                               return -50;
+                       break;
+
+               case IPVS_OPT_PE_DATA:
+                       if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
+                                          IP_VS_PEDATA_MAXLEN, &opt_flags,
+                                          IPVS_OPT_F_PE_DATA))
+                               return -60;
+                       break;
+
+               case IPVS_OPT_PE_NAME:
+                       if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name,
+                                          IP_VS_PENAME_MAXLEN, &opt_flags,
+                                          IPVS_OPT_F_PE_NAME))
+                               return -70;
+                       break;
+
+               default:
+                       /* Param data mandatory ? */
+                       if (!(ptype & IPVS_OPT_F_PARAM)) {
+                               IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
+                                         ptype & ~IPVS_OPT_F_PARAM);
+                               retc = 20;
+                               goto out;
                        }
-                       if (!(flags & IP_VS_CONN_F_TEMPLATE))
-                               cp = ip_vs_conn_in_get(&param);
-                       else
-                               cp = ip_vs_ct_in_get(&param);
                }
-               if (!cp) {
-                       /*
-                        * Find the appropriate destination for the connection.
-                        * If it is not found the connection will remain unbound
-                        * but still handled.
-                        */
-                       dest = ip_vs_find_dest(AF_INET,
-                                              (union nf_inet_addr *)&s->daddr,
-                                              s->dport,
-                                              (union nf_inet_addr *)&s->vaddr,
-                                              s->vport,
-                                              s->protocol);
-                       /*  Set the approprite ativity flag */
-                       if (s->protocol == IPPROTO_TCP) {
-                               if (state != IP_VS_TCP_S_ESTABLISHED)
-                                       flags |= IP_VS_CONN_F_INACTIVE;
-                               else
-                                       flags &= ~IP_VS_CONN_F_INACTIVE;
-                       } else if (s->protocol == IPPROTO_SCTP) {
-                               if (state != IP_VS_SCTP_S_ESTABLISHED)
-                                       flags |= IP_VS_CONN_F_INACTIVE;
-                               else
-                                       flags &= ~IP_VS_CONN_F_INACTIVE;
+               p += plen;  /* Next option */
+       }
+
+       /* Get flags and Mask off unsupported */
+       flags  = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
+       flags |= IP_VS_CONN_F_SYNC;
+       state = ntohs(s->v4.state);
+
+       if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+               pp = ip_vs_proto_get(s->v4.protocol);
+               if (!pp) {
+                       IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
+                               s->v4.protocol);
+                       retc = 30;
+                       goto out;
+               }
+               if (state >= pp->num_states) {
+                       IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
+                               pp->name, state);
+                       retc = 40;
+                       goto out;
+               }
+       } else {
+               /* protocol in templates is not used for state/timeout */
+               if (state > 0) {
+                       IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
+                               state);
+                       state = 0;
+               }
+       }
+       if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+                                      pe_data_len, pe_name, pe_name_len)) {
+               retc = 50;
+               goto out;
+       }
+       /* If built for IPv4 only, silently skip IPv6 entries */
+       if (af == AF_INET)
+               ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+                               (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
+                               ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
+                               (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+                               );
+#ifdef CONFIG_IP_VS_IPV6
+       else
+               ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+                               (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
+                               ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
+                               (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+                               );
+#endif
+       return 0;
+       /* Error exit */
+out:
+       IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
+       return retc;
+
+}
+/*
+ *      Process received multicast message and create the corresponding
+ *      ip_vs_conn entries.
+ *      Handles Version 0 & 1
+ */
+static void ip_vs_process_message(struct net *net, __u8 *buffer,
+                                 const size_t buflen)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
+       __u8 *p, *msg_end;
+       int i, nr_conns;
+
+       if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
+               IP_VS_DBG(2, "BACKUP, message header too short\n");
+               return;
+       }
+       /* Convert size back to host byte order */
+       m2->size = ntohs(m2->size);
+
+       if (buflen != m2->size) {
+               IP_VS_DBG(2, "BACKUP, bogus message size\n");
+               return;
+       }
+       /* SyncID sanity check */
+       if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+               IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
+               return;
+       }
+       /* Handle version 1 message */
+       if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
+           && (m2->spare == 0)) {
+
+               msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
+               nr_conns = m2->nr_conns;
+
+               for (i=0; i<nr_conns; i++) {
+                       union ip_vs_sync_conn *s;
+                       unsigned size;
+                       int retc;
+
+                       p = msg_end;
+                       if (p + sizeof(s->v4) > buffer+buflen) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+                               return;
                        }
-                       cp = ip_vs_conn_new(&param,
-                                           (union nf_inet_addr *)&s->daddr,
-                                           s->dport, flags, dest);
-                       if (dest)
-                               atomic_dec(&dest->refcnt);
-                       if (!cp) {
-                               pr_err("ip_vs_conn_new failed\n");
+                       s = (union ip_vs_sync_conn *)p;
+                       size = ntohs(s->v4.ver_size) & SVER_MASK;
+                       msg_end = p + size;
+                       /* Basic sanity checks */
+                       if (msg_end  > buffer+buflen) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
                                return;
                        }
-               } else if (!cp->dest) {
-                       dest = ip_vs_try_bind_dest(cp);
-                       if (dest)
-                               atomic_dec(&dest->refcnt);
-               } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
-                          (cp->state != state)) {
-                       /* update active/inactive flag for the connection */
-                       dest = cp->dest;
-                       if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-                               (state != IP_VS_TCP_S_ESTABLISHED)) {
-                               atomic_dec(&dest->activeconns);
-                               atomic_inc(&dest->inactconns);
-                               cp->flags |= IP_VS_CONN_F_INACTIVE;
-                       } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
-                               (state == IP_VS_TCP_S_ESTABLISHED)) {
-                               atomic_inc(&dest->activeconns);
-                               atomic_dec(&dest->inactconns);
-                               cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+                       if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
+                                             ntohs(s->v4.ver_size) >> SVER_SHIFT);
+                               return;
                        }
-               } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
-                          (cp->state != state)) {
-                       dest = cp->dest;
-                       if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-                            (state != IP_VS_SCTP_S_ESTABLISHED)) {
-                           atomic_dec(&dest->activeconns);
-                           atomic_inc(&dest->inactconns);
-                           cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+                       /* Process a single sync_conn */
+                       retc = ip_vs_proc_sync_conn(net, p, msg_end);
+                       if (retc < 0) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
+                                            retc);
+                               return;
                        }
+                       /* Make sure we have 32 bit alignment */
+                       msg_end = p + ((size + 3) & ~3);
                }
-
-               if (opt)
-                       memcpy(&cp->in_seq, opt, sizeof(*opt));
-               atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
-               cp->state = state;
-               cp->old_state = cp->state;
-               /*
-                * We can not recover the right timeout for templates
-                * in all cases, we can not find the right fwmark
-                * virtual service. If needed, we can do it for
-                * non-fwmark persistent services.
-                */
-               if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
-                       cp->timeout = pp->timeout_table[state];
-               else
-                       cp->timeout = (3*60*HZ);
-               ip_vs_conn_put(cp);
+       } else {
+               /* Old type of message */
+               ip_vs_process_message_v0(net, buffer, buflen);
+               return;
        }
 }
 
@@ -511,8 +1180,10 @@ static int set_mcast_if(struct sock *sk, char *ifname)
 {
        struct net_device *dev;
        struct inet_sock *inet = inet_sk(sk);
+       struct net *net = sock_net(sk);
 
-       if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+       dev = __dev_get_by_name(net, ifname);
+       if (!dev)
                return -ENODEV;
 
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -531,30 +1202,33 @@ static int set_mcast_if(struct sock *sk, char *ifname)
  *     Set the maximum length of sync message according to the
  *     specified interface's MTU.
  */
-static int set_sync_mesg_maxlen(int sync_state)
+static int set_sync_mesg_maxlen(struct net *net, int sync_state)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct net_device *dev;
        int num;
 
        if (sync_state == IP_VS_STATE_MASTER) {
-               if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+               dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
+               if (!dev)
                        return -ENODEV;
 
                num = (dev->mtu - sizeof(struct iphdr) -
                       sizeof(struct udphdr) -
                       SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
-               sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
+               ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
                        SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
                IP_VS_DBG(7, "setting the maximum length of sync sending "
-                         "message %d.\n", sync_send_mesg_maxlen);
+                         "message %d.\n", ipvs->send_mesg_maxlen);
        } else if (sync_state == IP_VS_STATE_BACKUP) {
-               if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+               dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
+               if (!dev)
                        return -ENODEV;
 
-               sync_recv_mesg_maxlen = dev->mtu -
+               ipvs->recv_mesg_maxlen = dev->mtu -
                        sizeof(struct iphdr) - sizeof(struct udphdr);
                IP_VS_DBG(7, "setting the maximum length of sync receiving "
-                         "message %d.\n", sync_recv_mesg_maxlen);
+                         "message %d.\n", ipvs->recv_mesg_maxlen);
        }
 
        return 0;
@@ -569,6 +1243,7 @@ static int set_sync_mesg_maxlen(int sync_state)
 static int
 join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 {
+       struct net *net = sock_net(sk);
        struct ip_mreqn mreq;
        struct net_device *dev;
        int ret;
@@ -576,7 +1251,8 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
        memset(&mreq, 0, sizeof(mreq));
        memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-       if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+       dev = __dev_get_by_name(net, ifname);
+       if (!dev)
                return -ENODEV;
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
                return -EINVAL;
@@ -593,11 +1269,13 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 
 static int bind_mcastif_addr(struct socket *sock, char *ifname)
 {
+       struct net *net = sock_net(sock->sk);
        struct net_device *dev;
        __be32 addr;
        struct sockaddr_in sin;
 
-       if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+       dev = __dev_get_by_name(net, ifname);
+       if (!dev)
                return -ENODEV;
 
        addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -619,19 +1297,20 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(struct net *net)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct socket *sock;
        int result;
 
        /* First create a socket */
-       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+       result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
        }
 
-       result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+       result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
        if (result < 0) {
                pr_err("Error setting outbound mcast interface\n");
                goto error;
@@ -640,7 +1319,7 @@ static struct socket * make_send_sock(void)
        set_mcast_loop(sock->sk, 0);
        set_mcast_ttl(sock->sk, 1);
 
-       result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+       result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
        if (result < 0) {
                pr_err("Error binding address of the mcast interface\n");
                goto error;
@@ -664,13 +1343,14 @@ static struct socket * make_send_sock(void)
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(struct net *net)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct socket *sock;
        int result;
 
        /* First create a socket */
-       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+       result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
@@ -689,7 +1369,7 @@ static struct socket * make_receive_sock(void)
        /* join the multicast group */
        result = join_mcast_group(sock->sk,
                        (struct in_addr *) &mcast_addr.sin_addr,
-                       ip_vs_backup_mcast_ifn);
+                       ipvs->backup_mcast_ifn);
        if (result < 0) {
                pr_err("Error joining to the multicast group\n");
                goto error;
@@ -760,20 +1440,21 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
 static int sync_thread_master(void *data)
 {
        struct ip_vs_sync_thread_data *tinfo = data;
+       struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
        struct ip_vs_sync_buff *sb;
 
        pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
                "syncid = %d\n",
-               ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+               ipvs->master_mcast_ifn, ipvs->master_syncid);
 
        while (!kthread_should_stop()) {
-               while ((sb = sb_dequeue())) {
+               while ((sb = sb_dequeue(ipvs))) {
                        ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
                        ip_vs_sync_buff_release(sb);
                }
 
-               /* check if entries stay in curr_sb for 2 seconds */
-               sb = get_curr_sync_buff(2 * HZ);
+               /* check if entries stay in ipvs->sync_buff for 2 seconds */
+               sb = get_curr_sync_buff(ipvs, 2 * HZ);
                if (sb) {
                        ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
                        ip_vs_sync_buff_release(sb);
@@ -783,14 +1464,13 @@ static int sync_thread_master(void *data)
        }
 
        /* clean up the sync_buff queue */
-       while ((sb=sb_dequeue())) {
+       while ((sb = sb_dequeue(ipvs)))
                ip_vs_sync_buff_release(sb);
-       }
 
        /* clean up the current sync_buff */
-       if ((sb = get_curr_sync_buff(0))) {
+       sb = get_curr_sync_buff(ipvs, 0);
+       if (sb)
                ip_vs_sync_buff_release(sb);
-       }
 
        /* release the sending multicast socket */
        sock_release(tinfo->sock);
@@ -803,11 +1483,12 @@ static int sync_thread_master(void *data)
 static int sync_thread_backup(void *data)
 {
        struct ip_vs_sync_thread_data *tinfo = data;
+       struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
        int len;
 
        pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
                "syncid = %d\n",
-               ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+               ipvs->backup_mcast_ifn, ipvs->backup_syncid);
 
        while (!kthread_should_stop()) {
                wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -817,7 +1498,7 @@ static int sync_thread_backup(void *data)
                /* do we have data now? */
                while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
                        len = ip_vs_receive(tinfo->sock, tinfo->buf,
-                                       sync_recv_mesg_maxlen);
+                                       ipvs->recv_mesg_maxlen);
                        if (len <= 0) {
                                pr_err("receiving message error\n");
                                break;
@@ -826,7 +1507,7 @@ static int sync_thread_backup(void *data)
                        /* disable bottom half, because it accesses the data
                           shared by softirq while getting/creating conns */
                        local_bh_disable();
-                       ip_vs_process_message(tinfo->buf, len);
+                       ip_vs_process_message(tinfo->net, tinfo->buf, len);
                        local_bh_enable();
                }
        }
@@ -840,41 +1521,42 @@ static int sync_thread_backup(void *data)
 }
 
 
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
 {
        struct ip_vs_sync_thread_data *tinfo;
        struct task_struct **realtask, *task;
        struct socket *sock;
+       struct netns_ipvs *ipvs = net_ipvs(net);
        char *name, *buf = NULL;
        int (*threadfn)(void *data);
        int result = -ENOMEM;
 
        IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
-                 sizeof(struct ip_vs_sync_conn));
+                 sizeof(struct ip_vs_sync_conn_v0));
 
        if (state == IP_VS_STATE_MASTER) {
-               if (sync_master_thread)
+               if (ipvs->master_thread)
                        return -EEXIST;
 
-               strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
-                       sizeof(ip_vs_master_mcast_ifn));
-               ip_vs_master_syncid = syncid;
-               realtask = &sync_master_thread;
-               name = "ipvs_syncmaster";
+               strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
+                       sizeof(ipvs->master_mcast_ifn));
+               ipvs->master_syncid = syncid;
+               realtask = &ipvs->master_thread;
+               name = "ipvs_master:%d";
                threadfn = sync_thread_master;
-               sock = make_send_sock();
+               sock = make_send_sock(net);
        } else if (state == IP_VS_STATE_BACKUP) {
-               if (sync_backup_thread)
+               if (ipvs->backup_thread)
                        return -EEXIST;
 
-               strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
-                       sizeof(ip_vs_backup_mcast_ifn));
-               ip_vs_backup_syncid = syncid;
-               realtask = &sync_backup_thread;
-               name = "ipvs_syncbackup";
+               strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
+                       sizeof(ipvs->backup_mcast_ifn));
+               ipvs->backup_syncid = syncid;
+               realtask = &ipvs->backup_thread;
+               name = "ipvs_backup:%d";
                threadfn = sync_thread_backup;
-               sock = make_receive_sock();
+               sock = make_receive_sock(net);
        } else {
                return -EINVAL;
        }
@@ -884,9 +1566,9 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
                goto out;
        }
 
-       set_sync_mesg_maxlen(state);
+       set_sync_mesg_maxlen(net, state);
        if (state == IP_VS_STATE_BACKUP) {
-               buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+               buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
                if (!buf)
                        goto outsocket;
        }
@@ -895,10 +1577,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
        if (!tinfo)
                goto outbuf;
 
+       tinfo->net = net;
        tinfo->sock = sock;
        tinfo->buf = buf;
 
-       task = kthread_run(threadfn, tinfo, name);
+       task = kthread_run(threadfn, tinfo, name, ipvs->gen);
        if (IS_ERR(task)) {
                result = PTR_ERR(task);
                goto outtinfo;
@@ -906,7 +1589,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 
        /* mark as active */
        *realtask = task;
-       ip_vs_sync_state |= state;
+       ipvs->sync_state |= state;
 
        /* increase the module use count */
        ip_vs_use_count_inc();
@@ -924,16 +1607,18 @@ out:
 }
 
 
-int stop_sync_thread(int state)
+int stop_sync_thread(struct net *net, int state)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
        IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
        if (state == IP_VS_STATE_MASTER) {
-               if (!sync_master_thread)
+               if (!ipvs->master_thread)
                        return -ESRCH;
 
                pr_info("stopping master sync thread %d ...\n",
-                       task_pid_nr(sync_master_thread));
+                       task_pid_nr(ipvs->master_thread));
 
                /*
                 * The lock synchronizes with sb_queue_tail(), so that we don't
@@ -941,21 +1626,21 @@ int stop_sync_thread(int state)
                 * progress of stopping the master sync daemon.
                 */
 
-               spin_lock_bh(&ip_vs_sync_lock);
-               ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-               spin_unlock_bh(&ip_vs_sync_lock);
-               kthread_stop(sync_master_thread);
-               sync_master_thread = NULL;
+               spin_lock_bh(&ipvs->sync_lock);
+               ipvs->sync_state &= ~IP_VS_STATE_MASTER;
+               spin_unlock_bh(&ipvs->sync_lock);
+               kthread_stop(ipvs->master_thread);
+               ipvs->master_thread = NULL;
        } else if (state == IP_VS_STATE_BACKUP) {
-               if (!sync_backup_thread)
+               if (!ipvs->backup_thread)
                        return -ESRCH;
 
                pr_info("stopping backup sync thread %d ...\n",
-                       task_pid_nr(sync_backup_thread));
+                       task_pid_nr(ipvs->backup_thread));
 
-               ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
-               kthread_stop(sync_backup_thread);
-               sync_backup_thread = NULL;
+               ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+               kthread_stop(ipvs->backup_thread);
+               ipvs->backup_thread = NULL;
        } else {
                return -EINVAL;
        }
@@ -965,3 +1650,42 @@ int stop_sync_thread(int state)
 
        return 0;
 }
+
+/*
+ * Initialize data struct for each netns
+ */
+static int __net_init __ip_vs_sync_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       INIT_LIST_HEAD(&ipvs->sync_queue);
+       spin_lock_init(&ipvs->sync_lock);
+       spin_lock_init(&ipvs->sync_buff_lock);
+
+       ipvs->sync_mcast_addr.sin_family = AF_INET;
+       ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
+       ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
+       return 0;
+}
+
+static void __ip_vs_sync_cleanup(struct net *net)
+{
+       stop_sync_thread(net, IP_VS_STATE_MASTER);
+       stop_sync_thread(net, IP_VS_STATE_BACKUP);
+}
+
+static struct pernet_operations ipvs_sync_ops = {
+       .init = __ip_vs_sync_init,
+       .exit = __ip_vs_sync_cleanup,
+};
+
+
+int __init ip_vs_sync_init(void)
+{
+       return register_pernet_subsys(&ipvs_sync_ops);
+}
+
+void ip_vs_sync_cleanup(void)
+{
+       unregister_pernet_subsys(&ipvs_sync_ops);
+}
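
The per-netns init/cleanup added above follows the kernel's standard pernet_operations pattern: the .init/.exit callbacks run once for every network namespace as it is created and torn down, and the subsystem registers them at module load. A minimal self-contained sketch of the same pattern; all example_* names are illustrative, not part of this patch:

/*
 * Minimal sketch of the pernet_operations registration pattern used above.
 * example_* names are illustrative only.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
        /* per-namespace state would be initialized here */
        pr_info("example: new netns %p\n", net);
        return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
        /* per-namespace state is torn down here */
        pr_info("example: netns %p going away\n", net);
}

static struct pernet_operations example_net_ops = {
        .init = example_net_init,
        .exit = example_net_exit,
};

static int __init example_module_init(void)
{
        return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
        unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
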
index bbddfdb..bc1bfc4 100644 (file)
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
-       /*
-        * We think the overhead of processing active connections is 256
-        * times higher than that of inactive connections in average. (This
-        * 256 times might not be accurate, we will change it later) We
-        * use the following formula to estimate the overhead now:
-        *                dest->activeconns*256 + dest->inactconns
-        */
-       return (atomic_read(&dest->activeconns) << 8) +
-               atomic_read(&dest->inactconns);
-}
-
-
 /*
  *     Weighted Least Connection scheduling
  */
@@ -71,11 +55,11 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
                    atomic_read(&dest->weight) > 0) {
                        least = dest;
-                       loh = ip_vs_wlc_dest_overhead(least);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
-       IP_VS_ERR_RL("WLC: no destination available\n");
+       ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
        /*
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
-               doh = ip_vs_wlc_dest_overhead(dest);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
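
The per-scheduler helper removed at the top of this section is replaced by the shared ip_vs_dest_conn_overhead(). Assuming it keeps the same estimate described in the removed comment (active connections weighted 256 times more heavily than inactive ones), the computation it centralizes is equivalent to this sketch; the helper name here is illustrative:

#include <net/ip_vs.h>

/* Estimated load of a real server: activeconns * 256 + inactconns
 * (the 256x weighting is the heuristic from the removed comment). */
static inline unsigned int example_dest_overhead(struct ip_vs_dest *dest)
{
        return (atomic_read(&dest->activeconns) << 8) +
                atomic_read(&dest->inactconns);
}
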
index 30db633..1ef41f5 100644 (file)
@@ -147,8 +147,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
                        if (mark->cl == mark->cl->next) {
                                /* no dest entry */
-                               IP_VS_ERR_RL("WRR: no destination available: "
-                                            "no destinations present\n");
+                               ip_vs_scheduler_err(svc,
+                                       "no destination available: "
+                                       "no destinations present");
                                dest = NULL;
                                goto out;
                        }
@@ -162,8 +163,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                                 */
                                if (mark->cw == 0) {
                                        mark->cl = &svc->destinations;
-                                       IP_VS_ERR_RL("WRR: no destination "
-                                                    "available\n");
+                                       ip_vs_scheduler_err(svc,
+                                               "no destination available");
                                        dest = NULL;
                                        goto out;
                                }
@@ -185,8 +186,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                        /* back to the start, and no dest is found.
                           It is only possible when all dests are OVERLOADED */
                        dest = NULL;
-                       IP_VS_ERR_RL("WRR: no destination available: "
-                                    "all destinations are overloaded\n");
+                       ip_vs_scheduler_err(svc,
+                               "no destination available: "
+                               "all destinations are overloaded");
                        goto out;
                }
        }
index 5325a3f..6132b21 100644 (file)
 
 #include <net/ip_vs.h>
 
+enum {
+       IP_VS_RT_MODE_LOCAL     = 1, /* Allow local dest */
+       IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
+       IP_VS_RT_MODE_RDR       = 4, /* Allow redirect from remote daddr to
+                                     * local
+                                     */
+};
 
 /*
  *      Destination cache to speed up outgoing route lookup
@@ -77,11 +84,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
        return dst;
 }
 
-/*
- * Get route to destination or remote server
- * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
- *         &4=Allow redirect from remote daddr to local
- */
+/* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                   __be32 daddr, u32 rtos, int rt_mode)
@@ -95,12 +98,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                spin_lock(&dest->dst_lock);
                if (!(rt = (struct rtable *)
                      __ip_vs_dst_check(dest, rtos))) {
-                       struct flowi fl = {
-                               .fl4_dst = dest->addr.ip,
-                               .fl4_tos = rtos,
-                       };
-
-                       if (ip_route_output_key(net, &rt, &fl)) {
+                       rt = ip_route_output(net, dest->addr.ip, 0, rtos, 0);
+                       if (IS_ERR(rt)) {
                                spin_unlock(&dest->dst_lock);
                                IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
                                             &dest->addr.ip);
@@ -113,12 +112,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                }
                spin_unlock(&dest->dst_lock);
        } else {
-               struct flowi fl = {
-                       .fl4_dst = daddr,
-                       .fl4_tos = rtos,
-               };
-
-               if (ip_route_output_key(net, &rt, &fl)) {
+               rt = ip_route_output(net, daddr, 0, rtos, 0);
+               if (IS_ERR(rt)) {
                        IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
                                     &daddr);
                        return NULL;
@@ -126,15 +121,16 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
        }
 
        local = rt->rt_flags & RTCF_LOCAL;
-       if (!((local ? 1 : 2) & rt_mode)) {
+       if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
+             rt_mode)) {
                IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
                             (rt->rt_flags & RTCF_LOCAL) ?
                             "local":"non-local", &rt->rt_dst);
                ip_rt_put(rt);
                return NULL;
        }
-       if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
-                                        ort->rt_flags & RTCF_LOCAL)) {
+       if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
+           !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
                IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
                             "requires NAT method, dest: %pI4\n",
                             &ip_hdr(skb)->daddr, &rt->rt_dst);
@@ -169,15 +165,15 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
                        return 0;
                refdst_drop(orefdst);
        } else {
-               struct flowi fl = {
-                       .fl4_dst = iph->daddr,
-                       .fl4_src = iph->saddr,
-                       .fl4_tos = RT_TOS(iph->tos),
-                       .mark = skb->mark,
+               struct flowi4 fl4 = {
+                       .daddr = iph->daddr,
+                       .saddr = iph->saddr,
+                       .flowi4_tos = RT_TOS(iph->tos),
+                       .flowi4_mark = skb->mark,
                };
-               struct rtable *rt;
 
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_key(net, &fl4);
+               if (IS_ERR(rt))
                        return 0;
                if (!(rt->rt_flags & RTCF_LOCAL)) {
                        ip_rt_put(rt);
@@ -202,22 +198,27 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
                        struct in6_addr *ret_saddr, int do_xfrm)
 {
        struct dst_entry *dst;
-       struct flowi fl = {
-               .fl6_dst = *daddr,
+       struct flowi6 fl6 = {
+               .daddr = *daddr,
        };
 
-       dst = ip6_route_output(net, NULL, &fl);
+       dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error)
                goto out_err;
        if (!ret_saddr)
                return dst;
-       if (ipv6_addr_any(&fl.fl6_src) &&
+       if (ipv6_addr_any(&fl6.saddr) &&
            ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
-                              &fl.fl6_dst, 0, &fl.fl6_src) < 0)
-               goto out_err;
-       if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0)
+                              &fl6.daddr, 0, &fl6.saddr) < 0)
                goto out_err;
-       ipv6_addr_copy(ret_saddr, &fl.fl6_src);
+       if (do_xfrm) {
+               dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+               if (IS_ERR(dst)) {
+                       dst = NULL;
+                       goto out_err;
+               }
+       }
+       ipv6_addr_copy(ret_saddr, &fl6.saddr);
        return dst;
 
 out_err:
@@ -384,13 +385,14 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        EnterFunction(10);
 
-       if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
-                                     RT_TOS(iph->tos), 2)))
+       if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
+                                     IP_VS_RT_MODE_NON_LOCAL)))
                goto tx_error_icmp;
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+           !skb_is_gso(skb)) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -443,7 +445,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -512,7 +514,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        }
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(iph->tos), 1|2|4)))
+                                     RT_TOS(iph->tos),
+                                     IP_VS_RT_MODE_LOCAL |
+                                       IP_VS_RT_MODE_NON_LOCAL |
+                                       IP_VS_RT_MODE_RDR)))
                goto tx_error_icmp;
        local = rt->rt_flags & RTCF_LOCAL;
        /*
@@ -543,7 +548,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+           !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
                                 "ip_vs_nat_xmit(): frag needed for");
@@ -658,7 +664,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -754,7 +760,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        EnterFunction(10);
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(tos), 1|2)))
+                                     RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
+                                                  IP_VS_RT_MODE_NON_LOCAL)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
                ip_rt_put(rt);
@@ -773,8 +780,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        df |= (old_iph->frag_off & htons(IP_DF));
 
-       if ((old_iph->frag_off & htons(IP_DF))
-           && mtu < ntohs(old_iph->tot_len)) {
+       if ((old_iph->frag_off & htons(IP_DF) &&
+           mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
@@ -886,7 +893,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
-       if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+       if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+           !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -982,7 +990,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        EnterFunction(10);
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(iph->tos), 1|2)))
+                                     RT_TOS(iph->tos),
+                                     IP_VS_RT_MODE_LOCAL |
+                                       IP_VS_RT_MODE_NON_LOCAL)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
                ip_rt_put(rt);
@@ -991,7 +1001,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+       if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+           !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1125,7 +1136,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         */
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
+                                     RT_TOS(ip_hdr(skb)->tos),
+                                     IP_VS_RT_MODE_LOCAL |
+                                       IP_VS_RT_MODE_NON_LOCAL |
+                                       IP_VS_RT_MODE_RDR)))
                goto tx_error_icmp;
        local = rt->rt_flags & RTCF_LOCAL;
 
@@ -1158,7 +1172,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+       if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+           !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
@@ -1272,7 +1287,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
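
The transmit-path hunks above repeatedly add !skb_is_gso(skb) to the DF/MTU tests so that GSO frames, which the stack segments to the path MTU later, are not rejected as needing fragmentation. A sketch of the resulting IPv4 check, with an illustrative helper name:

#include <linux/skbuff.h>
#include <linux/ip.h>

/* True when an IPv4 packet must be rejected with ICMP_FRAG_NEEDED:
 * larger than the route MTU, DF set, and not a GSO frame (GSO frames
 * are segmented to the path MTU later in the stack). */
static bool example_frag_needed(const struct sk_buff *skb,
                                const struct iphdr *iph, unsigned int mtu)
{
        return skb->len > mtu &&
               (iph->frag_off & htons(IP_DF)) &&
               !skb_is_gso(skb);
}
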
 
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
new file mode 100644 (file)
index 0000000..4e99cca
--- /dev/null
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -0,0 +1,82 @@
+/*
+ *      broadcast connection tracking helper
+ *
+ *      (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <net/route.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb,
+                               unsigned int protoff,
+                               struct nf_conn *ct,
+                               enum ip_conntrack_info ctinfo,
+                               unsigned int timeout)
+{
+       struct nf_conntrack_expect *exp;
+       struct iphdr *iph = ip_hdr(skb);
+       struct rtable *rt = skb_rtable(skb);
+       struct in_device *in_dev;
+       struct nf_conn_help *help = nfct_help(ct);
+       __be32 mask = 0;
+
+       /* we're only interested in locally generated packets */
+       if (skb->sk == NULL)
+               goto out;
+       if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+               goto out;
+       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+               goto out;
+
+       rcu_read_lock();
+       in_dev = __in_dev_get_rcu(rt->dst.dev);
+       if (in_dev != NULL) {
+               for_primary_ifa(in_dev) {
+                       if (ifa->ifa_broadcast == iph->daddr) {
+                               mask = ifa->ifa_mask;
+                               break;
+                       }
+               } endfor_ifa(in_dev);
+       }
+       rcu_read_unlock();
+
+       if (mask == 0)
+               goto out;
+
+       exp = nf_ct_expect_alloc(ct);
+       if (exp == NULL)
+               goto out;
+
+       exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+       exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+       exp->mask.src.u3.ip       = mask;
+       exp->mask.src.u.udp.port  = htons(0xFFFF);
+
+       exp->expectfn             = NULL;
+       exp->flags                = NF_CT_EXPECT_PERMANENT;
+       exp->class                = NF_CT_EXPECT_CLASS_DEFAULT;
+       exp->helper               = NULL;
+
+       nf_ct_expect_related(exp);
+       nf_ct_expect_put(exp);
+
+       nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+       return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
+
+MODULE_LICENSE("GPL");
index e615119..941286c 100644 (file)
@@ -43,6 +43,7 @@
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -282,6 +283,11 @@ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 static void death_by_timeout(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
+       struct nf_conn_tstamp *tstamp;
+
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp && tstamp->stop == 0)
+               tstamp->stop = ktime_to_ns(ktime_get_real());
 
        if (!test_bit(IPS_DYING_BIT, &ct->status) &&
            unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
@@ -419,6 +425,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
+       struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
@@ -486,8 +493,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
-       set_bit(IPS_CONFIRMED_BIT, &ct->status);
+       ct->status |= IPS_CONFIRMED;
+
+       /* set conntrack timestamp, if enabled. */
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp) {
+               if (skb->tstamp.tv64 == 0)
+                       __net_timestamp((struct sk_buff *)skb);
 
+               tstamp->start = ktime_to_ns(skb->tstamp);
+       }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
@@ -655,7 +670,8 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
         * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
         */
        memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
-              sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+              offsetof(struct nf_conn, proto) -
+              offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -745,6 +761,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        }
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+       nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 
        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -942,8 +959,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
 out:
-       if (tmpl)
-               nf_ct_put(tmpl);
+       if (tmpl) {
+               /* Special case: we have to repeat this hook, assign the
+                * template again to this packet. We assume that this packet
+                * has no conntrack assigned. This is used by nf_ct_tcp. */
+               if (ret == NF_REPEAT)
+                       skb->nfct = (struct nf_conntrack *)tmpl;
+               else
+                       nf_ct_put(tmpl);
+       }
 
        return ret;
 }
@@ -1185,6 +1209,11 @@ struct __nf_ct_flush_report {
 static int kill_report(struct nf_conn *i, void *data)
 {
        struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
+       struct nf_conn_tstamp *tstamp;
+
+       tstamp = nf_conn_tstamp_find(i);
+       if (tstamp && tstamp->stop == 0)
+               tstamp->stop = ktime_to_ns(ktime_get_real());
 
        /* If we fail to deliver the event, death_by_timeout() will retry */
        if (nf_conntrack_event_report(IPCT_DESTROY, i,
@@ -1201,9 +1230,9 @@ static int kill_all(struct nf_conn *i, void *data)
        return 1;
 }
 
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
 {
-       if (vmalloced)
+       if (is_vmalloc_addr(hash))
                vfree(hash);
        else
                free_pages((unsigned long)hash,
@@ -1270,9 +1299,9 @@ static void nf_conntrack_cleanup_net(struct net *net)
                goto i_see_dead_people;
        }
 
-       nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            net->ct.htable_size);
+       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
        nf_conntrack_ecache_fini(net);
+       nf_conntrack_tstamp_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
        kmem_cache_destroy(net->ct.nf_conntrack_cachep);
@@ -1300,21 +1329,18 @@ void nf_conntrack_cleanup(struct net *net)
        }
 }
 
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 {
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;
        size_t sz;
 
-       *vmalloced = 0;
-
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
        if (!hash) {
-               *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                                 PAGE_KERNEL);
@@ -1330,7 +1356,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
-       int i, bucket, vmalloced, old_vmalloced;
+       int i, bucket;
        unsigned int hashsize, old_size;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
@@ -1347,7 +1373,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
        if (!hashsize)
                return -EINVAL;
 
-       hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+       hash = nf_ct_alloc_hashtable(&hashsize, 1);
        if (!hash)
                return -ENOMEM;
 
@@ -1369,15 +1395,13 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                }
        }
        old_size = init_net.ct.htable_size;
-       old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;
 
        init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-       init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        spin_unlock_bh(&nf_conntrack_lock);
 
-       nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+       nf_ct_free_hashtable(old_hash, old_size);
        return 0;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
@@ -1490,8 +1514,7 @@ static int nf_conntrack_init_net(struct net *net)
        }
 
        net->ct.htable_size = nf_conntrack_htable_size;
-       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
-                                            &net->ct.hash_vmalloc, 1);
+       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
        if (!net->ct.hash) {
                ret = -ENOMEM;
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
@@ -1503,6 +1526,9 @@ static int nf_conntrack_init_net(struct net *net)
        ret = nf_conntrack_acct_init(net);
        if (ret < 0)
                goto err_acct;
+       ret = nf_conntrack_tstamp_init(net);
+       if (ret < 0)
+               goto err_tstamp;
        ret = nf_conntrack_ecache_init(net);
        if (ret < 0)
                goto err_ecache;
@@ -1510,12 +1536,13 @@ static int nf_conntrack_init_net(struct net *net)
        return 0;
 
 err_ecache:
+       nf_conntrack_tstamp_fini(net);
+err_tstamp:
        nf_conntrack_acct_fini(net);
 err_acct:
        nf_conntrack_expect_fini(net);
 err_expect:
-       nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            net->ct.htable_size);
+       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 err_hash:
        kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:
index 5702de3..63a1b91 100644 (file)
@@ -63,6 +63,9 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
                 * this does not harm and it happens very rarely. */
                unsigned long missed = e->missed;
 
+               if (!((events | missed) & e->ctmask))
+                       goto out_unlock;
+
                ret = notify->fcn(events | missed, &item);
                if (unlikely(ret < 0 || missed)) {
                        spin_lock_bh(&ct->lock);
index a20fb0b..cd1e8e0 100644 (file)
@@ -319,7 +319,8 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
 
-       atomic_inc(&exp->use);
+       /* two references : one for hash insert, one for the timer */
+       atomic_add(2, &exp->use);
 
        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
@@ -333,12 +334,14 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
-               p = &master_help->helper->expect_policy[exp->class];
+               p = &rcu_dereference_protected(
+                               master_help->helper,
+                               lockdep_is_held(&nf_conntrack_lock)
+                               )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);
 
-       atomic_inc(&exp->use);
        NF_CT_STAT_INC(net, expect_create);
 }
 
@@ -369,7 +372,10 @@ static inline int refresh_timer(struct nf_conntrack_expect *i)
        if (!del_timer(&i->timeout))
                return 0;
 
-       p = &master_help->helper->expect_policy[i->class];
+       p = &rcu_dereference_protected(
+               master_help->helper,
+               lockdep_is_held(&nf_conntrack_lock)
+               )->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
@@ -407,7 +413,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        }
        /* Will be over limit? */
        if (master_help) {
-               p = &master_help->helper->expect_policy[expect->class];
+               p = &rcu_dereference_protected(
+                       master_help->helper,
+                       lockdep_is_held(&nf_conntrack_lock)
+                       )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
@@ -478,7 +487,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
        struct hlist_node *n;
 
        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-               n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
@@ -491,11 +500,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
-               head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
 }
@@ -630,8 +639,7 @@ int nf_conntrack_expect_init(struct net *net)
        }
 
        net->ct.expect_count = 0;
-       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
-                                                 &net->ct.expect_vmalloc, 0);
+       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;
 
@@ -653,8 +661,7 @@ err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
 err2:
-       nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-                            nf_ct_expect_hsize);
+       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 err1:
        return err;
 }
@@ -666,6 +673,5 @@ void nf_conntrack_expect_fini(struct net *net)
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
-       nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-                            nf_ct_expect_hsize);
+       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }
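
The hunks in this section, and in several sections below, wrap reads of RCU-managed pointers in rcu_dereference_protected(..., lockdep_is_held(&nf_conntrack_lock)). That documents that the access happens while holding the lock rather than under rcu_read_lock(), and lets lockdep/sparse verify the claim. A minimal self-contained sketch of the pattern, with illustrative names:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct example_item {
        int value;
};

static DEFINE_SPINLOCK(example_lock);
static struct example_item __rcu *example_ptr;

/* Reader that holds example_lock instead of rcu_read_lock(): the second
 * argument to rcu_dereference_protected() is the lockdep condition that
 * proves the access is safe. */
static int example_read_locked(void)
{
        struct example_item *p;
        int v = 0;

        spin_lock_bh(&example_lock);
        p = rcu_dereference_protected(example_ptr,
                                      lockdep_is_held(&example_lock));
        if (p)
                v = p->value;
        spin_unlock_bh(&example_lock);
        return v;
}
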
index bd82450..80a23ed 100644 (file)
@@ -140,15 +140,16 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
        /* This assumes that extended areas in conntrack for the types
           whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
        for (i = min; i <= max; i++) {
-               t1 = nf_ct_ext_types[i];
+               t1 = rcu_dereference_protected(nf_ct_ext_types[i],
+                               lockdep_is_held(&nf_ct_ext_type_mutex));
                if (!t1)
                        continue;
 
-               t1->alloc_size = sizeof(struct nf_ct_ext)
-                                + ALIGN(sizeof(struct nf_ct_ext), t1->align)
-                                + t1->len;
+               t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
+                                t1->len;
                for (j = 0; j < NF_CT_EXT_NUM; j++) {
-                       t2 = nf_ct_ext_types[j];
+                       t2 = rcu_dereference_protected(nf_ct_ext_types[j],
+                               lockdep_is_held(&nf_ct_ext_type_mutex));
                        if (t2 == NULL || t2 == t1 ||
                            (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
                                continue;
index b969025..533a183 100644 (file)
@@ -714,7 +714,6 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                                 u_int8_t family)
 {
        const struct nf_afinfo *afinfo;
-       struct flowi fl1, fl2;
        int ret = 0;
 
        /* rcu_read_lock()ed by nf_hook_slow() */
@@ -722,17 +721,20 @@ static int callforward_do_filter(const union nf_inet_addr *src,
        if (!afinfo)
                return 0;
 
-       memset(&fl1, 0, sizeof(fl1));
-       memset(&fl2, 0, sizeof(fl2));
-
        switch (family) {
        case AF_INET: {
+               struct flowi4 fl1, fl2;
                struct rtable *rt1, *rt2;
 
-               fl1.fl4_dst = src->ip;
-               fl2.fl4_dst = dst->ip;
-               if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
-                       if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
+               memset(&fl1, 0, sizeof(fl1));
+               fl1.daddr = src->ip;
+
+               memset(&fl2, 0, sizeof(fl2));
+               fl2.daddr = dst->ip;
+               if (!afinfo->route((struct dst_entry **)&rt1,
+                                  flowi4_to_flowi(&fl1))) {
+                       if (!afinfo->route((struct dst_entry **)&rt2,
+                                          flowi4_to_flowi(&fl2))) {
                                if (rt1->rt_gateway == rt2->rt_gateway &&
                                    rt1->dst.dev  == rt2->dst.dev)
                                        ret = 1;
@@ -745,12 +747,18 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 #if defined(CONFIG_NF_CONNTRACK_IPV6) || \
     defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
        case AF_INET6: {
+               struct flowi6 fl1, fl2;
                struct rt6_info *rt1, *rt2;
 
-               memcpy(&fl1.fl6_dst, src, sizeof(fl1.fl6_dst));
-               memcpy(&fl2.fl6_dst, dst, sizeof(fl2.fl6_dst));
-               if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
-                       if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
+               memset(&fl1, 0, sizeof(fl1));
+               ipv6_addr_copy(&fl1.daddr, &src->in6);
+
+               memset(&fl2, 0, sizeof(fl2));
+               ipv6_addr_copy(&fl2.daddr, &dst->in6);
+               if (!afinfo->route((struct dst_entry **)&rt1,
+                                  flowi6_to_flowi(&fl1))) {
+                       if (!afinfo->route((struct dst_entry **)&rt2,
+                                          flowi6_to_flowi(&fl2))) {
                                if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
                                            sizeof(rt1->rt6i_gateway)) &&
                                    rt1->dst.dev == rt2->dst.dev)
index 59e1a4c..1bdfea3 100644 (file)
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(nf_ct_helper_mutex);
 static struct hlist_head *nf_ct_helper_hash __read_mostly;
 static unsigned int nf_ct_helper_hsize __read_mostly;
 static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
 
 
 /* Stupid hash, but collision free for the default registrations of the
@@ -158,7 +157,10 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
        struct nf_conn_help *help = nfct_help(ct);
 
-       if (help && help->helper == me) {
+       if (help && rcu_dereference_protected(
+                       help->helper,
+                       lockdep_is_held(&nf_conntrack_lock)
+                       ) == me) {
                nf_conntrack_event(IPCT_HELPER, ct);
                rcu_assign_pointer(help->helper, NULL);
        }
@@ -210,7 +212,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
                hlist_for_each_entry_safe(exp, n, next,
                                          &net->ct.expect_hash[i], hnode) {
                        struct nf_conn_help *help = nfct_help(exp->master);
-                       if ((help->helper == me || exp->helper == me) &&
+                       if ((rcu_dereference_protected(
+                                       help->helper,
+                                       lockdep_is_held(&nf_conntrack_lock)
+                                       ) == me || exp->helper == me) &&
                            del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect(exp);
                                nf_ct_expect_put(exp);
@@ -261,8 +266,7 @@ int nf_conntrack_helper_init(void)
        int err;
 
        nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
-       nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
-                                                 &nf_ct_helper_vmalloc, 0);
+       nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
        if (!nf_ct_helper_hash)
                return -ENOMEM;
 
@@ -273,14 +277,12 @@ int nf_conntrack_helper_init(void)
        return 0;
 
 err1:
-       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-                            nf_ct_helper_hsize);
+       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
        return err;
 }
 
 void nf_conntrack_helper_fini(void)
 {
        nf_ct_extend_unregister(&helper_extend);
-       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-                            nf_ct_helper_hsize);
+       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 }
index aadde01..4c8f30a 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
 #include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <net/route.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
@@ -40,75 +33,26 @@ MODULE_ALIAS("ip_conntrack_netbios_ns");
 MODULE_ALIAS_NFCT_HELPER("netbios_ns");
 
 static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, 0400);
+module_param(timeout, uint, S_IRUSR);
 MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
 
-static int help(struct sk_buff *skb, unsigned int protoff,
-               struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
-       struct nf_conntrack_expect *exp;
-       struct iphdr *iph = ip_hdr(skb);
-       struct rtable *rt = skb_rtable(skb);
-       struct in_device *in_dev;
-       __be32 mask = 0;
-
-       /* we're only interested in locally generated packets */
-       if (skb->sk == NULL)
-               goto out;
-       if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
-               goto out;
-       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
-               goto out;
-
-       rcu_read_lock();
-       in_dev = __in_dev_get_rcu(rt->dst.dev);
-       if (in_dev != NULL) {
-               for_primary_ifa(in_dev) {
-                       if (ifa->ifa_broadcast == iph->daddr) {
-                               mask = ifa->ifa_mask;
-                               break;
-                       }
-               } endfor_ifa(in_dev);
-       }
-       rcu_read_unlock();
-
-       if (mask == 0)
-               goto out;
-
-       exp = nf_ct_expect_alloc(ct);
-       if (exp == NULL)
-               goto out;
-
-       exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-       exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
-       exp->mask.src.u3.ip       = mask;
-       exp->mask.src.u.udp.port  = htons(0xFFFF);
-
-       exp->expectfn             = NULL;
-       exp->flags                = NF_CT_EXPECT_PERMANENT;
-       exp->class                = NF_CT_EXPECT_CLASS_DEFAULT;
-       exp->helper               = NULL;
-
-       nf_ct_expect_related(exp);
-       nf_ct_expect_put(exp);
-
-       nf_ct_refresh(ct, skb, timeout * HZ);
-out:
-       return NF_ACCEPT;
-}
-
 static struct nf_conntrack_expect_policy exp_policy = {
        .max_expected   = 1,
 };
 
+static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
+                  struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+       return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+}
+
 static struct nf_conntrack_helper helper __read_mostly = {
        .name                   = "netbios-ns",
-       .tuple.src.l3num        = AF_INET,
+       .tuple.src.l3num        = NFPROTO_IPV4,
        .tuple.src.u.udp.port   = cpu_to_be16(NMBD_PORT),
        .tuple.dst.protonum     = IPPROTO_UDP,
        .me                     = THIS_MODULE,
-       .help                   = help,
+       .help                   = netbios_ns_help,
        .expect_policy          = &exp_policy,
 };
 
index 93297aa..30bf8a1 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_protocol.h>
@@ -230,6 +231,33 @@ nla_put_failure:
        return -1;
 }
 
+static int
+ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+       struct nlattr *nest_count;
+       const struct nf_conn_tstamp *tstamp;
+
+       tstamp = nf_conn_tstamp_find(ct);
+       if (!tstamp)
+               return 0;
+
+       nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
+       if (!nest_count)
+               goto nla_put_failure;
+
+       NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
+       if (tstamp->stop != 0) {
+               NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
+                            cpu_to_be64(tstamp->stop));
+       }
+       nla_nest_end(skb, nest_count);
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_MARK
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
@@ -404,6 +432,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
            ctnetlink_dump_timeout(skb, ct) < 0 ||
            ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
            ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+           ctnetlink_dump_timestamp(skb, ct) < 0 ||
            ctnetlink_dump_protoinfo(skb, ct) < 0 ||
            ctnetlink_dump_helpinfo(skb, ct) < 0 ||
            ctnetlink_dump_mark(skb, ct) < 0 ||
@@ -470,6 +499,18 @@ ctnetlink_secctx_size(const struct nf_conn *ct)
 #endif
 }
 
+static inline size_t
+ctnetlink_timestamp_size(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+               return 0;
+       return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+#else
+       return 0;
+#endif
+}
+
 static inline size_t
 ctnetlink_nlmsg_size(const struct nf_conn *ct)
 {
@@ -481,6 +522,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
               + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
               + ctnetlink_counters_size(ct)
+              + ctnetlink_timestamp_size(ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
               + nla_total_size(0) /* CTA_PROTOINFO */
               + nla_total_size(0) /* CTA_HELP */
@@ -571,7 +613,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 
        if (events & (1 << IPCT_DESTROY)) {
                if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-                   ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+                   ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+                   ctnetlink_dump_timestamp(skb, ct) < 0)
                        goto nla_put_failure;
        } else {
                if (ctnetlink_dump_timeout(skb, ct) < 0)
@@ -667,6 +710,7 @@ restart:
                        if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                                cb->nlh->nlmsg_seq,
                                                IPCTNL_MSG_CT_NEW, ct) < 0) {
+                               nf_conntrack_get(&ct->ct_general);
                                cb->args[1] = (unsigned long)ct;
                                goto out;
                        }
@@ -760,7 +804,7 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
                      struct nf_conntrack_tuple *tuple,
-                     enum ctattr_tuple type, u_int8_t l3num)
+                     enum ctattr_type type, u_int8_t l3num)
 {
        struct nlattr *tb[CTA_TUPLE_MAX+1];
        int err;
@@ -1357,6 +1401,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
        }
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+       nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
        /* we must add conntrack extensions before confirmation. */
        ct->status |= IPS_CONFIRMED;
@@ -1375,6 +1420,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
        }
 #endif
 
+       memset(&ct->proto, 0, sizeof(ct->proto));
        if (cda[CTA_PROTOINFO]) {
                err = ctnetlink_change_protoinfo(ct, cda);
                if (err < 0)
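
The new CTA_TIMESTAMP attribute added in this section nests one or two big-endian 64-bit values: CTA_TIMESTAMP_START always, CTA_TIMESTAMP_STOP once the flow has been destroyed. Both come from ktime_get_real() (see the conntrack-core hunks earlier), i.e. nanoseconds since the Unix epoch, so a dump consumer could derive a flow duration roughly as sketched below; the helper name is illustrative only.

#include <stdint.h>

/* start_ns/stop_ns are CTA_TIMESTAMP_START/STOP after big-endian to
 * host conversion; both are nanoseconds since the Unix epoch. */
static inline uint64_t example_flow_duration_ms(uint64_t start_ns,
                                                uint64_t stop_ns)
{
        return (stop_ns - start_ns) / 1000000ULL;
}
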
index dc7bb74..5701c8d 100644 (file)
@@ -166,6 +166,7 @@ static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto
 int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
 {
        int ret = 0;
+       struct nf_conntrack_l3proto *old;
 
        if (proto->l3proto >= AF_MAX)
                return -EBUSY;
@@ -174,7 +175,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
                return -EINVAL;
 
        mutex_lock(&nf_ct_proto_mutex);
-       if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+       old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+                                       lockdep_is_held(&nf_ct_proto_mutex));
+       if (old != &nf_conntrack_l3proto_generic) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@ -201,7 +204,9 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
        BUG_ON(proto->l3proto >= AF_MAX);
 
        mutex_lock(&nf_ct_proto_mutex);
-       BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
+       BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+                                        lockdep_is_held(&nf_ct_proto_mutex)
+                                        ) != proto);
        rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
                           &nf_conntrack_l3proto_generic);
        nf_ct_l3proto_unregister_sysctl(proto);
@@ -279,7 +284,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
        mutex_lock(&nf_ct_proto_mutex);
        if (!nf_ct_protos[l4proto->l3proto]) {
                /* l3proto may be loaded latter. */
-               struct nf_conntrack_l4proto **proto_array;
+               struct nf_conntrack_l4proto __rcu **proto_array;
                int i;
 
                proto_array = kmalloc(MAX_NF_CT_PROTO *
@@ -291,7 +296,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
                }
 
                for (i = 0; i < MAX_NF_CT_PROTO; i++)
-                       proto_array[i] = &nf_conntrack_l4proto_generic;
+                       RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic);
 
                /* Before making proto_array visible to lockless readers,
                 * we must make sure its content is committed to memory.
@@ -299,8 +304,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
                smp_wmb();
 
                nf_ct_protos[l4proto->l3proto] = proto_array;
-       } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
-                                       &nf_conntrack_l4proto_generic) {
+       } else if (rcu_dereference_protected(
+                       nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+                       lockdep_is_held(&nf_ct_proto_mutex)
+                       ) != &nf_conntrack_l4proto_generic) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@ -331,7 +338,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
        BUG_ON(l4proto->l3proto >= PF_MAX);
 
        mutex_lock(&nf_ct_proto_mutex);
-       BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
+       BUG_ON(rcu_dereference_protected(
+                       nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+                       lockdep_is_held(&nf_ct_proto_mutex)
+                       ) != l4proto);
        rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
                           &nf_conntrack_l4proto_generic);
        nf_ct_l4proto_unregister_sysctl(l4proto);
index 5292560..9ae57c5 100644 (file)
@@ -452,6 +452,9 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
        ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
        ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
        ct->proto.dccp.state = CT_DCCP_NONE;
+       ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
+       ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
+       ct->proto.dccp.handshake_seq = 0;
        return true;
 
 out_invalid:
index c6049c2..6f4ee70 100644 (file)
@@ -413,6 +413,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
            test_bit(SCTP_CID_COOKIE_ACK, map))
                return false;
 
+       memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
        new_state = SCTP_CONNTRACK_MAX;
        for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
                /* Don't need lock here: this conntrack not in circulation yet */
index 3fb2b73..37bf943 100644 (file)
@@ -227,11 +227,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sCL -> sIV
  */
 /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
-/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
 /*
  *     sSS -> sSR      Standard open.
  *     sS2 -> sSR      Simultaneous open
- *     sSR -> sSR      Retransmitted SYN/ACK.
+ *     sSR -> sIG      Retransmitted SYN/ACK, ignore it.
  *     sES -> sIG      Late retransmitted SYN/ACK?
  *     sFW -> sIG      Might be SYN/ACK answering ignored SYN
  *     sCW -> sIG
@@ -1066,9 +1066,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        BUG_ON(th == NULL);
 
        /* Don't need lock here: this conntrack not in circulation yet */
-       new_state
-               = tcp_conntracks[0][get_conntrack_index(th)]
-               [TCP_CONNTRACK_NONE];
+       new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
 
        /* Invalid: delete conntrack */
        if (new_state >= TCP_CONNTRACK_MAX) {
@@ -1077,6 +1075,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        }
 
        if (new_state == TCP_CONNTRACK_SYN_SENT) {
+               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
                /* SYN packet */
                ct->proto.tcp.seen[0].td_end =
                        segment_seq_plus_len(ntohl(th->seq), skb->len,
@@ -1088,11 +1087,11 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
                        ct->proto.tcp.seen[0].td_end;
 
                tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
-               ct->proto.tcp.seen[1].flags = 0;
        } else if (nf_ct_tcp_loose == 0) {
                /* Don't try to pick up connections. */
                return false;
        } else {
+               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
                /*
                 * We are in the middle of a connection,
                 * its history is lost for us.
@@ -1107,7 +1106,6 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
                ct->proto.tcp.seen[0].td_maxend =
                        ct->proto.tcp.seen[0].td_end +
                        ct->proto.tcp.seen[0].td_maxwin;
-               ct->proto.tcp.seen[0].td_scale = 0;
 
                /* We assume SACK and liberal window checking to handle
                 * window scaling */
@@ -1116,13 +1114,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
                                              IP_CT_TCP_FLAG_BE_LIBERAL;
        }
 
-       ct->proto.tcp.seen[1].td_end = 0;
-       ct->proto.tcp.seen[1].td_maxend = 0;
-       ct->proto.tcp.seen[1].td_maxwin = 0;
-       ct->proto.tcp.seen[1].td_scale = 0;
-
        /* tcp_packet will set them */
-       ct->proto.tcp.state = TCP_CONNTRACK_NONE;
        ct->proto.tcp.last_index = TCP_NONE_SET;
 
        pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
new file mode 100644 (file)
index 0000000..6e545e2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -0,0 +1,77 @@
+/*
+ *      SNMP service broadcast connection tracking helper
+ *
+ *      (c) 2011 Jiri Olsa <jolsa@redhat.com>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SNMP_PORT      161
+
+MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
+MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFCT_HELPER("snmp");
+
+static unsigned int timeout __read_mostly = 30;
+module_param(timeout, uint, S_IRUSR);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+                       unsigned int protoff,
+                       struct nf_conn *ct,
+                       enum ip_conntrack_info ctinfo);
+EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
+
+static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
+               struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+       typeof(nf_nat_snmp_hook) nf_nat_snmp;
+
+       nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+
+       nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
+       if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
+               return nf_nat_snmp(skb, protoff, ct, ctinfo);
+
+       return NF_ACCEPT;
+}
+
+static struct nf_conntrack_expect_policy exp_policy = {
+       .max_expected   = 1,
+};
+
+static struct nf_conntrack_helper helper __read_mostly = {
+       .name                   = "snmp",
+       .tuple.src.l3num        = NFPROTO_IPV4,
+       .tuple.src.u.udp.port   = cpu_to_be16(SNMP_PORT),
+       .tuple.dst.protonum     = IPPROTO_UDP,
+       .me                     = THIS_MODULE,
+       .help                   = snmp_conntrack_help,
+       .expect_policy          = &exp_policy,
+};
+
+static int __init nf_conntrack_snmp_init(void)
+{
+       exp_policy.timeout = timeout;
+       return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_snmp_fini(void)
+{
+       nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_snmp_init);
+module_exit(nf_conntrack_snmp_fini);
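
The nf_nat_snmp_hook pointer exported above follows the usual helper/NAT split: the conntrack helper publishes a hook and calls it through rcu_dereference(), and an optional NAT module fills it in when loaded. A minimal sketch of the registration side, with a hypothetical snmp_nat_help() handler (not part of this patch; it assumes the exported symbol above is declared in a shared header):

/* Sketch only: how a NAT counterpart would attach to nf_nat_snmp_hook. */
static int snmp_nat_help(struct sk_buff *skb, unsigned int protoff,
			 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	/* rewrite addresses embedded in the SNMP payload here */
	return NF_ACCEPT;
}

static int __init snmp_nat_init(void)
{
	rcu_assign_pointer(nf_nat_snmp_hook, snmp_nat_help);
	return 0;
}

static void __exit snmp_nat_exit(void)
{
	rcu_assign_pointer(nf_nat_snmp_hook, NULL);
	synchronize_rcu();	/* let in-flight rcu_dereference() users finish */
}

module_init(snmp_nat_init);
module_exit(snmp_nat_exit);
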
index b4d7f0f..0ae1428 100644 (file)
@@ -29,6 +29,8 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <linux/rculist_nulls.h>
 
 MODULE_LICENSE("GPL");
 
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(print_tuple);
 struct ct_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
+       u_int64_t time_now;
 };
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
@@ -56,7 +59,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
-               n = rcu_dereference(net->ct.hash[st->bucket].first);
+               n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
@@ -69,13 +72,15 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
-               head = rcu_dereference(net->ct.hash[st->bucket].first);
+               head = rcu_dereference(
+                               hlist_nulls_first_rcu(
+                                       &net->ct.hash[st->bucket]));
        }
        return head;
 }
@@ -93,6 +98,9 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
 {
+       struct ct_iter_state *st = seq->private;
+
+       st->time_now = ktime_to_ns(ktime_get_real());
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
 }
@@ -132,6 +140,34 @@ static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+       struct ct_iter_state *st = s->private;
+       struct nf_conn_tstamp *tstamp;
+       s64 delta_time;
+
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp) {
+               delta_time = st->time_now - tstamp->start;
+               if (delta_time > 0)
+                       delta_time = div_s64(delta_time, NSEC_PER_SEC);
+               else
+                       delta_time = 0;
+
+               return seq_printf(s, "delta-time=%llu ",
+                                 (unsigned long long)delta_time);
+       }
+       return 0;
+}
+#else
+static inline int
+ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+       return 0;
+}
+#endif
+
 /* return 0 on success, 1 in case of error */
 static int ct_seq_show(struct seq_file *s, void *v)
 {
@@ -200,6 +236,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
                goto release;
 #endif
 
+       if (ct_show_delta_time(s, ct))
+               goto release;
+
        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                goto release;
 
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
new file mode 100644 (file)
index 0000000..af7dd31
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+
+static int nf_ct_tstamp __read_mostly;
+
+module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
+MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table tstamp_sysctl_table[] = {
+       {
+               .procname       = "nf_conntrack_timestamp",
+               .data           = &init_net.ct.sysctl_tstamp,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+       .len    = sizeof(struct nf_conn_tstamp),
+       .align  = __alignof__(struct nf_conn_tstamp),
+       .id     = NF_CT_EXT_TSTAMP,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+       struct ctl_table *table;
+
+       table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+                       GFP_KERNEL);
+       if (!table)
+               goto out;
+
+       table[0].data = &net->ct.sysctl_tstamp;
+
+       net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
+                       nf_net_netfilter_sysctl_path, table);
+       if (!net->ct.tstamp_sysctl_header) {
+               printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
+               goto out_register;
+       }
+       return 0;
+
+out_register:
+       kfree(table);
+out:
+       return -ENOMEM;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+       struct ctl_table *table;
+
+       table = net->ct.tstamp_sysctl_header->ctl_table_arg;
+       unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
+       kfree(table);
+}
+#else
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+       return 0;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+}
+#endif
+
+int nf_conntrack_tstamp_init(struct net *net)
+{
+       int ret;
+
+       net->ct.sysctl_tstamp = nf_ct_tstamp;
+
+       if (net_eq(net, &init_net)) {
+               ret = nf_ct_extend_register(&tstamp_extend);
+               if (ret < 0) {
+                       printk(KERN_ERR "nf_ct_tstamp: Unable to register "
+                                       "extension\n");
+                       goto out_extend_register;
+               }
+       }
+
+       ret = nf_conntrack_tstamp_init_sysctl(net);
+       if (ret < 0)
+               goto out_sysctl;
+
+       return 0;
+
+out_sysctl:
+       if (net_eq(net, &init_net))
+               nf_ct_extend_unregister(&tstamp_extend);
+out_extend_register:
+       return ret;
+}
+
+void nf_conntrack_tstamp_fini(struct net *net)
+{
+       nf_conntrack_tstamp_fini_sysctl(net);
+       if (net_eq(net, &init_net))
+               nf_ct_extend_unregister(&tstamp_extend);
+}
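
Once the extension is attached to a conntrack entry, reading it back is a simple find-and-subtract, exactly as the delta-time code in the /proc hunk above does. A short sketch of that read side (it assumes the nf_conn_tstamp_find() helper from nf_conntrack_timestamp.h already used earlier in this patch):

/* Sketch: age of a tracked flow in nanoseconds, 0 if timestamping is off. */
static u64 ct_age_ns(const struct nf_conn *ct)
{
	const struct nf_conn_tstamp *tstamp = nf_conn_tstamp_find(ct);

	if (!tstamp)
		return 0;
	return ktime_to_ns(ktime_get_real()) - tstamp->start;
}
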
index b07393e..20714ed 100644 (file)
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
 
 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
 {
+       if (pf >= ARRAY_SIZE(nf_loggers))
+               return -EINVAL;
        mutex_lock(&nf_log_mutex);
        if (__find_logger(pf, logger->name) == NULL) {
                mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
 
 void nf_log_unbind_pf(u_int8_t pf)
 {
+       if (pf >= ARRAY_SIZE(nf_loggers))
+               return;
        mutex_lock(&nf_log_mutex);
        rcu_assign_pointer(nf_loggers[pf], NULL);
        mutex_unlock(&nf_log_mutex);
@@ -161,7 +165,8 @@ static int seq_show(struct seq_file *s, void *v)
        struct nf_logger *t;
        int ret;
 
-       logger = nf_loggers[*pos];
+       logger = rcu_dereference_protected(nf_loggers[*pos],
+                                          lockdep_is_held(&nf_log_mutex));
 
        if (!logger)
                ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -249,7 +254,8 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
                mutex_unlock(&nf_log_mutex);
        } else {
                mutex_lock(&nf_log_mutex);
-               logger = nf_loggers[tindex];
+               logger = rcu_dereference_protected(nf_loggers[tindex],
+                                                  lockdep_is_held(&nf_log_mutex));
                if (!logger)
                        table->data = "NONE";
                else
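
Both nf_log hunks above switch plain array reads to rcu_dereference_protected(..., lockdep_is_held(&nf_log_mutex)): the logger table is RCU-protected for readers, but these paths already hold the mutex, so the annotation tells sparse and lockdep that no rcu_read_lock() is required here. The same update-side pattern in a generic, toy form:

/* Toy illustration of the update-side annotation used above. */
struct cfg { int value; };

static DEFINE_MUTEX(cfg_mutex);
static struct cfg __rcu *active_cfg;

static void cfg_replace(struct cfg *new_cfg)
{
	struct cfg *old;

	mutex_lock(&cfg_mutex);
	/* legal without rcu_read_lock(): we are the serialized updater */
	old = rcu_dereference_protected(active_cfg,
					lockdep_is_held(&cfg_mutex));
	rcu_assign_pointer(active_cfg, new_cfg);
	mutex_unlock(&cfg_mutex);

	synchronize_rcu();	/* wait out readers before freeing */
	kfree(old);
}
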
index 74aebed..5ab22e2 100644 (file)
@@ -27,14 +27,17 @@ static DEFINE_MUTEX(queue_handler_mutex);
 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
        int ret;
+       const struct nf_queue_handler *old;
 
        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;
 
        mutex_lock(&queue_handler_mutex);
-       if (queue_handler[pf] == qh)
+       old = rcu_dereference_protected(queue_handler[pf],
+                                       lockdep_is_held(&queue_handler_mutex));
+       if (old == qh)
                ret = -EEXIST;
-       else if (queue_handler[pf])
+       else if (old)
                ret = -EBUSY;
        else {
                rcu_assign_pointer(queue_handler[pf], qh);
@@ -49,11 +52,15 @@ EXPORT_SYMBOL(nf_register_queue_handler);
 /* The caller must flush their queue before this */
 int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
+       const struct nf_queue_handler *old;
+
        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;
 
        mutex_lock(&queue_handler_mutex);
-       if (queue_handler[pf] && queue_handler[pf] != qh) {
+       old = rcu_dereference_protected(queue_handler[pf],
+                                       lockdep_is_held(&queue_handler_mutex));
+       if (old && old != qh) {
                mutex_unlock(&queue_handler_mutex);
                return -EINVAL;
        }
@@ -73,7 +80,10 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
 
        mutex_lock(&queue_handler_mutex);
        for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++)  {
-               if (queue_handler[pf] == qh)
+               if (rcu_dereference_protected(
+                               queue_handler[pf],
+                               lockdep_is_held(&queue_handler_mutex)
+                               ) == qh)
                        rcu_assign_pointer(queue_handler[pf], NULL);
        }
        mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
                      int (*okfn)(struct sk_buff *),
                      unsigned int queuenum)
 {
-       int status;
+       int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev;
@@ -128,16 +138,20 @@ static int __nf_queue(struct sk_buff *skb,
        rcu_read_lock();
 
        qh = rcu_dereference(queue_handler[pf]);
-       if (!qh)
+       if (!qh) {
+               status = -ESRCH;
                goto err_unlock;
+       }
 
        afinfo = nf_get_afinfo(pf);
        if (!afinfo)
                goto err_unlock;
 
        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-       if (!entry)
+       if (!entry) {
+               status = -ENOMEM;
                goto err_unlock;
+       }
 
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
@@ -151,11 +165,9 @@ static int __nf_queue(struct sk_buff *skb,
 
        /* If it's going away, ignore hook. */
        if (!try_module_get(entry->elem->owner)) {
-               rcu_read_unlock();
-               kfree(entry);
-               return 0;
+               status = -ECANCELED;
+               goto err_unlock;
        }
-
        /* Bump dev refs so they don't vanish while packet is out */
        if (indev)
                dev_hold(indev);
@@ -182,14 +194,13 @@ static int __nf_queue(struct sk_buff *skb,
                goto err;
        }
 
-       return 1;
+       return 0;
 
 err_unlock:
        rcu_read_unlock();
 err:
-       kfree_skb(skb);
        kfree(entry);
-       return 1;
+       return status;
 }
 
 int nf_queue(struct sk_buff *skb,
@@ -201,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
             unsigned int queuenum)
 {
        struct sk_buff *segs;
+       int err;
+       unsigned int queued;
 
        if (!skb_is_gso(skb))
                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@ int nf_queue(struct sk_buff *skb,
        }
 
        segs = skb_gso_segment(skb, 0);
-       kfree_skb(skb);
+       /* Does not use PTR_ERR to limit the number of error codes that can be
+        * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
+        * 'ignore this hook'.
+        */
        if (IS_ERR(segs))
-               return 1;
+               return -EINVAL;
 
+       queued = 0;
+       err = 0;
        do {
                struct sk_buff *nskb = segs->next;
 
                segs->next = NULL;
-               if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-                               queuenum))
+               if (err == 0)
+                       err = __nf_queue(segs, elem, pf, hook, indev,
+                                          outdev, okfn, queuenum);
+               if (err == 0)
+                       queued++;
+               else
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);
-       return 1;
+
+       /* also free orig skb if only some segments were queued */
+       if (unlikely(err && queued))
+               err = 0;
+       if (err == 0)
+               kfree_skb(skb);
+       return err;
 }
 
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -237,6 +265,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        struct sk_buff *skb = entry->skb;
        struct list_head *elem = &entry->elem->list;
        const struct nf_afinfo *afinfo;
+       int err;
 
        rcu_read_lock();
 
@@ -270,10 +299,17 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
                local_bh_enable();
                break;
        case NF_QUEUE:
-               if (!__nf_queue(skb, elem, entry->pf, entry->hook,
-                               entry->indev, entry->outdev, entry->okfn,
-                               verdict >> NF_VERDICT_BITS))
-                       goto next_hook;
+               err = __nf_queue(skb, elem, entry->pf, entry->hook,
+                                entry->indev, entry->outdev, entry->okfn,
+                                verdict >> NF_VERDICT_QBITS);
+               if (err < 0) {
+                       if (err == -ECANCELED)
+                               goto next_hook;
+                       if (err == -ESRCH &&
+                          (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+                               goto next_hook;
+                       kfree_skb(skb);
+               }
                break;
        case NF_STOLEN:
        default:
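
With __nf_queue() now reporting real errors, the NF_QUEUE handling above can distinguish "no handler registered" (-ESRCH) from genuine failures and, when the verdict carries NF_VERDICT_FLAG_QUEUE_BYPASS, let the packet continue instead of dropping it. A minimal sketch of how a target builds such a verdict; the xt_NFQUEUE v2 hunk later in this patch does exactly this based on its bypass option:

/* Sketch: queue to queue 3, but fall through if no one is listening. */
static unsigned int queue_or_bypass_tg(struct sk_buff *skb,
				       const struct xt_action_param *par)
{
	return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
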
index 4d87bef..474d621 100644 (file)
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
        skb->destructor = NULL;
 
        if (sk)
-               nf_tproxy_put_sock(sk);
+               sock_put(sk);
 }
 
 /* consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-       bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
-                               inet_twsk(sk)->tw_transparent :
-                               inet_sk(sk)->transparent;
-
-       if (transparent) {
-               skb_orphan(skb);
-               skb->sk = sk;
-               skb->destructor = nf_tproxy_destructor;
-               return 1;
-       } else
-               nf_tproxy_put_sock(sk);
-
-       return 0;
+       /* assigning tw sockets complicates things; most
+        * skb->sk->X checks would have to test sk->sk_state first */
+       if (sk->sk_state == TCP_TIME_WAIT) {
+               inet_twsk_put(inet_twsk(sk));
+               return;
+       }
+
+       skb_orphan(skb);
+       skb->sk = sk;
+       skb->destructor = nf_tproxy_destructor;
 }
 EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
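
The rewritten nf_tproxy_assign_sock() makes the point from its comment explicit: a TIME_WAIT entry is an inet_timewait_sock, not a full inet_sock, so its reference must be dropped with inet_twsk_put() and it must never be attached to skb->sk. The transparency test that used to live here moves to the callers; consolidated, it is essentially tproxy_sk_is_transparent() from the xt_TPROXY hunk further down, shown here as a sketch without the reference drops:

/* Sketch: the two socket flavours need different accessors. */
static bool sk_is_transparent(const struct sock *sk)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk(sk)->tw_transparent;
	return inet_sk(sk)->transparent;
}
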
 
index 6a1572b..985e9b7 100644 (file)
@@ -376,7 +376,6 @@ __build_packet_message(struct nfulnl_instance *inst,
                        unsigned int hooknum,
                        const struct net_device *indev,
                        const struct net_device *outdev,
-                       const struct nf_loginfo *li,
                        const char *prefix, unsigned int plen)
 {
        struct nfulnl_msg_packet_hdr pmsg;
@@ -652,7 +651,7 @@ nfulnl_log_packet(u_int8_t pf,
        inst->qlen++;
 
        __build_packet_message(inst, skb, data_len, pf,
-                               hooknum, in, out, li, prefix, plen);
+                               hooknum, in, out, prefix, plen);
 
        if (inst->qlen >= qthreshold)
                __nfulnl_flush(inst);
@@ -874,19 +873,19 @@ static struct hlist_node *get_first(struct iter_state *st)
 
        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
-                       return rcu_dereference_bh(instance_table[st->bucket].first);
+                       return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
        }
        return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-       h = rcu_dereference_bh(h->next);
+       h = rcu_dereference_bh(hlist_next_rcu(h));
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;
 
-               h = rcu_dereference_bh(instance_table[st->bucket].first);
+               h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
        }
        return h;
 }
index 68e67d1..b83123f 100644 (file)
@@ -387,25 +387,31 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 {
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
-       int err;
+       int err = -ENOBUFS;
 
        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
-       if (!queue)
+       if (!queue) {
+               err = -ESRCH;
                goto err_out;
+       }
 
-       if (queue->copy_mode == NFQNL_COPY_NONE)
+       if (queue->copy_mode == NFQNL_COPY_NONE) {
+               err = -EINVAL;
                goto err_out;
+       }
 
        nskb = nfqnl_build_packet_message(queue, entry);
-       if (nskb == NULL)
+       if (nskb == NULL) {
+               err = -ENOMEM;
                goto err_out;
-
+       }
        spin_lock_bh(&queue->lock);
 
-       if (!queue->peer_pid)
+       if (!queue->peer_pid) {
+               err = -EINVAL;
                goto err_out_free_nskb;
-
+       }
        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
@@ -432,7 +438,7 @@ err_out_free_nskb:
 err_out_unlock:
        spin_unlock_bh(&queue->lock);
 err_out:
-       return -1;
+       return err;
 }
 
 static int
index c942376..a9adf4c 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/audit.h>
 #include <net/net_namespace.h>
 
 #include <linux/netfilter/x_tables.h>
@@ -38,9 +39,8 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 
 struct compat_delta {
-       struct compat_delta *next;
-       unsigned int offset;
-       int delta;
+       unsigned int offset; /* offset in kernel */
+       int delta; /* delta in 32bit user land */
 };
 
 struct xt_af {
@@ -49,7 +49,9 @@ struct xt_af {
        struct list_head target;
 #ifdef CONFIG_COMPAT
        struct mutex compat_mutex;
-       struct compat_delta *compat_offsets;
+       struct compat_delta *compat_tab;
+       unsigned int number; /* number of slots in compat_tab[] */
+       unsigned int cur; /* number of used slots in compat_tab[] */
 #endif
 };
 
@@ -181,14 +183,14 @@ EXPORT_SYMBOL(xt_unregister_matches);
 /*
  * These are weird, but module loading must not be done with mutex
  * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
+ * function to use.
  */
 
 /* Find match, grabs ref.  Returns ERR_PTR() on error. */
 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 {
        struct xt_match *m;
-       int err = 0;
+       int err = -ENOENT;
 
        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                return ERR_PTR(-EINTR);
@@ -219,9 +221,13 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
 {
        struct xt_match *match;
 
-       match = try_then_request_module(xt_find_match(nfproto, name, revision),
-                                       "%st_%s", xt_prefix[nfproto], name);
-       return (match != NULL) ? match : ERR_PTR(-ENOENT);
+       match = xt_find_match(nfproto, name, revision);
+       if (IS_ERR(match)) {
+               request_module("%st_%s", xt_prefix[nfproto], name);
+               match = xt_find_match(nfproto, name, revision);
+       }
+
+       return match;
 }
 EXPORT_SYMBOL_GPL(xt_request_find_match);
 
@@ -229,7 +235,7 @@ EXPORT_SYMBOL_GPL(xt_request_find_match);
 struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 {
        struct xt_target *t;
-       int err = 0;
+       int err = -ENOENT;
 
        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                return ERR_PTR(-EINTR);
@@ -259,9 +265,13 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 {
        struct xt_target *target;
 
-       target = try_then_request_module(xt_find_target(af, name, revision),
-                                        "%st_%s", xt_prefix[af], name);
-       return (target != NULL) ? target : ERR_PTR(-ENOENT);
+       target = xt_find_target(af, name, revision);
+       if (IS_ERR(target)) {
+               request_module("%st_%s", xt_prefix[af], name);
+               target = xt_find_target(af, name, revision);
+       }
+
+       return target;
 }
 EXPORT_SYMBOL_GPL(xt_request_find_target);
 
@@ -414,54 +424,67 @@ int xt_check_match(struct xt_mtchk_param *par,
 EXPORT_SYMBOL_GPL(xt_check_match);
 
 #ifdef CONFIG_COMPAT
-int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 {
-       struct compat_delta *tmp;
+       struct xt_af *xp = &xt[af];
 
-       tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
-       if (!tmp)
-               return -ENOMEM;
+       if (!xp->compat_tab) {
+               if (!xp->number)
+                       return -EINVAL;
+               xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+               if (!xp->compat_tab)
+                       return -ENOMEM;
+               xp->cur = 0;
+       }
 
-       tmp->offset = offset;
-       tmp->delta = delta;
+       if (xp->cur >= xp->number)
+               return -EINVAL;
 
-       if (xt[af].compat_offsets) {
-               tmp->next = xt[af].compat_offsets->next;
-               xt[af].compat_offsets->next = tmp;
-       } else {
-               xt[af].compat_offsets = tmp;
-               tmp->next = NULL;
-       }
+       if (xp->cur)
+               delta += xp->compat_tab[xp->cur - 1].delta;
+       xp->compat_tab[xp->cur].offset = offset;
+       xp->compat_tab[xp->cur].delta = delta;
+       xp->cur++;
        return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 
 void xt_compat_flush_offsets(u_int8_t af)
 {
-       struct compat_delta *tmp, *next;
-
-       if (xt[af].compat_offsets) {
-               for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
-                       next = tmp->next;
-                       kfree(tmp);
-               }
-               xt[af].compat_offsets = NULL;
+       if (xt[af].compat_tab) {
+               vfree(xt[af].compat_tab);
+               xt[af].compat_tab = NULL;
+               xt[af].number = 0;
        }
 }
 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 
 int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 {
-       struct compat_delta *tmp;
-       int delta;
-
-       for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
-               if (tmp->offset < offset)
-                       delta += tmp->delta;
-       return delta;
+       struct compat_delta *tmp = xt[af].compat_tab;
+       int mid, left = 0, right = xt[af].cur - 1;
+
+       while (left <= right) {
+               mid = (left + right) >> 1;
+               if (offset > tmp[mid].offset)
+                       left = mid + 1;
+               else if (offset < tmp[mid].offset)
+                       right = mid - 1;
+               else
+                       return mid ? tmp[mid - 1].delta : 0;
+       }
+       WARN_ON_ONCE(1);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 
+void xt_compat_init_offsets(u_int8_t af, unsigned int number)
+{
+       xt[af].number = number;
+       xt[af].cur = 0;
+}
+EXPORT_SYMBOL(xt_compat_init_offsets);
+
 int xt_compat_match_offset(const struct xt_match *match)
 {
        u_int16_t csize = match->compatsize ? : match->matchsize;
@@ -820,6 +843,21 @@ xt_replace_table(struct xt_table *table,
         */
        local_bh_enable();
 
+#ifdef CONFIG_AUDIT
+       if (audit_enabled) {
+               struct audit_buffer *ab;
+
+               ab = audit_log_start(current->audit_context, GFP_KERNEL,
+                                    AUDIT_NETFILTER_CFG);
+               if (ab) {
+                       audit_log_format(ab, "table=%s family=%u entries=%u",
+                                        table->name, table->af,
+                                        private->number);
+                       audit_log_end(ab);
+               }
+       }
+#endif
+
        return private;
 }
 EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1338,7 +1376,7 @@ static int __init xt_init(void)
                mutex_init(&xt[i].mutex);
 #ifdef CONFIG_COMPAT
                mutex_init(&xt[i].compat_mutex);
-               xt[i].compat_offsets = NULL;
+               xt[i].compat_tab = NULL;
 #endif
                INIT_LIST_HEAD(&xt[i].target);
                INIT_LIST_HEAD(&xt[i].match);
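
The compat_delta rewrite above replaces a linked list with a sorted array of cumulative deltas: each xt_compat_add_offset() call stores its own delta plus everything recorded before it, so xt_compat_calc_jump() can binary-search the kernel offset and return the adjustment accumulated before that rule in O(log n). A small standalone worked example of the same lookup, with toy numbers:

#include <stdio.h>

struct compat_delta { unsigned int offset; int delta; };

/* cumulative deltas, sorted by kernel offset, as xt_compat_add_offset() builds them */
static const struct compat_delta tab[] = {
	{ .offset = 112, .delta = 4 },	/* first entry: its own delta */
	{ .offset = 224, .delta = 12 },	/* 4 + 8                      */
	{ .offset = 400, .delta = 16 },	/* 12 + 4                     */
};

static int calc_jump(unsigned int offset)
{
	int mid, left = 0, right = (int)(sizeof(tab) / sizeof(tab[0])) - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tab[mid].offset)
			left = mid + 1;
		else if (offset < tab[mid].offset)
			right = mid - 1;
		else	/* exact hit: delta accumulated *before* this entry */
			return mid ? tab[mid - 1].delta : 0;
	}
	return 0;
}

int main(void)
{
	printf("jump for offset 224: %d\n", calc_jump(224));	/* prints 4 */
	return 0;
}
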
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
new file mode 100644 (file)
index 0000000..81802d2
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * Creates audit record for dropped/accepted packets
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_AUDIT.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
+MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
+MODULE_ALIAS("ipt_AUDIT");
+MODULE_ALIAS("ip6t_AUDIT");
+MODULE_ALIAS("ebt_AUDIT");
+MODULE_ALIAS("arpt_AUDIT");
+
+static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
+                       unsigned int proto, unsigned int offset)
+{
+       switch (proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE: {
+               const __be16 *pptr;
+               __be16 _ports[2];
+
+               pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
+               if (pptr == NULL) {
+                       audit_log_format(ab, " truncated=1");
+                       return;
+               }
+
+               audit_log_format(ab, " sport=%hu dport=%hu",
+                                ntohs(pptr[0]), ntohs(pptr[1]));
+               }
+               break;
+
+       case IPPROTO_ICMP:
+       case IPPROTO_ICMPV6: {
+               const u8 *iptr;
+               u8 _ih[2];
+
+               iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
+               if (iptr == NULL) {
+                       audit_log_format(ab, " truncated=1");
+                       return;
+               }
+
+               audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
+                                iptr[0], iptr[1]);
+
+               }
+               break;
+       }
+}
+
+static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
+{
+       struct iphdr _iph;
+       const struct iphdr *ih;
+
+       ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+       if (!ih) {
+               audit_log_format(ab, " truncated=1");
+               return;
+       }
+
+       audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
+               &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);
+
+       if (ntohs(ih->frag_off) & IP_OFFSET) {
+               audit_log_format(ab, " frag=1");
+               return;
+       }
+
+       audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
+}
+
+static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
+{
+       struct ipv6hdr _ip6h;
+       const struct ipv6hdr *ih;
+       u8 nexthdr;
+       int offset;
+
+       ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
+       if (!ih) {
+               audit_log_format(ab, " truncated=1");
+               return;
+       }
+
+       nexthdr = ih->nexthdr;
+       offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
+                                 &nexthdr);
+
+       audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
+                        &ih->saddr, &ih->daddr, nexthdr);
+
+       if (offset)
+               audit_proto(ab, skb, nexthdr, offset);
+}
+
+static unsigned int
+audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct xt_audit_info *info = par->targinfo;
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
+       if (ab == NULL)
+               goto errout;
+
+       audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
+                        info->type, par->hooknum, skb->len,
+                        par->in ? par->in->name : "?",
+                        par->out ? par->out->name : "?");
+
+       if (skb->mark)
+               audit_log_format(ab, " mark=%#x", skb->mark);
+
+       if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+               audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
+                                eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+                                ntohs(eth_hdr(skb)->h_proto));
+
+               if (par->family == NFPROTO_BRIDGE) {
+                       switch (eth_hdr(skb)->h_proto) {
+                       case __constant_htons(ETH_P_IP):
+                               audit_ip4(ab, skb);
+                               break;
+
+                       case __constant_htons(ETH_P_IPV6):
+                               audit_ip6(ab, skb);
+                               break;
+                       }
+               }
+       }
+
+       switch (par->family) {
+       case NFPROTO_IPV4:
+               audit_ip4(ab, skb);
+               break;
+
+       case NFPROTO_IPV6:
+               audit_ip6(ab, skb);
+               break;
+       }
+
+       audit_log_end(ab);
+
+errout:
+       return XT_CONTINUE;
+}
+
+static int audit_tg_check(const struct xt_tgchk_param *par)
+{
+       const struct xt_audit_info *info = par->targinfo;
+
+       if (info->type > XT_AUDIT_TYPE_MAX) {
+               pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+                       XT_AUDIT_TYPE_MAX);
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static struct xt_target audit_tg_reg __read_mostly = {
+       .name           = "AUDIT",
+       .family         = NFPROTO_UNSPEC,
+       .target         = audit_tg,
+       .targetsize     = sizeof(struct xt_audit_info),
+       .checkentry     = audit_tg_check,
+       .me             = THIS_MODULE,
+};
+
+static int __init audit_tg_init(void)
+{
+       return xt_register_target(&audit_tg_reg);
+}
+
+static void __exit audit_tg_exit(void)
+{
+       xt_unregister_target(&audit_tg_reg);
+}
+
+module_init(audit_tg_init);
+module_exit(audit_tg_exit);
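
None of the audit helpers above dereference packet headers directly; every field goes through skb_header_pointer(), which copies into an on-stack buffer when the data is non-linear and returns NULL when the packet is too short, which the target then reports as truncated=1. The same parse pattern in a minimal sketch (log_udp_dport() is illustrative, not part of the patch):

/* Sketch of the safe header access pattern used throughout xt_AUDIT. */
static void log_udp_dport(struct audit_buffer *ab, const struct sk_buff *skb,
			  unsigned int offset)
{
	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
	if (uh == NULL) {
		audit_log_format(ab, " truncated=1");
		return;
	}
	audit_log_format(ab, " dport=%hu", ntohs(uh->dest));
}
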
index c2c0e4a..af9c4da 100644 (file)
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CLASSIFY.h>
+#include <linux/netfilter_arp.h>
 
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Xtables: Qdisc classification");
 MODULE_ALIAS("ipt_CLASSIFY");
 MODULE_ALIAS("ip6t_CLASSIFY");
+MODULE_ALIAS("arpt_CLASSIFY");
 
 static unsigned int
 classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -35,26 +37,36 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static struct xt_target classify_tg_reg __read_mostly = {
-       .name       = "CLASSIFY",
-       .revision   = 0,
-       .family     = NFPROTO_UNSPEC,
-       .table      = "mangle",
-       .hooks      = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
-                     (1 << NF_INET_POST_ROUTING),
-       .target     = classify_tg,
-       .targetsize = sizeof(struct xt_classify_target_info),
-       .me         = THIS_MODULE,
+static struct xt_target classify_tg_reg[] __read_mostly = {
+       {
+               .name       = "CLASSIFY",
+               .revision   = 0,
+               .family     = NFPROTO_UNSPEC,
+               .hooks      = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+                             (1 << NF_INET_POST_ROUTING),
+               .target     = classify_tg,
+               .targetsize = sizeof(struct xt_classify_target_info),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "CLASSIFY",
+               .revision   = 0,
+               .family     = NFPROTO_ARP,
+               .hooks      = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
+               .target     = classify_tg,
+               .targetsize = sizeof(struct xt_classify_target_info),
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init classify_tg_init(void)
 {
-       return xt_register_target(&classify_tg_reg);
+       return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
 }
 
 static void __exit classify_tg_exit(void)
 {
-       xt_unregister_target(&classify_tg_reg);
+       xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
 }
 
 module_init(classify_tg_init);
index be1f22e..3bdd443 100644 (file)
@@ -313,3 +313,5 @@ MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_DESCRIPTION("Xtables: idle time monitor");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ipt_IDLETIMER");
+MODULE_ALIAS("ip6t_IDLETIMER");
index a414050..993de2b 100644 (file)
@@ -31,6 +31,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
 MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+MODULE_ALIAS("ipt_LED");
+MODULE_ALIAS("ip6t_LED");
 
 static LIST_HEAD(xt_led_triggers);
 static DEFINE_MUTEX(xt_led_mutex);
index 039cce1..d4f4b5d 100644 (file)
@@ -72,18 +72,31 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
 
        if (info->queues_total > 1) {
                if (par->family == NFPROTO_IPV4)
-                       queue = hash_v4(skb) % info->queues_total + queue;
+                       queue = (((u64) hash_v4(skb) * info->queues_total) >>
+                                32) + queue;
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
                else if (par->family == NFPROTO_IPV6)
-                       queue = hash_v6(skb) % info->queues_total + queue;
+                       queue = (((u64) hash_v6(skb) * info->queues_total) >>
+                                32) + queue;
 #endif
        }
        return NF_QUEUE_NR(queue);
 }
 
-static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static unsigned int
+nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       const struct xt_NFQ_info_v1 *info = par->targinfo;
+       const struct xt_NFQ_info_v2 *info = par->targinfo;
+       unsigned int ret = nfqueue_tg_v1(skb, par);
+
+       if (info->bypass)
+               ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+       return ret;
+}
+
+static int nfqueue_tg_check(const struct xt_tgchk_param *par)
+{
+       const struct xt_NFQ_info_v2 *info = par->targinfo;
        u32 maxid;
 
        if (unlikely(!rnd_inited)) {
@@ -100,6 +113,8 @@ static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
                       info->queues_total, maxid);
                return -ERANGE;
        }
+       if (par->target->revision == 2 && info->bypass > 1)
+               return -EINVAL;
        return 0;
 }
 
@@ -115,11 +130,20 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
                .name           = "NFQUEUE",
                .revision       = 1,
                .family         = NFPROTO_UNSPEC,
-               .checkentry     = nfqueue_tg_v1_check,
+               .checkentry     = nfqueue_tg_check,
                .target         = nfqueue_tg_v1,
                .targetsize     = sizeof(struct xt_NFQ_info_v1),
                .me             = THIS_MODULE,
        },
+       {
+               .name           = "NFQUEUE",
+               .revision       = 2,
+               .family         = NFPROTO_UNSPEC,
+               .checkentry     = nfqueue_tg_check,
+               .target         = nfqueue_tg_v2,
+               .targetsize     = sizeof(struct xt_NFQ_info_v2),
+               .me             = THIS_MODULE,
+       },
 };
 
 static int __init nfqueue_tg_init(void)
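
The balancing hunk above swaps "hash % queues_total" for a 32x32->64 multiply and shift, which maps the full 32-bit hash onto [0, queues_total) without a division. A tiny standalone illustration of the mapping:

#include <stdio.h>
#include <stdint.h>

/* map a 32-bit hash onto [0, n) with a multiply+shift, as in nfqueue_tg_v1() */
static uint32_t scale(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	printf("%u\n", scale(0x00000000u, 8));	/* 0 */
	printf("%u\n", scale(0x80000000u, 8));	/* 4: upper half of hash space */
	printf("%u\n", scale(0xffffffffu, 8));	/* 7: never reaches n          */
	return 0;
}
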
index eb81c38..6e6b46c 100644 (file)
@@ -148,16 +148,21 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
                                    unsigned int family)
 {
-       struct flowi fl = {};
+       struct flowi fl;
        const struct nf_afinfo *ai;
        struct rtable *rt = NULL;
        u_int32_t mtu     = ~0U;
 
-       if (family == PF_INET)
-               fl.fl4_dst = ip_hdr(skb)->saddr;
-       else
-               fl.fl6_dst = ipv6_hdr(skb)->saddr;
+       if (family == PF_INET) {
+               struct flowi4 *fl4 = &fl.u.ip4;
+               memset(fl4, 0, sizeof(*fl4));
+               fl4->daddr = ip_hdr(skb)->saddr;
+       } else {
+               struct flowi6 *fl6 = &fl.u.ip6;
 
+               memset(fl6, 0, sizeof(*fl6));
+               ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr);
+       }
        rcu_read_lock();
        ai = nf_get_afinfo(family);
        if (ai != NULL)
index 5128a6c..5f054a0 100644 (file)
@@ -62,18 +62,19 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
        const struct iphdr *iph = ip_hdr(skb);
        struct net *net = pick_net(skb);
        struct rtable *rt;
-       struct flowi fl;
+       struct flowi4 fl4;
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl4, 0, sizeof(fl4));
        if (info->priv) {
                if (info->priv->oif == -1)
                        return false;
-               fl.oif = info->priv->oif;
+               fl4.flowi4_oif = info->priv->oif;
        }
-       fl.fl4_dst = info->gw.ip;
-       fl.fl4_tos = RT_TOS(iph->tos);
-       fl.fl4_scope = RT_SCOPE_UNIVERSE;
-       if (ip_route_output_key(net, &rt, &fl) != 0)
+       fl4.daddr = info->gw.ip;
+       fl4.flowi4_tos = RT_TOS(iph->tos);
+       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt))
                return false;
 
        skb_dst_drop(skb);
@@ -142,18 +143,18 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = pick_net(skb);
        struct dst_entry *dst;
-       struct flowi fl;
+       struct flowi6 fl6;
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
        if (info->priv) {
                if (info->priv->oif == -1)
                        return false;
-               fl.oif = info->priv->oif;
+               fl6.flowi6_oif = info->priv->oif;
        }
-       fl.fl6_dst = info->gw.in6;
-       fl.fl6_flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+       fl6.daddr = info->gw.in6;
+       fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
                           (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
-       dst = ip6_route_output(net, NULL, &fl);
+       dst = ip6_route_output(net, NULL, &fl6);
        if (dst == NULL)
                return false;
 
index 640678f..dcfd57e 100644 (file)
 #include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>
 
+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+       if (sk->sk_state != TCP_TIME_WAIT) {
+               if (inet_sk(sk)->transparent)
+                       return true;
+               sock_put(sk);
+       } else {
+               if (inet_twsk(sk)->tw_transparent)
+                       return true;
+               inet_twsk_put(inet_twsk(sk));
+       }
+       return false;
+}
+
 static inline __be32
 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 {
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                                           skb->dev, NFT_LOOKUP_LISTENER);
 
        /* NOTE: assign_sock consumes our sk reference */
-       if (sk && nf_tproxy_assign_sock(skb, sk)) {
+       if (sk && tproxy_sk_is_transparent(sk)) {
                /* This should be in a separate target, but we don't do multiple
                   targets on the same rule yet */
                skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
                         iph->protocol, &iph->daddr, ntohs(hp->dest),
                         &laddr, ntohs(lport), skb->mark);
+
+               nf_tproxy_assign_sock(skb, sk);
                return NF_ACCEPT;
        }
 
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
                                           par->in, NFT_LOOKUP_LISTENER);
 
        /* NOTE: assign_sock consumes our sk reference */
-       if (sk && nf_tproxy_assign_sock(skb, sk)) {
+       if (sk && tproxy_sk_is_transparent(sk)) {
                /* This should be in a separate target, but we don't do multiple
                   targets on the same rule yet */
                skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
                pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
                         tproto, &iph->saddr, ntohs(hp->source),
                         laddr, ntohs(lport), skb->mark);
+
+               nf_tproxy_assign_sock(skb, sk);
                return NF_ACCEPT;
        }
 
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
new file mode 100644 (file)
index 0000000..2220b85
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ *  iptables module to match inet_addr_type() of an ip.
+ *
+ *  Copyright (c) 2004 Patrick McHardy <kaber@trash.net>
+ *  (C) 2007 Laszlo Attila Toth <panther@balabit.hu>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <net/route.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_fib.h>
+#endif
+
+#include <linux/netfilter/xt_addrtype.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Xtables: address type match");
+MODULE_ALIAS("ipt_addrtype");
+MODULE_ALIAS("ip6t_addrtype");
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+{
+       u32 ret;
+
+       if (!rt)
+               return XT_ADDRTYPE_UNREACHABLE;
+
+       if (rt->rt6i_flags & RTF_REJECT)
+               ret = XT_ADDRTYPE_UNREACHABLE;
+       else
+               ret = 0;
+
+       if (rt->rt6i_flags & RTF_LOCAL)
+               ret |= XT_ADDRTYPE_LOCAL;
+       if (rt->rt6i_flags & RTF_ANYCAST)
+               ret |= XT_ADDRTYPE_ANYCAST;
+       return ret;
+}
+
+static bool match_type6(struct net *net, const struct net_device *dev,
+                               const struct in6_addr *addr, u16 mask)
+{
+       int addr_type = ipv6_addr_type(addr);
+
+       if ((mask & XT_ADDRTYPE_MULTICAST) &&
+           !(addr_type & IPV6_ADDR_MULTICAST))
+               return false;
+       if ((mask & XT_ADDRTYPE_UNICAST) && !(addr_type & IPV6_ADDR_UNICAST))
+               return false;
+       if ((mask & XT_ADDRTYPE_UNSPEC) && addr_type != IPV6_ADDR_ANY)
+               return false;
+
+       if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
+            XT_ADDRTYPE_UNREACHABLE) & mask) {
+               struct rt6_info *rt;
+               u32 type;
+               int ifindex = dev ? dev->ifindex : 0;
+
+               rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
+
+               type = xt_addrtype_rt6_to_type(rt);
+
+               dst_release(&rt->dst);
+               return !!(mask & type);
+       }
+       return true;
+}
+
+static bool
+addrtype_mt6(struct net *net, const struct net_device *dev,
+       const struct sk_buff *skb, const struct xt_addrtype_info_v1 *info)
+{
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       bool ret = true;
+
+       if (info->source)
+               ret &= match_type6(net, dev, &iph->saddr, info->source) ^
+                      (info->flags & XT_ADDRTYPE_INVERT_SOURCE);
+       if (ret && info->dest)
+               ret &= match_type6(net, dev, &iph->daddr, info->dest) ^
+                      !!(info->flags & XT_ADDRTYPE_INVERT_DEST);
+       return ret;
+}
+#endif
+
+static inline bool match_type(struct net *net, const struct net_device *dev,
+                             __be32 addr, u_int16_t mask)
+{
+       return !!(mask & (1 << inet_dev_addr_type(net, dev, addr)));
+}
+
+static bool
+addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       struct net *net = dev_net(par->in ? par->in : par->out);
+       const struct xt_addrtype_info *info = par->matchinfo;
+       const struct iphdr *iph = ip_hdr(skb);
+       bool ret = true;
+
+       if (info->source)
+               ret &= match_type(net, NULL, iph->saddr, info->source) ^
+                      info->invert_source;
+       if (info->dest)
+               ret &= match_type(net, NULL, iph->daddr, info->dest) ^
+                      info->invert_dest;
+
+       return ret;
+}
+
+static bool
+addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       struct net *net = dev_net(par->in ? par->in : par->out);
+       const struct xt_addrtype_info_v1 *info = par->matchinfo;
+       const struct iphdr *iph;
+       const struct net_device *dev = NULL;
+       bool ret = true;
+
+       if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN)
+               dev = par->in;
+       else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
+               dev = par->out;
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+       if (par->family == NFPROTO_IPV6)
+               return addrtype_mt6(net, dev, skb, info);
+#endif
+       iph = ip_hdr(skb);
+       if (info->source)
+               ret &= match_type(net, dev, iph->saddr, info->source) ^
+                      (info->flags & XT_ADDRTYPE_INVERT_SOURCE);
+       if (ret && info->dest)
+               ret &= match_type(net, dev, iph->daddr, info->dest) ^
+                      !!(info->flags & XT_ADDRTYPE_INVERT_DEST);
+       return ret;
+}
+
+static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
+{
+       struct xt_addrtype_info_v1 *info = par->matchinfo;
+
+       if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
+           info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
+               pr_info("both incoming and outgoing "
+                       "interface limitation cannot be selected\n");
+               return -EINVAL;
+       }
+
+       if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
+           (1 << NF_INET_LOCAL_IN)) &&
+           info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
+               pr_info("output interface limitation "
+                       "not valid in PREROUTING and INPUT\n");
+               return -EINVAL;
+       }
+
+       if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
+           (1 << NF_INET_LOCAL_OUT)) &&
+           info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
+               pr_info("input interface limitation "
+                       "not valid in POSTROUTING and OUTPUT\n");
+               return -EINVAL;
+       }
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+       if (par->family == NFPROTO_IPV6) {
+               if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
+                       pr_err("ipv6 BLACKHOLE matching not supported\n");
+                       return -EINVAL;
+               }
+               if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
+                       pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
+                       return -EINVAL;
+               }
+               if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
+                       pr_err("ipv6 does not support BROADCAST matching\n");
+                       return -EINVAL;
+               }
+       }
+#endif
+       return 0;
+}
+
+static struct xt_match addrtype_mt_reg[] __read_mostly = {
+       {
+               .name           = "addrtype",
+               .family         = NFPROTO_IPV4,
+               .match          = addrtype_mt_v0,
+               .matchsize      = sizeof(struct xt_addrtype_info),
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "addrtype",
+               .family         = NFPROTO_UNSPEC,
+               .revision       = 1,
+               .match          = addrtype_mt_v1,
+               .checkentry     = addrtype_mt_checkentry_v1,
+               .matchsize      = sizeof(struct xt_addrtype_info_v1),
+               .me             = THIS_MODULE
+       }
+};
+
+static int __init addrtype_mt_init(void)
+{
+       return xt_register_matches(addrtype_mt_reg,
+                                  ARRAY_SIZE(addrtype_mt_reg));
+}
+
+static void __exit addrtype_mt_exit(void)
+{
+       xt_unregister_matches(addrtype_mt_reg, ARRAY_SIZE(addrtype_mt_reg));
+}
+
+module_init(addrtype_mt_init);
+module_exit(addrtype_mt_exit);
index 5c5b6b9..c6d5a83 100644 (file)
 
 /* we will save the tuples of all connections we care about */
 struct xt_connlimit_conn {
-       struct list_head list;
-       struct nf_conntrack_tuple tuple;
+       struct hlist_node               node;
+       struct nf_conntrack_tuple       tuple;
+       union nf_inet_addr              addr;
 };
 
 struct xt_connlimit_data {
-       struct list_head iphash[256];
-       spinlock_t lock;
+       struct hlist_head       iphash[256];
+       spinlock_t              lock;
 };
 
 static u_int32_t connlimit_rnd __read_mostly;
-static bool connlimit_rnd_inited __read_mostly;
 
 static inline unsigned int connlimit_iphash(__be32 addr)
 {
@@ -101,9 +101,9 @@ static int count_them(struct net *net,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct xt_connlimit_conn *conn;
-       struct xt_connlimit_conn *tmp;
+       struct hlist_node *pos, *n;
        struct nf_conn *found_ct;
-       struct list_head *hash;
+       struct hlist_head *hash;
        bool addit = true;
        int matches = 0;
 
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
        rcu_read_lock();
 
        /* check the saved connections */
-       list_for_each_entry_safe(conn, tmp, hash, list) {
+       hlist_for_each_entry_safe(conn, pos, n, hash, node) {
                found    = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
                                                 &conn->tuple);
                found_ct = NULL;
@@ -135,7 +135,7 @@ static int count_them(struct net *net,
 
                if (found == NULL) {
                        /* this one is gone */
-                       list_del(&conn->list);
+                       hlist_del(&conn->node);
                        kfree(conn);
                        continue;
                }
@@ -146,12 +146,12 @@ static int count_them(struct net *net,
                         * closed already -> ditch it
                         */
                        nf_ct_put(found_ct);
-                       list_del(&conn->list);
+                       hlist_del(&conn->node);
                        kfree(conn);
                        continue;
                }
 
-               if (same_source_net(addr, mask, &conn->tuple.src.u3, family))
+               if (same_source_net(addr, mask, &conn->addr, family))
                        /* same source network -> be counted! */
                        ++matches;
                nf_ct_put(found_ct);
@@ -161,11 +161,12 @@ static int count_them(struct net *net,
 
        if (addit) {
                /* save the new connection in our list */
-               conn = kzalloc(sizeof(*conn), GFP_ATOMIC);
+               conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
                if (conn == NULL)
                        return -ENOMEM;
                conn->tuple = *tuple;
-               list_add(&conn->list, hash);
+               conn->addr = *addr;
+               hlist_add_head(&conn->node, hash);
                ++matches;
        }
 
@@ -186,17 +187,19 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
        ct = nf_ct_get(skb, &ctinfo);
        if (ct != NULL)
-               tuple_ptr = &ct->tuplehash[0].tuple;
+               tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                                    par->family, &tuple))
                goto hotdrop;
 
        if (par->family == NFPROTO_IPV6) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);
-               memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
+               memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
+                      &iph->daddr : &iph->saddr, sizeof(addr.ip6));
        } else {
                const struct iphdr *iph = ip_hdr(skb);
-               addr.ip = iph->saddr;
+               addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+                         iph->daddr : iph->saddr;
        }
 
        spin_lock_bh(&info->data->lock);
@@ -204,13 +207,12 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
                                 &info->mask, par->family);
        spin_unlock_bh(&info->data->lock);
 
-       if (connections < 0) {
+       if (connections < 0)
                /* kmalloc failed, drop it entirely */
-               par->hotdrop = true;
-               return false;
-       }
+               goto hotdrop;
 
-       return (connections > info->limit) ^ info->inverse;
+       return (connections > info->limit) ^
+              !!(info->flags & XT_CONNLIMIT_INVERT);
 
  hotdrop:
        par->hotdrop = true;
@@ -223,9 +225,13 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
        unsigned int i;
        int ret;
 
-       if (unlikely(!connlimit_rnd_inited)) {
-               get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
-               connlimit_rnd_inited = true;
+       if (unlikely(!connlimit_rnd)) {
+               u_int32_t rand;
+
+               do {
+                       get_random_bytes(&rand, sizeof(rand));
+               } while (!rand);
+               cmpxchg(&connlimit_rnd, 0, rand);
        }
        ret = nf_ct_l3proto_try_module_get(par->family);
        if (ret < 0) {
@@ -243,7 +249,7 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
 
        spin_lock_init(&info->data->lock);
        for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
-               INIT_LIST_HEAD(&info->data->iphash[i]);
+               INIT_HLIST_HEAD(&info->data->iphash[i]);
 
        return 0;
 }
@@ -252,15 +258,15 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 {
        const struct xt_connlimit_info *info = par->matchinfo;
        struct xt_connlimit_conn *conn;
-       struct xt_connlimit_conn *tmp;
-       struct list_head *hash = info->data->iphash;
+       struct hlist_node *pos, *n;
+       struct hlist_head *hash = info->data->iphash;
        unsigned int i;
 
        nf_ct_l3proto_module_put(par->family);
 
        for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
-               list_for_each_entry_safe(conn, tmp, &hash[i], list) {
-                       list_del(&conn->list);
+               hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) {
+                       hlist_del(&conn->node);
                        kfree(conn);
                }
        }
@@ -268,25 +274,38 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
        kfree(info->data);
 }
 
-static struct xt_match connlimit_mt_reg __read_mostly = {
-       .name       = "connlimit",
-       .revision   = 0,
-       .family     = NFPROTO_UNSPEC,
-       .checkentry = connlimit_mt_check,
-       .match      = connlimit_mt,
-       .matchsize  = sizeof(struct xt_connlimit_info),
-       .destroy    = connlimit_mt_destroy,
-       .me         = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+       {
+               .name       = "connlimit",
+               .revision   = 0,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = connlimit_mt_check,
+               .match      = connlimit_mt,
+               .matchsize  = sizeof(struct xt_connlimit_info),
+               .destroy    = connlimit_mt_destroy,
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "connlimit",
+               .revision   = 1,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = connlimit_mt_check,
+               .match      = connlimit_mt,
+               .matchsize  = sizeof(struct xt_connlimit_info),
+               .destroy    = connlimit_mt_destroy,
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init connlimit_mt_init(void)
 {
-       return xt_register_match(&connlimit_mt_reg);
+       return xt_register_matches(connlimit_mt_reg,
+              ARRAY_SIZE(connlimit_mt_reg));
 }
 
 static void __exit connlimit_mt_exit(void)
 {
-       xt_unregister_match(&connlimit_mt_reg);
+       xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
 }
 
 module_init(connlimit_mt_init);
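
The connlimit_mt_check() hunk above replaces the separate connlimit_rnd_inited flag with a lock-free one-shot initialisation: a non-zero random value is generated and published with cmpxchg(), so concurrent rule insertions race harmlessly and zero stays reserved as the "uninitialised" marker. A minimal user-space analogue of that pattern, using C11 atomics in place of the kernel's cmpxchg() (all names below are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static _Atomic uint32_t hash_seed;      /* 0 means "not initialised yet" */

static uint32_t get_hash_seed(void)
{
        uint32_t seed = atomic_load(&hash_seed);

        if (seed == 0) {
                uint32_t rnd;
                uint32_t expected = 0;

                do {
                        rnd = (uint32_t)rand();  /* stand-in for get_random_bytes() */
                } while (rnd == 0);              /* keep 0 reserved as "uninitialised" */

                /* Publish the seed exactly once; if another thread won the
                 * race, 'expected' is updated to the winning value. */
                if (!atomic_compare_exchange_strong(&hash_seed, &expected, rnd))
                        rnd = expected;
                seed = rnd;
        }
        return seed;
}

int main(void)
{
        srand((unsigned)time(NULL));
        printf("seed = %u\n", get_hash_seed());
        printf("seed = %u (same value on every later call)\n", get_hash_seed());
        return 0;
}
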
index e536710..2c0086a 100644 (file)
@@ -112,6 +112,54 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
        return true;
 }
 
+static inline bool
+port_match(u16 min, u16 max, u16 port, bool invert)
+{
+       return (port >= min && port <= max) ^ invert;
+}
+
+static inline bool
+ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
+                      const struct nf_conn *ct)
+{
+       const struct nf_conntrack_tuple *tuple;
+
+       tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+       if ((info->match_flags & XT_CONNTRACK_PROTO) &&
+           (nf_ct_protonum(ct) == info->l4proto) ^
+           !(info->invert_flags & XT_CONNTRACK_PROTO))
+               return false;
+
+       /* Shortcut to match all recognized protocols by using ->src.all. */
+       if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
+           !port_match(info->origsrc_port, info->origsrc_port_high,
+                       ntohs(tuple->src.u.all),
+                       info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
+               return false;
+
+       if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
+           !port_match(info->origdst_port, info->origdst_port_high,
+                       ntohs(tuple->dst.u.all),
+                       info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
+               return false;
+
+       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+       if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
+           !port_match(info->replsrc_port, info->replsrc_port_high,
+                       ntohs(tuple->src.u.all),
+                       info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
+               return false;
+
+       if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
+           !port_match(info->repldst_port, info->repldst_port_high,
+                       ntohs(tuple->dst.u.all),
+                       info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
+               return false;
+
+       return true;
+}
+
 static bool
 conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
              u16 state_mask, u16 status_mask)
@@ -170,8 +218,13 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
                    !(info->invert_flags & XT_CONNTRACK_REPLDST))
                        return false;
 
-       if (!ct_proto_port_check(info, ct))
-               return false;
+       if (par->match->revision != 3) {
+               if (!ct_proto_port_check(info, ct))
+                       return false;
+       } else {
+               if (!ct_proto_port_check_v3(par->matchinfo, ct))
+                       return false;
+       }
 
        if ((info->match_flags & XT_CONNTRACK_STATUS) &&
            (!!(status_mask & ct->status) ^
@@ -207,10 +260,23 @@ conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
        return conntrack_mt(skb, par, info->state_mask, info->status_mask);
 }
 
+static bool
+conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
+
+       return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+}
+
 static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
        int ret;
 
+       if (strcmp(par->table, "raw") == 0) {
+               pr_info("conntrack state is undetermined in the raw table\n");
+               return -EINVAL;
+       }
+
        ret = nf_ct_l3proto_try_module_get(par->family);
        if (ret < 0)
                pr_info("cannot load conntrack support for proto=%u\n",
@@ -244,6 +310,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
                .destroy    = conntrack_mt_destroy,
                .me         = THIS_MODULE,
        },
+       {
+               .name       = "conntrack",
+               .revision   = 3,
+               .family     = NFPROTO_UNSPEC,
+               .matchsize  = sizeof(struct xt_conntrack_mtinfo3),
+               .match      = conntrack_mt_v3,
+               .checkentry = conntrack_mt_check,
+               .destroy    = conntrack_mt_destroy,
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init conntrack_mt_init(void)
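
The port_match() helper added above folds the range test and the per-flag inversion into a single expression, (port >= min && port <= max) ^ invert. A tiny stand-alone illustration of that truth table (user-space only, no kernel types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel helper: true when the port lies inside
 * [min, max], flipped when 'invert' is set. */
static bool port_match(uint16_t min, uint16_t max, uint16_t port, bool invert)
{
        return (port >= min && port <= max) ^ invert;
}

int main(void)
{
        /* Match ports 1000-2000, with and without inversion. */
        printf("%d\n", port_match(1000, 2000, 1500, false)); /* 1: in range      */
        printf("%d\n", port_match(1000, 2000, 2500, false)); /* 0: out of range  */
        printf("%d\n", port_match(1000, 2000, 1500, true));  /* 0: inverted hit  */
        printf("%d\n", port_match(1000, 2000, 2500, true));  /* 1: inverted miss */
        return 0;
}
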
index b39db8a..c7a2e54 100644 (file)
@@ -22,6 +22,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
 MODULE_DESCRIPTION("Xtables: CPU match");
+MODULE_ALIAS("ipt_cpu");
+MODULE_ALIAS("ip6t_cpu");
 
 static int cpu_mt_check(const struct xt_mtchk_param *par)
 {
diff --git a/net/netfilter/xt_devgroup.c b/net/netfilter/xt_devgroup.c
new file mode 100644 (file)
index 0000000..d9202cd
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/netfilter/xt_devgroup.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: Device group match");
+MODULE_ALIAS("ipt_devgroup");
+MODULE_ALIAS("ip6t_devgroup");
+
+static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_devgroup_info *info = par->matchinfo;
+
+       if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+           (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^
+            ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
+               return false;
+
+       if (info->flags & XT_DEVGROUP_MATCH_DST &&
+           (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^
+            ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
+               return false;
+
+       return true;
+}
+
+static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
+{
+       const struct xt_devgroup_info *info = par->matchinfo;
+
+       if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
+                           XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
+               return -EINVAL;
+
+       if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+           par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+                              (1 << NF_INET_LOCAL_IN) |
+                              (1 << NF_INET_FORWARD)))
+               return -EINVAL;
+
+       if (info->flags & XT_DEVGROUP_MATCH_DST &&
+           par->hook_mask & ~((1 << NF_INET_FORWARD) |
+                              (1 << NF_INET_LOCAL_OUT) |
+                              (1 << NF_INET_POST_ROUTING)))
+               return -EINVAL;
+
+       return 0;
+}
+
+static struct xt_match devgroup_mt_reg __read_mostly = {
+       .name           = "devgroup",
+       .match          = devgroup_mt,
+       .checkentry     = devgroup_mt_checkentry,
+       .matchsize      = sizeof(struct xt_devgroup_info),
+       .family         = NFPROTO_UNSPEC,
+       .me             = THIS_MODULE
+};
+
+static int __init devgroup_mt_init(void)
+{
+       return xt_register_match(&devgroup_mt_reg);
+}
+
+static void __exit devgroup_mt_exit(void)
+{
+       xt_unregister_match(&devgroup_mt_reg);
+}
+
+module_init(devgroup_mt_init);
+module_exit(devgroup_mt_exit);
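
The devgroup match compares the configured group against the interface group under a mask and then applies the per-direction invert flag; the ?: in the match function forces both operands of the XOR to 0/1. A minimal sketch of that comparison in isolation (the values and names here are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true when (dev_group agrees with cfg_group under mask) XOR invert */
static bool group_matches(uint32_t cfg_group, uint32_t mask,
                          uint32_t dev_group, bool invert)
{
        return (((cfg_group ^ dev_group) & mask) ? 0 : 1) ^ invert;
}

int main(void)
{
        /* Interface in group 0x10; rule looks for group 0x10 with a full mask. */
        printf("%d\n", group_matches(0x10, 0xffffffff, 0x10, false)); /* 1: same group     */
        printf("%d\n", group_matches(0x10, 0xffffffff, 0x20, false)); /* 0: different group */
        printf("%d\n", group_matches(0x10, 0xffffffff, 0x20, true));  /* 1: inverted        */
        return 0;
}
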
index 88f7c35..b46626c 100644 (file)
@@ -31,7 +31,7 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
                        pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
                                 &iph->saddr,
                                 (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
-                                &info->src_max.ip,
+                                &info->src_min.ip,
                                 &info->src_max.ip);
                        return false;
                }
@@ -53,15 +53,13 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
 }
 
 static inline int
-iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
+iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
 {
        unsigned int i;
-       int r;
 
        for (i = 0; i < 4; ++i) {
-               r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
-               if (r != 0)
-                       return r;
+               if (a->s6_addr32[i] != b->s6_addr32[i])
+                       return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
        }
 
        return 0;
@@ -75,18 +73,30 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
        bool m;
 
        if (info->flags & IPRANGE_SRC) {
-               m  = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
-               m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
+               m  = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
+               m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
                m ^= !!(info->flags & IPRANGE_SRC_INV);
-               if (m)
+               if (m) {
+                       pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
+                                &iph->saddr,
+                                (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
+                                &info->src_min.in6,
+                                &info->src_max.in6);
                        return false;
+               }
        }
        if (info->flags & IPRANGE_DST) {
-               m  = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
-               m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
+               m  = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
+               m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
                m ^= !!(info->flags & IPRANGE_DST_INV);
-               if (m)
+               if (m) {
+                       pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
+                                &iph->daddr,
+                                (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
+                                &info->dst_min.in6,
+                                &info->dst_max.in6);
                        return false;
+               }
        }
        return true;
 }
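
The rewritten IPv6 comparison above drops the old subtraction-based helper, whose int result could wrap for widely separated addresses, in favour of a plain lexicographic less-than over the four 32-bit words. Since the address is stored in network byte order, a byte-wise compare yields the same ordering; a self-contained user-space sketch (illustrative only):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Lexicographic "a < b"; equivalent to the kernel's word-by-word
 * ntohl() compare because the bytes are stored big-endian. */
static int ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
{
        return memcmp(a->s6_addr, b->s6_addr, sizeof(a->s6_addr)) < 0;
}

/* In-range test as used by the match: min <= addr <= max. */
static int ipv6_in_range(const struct in6_addr *addr,
                         const struct in6_addr *min,
                         const struct in6_addr *max)
{
        return !ipv6_lt(addr, min) && !ipv6_lt(max, addr);
}

int main(void)
{
        struct in6_addr lo, hi, x;

        inet_pton(AF_INET6, "2001:db8::1", &lo);
        inet_pton(AF_INET6, "2001:db8::ff", &hi);
        inet_pton(AF_INET6, "2001:db8::42", &x);

        printf("in range: %d\n", ipv6_in_range(&x, &lo, &hi));  /* 1 */
        printf("in range: %d\n", ipv6_in_range(&hi, &x, &lo));  /* 0 */
        return 0;
}
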
index 9127a3d..bb10b07 100644 (file)
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
        /*
         * Check if the packet belongs to an existing entry
         */
-       cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+       cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
        if (unlikely(cp == NULL)) {
                match = false;
                goto out;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
new file mode 100644 (file)
index 0000000..061d48c
--- /dev/null
@@ -0,0 +1,359 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                         Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module which implements the set match and SET target
+ * for netfilter/iptables. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_set.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("Xtables: IP set match and target module");
+MODULE_ALIAS("xt_SET");
+MODULE_ALIAS("ipt_set");
+MODULE_ALIAS("ip6t_set");
+MODULE_ALIAS("ipt_SET");
+MODULE_ALIAS("ip6t_SET");
+
+static inline int
+match_set(ip_set_id_t index, const struct sk_buff *skb,
+         u8 pf, u8 dim, u8 flags, int inv)
+{
+       if (ip_set_test(index, skb, pf, dim, flags))
+               inv = !inv;
+       return inv;
+}
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static bool
+set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match_v0 *info = par->matchinfo;
+
+       return match_set(info->match_set.index, skb, par->family,
+                        info->match_set.u.compat.dim,
+                        info->match_set.u.compat.flags,
+                        info->match_set.u.compat.flags & IPSET_INV_MATCH);
+}
+
+static void
+compat_flags(struct xt_set_info_v0 *info)
+{
+       u_int8_t i;
+
+       /* Fill out compatibility data according to enum ip_set_kopt */
+       info->u.compat.dim = IPSET_DIM_ZERO;
+       if (info->u.flags[0] & IPSET_MATCH_INV)
+               info->u.compat.flags |= IPSET_INV_MATCH;
+       for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+               info->u.compat.dim++;
+               if (info->u.flags[i] & IPSET_SRC)
+                       info->u.compat.flags |= (1<<info->u.compat.dim);
+       }
+}
+
+static int
+set_match_v0_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_set_info_match_v0 *info = par->matchinfo;
+       ip_set_id_t index;
+
+       index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+       if (index == IPSET_INVALID_ID) {
+               pr_warning("Cannot find set identified by id %u to match\n",
+                          info->match_set.index);
+               return -ENOENT;
+       }
+       if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+               pr_warning("Protocol error: set match dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       /* Fill out compatibility data */
+       compat_flags(&info->match_set);
+
+       return 0;
+}
+
+static void
+set_match_v0_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_set_info_match_v0 *info = par->matchinfo;
+
+       ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct xt_set_info_target_v0 *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_add(info->add_set.index, skb, par->family,
+                          info->add_set.u.compat.dim,
+                          info->add_set.u.compat.flags);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_del(info->del_set.index, skb, par->family,
+                          info->del_set.u.compat.dim,
+                          info->del_set.u.compat.flags);
+
+       return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+       struct xt_set_info_target_v0 *info = par->targinfo;
+       ip_set_id_t index;
+
+       if (info->add_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find add_set index %u as target\n",
+                                  info->add_set.index);
+                       return -ENOENT;
+               }
+       }
+
+       if (info->del_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find del_set index %u as target\n",
+                                  info->del_set.index);
+                       return -ENOENT;
+               }
+       }
+       if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+           info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+               pr_warning("Protocol error: SET target dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       /* Fill out compatibility data */
+       compat_flags(&info->add_set);
+       compat_flags(&info->del_set);
+
+       return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+       const struct xt_set_info_target_v0 *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->add_set.index);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->del_set.index);
+}
+
+/* Revision 1: current interface to netfilter/iptables */
+
+static bool
+set_match(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match *info = par->matchinfo;
+
+       return match_set(info->match_set.index, skb, par->family,
+                        info->match_set.dim,
+                        info->match_set.flags,
+                        info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_set_info_match *info = par->matchinfo;
+       ip_set_id_t index;
+
+       index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+       if (index == IPSET_INVALID_ID) {
+               pr_warning("Cannot find set identified by id %u to match\n",
+                          info->match_set.index);
+               return -ENOENT;
+       }
+       if (info->match_set.dim > IPSET_DIM_MAX) {
+               pr_warning("Protocol error: set match dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static void
+set_match_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_set_info_match *info = par->matchinfo;
+
+       ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct xt_set_info_target *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_add(info->add_set.index,
+                          skb, par->family,
+                          info->add_set.dim,
+                          info->add_set.flags);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_del(info->del_set.index,
+                          skb, par->family,
+                          info->del_set.dim,
+                          info->del_set.flags);
+
+       return XT_CONTINUE;
+}
+
+static int
+set_target_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct xt_set_info_target *info = par->targinfo;
+       ip_set_id_t index;
+
+       if (info->add_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find add_set index %u as target\n",
+                                  info->add_set.index);
+                       return -ENOENT;
+               }
+       }
+
+       if (info->del_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find del_set index %u as target\n",
+                                  info->del_set.index);
+                       return -ENOENT;
+               }
+       }
+       if (info->add_set.dim > IPSET_DIM_MAX ||
+           info->del_set.dim > IPSET_DIM_MAX) {
+               pr_warning("Protocol error: SET target dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static void
+set_target_destroy(const struct xt_tgdtor_param *par)
+{
+       const struct xt_set_info_target *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->add_set.index);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->del_set.index);
+}
+
+static struct xt_match set_matches[] __read_mostly = {
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV4,
+               .revision       = 0,
+               .match          = set_match_v0,
+               .matchsize      = sizeof(struct xt_set_info_match_v0),
+               .checkentry     = set_match_v0_checkentry,
+               .destroy        = set_match_v0_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV4,
+               .revision       = 1,
+               .match          = set_match,
+               .matchsize      = sizeof(struct xt_set_info_match),
+               .checkentry     = set_match_checkentry,
+               .destroy        = set_match_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV6,
+               .revision       = 1,
+               .match          = set_match,
+               .matchsize      = sizeof(struct xt_set_info_match),
+               .checkentry     = set_match_checkentry,
+               .destroy        = set_match_destroy,
+               .me             = THIS_MODULE
+       },
+};
+
+static struct xt_target set_targets[] __read_mostly = {
+       {
+               .name           = "SET",
+               .revision       = 0,
+               .family         = NFPROTO_IPV4,
+               .target         = set_target_v0,
+               .targetsize     = sizeof(struct xt_set_info_target_v0),
+               .checkentry     = set_target_v0_checkentry,
+               .destroy        = set_target_v0_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "SET",
+               .revision       = 1,
+               .family         = NFPROTO_IPV4,
+               .target         = set_target,
+               .targetsize     = sizeof(struct xt_set_info_target),
+               .checkentry     = set_target_checkentry,
+               .destroy        = set_target_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "SET",
+               .revision       = 1,
+               .family         = NFPROTO_IPV6,
+               .target         = set_target,
+               .targetsize     = sizeof(struct xt_set_info_target),
+               .checkentry     = set_target_checkentry,
+               .destroy        = set_target_destroy,
+               .me             = THIS_MODULE
+       },
+};
+
+static int __init xt_set_init(void)
+{
+       int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
+
+       if (!ret) {
+               ret = xt_register_targets(set_targets,
+                                         ARRAY_SIZE(set_targets));
+               if (ret)
+                       xt_unregister_matches(set_matches,
+                                             ARRAY_SIZE(set_matches));
+       }
+       return ret;
+}
+
+static void __exit xt_set_fini(void)
+{
+       xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
+       xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
+}
+
+module_init(xt_set_init);
+module_exit(xt_set_fini);
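
xt_set_init() above follows the usual two-stage registration with rollback: register the match array, then the target array, and unregister the matches again if the second stage fails, so the module is never left half-registered. A generic sketch of that error-handling shape (the setup_*/teardown_* names are placeholders, not kernel APIs):

#include <stdio.h>

/* Placeholder "subsystem" hooks; in the patch these are
 * xt_register_matches()/xt_register_targets() and their unregister
 * counterparts. Return 0 on success, negative on failure. */
static int setup_matches(void)     { puts("matches registered");   return 0; }
static void teardown_matches(void) { puts("matches unregistered"); }
static int setup_targets(void)     { puts("targets registered");   return 0; }
static void teardown_targets(void) { puts("targets unregistered"); }

static int module_init_sketch(void)
{
        int ret = setup_matches();

        if (ret)
                return ret;             /* nothing to roll back yet */

        ret = setup_targets();
        if (ret)
                teardown_matches();     /* roll back the first stage */
        return ret;
}

static void module_exit_sketch(void)
{
        teardown_matches();
        teardown_targets();
}

int main(void)
{
        if (module_init_sketch() == 0)
                module_exit_sketch();
        return 0;
}
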
index 00d6ae8..9cc4635 100644 (file)
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
+static void
+xt_socket_put_sk(struct sock *sk)
+{
+       if (sk->sk_state == TCP_TIME_WAIT)
+               inet_twsk_put(inet_twsk(sk));
+       else
+               sock_put(sk);
+}
+
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
                    u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                                       (sk->sk_state == TCP_TIME_WAIT &&
                                        inet_twsk(sk)->tw_transparent));
 
-               nf_tproxy_put_sock(sk);
+               xt_socket_put_sk(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                                       (sk->sk_state == TCP_TIME_WAIT &&
                                        inet_twsk(sk)->tw_transparent));
 
-               nf_tproxy_put_sock(sk);
+               xt_socket_put_sk(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
index 6caef8b..f4fc4c9 100644 (file)
@@ -49,9 +49,9 @@
 static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
                                            struct netlbl_audit *audit_info)
 {
-       audit_info->secid = NETLINK_CB(skb).sid;
-       audit_info->loginuid = NETLINK_CB(skb).loginuid;
-       audit_info->sessionid = NETLINK_CB(skb).sessionid;
+       security_task_getsecid(current, &audit_info->secid);
+       audit_info->loginuid = audit_get_loginuid(current);
+       audit_info->sessionid = audit_get_sessionid(current);
 }
 
 /* NetLabel NETLINK I/O functions */
index 478181d..c8f35b5 100644 (file)
@@ -1362,17 +1362,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        NETLINK_CB(skb).pid     = nlk->pid;
        NETLINK_CB(skb).dst_group = dst_group;
-       NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
-       NETLINK_CB(skb).sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &(NETLINK_CB(skb).sid));
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
 
-       /* What can I do? Netlink is asynchronous, so that
-          we will have to save current capabilities to
-          check them, when this message will be delivered
-          to corresponding kernel module.   --ANK (980802)
-        */
-
        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
@@ -1407,7 +1398,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb, *data_skb;
-       int err;
+       int err, ret;
 
        if (flags&MSG_OOB)
                return -EOPNOTSUPP;
@@ -1470,8 +1461,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 
        skb_free_datagram(sk, skb);
 
-       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
-               netlink_dump(sk);
+       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+               ret = netlink_dump(sk);
+               if (ret) {
+                       sk->sk_err = ret;
+                       sk->sk_error_report(sk);
+               }
+       }
 
        scm_recv(sock, msg, siocb->scm, flags);
 out:
@@ -1736,6 +1732,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;
+       int ret;
 
        cb = kzalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
@@ -1764,9 +1761,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        nlk->cb = cb;
        mutex_unlock(nlk->cb_mutex);
 
-       netlink_dump(sk);
+       ret = netlink_dump(sk);
+
        sock_put(sk);
 
+       if (ret)
+               return ret;
+
        /* We successfully started a dump, by returning -EINTR we
         * signal not to send ACK even if it was requested.
         */
index 91cb1d7..b5362e9 100644 (file)
@@ -164,7 +164,6 @@ struct packet_mreq_max {
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                int closing, int tx_ring);
 
-#define PGV_FROM_VMALLOC 1
 struct pgv {
        char *buffer;
 };
@@ -466,7 +465,7 @@ retry:
         */
 
        err = -EMSGSIZE;
-       if (len > dev->mtu + dev->hard_header_len)
+       if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
                goto out_unlock;
 
        if (!skb) {
@@ -497,6 +496,19 @@ retry:
                goto retry;
        }
 
+       if (len > (dev->mtu + dev->hard_header_len)) {
+               /* Earlier code assumed this would be a VLAN pkt,
+                * double-check this now that we have the actual
+                * packet in hand.
+                */
+               struct ethhdr *ehdr;
+               skb_reset_mac_header(skb);
+               ehdr = eth_hdr(skb);
+               if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+                       err = -EMSGSIZE;
+                       goto out_unlock;
+               }
+       }
 
        skb->protocol = proto;
        skb->dev = dev;
@@ -523,11 +535,11 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
 {
        struct sk_filter *filter;
 
-       rcu_read_lock_bh();
-       filter = rcu_dereference_bh(sk->sk_filter);
+       rcu_read_lock();
+       filter = rcu_dereference(sk->sk_filter);
        if (filter != NULL)
                res = sk_run_filter(skb, filter->insns);
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return res;
 }
@@ -954,7 +966,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 
 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
-       struct socket *sock;
        struct sk_buff *skb;
        struct net_device *dev;
        __be16 proto;
@@ -966,8 +977,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        int len_sum = 0;
        int status = 0;
 
-       sock = po->sk.sk_socket;
-
        mutex_lock(&po->pg_vec_lock);
 
        err = -EBUSY;
@@ -1200,7 +1209,7 @@ static int packet_snd(struct socket *sock,
        }
 
        err = -EMSGSIZE;
-       if (!gso_type && (len > dev->mtu+reserve))
+       if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
                goto out_unlock;
 
        err = -ENOBUFS;
@@ -1225,6 +1234,20 @@ static int packet_snd(struct socket *sock,
        if (err < 0)
                goto out_free;
 
+       if (!gso_type && (len > dev->mtu + reserve)) {
+               /* Earlier code assumed this would be a VLAN pkt,
+                * double-check this now that we have the actual
+                * packet in hand.
+                */
+               struct ethhdr *ehdr;
+               skb_reset_mac_header(skb);
+               ehdr = eth_hdr(skb);
+               if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+                       err = -EMSGSIZE;
+                       goto out_free;
+               }
+       }
+
        skb->protocol = proto;
        skb->dev = dev;
        skb->priority = sk->sk_priority;
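
Both af_packet hunks above relax the length check by VLAN_HLEN up front and then, once the frame has actually been built, re-verify that an over-MTU frame really carries an 802.1Q ethertype. A stripped-down user-space sketch of that two-step check (the constants are the standard Ethernet ones; no kernel APIs involved):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN     6
#define ETH_P_8021Q  0x8100
#define VLAN_HLEN    4

struct eth_hdr {
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
        uint16_t proto;          /* network byte order */
} __attribute__((packed));

/* Returns 0 if the frame may be sent, -1 (EMSGSIZE-style) otherwise. */
static int check_len(const uint8_t *frame, size_t len,
                     size_t mtu, size_t hard_header_len)
{
        const struct eth_hdr *eh = (const struct eth_hdr *)frame;

        /* First pass: allow up to VLAN_HLEN of slack. */
        if (len > mtu + hard_header_len + VLAN_HLEN)
                return -1;

        /* Second pass: the slack is only legitimate for 802.1Q frames. */
        if (len > mtu + hard_header_len && eh->proto != htons(ETH_P_8021Q))
                return -1;

        return 0;
}

int main(void)
{
        uint8_t frame[2048] = {0};
        struct eth_hdr *eh = (struct eth_hdr *)frame;
        const size_t mtu = 1500, hdr = sizeof(struct eth_hdr); /* 14 */

        eh->proto = htons(0x0800);                         /* plain IPv4 frame */
        printf("%d\n", check_len(frame, 1514, mtu, hdr));  /*  0: at the limit        */
        printf("%d\n", check_len(frame, 1518, mtu, hdr));  /* -1: over MTU, untagged  */

        eh->proto = htons(ETH_P_8021Q);                    /* VLAN-tagged frame */
        printf("%d\n", check_len(frame, 1518, mtu, hdr));  /*  0: tag allows the slack */
        printf("%d\n", check_len(frame, 1522, mtu, hdr));  /* -1: beyond even the slack */
        return 0;
}
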
index 0d9b8a2..6ec7d55 100644 (file)
@@ -14,15 +14,3 @@ config PHONET
 
          To compile this driver as a module, choose M here: the module
          will be called phonet. If unsure, say N.
-
-config PHONET_PIPECTRLR
-       bool "Phonet Pipe Controller (EXPERIMENTAL)"
-       depends on PHONET && EXPERIMENTAL
-       default N
-       help
-         The Pipe Controller implementation in Phonet stack to support Pipe
-         data with Nokia Slim modems like WG2.5 used on ST-Ericsson U8500
-         platform.
-
-         This option is incompatible with older Nokia modems.
-         Say N here unless you really know what you are doing.
index 1072b2c..c6fffd9 100644 (file)
@@ -110,6 +110,7 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        pn = pn_sk(sk);
        pn->sobject = 0;
+       pn->dobject = 0;
        pn->resource = 0;
        sk->sk_prot->init(sk);
        err = 0;
@@ -194,11 +195,7 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
        if (skb->pkt_type == PACKET_LOOPBACK) {
                skb_reset_mac_header(skb);
                skb_orphan(skb);
-               if (irq)
-                       netif_rx(skb);
-               else
-                       netif_rx_ni(skb);
-               err = 0;
+               err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0;
        } else {
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                        NULL, NULL, skb->len);
@@ -207,6 +204,8 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
                        goto drop;
                }
                err = dev_queue_xmit(skb);
+               if (unlikely(err > 0))
+                       err = net_xmit_errno(err);
        }
 
        return err;
@@ -242,8 +241,18 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        struct net_device *dev;
        struct pn_sock *pn = pn_sk(sk);
        int err;
-       u16 src;
-       u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR;
+       u16 src, dst;
+       u8 daddr, saddr, res;
+
+       src = pn->sobject;
+       if (target != NULL) {
+               dst = pn_sockaddr_get_object(target);
+               res = pn_sockaddr_get_resource(target);
+       } else {
+               dst = pn->dobject;
+               res = pn->resource;
+       }
+       daddr = pn_addr(dst);
 
        err = -EHOSTUNREACH;
        if (sk->sk_bound_dev_if)
@@ -251,10 +260,9 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        else if (phonet_address_lookup(net, daddr) == 0) {
                dev = phonet_device_get(net);
                skb->pkt_type = PACKET_LOOPBACK;
-       } else if (pn_sockaddr_get_object(target) == 0) {
+       } else if (dst == 0) {
                /* Resource routing (small race until phonet_rcv()) */
-               struct sock *sk = pn_find_sock_by_res(net,
-                                                       target->spn_resource);
+               struct sock *sk = pn_find_sock_by_res(net, res);
                if (sk) {
                        sock_put(sk);
                        dev = phonet_device_get(net);
@@ -271,12 +279,10 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        if (saddr == PN_NO_ADDR)
                goto drop;
 
-       src = pn->sobject;
        if (!pn_addr(src))
                src = pn_object(saddr, pn_obj(src));
 
-       err = pn_send(skb, dev, pn_sockaddr_get_object(target),
-                       src, pn_sockaddr_get_resource(target), 0);
+       err = pn_send(skb, dev, dst, src, res, 0);
        dev_put(dev);
        return err;
 
index 3e60f2e..f17fd84 100644 (file)
@@ -42,7 +42,7 @@
  * TCP_ESTABLISHED     connected pipe in enabled state
  *
  * pep_sock locking:
- *  - sk_state, ackq, hlist: sock lock needed
+ *  - sk_state, hlist: sock lock needed
  *  - listener: read only
  *  - pipe_handle: read only
  */
 #define CREDITS_MAX    10
 #define CREDITS_THR    7
 
-static const struct sockaddr_pn pipe_srv = {
-       .spn_family = AF_PHONET,
-       .spn_resource = 0xD9, /* pipe service */
-};
-
 #define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
 
 /* Get the next TLV sub-block. */
@@ -82,236 +77,95 @@ static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
        return data;
 }
 
-static int pep_reply(struct sock *sk, struct sk_buff *oskb,
-                       u8 code, const void *data, int len, gfp_t priority)
+static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
+                                       int len, gfp_t priority)
 {
-       const struct pnpipehdr *oph = pnp_hdr(oskb);
-       struct pnpipehdr *ph;
-       struct sk_buff *skb;
-
-       skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+       struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
        if (!skb)
-               return -ENOMEM;
+               return NULL;
        skb_set_owner_w(skb, sk);
 
        skb_reserve(skb, MAX_PNPIPE_HEADER);
        __skb_put(skb, len);
-       skb_copy_to_linear_data(skb, data, len);
-       __skb_push(skb, sizeof(*ph));
+       skb_copy_to_linear_data(skb, payload, len);
+       __skb_push(skb, sizeof(struct pnpipehdr));
        skb_reset_transport_header(skb);
-       ph = pnp_hdr(skb);
-       ph->utid = oph->utid;
-       ph->message_id = oph->message_id + 1; /* REQ -> RESP */
-       ph->pipe_handle = oph->pipe_handle;
-       ph->error_code = code;
-
-       return pn_skb_send(sk, skb, &pipe_srv);
-}
-
-#define PAD 0x00
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-static u8 pipe_negotiate_fc(u8 *host_fc, u8 *remote_fc, int len)
-{
-       int i, j;
-       u8 base_fc, final_fc;
-
-       for (i = 0; i < len; i++) {
-               base_fc = host_fc[i];
-               for (j = 0; j < len; j++) {
-                       if (remote_fc[j] == base_fc) {
-                               final_fc = base_fc;
-                               goto done;
-                       }
-               }
-       }
-       return -EINVAL;
-
-done:
-       return final_fc;
-
-}
-
-static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb,
-               u8 *pref_rx_fc, u8 *req_tx_fc)
-{
-       struct pnpipehdr *hdr;
-       u8 n_sb;
-
-       if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
-               return -EINVAL;
-
-       hdr = pnp_hdr(skb);
-       n_sb = hdr->data[4];
-
-       __skb_pull(skb, sizeof(*hdr) + 4);
-       while (n_sb > 0) {
-               u8 type, buf[3], len = sizeof(buf);
-               u8 *data = pep_get_sb(skb, &type, &len, buf);
-
-               if (data == NULL)
-                       return -EINVAL;
-
-               switch (type) {
-               case PN_PIPE_SB_REQUIRED_FC_TX:
-                       if (len < 3 || (data[2] | data[3] | data[4]) > 3)
-                               break;
-                       req_tx_fc[0] = data[2];
-                       req_tx_fc[1] = data[3];
-                       req_tx_fc[2] = data[4];
-                       break;
-
-               case PN_PIPE_SB_PREFERRED_FC_RX:
-                       if (len < 3 || (data[2] | data[3] | data[4]) > 3)
-                               break;
-                       pref_rx_fc[0] = data[2];
-                       pref_rx_fc[1] = data[3];
-                       pref_rx_fc[2] = data[4];
-                       break;
-
-               }
-               n_sb--;
-       }
-       return 0;
+       return skb;
 }
 
-static int pipe_handler_send_req(struct sock *sk, u8 utid,
-               u8 msg_id, gfp_t priority)
+static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
+                       const void *data, int len, gfp_t priority)
 {
-       int len;
+       const struct pnpipehdr *oph = pnp_hdr(oskb);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
-       struct pep_sock *pn = pep_sk(sk);
-
-       static const u8 data[4] = {
-               PAD, PAD, PAD, PAD,
-       };
+       struct sockaddr_pn peer;
 
-       switch (msg_id) {
-       case PNS_PEP_CONNECT_REQ:
-               len = sizeof(data);
-               break;
-
-       case PNS_PEP_DISCONNECT_REQ:
-       case PNS_PEP_ENABLE_REQ:
-       case PNS_PEP_DISABLE_REQ:
-               len = 0;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+       skb = pep_alloc_skb(sk, data, len, priority);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       if (len) {
-               __skb_put(skb, len);
-               skb_copy_to_linear_data(skb, data, len);
-       }
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
-       ph->utid = utid;
-       ph->message_id = msg_id;
-       ph->pipe_handle = pn->pipe_handle;
-       ph->error_code = PN_PIPE_NO_ERROR;
+       ph->utid = oph->utid;
+       ph->message_id = oph->message_id + 1; /* REQ -> RESP */
+       ph->pipe_handle = oph->pipe_handle;
+       ph->error_code = code;
 
-       return pn_skb_send(sk, skb, &pn->remote_pep);
+       pn_skb_get_src_sockaddr(oskb, &peer);
+       return pn_skb_send(sk, skb, &peer);
 }
 
-static int pipe_handler_send_created_ind(struct sock *sk,
-               u8 utid, u8 msg_id)
+static int pep_indicate(struct sock *sk, u8 id, u8 code,
+                       const void *data, int len, gfp_t priority)
 {
-       int err_code;
+       struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
 
-       struct pep_sock *pn = pep_sk(sk);
-       static u8 data[4] = {
-               0x03, 0x04,
-       };
-       data[2] = pn->tx_fc;
-       data[3] = pn->rx_fc;
-
-       /*
-        * actually, below is number of sub-blocks and not error code.
-        * Pipe_created_ind message format does not have any
-        * error code field. However, the Phonet stack will always send
-        * an error code as part of pnpipehdr. So, use that err_code to
-        * specify the number of sub-blocks.
-        */
-       err_code = 0x01;
-
-       skb = alloc_skb(MAX_PNPIPE_HEADER + sizeof(data), GFP_ATOMIC);
+       skb = pep_alloc_skb(sk, data, len, priority);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       __skb_put(skb, sizeof(data));
-       skb_copy_to_linear_data(skb, data, sizeof(data));
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
-       ph->utid = utid;
-       ph->message_id = msg_id;
+       ph->utid = 0;
+       ph->message_id = id;
        ph->pipe_handle = pn->pipe_handle;
-       ph->error_code = err_code;
-
-       return pn_skb_send(sk, skb, &pn->remote_pep);
+       ph->data[0] = code;
+       return pn_skb_send(sk, skb, NULL);
 }
 
-static int pipe_handler_send_ind(struct sock *sk, u8 utid, u8 msg_id)
+#define PAD 0x00
+
+static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
+                               const void *data, int len)
 {
-       int err_code;
+       struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
-       struct pep_sock *pn = pep_sk(sk);
-
-       /*
-        * actually, below is a filler.
-        * Pipe_enabled/disabled_ind message format does not have any
-        * error code field. However, the Phonet stack will always send
-        * an error code as part of pnpipehdr. So, use that err_code to
-        * specify the filler value.
-        */
-       err_code = 0x0;
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
+       skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
-       ph->utid = utid;
-       ph->message_id = msg_id;
+       ph->utid = id; /* whatever */
+       ph->message_id = id;
        ph->pipe_handle = pn->pipe_handle;
-       ph->error_code = err_code;
-
-       return pn_skb_send(sk, skb, &pn->remote_pep);
+       ph->data[0] = code;
+       return pn_skb_send(sk, skb, NULL);
 }
 
-static int pipe_handler_enable_pipe(struct sock *sk, int enable)
+static int pipe_handler_send_created_ind(struct sock *sk)
 {
-       int utid, req;
-
-       if (enable) {
-               utid = PNS_PIPE_ENABLE_UTID;
-               req = PNS_PEP_ENABLE_REQ;
-       } else {
-               utid = PNS_PIPE_DISABLE_UTID;
-               req = PNS_PEP_DISABLE_REQ;
-       }
-       return pipe_handler_send_req(sk, utid, req, GFP_ATOMIC);
+       struct pep_sock *pn = pep_sk(sk);
+       u8 data[4] = {
+               PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
+               pn->tx_fc, pn->rx_fc,
+       };
+
+       return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
+                               data, 4, GFP_ATOMIC);
 }
-#endif
 
 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 {
@@ -334,11 +188,12 @@ static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
                                GFP_KERNEL);
 }
 
-static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code)
+static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
+                               gfp_t priority)
 {
        static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
        WARN_ON(code == PN_PIPE_NO_ERROR);
-       return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
+       return pep_reply(sk, skb, code, data, sizeof(data), priority);
 }
 
 /* Control requests are not sent by the pipe service and have a specific
@@ -350,23 +205,21 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
        struct sk_buff *skb;
        struct pnpipehdr *ph;
        struct sockaddr_pn dst;
+       u8 data[4] = {
+               oph->data[0], /* PEP type */
+               code, /* error code, at an unusual offset */
+               PAD, PAD,
+       };
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
+       skb = pep_alloc_skb(sk, data, 4, priority);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
-
-       skb_reserve(skb, MAX_PHONET_HEADER);
-       ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4);
 
+       ph = pnp_hdr(skb);
        ph->utid = oph->utid;
        ph->message_id = PNS_PEP_CTRL_RESP;
        ph->pipe_handle = oph->pipe_handle;
        ph->data[0] = oph->data[1]; /* CTRL id */
-       ph->data[1] = oph->data[0]; /* PEP type */
-       ph->data[2] = code; /* error code, at an usual offset */
-       ph->data[3] = PAD;
-       ph->data[4] = PAD;
 
        pn_skb_get_src_sockaddr(oskb, &dst);
        return pn_skb_send(sk, skb, &dst);
@@ -374,38 +227,15 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 
 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 {
-       struct pep_sock *pn = pep_sk(sk);
-       struct pnpipehdr *ph;
-       struct sk_buff *skb;
+       u8 data[4] = { type, PAD, PAD, status };
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
-       if (!skb)
-               return -ENOMEM;
-       skb_set_owner_w(skb, sk);
-
-       skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
-       __skb_push(skb, sizeof(*ph) + 4);
-       skb_reset_transport_header(skb);
-       ph = pnp_hdr(skb);
-       ph->utid = 0;
-       ph->message_id = PNS_PEP_STATUS_IND;
-       ph->pipe_handle = pn->pipe_handle;
-       ph->pep_type = PN_PEP_TYPE_COMMON;
-       ph->data[1] = type;
-       ph->data[2] = PAD;
-       ph->data[3] = PAD;
-       ph->data[4] = status;
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-       return pn_skb_send(sk, skb, &pn->remote_pep);
-#else
-       return pn_skb_send(sk, skb, &pipe_srv);
-#endif
+       return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
+                               data, 4, priority);
 }
 
 /* Send our RX flow control information to the sender.
  * Socket must be locked. */
-static void pipe_grant_credits(struct sock *sk)
+static void pipe_grant_credits(struct sock *sk, gfp_t priority)
 {
        struct pep_sock *pn = pep_sk(sk);
 
@@ -415,16 +245,16 @@ static void pipe_grant_credits(struct sock *sk)
        case PN_LEGACY_FLOW_CONTROL: /* TODO */
                break;
        case PN_ONE_CREDIT_FLOW_CONTROL:
-               pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
-                               PEP_IND_READY, GFP_ATOMIC);
-               pn->rx_credits = 1;
+               if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
+                                       PEP_IND_READY, priority) == 0)
+                       pn->rx_credits = 1;
                break;
        case PN_MULTI_CREDIT_FLOW_CONTROL:
                if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
                        break;
                if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
                                        CREDITS_MAX - pn->rx_credits,
-                                       GFP_ATOMIC) == 0)
+                                       priority) == 0)
                        pn->rx_credits = CREDITS_MAX;
                break;
        }
@@ -522,7 +352,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        switch (hdr->message_id) {
        case PNS_PEP_CONNECT_REQ:
-               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
                break;
 
        case PNS_PEP_DISCONNECT_REQ:
@@ -532,35 +362,11 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                        sk->sk_state_change(sk);
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_DISCONNECT_RESP:
-               pn->pipe_state = PIPE_IDLE;
-               sk->sk_state = TCP_CLOSE;
-               break;
-#endif
-
        case PNS_PEP_ENABLE_REQ:
                /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_ENABLE_RESP:
-               pn->pipe_state = PIPE_ENABLED;
-               pipe_handler_send_ind(sk, PNS_PIPE_ENABLED_IND_UTID,
-                               PNS_PIPE_ENABLED_IND);
-
-               if (!pn_flow_safe(pn->tx_fc)) {
-                       atomic_set(&pn->tx_credits, 1);
-                       sk->sk_write_space(sk);
-               }
-               if (sk->sk_state == TCP_ESTABLISHED)
-                       break; /* Nothing to do */
-               sk->sk_state = TCP_ESTABLISHED;
-               pipe_grant_credits(sk);
-               break;
-#endif
-
        case PNS_PEP_RESET_REQ:
                switch (hdr->state_after_reset) {
                case PN_PIPE_DISABLE:
@@ -579,17 +385,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_DISABLE_RESP:
-               pn->pipe_state = PIPE_DISABLED;
-               atomic_set(&pn->tx_credits, 0);
-               pipe_handler_send_ind(sk, PNS_PIPE_DISABLED_IND_UTID,
-                               PNS_PIPE_DISABLED_IND);
-               sk->sk_state = TCP_SYN_RECV;
-               pn->rx_credits = 0;
-               break;
-#endif
-
        case PNS_PEP_CTRL_REQ:
                if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
                        atomic_inc(&sk->sk_drops);
@@ -607,7 +402,8 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                if (!pn_flow_safe(pn->rx_fc)) {
                        err = sock_queue_rcv_skb(sk, skb);
                        if (!err)
-                               return 0;
+                               return NET_RX_SUCCESS;
+                       err = -ENOBUFS;
                        break;
                }
 
@@ -645,7 +441,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                if (sk->sk_state == TCP_ESTABLISHED)
                        break; /* Nothing to do */
                sk->sk_state = TCP_ESTABLISHED;
-               pipe_grant_credits(sk);
+               pipe_grant_credits(sk, GFP_ATOMIC);
                break;
 
        case PNS_PIPE_DISABLED_IND:
@@ -660,7 +456,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
        }
 out:
        kfree_skb(skb);
-       return err;
+       return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
 
 queue:
        skb->dev = NULL;
@@ -669,7 +465,7 @@ queue:
        skb_queue_tail(queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, err);
-       return 0;
+       return NET_RX_SUCCESS;
 }
 
 /* Destroy connected sock. */
@@ -681,133 +477,126 @@ static void pipe_destruct(struct sock *sk)
        skb_queue_purge(&pn->ctrlreq_queue);
 }
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
 {
-       struct pep_sock *pn = pep_sk(sk);
-       u8 host_pref_rx_fc[3] = {3, 2, 1}, host_req_tx_fc[3] = {3, 2, 1};
-       u8 remote_pref_rx_fc[3], remote_req_tx_fc[3];
-       u8 negotiated_rx_fc, negotiated_tx_fc;
-       int ret;
-
-       pipe_get_flow_info(sk, skb, remote_pref_rx_fc,
-                       remote_req_tx_fc);
-       negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc,
-                       host_pref_rx_fc,
-                       sizeof(host_pref_rx_fc));
-       negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc,
-                       remote_pref_rx_fc,
-                       sizeof(host_pref_rx_fc));
-
-       pn->pipe_state = PIPE_DISABLED;
-       sk->sk_state = TCP_SYN_RECV;
-       sk->sk_backlog_rcv = pipe_do_rcv;
-       sk->sk_destruct = pipe_destruct;
-       pn->rx_credits = 0;
-       pn->rx_fc = negotiated_rx_fc;
-       pn->tx_fc = negotiated_tx_fc;
-       sk->sk_state_change(sk);
+       unsigned i;
+       u8 final_fc = PN_NO_FLOW_CONTROL;
 
-       ret = pipe_handler_send_created_ind(sk,
-                       PNS_PIPE_CREATED_IND_UTID,
-                       PNS_PIPE_CREATED_IND
-                       );
+       for (i = 0; i < n; i++) {
+               u8 fc = fcs[i];
 
-       return ret;
+               if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
+                       final_fc = fc;
+       }
+       return final_fc;
 }
-#endif
 
-static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
+static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 {
-       struct sock *newsk;
-       struct pep_sock *newpn, *pn = pep_sk(sk);
+       struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *hdr;
-       struct sockaddr_pn dst;
-       u16 peer_type;
-       u8 pipe_handle, enabled, n_sb;
-       u8 aligned = 0;
+       u8 n_sb;
 
        if (!pskb_pull(skb, sizeof(*hdr) + 4))
                return -EINVAL;
 
        hdr = pnp_hdr(skb);
-       pipe_handle = hdr->pipe_handle;
-       switch (hdr->state_after_connect) {
-       case PN_PIPE_DISABLE:
-               enabled = 0;
-               break;
-       case PN_PIPE_ENABLE:
-               enabled = 1;
-               break;
-       default:
-               pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
-               return -EINVAL;
-       }
-       peer_type = hdr->other_pep_type << 8;
-
-       if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
-               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
-               return -ENOBUFS;
-       }
+       if (hdr->error_code != PN_PIPE_NO_ERROR)
+               return -ECONNREFUSED;
 
-       /* Parse sub-blocks (options) */
+       /* Parse sub-blocks */
        n_sb = hdr->data[4];
        while (n_sb > 0) {
-               u8 type, buf[1], len = sizeof(buf);
+               u8 type, buf[6], len = sizeof(buf);
                const u8 *data = pep_get_sb(skb, &type, &len, buf);
 
                if (data == NULL)
                        return -EINVAL;
+
                switch (type) {
-               case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
-                       if (len < 1)
-                               return -EINVAL;
-                       peer_type = (peer_type & 0xff00) | data[0];
+               case PN_PIPE_SB_REQUIRED_FC_TX:
+                       if (len < 2 || len < data[0])
+                               break;
+                       pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
                        break;
-               case PN_PIPE_SB_ALIGNED_DATA:
-                       aligned = data[0] != 0;
+
+               case PN_PIPE_SB_PREFERRED_FC_RX:
+                       if (len < 2 || len < data[0])
+                               break;
+                       pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
                        break;
+
                }
                n_sb--;
        }
 
-       skb = skb_clone(skb, GFP_ATOMIC);
-       if (!skb)
-               return -ENOMEM;
+       return pipe_handler_send_created_ind(sk);
+}
 
-       /* Create a new to-be-accepted sock */
-       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
-       if (!newsk) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-       sock_init_data(NULL, newsk);
-       newsk->sk_state = TCP_SYN_RECV;
-       newsk->sk_backlog_rcv = pipe_do_rcv;
-       newsk->sk_protocol = sk->sk_protocol;
-       newsk->sk_destruct = pipe_destruct;
+/* Queue an skb to an actively connected sock.
+ * Socket lock must be held. */
+static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
+{
+       struct pep_sock *pn = pep_sk(sk);
+       struct pnpipehdr *hdr = pnp_hdr(skb);
+       int err = NET_RX_SUCCESS;
 
-       newpn = pep_sk(newsk);
-       pn_skb_get_dst_sockaddr(skb, &dst);
-       newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
-       newpn->pn_sk.resource = pn->pn_sk.resource;
-       skb_queue_head_init(&newpn->ctrlreq_queue);
-       newpn->pipe_handle = pipe_handle;
-       atomic_set(&newpn->tx_credits, 0);
-       newpn->peer_type = peer_type;
-       newpn->rx_credits = 0;
-       newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
-       newpn->init_enable = enabled;
-       newpn->aligned = aligned;
+       switch (hdr->message_id) {
+       case PNS_PIPE_ALIGNED_DATA:
+               __skb_pull(skb, 1);
+               /* fall through */
+       case PNS_PIPE_DATA:
+               __skb_pull(skb, 3); /* Pipe data header */
+               if (!pn_flow_safe(pn->rx_fc)) {
+                       err = sock_queue_rcv_skb(sk, skb);
+                       if (!err)
+                               return NET_RX_SUCCESS;
+                       err = NET_RX_DROP;
+                       break;
+               }
 
-       BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
-       skb_queue_head(&newsk->sk_receive_queue, skb);
-       if (!sock_flag(sk, SOCK_DEAD))
-               sk->sk_data_ready(sk, 0);
+               if (pn->rx_credits == 0) {
+                       atomic_inc(&sk->sk_drops);
+                       err = NET_RX_DROP;
+                       break;
+               }
+               pn->rx_credits--;
+               skb->dev = NULL;
+               skb_set_owner_r(skb, sk);
+               err = skb->len;
+               skb_queue_tail(&sk->sk_receive_queue, skb);
+               if (!sock_flag(sk, SOCK_DEAD))
+                       sk->sk_data_ready(sk, err);
+               return NET_RX_SUCCESS;
 
-       sk_acceptq_added(sk);
-       sk_add_node(newsk, &pn->ackq);
-       return 0;
+       case PNS_PEP_CONNECT_RESP:
+               if (sk->sk_state != TCP_SYN_SENT)
+                       break;
+               if (!sock_flag(sk, SOCK_DEAD))
+                       sk->sk_state_change(sk);
+               if (pep_connresp_rcv(sk, skb)) {
+                       sk->sk_state = TCP_CLOSE_WAIT;
+                       break;
+               }
+
+               sk->sk_state = TCP_ESTABLISHED;
+               if (!pn_flow_safe(pn->tx_fc)) {
+                       atomic_set(&pn->tx_credits, 1);
+                       sk->sk_write_space(sk);
+               }
+               pipe_grant_credits(sk, GFP_ATOMIC);
+               break;
+
+       case PNS_PEP_DISCONNECT_RESP:
+               /* sock should already be dead, nothing to do */
+               break;
+
+       case PNS_PEP_STATUS_IND:
+               pipe_rcv_status(sk, skb);
+               break;
+       }
+       kfree_skb(skb);
+       return err;
 }
 
 /* Listening sock must be locked */
@@ -847,7 +636,6 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
        struct sock *sknode;
        struct pnpipehdr *hdr;
        struct sockaddr_pn dst;
-       int err = NET_RX_SUCCESS;
        u8 pipe_handle;
 
        if (!pskb_may_pull(skb, sizeof(*hdr)))
@@ -865,26 +653,18 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
        if (sknode)
                return sk_receive_skb(sknode, skb, 1);
 
-       /* Look for a pipe handle pending accept */
-       sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle);
-       if (sknode) {
-               sock_put(sknode);
-               if (net_ratelimit())
-                       printk(KERN_WARNING"Phonet unconnected PEP ignored");
-               err = NET_RX_DROP;
-               goto drop;
-       }
-
        switch (hdr->message_id) {
        case PNS_PEP_CONNECT_REQ:
-               err = pep_connreq_rcv(sk, skb);
-               break;
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_CONNECT_RESP:
-               err = pep_connresp_rcv(sk, skb);
-               break;
-#endif
+               if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
+                       pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
+                                       GFP_ATOMIC);
+                       break;
+               }
+               skb_queue_head(&sk->sk_receive_queue, skb);
+               sk_acceptq_added(sk);
+               if (!sock_flag(sk, SOCK_DEAD))
+                       sk->sk_data_ready(sk, 0);
+               return NET_RX_SUCCESS;
 
        case PNS_PEP_DISCONNECT_REQ:
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
@@ -898,12 +678,17 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
        case PNS_PEP_ENABLE_REQ:
        case PNS_PEP_DISABLE_REQ:
                /* invalid handle is not even allowed here! */
+               break;
+
        default:
-               err = NET_RX_DROP;
+               if ((1 << sk->sk_state)
+                               & ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
+                       /* actively connected socket */
+                       return pipe_handler_do_rcv(sk, skb);
        }
 drop:
        kfree_skb(skb);
-       return err;
+       return NET_RX_SUCCESS;
 }
 
 static int pipe_do_remove(struct sock *sk)
@@ -912,20 +697,16 @@ static int pipe_do_remove(struct sock *sk)
        struct pnpipehdr *ph;
        struct sk_buff *skb;
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_KERNEL);
+       skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
        ph->utid = 0;
        ph->message_id = PNS_PIPE_REMOVE_REQ;
        ph->pipe_handle = pn->pipe_handle;
        ph->data[0] = PAD;
-
-       return pn_skb_send(sk, skb, &pipe_srv);
+       return pn_skb_send(sk, skb, NULL);
 }
 
 /* associated socket ceases to exist */
@@ -938,29 +719,15 @@ static void pep_sock_close(struct sock *sk, long timeout)
        sk_common_release(sk);
 
        lock_sock(sk);
-       if (sk->sk_state == TCP_LISTEN) {
-               /* Destroy the listen queue */
-               struct sock *sknode;
-               struct hlist_node *p, *n;
-
-               sk_for_each_safe(sknode, p, n, &pn->ackq)
-                       sk_del_node_init(sknode);
-               sk->sk_state = TCP_CLOSE;
-       } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
-               /* Forcefully remove dangling Phonet pipe */
-               pipe_do_remove(sk);
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-       if (pn->pipe_state != PIPE_IDLE) {
-               /* send pep disconnect request */
-               pipe_handler_send_req(sk,
-                               PNS_PEP_DISCONNECT_UTID, PNS_PEP_DISCONNECT_REQ,
-                               GFP_KERNEL);
-
-               pn->pipe_state = PIPE_IDLE;
-               sk->sk_state = TCP_CLOSE;
+       if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
+               if (sk->sk_backlog_rcv == pipe_do_rcv)
+                       /* Forcefully remove dangling Phonet pipe */
+                       pipe_do_remove(sk);
+               else
+                       pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
+                                               NULL, 0);
        }
-#endif
+       sk->sk_state = TCP_CLOSE;
 
        ifindex = pn->ifindex;
        pn->ifindex = 0;
@@ -971,86 +738,141 @@ static void pep_sock_close(struct sock *sk, long timeout)
        sock_put(sk);
 }
 
-static int pep_wait_connreq(struct sock *sk, int noblock)
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 {
-       struct task_struct *tsk = current;
-       struct pep_sock *pn = pep_sk(sk);
-       long timeo = sock_rcvtimeo(sk, noblock);
-
-       for (;;) {
-               DEFINE_WAIT(wait);
+       struct pep_sock *pn = pep_sk(sk), *newpn;
+       struct sock *newsk = NULL;
+       struct sk_buff *skb;
+       struct pnpipehdr *hdr;
+       struct sockaddr_pn dst, src;
+       int err;
+       u16 peer_type;
+       u8 pipe_handle, enabled, n_sb;
+       u8 aligned = 0;
 
-               if (sk->sk_state != TCP_LISTEN)
-                       return -EINVAL;
-               if (!hlist_empty(&pn->ackq))
-                       break;
-               if (!timeo)
-                       return -EWOULDBLOCK;
-               if (signal_pending(tsk))
-                       return sock_intr_errno(timeo);
+       skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
+       if (!skb)
+               return NULL;
 
-               prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-                                               TASK_INTERRUPTIBLE);
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-               finish_wait(sk_sleep(sk), &wait);
+       lock_sock(sk);
+       if (sk->sk_state != TCP_LISTEN) {
+               err = -EINVAL;
+               goto drop;
        }
+       sk_acceptq_removed(sk);
 
-       return 0;
-}
+       err = -EPROTO;
+       if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+               goto drop;
 
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
-{
-       struct pep_sock *pn = pep_sk(sk);
-       struct sock *newsk = NULL;
-       struct sk_buff *oskb;
-       int err;
+       hdr = pnp_hdr(skb);
+       pipe_handle = hdr->pipe_handle;
+       switch (hdr->state_after_connect) {
+       case PN_PIPE_DISABLE:
+               enabled = 0;
+               break;
+       case PN_PIPE_ENABLE:
+               enabled = 1;
+               break;
+       default:
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
+                               GFP_KERNEL);
+               goto drop;
+       }
+       peer_type = hdr->other_pep_type << 8;
 
-       lock_sock(sk);
-       err = pep_wait_connreq(sk, flags & O_NONBLOCK);
-       if (err)
-               goto out;
+       /* Parse sub-blocks (options) */
+       n_sb = hdr->data[4];
+       while (n_sb > 0) {
+               u8 type, buf[1], len = sizeof(buf);
+               const u8 *data = pep_get_sb(skb, &type, &len, buf);
 
-       newsk = __sk_head(&pn->ackq);
+               if (data == NULL)
+                       goto drop;
+               switch (type) {
+               case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
+                       if (len < 1)
+                               goto drop;
+                       peer_type = (peer_type & 0xff00) | data[0];
+                       break;
+               case PN_PIPE_SB_ALIGNED_DATA:
+                       aligned = data[0] != 0;
+                       break;
+               }
+               n_sb--;
+       }
 
-       oskb = skb_dequeue(&newsk->sk_receive_queue);
-       err = pep_accept_conn(newsk, oskb);
-       if (err) {
-               skb_queue_head(&newsk->sk_receive_queue, oskb);
+       /* Check for duplicate pipe handle */
+       newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
+       if (unlikely(newsk)) {
+               __sock_put(newsk);
                newsk = NULL;
-               goto out;
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
+               goto drop;
+       }
+
+       /* Create a new to-be-accepted sock */
+       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
+       if (!newsk) {
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
+               err = -ENOBUFS;
+               goto drop;
        }
-       kfree_skb(oskb);
 
+       sock_init_data(NULL, newsk);
+       newsk->sk_state = TCP_SYN_RECV;
+       newsk->sk_backlog_rcv = pipe_do_rcv;
+       newsk->sk_protocol = sk->sk_protocol;
+       newsk->sk_destruct = pipe_destruct;
+
+       newpn = pep_sk(newsk);
+       pn_skb_get_dst_sockaddr(skb, &dst);
+       pn_skb_get_src_sockaddr(skb, &src);
+       newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
+       newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
+       newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
        sock_hold(sk);
-       pep_sk(newsk)->listener = sk;
+       newpn->listener = sk;
+       skb_queue_head_init(&newpn->ctrlreq_queue);
+       newpn->pipe_handle = pipe_handle;
+       atomic_set(&newpn->tx_credits, 0);
+       newpn->ifindex = 0;
+       newpn->peer_type = peer_type;
+       newpn->rx_credits = 0;
+       newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+       newpn->init_enable = enabled;
+       newpn->aligned = aligned;
 
-       sock_hold(newsk);
-       sk_del_node_init(newsk);
-       sk_acceptq_removed(sk);
+       err = pep_accept_conn(newsk, skb);
+       if (err) {
+               sock_put(newsk);
+               newsk = NULL;
+               goto drop;
+       }
        sk_add_node(newsk, &pn->hlist);
-       __sock_put(newsk);
-
-out:
+drop:
        release_sock(sk);
+       kfree_skb(skb);
        *errp = err;
        return newsk;
 }
 
-#ifdef CONFIG_PHONET_PIPECTRLR
 static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 {
        struct pep_sock *pn = pep_sk(sk);
-       struct sockaddr_pn *spn =  (struct sockaddr_pn *)addr;
-
-       memcpy(&pn->remote_pep, spn, sizeof(struct sockaddr_pn));
+       int err;
+       u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 
-       return pipe_handler_send_req(sk,
-                       PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ,
-                       GFP_ATOMIC);
+       pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+       err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
+                                       PN_PIPE_ENABLE, data, 4);
+       if (err) {
+               pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+               return err;
+       }
+       sk->sk_state = TCP_SYN_SENT;
+       return 0;
 }
-#endif
 
 static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -1081,10 +903,18 @@ static int pep_init(struct sock *sk)
 {
        struct pep_sock *pn = pep_sk(sk);
 
-       INIT_HLIST_HEAD(&pn->ackq);
+       sk->sk_destruct = pipe_destruct;
        INIT_HLIST_HEAD(&pn->hlist);
+       pn->listener = NULL;
        skb_queue_head_init(&pn->ctrlreq_queue);
+       atomic_set(&pn->tx_credits, 0);
+       pn->ifindex = 0;
+       pn->peer_type = 0;
        pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+       pn->rx_credits = 0;
+       pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+       pn->init_enable = 1;
+       pn->aligned = 0;
        return 0;
 }
 
@@ -1103,18 +933,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
 
        lock_sock(sk);
        switch (optname) {
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNPIPE_PIPE_HANDLE:
-               if (val) {
-                       if (pn->pipe_state > PIPE_IDLE) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       pn->pipe_handle = val;
-                       break;
-               }
-#endif
-
        case PNPIPE_ENCAP:
                if (val && val != PNPIPE_ENCAP_IP) {
                        err = -EINVAL;
@@ -1141,16 +959,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
                }
                goto out_norel;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNPIPE_ENABLE:
-               if (pn->pipe_state <= PIPE_IDLE) {
-                       err = -ENOTCONN;
-                       break;
-               }
-               err = pipe_handler_enable_pipe(sk, val);
-               break;
-#endif
-
        default:
                err = -ENOPROTOOPT;
        }
@@ -1180,13 +988,11 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
                val = pn->ifindex;
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNPIPE_ENABLE:
-               if (pn->pipe_state <= PIPE_IDLE)
-                       return -ENOTCONN;
-               val = pn->pipe_state != PIPE_DISABLED;
+       case PNPIPE_HANDLE:
+               val = pn->pipe_handle;
+               if (val == PN_PIPE_INVALID_HANDLE)
+                       return -EINVAL;
                break;
-#endif
 
        default:
                return -ENOPROTOOPT;
@@ -1222,11 +1028,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
        } else
                ph->message_id = PNS_PIPE_DATA;
        ph->pipe_handle = pn->pipe_handle;
-#ifdef CONFIG_PHONET_PIPECTRLR
-       err = pn_skb_send(sk, skb, &pn->remote_pep);
-#else
-       err = pn_skb_send(sk, skb, &pipe_srv);
-#endif
+       err = pn_skb_send(sk, skb, NULL);
 
        if (err && pn_flow_safe(pn->tx_fc))
                atomic_inc(&pn->tx_credits);
@@ -1253,7 +1055,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
        if (!skb)
                return err;
 
-       skb_reserve(skb, MAX_PHONET_HEADER + 3);
+       skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
        err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (err < 0)
                goto outfree;
@@ -1355,7 +1157,7 @@ struct sk_buff *pep_read(struct sock *sk)
        struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
 
        if (sk->sk_state == TCP_ESTABLISHED)
-               pipe_grant_credits(sk);
+               pipe_grant_credits(sk, GFP_ATOMIC);
        return skb;
 }
 
@@ -1400,7 +1202,7 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
        }
 
        if (sk->sk_state == TCP_ESTABLISHED)
-               pipe_grant_credits(sk);
+               pipe_grant_credits(sk, GFP_KERNEL);
        release_sock(sk);
 copy:
        msg->msg_flags |= MSG_EOR;
@@ -1424,9 +1226,9 @@ static void pep_sock_unhash(struct sock *sk)
 
        lock_sock(sk);
 
-#ifndef CONFIG_PHONET_PIPECTRLR
-       if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
+       if (pn->listener != NULL) {
                skparent = pn->listener;
+               pn->listener = NULL;
                release_sock(sk);
 
                pn = pep_sk(skparent);
@@ -1434,7 +1236,7 @@ static void pep_sock_unhash(struct sock *sk)
                sk_del_node_init(sk);
                sk = skparent;
        }
-#endif
+
        /* Unhash a listening sock only when it is closed
         * and all of its active connected pipes are closed. */
        if (hlist_empty(&pn->hlist))
@@ -1448,9 +1250,7 @@ static void pep_sock_unhash(struct sock *sk)
 static struct proto pep_proto = {
        .close          = pep_sock_close,
        .accept         = pep_sock_accept,
-#ifdef CONFIG_PHONET_PIPECTRLR
        .connect        = pep_sock_connect,
-#endif
        .ioctl          = pep_ioctl,
        .init           = pep_init,
        .setsockopt     = pep_setsockopt,
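
The connect-response handling added above (pep_connresp_rcv) negotiates a flow-control mode by scanning the peer's sub-blocks and keeping the strongest algorithm it recognises, via pipe_negotiate_fc(). Below is a minimal standalone sketch of that selection rule; the enum values and the sample peer list (a hypothetical REQUIRED_FC_TX sub-block) are assumptions for illustration only:

#include <stdio.h>

/* Illustrative values; the kernel defines the real ones in its Phonet headers. */
enum {
	PN_NO_FLOW_CONTROL = 0,
	PN_LEGACY_FLOW_CONTROL,
	PN_ONE_CREDIT_FLOW_CONTROL,
	PN_MULTI_CREDIT_FLOW_CONTROL,
	PN_MAX_FLOW_CONTROL
};

/* Same selection rule as pipe_negotiate_fc() above: keep the highest
 * algorithm the host recognises, else fall back to no flow control. */
static unsigned char negotiate_fc(const unsigned char *fcs, unsigned int n)
{
	unsigned char final_fc = PN_NO_FLOW_CONTROL;
	unsigned int i;

	for (i = 0; i < n; i++)
		if (fcs[i] > final_fc && fcs[i] < PN_MAX_FLOW_CONTROL)
			final_fc = fcs[i];
	return final_fc;
}

int main(void)
{
	/* Hypothetical list advertised by the peer. */
	const unsigned char peer[] = {
		PN_LEGACY_FLOW_CONTROL, PN_MULTI_CREDIT_FLOW_CONTROL, 0x7f
	};

	/* The unknown 0x7f entry is ignored; multi-credit (3) wins. */
	printf("negotiated fc = %u\n", (unsigned int)negotiate_fc(peer, 3));
	return 0;
}
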
index 25f746d..b1adafa 100644 (file)
@@ -225,15 +225,18 @@ static int pn_socket_autobind(struct socket *sock)
        return 0; /* socket was already bound */
 }
 
-#ifdef CONFIG_PHONET_PIPECTRLR
 static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
                int len, int flags)
 {
        struct sock *sk = sock->sk;
+       struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
-       long timeo;
+       struct task_struct *tsk = current;
+       long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        int err;
 
+       if (pn_socket_autobind(sock))
+               return -ENOBUFS;
        if (len < sizeof(struct sockaddr_pn))
                return -EINVAL;
        if (spn->spn_family != AF_PHONET)
@@ -243,82 +246,61 @@ static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
 
        switch (sock->state) {
        case SS_UNCONNECTED:
-               sk->sk_state = TCP_CLOSE;
-               break;
-       case SS_CONNECTING:
-               switch (sk->sk_state) {
-               case TCP_SYN_RECV:
-                       sock->state = SS_CONNECTED;
+               if (sk->sk_state != TCP_CLOSE) {
                        err = -EISCONN;
                        goto out;
-               case TCP_CLOSE:
-                       err = -EALREADY;
-                       if (flags & O_NONBLOCK)
-                               goto out;
-                       goto wait_connect;
                }
                break;
-       case SS_CONNECTED:
-               switch (sk->sk_state) {
-               case TCP_SYN_RECV:
-                       err = -EISCONN;
-                       goto out;
-               case TCP_CLOSE:
-                       sock->state = SS_UNCONNECTED;
-                       break;
-               }
-               break;
-       case SS_DISCONNECTING:
-       case SS_FREE:
-               break;
+       case SS_CONNECTING:
+               err = -EALREADY;
+               goto out;
+       default:
+               err = -EISCONN;
+               goto out;
        }
-       sk->sk_state = TCP_CLOSE;
-       sk_stream_kill_queues(sk);
 
+       pn->dobject = pn_sockaddr_get_object(spn);
+       pn->resource = pn_sockaddr_get_resource(spn);
        sock->state = SS_CONNECTING;
+
        err = sk->sk_prot->connect(sk, addr, len);
-       if (err < 0) {
+       if (err) {
                sock->state = SS_UNCONNECTED;
-               sk->sk_state = TCP_CLOSE;
+               pn->dobject = 0;
                goto out;
        }
 
-       err = -EINPROGRESS;
-wait_connect:
-       if (sk->sk_state != TCP_SYN_RECV && (flags & O_NONBLOCK))
-               goto out;
-
-       timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
-       release_sock(sk);
-
-       err = -ERESTARTSYS;
-       timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
-                       sk->sk_state != TCP_CLOSE,
-                       timeo);
-
-       lock_sock(sk);
-       if (timeo < 0)
-               goto out; /* -ERESTARTSYS */
+       while (sk->sk_state == TCP_SYN_SENT) {
+               DEFINE_WAIT(wait);
 
-       err = -ETIMEDOUT;
-       if (timeo == 0 && sk->sk_state != TCP_SYN_RECV)
-               goto out;
+               if (!timeo) {
+                       err = -EINPROGRESS;
+                       goto out;
+               }
+               if (signal_pending(tsk)) {
+                       err = sock_intr_errno(timeo);
+                       goto out;
+               }
 
-       if (sk->sk_state != TCP_SYN_RECV) {
-               sock->state = SS_UNCONNECTED;
-               err = sock_error(sk);
-               if (!err)
-                       err = -ECONNREFUSED;
-               goto out;
+               prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+                                               TASK_INTERRUPTIBLE);
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
+               finish_wait(sk_sleep(sk), &wait);
        }
-       sock->state = SS_CONNECTED;
-       err = 0;
 
+       if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+               err = 0;
+       else if (sk->sk_state == TCP_CLOSE_WAIT)
+               err = -ECONNRESET;
+       else
+               err = -ECONNREFUSED;
+       sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
 out:
        release_sock(sk);
        return err;
 }
-#endif
 
 static int pn_socket_accept(struct socket *sock, struct socket *newsock,
                                int flags)
@@ -327,6 +309,9 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
        struct sock *newsk;
        int err;
 
+       if (unlikely(sk->sk_state != TCP_LISTEN))
+               return -EINVAL;
+
        newsk = sk->sk_prot->accept(sk, flags, &err);
        if (!newsk)
                return err;
@@ -363,13 +348,8 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 
        poll_wait(file, sk_sleep(sk), wait);
 
-       switch (sk->sk_state) {
-       case TCP_LISTEN:
-               return hlist_empty(&pn->ackq) ? 0 : POLLIN;
-       case TCP_CLOSE:
+       if (sk->sk_state == TCP_CLOSE)
                return POLLERR;
-       }
-
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
        if (!skb_queue_empty(&pn->ctrlreq_queue))
@@ -428,19 +408,19 @@ static int pn_socket_listen(struct socket *sock, int backlog)
        struct sock *sk = sock->sk;
        int err = 0;
 
-       if (sock->state != SS_UNCONNECTED)
-               return -EINVAL;
        if (pn_socket_autobind(sock))
                return -ENOBUFS;
 
        lock_sock(sk);
-       if (sk->sk_state != TCP_CLOSE) {
+       if (sock->state != SS_UNCONNECTED) {
                err = -EINVAL;
                goto out;
        }
 
-       sk->sk_state = TCP_LISTEN;
-       sk->sk_ack_backlog = 0;
+       if (sk->sk_state != TCP_LISTEN) {
+               sk->sk_state = TCP_LISTEN;
+               sk->sk_ack_backlog = 0;
+       }
        sk->sk_max_ack_backlog = backlog;
 out:
        release_sock(sk);
@@ -488,11 +468,7 @@ const struct proto_ops phonet_stream_ops = {
        .owner          = THIS_MODULE,
        .release        = pn_socket_release,
        .bind           = pn_socket_bind,
-#ifdef CONFIG_PHONET_PIPECTRLR
        .connect        = pn_socket_connect,
-#else
-       .connect        = sock_no_connect,
-#endif
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
@@ -633,8 +609,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
 
                seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
                        "%d %p %d%n",
-                       sk->sk_protocol, pn->sobject, 0, pn->resource,
-                       sk->sk_state,
+                       sk->sk_protocol, pn->sobject, pn->dobject,
+                       pn->resource, sk->sk_state,
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
                        sock_i_uid(sk), sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
index 71f373c..c47a511 100644 (file)
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
        if (conn->c_loopback
            && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
                rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-               return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+               scat = &rm->data.op_sg[sg];
+               ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+               ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+               return ret;
        }
 
        /* FIXME we may overallocate here */
index aeec1d4..bca6761 100644 (file)
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
                         unsigned int hdr_off, unsigned int sg,
                         unsigned int off)
 {
+       struct scatterlist *sgp = &rm->data.op_sg[sg];
+       int ret = sizeof(struct rds_header) +
+                       be32_to_cpu(rm->m_inc.i_hdr.h_len);
+
        /* Do not send cong updates to loopback */
        if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
                rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-               return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+               ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
+               goto out;
        }
 
        BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
                            NULL);
 
        rds_inc_put(&rm->m_inc);
-
-       return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
+out:
+       return ret;
 }
 
 /*
index 9542449..da8adac 100644 (file)
@@ -50,7 +50,6 @@ rdsdebug(char *fmt, ...)
 #define RDS_FRAG_SIZE  ((unsigned int)(1 << RDS_FRAG_SHIFT))
 
 #define RDS_CONG_MAP_BYTES     (65536 / 8)
-#define RDS_CONG_MAP_LONGS     (RDS_CONG_MAP_BYTES / sizeof(unsigned long))
 #define RDS_CONG_MAP_PAGES     (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
 #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
 
index d952e7e..5ee0c62 100644 (file)
@@ -803,7 +803,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
 
                rose_insert_socket(sk);         /* Finish the bind */
        }
-rose_try_next_neigh:
        rose->dest_addr   = addr->srose_addr;
        rose->dest_call   = addr->srose_call;
        rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
@@ -865,12 +864,6 @@ rose_try_next_neigh:
        }
 
        if (sk->sk_state != TCP_ESTABLISHED) {
-       /* Try next neighbour */
-               rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
-               if (rose->neighbour)
-                       goto rose_try_next_neigh;
-
-               /* No more neighbours */
                sock->state = SS_UNCONNECTED;
                err = sock_error(sk);   /* Always set at this point */
                goto out_release;
index b4fdaac..88a77e9 100644 (file)
@@ -674,29 +674,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig
  *     Find a neighbour or a route given a ROSE address.
  */
 struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
-       unsigned char *diagnostic, int new)
+       unsigned char *diagnostic, int route_frame)
 {
        struct rose_neigh *res = NULL;
        struct rose_node *node;
        int failed = 0;
        int i;
 
-       if (!new) spin_lock_bh(&rose_node_list_lock);
+       if (!route_frame) spin_lock_bh(&rose_node_list_lock);
        for (node = rose_node_list; node != NULL; node = node->next) {
                if (rosecmpm(addr, &node->address, node->mask) == 0) {
                        for (i = 0; i < node->count; i++) {
-                               if (new) {
-                                       if (node->neighbour[i]->restarted) {
-                                               res = node->neighbour[i];
-                                               goto out;
-                                       }
+                               if (node->neighbour[i]->restarted) {
+                                       res = node->neighbour[i];
+                                       goto out;
                                }
-                               else {
+                       }
+               }
+       }
+       if (!route_frame) { /* connect request */
+               for (node = rose_node_list; node != NULL; node = node->next) {
+                       if (rosecmpm(addr, &node->address, node->mask) == 0) {
+                               for (i = 0; i < node->count; i++) {
                                        if (!rose_ftimer_running(node->neighbour[i])) {
                                                res = node->neighbour[i];
+                                               failed = 0;
                                                goto out;
-                                       } else
-                                               failed = 1;
+                                       }
+                                       failed = 1;
                                }
                        }
                }
@@ -711,8 +716,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
        }
 
 out:
-       if (!new) spin_unlock_bh(&rose_node_list_lock);
-
+       if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
        return res;
 }
 
index 8931500..1a2b063 100644 (file)
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
                        goto protocol_error;
                }
 
+       case RXRPC_PACKET_TYPE_ACKALL:
        case RXRPC_PACKET_TYPE_ACK:
                /* ACK processing is done in process context */
                read_lock_bh(&call->state_lock);
index 5ee16f0..d763793 100644 (file)
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
                return ret;
 
        plen -= sizeof(*token);
-       token = kmalloc(sizeof(*token), GFP_KERNEL);
+       token = kzalloc(sizeof(*token), GFP_KERNEL);
        if (!token)
                return -ENOMEM;
 
-       token->kad = kmalloc(plen, GFP_KERNEL);
+       token->kad = kzalloc(plen, GFP_KERNEL);
        if (!token->kad) {
                kfree(token);
                return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
                goto error;
 
        ret = -ENOMEM;
-       token = kmalloc(sizeof(*token), GFP_KERNEL);
+       token = kzalloc(sizeof(*token), GFP_KERNEL);
        if (!token)
                goto error;
-       token->kad = kmalloc(plen, GFP_KERNEL);
+       token->kad = kzalloc(plen, GFP_KERNEL);
        if (!token->kad)
                goto error_free;
 
index a53fb25..55b93dc 100644 (file)
@@ -36,31 +36,15 @@ static void rxrpc_destroy_peer(struct work_struct *work);
 static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
 {
        struct rtable *rt;
-       struct flowi fl;
-       int ret;
 
        peer->if_mtu = 1500;
 
-       memset(&fl, 0, sizeof(fl));
-
-       switch (peer->srx.transport.family) {
-       case AF_INET:
-               fl.oif = 0;
-               fl.proto = IPPROTO_UDP,
-               fl.fl4_dst = peer->srx.transport.sin.sin_addr.s_addr;
-               fl.fl4_src = 0;
-               fl.fl4_tos = 0;
-               /* assume AFS.CM talking to AFS.FS */
-               fl.fl_ip_sport = htons(7001);
-               fl.fl_ip_dport = htons(7000);
-               break;
-       default:
-               BUG();
-       }
-
-       ret = ip_route_output_key(&init_net, &rt, &fl);
-       if (ret < 0) {
-               _leave(" [route err %d]", ret);
+       rt = ip_route_output_ports(&init_net, NULL,
+                                  peer->srx.transport.sin.sin_addr.s_addr, 0,
+                                  htons(7000), htons(7001),
+                                  IPPROTO_UDP, 0, 0);
+       if (IS_ERR(rt)) {
+               _leave(" [route err %ld]", PTR_ERR(rt));
                return;
        }
 
index f04d4a4..a7a5583 100644 (file)
@@ -126,6 +126,17 @@ config NET_SCH_RED
          To compile this code as a module, choose M here: the
          module will be called sch_red.
 
+config NET_SCH_SFB
+       tristate "Stochastic Fair Blue (SFB)"
+       ---help---
+         Say Y here if you want to use the Stochastic Fair Blue (SFB)
+         packet scheduling algorithm.
+
+         See the top of <file:net/sched/sch_sfb.c> for more details.
+
+         To compile this code as a module, choose M here: the
+         module will be called sch_sfb.
+
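
Stochastic Fair Blue keeps, per hash bin, a marking probability that rises on queue overflow and falls while the link is idle; a flow is then marked with the minimum probability across the bins it hashes to at each level. A rough sketch of that bookkeeping follows, with constants and structure chosen arbitrarily for illustration (not the sch_sfb.c implementation):

#include <stdio.h>

#define BLUE_INC   0.02		/* illustrative step sizes only */
#define BLUE_DEC   0.002
#define SFB_LEVELS 4

/* One Blue bin: nudge its probability up on overflow, down when idle. */
static double bin_update(double p, int queue_overflowed, int link_idle)
{
	if (queue_overflowed)
		p += BLUE_INC;
	if (link_idle)
		p -= BLUE_DEC;
	if (p > 1.0)
		p = 1.0;
	if (p < 0.0)
		p = 0.0;
	return p;
}

/* SFB marks a packet with the minimum probability over the bins
 * (one per hash level) that its flow maps to. */
static double sfb_mark_prob(const double p_bins[SFB_LEVELS])
{
	double p = 1.0;
	int i;

	for (i = 0; i < SFB_LEVELS; i++)
		if (p_bins[i] < p)
			p = p_bins[i];
	return p;
}

int main(void)
{
	double bins[SFB_LEVELS] = { 0.0, 0.0, 0.0, 0.0 };
	int i, l;

	/* A misbehaving flow keeps overflowing the bins it hashes to. */
	for (i = 0; i < 10; i++)
		for (l = 0; l < SFB_LEVELS; l++)
			bins[l] = bin_update(bins[l], 1, 0);

	printf("marking probability for that flow: %.2f\n", sfb_mark_prob(bins));
	return 0;
}
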
 config NET_SCH_SFQ
        tristate "Stochastic Fairness Queueing (SFQ)"
        ---help---
@@ -205,6 +216,29 @@ config NET_SCH_DRR
 
          If unsure, say N.
 
+config NET_SCH_MQPRIO
+       tristate "Multi-queue priority scheduler (MQPRIO)"
+       help
+         Say Y here if you want to use the Multi-queue Priority scheduler.
+         This scheduler allows QoS to be offloaded on NICs that have support
+         for offloading QoS schedulers.
+
+         To compile this driver as a module, choose M here: the module will
+         be called sch_mqprio.
+
+         If unsure, say N.
+
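
Conceptually, mqprio maps a packet's priority to a traffic class and each traffic class to a contiguous block of hardware transmit queues, so the NIC can schedule the classes itself. A simplified sketch of that two-step lookup; the mapping tables below are invented for illustration and are not taken from sch_mqprio.c:

#include <stdio.h>

#define NUM_TC 3

/* Hypothetical priority -> traffic-class map (16 priorities). */
static const unsigned char prio_tc_map[16] = {
	0, 0, 0, 0,	/* priorities 0-3  -> TC 0 */
	1, 1, 1, 1,	/* priorities 4-7  -> TC 1 */
	2, 2, 2, 2,	/* priorities 8-11 -> TC 2 */
	0, 0, 0, 0,
};

/* Each traffic class owns a contiguous block of hardware TX queues. */
static const struct { unsigned int offset, count; } tc_queues[NUM_TC] = {
	{ 0, 4 }, { 4, 2 }, { 6, 2 },
};

static unsigned int select_txq(unsigned int skb_priority, unsigned int flow_hash)
{
	unsigned int tc = prio_tc_map[skb_priority & 15];

	return tc_queues[tc].offset + (flow_hash % tc_queues[tc].count);
}

int main(void)
{
	printf("priority 5, hash 7 -> txq %u\n", select_txq(5, 7));
	return 0;
}
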
+config NET_SCH_CHOKE
+       tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
+       help
+         Say Y here if you want to use the CHOKe packet scheduler (CHOose
+         and Keep for responsive flows, CHOose and Kill for unresponsive
+         flows). This is a variation of RED which tries to penalize flows
+         that monopolize the queue.
+
+         To compile this code as a module, choose M here: the
+         module will be called sch_choke.
+
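
As the help text says, CHOKe compares an arriving packet against a randomly chosen packet already in the queue once the queue is congested, and drops both when they belong to the same flow. A condensed sketch of that admission test (illustrative only, not sch_choke.c):

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for illustration; these do not mirror the qdisc's
 * actual data structures. */
struct pkt {
	unsigned int flow_id;
};

struct queue {
	struct pkt *slots;
	unsigned int len;
	unsigned int min_thresh;	/* RED-style minimum threshold */
};

/* Returns 1 to enqueue the arriving packet, 0 to drop it.  Past the
 * minimum threshold, the arrival is compared with a random victim
 * already queued; matching flows lose both packets, which statistically
 * hits the flows hogging the queue. */
static int choke_admit(struct queue *q, const struct pkt *arriving)
{
	if (q->len > q->min_thresh) {
		unsigned int victim = (unsigned int)rand() % q->len;

		if (q->slots[victim].flow_id == arriving->flow_id) {
			q->slots[victim] = q->slots[--q->len];	/* drop victim too */
			return 0;
		}
		/* Otherwise a RED-style probabilistic drop would apply (omitted). */
	}
	return 1;
}

int main(void)
{
	struct pkt backlog[8] = {
		{1}, {1}, {1}, {2}, {1}, {3}, {1}, {1}	/* flow 1 hogs the queue */
	};
	struct queue q = { backlog, 8, 4 };
	struct pkt arrival = { 1 };
	int admit = choke_admit(&q, &arrival);

	printf("enqueue arrival? %d (backlog now %u packets)\n", admit, q.len);
	return 0;
}
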
 config NET_SCH_INGRESS
        tristate "Ingress Qdisc"
        depends on NET_CLS_ACT
@@ -243,7 +277,7 @@ config NET_CLS_TCINDEX
 
 config NET_CLS_ROUTE4
        tristate "Routing decision (ROUTE)"
-       select NET_CLS_ROUTE
+       select IP_ROUTE_CLASSID
        select NET_CLS
        ---help---
          If you say Y here, you will be able to classify packets
@@ -252,9 +286,6 @@ config NET_CLS_ROUTE4
          To compile this code as a module, choose M here: the
          module will be called cls_route.
 
-config NET_CLS_ROUTE
-       bool
-
 config NET_CLS_FW
        tristate "Netfilter mark (FW)"
        select NET_CLS
index 960f5db..2e77b8d 100644 (file)
@@ -24,6 +24,7 @@ obj-$(CONFIG_NET_SCH_RED)     += sch_red.o
 obj-$(CONFIG_NET_SCH_GRED)     += sch_gred.o
 obj-$(CONFIG_NET_SCH_INGRESS)  += sch_ingress.o 
 obj-$(CONFIG_NET_SCH_DSMARK)   += sch_dsmark.o
+obj-$(CONFIG_NET_SCH_SFB)      += sch_sfb.o
 obj-$(CONFIG_NET_SCH_SFQ)      += sch_sfq.o
 obj-$(CONFIG_NET_SCH_TBF)      += sch_tbf.o
 obj-$(CONFIG_NET_SCH_TEQL)     += sch_teql.o
@@ -32,6 +33,9 @@ obj-$(CONFIG_NET_SCH_MULTIQ)  += sch_multiq.o
 obj-$(CONFIG_NET_SCH_ATM)      += sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)    += sch_netem.o
 obj-$(CONFIG_NET_SCH_DRR)      += sch_drr.o
+obj-$(CONFIG_NET_SCH_MQPRIO)   += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_CHOKE)    += sch_choke.o
+
 obj-$(CONFIG_NET_CLS_U32)      += cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)   += cls_route.o
 obj-$(CONFIG_NET_CLS_FW)       += cls_fw.o
index 23b25f8..15873e1 100644 (file)
@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tc_action *a, struct tcf_hashinfo *hinfo)
 {
        struct tcf_common *p;
-       int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+       int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
        struct nlattr *nest;
 
        read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
 {
        struct tcf_common *p, *s_p;
        struct nlattr *nest;
-       int i= 0, n_i = 0;
+       int i = 0, n_i = 0;
 
        nest = nla_nest_start(skb, a->order);
        if (nest == NULL)
@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
                while (p != NULL) {
                        s_p = p->tcfc_next;
                        if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
-                                module_put(a->ops->owner);
+                               module_put(a->ops->owner);
                        n_i++;
                        p = s_p;
                }
@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+       err = tcf_action_dump_old(skb, a, bind, ref);
+       if (err > 0) {
                nla_nest_end(skb, nest);
                return err;
        }
@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
        struct tc_action *a;
        struct tc_action_ops *a_o;
        char act_name[IFNAMSIZ];
-       struct nlattr *tb[TCA_ACT_MAX+1];
+       struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
        int err;
 
@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
                goto err_free;
 
        /* module count goes up only when brand new policy is created
-          if it exists and is only bound to in a_o->init() then
-          ACT_P_CREATED is not returned (a zero is).
-       */
+        * if it exists and is only bound to in a_o->init() then
+        * ACT_P_CREATED is not returned (a zero is).
+        */
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);
        a->ops = a_o;
@@ -569,7 +570,7 @@ err_out:
 struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
                                  char *name, int ovr, int bind)
 {
-       struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+       struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *head = NULL, *act, *act_prev = NULL;
        int err;
        int i;
@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
 static struct tc_action *
 tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 {
-       struct nlattr *tb[TCA_ACT_MAX+1];
+       struct nlattr *tb[TCA_ACT_MAX + 1];
        struct tc_action *a;
        int index;
        int err;
@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        struct tcamsg *t;
        struct netlink_callback dcb;
        struct nlattr *nest;
-       struct nlattr *tb[TCA_ACT_MAX+1];
+       struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
        struct tc_action *a = create_a(0);
        int err = -ENOMEM;
@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(a->ops->owner);
        kfree(a);
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+                            n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                return 0;
 
@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
              u32 pid, int event)
 {
        int i, ret;
-       struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+       struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *head = NULL, *act, *act_prev = NULL;
 
        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
        if (ret < 0)
                return ret;
 
-       if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+       if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
                if (tb[1] != NULL)
                        return tca_action_flush(net, tb[1], n, pid);
                else
@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                /* now do the delete */
                tcf_action_destroy(head, 0);
                ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
-                                    n->nlmsg_flags&NLM_F_ECHO);
+                                    n->nlmsg_flags & NLM_F_ECHO);
                if (ret > 0)
                        return 0;
                return ret;
@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
        /* dump then free all the actions after update; inserted policy
         * stays intact
-        * */
+        */
        ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
        for (a = act; a; a = act) {
                act = a->next;
@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                return -EINVAL;
        }
 
-       /* n->nlmsg_flags&NLM_F_CREATE
-        * */
+       /* n->nlmsg_flags & NLM_F_CREATE */
        switch (n->nlmsg_type) {
        case RTM_NEWACTION:
                /* we are going to assume all other flags
@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                 * but since we want avoid ambiguity (eg when flags
                 * is zero) then just set this
                 */
-               if (n->nlmsg_flags&NLM_F_REPLACE)
+               if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
 replay:
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@ replay:
 static struct nlattr *
 find_dump_kind(const struct nlmsghdr *n)
 {
-       struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+       struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct nlattr *nla[TCAA_MAX + 1];
        struct nlattr *kind;
@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
        }
 
        a_o = tc_lookup_action(kind);
-       if (a_o == NULL) {
+       if (a_o == NULL)
                return 0;
-       }
 
        memset(&a, 0, sizeof(struct tc_action));
        a.ops = a_o;
index 83ddfc0..6cdf9ab 100644 (file)
@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
        if (nla == NULL)
                return -EINVAL;
 
-       err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
+       err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
        if (err < 0)
                return err;
 
index c2ed90a..2b4ab4b 100644 (file)
@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
 }
 
 typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
 #endif /* CONFIG_GACT_PROB */
 
 static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
                                     bind, &gact_idx_gen, &gact_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (!ovr) {
@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
 static int __init gact_init_module(void)
 {
 #ifdef CONFIG_GACT_PROB
-       printk(KERN_INFO "GACT probability on\n");
+       pr_info("GACT probability on\n");
 #else
-       printk(KERN_INFO "GACT probability NOT on\n");
+       pr_info("GACT probability NOT on\n");
 #endif
        return tcf_register_action(&act_gact_ops);
 }
index c2a7c20..9fc211a 100644 (file)
@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
                                     &ipt_idx_gen, &ipt_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (!ovr) {
@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
        if (unlikely(!t))
                goto err2;
 
-       if ((err = ipt_init_target(t, tname, hook)) < 0)
+       err = ipt_init_target(t, tname, hook);
+       if (err < 0)
                goto err3;
 
        spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
        bstats_update(&ipt->tcf_bstats, skb);
 
        /* yes, we have to worry about both in and out dev
-        worry later - danger - this API seems to have changed
-        from earlier kernels */
+        * worry later - danger - this API seems to have changed
+        * from earlier kernels
+        */
        par.in       = skb->dev;
        par.out      = NULL;
        par.hooknum  = ipt->tcfi_hook;
@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        struct tc_cnt c;
 
        /* for simple targets kernel size == user size
-       ** user name = target name
-       ** for foolproof you need to not assume this
-       */
+        * user name = target name
+        * for foolproof you need to not assume this
+        */
 
        t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
        if (unlikely(!t))
index d765067..961386e 100644 (file)
@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
        .lock   =       &mirred_lock,
 };
 
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
 {
        if (m) {
                if (bind)
                        m->tcf_bindcnt--;
                m->tcf_refcnt--;
-               if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+               if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
                        list_del(&m->tcfm_list);
                        if (m->tcfm_dev)
                                dev_put(m->tcfm_dev);
index 178a4bd..762b027 100644 (file)
@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
                                     &nat_idx_gen, &nat_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                p = to_tcf_nat(pc);
                ret = ACT_P_CREATED;
        } else {
index 445bef7..50c7c06 100644 (file)
@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
                                     &pedit_idx_gen, &pedit_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                p = to_pedit(pc);
                keys = kmalloc(ksize, GFP_KERNEL);
                if (keys == NULL) {
@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
        int i, munged = 0;
        unsigned int off;
 
-       if (skb_cloned(skb)) {
-               if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-                       return p->tcf_action;
-               }
-       }
+       if (skb_cloned(skb) &&
+           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+               return p->tcf_action;
 
        off = skb_network_offset(skb);
 
index e2f08b1..8a16307 100644 (file)
@@ -22,8 +22,8 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-#define L2T(p,L)   qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L)   qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
 
 #define POL_TAB_MASK     15
 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
 };
 
 /* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
        u32                     index;
        int                     action;
        u32                     limit;
@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
                                 struct tc_action *a, int ovr, int bind)
 {
-       unsigned h;
+       unsigned int h;
        int ret = 0, err;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
index 7287cff..a34a22d 100644 (file)
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
        /* print policy string followed by _ then packet count
         * Example if this was the 3rd packet and the string was "hello"
         * then it would look like "hello_3" (without quotes)
-        **/
+        */
        pr_info("simple: %s_%d\n",
               (char *)d->tcfd_defdata, d->tcf_bstats.packets);
        spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
                                     &simp_idx_gen, &simp_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
 
                d = to_defact(pc);
                ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
        return ret;
 }
 
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
 {
        struct tcf_defact *d = a->priv;
 
@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
        return 0;
 }
 
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
-                               int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+                        int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_defact *d = a->priv;
index 836f5fe..5f6f0c7 100644 (file)
@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
                                     &skbedit_idx_gen, &skbedit_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
 
                d = to_skbedit(pc);
                ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
        return ret;
 }
 
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
 {
        struct tcf_skbedit *d = a->priv;
 
@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
        return 0;
 }
 
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
-                               int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+                           int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = a->priv;
index 5fd0c28..bb2c523 100644 (file)
@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
        int rc = -ENOENT;
 
        write_lock(&cls_mod_lock);
-       for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+       for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
                if (t == ops)
                        break;
 
@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
        u32 first = TC_H_MAKE(0xC0000000U, 0U);
 
        if (tp)
-               first = tp->prio-1;
+               first = tp->prio - 1;
 
        return first;
 }
@@ -149,7 +149,8 @@ replay:
 
        if (prio == 0) {
                /* If no priority is given, user wants we allocated it. */
-               if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+               if (n->nlmsg_type != RTM_NEWTFILTER ||
+                   !(n->nlmsg_flags & NLM_F_CREATE))
                        return -ENOENT;
                prio = TC_H_MAKE(0x80000000U, 0U);
        }
@@ -176,7 +177,8 @@ replay:
        }
 
        /* Is it classful? */
-       if ((cops = q->ops->cl_ops) == NULL)
+       cops = q->ops->cl_ops;
+       if (!cops)
                return -EINVAL;
 
        if (cops->tcf_chain == NULL)
@@ -196,10 +198,11 @@ replay:
                goto errout;
 
        /* Check the chain for existence of proto-tcf with this priority */
-       for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+       for (back = chain; (tp = *back) != NULL; back = &tp->next) {
                if (tp->prio >= prio) {
                        if (tp->prio == prio) {
-                               if (!nprio || (tp->protocol != protocol && protocol))
+                               if (!nprio ||
+                                   (tp->protocol != protocol && protocol))
                                        goto errout;
                        } else
                                tp = NULL;
@@ -216,7 +219,8 @@ replay:
                        goto errout;
 
                err = -ENOENT;
-               if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+               if (n->nlmsg_type != RTM_NEWTFILTER ||
+                   !(n->nlmsg_flags & NLM_F_CREATE))
                        goto errout;
 
 
@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 
        if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
                return skb->len;
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return skb->len;
 
        if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        if (!q)
                goto out;
-       if ((cops = q->ops->cl_ops) == NULL)
+       cops = q->ops->cl_ops;
+       if (!cops)
                goto errout;
        if (cops->tcf_chain == NULL)
                goto errout;
@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 
        s_t = cb->args[0];
 
-       for (tp=*chain, t=0; tp; tp = tp->next, t++) {
-               if (t < s_t) continue;
+       for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+               if (t < s_t)
+                       continue;
                if (TC_H_MAJ(tcm->tcm_info) &&
                    TC_H_MAJ(tcm->tcm_info) != tp->prio)
                        continue;
@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                arg.skb = skb;
                arg.cb = cb;
                arg.w.stop = 0;
-               arg.w.skip = cb->args[1]-1;
+               arg.w.skip = cb->args[1] - 1;
                arg.w.count = 0;
                tp->ops->walk(tp, &arg.w);
-               cb->args[1] = arg.w.count+1;
+               cb->args[1] = arg.w.count + 1;
                if (arg.w.stop)
                        break;
        }
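
A second pattern repeated throughout cls_api.c (and the classifier files that follow) is hoisting assignments out of "if" conditions: if ((x = f()) == NULL) becomes a plain assignment followed by if (!x). A small userspace sketch of the same transformation; lookup() and the 16-byte allocation are arbitrary stand-ins, not kernel APIs:

#include <stdlib.h>

/* arbitrary stand-in for a lookup that may fail */
static void *lookup(int key)
{
	return key ? malloc(16) : NULL;
}

static int use_entry(int key)
{
	void *e;

	/* old: if ((e = lookup(key)) == NULL) return -1; */
	e = lookup(key);
	if (!e)
		return -1;

	free(e);
	return 0;
}

int main(void)
{
	return use_entry(1);
}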
index f23d915..8be8872 100644 (file)
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct basic_head
-{
+struct basic_head {
        u32                     hgenerator;
        struct list_head        flist;
 };
 
-struct basic_filter
-{
+struct basic_filter {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
        return 0;
 }
 
-static inline void basic_delete_filter(struct tcf_proto *tp,
-                                      struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
        [TCA_BASIC_EMATCHES]    = { .type = NLA_NESTED },
 };
 
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
-                                 unsigned long base, struct nlattr **tb,
-                                 struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+                          unsigned long base, struct nlattr **tb,
+                          struct nlattr *est)
 {
        int err = -EINVAL;
        struct tcf_exts e;
@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
                } while (--i > 0 && basic_get(tp, head->hgenerator));
 
                if (i <= 0) {
-                       printk(KERN_ERR "Insufficient number of handles\n");
+                       pr_err("Insufficient number of handles\n");
                        goto errout;
                }
 
index d49c40f..32a3351 100644 (file)
@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 {
        struct cgroup_cls_state *cs;
 
-       if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+       cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+       if (!cs)
                return ERR_PTR(-ENOMEM);
 
        if (cgrp->parent)
@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
        return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
 }
 
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
 {
-       struct nlattr *tb[TCA_CGROUP_MAX+1];
+       struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = tp->root;
        struct tcf_ematch_tree t;
        struct tcf_exts e;
index 5b271a1..8ec0139 100644 (file)
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
                if (!pskb_network_may_pull(skb, sizeof(*iph)))
                        break;
                iph = ip_hdr(skb);
-               if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+               if (iph->frag_off & htons(IP_MF | IP_OFFSET))
                        break;
                poff = proto_ports_offset(iph->protocol);
                if (poff >= 0 &&
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
                if (!pskb_network_may_pull(skb, sizeof(*iph)))
                        break;
                iph = ip_hdr(skb);
-               if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+               if (iph->frag_off & htons(IP_MF | IP_OFFSET))
                        break;
                poff = proto_ports_offset(iph->protocol);
                if (poff >= 0 &&
@@ -276,7 +276,7 @@ fallback:
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (skb_dst(skb))
                return skb_dst(skb)->tclassid;
 #endif
index 93b0a7b..26e7bc4 100644 (file)
 
 #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
 
-struct fw_head
-{
+struct fw_head {
        struct fw_filter *ht[HTSIZE];
        u32 mask;
 };
 
-struct fw_filter
-{
+struct fw_filter {
        struct fw_filter        *next;
        u32                     id;
        struct tcf_result       res;
@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
        .police = TCA_FW_POLICE
 };
 
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
 {
        if (HTSIZE == 4096)
                return ((handle >> 24) & 0xFFF) ^
@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
 static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
                          struct tcf_result *res)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f;
        int r;
        u32 id = skb->mark;
 
        if (head != NULL) {
                id &= head->mask;
-               for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+               for (f = head->ht[fw_hash(id)]; f; f = f->next) {
                        if (f->id == id) {
                                *res = f->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
                }
        } else {
                /* old method */
-               if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+               if (id && (TC_H_MAJ(id) == 0 ||
+                          !(TC_H_MAJ(id ^ tp->q->handle)))) {
                        res->classid = id;
                        res->class = 0;
                        return 0;
@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
 
 static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f;
 
        if (head == NULL)
                return 0;
 
-       for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+       for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
                if (f->id == handle)
                        return (unsigned long)f;
        }
@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
        return 0;
 }
 
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
        if (head == NULL)
                return;
 
-       for (h=0; h<HTSIZE; h++) {
-               while ((f=head->ht[h]) != NULL) {
+       for (h = 0; h < HTSIZE; h++) {
+               while ((f = head->ht[h]) != NULL) {
                        head->ht[h] = f->next;
                        fw_delete_filter(tp, f);
                }
@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
 
 static int fw_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
-       struct fw_filter *f = (struct fw_filter*)arg;
+       struct fw_head *head = (struct fw_head *)tp->root;
+       struct fw_filter *f = (struct fw_filter *)arg;
        struct fw_filter **fp;
 
        if (head == NULL || f == NULL)
                goto out;
 
-       for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+       for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
                     struct nlattr **tca,
                     unsigned long *arg)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f = (struct fw_filter *) *arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@ errout:
 
 static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        int h;
 
        if (head == NULL)
@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
                   struct sk_buff *skb, struct tcmsg *t)
 {
        struct fw_head *head = (struct fw_head *)tp->root;
-       struct fw_filter *f = (struct fw_filter*)fh;
+       struct fw_filter *f = (struct fw_filter *)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
 
index 694dcd8..a907905 100644 (file)
 #include <net/pkt_cls.h>
 
 /*
- 1. For now we assume that route tags < 256.
-    It allows to use direct table lookups, instead of hash tables.
- 2. For now we assume that "from TAG" and "fromdev DEV" statements
-    are mutually  exclusive.
- 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ *    It allows to use direct table lookups, instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ *    are mutually  exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
  */
 
-struct route4_fastmap
-{
+struct route4_fastmap {
        struct route4_filter    *filter;
        u32                     id;
        int                     iif;
 };
 
-struct route4_head
-{
+struct route4_head {
        struct route4_fastmap   fastmap[16];
-       struct route4_bucket    *table[256+1];
+       struct route4_bucket    *table[256 + 1];
 };
 
-struct route4_bucket
-{
+struct route4_bucket {
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
-       struct route4_filter    *ht[16+16+1];
+       struct route4_filter    *ht[16 + 16 + 1];
 };
 
-struct route4_filter
-{
+struct route4_filter {
        struct route4_filter    *next;
        u32                     id;
        int                     iif;
@@ -61,20 +57,20 @@ struct route4_filter
        struct route4_bucket    *bkt;
 };
 
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
 
 static const struct tcf_ext_map route_ext_map = {
        .police = TCA_ROUTE4_POLICE,
        .action = TCA_ROUTE4_ACT
 };
 
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
 {
-       return id&0xF;
+       return id & 0xF;
 }
 
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
        spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
 
@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
        spin_unlock_bh(root_lock);
 }
 
-static inline void
+static void
 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
 {
        int h = route4_fastmap_hash(id, iif);
+
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
 }
 
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
 {
-       return id&0xFF;
+       return id & 0xFF;
 }
 
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
 {
-       return (id>>16)&0xF;
+       return (id >> 16) & 0xF;
 }
 
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
 {
-       return 16 + ((iif>>16)&0xF);
+       return 16 + ((iif >> 16) & 0xF);
 }
 
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
 {
        return 32;
 }
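
In the helpers above, route4_hash_to() picks one of the 256 buckets from the destination realm (low 8 bits of the route classid), while route4_hash_from(), route4_hash_iif() and route4_hash_wild() pick one of the 33 slots inside that bucket (16 source-realm slots, 16 interface slots, one wildcard). A standalone sketch applying the same formulas to a sample classid; the concrete values are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* same formulas as route4_hash_to/_from/_iif above */
static unsigned int hash_to(uint32_t id)   { return id & 0xFF; }
static unsigned int hash_from(uint32_t id) { return (id >> 16) & 0xF; }
static unsigned int hash_iif(int iif)      { return 16 + ((iif >> 16) & 0xF); }

int main(void)
{
	uint32_t id = 0x00070003;	/* illustrative classid: source realm 7, dest realm 3 */

	printf("to bucket = %u\n", hash_to(id));		/* 3  */
	printf("from slot = %u\n", hash_from(id));		/* 7  */
	printf("iif slot  = %u\n", hash_iif(0x00050000));	/* 16 + 5 = 21 */
	return 0;
}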
@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
                           struct tcf_result *res)
 {
-       struct route4_head *head = (struct route4_head*)tp->root;
+       struct route4_head *head = (struct route4_head *)tp->root;
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;
 
-       if ((dst = skb_dst(skb)) == NULL)
+       dst = skb_dst(skb);
+       if (!dst)
                goto failure;
 
        id = dst->tclassid;
        if (head == NULL)
                goto old_method;
 
-       iif = ((struct rtable*)dst)->fl.iif;
+       iif = ((struct rtable *)dst)->rt_iif;
 
        h = route4_fastmap_hash(id, iif);
        if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
        h = route4_hash_to(id);
 
 restart:
-       if ((b = head->table[h]) != NULL) {
+       b = head->table[h];
+       if (b) {
                for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@ old_method:
 
 static inline u32 to_hash(u32 id)
 {
-       u32 h = id&0xFF;
-       if (id&0x8000)
+       u32 h = id & 0xFF;
+
+       if (id & 0x8000)
                h += 256;
        return h;
 }
@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
-               return id&0xF;
+               return id & 0xF;
        }
-       return 16 + (id&0xF);
+       return 16 + (id & 0xF);
 }
 
 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
 {
-       struct route4_head *head = (struct route4_head*)tp->root;
+       struct route4_head *head = (struct route4_head *)tp->root;
        struct route4_bucket *b;
        struct route4_filter *f;
-       unsigned h1, h2;
+       unsigned int h1, h2;
 
        if (!head)
                return 0;
@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
        if (h1 > 256)
                return 0;
 
-       h2 = from_hash(handle>>16);
+       h2 = from_hash(handle >> 16);
        if (h2 > 32)
                return 0;
 
-       if ((b = head->table[h1]) != NULL) {
+       b = head->table[h1];
+       if (b) {
                for (f = b->ht[h2]; f; f = f->next)
                        if (f->handle == handle)
                                return (unsigned long)f;
@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
        return 0;
 }
 
-static inline void
+static void
 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
        if (head == NULL)
                return;
 
-       for (h1=0; h1<=256; h1++) {
+       for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;
 
-               if ((b = head->table[h1]) != NULL) {
-                       for (h2=0; h2<=32; h2++) {
+               b = head->table[h1];
+               if (b) {
+                       for (h2 = 0; h2 <= 32; h2++) {
                                struct route4_filter *f;
 
                                while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
 
 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct route4_head *head = (struct route4_head*)tp->root;
-       struct route4_filter **fp, *f = (struct route4_filter*)arg;
-       unsigned h = 0;
+       struct route4_head *head = (struct route4_head *)tp->root;
+       struct route4_filter **fp, *f = (struct route4_filter *)arg;
+       unsigned int h = 0;
        struct route4_bucket *b;
        int i;
 
@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
        h = f->handle;
        b = f->bkt;
 
-       for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+       for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 
                        /* Strip tree */
 
-                       for (i=0; i<=32; i++)
+                       for (i = 0; i <= 32; i++)
                                if (b->ht[i])
                                        return 0;
 
@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
        }
 
        h1 = to_hash(nhandle);
-       if ((b = head->table[h1]) == NULL) {
+       b = head->table[h1];
+       if (!b) {
                err = -ENOBUFS;
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
                tcf_tree_unlock(tp);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);
+
                err = -EEXIST;
                for (fp = b->ht[h2]; fp; fp = fp->next)
                        if (fp->handle == f->handle)
@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
        if (err < 0)
                return err;
 
-       if ((f = (struct route4_filter*)*arg) != NULL) {
+       f = (struct route4_filter *)*arg;
+       if (f) {
                if (f->handle != handle && handle)
                        return -EINVAL;
 
@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
 
 reinsert:
        h = from_hash(f->handle >> 16);
-       for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+       for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
                if (f->handle < f1->handle)
                        break;
 
@@ -492,7 +497,8 @@ reinsert:
        if (old_handle && f->handle != old_handle) {
                th = to_hash(old_handle);
                h = from_hash(old_handle >> 16);
-               if ((b = head->table[th]) != NULL) {
+               b = head->table[th];
+               if (b) {
                        for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
                                if (*fp == f) {
                                        *fp = f->next;
@@ -515,7 +521,7 @@ errout:
 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct route4_head *head = tp->root;
-       unsigned h, h1;
+       unsigned int h, h1;
 
        if (head == NULL)
                arg->stop = 1;
@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
 {
-       struct route4_filter *f = (struct route4_filter*)fh;
+       struct route4_filter *f = (struct route4_filter *)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        u32 id;
@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (!(f->handle&0x8000)) {
-               id = f->id&0xFF;
+       if (!(f->handle & 0x8000)) {
+               id = f->id & 0xFF;
                NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
        }
-       if (f->handle&0x80000000) {
-               if ((f->handle>>16) != 0xFFFF)
+       if (f->handle & 0x80000000) {
+               if ((f->handle >> 16) != 0xFFFF)
                        NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
        } else {
-               id = f->id>>16;
+               id = f->id >> 16;
                NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
        }
        if (f->res.classid)
index 425a179..402c44b 100644 (file)
    powerful classification engine.  */
 
 
-struct rsvp_head
-{
+struct rsvp_head {
        u32                     tmap[256/32];
        u32                     hgenerator;
        u8                      tgenerator;
        struct rsvp_session     *ht[256];
 };
 
-struct rsvp_session
-{
+struct rsvp_session {
        struct rsvp_session     *next;
        __be32                  dst[RSVP_DST_LEN];
        struct tc_rsvp_gpi      dpi;
        u8                      protocol;
        u8                      tunnelid;
        /* 16 (src,sport) hash slots, and one wildcard source slot */
-       struct rsvp_filter      *ht[16+1];
+       struct rsvp_filter      *ht[16 + 1];
 };
 
 
-struct rsvp_filter
-{
+struct rsvp_filter {
        struct rsvp_filter      *next;
        __be32                  src[RSVP_DST_LEN];
        struct tc_rsvp_gpi      spi;
@@ -100,17 +97,19 @@ struct rsvp_filter
        struct rsvp_session     *sess;
 };
 
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
 {
-       unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+       unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
        h ^= h>>16;
        h ^= h>>8;
        return (h ^ protocol ^ tunnelid) & 0xFF;
 }
 
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
 {
-       unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+       unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
        h ^= h>>16;
        h ^= h>>8;
        h ^= h>>4;
@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
 static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
                         struct tcf_result *res)
 {
-       struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+       struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
        struct rsvp_session *s;
        struct rsvp_filter *f;
-       unsigned h1, h2;
+       unsigned int h1, h2;
        __be32 *dst, *src;
        u8 protocol;
        u8 tunnelid = 0;
@@ -162,13 +161,13 @@ restart:
        src = &nhptr->saddr.s6_addr32[0];
        dst = &nhptr->daddr.s6_addr32[0];
        protocol = nhptr->nexthdr;
-       xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+       xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
 #else
        src = &nhptr->saddr;
        dst = &nhptr->daddr;
        protocol = nhptr->protocol;
-       xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
-       if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+       xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+       if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
                return -1;
 #endif
 
@@ -176,10 +175,10 @@ restart:
        h2 = hash_src(src);
 
        for (s = sht[h1]; s; s = s->next) {
-               if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+               if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
                    protocol == s->protocol &&
                    !(s->dpi.mask &
-                     (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+                     (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
 #if RSVP_DST_LEN == 4
                    dst[0] == s->dst[0] &&
                    dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@ restart:
                    tunnelid == s->tunnelid) {
 
                        for (f = s->ht[h2]; f; f = f->next) {
-                               if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
-                                   !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+                               if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+                                   !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
 #if RSVP_DST_LEN == 4
                                    &&
                                    src[0] == f->src[0] &&
@@ -205,7 +204,7 @@ matched:
                                                return 0;
 
                                        tunnelid = f->res.classid;
-                                       nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+                                       nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
                                        goto restart;
                                }
                        }
@@ -224,11 +223,11 @@ matched:
 
 static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
 {
-       struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+       struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
        struct rsvp_session *s;
        struct rsvp_filter *f;
-       unsigned h1 = handle&0xFF;
-       unsigned h2 = (handle>>8)&0xFF;
+       unsigned int h1 = handle & 0xFF;
+       unsigned int h2 = (handle >> 8) & 0xFF;
 
        if (h2 > 16)
                return 0;
@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
        return -ENOBUFS;
 }
 
-static inline void
+static void
 rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
 
        sht = data->ht;
 
-       for (h1=0; h1<256; h1++) {
+       for (h1 = 0; h1 < 256; h1++) {
                struct rsvp_session *s;
 
                while ((s = sht[h1]) != NULL) {
                        sht[h1] = s->next;
 
-                       for (h2=0; h2<=16; h2++) {
+                       for (h2 = 0; h2 <= 16; h2++) {
                                struct rsvp_filter *f;
 
                                while ((f = s->ht[h2]) != NULL) {
@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
 
 static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
-       unsigned h = f->handle;
+       struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+       unsigned int h = f->handle;
        struct rsvp_session **sp;
        struct rsvp_session *s = f->sess;
        int i;
 
-       for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+       for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
 
                        /* Strip tree */
 
-                       for (i=0; i<=16; i++)
+                       for (i = 0; i <= 16; i++)
                                if (s->ht[i])
                                        return 0;
 
                        /* OK, session has no flows */
-                       for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+                       for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
                             *sp; sp = &(*sp)->next) {
                                if (*sp == s) {
                                        tcf_tree_lock(tp);
@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
 {
        struct rsvp_head *data = tp->root;
        int i = 0xFFFF;
 
        while (i-- > 0) {
                u32 h;
+
                if ((data->hgenerator += 0x10000) == 0)
                        data->hgenerator = 0x10000;
                h = data->hgenerator|salt;
@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
 
 static int tunnel_bts(struct rsvp_head *data)
 {
-       int n = data->tgenerator>>5;
-       u32 b = 1<<(data->tgenerator&0x1F);
+       int n = data->tgenerator >> 5;
+       u32 b = 1 << (data->tgenerator & 0x1F);
 
-       if (data->tmap[n]&b)
+       if (data->tmap[n] & b)
                return 0;
        data->tmap[n] |= b;
        return 1;
@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
 
        memset(tmap, 0, sizeof(tmap));
 
-       for (h1=0; h1<256; h1++) {
+       for (h1 = 0; h1 < 256; h1++) {
                struct rsvp_session *s;
                for (s = sht[h1]; s; s = s->next) {
-                       for (h2=0; h2<=16; h2++) {
+                       for (h2 = 0; h2 <= 16; h2++) {
                                struct rsvp_filter *f;
 
                                for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
 {
        int i, k;
 
-       for (k=0; k<2; k++) {
-               for (i=255; i>0; i--) {
+       for (k = 0; k < 2; k++) {
+               for (i = 255; i > 0; i--) {
                        if (++data->tgenerator == 0)
                                data->tgenerator = 1;
                        if (tunnel_bts(data))
@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        struct nlattr *opt = tca[TCA_OPTIONS-1];
        struct nlattr *tb[TCA_RSVP_MAX + 1];
        struct tcf_exts e;
-       unsigned h1, h2;
+       unsigned int h1, h2;
        __be32 *dst;
        int err;
 
@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        if (err < 0)
                return err;
 
-       if ((f = (struct rsvp_filter*)*arg) != NULL) {
+       f = (struct rsvp_filter *)*arg;
+       if (f) {
                /* Node exists: adjust only classid */
 
                if (f->handle != handle && handle)
@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
                        goto errout;
        }
 
-       for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+       for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
                if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
                    pinfo && pinfo->protocol == s->protocol &&
                    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@ insert:
                        tcf_exts_change(tp, &f->exts, &e);
 
                        for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
-                               if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+                               if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
                                        break;
                        f->next = *fp;
                        wmb();
@@ -567,7 +568,7 @@ errout2:
 static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct rsvp_head *head = tp->root;
-       unsigned h, h1;
+       unsigned int h, h1;
 
        if (arg->stop)
                return;
@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct rsvp_filter *f = (struct rsvp_filter*)fh;
+       struct rsvp_filter *f = (struct rsvp_filter *)fh;
        struct rsvp_session *s;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
        NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
        if (f->res.classid)
                NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-       if (((f->handle>>8)&0xFF) != 16)
+       if (((f->handle >> 8) & 0xFF) != 16)
                NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
 
        if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
index 20ef330..36667fa 100644 (file)
@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
                 * of the hashing index is below the threshold.
                 */
                if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
-                       cp.hash = (cp.mask >> cp.shift)+1;
+                       cp.hash = (cp.mask >> cp.shift) + 1;
                else
                        cp.hash = DEFAULT_HASH_SIZE;
        }
index b0c2a82..3b93fc0 100644 (file)
@@ -42,8 +42,7 @@
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct tc_u_knode
-{
+struct tc_u_knode {
        struct tc_u_knode       *next;
        u32                     handle;
        struct tc_u_hnode       *ht_up;
@@ -63,19 +62,17 @@ struct tc_u_knode
        struct tc_u32_sel       sel;
 };
 
-struct tc_u_hnode
-{
+struct tc_u_hnode {
        struct tc_u_hnode       *next;
        u32                     handle;
        u32                     prio;
        struct tc_u_common      *tp_c;
        int                     refcnt;
-       unsigned                divisor;
+       unsigned int            divisor;
        struct tc_u_knode       *ht[1];
 };
 
-struct tc_u_common
-{
+struct tc_u_common {
        struct tc_u_hnode       *hlist;
        struct Qdisc            *q;
        int                     refcnt;
@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
        .police = TCA_U32_POLICE
 };
 
-static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
+static inline unsigned int u32_hash_fold(__be32 key,
+                                        const struct tc_u32_sel *sel,
+                                        u8 fshift)
 {
-       unsigned h = ntohl(key & sel->hmask)>>fshift;
+       unsigned int h = ntohl(key & sel->hmask) >> fshift;
 
        return h;
 }
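
u32_hash_fold() picks a hash bucket by masking the 32-bit word read from the packet header with the selector's hmask and shifting the host-order result right by fshift; the classifier then ANDs this with the hash table's divisor (omitted below). A standalone userspace sketch of the same arithmetic; the address, mask and shift are arbitrary examples:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* same computation as u32_hash_fold(): ntohl(key & hmask) >> fshift */
static unsigned int hash_fold(uint32_t key_be, uint32_t hmask_be, uint8_t fshift)
{
	return ntohl(key_be & hmask_be) >> fshift;
}

int main(void)
{
	/* hash on the low byte of a destination address, for example */
	uint32_t key   = htonl(0xc0a80105);	/* 192.168.1.5 */
	uint32_t hmask = htonl(0x000000ff);
	uint8_t fshift = 0;			/* ffs(0xff) - 1 */

	printf("bucket = %u\n", hash_fold(key, hmask, fshift));	/* 5 */
	return 0;
}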
@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
                unsigned int      off;
        } stack[TC_U32_MAXDEPTH];
 
-       struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
+       struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
        unsigned int off = skb_network_offset(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
@@ -120,7 +119,7 @@ next_knode:
                struct tc_u32_key *key = n->sel.keys;
 
 #ifdef CONFIG_CLS_U32_PERF
-               n->pf->rcnt +=1;
+               n->pf->rcnt += 1;
                j = 0;
 #endif
 
@@ -133,14 +132,14 @@ next_knode:
                }
 #endif
 
-               for (i = n->sel.nkeys; i>0; i--, key++) {
+               for (i = n->sel.nkeys; i > 0; i--, key++) {
                        int toff = off + key->off + (off2 & key->offmask);
-                       __be32 *data, _data;
+                       __be32 *data, hdata;
 
                        if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;
 
-                       data = skb_header_pointer(skb, toff, 4, &_data);
+                       data = skb_header_pointer(skb, toff, 4, &hdata);
                        if (!data)
                                goto out;
                        if ((*data ^ key->val) & key->mask) {
@@ -148,13 +147,13 @@ next_knode:
                                goto next_knode;
                        }
 #ifdef CONFIG_CLS_U32_PERF
-                       n->pf->kcnts[j] +=1;
+                       n->pf->kcnts[j] += 1;
                        j++;
 #endif
                }
                if (n->ht_down == NULL) {
 check_terminal:
-                       if (n->sel.flags&TC_U32_TERMINAL) {
+                       if (n->sel.flags & TC_U32_TERMINAL) {
 
                                *res = n->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -164,7 +163,7 @@ check_terminal:
                                }
 #endif
 #ifdef CONFIG_CLS_U32_PERF
-                               n->pf->rhit +=1;
+                               n->pf->rhit += 1;
 #endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
@@ -188,26 +187,26 @@ check_terminal:
                ht = n->ht_down;
                sel = 0;
                if (ht->divisor) {
-                       __be32 *data, _data;
+                       __be32 *data, hdata;
 
                        data = skb_header_pointer(skb, off + n->sel.hoff, 4,
-                                                 &_data);
+                                                 &hdata);
                        if (!data)
                                goto out;
                        sel = ht->divisor & u32_hash_fold(*data, &n->sel,
                                                          n->fshift);
                }
-               if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
+               if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
                        goto next_ht;
 
-               if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
+               if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags & TC_U32_VAROFFSET) {
-                               __be16 *data, _data;
+                               __be16 *data, hdata;
 
                                data = skb_header_pointer(skb,
                                                          off + n->sel.offoff,
-                                                         2, &_data);
+                                                         2, &hdata);
                                if (!data)
                                        goto out;
                                off2 += ntohs(n->sel.offmask & *data) >>
@@ -215,7 +214,7 @@ check_terminal:
                        }
                        off2 &= ~3;
                }
-               if (n->sel.flags&TC_U32_EAT) {
+               if (n->sel.flags & TC_U32_EAT) {
                        off += off2;
                        off2 = 0;
                }
@@ -236,11 +235,11 @@ out:
 
 deadloop:
        if (net_ratelimit())
-               printk(KERN_WARNING "cls_u32: dead loop\n");
+               pr_warning("cls_u32: dead loop\n");
        return -1;
 }
 
-static __inline__ struct tc_u_hnode *
+static struct tc_u_hnode *
 u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
 {
        struct tc_u_hnode *ht;
@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
        return ht;
 }
 
-static __inline__ struct tc_u_knode *
+static struct tc_u_knode *
 u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
 {
-       unsigned sel;
+       unsigned int sel;
        struct tc_u_knode *n = NULL;
 
        sel = TC_U32_HASH(handle);
@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
        do {
                if (++tp_c->hgenerator == 0x7FF)
                        tp_c->hgenerator = 1;
-       } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+       } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
 
        return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
 }
@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 {
        struct tc_u_knode *n;
-       unsigned h;
+       unsigned int h;
 
-       for (h=0; h<=ht->divisor; h++) {
+       for (h = 0; h <= ht->divisor; h++) {
                while ((n = ht->ht[h]) != NULL) {
                        ht->ht[h] = n->next;
 
@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
 
 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
+       struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
 
        if (ht == NULL)
                return 0;
 
        if (TC_U32_KEY(ht->handle))
-               return u32_delete_key(tp, (struct tc_u_knode*)ht);
+               return u32_delete_key(tp, (struct tc_u_knode *)ht);
 
        if (tp->root == ht)
                return -EINVAL;
@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
        struct tc_u_knode *n;
-       unsigned i = 0x7FF;
+       unsigned int i = 0x7FF;
 
-       for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+       for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
                if (i < TC_U32_NODE(n->handle))
                        i = TC_U32_NODE(n->handle);
        i++;
 
-       return handle|(i>0xFFF ? 0xFFF : i);
+       return handle | (i > 0xFFF ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
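
gen_new_htid() and gen_new_kid() above allocate pieces of the 32-bit u32 handle, which packs a 12-bit hash-table id in bits 20-31, an 8-bit bucket in bits 12-19 and a 12-bit node id in bits 0-11 (the TC_U32_HTID/TC_U32_HASH/TC_U32_NODE macros). A small sketch decomposing a sample handle under that layout; the handle value is illustrative:

#include <stdio.h>

/* same bit layout as the TC_U32_* handle macros */
#define HTID(h)	(((h) & 0xFFF00000) >> 20)
#define HASH(h)	(((h) >> 12) & 0xFF)
#define NODE(h)	((h) & 0xFFF)

int main(void)
{
	unsigned int handle = 0x80001002;	/* what tc prints as 800:1:2 */

	printf("htid = %#x, hash = %#x, node = %#x\n",
	       HTID(handle), HASH(handle), NODE(handle));	/* 0x800, 0x1, 0x2 */
	return 0;
}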
@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
        if (err < 0)
                return err;
 
-       if ((n = (struct tc_u_knode*)*arg) != NULL) {
+       n = (struct tc_u_knode *)*arg;
+       if (n) {
                if (TC_U32_KEY(n->handle) == 0)
                        return -EINVAL;
 
@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
        }
 
        if (tb[TCA_U32_DIVISOR]) {
-               unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
+               unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 
                if (--divisor > 0x100)
                        return -EINVAL;
@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
                        if (handle == 0)
                                return -ENOMEM;
                }
-               ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+               ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                ht->tp_c = tp_c;
@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
-       unsigned h;
+       unsigned int h;
 
        if (arg->stop)
                return;
@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct tc_u_knode *n = (struct tc_u_knode*)fh;
+       struct tc_u_knode *n = (struct tc_u_knode *)fh;
        struct nlattr *nest;
 
        if (n == NULL)
@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                goto nla_put_failure;
 
        if (TC_U32_KEY(n->handle) == 0) {
-               struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
-               u32 divisor = ht->divisor+1;
+               struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
+               u32 divisor = ht->divisor + 1;
+
                NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
        } else {
                NLA_PUT(skb, TCA_U32_SEL,
@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                        goto nla_put_failure;
 
 #ifdef CONFIG_NET_CLS_IND
-               if(strlen(n->indev))
+               if (strlen(n->indev))
                        NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
 #endif
 #ifdef CONFIG_CLS_U32_PERF
index bc45039..1c8360a 100644 (file)
@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
                return 0;
 
        switch (cmp->align) {
-               case TCF_EM_ALIGN_U8:
-                       val = *ptr;
-                       break;
+       case TCF_EM_ALIGN_U8:
+               val = *ptr;
+               break;
 
-               case TCF_EM_ALIGN_U16:
-                       val = get_unaligned_be16(ptr);
+       case TCF_EM_ALIGN_U16:
+               val = get_unaligned_be16(ptr);
 
-                       if (cmp_needs_transformation(cmp))
-                               val = be16_to_cpu(val);
-                       break;
+               if (cmp_needs_transformation(cmp))
+                       val = be16_to_cpu(val);
+               break;
 
-               case TCF_EM_ALIGN_U32:
-                       /* Worth checking boundries? The branching seems
-                        * to get worse. Visit again. */
-                       val = get_unaligned_be32(ptr);
+       case TCF_EM_ALIGN_U32:
+               /* Worth checking boundries? The branching seems
+                * to get worse. Visit again.
+                */
+               val = get_unaligned_be32(ptr);
 
-                       if (cmp_needs_transformation(cmp))
-                               val = be32_to_cpu(val);
-                       break;
+               if (cmp_needs_transformation(cmp))
+                       val = be32_to_cpu(val);
+               break;
 
-               default:
-                       return 0;
+       default:
+               return 0;
        }
 
        if (cmp->mask)
                val &= cmp->mask;
 
        switch (cmp->opnd) {
-               case TCF_EM_OPND_EQ:
-                       return val == cmp->val;
-               case TCF_EM_OPND_LT:
-                       return val < cmp->val;
-               case TCF_EM_OPND_GT:
-                       return val > cmp->val;
+       case TCF_EM_OPND_EQ:
+               return val == cmp->val;
+       case TCF_EM_OPND_LT:
+               return val < cmp->val;
+       case TCF_EM_OPND_GT:
+               return val > cmp->val;
        }
 
        return 0;
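
Beyond the reindented switch, the logic of em_cmp is unchanged: load an 8-, 16- or 32-bit value at a configured offset, optionally mask it, then compare it to a constant with EQ/LT/GT. A compact userspace sketch of that decision path; struct cmp_rule and the OP_* names are hypothetical, not the kernel's:

#include <stdint.h>

enum { OP_EQ, OP_LT, OP_GT };

struct cmp_rule {		/* hypothetical mirror of the cmp ematch parameters */
	uint32_t val;
	uint32_t mask;
	int      opnd;
};

static int cmp_match(uint32_t loaded, const struct cmp_rule *r)
{
	uint32_t v = loaded;

	if (r->mask)
		v &= r->mask;

	switch (r->opnd) {
	case OP_EQ: return v == r->val;
	case OP_LT: return v < r->val;
	case OP_GT: return v > r->val;
	}
	return 0;
}

int main(void)
{
	struct cmp_rule r = { .val = 0x06, .mask = 0xFF, .opnd = OP_EQ };

	return !cmp_match(0x4506, &r);	/* low byte 0x06 matches, exit status 0 */
}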
index 34da5e2..a4de67e 100644 (file)
 #include <net/pkt_cls.h>
 #include <net/sock.h>
 
-struct meta_obj
-{
+struct meta_obj {
        unsigned long           value;
        unsigned int            len;
 };
 
-struct meta_value
-{
+struct meta_value {
        struct tcf_meta_val     hdr;
        unsigned long           val;
        unsigned int            len;
 };
 
-struct meta_match
-{
+struct meta_match {
        struct meta_value       lvalue;
        struct meta_value       rvalue;
 };
@@ -255,7 +252,7 @@ META_COLLECTOR(int_rtclassid)
        if (unlikely(skb_dst(skb) == NULL))
                *err = -1;
        else
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->value = skb_dst(skb)->tclassid;
 #else
                dst->value = 0;
@@ -267,7 +264,7 @@ META_COLLECTOR(int_rtiif)
        if (unlikely(skb_rtable(skb) == NULL))
                *err = -1;
        else
-               dst->value = skb_rtable(skb)->fl.iif;
+               dst->value = skb_rtable(skb)->rt_iif;
 }
 
 /**************************************************************************
@@ -404,7 +401,7 @@ META_COLLECTOR(int_sk_sndbuf)
 META_COLLECTOR(int_sk_alloc)
 {
        SKIP_NONLOCAL(skb);
-       dst->value = skb->sk->sk_allocation;
+       dst->value = (__force int) skb->sk->sk_allocation;
 }
 
 META_COLLECTOR(int_sk_route_caps)
@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
  * Meta value collectors assignment table
  **************************************************************************/
 
-struct meta_ops
-{
+struct meta_ops {
        void            (*get)(struct sk_buff *, struct tcf_pkt_info *,
                               struct meta_value *, struct meta_obj *, int *);
 };
@@ -494,7 +490,7 @@ struct meta_ops
 
 /* Meta value operations table listing all meta value collectors and
  * assigns them to a type and meta id. */
-static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
+static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                [META_ID(DEV)]                  = META_FUNC(var_dev),
                [META_ID(SK_BOUND_IF)]          = META_FUNC(var_sk_bound_if),
@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
        }
 };
 
-static inline struct meta_ops * meta_ops(struct meta_value *val)
+static inline struct meta_ops *meta_ops(struct meta_value *val)
 {
        return &__meta_ops[meta_type(val)][meta_id(val)];
 }
@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
        if (v->len == sizeof(unsigned long))
                NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
-       else if (v->len == sizeof(u32)) {
+       else if (v->len == sizeof(u32))
                NLA_PUT_U32(skb, tlv, v->val);
-       }
 
        return 0;
 
@@ -663,8 +658,7 @@ nla_put_failure:
  * Type specific operations table
  **************************************************************************/
 
-struct meta_type_ops
-{
+struct meta_type_ops {
        void    (*destroy)(struct meta_value *);
        int     (*compare)(struct meta_obj *, struct meta_obj *);
        int     (*change)(struct meta_value *, struct nlattr *);
@@ -672,7 +666,7 @@ struct meta_type_ops
        int     (*dump)(struct sk_buff *, struct meta_value *, int);
 };
 
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
+static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                .destroy = meta_var_destroy,
                .compare = meta_var_compare,
@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
        }
 };
 
-static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
+static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
 {
        return &__meta_type_ops[meta_type(v)];
 }
@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
                return err;
 
        if (meta_type_ops(v)->apply_extras)
-           meta_type_ops(v)->apply_extras(v, dst);
+               meta_type_ops(v)->apply_extras(v, dst);
 
        return 0;
 }
@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
        r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
 
        switch (meta->lvalue.hdr.op) {
-               case TCF_EM_OPND_EQ:
-                       return !r;
-               case TCF_EM_OPND_LT:
-                       return r < 0;
-               case TCF_EM_OPND_GT:
-                       return r > 0;
+       case TCF_EM_OPND_EQ:
+               return !r;
+       case TCF_EM_OPND_LT:
+               return r < 0;
+       case TCF_EM_OPND_GT:
+               return r > 0;
        }
 
        return 0;
@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
 
 static inline int meta_is_supported(struct meta_value *val)
 {
-       return (!meta_id(val) || meta_ops(val)->get);
+       return !meta_id(val) || meta_ops(val)->get;
 }
 
 static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
index 1a4176a..a3bed07 100644 (file)
@@ -18,8 +18,7 @@
 #include <linux/tc_ematch/tc_em_nbyte.h>
 #include <net/pkt_cls.h>
 
-struct nbyte_data
-{
+struct nbyte_data {
        struct tcf_em_nbyte     hdr;
        char                    pattern[0];
 };
index ea8f566..15d353d 100644 (file)
@@ -19,8 +19,7 @@
 #include <linux/tc_ematch/tc_em_text.h>
 #include <net/pkt_cls.h>
 
-struct text_match
-{
+struct text_match {
        u16                     from_offset;
        u16                     to_offset;
        u8                      from_layer;
index 953f147..797bdb8 100644 (file)
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
        if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
                return 0;
 
-       return !(((*(__be32*) ptr)  ^ key->val) & key->mask);
+       return !(((*(__be32 *) ptr)  ^ key->val) & key->mask);
 }
 
 static struct tcf_ematch_ops em_u32_ops = {
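
em_u32_match above reduces to a single XOR-and-mask test: the 32-bit word at the configured offset matches when (word ^ key->val) & key->mask is zero. Below is a small userspace sketch of that test under stated assumptions; the function name and the memcpy-based load are illustrative only (the kernel reads a __be32 straight from the skb).

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Return non-zero when the 32-bit word at buf+offset equals val
 * on the bits selected by mask (all values in the same byte order). */
static int u32_match(const unsigned char *buf, size_t offset,
                     uint32_t val, uint32_t mask)
{
        uint32_t word;

        memcpy(&word, buf + offset, sizeof(word)); /* avoids unaligned access */
        return !((word ^ val) & mask);
}

int main(void)
{
        unsigned char pkt[8] = { 0x45, 0x00, 0x00, 0x54, 0xde, 0xad, 0xbe, 0xef };
        uint32_t val, mask;

        /* Match 0xde 0xad in the two bytes at offset 4, ignore the rest. */
        memcpy(&val,  "\xde\xad\x00\x00", 4);
        memcpy(&mask, "\xff\xff\x00\x00", 4);

        printf("%s\n", u32_match(pkt, 4, val, mask) ? "hit" : "miss"); /* hit */
        return 0;
}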
index 5e37da9..88d93eb 100644 (file)
@@ -93,7 +93,7 @@
 static LIST_HEAD(ematch_ops);
 static DEFINE_RWLOCK(ematch_mod_lock);
 
-static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
+static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
 {
        struct tcf_ematch_ops *e = NULL;
 
@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
 }
 EXPORT_SYMBOL(tcf_em_unregister);
 
-static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
-                                                  int index)
+static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
+                                                 int index)
 {
        return &tree->matches[index];
 }
@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
 
        if (em_hdr->kind == TCF_EM_CONTAINER) {
                /* Special ematch called "container", carries an index
-                * referencing an external ematch sequence. */
+                * referencing an external ematch sequence.
+                */
                u32 ref;
 
                if (data_len < sizeof(ref))
@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                        goto errout;
 
                /* We do not allow backward jumps to avoid loops and jumps
-                * to our own position are of course illegal. */
+                * to our own position are of course illegal.
+                */
                if (ref <= idx)
                        goto errout;
 
@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                 * which automatically releases the reference again, therefore
                 * the module MUST not be given back under any circumstances
                 * here. Be aware, the destroy function assumes that the
-                * module is held if the ops field is non zero. */
+                * module is held if the ops field is non zero.
+                */
                em->ops = tcf_em_lookup(em_hdr->kind);
 
                if (em->ops == NULL) {
@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                        if (em->ops) {
                                /* We dropped the RTNL mutex in order to
                                 * perform the module load. Tell the caller
-                                * to replay the request. */
+                                * to replay the request.
+                                */
                                module_put(em->ops->owner);
                                err = -EAGAIN;
                        }
@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                }
 
                /* ematch module provides expected length of data, so we
-                * can do a basic sanity check. */
+                * can do a basic sanity check.
+                */
                if (em->ops->datalen && data_len < em->ops->datalen)
                        goto errout;
 
@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                         * TCF_EM_SIMPLE may be specified stating that the
                         * data only consists of a u32 integer and the module
                         * does not expected a memory reference but rather
-                        * the value carried. */
+                        * the value carried.
+                        */
                        if (em_hdr->flags & TCF_EM_SIMPLE) {
                                if (data_len < sizeof(u32))
                                        goto errout;
@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
         * The array of rt attributes is parsed in the order as they are
         * provided, their type must be incremental from 1 to n. Even
         * if it does not serve any real purpose, a failure of sticking
-        * to this policy will result in parsing failure. */
+        * to this policy will result in parsing failure.
+        */
        for (idx = 0; nla_ok(rt_match, list_len); idx++) {
                err = -EINVAL;
 
@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
        /* Check if the number of matches provided by userspace actually
         * complies with the array of matches. The number was used for
         * the validation of references and a mismatch could lead to
-        * undefined references during the matching process. */
+        * undefined references during the matching process.
+        */
        if (idx != tree_hdr->nmatches) {
                err = -EINVAL;
                goto errout_abort;
@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
                        .flags = em->flags
                };
 
-               NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
+               NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
 
                if (em->ops && em->ops->dump) {
                        if (em->ops->dump(skb, em) < 0)
@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
                               struct tcf_pkt_info *info)
 {
        int r = em->ops->match(skb, em, info);
+
        return tcf_em_is_inverted(em) ? !r : r;
 }
 

@@ -527,8 +536,8 @@ pop_stack:
 
 stack_overflow:
        if (net_ratelimit())
-               printk(KERN_WARNING "tc ematch: local stack overflow,"
-                       " increase NET_EMATCH_STACK\n");
+               pr_warning("tc ematch: local stack overflow,"
+                          " increase NET_EMATCH_STACK\n");
        return -1;
 }
 EXPORT_SYMBOL(__tcf_em_tree_match);
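
The stack_overflow path above fires when __tcf_em_tree_match exhausts its fixed, NET_EMATCH_STACK-deep local stack while walking nested matches. As a rough, hedged illustration of the general technique only (a bounded explicit stack with an overflow bail-out, not the kernel's actual ematch walk):

#include <stdio.h>

#define STACK_DEPTH 8   /* stand-in for NET_EMATCH_STACK */

/* Push indices onto a fixed-size stack; report -1 on overflow, the same
 * way the tree walk above gives up rather than recursing further. */
static int push_all(const int *items, int n)
{
        int stack[STACK_DEPTH];
        int top = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (top == STACK_DEPTH) {
                        fprintf(stderr, "local stack overflow, increase STACK_DEPTH\n");
                        return -1;
                }
                stack[top++] = items[i];
        }

        while (top > 0)                 /* pop_stack-style unwinding */
                printf("%d ", stack[--top]);
        printf("\n");
        return 0;
}

int main(void)
{
        int refs[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };

        push_all(refs, 5);      /* fits */
        push_all(refs, 10);     /* overflows: 10 > STACK_DEPTH */
        return 0;
}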
index b22ca2d..7490f3f 100644 (file)
@@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
        int err = -ENOENT;
 
        write_lock(&qdisc_mod_lock);
-       for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
+       for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (q == qops)
                        break;
        if (q) {
@@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
        if (!tab || --tab->refcnt)
                return;
 
-       for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
+       for (rtabp = &qdisc_rtab_list;
+            (rtab = *rtabp) != NULL;
+            rtabp = &rtab->next) {
                if (rtab == tab) {
                        *rtabp = rtab->next;
                        kfree(rtab);
@@ -396,6 +398,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        return stab;
 }
 
+static void stab_kfree_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct qdisc_size_table, rcu));
+}
+
 void qdisc_put_stab(struct qdisc_size_table *tab)
 {
        if (!tab)
@@ -405,7 +412,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
 
        if (--tab->refcnt == 0) {
                list_del(&tab->list);
-               kfree(tab);
+               call_rcu_bh(&tab->rcu, stab_kfree_rcu);
        }
 
        spin_unlock(&qdisc_stab_lock);
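
stab_kfree_rcu above frees the enclosing qdisc_size_table once the RCU grace period ends; the callback only receives the embedded rcu_head, so container_of() walks back to the start of the structure. A standalone sketch of just that pointer arithmetic, with container_of spelled out via offsetof and a stub rcu_head; the RCU deferral itself is kernel infrastructure and is not reproduced here.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub { void *next; };   /* stand-in for struct rcu_head */

struct size_table {
        int     refcnt;
        struct rcu_head_stub rcu;       /* embedded member handed to the callback */
        int     data[4];
};

/* What a deferred-free callback sees: only &tab->rcu.  It recovers the
 * whole object and frees it, the same shape as stab_kfree_rcu(). */
static void kfree_rcu_cb(struct rcu_head_stub *head)
{
        struct size_table *tab = container_of(head, struct size_table, rcu);

        printf("freeing table with refcnt=%d\n", tab->refcnt);
        free(tab);
}

int main(void)
{
        struct size_table *tab = calloc(1, sizeof(*tab));

        if (!tab)
                return 1;
        tab->refcnt = 0;
        kfree_rcu_cb(&tab->rcu);        /* in the kernel this runs after a grace period */
        return 0;
}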
@@ -428,7 +435,7 @@ nla_put_failure:
        return -1;
 }
 
-void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
 {
        int pkt_len, slot;
 
@@ -454,14 +461,13 @@ out:
                pkt_len = 1;
        qdisc_skb_cb(skb)->pkt_len = pkt_len;
 }
-EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
 
 void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
 {
        if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
-               printk(KERN_WARNING
-                      "%s: %s qdisc %X: is non-work-conserving?\n",
-                      txt, qdisc->ops->id, qdisc->handle >> 16);
+               pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+                       txt, qdisc->ops->id, qdisc->handle >> 16);
                qdisc->flags |= TCQ_F_WARN_NONWC;
        }
 }
@@ -472,7 +478,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);
 
-       wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(wd->qdisc);
        __netif_schedule(qdisc_root(wd->qdisc));
 
        return HRTIMER_NORESTART;
@@ -494,7 +500,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;
 
-       wd->qdisc->flags |= TCQ_F_THROTTLED;
+       qdisc_throttled(wd->qdisc);
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
        hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -504,7 +510,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule);
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
        hrtimer_cancel(&wd->timer);
-       wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(wd->qdisc);
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
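
Several hunks here replace open-coded flags &= ~TCQ_F_THROTTLED / flags |= TCQ_F_THROTTLED with qdisc_throttled()/qdisc_unthrottled()/qdisc_is_throttled() helpers. A minimal sketch of that wrapper pattern over a flag word; the struct and helper names below are illustrative, not the kernel API.

#include <stdio.h>

#define F_THROTTLED     (1u << 0)       /* stand-in for TCQ_F_THROTTLED */

struct fake_qdisc { unsigned int flags; };

static inline void throttle(struct fake_qdisc *q)   { q->flags |= F_THROTTLED; }
static inline void unthrottle(struct fake_qdisc *q) { q->flags &= ~F_THROTTLED; }
static inline int  is_throttled(const struct fake_qdisc *q)
{
        return (q->flags & F_THROTTLED) != 0;
}

int main(void)
{
        struct fake_qdisc q = { .flags = 0 };

        throttle(&q);
        printf("throttled=%d\n", is_throttled(&q));     /* 1 */
        unthrottle(&q);
        printf("throttled=%d\n", is_throttled(&q));     /* 0 */
        return 0;
}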
 
@@ -625,7 +631,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
                        autohandle = TC_H_MAKE(0x80000000U, 0);
        } while (qdisc_lookup(dev, autohandle) && --i > 0);
 
-       return i>0 ? autohandle : 0;
+       return i > 0 ? autohandle : 0;
 }
 
 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
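
qdisc_alloc_handle() above keeps proposing handles in the automatic 0x8000xxxx range until qdisc_lookup() finds a free one or the attempt budget i runs out, in which case 0 is returned. A hedged userspace sketch of that bounded search against a toy "in use" predicate; the constants and the predicate are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define AUTO_BASE       0x80000000u     /* like TC_H_MAKE(0x80000000U, 0) */

static int handle_in_use(uint32_t h)
{
        /* Toy predicate standing in for qdisc_lookup(): first few majors taken. */
        return h >= AUTO_BASE && h < AUTO_BASE + 0x30000u;
}

static uint32_t alloc_handle(void)
{
        uint32_t autohandle = AUTO_BASE;
        int i = 0x8000;                 /* attempt budget, as in the kernel loop */

        do {
                autohandle += 0x10000u;         /* next candidate major number */
                if (autohandle == 0xFFFF0000u)  /* reserved top of the range: wrap */
                        autohandle = AUTO_BASE;
        } while (handle_in_use(autohandle) && --i > 0);

        return i > 0 ? autohandle : 0;  /* 0 means "no free handle found" */
}

int main(void)
{
        printf("handle=%#x\n", (unsigned int)alloc_handle());  /* first free: 0x80030000 */
        return 0;
}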
@@ -834,7 +840,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                                err = PTR_ERR(stab);
                                goto err_out4;
                        }
-                       sch->stab = stab;
+                       rcu_assign_pointer(sch->stab, stab);
                }
                if (tca[TCA_RATE]) {
                        spinlock_t *root_lock;
@@ -874,7 +880,7 @@ err_out4:
         * Any broken qdiscs that would require a ops->reset() here?
         * The qdisc was never in action so it shouldn't be necessary.
         */
-       qdisc_put_stab(sch->stab);
+       qdisc_put_stab(rtnl_dereference(sch->stab));
        if (ops->destroy)
                ops->destroy(sch);
        goto err_out3;
@@ -882,7 +888,7 @@ err_out4:
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 {
-       struct qdisc_size_table *stab = NULL;
+       struct qdisc_size_table *ostab, *stab = NULL;
        int err = 0;
 
        if (tca[TCA_OPTIONS]) {
@@ -899,8 +905,9 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
                        return PTR_ERR(stab);
        }
 
-       qdisc_put_stab(sch->stab);
-       sch->stab = stab;
+       ostab = rtnl_dereference(sch->stab);
+       rcu_assign_pointer(sch->stab, stab);
+       qdisc_put_stab(ostab);
 
        if (tca[TCA_RATE]) {
                /* NB: ignores errors from replace_estimator
@@ -915,9 +922,8 @@ out:
        return 0;
 }
 
-struct check_loop_arg
-{
-       struct qdisc_walker     w;
+struct check_loop_arg {
+       struct qdisc_walker     w;
        struct Qdisc            *p;
        int                     depth;
 };
@@ -970,7 +976,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        struct Qdisc *p = NULL;
        int err;
 
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return -ENODEV;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -980,12 +987,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
-                               if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+                               p = qdisc_lookup(dev, TC_H_MAJ(clid));
+                               if (!p)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
-                       } else { /* ingress */
-                               if (dev_ingress_queue(dev))
-                                       q = dev_ingress_queue(dev)->qdisc_sleeping;
+                       } else if (dev_ingress_queue(dev)) {
+                               q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = dev->qdisc;
@@ -996,7 +1003,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
                        return -EINVAL;
        } else {
-               if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+               q = qdisc_lookup(dev, tcm->tcm_handle);
+               if (!q)
                        return -ENOENT;
        }
 
@@ -1008,7 +1016,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        return -EINVAL;
                if (q->handle == 0)
                        return -ENOENT;
-               if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
+               err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+               if (err != 0)
                        return err;
        } else {
                qdisc_notify(net, skb, n, clid, NULL, q);
@@ -1017,7 +1026,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 }
 
 /*
-   Create/change qdisc.
+ * Create/change qdisc.
  */
 
 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@@ -1036,7 +1045,8 @@ replay:
        clid = tcm->tcm_parent;
        q = p = NULL;
 
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return -ENODEV;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1046,12 +1056,12 @@ replay:
        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (clid != TC_H_INGRESS) {
-                               if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+                               p = qdisc_lookup(dev, TC_H_MAJ(clid));
+                               if (!p)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
-                       } else { /* ingress */
-                               if (dev_ingress_queue_create(dev))
-                                       q = dev_ingress_queue(dev)->qdisc_sleeping;
+                       } else if (dev_ingress_queue_create(dev)) {
+                               q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = dev->qdisc;
@@ -1063,13 +1073,14 @@ replay:
 
                if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                        if (tcm->tcm_handle) {
-                               if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+                               if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
                                        return -EEXIST;
                                if (TC_H_MIN(tcm->tcm_handle))
                                        return -EINVAL;
-                               if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+                               q = qdisc_lookup(dev, tcm->tcm_handle);
+                               if (!q)
                                        goto create_n_graft;
-                               if (n->nlmsg_flags&NLM_F_EXCL)
+                               if (n->nlmsg_flags & NLM_F_EXCL)
                                        return -EEXIST;
                                if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                                        return -EINVAL;
@@ -1079,7 +1090,7 @@ replay:
                                atomic_inc(&q->refcnt);
                                goto graft;
                        } else {
-                               if (q == NULL)
+                               if (!q)
                                        goto create_n_graft;
 
                                /* This magic test requires explanation.
@@ -1101,9 +1112,9 @@ replay:
                                 *   For now we select create/graft, if
                                 *   user gave KIND, which does not match existing.
                                 */
-                               if ((n->nlmsg_flags&NLM_F_CREATE) &&
-                                   (n->nlmsg_flags&NLM_F_REPLACE) &&
-                                   ((n->nlmsg_flags&NLM_F_EXCL) ||
+                               if ((n->nlmsg_flags & NLM_F_CREATE) &&
+                                   (n->nlmsg_flags & NLM_F_REPLACE) &&
+                                   ((n->nlmsg_flags & NLM_F_EXCL) ||
                                     (tca[TCA_KIND] &&
                                      nla_strcmp(tca[TCA_KIND], q->ops->id))))
                                        goto create_n_graft;
@@ -1118,7 +1129,7 @@ replay:
        /* Change qdisc parameters */
        if (q == NULL)
                return -ENOENT;
-       if (n->nlmsg_flags&NLM_F_EXCL)
+       if (n->nlmsg_flags & NLM_F_EXCL)
                return -EEXIST;
        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                return -EINVAL;
@@ -1128,7 +1139,7 @@ replay:
        return err;
 
 create_n_graft:
-       if (!(n->nlmsg_flags&NLM_F_CREATE))
+       if (!(n->nlmsg_flags & NLM_F_CREATE))
                return -ENOENT;
        if (clid == TC_H_INGRESS) {
                if (dev_ingress_queue(dev))
@@ -1175,6 +1186,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        struct nlmsghdr  *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
+       struct qdisc_size_table *stab;
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
@@ -1190,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;
 
-       if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+       stab = rtnl_dereference(q->stab);
+       if (stab && qdisc_dump_stab(skb, stab) < 0)
                goto nla_put_failure;
 
        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
@@ -1234,16 +1247,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                return -ENOBUFS;
 
        if (old && !tc_qdisc_dump_ignore(old)) {
-               if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+               if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+                                 0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new && !tc_qdisc_dump_ignore(new)) {
-               if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+               if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+                                 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }
 
        if (skb->len)
-               return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+               return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+                                     n->nlmsg_flags & NLM_F_ECHO);
 
 err_out:
        kfree_skb(skb);
@@ -1275,7 +1291,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                        q_idx++;
                        continue;
                }
-               if (!tc_qdisc_dump_ignore(q) && 
+               if (!tc_qdisc_dump_ignore(q) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                        goto done;
@@ -1356,7 +1372,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        u32 qid = TC_H_MAJ(clid);
        int err;
 
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return -ENODEV;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1391,9 +1408,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        qid = dev->qdisc->handle;
 
                /* Now qid is genuine qdisc handle consistent
-                  both with parent and child.
-
-                  TC_H_MAJ(pid) still may be unspecified, complete it now.
+                * both with parent and child.
+                *
+                * TC_H_MAJ(pid) still may be unspecified, complete it now.
                 */
                if (pid)
                        pid = TC_H_MAKE(qid, pid);
@@ -1403,7 +1420,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        }
 
        /* OK. Locate qdisc */
-       if ((q = qdisc_lookup(dev, qid)) == NULL)
+       q = qdisc_lookup(dev, qid);
+       if (!q)
                return -ENOENT;
 
        /* An check that it supports classes */
@@ -1423,13 +1441,14 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        if (cl == 0) {
                err = -ENOENT;
-               if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
+               if (n->nlmsg_type != RTM_NEWTCLASS ||
+                   !(n->nlmsg_flags & NLM_F_CREATE))
                        goto out;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTCLASS:
                        err = -EEXIST;
-                       if (n->nlmsg_flags&NLM_F_EXCL)
+                       if (n->nlmsg_flags & NLM_F_EXCL)
                                goto out;
                        break;
                case RTM_DELTCLASS:
@@ -1521,14 +1540,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
                return -EINVAL;
        }
 
-       return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+       return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+                             n->nlmsg_flags & NLM_F_ECHO);
 }
 
-struct qdisc_dump_args
-{
-       struct qdisc_walker w;
-       struct sk_buff *skb;
-       struct netlink_callback *cb;
+struct qdisc_dump_args {
+       struct qdisc_walker     w;
+       struct sk_buff          *skb;
+       struct netlink_callback *cb;
 };
 
 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
@@ -1590,7 +1609,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
 
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
+       struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        struct net_device *dev;
@@ -1598,7 +1617,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 
        if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
                return 0;
-       if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return 0;
 
        s_t = cb->args[0];
@@ -1621,19 +1641,22 @@ done:
 }
 
 /* Main classifier routine: scans classifier chain attached
-   to this qdisc, (optionally) tests for protocol and asks
-   specific classifiers.
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
  */
 int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
                       struct tcf_result *res)
 {
        __be16 protocol = skb->protocol;
-       int err = 0;
+       int err;
 
        for (; tp; tp = tp->next) {
-               if ((tp->protocol == protocol ||
-                    tp->protocol == htons(ETH_P_ALL)) &&
-                   (err = tp->classify(skb, tp, res)) >= 0) {
+               if (tp->protocol != protocol &&
+                   tp->protocol != htons(ETH_P_ALL))
+                       continue;
+               err = tp->classify(skb, tp, res);
+
+               if (err >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
                        if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
                                skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
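
The rewritten tc_classify_compat() loop above now skips filters whose protocol matches neither the packet nor ETH_P_ALL and returns the first classify() verdict that is >= 0. Below is a hedged standalone sketch of that chain walk over a toy filter list; the struct, verdict values, and protocol numbers are illustrative only.

#include <stdio.h>

#define PROTO_ALL       0       /* stand-in for htons(ETH_P_ALL) */

struct filter {
        int protocol;                           /* which protocol this filter handles */
        int (*classify)(int pkt_len);           /* verdict >= 0 means "classified" */
        struct filter *next;
};

static int classify_small(int pkt_len) { return pkt_len < 100 ? 1 : -1; }
static int classify_any(int pkt_len)   { (void)pkt_len; return 7; }

/* Walk the chain: skip protocol mismatches, stop at the first verdict >= 0. */
static int classify_chain(struct filter *tp, int protocol, int pkt_len)
{
        for (; tp; tp = tp->next) {
                int err;

                if (tp->protocol != protocol && tp->protocol != PROTO_ALL)
                        continue;
                err = tp->classify(pkt_len);
                if (err >= 0)
                        return err;
        }
        return -1;      /* nothing matched */
}

int main(void)
{
        struct filter f2 = { .protocol = PROTO_ALL, .classify = classify_any,   .next = NULL };
        struct filter f1 = { .protocol = 0x0800,    .classify = classify_small, .next = &f2 };

        printf("%d\n", classify_chain(&f1, 0x0800, 50));   /* 1: first filter hits */
        printf("%d\n", classify_chain(&f1, 0x0800, 500));  /* 7: falls through to the ALL filter */
        printf("%d\n", classify_chain(&f1, 0x86dd, 500));  /* 7: protocol mismatch skips the first */
        return 0;
}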
@@ -1649,12 +1672,12 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
                struct tcf_result *res)
 {
        int err = 0;
-       __be16 protocol;
 #ifdef CONFIG_NET_CLS_ACT
+       __be16 protocol;
        struct tcf_proto *otp = tp;
 reclassify:
-#endif
        protocol = skb->protocol;
+#endif
 
        err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
@@ -1664,11 +1687,11 @@ reclassify:
 
                if (verd++ >= MAX_REC_LOOP) {
                        if (net_ratelimit())
-                               printk(KERN_NOTICE
-                                      "%s: packet reclassify loop"
+                               pr_notice("%s: packet reclassify loop"
                                          " rule prio %u protocol %02x\n",
-                                      tp->q->ops->id,
-                                      tp->prio & 0xffff, ntohs(tp->protocol));
+                                         tp->q->ops->id,
+                                         tp->prio & 0xffff,
+                                         ntohs(tp->protocol));
                        return TC_ACT_SHOT;
                }
                skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1761,7 +1784,7 @@ static int __init pktsched_init(void)
 
        err = register_pernet_subsys(&psched_net_ops);
        if (err) {
-               printk(KERN_ERR "pktsched_init: "
+               pr_err("pktsched_init: "
                       "cannot initialize per netns operations\n");
                return err;
        }
index 943d733..3f08158 100644 (file)
@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
         * creation), and one for the reference held when calling delete.
         */
        if (flow->ref < 2) {
-               printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
+               pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
                return -EINVAL;
        }
        if (flow->ref > 2)
@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                        }
                }
                flow = NULL;
-       done:
-               ;               
+done:
+               ;
        }
-       if (!flow)
+       if (!flow) {
                flow = &p->link;
-       else {
+       } else {
                if (flow->vcc)
                        ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
                /*@@@ looks good ... but it's not supposed to work :-) */
@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
 
        list_for_each_entry_safe(flow, tmp, &p->flows, list) {
                if (flow->ref > 1)
-                       printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
-                              flow->ref);
+                       pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
                atm_tc_put(sch, (unsigned long)flow);
        }
        tasklet_kill(&p->task);
@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
        }
        if (flow->excess)
                NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
-       else {
+       else
                NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-       }
 
        nla_nest_end(skb, nest);
        return skb->len;
index c80d1c2..24d94c0 100644 (file)
@@ -72,8 +72,7 @@
 struct cbq_sched_data;
 
 
-struct cbq_class
-{
+struct cbq_class {
        struct Qdisc_class_common common;
        struct cbq_class        *next_alive;    /* next class with backlog in this priority band */
 
@@ -139,19 +138,18 @@ struct cbq_class
        int                     refcnt;
        int                     filters;
 
-       struct cbq_class        *defaults[TC_PRIO_MAX+1];
+       struct cbq_class        *defaults[TC_PRIO_MAX + 1];
 };
 
-struct cbq_sched_data
-{
+struct cbq_sched_data {
        struct Qdisc_class_hash clhash;                 /* Hash table of all classes */
-       int                     nclasses[TC_CBQ_MAXPRIO+1];
-       unsigned                quanta[TC_CBQ_MAXPRIO+1];
+       int                     nclasses[TC_CBQ_MAXPRIO + 1];
+       unsigned int            quanta[TC_CBQ_MAXPRIO + 1];
 
        struct cbq_class        link;
 
-       unsigned                activemask;
-       struct cbq_class        *active[TC_CBQ_MAXPRIO+1];      /* List of all classes
+       unsigned int            activemask;
+       struct cbq_class        *active[TC_CBQ_MAXPRIO + 1];    /* List of all classes
                                                                   with backlog */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
        int                     tx_len;
        psched_time_t           now;            /* Cached timestamp */
        psched_time_t           now_rt;         /* Cached real time */
-       unsigned                pmask;
+       unsigned int            pmask;
 
        struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
 };
 
 
-#define L2T(cl,len)    qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len)   qdisc_l2t((cl)->R_tab, len)
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 {
        struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
 {
-       struct cbq_class *cl, *new;
+       struct cbq_class *cl;
 
-       for (cl = this->tparent; cl; cl = cl->tparent)
-               if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
-                       return new;
+       for (cl = this->tparent; cl; cl = cl->tparent) {
+               struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
 
+               if (new != NULL && new != this)
+                       return new;
+       }
        return NULL;
 }
 
 #endif
 
 /* Classify packet. The procedure is pretty complicated, but
-   it allows us to combine link sharing and priority scheduling
-   transparently.
-
-   Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
-   so that it resolves to split nodes. Then packets are classified
-   by logical priority, or a more specific classifier may be attached
-   to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
  */
 
 static struct cbq_class *
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
        /*
         *  Step 1. If skb->priority points to one of our classes, use it.
         */
-       if (TC_H_MAJ(prio^sch->handle) == 0 &&
+       if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;
 
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
                        goto fallback;
 
-               if ((cl = (void*)res.class) == NULL) {
+               cl = (void *)res.class;
+               if (!cl) {
                        if (TC_H_MAJ(res.classid))
                                cl = cbq_class_lookup(q, res.classid);
-                       else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+                       else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];
 
                        if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
         * Step 4. No success...
         */
        if (TC_H_MAJ(prio) == 0 &&
-           !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+           !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
            !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
                return head;
 
@@ -290,12 +291,12 @@ fallback:
 }
 
 /*
-   A packet has just been enqueued on the empty class.
-   cbq_activate_class adds it to the tail of active class list
-   of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
  */
 
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
 }
 
 /*
-   Unlink class from active chain.
-   Note that this same procedure is done directly in cbq_dequeue*
-   during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
  */
 
 static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 {
        int toplevel = q->toplevel;
 
-       if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+       if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
                psched_time_t now;
                psched_tdiff_t incr;
 
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
                                q->toplevel = cl->level;
                                return;
                        }
-               } while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+               } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
        }
 }
 
@@ -390,7 +391,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        ret = qdisc_enqueue(skb, cl->q);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
-               qdisc_bstats_update(sch, skb);
                cbq_mark_toplevel(q, cl);
                if (!cl->next_alive)
                        cbq_activate_class(cl);
@@ -418,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
                delay += cl->offtime;
 
                /*
-                  Class goes to sleep, so that it will have no
-                  chance to work avgidle. Let's forgive it 8)
-
-                  BTW cbq-2.0 has a crap in this
-                  place, apparently they forgot to shift it by cl->ewma_log.
+                * Class goes to sleep, so that it will have no
+                * chance to work avgidle. Let's forgive it 8)
+                *
+                * BTW cbq-2.0 has a crap in this
+                * place, apparently they forgot to shift it by cl->ewma_log.
                 */
                if (cl->avgidle < 0)
                        delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -439,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
                q->wd_expires = delay;
 
        /* Dirty work! We must schedule wakeups based on
-          real available rate, rather than leaf rate,
-          which may be tiny (even zero).
+        * real available rate, rather than leaf rate,
+        * which may be tiny (even zero).
         */
        if (q->toplevel == TC_CBQ_MAXLEVEL) {
                struct cbq_class *b;
@@ -460,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 }
 
 /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
-   they go overlimit
+ * they go overlimit
  */
 
 static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -595,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
        struct Qdisc *sch = q->watchdog.qdisc;
        psched_time_t now;
        psched_tdiff_t delay = 0;
-       unsigned pmask;
+       unsigned int pmask;
 
        now = psched_get_time();
 
@@ -624,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
        }
 
-       sch->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(sch);
        __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
 }
@@ -649,7 +649,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
                ret = qdisc_enqueue(skb, cl->q);
                if (ret == NET_XMIT_SUCCESS) {
                        sch->q.qlen++;
-                       qdisc_bstats_update(sch, skb);
                        if (!cl->next_alive)
                                cbq_activate_class(cl);
                        return 0;
@@ -665,15 +664,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 #endif
 
 /*
-   It is mission critical procedure.
-
-   We "regenerate" toplevel cutoff, if transmitting class
-   has backlog and it is not regulated. It is not part of
-   original CBQ description, but looks more reasonable.
-   Probably, it is wrong. This question needs further investigation.
-*/
+ * It is mission critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
 
-static __inline__ void
+static inline void
 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                    struct cbq_class *borrowed)
 {
@@ -684,7 +683,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                                        q->toplevel = borrowed->level;
                                        return;
                                }
-                       } while ((borrowed=borrowed->borrow) != NULL);
+                       } while ((borrowed = borrowed->borrow) != NULL);
                }
 #if 0
        /* It is not necessary now. Uncommenting it
@@ -712,10 +711,10 @@ cbq_update(struct cbq_sched_data *q)
                cl->bstats.bytes += len;
 
                /*
-                  (now - last) is total time between packet right edges.
-                  (last_pktlen/rate) is "virtual" busy time, so that
-
-                        idle = (now - last) - last_pktlen/rate
+                * (now - last) is total time between packet right edges.
+                * (last_pktlen/rate) is "virtual" busy time, so that
+                *
+                *      idle = (now - last) - last_pktlen/rate
                 */
 
                idle = q->now - cl->last;
@@ -725,9 +724,9 @@ cbq_update(struct cbq_sched_data *q)
                        idle -= L2T(cl, len);
 
                /* true_avgidle := (1-W)*true_avgidle + W*idle,
-                  where W=2^{-ewma_log}. But cl->avgidle is scaled:
-                  cl->avgidle == true_avgidle/W,
-                  hence:
+                * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+                * cl->avgidle == true_avgidle/W,
+                * hence:
                 */
                        avgidle += idle - (avgidle>>cl->ewma_log);
                }
@@ -741,22 +740,22 @@ cbq_update(struct cbq_sched_data *q)
                        cl->avgidle = avgidle;
 
                        /* Calculate expected time, when this class
-                          will be allowed to send.
-                          It will occur, when:
-                          (1-W)*true_avgidle + W*delay = 0, i.e.
-                          idle = (1/W - 1)*(-true_avgidle)
-                          or
-                          idle = (1 - W)*(-cl->avgidle);
+                        * will be allowed to send.
+                        * It will occur, when:
+                        * (1-W)*true_avgidle + W*delay = 0, i.e.
+                        * idle = (1/W - 1)*(-true_avgidle)
+                        * or
+                        * idle = (1 - W)*(-cl->avgidle);
                         */
                        idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
 
                        /*
-                          That is not all.
-                          To maintain the rate allocated to the class,
-                          we add to undertime virtual clock,
-                          necessary to complete transmitted packet.
-                          (len/phys_bandwidth has been already passed
-                          to the moment of cbq_update)
+                        * That is not all.
+                        * To maintain the rate allocated to the class,
+                        * we add to undertime virtual clock,
+                        * necessary to complete transmitted packet.
+                        * (len/phys_bandwidth has been already passed
+                        * to the moment of cbq_update)
                         */
 
                        idle -= L2T(&q->link, len);
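
The comments rewritten above describe CBQ's scaled EWMA: true_avgidle is updated as (1-W)*true_avgidle + W*idle with W = 2^-ewma_log, but the class stores avgidle = true_avgidle/W, so the update collapses to avgidle += idle - (avgidle >> ewma_log). A small numeric sketch comparing the shift form with the floating-point reference; the values are arbitrary and the integer shift truncates, so the two drift slightly.

#include <stdio.h>

int main(void)
{
        const int ewma_log = 3;                 /* W = 2^-3 = 0.125 */
        long avgidle = 800;                     /* scaled: true_avgidle / W */
        double true_avgidle = avgidle * 0.125;  /* = 100.0 */
        const long idle = 40;
        int i;

        for (i = 0; i < 3; i++) {
                /* integer form used by cbq_update() */
                avgidle += idle - (avgidle >> ewma_log);
                /* reference floating-point EWMA of the unscaled value */
                true_avgidle = (1.0 - 0.125) * true_avgidle + 0.125 * idle;
                printf("scaled=%ld  scaled*W=%.2f  float=%.2f\n",
                       avgidle, avgidle * 0.125, true_avgidle);
        }
        return 0;
}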
@@ -778,7 +777,7 @@ cbq_update(struct cbq_sched_data *q)
        cbq_update_toplevel(q, this, q->tx_borrowed);
 }
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_under_limit(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -794,16 +793,17 @@ cbq_under_limit(struct cbq_class *cl)
 
        do {
                /* It is very suspicious place. Now overlimit
-                  action is generated for not bounded classes
-                  only if link is completely congested.
-                  Though it is in agree with ancestor-only paradigm,
-                  it looks very stupid. Particularly,
-                  it means that this chunk of code will either
-                  never be called or result in strong amplification
-                  of burstiness. Dangerous, silly, and, however,
-                  no another solution exists.
+                * action is generated for not bounded classes
+                * only if link is completely congested.
+                * Though it is in agree with ancestor-only paradigm,
+                * it looks very stupid. Particularly,
+                * it means that this chunk of code will either
+                * never be called or result in strong amplification
+                * of burstiness. Dangerous, silly, and, however,
+                * no another solution exists.
                 */
-               if ((cl = cl->borrow) == NULL) {
+               cl = cl->borrow;
+               if (!cl) {
                        this_cl->qstats.overlimits++;
                        this_cl->overlimit(this_cl);
                        return NULL;
@@ -816,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl)
        return cl;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_prio(struct Qdisc *sch, int prio)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
@@ -840,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 
                        if (cl->deficit <= 0) {
                                /* Class exhausted its allotment per
-                                  this round. Switch to the next one.
+                                * this round. Switch to the next one.
                                 */
                                deficit = 1;
                                cl->deficit += cl->quantum;
@@ -850,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
                        skb = cl->q->dequeue(cl->q);
 
                        /* Class did not give us any skb :-(
-                          It could occur even if cl->q->q.qlen != 0
-                          f.e. if cl->q == "tbf"
+                        * It could occur even if cl->q->q.qlen != 0
+                        * f.e. if cl->q == "tbf"
                         */
                        if (skb == NULL)
                                goto skip_class;
@@ -880,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 skip_class:
                        if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
                                /* Class is empty or penalized.
-                                  Unlink it from active chain.
+                                * Unlink it from active chain.
                                 */
                                cl_prev->next_alive = cl->next_alive;
                                cl->next_alive = NULL;
@@ -919,14 +919,14 @@ next_class:
        return NULL;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_1(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
-       unsigned activemask;
+       unsigned int activemask;
 
-       activemask = q->activemask&0xFF;
+       activemask = q->activemask & 0xFF;
        while (activemask) {
                int prio = ffz(~activemask);
                activemask &= ~(1<<prio);
@@ -951,11 +951,11 @@ cbq_dequeue(struct Qdisc *sch)
        if (q->tx_class) {
                psched_tdiff_t incr2;
                /* Time integrator. We calculate EOS time
-                  by adding expected packet transmission time.
-                  If real time is greater, we warp artificial clock,
-                  so that:
-
-                  cbq_time = max(real_time, work);
+                * by adding expected packet transmission time.
+                * If real time is greater, we warp artificial clock,
+                * so that:
+                *
+                * cbq_time = max(real_time, work);
                 */
                incr2 = L2T(&q->link, q->tx_len);
                q->now += incr2;
@@ -971,28 +971,29 @@ cbq_dequeue(struct Qdisc *sch)
 
                skb = cbq_dequeue_1(sch);
                if (skb) {
+                       qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
-                       sch->flags &= ~TCQ_F_THROTTLED;
+                       qdisc_unthrottled(sch);
                        return skb;
                }
 
                /* All the classes are overlimit.
-
-                  It is possible, if:
-
-                  1. Scheduler is empty.
-                  2. Toplevel cutoff inhibited borrowing.
-                  3. Root class is overlimit.
-
-                  Reset 2d and 3d conditions and retry.
-
-                  Note, that NS and cbq-2.0 are buggy, peeking
-                  an arbitrary class is appropriate for ancestor-only
-                  sharing, but not for toplevel algorithm.
-
-                  Our version is better, but slower, because it requires
-                  two passes, but it is unavoidable with top-level sharing.
-               */
+                *
+                * It is possible, if:
+                *
+                * 1. Scheduler is empty.
+                * 2. Toplevel cutoff inhibited borrowing.
+                * 3. Root class is overlimit.
+                *
+                * Reset 2d and 3d conditions and retry.
+                *
+                * Note, that NS and cbq-2.0 are buggy, peeking
+                * an arbitrary class is appropriate for ancestor-only
+                * sharing, but not for toplevel algorithm.
+                *
+                * Our version is better, but slower, because it requires
+                * two passes, but it is unavoidable with top-level sharing.
+                */
 
                if (q->toplevel == TC_CBQ_MAXLEVEL &&
                    q->link.undertime == PSCHED_PASTPERFECT)
@@ -1003,7 +1004,8 @@ cbq_dequeue(struct Qdisc *sch)
        }
 
        /* No packets in scheduler or nobody wants to give them to us :-(
-          Sigh... start watchdog timer in the last case. */
+        * Sigh... start watchdog timer in the last case.
+        */
 
        if (sch->q.qlen) {
                sch->qstats.overlimits++;
@@ -1025,13 +1027,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
                int level = 0;
                struct cbq_class *cl;
 
-               if ((cl = this->children) != NULL) {
+               cl = this->children;
+               if (cl) {
                        do {
                                if (cl->level > level)
                                        level = cl->level;
                        } while ((cl = cl->sibling) != this->children);
                }
-               this->level = level+1;
+               this->level = level + 1;
        } while ((this = this->tparent) != NULL);
 }
 
@@ -1047,14 +1050,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
                        /* BUGGGG... Beware! This expression suffer of
-                          arithmetic overflows!
+                        * arithmetic overflows!
                         */
                        if (cl->priority == prio) {
                                cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                        q->quanta[prio];
                        }
                        if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
-                               printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+                               pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+                                          cl->common.classid, cl->quantum);
                                cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                        }
                }
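
cbq_normalize_quanta() above derives each class's quantum as weight * allot * nclasses[prio] / quanta[prio] and, as the warning path shows, repairs out-of-range results to mtu/2 + 1. A hedged arithmetic sketch with made-up numbers, including the overflow caveat the original comment warns about:

#include <stdio.h>

int main(void)
{
        /* Illustrative values only. */
        long weight = 2, allot = 1514, nclasses = 3, quanta_sum = 6;
        long mtu = 1500;
        long quantum = (weight * allot * nclasses) / quanta_sum;        /* = 1514 */

        /* The kernel comment notes this product can overflow for large
         * weights/allots; the repair below also catches that case. */
        if (quantum <= 0 || quantum > 32 * mtu) {
                printf("bad quantum %ld, repairing\n", quantum);
                quantum = mtu / 2 + 1;
        }
        printf("quantum=%ld\n", quantum);
        return 0;
}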
@@ -1065,18 +1069,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *split = cl->split;
-       unsigned h;
+       unsigned int h;
        int i;
 
        if (split == NULL)
                return;
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
-               if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+       for (i = 0; i <= TC_PRIO_MAX; i++) {
+               if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
                        split->defaults[i] = NULL;
        }
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
+       for (i = 0; i <= TC_PRIO_MAX; i++) {
                int level = split->level;
 
                if (split->defaults[i])
@@ -1089,7 +1093,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
                        hlist_for_each_entry(c, n, &q->clhash.hash[h],
                                             common.hnode) {
                                if (c->split == split && c->level < level &&
-                                   c->defmap&(1<<i)) {
+                                   c->defmap & (1<<i)) {
                                        split->defaults[i] = c;
                                        level = c->level;
                                }
@@ -1103,7 +1107,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
        struct cbq_class *split = NULL;
 
        if (splitid == 0) {
-               if ((split = cl->split) == NULL)
+               split = cl->split;
+               if (!split)
                        return;
                splitid = split->common.classid;
        }
@@ -1121,9 +1126,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
                cl->defmap = 0;
                cbq_sync_defmap(cl);
                cl->split = split;
-               cl->defmap = def&mask;
+               cl->defmap = def & mask;
        } else
-               cl->defmap = (cl->defmap&~mask)|(def&mask);
+               cl->defmap = (cl->defmap & ~mask) | (def & mask);
 
        cbq_sync_defmap(cl);
 }
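
cbq_change_defmap() above merges the new default map under a mask: bits selected by mask come from def, the rest keep their old value, i.e. defmap = (defmap & ~mask) | (def & mask). A tiny sketch of that read-modify-write on a bitmap, with hypothetical values:

#include <stdio.h>

/* Update only the bits of *map selected by mask, taking them from def. */
static void change_map(unsigned int *map, unsigned int def, unsigned int mask)
{
        *map = (*map & ~mask) | (def & mask);
}

int main(void)
{
        unsigned int defmap = 0x0f;     /* priorities 0-3 currently mapped */

        change_map(&defmap, 0x30, 0xf0);        /* rewrite only the high nibble */
        printf("defmap=%#x\n", defmap);         /* 0x3f: low nibble untouched */
        return 0;
}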
@@ -1136,7 +1141,7 @@ static void cbq_unlink_class(struct cbq_class *this)
        qdisc_class_hash_remove(&q->clhash, &this->common);
 
        if (this->tparent) {
-               clp=&this->sibling;
+               clp = &this->sibling;
                cl = *clp;
                do {
                        if (cl == this) {
@@ -1175,7 +1180,7 @@ static void cbq_link_class(struct cbq_class *this)
        }
 }
 
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl, *cl_head;
@@ -1183,7 +1188,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
        unsigned int len;
 
        for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
-               if ((cl_head = q->active[prio]) == NULL)
+               cl_head = q->active[prio];
+               if (!cl_head)
                        continue;
 
                cl = cl_head;
@@ -1200,13 +1206,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 }
 
 static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        struct hlist_node *n;
        int prio;
-       unsigned h;
+       unsigned int h;
 
        q->activemask = 0;
        q->pmask = 0;
@@ -1238,21 +1244,21 @@ cbq_reset(struct Qdisc* sch)
 
 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
 {
-       if (lss->change&TCF_CBQ_LSS_FLAGS) {
-               cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
-               cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+       if (lss->change & TCF_CBQ_LSS_FLAGS) {
+               cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+               cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
        }
-       if (lss->change&TCF_CBQ_LSS_EWMA)
+       if (lss->change & TCF_CBQ_LSS_EWMA)
                cl->ewma_log = lss->ewma_log;
-       if (lss->change&TCF_CBQ_LSS_AVPKT)
+       if (lss->change & TCF_CBQ_LSS_AVPKT)
                cl->avpkt = lss->avpkt;
-       if (lss->change&TCF_CBQ_LSS_MINIDLE)
+       if (lss->change & TCF_CBQ_LSS_MINIDLE)
                cl->minidle = -(long)lss->minidle;
-       if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+       if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
                cl->maxidle = lss->maxidle;
                cl->avgidle = lss->maxidle;
        }
-       if (lss->change&TCF_CBQ_LSS_OFFTIME)
+       if (lss->change & TCF_CBQ_LSS_OFFTIME)
                cl->offtime = lss->offtime;
        return 0;
 }
@@ -1280,10 +1286,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
        if (wrr->weight)
                cl->weight = wrr->weight;
        if (wrr->priority) {
-               cl->priority = wrr->priority-1;
+               cl->priority = wrr->priority - 1;
                cl->cpriority = cl->priority;
                if (cl->priority >= cl->priority2)
-                       cl->priority2 = TC_CBQ_MAXPRIO-1;
+                       cl->priority2 = TC_CBQ_MAXPRIO - 1;
        }
 
        cbq_addprio(q, cl);
@@ -1300,10 +1306,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
                cl->overlimit = cbq_ovl_delay;
                break;
        case TC_CBQ_OVL_LOWPRIO:
-               if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
-                   ovl->priority2-1 <= cl->priority)
+               if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+                   ovl->priority2 - 1 <= cl->priority)
                        return -EINVAL;
-               cl->priority2 = ovl->priority2-1;
+               cl->priority2 = ovl->priority2 - 1;
                cl->overlimit = cbq_ovl_lowprio;
                break;
        case TC_CBQ_OVL_DROP:
@@ -1382,9 +1388,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        if (!q->link.q)
                q->link.q = &noop_qdisc;
 
-       q->link.priority = TC_CBQ_MAXPRIO-1;
-       q->link.priority2 = TC_CBQ_MAXPRIO-1;
-       q->link.cpriority = TC_CBQ_MAXPRIO-1;
+       q->link.priority = TC_CBQ_MAXPRIO - 1;
+       q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+       q->link.cpriority = TC_CBQ_MAXPRIO - 1;
        q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
        q->link.overlimit = cbq_ovl_classic;
        q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1415,7 +1421,7 @@ put_rtab:
        return err;
 }
 
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
 
@@ -1427,7 +1433,7 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_lssopt opt;
@@ -1452,15 +1458,15 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_wrropt opt;
 
        opt.flags = 0;
        opt.allot = cl->allot;
-       opt.priority = cl->priority+1;
-       opt.cpriority = cl->cpriority+1;
+       opt.priority = cl->priority + 1;
+       opt.cpriority = cl->cpriority + 1;
        opt.weight = cl->weight;
        NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
        return skb->len;
@@ -1470,13 +1476,13 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_ovl opt;
 
        opt.strategy = cl->ovl_strategy;
-       opt.priority2 = cl->priority2+1;
+       opt.priority2 = cl->priority2 + 1;
        opt.pad = 0;
        opt.penalty = cl->penalty;
        NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1487,7 +1493,7 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_fopt opt;
@@ -1506,7 +1512,7 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_police opt;
@@ -1570,7 +1576,7 @@ static int
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
               struct sk_buff *skb, struct tcmsg *tcm)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
        struct nlattr *nest;
 
        if (cl->tparent)
@@ -1598,7 +1604,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct gnet_dump *d)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        cl->qstats.qlen = cl->q->q.qlen;
        cl->xstats.avgidle = cl->avgidle;
@@ -1618,7 +1624,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
@@ -1641,10 +1647,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        return 0;
 }
 
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        return cl->q;
 }
@@ -1683,13 +1688,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
                kfree(cl);
 }
 
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct hlist_node *n, *next;
        struct cbq_class *cl;
-       unsigned h;
+       unsigned int h;
 
 #ifdef CONFIG_NET_CLS_ACT
        q->rx_class = NULL;
@@ -1713,7 +1717,7 @@ cbq_destroy(struct Qdisc* sch)
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1736,7 +1740,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 {
        int err;
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl = (struct cbq_class*)*arg;
+       struct cbq_class *cl = (struct cbq_class *)*arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct cbq_class *parent;
@@ -1828,13 +1832,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
        if (classid) {
                err = -EINVAL;
-               if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+               if (TC_H_MAJ(classid ^ sch->handle) ||
+                   cbq_class_lookup(q, classid))
                        goto failure;
        } else {
                int i;
-               classid = TC_H_MAKE(sch->handle,0x8000);
+               classid = TC_H_MAKE(sch->handle, 0x8000);
 
-               for (i=0; i<0x8000; i++) {
+               for (i = 0; i < 0x8000; i++) {
                        if (++q->hgenerator >= 0x8000)
                                q->hgenerator = 1;
                        if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1891,11 +1896,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        cl->minidle = -0x7FFFFFFF;
        cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
        cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
-       if (cl->ewma_log==0)
+       if (cl->ewma_log == 0)
                cl->ewma_log = q->link.ewma_log;
-       if (cl->maxidle==0)
+       if (cl->maxidle == 0)
                cl->maxidle = q->link.maxidle;
-       if (cl->avpkt==0)
+       if (cl->avpkt == 0)
                cl->avpkt = q->link.avpkt;
        cl->overlimit = cbq_ovl_classic;
        if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1921,7 +1926,7 @@ failure:
 static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
        unsigned int qlen;
 
        if (cl->filters || cl->children || cl == &q->link)
@@ -1979,7 +1984,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *p = (struct cbq_class*)parent;
+       struct cbq_class *p = (struct cbq_class *)parent;
        struct cbq_class *cl = cbq_class_lookup(q, classid);
 
        if (cl) {
@@ -1993,7 +1998,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
 
 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        cl->filters--;
 }
@@ -2003,7 +2008,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        struct hlist_node *n;
-       unsigned h;
+       unsigned int h;
 
        if (arg->stop)
                return;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
new file mode 100644 (file)
index 0000000..06afbae
--- /dev/null
@@ -0,0 +1,688 @@
+/*
+ * net/sched/sch_choke.c       CHOKE scheduler
+ *
+ * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/red.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+/*
+   CHOKe stateless AQM for fair bandwidth allocation
+   =================================================
+
+   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
+   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
+   maintains no flow state. The difference from RED is an additional step
+   during the enqueuing process. If average queue size is over the
+   low threshold (qmin), a packet is chosen at random from the queue.
+   If both the new and chosen packet are from the same flow, both
+   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
+   needs to access packets in queue randomly. It has a minimal class
+   interface to allow overriding the builtin flow classifier with
+   filters.
+
+   Source:
+   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
+   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
+   IEEE INFOCOM, 2000.
+
+   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
+   Characteristics", IEEE/ACM Transactions on Networking, 2004
+
+ */
+
+/* Upper bound on size of sk_buff table (packets) */
+#define CHOKE_MAX_QUEUE        (128*1024 - 1)
+
+struct choke_sched_data {
+/* Parameters */
+       u32              limit;
+       unsigned char    flags;
+
+       struct red_parms parms;
+
+/* Variables */
+       struct tcf_proto *filter_list;
+       struct {
+               u32     prob_drop;      /* Early probability drops */
+               u32     prob_mark;      /* Early probability marks */
+               u32     forced_drop;    /* Forced drops, qavg > max_thresh */
+               u32     forced_mark;    /* Forced marks, qavg > max_thresh */
+               u32     pdrop;          /* Drops due to queue limits */
+               u32     other;          /* Drops due to drop() calls */
+               u32     matched;        /* Drops to flow match */
+       } stats;
+
+       unsigned int     head;
+       unsigned int     tail;
+
+       unsigned int     tab_mask; /* size - 1 */
+
+       struct sk_buff **tab;
+};
+
+/* deliver a random number between 0 and N - 1 */
+static u32 random_N(unsigned int N)
+{
+       return reciprocal_divide(random32(), N);
+}
+
+/* number of elements in queue including holes */
+static unsigned int choke_len(const struct choke_sched_data *q)
+{
+       return (q->tail - q->head) & q->tab_mask;
+}
+
+/* Is ECN parameter configured */
+static int use_ecn(const struct choke_sched_data *q)
+{
+       return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max just be dropped (versus marked) */
+static int use_harddrop(const struct choke_sched_data *q)
+{
+       return q->flags & TC_RED_HARDDROP;
+}
+
+/* Move head pointer forward to skip over holes */
+static void choke_zap_head_holes(struct choke_sched_data *q)
+{
+       do {
+               q->head = (q->head + 1) & q->tab_mask;
+               if (q->head == q->tail)
+                       break;
+       } while (q->tab[q->head] == NULL);
+}
+
+/* Move tail pointer backwards to reuse holes */
+static void choke_zap_tail_holes(struct choke_sched_data *q)
+{
+       do {
+               q->tail = (q->tail - 1) & q->tab_mask;
+               if (q->head == q->tail)
+                       break;
+       } while (q->tab[q->tail] == NULL);
+}
+
+/* Drop packet from queue array by creating a "hole" */
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb = q->tab[idx];
+
+       q->tab[idx] = NULL;
+
+       if (idx == q->head)
+               choke_zap_head_holes(q);
+       if (idx == q->tail)
+               choke_zap_tail_holes(q);
+
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_drop(skb, sch);
+       qdisc_tree_decrease_qlen(sch, 1);
+       --sch->q.qlen;
+}
+
+/*
+ * Compare flow of two packets
+ *  Returns true only if source and destination address and port match.
+ *          false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+                            struct sk_buff *skb2)
+{
+       int off1, off2, poff;
+       const u32 *ports1, *ports2;
+       u8 ip_proto;
+       __u32 hash1;
+
+       if (skb1->protocol != skb2->protocol)
+               return false;
+
+       /* Use hash value as quick check
+        * Assumes that __skb_get_rxhash makes IP header and ports linear
+        */
+       hash1 = skb_get_rxhash(skb1);
+       if (!hash1 || hash1 != skb_get_rxhash(skb2))
+               return false;
+
+       /* Probably match, but be sure to avoid hash collisions */
+       off1 = skb_network_offset(skb1);
+       off2 = skb_network_offset(skb2);
+
+       switch (skb1->protocol) {
+       case __constant_htons(ETH_P_IP): {
+               const struct iphdr *ip1, *ip2;
+
+               ip1 = (const struct iphdr *) (skb1->data + off1);
+               ip2 = (const struct iphdr *) (skb2->data + off2);
+
+               ip_proto = ip1->protocol;
+               if (ip_proto != ip2->protocol ||
+                   ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
+                       return false;
+
+               if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
+                       ip_proto = 0;
+               off1 += ip1->ihl * 4;
+               off2 += ip2->ihl * 4;
+               break;
+       }
+
+       case __constant_htons(ETH_P_IPV6): {
+               const struct ipv6hdr *ip1, *ip2;
+
+               ip1 = (const struct ipv6hdr *) (skb1->data + off1);
+               ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+
+               ip_proto = ip1->nexthdr;
+               if (ip_proto != ip2->nexthdr ||
+                   ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
+                   ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
+                       return false;
+               off1 += 40;
+               off2 += 40;
+       }
+
+       default: /* Maybe compare MAC header here? */
+               return false;
+       }
+
+       poff = proto_ports_offset(ip_proto);
+       if (poff < 0)
+               return true;
+
+       off1 += poff;
+       off2 += poff;
+
+       ports1 = (__force u32 *)(skb1->data + off1);
+       ports2 = (__force u32 *)(skb2->data + off2);
+       return *ports1 == *ports2;
+}
+
+struct choke_skb_cb {
+       u16 classid;
+};
+
+static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) <
+               sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+       return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
+{
+       choke_skb_cb(skb)->classid = classid;
+}
+
+static u16 choke_get_classid(const struct sk_buff *skb)
+{
+       return choke_skb_cb(skb)->classid;
+}
+
+/*
+ * Classify flow using either:
+ *  1. pre-existing classification result in skb
+ *  2. fast internal classification
+ *  3. use TC filter based classification
+ */
+static bool choke_classify(struct sk_buff *skb,
+                          struct Qdisc *sch, int *qerr)
+
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct tcf_result res;
+       int result;
+
+       result = tc_classify(skb, q->filter_list, &res);
+       if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+               switch (result) {
+               case TC_ACT_STOLEN:
+               case TC_ACT_QUEUED:
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+               case TC_ACT_SHOT:
+                       return false;
+               }
+#endif
+               choke_set_classid(skb, TC_H_MIN(res.classid));
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Select a packet at random from queue
+ * HACK: since queue can have holes from previous deletion; retry several
+ *   times to find a random skb but then just give up and return the head
+ * Will return NULL if queue is empty (q->head == q->tail)
+ */
+static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+                                        unsigned int *pidx)
+{
+       struct sk_buff *skb;
+       int retrys = 3;
+
+       do {
+               *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+               skb = q->tab[*pidx];
+               if (skb)
+                       return skb;
+       } while (--retrys > 0);
+
+       return q->tab[*pidx = q->head];
+}
+
+/*
+ * Compare new packet with random packet in queue
+ * returns true if matched and sets *pidx
+ */
+static bool choke_match_random(const struct choke_sched_data *q,
+                              struct sk_buff *nskb,
+                              unsigned int *pidx)
+{
+       struct sk_buff *oskb;
+
+       if (q->head == q->tail)
+               return false;
+
+       oskb = choke_peek_random(q, pidx);
+       if (q->filter_list)
+               return choke_get_classid(nskb) == choke_get_classid(oskb);
+
+       return choke_match_flow(oskb, nskb);
+}
+
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct red_parms *p = &q->parms;
+       int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+       if (q->filter_list) {
+               /* If using external classifiers, get result and record it. */
+               if (!choke_classify(skb, sch, &ret))
+                       goto other_drop;        /* Packet was eaten by filter */
+       }
+
+       /* Compute average queue usage (see RED) */
+       p->qavg = red_calc_qavg(p, sch->q.qlen);
+       if (red_is_idling(p))
+               red_end_of_idle_period(p);
+
+       /* Is queue small? */
+       if (p->qavg <= p->qth_min)
+               p->qcount = -1;
+       else {
+               unsigned int idx;
+
+               /* Draw a packet at random from queue and compare flow */
+               if (choke_match_random(q, skb, &idx)) {
+                       q->stats.matched++;
+                       choke_drop_by_idx(sch, idx);
+                       goto congestion_drop;
+               }
+
+               /* Queue is large, always mark/drop */
+               if (p->qavg > p->qth_max) {
+                       p->qcount = -1;
+
+                       sch->qstats.overlimits++;
+                       if (use_harddrop(q) || !use_ecn(q) ||
+                           !INET_ECN_set_ce(skb)) {
+                               q->stats.forced_drop++;
+                               goto congestion_drop;
+                       }
+
+                       q->stats.forced_mark++;
+               } else if (++p->qcount) {
+                       if (red_mark_probability(p, p->qavg)) {
+                               p->qcount = 0;
+                               p->qR = red_random(p);
+
+                               sch->qstats.overlimits++;
+                               if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
+                                       q->stats.prob_drop++;
+                                       goto congestion_drop;
+                               }
+
+                               q->stats.prob_mark++;
+                       }
+               } else
+                       p->qR = red_random(p);
+       }
+
+       /* Admit new packet */
+       if (sch->q.qlen < q->limit) {
+               q->tab[q->tail] = skb;
+               q->tail = (q->tail + 1) & q->tab_mask;
+               ++sch->q.qlen;
+               sch->qstats.backlog += qdisc_pkt_len(skb);
+               return NET_XMIT_SUCCESS;
+       }
+
+       q->stats.pdrop++;
+       sch->qstats.drops++;
+       kfree_skb(skb);
+       return NET_XMIT_DROP;
+
+ congestion_drop:
+       qdisc_drop(skb, sch);
+       return NET_XMIT_CN;
+
+ other_drop:
+       if (ret & __NET_XMIT_BYPASS)
+               sch->qstats.drops++;
+       kfree_skb(skb);
+       return ret;
+}
+
+static struct sk_buff *choke_dequeue(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb;
+
+       if (q->head == q->tail) {
+               if (!red_is_idling(&q->parms))
+                       red_start_of_idle_period(&q->parms);
+               return NULL;
+       }
+
+       skb = q->tab[q->head];
+       q->tab[q->head] = NULL;
+       choke_zap_head_holes(q);
+       --sch->q.qlen;
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_bstats_update(sch, skb);
+
+       return skb;
+}
+
+static unsigned int choke_drop(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       unsigned int len;
+
+       len = qdisc_queue_drop(sch);
+       if (len > 0)
+               q->stats.other++;
+       else {
+               if (!red_is_idling(&q->parms))
+                       red_start_of_idle_period(&q->parms);
+       }
+
+       return len;
+}
+
+static void choke_reset(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       red_restart(&q->parms);
+}
+
+static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
+       [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
+       [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
+};
+
+
+static void choke_free(void *addr)
+{
+       if (addr) {
+               if (is_vmalloc_addr(addr))
+                       vfree(addr);
+               else
+                       kfree(addr);
+       }
+}
+
+static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_CHOKE_MAX + 1];
+       const struct tc_red_qopt *ctl;
+       int err;
+       struct sk_buff **old = NULL;
+       unsigned int mask;
+
+       if (opt == NULL)
+               return -EINVAL;
+
+       err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_CHOKE_PARMS] == NULL ||
+           tb[TCA_CHOKE_STAB] == NULL)
+               return -EINVAL;
+
+       ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+
+       if (ctl->limit > CHOKE_MAX_QUEUE)
+               return -EINVAL;
+
+       mask = roundup_pow_of_two(ctl->limit + 1) - 1;
+       if (mask != q->tab_mask) {
+               struct sk_buff **ntab;
+
+               ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+               if (!ntab)
+                       ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+               if (!ntab)
+                       return -ENOMEM;
+
+               sch_tree_lock(sch);
+               old = q->tab;
+               if (old) {
+                       unsigned int oqlen = sch->q.qlen, tail = 0;
+
+                       while (q->head != q->tail) {
+                               struct sk_buff *skb = q->tab[q->head];
+
+                               q->head = (q->head + 1) & q->tab_mask;
+                               if (!skb)
+                                       continue;
+                               if (tail < mask) {
+                                       ntab[tail++] = skb;
+                                       continue;
+                               }
+                               sch->qstats.backlog -= qdisc_pkt_len(skb);
+                               --sch->q.qlen;
+                               qdisc_drop(skb, sch);
+                       }
+                       qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+                       q->head = 0;
+                       q->tail = tail;
+               }
+
+               q->tab_mask = mask;
+               q->tab = ntab;
+       } else
+               sch_tree_lock(sch);
+
+       q->flags = ctl->flags;
+       q->limit = ctl->limit;
+
+       red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+                     ctl->Plog, ctl->Scell_log,
+                     nla_data(tb[TCA_CHOKE_STAB]));
+
+       if (q->head == q->tail)
+               red_end_of_idle_period(&q->parms);
+
+       sch_tree_unlock(sch);
+       choke_free(old);
+       return 0;
+}
+
+static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       return choke_change(sch, opt);
+}
+
+static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts = NULL;
+       struct tc_red_qopt opt = {
+               .limit          = q->limit,
+               .flags          = q->flags,
+               .qth_min        = q->parms.qth_min >> q->parms.Wlog,
+               .qth_max        = q->parms.qth_max >> q->parms.Wlog,
+               .Wlog           = q->parms.Wlog,
+               .Plog           = q->parms.Plog,
+               .Scell_log      = q->parms.Scell_log,
+       };
+
+       opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
+
+       NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       nla_nest_cancel(skb, opts);
+       return -EMSGSIZE;
+}
+
+static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct tc_choke_xstats st = {
+               .early  = q->stats.prob_drop + q->stats.forced_drop,
+               .marked = q->stats.prob_mark + q->stats.forced_mark,
+               .pdrop  = q->stats.pdrop,
+               .other  = q->stats.other,
+               .matched = q->stats.matched,
+       };
+
+       return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void choke_destroy(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       tcf_destroy_chain(&q->filter_list);
+       choke_free(q->tab);
+}
+
+static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       return NULL;
+}
+
+static unsigned long choke_get(struct Qdisc *sch, u32 classid)
+{
+       return 0;
+}
+
+static void choke_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
+                               u32 classid)
+{
+       return 0;
+}
+
+static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       if (cl)
+               return NULL;
+       return &q->filter_list;
+}
+
+static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
+                         struct sk_buff *skb, struct tcmsg *tcm)
+{
+       tcm->tcm_handle |= TC_H_MIN(cl);
+       return 0;
+}
+
+static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       if (!arg->stop) {
+               if (arg->fn(sch, 1, arg) < 0) {
+                       arg->stop = 1;
+                       return;
+               }
+               arg->count++;
+       }
+}
+
+static const struct Qdisc_class_ops choke_class_ops = {
+       .leaf           =       choke_leaf,
+       .get            =       choke_get,
+       .put            =       choke_put,
+       .tcf_chain      =       choke_find_tcf,
+       .bind_tcf       =       choke_bind,
+       .unbind_tcf     =       choke_put,
+       .dump           =       choke_dump_class,
+       .walk           =       choke_walk,
+};
+
+static struct sk_buff *choke_peek_head(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       return (q->head != q->tail) ? q->tab[q->head] : NULL;
+}
+
+static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
+       .id             =       "choke",
+       .priv_size      =       sizeof(struct choke_sched_data),
+
+       .enqueue        =       choke_enqueue,
+       .dequeue        =       choke_dequeue,
+       .peek           =       choke_peek_head,
+       .drop           =       choke_drop,
+       .init           =       choke_init,
+       .destroy        =       choke_destroy,
+       .reset          =       choke_reset,
+       .change         =       choke_change,
+       .dump           =       choke_dump,
+       .dump_stats     =       choke_dump_stats,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init choke_module_init(void)
+{
+       return register_qdisc(&choke_qdisc_ops);
+}
+
+static void __exit choke_module_exit(void)
+{
+       unregister_qdisc(&choke_qdisc_ops);
+}
+
+module_init(choke_module_init)
+module_exit(choke_module_exit)
+
+MODULE_LICENSE("GPL");
index de55e64..6b7fe4a 100644 (file)
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        bstats_update(&cl->bstats, skb);
-       qdisc_bstats_update(sch, skb);
 
        sch->q.qlen++;
        return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
                        skb = qdisc_dequeue_peeked(cl->qdisc);
                        if (cl->qdisc->q.qlen == 0)
                                list_del(&cl->alist);
+                       qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
index 60f4bdd..2c79020 100644 (file)
@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
                mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
 
        if (tb[TCA_DSMARK_VALUE])
-               p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+               p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
 
        if (tb[TCA_DSMARK_MASK])
-               p->mask[*arg-1] = mask;
+               p->mask[*arg - 1] = mask;
 
        err = 0;
 
@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
        if (!dsmark_valid_index(p, arg))
                return -EINVAL;
 
-       p->mask[arg-1] = 0xff;
-       p->value[arg-1] = 0;
+       p->mask[arg - 1] = 0xff;
+       p->value[arg - 1] = 0;
 
        return 0;
 }
@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
                if (p->mask[i] == 0xff && !p->value[i])
                        goto ignore;
                if (walker->count >= walker->skip) {
-                       if (walker->fn(sch, i+1, walker) < 0) {
+                       if (walker->fn(sch, i + 1, walker) < 0) {
                                walker->stop = 1;
                                break;
                        }
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return err;
        }
 
-       qdisc_bstats_update(sch, skb);
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
        if (skb == NULL)
                return NULL;
 
+       qdisc_bstats_update(sch, skb);
        sch->q.qlen--;
 
        index = skb->tc_index & (p->indices - 1);
@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 * and don't need yet another qdisc as a bypass.
                 */
                if (p->mask[index] != 0xff || p->value[index])
-                       printk(KERN_WARNING
-                              "dsmark_dequeue: unsupported protocol %d\n",
-                              ntohs(skb->protocol));
+                       pr_warning("dsmark_dequeue: unsupported protocol %d\n",
+                                  ntohs(skb->protocol));
                break;
        }
 
@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
        if (!dsmark_valid_index(p, cl))
                return -EINVAL;
 
-       tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+       tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
        tcm->tcm_info = p->q->handle;
 
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
-       NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+       NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
+       NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
 
        return nla_nest_end(skb, opts);
 
index aa4d633..66effe2 100644 (file)
 
 /* 1 band FIFO pseudo-"scheduler" */
 
-struct fifo_sched_data
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       u32 limit;
-};
-
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
-{
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
+       if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        return qdisc_reshape_fail(skb, sch);
 }
 
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (likely(skb_queue_len(&sch->q) < q->limit))
+       if (likely(skb_queue_len(&sch->q) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        return qdisc_reshape_fail(skb, sch);
 }
 
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       struct sk_buff *skb_head;
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (likely(skb_queue_len(&sch->q) < q->limit))
+       if (likely(skb_queue_len(&sch->q) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        /* queue full, remove one skb to fulfill the limit */
-       skb_head = qdisc_dequeue_head(sch);
+       __qdisc_queue_drop_head(sch, &sch->q);
        sch->qstats.drops++;
-       kfree_skb(skb_head);
-
        qdisc_enqueue_tail(skb, sch);
 
        return NET_XMIT_CN;
@@ -64,31 +50,40 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
+       bool bypass;
+       bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
        if (opt == NULL) {
                u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
-               if (sch->ops == &bfifo_qdisc_ops)
+               if (is_bfifo)
                        limit *= psched_mtu(qdisc_dev(sch));
 
-               q->limit = limit;
+               sch->limit = limit;
        } else {
                struct tc_fifo_qopt *ctl = nla_data(opt);
 
                if (nla_len(opt) < sizeof(*ctl))
                        return -EINVAL;
 
-               q->limit = ctl->limit;
+               sch->limit = ctl->limit;
        }
 
+       if (is_bfifo)
+               bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
+       else
+               bypass = sch->limit >= 1;
+
+       if (bypass)
+               sch->flags |= TCQ_F_CAN_BYPASS;
+       else
+               sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
 static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
-       struct tc_fifo_qopt opt = { .limit = q->limit };
+       struct tc_fifo_qopt opt = { .limit = sch->limit };
 
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;
@@ -99,7 +94,7 @@ nla_put_failure:
 
 struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
        .id             =       "pfifo",
-       .priv_size      =       sizeof(struct fifo_sched_data),
+       .priv_size      =       0,
        .enqueue        =       pfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
@@ -114,7 +109,7 @@ EXPORT_SYMBOL(pfifo_qdisc_ops);
 
 struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
        .id             =       "bfifo",
-       .priv_size      =       sizeof(struct fifo_sched_data),
+       .priv_size      =       0,
        .enqueue        =       bfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
@@ -129,7 +124,7 @@ EXPORT_SYMBOL(bfifo_qdisc_ops);
 
 struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
        .id             =       "pfifo_head_drop",
-       .priv_size      =       sizeof(struct fifo_sched_data),
+       .priv_size      =       0,
        .enqueue        =       pfifo_tail_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
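
    [Editor's illustration] The fifo_init() hunk above does two things: the per-qdisc private limit moves into the generic sch->limit (hence priv_size = 0 for all three ops structures), and a FIFO is now flagged TCQ_F_CAN_BYPASS whenever no arriving packet can be refused. The predicate amounts to the following sketch (fifo_can_bypass is an invented name, not a kernel helper):

        #include <stdbool.h>

        /*
         * A byte-limited bfifo may be bypassed if it can always take one
         * full-MTU packet; a packet-limited pfifo / pfifo_head_drop if it can
         * hold at least one packet.  An empty FIFO then never changes the fate
         * of a packet, so the fast path may skip enqueue/dequeue entirely.
         */
        static bool fifo_can_bypass(bool is_bfifo, unsigned int limit, unsigned int mtu)
        {
                return is_bfifo ? limit >= mtu : limit >= 1;
        }
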
index 34dc598..c84b659 100644 (file)
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                 */
                kfree_skb(skb);
                if (net_ratelimit())
-                       printk(KERN_WARNING "Dead loop on netdevice %s, "
-                              "fix it urgently!\n", dev_queue->dev->name);
+                       pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+                                  dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
-                       printk(KERN_WARNING "BUG %s code %d qlen %d\n",
-                              dev->name, ret, q->q.qlen);
+                       pr_warning("BUG %s code %d qlen %d\n",
+                                  dev->name, ret, q->q.qlen);
 
                ret = dev_requeue_skb(skb, q);
        }
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-       { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+       1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+};
 
 /* 3-band FIFO queue: old style, but should be a bit faster than
    generic prio+fifo combination.
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
        return priv->q + band;
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 {
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
        return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
        return NULL;
 }
 
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
        return NULL;
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 {
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
-       memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+       memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;
 
@@ -526,6 +527,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(band2list(priv, prio));
 
+       /* Can by-pass the queue discipline */
+       qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
 }
 
@@ -540,27 +543,32 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
 };
+EXPORT_SYMBOL(pfifo_fast_ops);
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
 {
        void *p;
        struct Qdisc *sch;
-       unsigned int size;
+       unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
 
-       /* ensure that the Qdisc and the private data are 64-byte aligned */
-       size = QDISC_ALIGN(sizeof(*sch));
-       size += ops->priv_size + (QDISC_ALIGNTO - 1);
-
        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));
 
        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-       sch->padded = (char *) sch - (char *) p;
-
+       /* if we got non aligned memory, ask more and do alignment ourself */
+       if (sch != p) {
+               kfree(p);
+               p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
+                                netdev_queue_numa_node_read(dev_queue));
+               if (!p)
+                       goto errout;
+               sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
+               sch->padded = (char *) sch - (char *) p;
+       }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        spin_lock_init(&sch->busylock);
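
    [Editor's illustration] The qdisc_alloc() rework just above is an opportunistic-alignment allocation: request the exact size first, and only when the allocator happens to hand back a pointer that is not QDISC_ALIGNTO-aligned pay for a second, padded allocation that is aligned by hand. A userspace sketch of the same pattern, assuming a 64-byte alignment target (alloc_aligned and ALIGNTO are invented for the example, not kernel APIs):

        #include <stdint.h>
        #include <stdlib.h>

        #define ALIGNTO 64UL    /* stand-in for QDISC_ALIGNTO */

        /* Returns an ALIGNTO-aligned, zeroed block; *base is the pointer to free(). */
        static void *alloc_aligned(size_t size, void **base)
        {
                char *p = calloc(1, size);

                if (p && ((uintptr_t)p & (ALIGNTO - 1)) == 0) {
                        *base = p;              /* common case: already aligned, no padding paid */
                        return p;
                }
                free(p);                        /* rare case: retry with room to align by hand */
                p = calloc(1, size + ALIGNTO - 1);
                if (!p) {
                        *base = NULL;
                        return NULL;
                }
                *base = p;
                return p + (-(uintptr_t)p & (ALIGNTO - 1));
        }

    In the patch, sch->padded records that same offset so the free path can subtract it and release the original pointer.
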
@@ -630,7 +638,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 #ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);
 
-       qdisc_put_stab(qdisc->stab);
+       qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
@@ -674,25 +682,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 
        return oqdisc;
 }
+EXPORT_SYMBOL(dev_graft_qdisc);
 
 static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
 {
-       struct Qdisc *qdisc;
+       struct Qdisc *qdisc = &noqueue_qdisc;
 
        if (dev->tx_queue_len) {
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_fast_ops, TC_H_ROOT);
                if (!qdisc) {
-                       printk(KERN_INFO "%s: activation failed\n", dev->name);
+                       netdev_info(dev, "activation failed\n");
                        return;
                }
-
-               /* Can by-pass the queue discipline for default qdisc */
-               qdisc->flags |= TCQ_F_CAN_BYPASS;
-       } else {
-               qdisc =  &noqueue_qdisc;
        }
        dev_queue->qdisc_sleeping = qdisc;
 }
@@ -761,6 +765,7 @@ void dev_activate(struct net_device *dev)
                dev_watchdog_up(dev);
        }
 }
+EXPORT_SYMBOL(dev_activate);
 
 static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
@@ -839,7 +844,9 @@ void dev_deactivate(struct net_device *dev)
 
        list_add(&dev->unreg_list, &single);
        dev_deactivate_many(&single);
+       list_del(&single);
 }
+EXPORT_SYMBOL(dev_deactivate);
 
 static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
index 51dcc2a..b9493a0 100644 (file)
@@ -32,8 +32,7 @@
 struct gred_sched_data;
 struct gred_sched;
 
-struct gred_sched_data
-{
+struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
        u32             DP;             /* the drop pramaters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
@@ -50,8 +49,7 @@ enum {
        GRED_RIO_MODE,
 };
 
-struct gred_sched
-{
+struct gred_sched {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             red_flags;
@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
        return t->red_flags & TC_RED_HARDDROP;
 }
 
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       struct gred_sched_data *q=NULL;
-       struct gred_sched *t= qdisc_priv(sch);
+       struct gred_sched_data *q = NULL;
+       struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);
 
-       if (dp >= t->DPs  || (q = t->tab[dp]) == NULL) {
+       if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;
 
-               if ((q = t->tab[dp]) == NULL) {
+               q = t->tab[dp];
+               if (!q) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                for (i = 0; i < t->DPs; i++) {
                        if (t->tab[i] && t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->parms))
-                               qavg +=t->tab[i]->parms.qavg;
+                               qavg += t->tab[i]->parms.qavg;
                }
 
        }
@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                gred_store_wred_set(t, q);
 
        switch (red_action(&q->parms, q->parms.qavg + qavg)) {
-               case RED_DONT_MARK:
-                       break;
-
-               case RED_PROB_MARK:
-                       sch->qstats.overlimits++;
-                       if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
-                               q->stats.prob_drop++;
-                               goto congestion_drop;
-                       }
-
-                       q->stats.prob_mark++;
-                       break;
-
-               case RED_HARD_MARK:
-                       sch->qstats.overlimits++;
-                       if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
-                           !INET_ECN_set_ce(skb)) {
-                               q->stats.forced_drop++;
-                               goto congestion_drop;
-                       }
-                       q->stats.forced_mark++;
-                       break;
+       case RED_DONT_MARK:
+               break;
+
+       case RED_PROB_MARK:
+               sch->qstats.overlimits++;
+               if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+                       q->stats.prob_drop++;
+                       goto congestion_drop;
+               }
+
+               q->stats.prob_mark++;
+               break;
+
+       case RED_HARD_MARK:
+               sch->qstats.overlimits++;
+               if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+                   !INET_ECN_set_ce(skb)) {
+                       q->stats.forced_drop++;
+                       goto congestion_drop;
+               }
+               q->stats.forced_mark++;
+               break;
        }
 
        if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@ congestion_drop:
        return NET_XMIT_CN;
 }
 
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 {
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 
                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
-                               printk(KERN_WARNING "GRED: Unable to relocate "
-                                      "VQ 0x%x after dequeue, screwing up "
-                                      "backlog.\n", tc_index_to_dp(skb));
+                               pr_warning("GRED: Unable to relocate VQ 0x%x "
+                                          "after dequeue, screwing up "
+                                          "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= qdisc_pkt_len(skb);
 
@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
        return NULL;
 }
 
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
 {
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
 
                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
-                               printk(KERN_WARNING "GRED: Unable to relocate "
-                                      "VQ 0x%x while dropping, screwing up "
-                                      "backlog.\n", tc_index_to_dp(skb));
+                               pr_warning("GRED: Unable to relocate VQ 0x%x "
+                                          "while dropping, screwing up "
+                                          "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= len;
                        q->stats.other++;
@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 
 }
 
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
 {
        int i;
        struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 
        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
-                       printk(KERN_WARNING "GRED: Warning: Destroying "
-                              "shadowed VQ 0x%x\n", i);
+                       pr_warning("GRED: Warning: Destroying "
+                                  "shadowed VQ 0x%x\n", i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
index 2e45791..6488e64 100644 (file)
@@ -81,8 +81,7 @@
  *   that are expensive on 32-bit architectures.
  */
 
-struct internal_sc
-{
+struct internal_sc {
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
 };
 
 /* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
 };
 
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
 };
 
-struct hfsc_class
-{
+struct hfsc_class {
        struct Qdisc_class_common cl_common;
        unsigned int    refcnt;         /* usage count */
 
@@ -140,8 +136,8 @@ struct hfsc_class
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */
 
-       u64     cl_d;                   /* deadline*/
-       u64     cl_e;                   /* eligible time */
+       u64     cl_d;                   /* deadline*/
+       u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
        unsigned long   cl_nactive;     /* number of active children */
 };
 
-struct hfsc_sched
-{
+struct hfsc_sched {
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct Qdisc_class_hash clhash;         /* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
-                               max_cl = rb_entry(n, struct hfsc_class,vt_node);
+                               max_cl = rb_entry(n, struct hfsc_class, vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes.  if the parent's period didn't
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                        return NULL;
                }
 #endif
-               if ((cl = (struct hfsc_class *)res.class) == NULL) {
-                       if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+               cl = (struct hfsc_class *)res.class;
+               if (!cl) {
+                       cl = hfsc_find_class(res.classid, sch);
+                       if (!cl)
                                break; /* filter selected invalid classid */
                        if (cl->level >= head->level)
                                break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
        return -1;
 }
 
-static inline int
+static int
 hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
 {
        if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
        struct hfsc_class *cl;
        u64 next_time = 0;
 
-       if ((cl = eltree_get_minel(q)) != NULL)
+       cl = eltree_get_minel(q);
+       if (cl)
                next_time = cl->cl_e;
        if (q->root.cl_cfmin != 0) {
                if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1600,7 +1598,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                set_active(cl, qdisc_pkt_len(skb));
 
        bstats_update(&cl->bstats, skb);
-       qdisc_bstats_update(sch, skb);
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
@@ -1626,7 +1623,8 @@ hfsc_dequeue(struct Qdisc *sch)
         * find the class with the minimum deadline among
         * the eligible classes.
         */
-       if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+       cl = eltree_get_mindl(q, cur_time);
+       if (cl) {
                realtime = 1;
        } else {
                /*
@@ -1665,7 +1663,8 @@ hfsc_dequeue(struct Qdisc *sch)
                set_passive(cl);
        }
 
-       sch->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(sch);
+       qdisc_bstats_update(sch, skb);
        sch->q.qlen--;
 
        return skb;
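
The hfsc hunks above repeatedly turn assignments buried inside if-conditions (e.g. "if ((cl = hfsc_find_class(...)) == NULL)") into a plain assignment followed by a NULL test, the kind of cleanup checkpatch.pl flags. A minimal user-space sketch of that rewrite, using a hypothetical lookup() helper in place of the kernel class lookups:

#include <stdio.h>
#include <stdlib.h>

struct item {
        int id;
};

/* hypothetical lookup, standing in for hfsc_find_class()/htb_find() */
static struct item *lookup(int id)
{
        struct item *it = malloc(sizeof(*it));

        if (it)
                it->id = id;
        return it;
}

int main(void)
{
        struct item *it;

        /* old style: if ((it = lookup(42)) == NULL) return 1; */
        it = lookup(42);
        if (!it)
                return 1;

        printf("found %d\n", it->id);
        free(it);
        return 0;
}

The behaviour is identical; the separated form just keeps the side effect out of the condition.
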
index 984c1b0..e1429a8 100644 (file)
@@ -99,9 +99,10 @@ struct htb_class {
                        struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
                        struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
                        /* When class changes from state 1->2 and disconnects from
-                          parent's feed then we lost ptr value and start from the
-                          first child again. Here we store classid of the
-                          last valid ptr (used when ptr is NULL). */
+                        * parent's feed then we lost ptr value and start from the
+                        * first child again. Here we store classid of the
+                        * last valid ptr (used when ptr is NULL).
+                        */
                        u32 last_ptr_id[TC_HTB_NUMPRIO];
                } inner;
        } un;
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
  * then finish and return direct queue.
  */
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
 
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
        int result;
 
        /* allow to select class by setting skb->priority to valid classid;
-          note that nfmark can be used too by attaching filter fw with no
-          rules in it */
+        * note that nfmark can be used too by attaching filter fw with no
+        * rules in it
+        */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;      /* X:0 (direct flow) selected */
-       if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+       cl = htb_find(skb->priority, sch);
+       if (cl && cl->level == 0)
                return cl;
 
        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                        return NULL;
                }
 #endif
-               if ((cl = (void *)res.class) == NULL) {
+               cl = (void *)res.class;
+               if (!cl) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;      /* X:0 (direct flow) */
-                       if ((cl = htb_find(res.classid, sch)) == NULL)
+                       cl = htb_find(res.classid, sch);
+                       if (!cl)
                                break;  /* filter selected invalid classid */
                }
                if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 
                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use so that
-                                  reset bit in mask as parent is already ok */
+                                * reset bit in mask as parent is already ok
+                                */
                                mask &= ~(1 << prio);
 
                        htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 
                        if (p->un.inner.ptr[prio] == cl->node + prio) {
                                /* we are removing child which is pointed to from
-                                  parent feed - forget the pointer but remember
-                                  classid */
+                                * parent feed - forget the pointer but remember
+                                * classid
+                                */
                                p->un.inner.last_ptr_id[prio] = cl->common.classid;
                                p->un.inner.ptr[prio] = NULL;
                        }
@@ -574,7 +581,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        sch->q.qlen++;
-       qdisc_bstats_update(sch, skb);
        return NET_XMIT_SUCCESS;
 }
 
@@ -664,8 +670,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
                                   unsigned long start)
 {
        /* don't run for longer than 2 jiffies; 2 is used instead of
-          1 to simplify things when jiffy is going to be incremented
-          too soon */
+        * 1 to simplify things when jiffy is going to be incremented
+        * too soon
+        */
        unsigned long stop_at = start + 2;
        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
@@ -688,7 +695,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 
        /* too much load - let's continue after a break for scheduling */
        if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-               printk(KERN_WARNING "htb: too many events!\n");
+               pr_warning("htb: too many events!\n");
                q->warned |= HTB_WARN_TOOMANYEVENTS;
        }
 
@@ -696,7 +703,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
-   is no such one exists. */
+ * is no such one exists.
+ */
 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
 {
@@ -740,12 +748,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
-                          the original or next ptr */
+                        * the original or next ptr
+                        */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;   /* ptr is valid now so that remove this hint as it
-                                  can become out of date quickly */
+                                * can become out of date quickly
+                                */
                if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
@@ -773,7 +783,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 }
 
 /* dequeues packet at given priority and level; call only if
-   you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
                                        int level)
 {
@@ -790,9 +801,10 @@ next:
                        return NULL;
 
                /* class can be empty - it is unlikely but can be true if leaf
-                  qdisc drops packets in enqueue routine or if someone used
-                  graft operation on the leaf since last dequeue;
-                  simply deactivate and skip such class */
+                * qdisc drops packets in enqueue routine or if someone used
+                * graft operation on the leaf since last dequeue;
+                * simply deactivate and skip such class
+                */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);
@@ -832,7 +844,8 @@ next:
                                          ptr[0]) + prio);
                }
                /* this used to be after charge_class but this constelation
-                  gives us slightly better performance */
+                * gives us slightly better performance
+                */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
@@ -842,7 +855,7 @@ next:
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-       struct sk_buff *skb = NULL;
+       struct sk_buff *skb;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        psched_time_t next_event;
@@ -851,7 +864,9 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __skb_dequeue(&q->direct_queue);
        if (skb != NULL) {
-               sch->flags &= ~TCQ_F_THROTTLED;
+ok:
+               qdisc_bstats_update(sch, skb);
+               qdisc_unthrottled(sch);
                sch->q.qlen--;
                return skb;
        }
@@ -882,13 +897,11 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);
+
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
-                       if (likely(skb != NULL)) {
-                               sch->q.qlen--;
-                               sch->flags &= ~TCQ_F_THROTTLED;
-                               goto fin;
-                       }
+                       if (likely(skb != NULL))
+                               goto ok;
                }
        }
        sch->qstats.overlimits++;
@@ -989,13 +1002,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
                return err;
 
        if (tb[TCA_HTB_INIT] == NULL) {
-               printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+               pr_err("HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = nla_data(tb[TCA_HTB_INIT]);
        if (gopt->version != HTB_VER >> 16) {
-               printk(KERN_ERR
-                      "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+               pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
                       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                return -EINVAL;
        }
@@ -1208,9 +1220,10 @@ static void htb_destroy(struct Qdisc *sch)
        cancel_work_sync(&q->work);
        qdisc_watchdog_cancel(&q->watchdog);
        /* This line used to be after htb_destroy_class call below
-          and surprisingly it worked in 2.4. But it must precede it
-          because filter need its target class alive to be able to call
-          unbind_filter on it (without Oops). */
+        * and surprisingly it worked in 2.4. But it must precede it
+        * because filter need its target class alive to be able to call
+        * unbind_filter on it (without Oops).
+        */
        tcf_destroy_chain(&q->filter_list);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1344,11 +1357,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
                /* check maximal depth */
                if (parent && parent->parent && parent->parent->level < 2) {
-                       printk(KERN_ERR "htb: tree is too deep\n");
+                       pr_err("htb: tree is too deep\n");
                        goto failure;
                }
                err = -ENOBUFS;
-               if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+               cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+               if (!cl)
                        goto failure;
 
                err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1368,8 +1382,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        RB_CLEAR_NODE(&cl->node[prio]);
 
                /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
-                  so that can't be used inside of sch_tree_lock
-                  -- thanks to Karlis Peisenieks */
+                * so that can't be used inside of sch_tree_lock
+                * -- thanks to Karlis Peisenieks
+                */
                new_q = qdisc_create_dflt(sch->dev_queue,
                                          &pfifo_qdisc_ops, classid);
                sch_tree_lock(sch);
@@ -1421,17 +1436,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        }
 
        /* it used to be a nasty bug here, we have to check that node
-          is really leaf before changing cl->un.leaf ! */
+        * is really leaf before changing cl->un.leaf !
+        */
        if (!cl->level) {
                cl->quantum = rtab->rate.rate / q->rate2quantum;
                if (!hopt->quantum && cl->quantum < 1000) {
-                       printk(KERN_WARNING
+                       pr_warning(
                               "HTB: quantum of class %X is small. Consider r2q change.\n",
                               cl->common.classid);
                        cl->quantum = 1000;
                }
                if (!hopt->quantum && cl->quantum > 200000) {
-                       printk(KERN_WARNING
+                       pr_warning(
                               "HTB: quantum of class %X is big. Consider r2q change.\n",
                               cl->common.classid);
                        cl->quantum = 200000;
@@ -1480,13 +1496,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
        struct htb_class *cl = htb_find(classid, sch);
 
        /*if (cl && !cl->level) return 0;
-          The line above used to be there to prevent attaching filters to
-          leaves. But at least tc_index filter uses this just to get class
-          for other reasons so that we have to allow for it.
-          ----
-          19.6.2002 As Werner explained it is ok - bind filter is just
-          another way to "lock" the class - unlike "get" this lock can
-          be broken by class during destroy IIUC.
+        * The line above used to be there to prevent attaching filters to
+        * leaves. But at least tc_index filter uses this just to get class
+        * for other reasons so that we have to allow for it.
+        * ----
+        * 19.6.2002 As Werner explained it is ok - bind filter is just
+        * another way to "lock" the class - unlike "get" this lock can
+        * be broken by class during destroy IIUC.
         */
        if (cl)
                cl->filter_cnt++;
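
A second pattern running through these hunks (hfsc, htb, and the multiq/netem/prio/red files below) is that qdisc_bstats_update() disappears from the enqueue path and is called instead on the skb that dequeue actually hands out, next to qdisc_unthrottled(). A small user-space analogue of counting bytes and packets only at delivery time; the fifo and stats types here are invented for the sketch:

#include <stdio.h>

struct stats {
        unsigned long packets;
        unsigned long bytes;
};

struct pkt {
        unsigned int len;
};

/* account a packet only when it is really handed out by dequeue */
static const struct pkt *dequeue(const struct pkt **q, int *qlen,
                                 struct stats *st)
{
        const struct pkt *p;

        if (*qlen == 0)
                return NULL;
        p = q[--(*qlen)];
        st->packets++;          /* previously bumped at enqueue time */
        st->bytes += p->len;
        return p;
}

int main(void)
{
        const struct pkt a = { 100 }, b = { 1500 };
        const struct pkt *q[] = { &a, &b };
        int qlen = 2;
        struct stats st = { 0, 0 };

        while (dequeue(q, &qlen, &st))
                ;
        printf("%lu packets, %lu bytes\n", st.packets, st.bytes);
        return 0;
}

Counting at dequeue means packets dropped or requeued inside the qdisc never inflate the byte/packet statistics.
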
index ecc302f..ec5cbc8 100644 (file)
@@ -61,7 +61,6 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                                                    TC_H_MIN(ntx + 1)));
                if (qdisc == NULL)
                        goto err;
-               qdisc->flags |= TCQ_F_CAN_BYPASS;
                priv->qdiscs[ntx] = qdisc;
        }
 
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
new file mode 100644 (file)
index 0000000..ea17cbe
--- /dev/null
@@ -0,0 +1,418 @@
+/*
+ * net/sched/sch_mqprio.c
+ *
+ * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+
+struct mqprio_sched {
+       struct Qdisc            **qdiscs;
+       int hw_owned;
+};
+
+static void mqprio_destroy(struct Qdisc *sch)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       unsigned int ntx;
+
+       if (priv->qdiscs) {
+               for (ntx = 0;
+                    ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+                    ntx++)
+                       qdisc_destroy(priv->qdiscs[ntx]);
+               kfree(priv->qdiscs);
+       }
+
+       if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+               dev->netdev_ops->ndo_setup_tc(dev, 0);
+       else
+               netdev_set_num_tc(dev, 0);
+}
+
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+       int i, j;
+
+       /* Verify num_tc is not out of max range */
+       if (qopt->num_tc > TC_MAX_QUEUE)
+               return -EINVAL;
+
+       /* Verify priority mapping uses valid tcs */
+       for (i = 0; i < TC_BITMASK + 1; i++) {
+               if (qopt->prio_tc_map[i] >= qopt->num_tc)
+                       return -EINVAL;
+       }
+
+       /* net_device does not support requested operation */
+       if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+               return -EINVAL;
+
+       /* if hw owned qcount and qoffset are taken from LLD so
+        * no reason to verify them here
+        */
+       if (qopt->hw)
+               return 0;
+
+       for (i = 0; i < qopt->num_tc; i++) {
+               unsigned int last = qopt->offset[i] + qopt->count[i];
+
+               /* Verify the queue count is in tx range being equal to the
+                * real_num_tx_queues indicates the last queue is in use.
+                */
+               if (qopt->offset[i] >= dev->real_num_tx_queues ||
+                   !qopt->count[i] ||
+                   last > dev->real_num_tx_queues)
+                       return -EINVAL;
+
+               /* Verify that the offset and counts do not overlap */
+               for (j = i + 1; j < qopt->num_tc; j++) {
+                       if (last > qopt->offset[j])
+                               return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       struct netdev_queue *dev_queue;
+       struct Qdisc *qdisc;
+       int i, err = -EOPNOTSUPP;
+       struct tc_mqprio_qopt *qopt = NULL;
+
+       BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+       BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
+
+       if (sch->parent != TC_H_ROOT)
+               return -EOPNOTSUPP;
+
+       if (!netif_is_multiqueue(dev))
+               return -EOPNOTSUPP;
+
+       if (nla_len(opt) < sizeof(*qopt))
+               return -EINVAL;
+
+       qopt = nla_data(opt);
+       if (mqprio_parse_opt(dev, qopt))
+               return -EINVAL;
+
+       /* pre-allocate qdisc, attachment can't fail */
+       priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+                              GFP_KERNEL);
+       if (priv->qdiscs == NULL) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               dev_queue = netdev_get_tx_queue(dev, i);
+               qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+                                         TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                                   TC_H_MIN(i + 1)));
+               if (qdisc == NULL) {
+                       err = -ENOMEM;
+                       goto err;
+               }
+               priv->qdiscs[i] = qdisc;
+       }
+
+       /* If the mqprio options indicate that hardware should own
+        * the queue mapping then run ndo_setup_tc otherwise use the
+        * supplied and verified mapping
+        */
+       if (qopt->hw) {
+               priv->hw_owned = 1;
+               err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+               if (err)
+                       goto err;
+       } else {
+               netdev_set_num_tc(dev, qopt->num_tc);
+               for (i = 0; i < qopt->num_tc; i++)
+                       netdev_set_tc_queue(dev, i,
+                                           qopt->count[i], qopt->offset[i]);
+       }
+
+       /* Always use supplied priority mappings */
+       for (i = 0; i < TC_BITMASK + 1; i++)
+               netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+
+       sch->flags |= TCQ_F_MQROOT;
+       return 0;
+
+err:
+       mqprio_destroy(sch);
+       return err;
+}
+
+static void mqprio_attach(struct Qdisc *sch)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       struct Qdisc *qdisc;
+       unsigned int ntx;
+
+       /* Attach underlying qdisc */
+       for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+               qdisc = priv->qdiscs[ntx];
+               qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+               if (qdisc)
+                       qdisc_destroy(qdisc);
+       }
+       kfree(priv->qdiscs);
+       priv->qdiscs = NULL;
+}
+
+static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
+                                            unsigned long cl)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);
+
+       if (ntx >= dev->num_tx_queues)
+               return NULL;
+       return netdev_get_tx_queue(dev, ntx);
+}
+
+static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+                   struct Qdisc **old)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+       if (!dev_queue)
+               return -EINVAL;
+
+       if (dev->flags & IFF_UP)
+               dev_deactivate(dev);
+
+       *old = dev_graft_qdisc(dev_queue, new);
+
+       if (dev->flags & IFF_UP)
+               dev_activate(dev);
+
+       return 0;
+}
+
+static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       unsigned char *b = skb_tail_pointer(skb);
+       struct tc_mqprio_qopt opt = { 0 };
+       struct Qdisc *qdisc;
+       unsigned int i;
+
+       sch->q.qlen = 0;
+       memset(&sch->bstats, 0, sizeof(sch->bstats));
+       memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+               spin_lock_bh(qdisc_lock(qdisc));
+               sch->q.qlen             += qdisc->q.qlen;
+               sch->bstats.bytes       += qdisc->bstats.bytes;
+               sch->bstats.packets     += qdisc->bstats.packets;
+               sch->qstats.qlen        += qdisc->qstats.qlen;
+               sch->qstats.backlog     += qdisc->qstats.backlog;
+               sch->qstats.drops       += qdisc->qstats.drops;
+               sch->qstats.requeues    += qdisc->qstats.requeues;
+               sch->qstats.overlimits  += qdisc->qstats.overlimits;
+               spin_unlock_bh(qdisc_lock(qdisc));
+       }
+
+       opt.num_tc = netdev_get_num_tc(dev);
+       memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+       opt.hw = priv->hw_owned;
+
+       for (i = 0; i < netdev_get_num_tc(dev); i++) {
+               opt.count[i] = dev->tc_to_txq[i].count;
+               opt.offset[i] = dev->tc_to_txq[i].offset;
+       }
+
+       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+       return skb->len;
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -1;
+}
+
+static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+       struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+       if (!dev_queue)
+               return NULL;
+
+       return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned int ntx = TC_H_MIN(classid);
+
+       if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
+               return 0;
+       return ntx;
+}
+
+static void mqprio_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+                        struct sk_buff *skb, struct tcmsg *tcm)
+{
+       struct net_device *dev = qdisc_dev(sch);
+
+       if (cl <= netdev_get_num_tc(dev)) {
+               tcm->tcm_parent = TC_H_ROOT;
+               tcm->tcm_info = 0;
+       } else {
+               int i;
+               struct netdev_queue *dev_queue;
+
+               dev_queue = mqprio_queue_get(sch, cl);
+               tcm->tcm_parent = 0;
+               for (i = 0; i < netdev_get_num_tc(dev); i++) {
+                       struct netdev_tc_txq tc = dev->tc_to_txq[i];
+                       int q_idx = cl - netdev_get_num_tc(dev);
+
+                       if (q_idx > tc.offset &&
+                           q_idx <= tc.offset + tc.count) {
+                               tcm->tcm_parent =
+                                       TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                                 TC_H_MIN(i + 1));
+                               break;
+                       }
+               }
+               tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+       }
+       tcm->tcm_handle |= TC_H_MIN(cl);
+       return 0;
+}
+
+static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+                                  struct gnet_dump *d)
+       __releases(d->lock)
+       __acquires(d->lock)
+{
+       struct net_device *dev = qdisc_dev(sch);
+
+       if (cl <= netdev_get_num_tc(dev)) {
+               int i;
+               struct Qdisc *qdisc;
+               struct gnet_stats_queue qstats = {0};
+               struct gnet_stats_basic_packed bstats = {0};
+               struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
+
+               /* Drop lock here it will be reclaimed before touching
+                * statistics this is required because the d->lock we
+                * hold here is the lock on dev_queue->qdisc_sleeping
+                * also acquired below.
+                */
+               spin_unlock_bh(d->lock);
+
+               for (i = tc.offset; i < tc.offset + tc.count; i++) {
+                       qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+                       spin_lock_bh(qdisc_lock(qdisc));
+                       bstats.bytes      += qdisc->bstats.bytes;
+                       bstats.packets    += qdisc->bstats.packets;
+                       qstats.qlen       += qdisc->qstats.qlen;
+                       qstats.backlog    += qdisc->qstats.backlog;
+                       qstats.drops      += qdisc->qstats.drops;
+                       qstats.requeues   += qdisc->qstats.requeues;
+                       qstats.overlimits += qdisc->qstats.overlimits;
+                       spin_unlock_bh(qdisc_lock(qdisc));
+               }
+               /* Reclaim root sleeping lock before completing stats */
+               spin_lock_bh(d->lock);
+               if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+                   gnet_stats_copy_queue(d, &qstats) < 0)
+                       return -1;
+       } else {
+               struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+               sch = dev_queue->qdisc_sleeping;
+               sch->qstats.qlen = sch->q.qlen;
+               if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+                   gnet_stats_copy_queue(d, &sch->qstats) < 0)
+                       return -1;
+       }
+       return 0;
+}
+
+static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned long ntx;
+
+       if (arg->stop)
+               return;
+
+       /* Walk hierarchy with a virtual class per tc */
+       arg->count = arg->skip;
+       for (ntx = arg->skip;
+            ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
+            ntx++) {
+               if (arg->fn(sch, ntx + 1, arg) < 0) {
+                       arg->stop = 1;
+                       break;
+               }
+               arg->count++;
+       }
+}
+
+static const struct Qdisc_class_ops mqprio_class_ops = {
+       .graft          = mqprio_graft,
+       .leaf           = mqprio_leaf,
+       .get            = mqprio_get,
+       .put            = mqprio_put,
+       .walk           = mqprio_walk,
+       .dump           = mqprio_dump_class,
+       .dump_stats     = mqprio_dump_class_stats,
+};
+
+static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
+       .cl_ops         = &mqprio_class_ops,
+       .id             = "mqprio",
+       .priv_size      = sizeof(struct mqprio_sched),
+       .init           = mqprio_init,
+       .destroy        = mqprio_destroy,
+       .attach         = mqprio_attach,
+       .dump           = mqprio_dump,
+       .owner          = THIS_MODULE,
+};
+
+static int __init mqprio_module_init(void)
+{
+       return register_qdisc(&mqprio_qdisc_ops);
+}
+
+static void __exit mqprio_module_exit(void)
+{
+       unregister_qdisc(&mqprio_qdisc_ops);
+}
+
+module_init(mqprio_module_init);
+module_exit(mqprio_module_exit);
+
+MODULE_LICENSE("GPL");
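
mqprio_parse_opt() above checks that every traffic class maps to a queue range lying inside the device's real_num_tx_queues and that the ranges do not overlap (the overlap test assumes classes are listed in ascending offset order). A user-space sketch of those checks with made-up queue numbers; in the kernel the values come from struct tc_mqprio_qopt and the device:

#include <stdio.h>

#define NUM_TXQ 8       /* stands in for dev->real_num_tx_queues */

static int validate_tc_map(int num_tc, const unsigned int *offset,
                           const unsigned int *count)
{
        int i, j;

        for (i = 0; i < num_tc; i++) {
                unsigned int last = offset[i] + count[i];

                /* range must be non-empty and fit inside the tx queues */
                if (offset[i] >= NUM_TXQ || !count[i] || last > NUM_TXQ)
                        return -1;
                /* later classes must start at or after this range's end */
                for (j = i + 1; j < num_tc; j++)
                        if (last > offset[j])
                                return -1;
        }
        return 0;
}

int main(void)
{
        /* three traffic classes over eight queues: 0-3, 4-5, 6-7 */
        unsigned int offset[] = { 0, 4, 6 };
        unsigned int count[]  = { 4, 2, 2 };

        printf("mapping %s\n",
               validate_tc_map(3, offset, count) ? "rejected" : "accepted");
        return 0;
}

When qopt->hw is set these checks are skipped and the driver's ndo_setup_tc() owns the queue layout, as the hunk above notes.
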
index 21f13da..edc1950 100644 (file)
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, qdisc);
        if (ret == NET_XMIT_SUCCESS) {
-               qdisc_bstats_update(sch, skb);
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
                        qdisc = q->queues[q->curband];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
+                               qdisc_bstats_update(sch, skb);
                                sch->q.qlen--;
                                return skb;
                        }
@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
        unsigned int len;
        struct Qdisc *qdisc;
 
-       for (band = q->bands-1; band >= 0; band--) {
+       for (band = q->bands - 1; band >= 0; band--) {
                qdisc = q->queues[band];
                if (qdisc->ops->drop) {
                        len = qdisc->ops->drop(qdisc);
@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
        for (i = 0; i < q->max_bands; i++)
                q->queues[i] = &noop_qdisc;
 
-       err = multiq_tune(sch,opt);
+       err = multiq_tune(sch, opt);
 
        if (err)
                kfree(q->queues);
@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
        struct multiq_sched_data *q = qdisc_priv(sch);
 
        tcm->tcm_handle |= TC_H_MIN(cl);
-       tcm->tcm_info = q->queues[cl-1]->handle;
+       tcm->tcm_info = q->queues[cl - 1]->handle;
        return 0;
 }
 
@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                        arg->count++;
                        continue;
                }
-               if (arg->fn(sch, band+1, arg) < 0) {
+               if (arg->fn(sch, band + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
index 1c4bce8..edbbf7a 100644 (file)
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-#define VERSION "1.2"
+#define VERSION "1.3"
 
 /*     Network Emulation Queuing algorithm.
        ====================================
         layering other disciplines.  It does not need to do bandwidth
         control either since that can be handled by using token
         bucket or other rate control.
+
+     Correlated Loss Generator models
+
+       Added generation of correlated loss according to the
+       "Gilbert-Elliot" model, a 4-state markov model.
+
+       References:
+       [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
+       [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
+       and intuitive loss model for packet networks and its implementation
+       in the Netem module in the Linux kernel", available in [1]
+
+       Authors: Stefano Salsano <stefano.salsano at uniroma2.it
+                Fabio Ludovici <fabio.ludovici at yahoo.it>
 */
 
 struct netem_sched_data {
@@ -73,6 +88,26 @@ struct netem_sched_data {
                u32  size;
                s16 table[0];
        } *delay_dist;
+
+       enum  {
+               CLG_RANDOM,
+               CLG_4_STATES,
+               CLG_GILB_ELL,
+       } loss_model;
+
+       /* Correlated Loss Generation models */
+       struct clgstate {
+               /* state of the Markov chain */
+               u8 state;
+
+               /* 4-states and Gilbert-Elliot models */
+               u32 a1; /* p13 for 4-states or p for GE */
+               u32 a2; /* p31 for 4-states or r for GE */
+               u32 a3; /* p32 for 4-states or h for GE */
+               u32 a4; /* p14 for 4-states or 1-k for GE */
+               u32 a5; /* p23 used only in 4-states */
+       } clg;
+
 };
 
 /* Time stamp put into socket buffer control block */
@@ -115,6 +150,122 @@ static u32 get_crandom(struct crndstate *state)
        return answer;
 }
 
+/* loss_4state - 4-state model loss generator
+ * Generates losses according to the 4-state Markov chain adopted in
+ * the GI (General and Intuitive) loss model.
+ */
+static bool loss_4state(struct netem_sched_data *q)
+{
+       struct clgstate *clg = &q->clg;
+       u32 rnd = net_random();
+
+       /*
+        * Makes a comparison between rnd and the transition
+        * probabilities outgoing from the current state, then decides the
+        * next state and if the next packet has to be transmitted or lost.
+        * The four states correspond to:
+        *   1 => successfully transmitted packets within a gap period
+        *   4 => isolated losses within a gap period
+        *   3 => lost packets within a burst period
+        *   2 => successfully transmitted packets within a burst period
+        */
+       switch (clg->state) {
+       case 1:
+               if (rnd < clg->a4) {
+                       clg->state = 4;
+                       return true;
+               } else if (clg->a4 < rnd && rnd < clg->a1) {
+                       clg->state = 3;
+                       return true;
+               } else if (clg->a1 < rnd)
+                       clg->state = 1;
+
+               break;
+       case 2:
+               if (rnd < clg->a5) {
+                       clg->state = 3;
+                       return true;
+               } else
+                       clg->state = 2;
+
+               break;
+       case 3:
+               if (rnd < clg->a3)
+                       clg->state = 2;
+               else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
+                       clg->state = 1;
+                       return true;
+               } else if (clg->a2 + clg->a3 < rnd) {
+                       clg->state = 3;
+                       return true;
+               }
+               break;
+       case 4:
+               clg->state = 1;
+               break;
+       }
+
+       return false;
+}
+
+/* loss_gilb_ell - Gilbert-Elliot model loss generator
+ * Generates losses according to the Gilbert-Elliot loss model or
+ * its special cases  (Gilbert or Simple Gilbert)
+ *
+ * Makes a comparison between random number and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state. A second random number is extracted and the comparision
+ * with the loss probability of the current state decides if the next
+ * packet will be transmitted or lost.
+ */
+static bool loss_gilb_ell(struct netem_sched_data *q)
+{
+       struct clgstate *clg = &q->clg;
+
+       switch (clg->state) {
+       case 1:
+               if (net_random() < clg->a1)
+                       clg->state = 2;
+               if (net_random() < clg->a4)
+                       return true;
+       case 2:
+               if (net_random() < clg->a2)
+                       clg->state = 1;
+               if (clg->a3 > net_random())
+                       return true;
+       }
+
+       return false;
+}
+
+static bool loss_event(struct netem_sched_data *q)
+{
+       switch (q->loss_model) {
+       case CLG_RANDOM:
+               /* Random packet drop 0 => none, ~0 => all */
+               return q->loss && q->loss >= get_crandom(&q->loss_cor);
+
+       case CLG_4_STATES:
+               /* 4state loss model algorithm (used also for GI model)
+               * Extracts a value from the markov 4 state loss generator,
+               * if it is 1 drops a packet and if needed writes the event in
+               * the kernel logs
+               */
+               return loss_4state(q);
+
+       case CLG_GILB_ELL:
+               /* Gilbert-Elliot loss model algorithm
+               * Extracts a value from the Gilbert-Elliot loss generator,
+               * if it is 1 drops a packet and if needed writes the event in
+               * the kernel logs
+               */
+               return loss_gilb_ell(q);
+       }
+
+       return false;   /* not reached */
+}
+
+
 /* tabledist - return a pseudo-randomly distributed value with mean mu and
  * std deviation sigma.  Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
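
The tabledist() helper described in the comment above draws a pseudo-random index into a table of signed 16-bit samples (loaded by get_dist_table() further down) and scales the entry by sigma to shape the delay around mu. A rough user-space sketch of that table-lookup sampling; the eight-entry table and the 8192 scale factor are stand-ins for the real iproute2 distribution tables and NETEM_DIST_SCALE:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define DIST_SCALE 8192         /* assumed stand-in for NETEM_DIST_SCALE */

/* tiny hand-made table; real tables (normal, pareto, ...) ship with iproute2 */
static const short dist_table[] = {
        -16384, -8192, -4096, 0, 0, 4096, 8192, 16384
};

/* value with mean mu and spread sigma, shaped by a random table entry */
static long tabledist(long mu, long sigma)
{
        size_t n = sizeof(dist_table) / sizeof(dist_table[0]);
        long t = dist_table[rand() % n];

        return mu + sigma * t / DIST_SCALE;
}

int main(void)
{
        int i;

        srand((unsigned int)time(NULL));
        for (i = 0; i < 5; i++)
                printf("delay %ld us\n", tabledist(100000, 10000));
        return 0;
}

The kernel version additionally rounds in fixed point and correlates successive samples through get_crandom(); that part is omitted from the sketch.
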
@@ -161,14 +312,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        int ret;
        int count = 1;
 
-       pr_debug("netem_enqueue skb=%p\n", skb);
-
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
                ++count;
 
-       /* Random packet drop 0 => none, ~0 => all */
-       if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+       /* Drop packet? */
+       if (loss_event(q))
                --count;
 
        if (count == 0) {
@@ -211,8 +360,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        cb = netem_skb_cb(skb);
-       if (q->gap == 0 ||              /* not doing reordering */
-           q->counter < q->gap ||      /* inside last reordering gap */
+       if (q->gap == 0 ||              /* not doing reordering */
+           q->counter < q->gap ||      /* inside last reordering gap */
            q->reorder < get_crandom(&q->reorder_cor)) {
                psched_time_t now;
                psched_tdiff_t delay;
@@ -238,18 +387,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                ret = NET_XMIT_SUCCESS;
        }
 
-       if (likely(ret == NET_XMIT_SUCCESS)) {
-               sch->q.qlen++;
-               qdisc_bstats_update(sch, skb);
-       } else if (net_xmit_drop_count(ret)) {
-               sch->qstats.drops++;
+       if (ret != NET_XMIT_SUCCESS) {
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       return ret;
+               }
        }
 
-       pr_debug("netem: enqueue ret %d\n", ret);
-       return ret;
+       sch->q.qlen++;
+       return NET_XMIT_SUCCESS;
 }
 
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;
@@ -266,7 +415,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
 
-       if (sch->flags & TCQ_F_THROTTLED)
+       if (qdisc_is_throttled(sch))
                return NULL;
 
        skb = q->qdisc->ops->peek(q->qdisc);
@@ -288,8 +437,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                        if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
                                skb->tstamp.tv64 = 0;
 #endif
-                       pr_debug("netem_dequeue: return skb=%p\n", skb);
+
                        sch->q.qlen--;
+                       qdisc_unthrottled(sch);
+                       qdisc_bstats_update(sch, skb);
                        return skb;
                }
 
@@ -308,6 +459,16 @@ static void netem_reset(struct Qdisc *sch)
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
+static void dist_free(struct disttable *d)
+{
+       if (d) {
+               if (is_vmalloc_addr(d))
+                       vfree(d);
+               else
+                       kfree(d);
+       }
+}
+
 /*
  * Distribution data is a variable size payload containing
  * signed 16 bit values.
@@ -315,16 +476,20 @@ static void netem_reset(struct Qdisc *sch)
 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
-       unsigned long n = nla_len(attr)/sizeof(__s16);
+       size_t n = nla_len(attr)/sizeof(__s16);
        const __s16 *data = nla_data(attr);
        spinlock_t *root_lock;
        struct disttable *d;
        int i;
+       size_t s;
 
-       if (n > 65536)
+       if (n > NETEM_DIST_MAX)
                return -EINVAL;
 
-       d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
+       s = sizeof(struct disttable) + n * sizeof(s16);
+       d = kmalloc(s, GFP_KERNEL);
+       if (!d)
+               d = vmalloc(s);
        if (!d)
                return -ENOMEM;
 
@@ -335,7 +500,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       kfree(q->delay_dist);
+       dist_free(q->delay_dist);
        q->delay_dist = d;
        spin_unlock_bh(root_lock);
        return 0;
@@ -369,10 +534,66 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
        init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       const struct nlattr *la;
+       int rem;
+
+       nla_for_each_nested(la, attr, rem) {
+               u16 type = nla_type(la);
+
+               switch(type) {
+               case NETEM_LOSS_GI: {
+                       const struct tc_netem_gimodel *gi = nla_data(la);
+
+                       if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+                               pr_info("netem: incorrect gi model size\n");
+                               return -EINVAL;
+                       }
+
+                       q->loss_model = CLG_4_STATES;
+
+                       q->clg.state = 1;
+                       q->clg.a1 = gi->p13;
+                       q->clg.a2 = gi->p31;
+                       q->clg.a3 = gi->p32;
+                       q->clg.a4 = gi->p14;
+                       q->clg.a5 = gi->p23;
+                       break;
+               }
+
+               case NETEM_LOSS_GE: {
+                       const struct tc_netem_gemodel *ge = nla_data(la);
+
+                       if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
+                               pr_info("netem: incorrect gi model size\n");
+                               return -EINVAL;
+                       }
+
+                       q->loss_model = CLG_GILB_ELL;
+                       q->clg.state = 1;
+                       q->clg.a1 = ge->p;
+                       q->clg.a2 = ge->r;
+                       q->clg.a3 = ge->h;
+                       q->clg.a4 = ge->k1;
+                       break;
+               }
+
+               default:
+                       pr_info("netem: unknown loss type %u\n", type);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
        [TCA_NETEM_CORR]        = { .len = sizeof(struct tc_netem_corr) },
        [TCA_NETEM_REORDER]     = { .len = sizeof(struct tc_netem_reorder) },
        [TCA_NETEM_CORRUPT]     = { .len = sizeof(struct tc_netem_corrupt) },
+       [TCA_NETEM_LOSS]        = { .type = NLA_NESTED },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -380,11 +601,15 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 {
        int nested_len = nla_len(nla) - NLA_ALIGN(len);
 
-       if (nested_len < 0)
+       if (nested_len < 0) {
+               pr_info("netem: invalid attributes len %d\n", nested_len);
                return -EINVAL;
+       }
+
        if (nested_len >= nla_attr_size(0))
                return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
                                 nested_len, policy);
+
        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
        return 0;
 }
@@ -407,7 +632,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 
        ret = fifo_set_limit(q->qdisc, qopt->limit);
        if (ret) {
-               pr_debug("netem: can't set fifo limit\n");
+               pr_info("netem: can't set fifo limit\n");
                return ret;
        }
 
@@ -440,7 +665,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_NETEM_CORRUPT])
                get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
-       return 0;
+       q->loss_model = CLG_RANDOM;
+       if (tb[TCA_NETEM_LOSS])
+               ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
+
+       return ret;
 }
 
 /*
@@ -476,7 +705,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
                __skb_queue_after(list, skb, nskb);
 
                sch->qstats.backlog += qdisc_pkt_len(nskb);
-               qdisc_bstats_update(sch, nskb);
 
                return NET_XMIT_SUCCESS;
        }
@@ -536,16 +764,17 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
+       q->loss_model = CLG_RANDOM;
        q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1));
        if (!q->qdisc) {
-               pr_debug("netem: qdisc create failed\n");
+               pr_notice("netem: qdisc create tfifo qdisc failed\n");
                return -ENOMEM;
        }
 
        ret = netem_change(sch, opt);
        if (ret) {
-               pr_debug("netem: change failed\n");
+               pr_info("netem: change failed\n");
                qdisc_destroy(q->qdisc);
        }
        return ret;
@@ -557,14 +786,61 @@ static void netem_destroy(struct Qdisc *sch)
 
        qdisc_watchdog_cancel(&q->watchdog);
        qdisc_destroy(q->qdisc);
-       kfree(q->delay_dist);
+       dist_free(q->delay_dist);
+}
+
+static int dump_loss_model(const struct netem_sched_data *q,
+                          struct sk_buff *skb)
+{
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, TCA_NETEM_LOSS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       switch (q->loss_model) {
+       case CLG_RANDOM:
+               /* legacy loss model */
+               nla_nest_cancel(skb, nest);
+               return 0;       /* no data */
+
+       case CLG_4_STATES: {
+               struct tc_netem_gimodel gi = {
+                       .p13 = q->clg.a1,
+                       .p31 = q->clg.a2,
+                       .p32 = q->clg.a3,
+                       .p14 = q->clg.a4,
+                       .p23 = q->clg.a5,
+               };
+
+               NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+               break;
+       }
+       case CLG_GILB_ELL: {
+               struct tc_netem_gemodel ge = {
+                       .p = q->clg.a1,
+                       .r = q->clg.a2,
+                       .h = q->clg.a3,
+                       .k1 = q->clg.a4,
+               };
+
+               NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+               break;
+       }
+       }
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+       return -1;
 }
 
 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        const struct netem_sched_data *q = qdisc_priv(sch);
-       unsigned char *b = skb_tail_pointer(skb);
-       struct nlattr *nla = (struct nlattr *) b;
+       struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
        struct tc_netem_qopt qopt;
        struct tc_netem_corr cor;
        struct tc_netem_reorder reorder;
@@ -591,17 +867,87 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        corrupt.correlation = q->corrupt_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
-       nla->nla_len = skb_tail_pointer(skb) - b;
+       if (dump_loss_model(q, skb) != 0)
+               goto nla_put_failure;
 
-       return skb->len;
+       return nla_nest_end(skb, nla);
 
 nla_put_failure:
-       nlmsg_trim(skb, b);
+       nlmsg_trim(skb, nla);
        return -1;
 }
 
+static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
+                         struct sk_buff *skb, struct tcmsg *tcm)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+
+       if (cl != 1)    /* only one class */
+               return -ENOENT;
+
+       tcm->tcm_handle |= TC_H_MIN(1);
+       tcm->tcm_info = q->qdisc->handle;
+
+       return 0;
+}
+
+static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+                    struct Qdisc **old)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+
+       if (new == NULL)
+               new = &noop_qdisc;
+
+       sch_tree_lock(sch);
+       *old = q->qdisc;
+       q->qdisc = new;
+       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+       qdisc_reset(*old);
+       sch_tree_unlock(sch);
+
+       return 0;
+}
+
+static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       return q->qdisc;
+}
+
+static unsigned long netem_get(struct Qdisc *sch, u32 classid)
+{
+       return 1;
+}
+
+static void netem_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+       if (!walker->stop) {
+               if (walker->count >= walker->skip)
+                       if (walker->fn(sch, 1, walker) < 0) {
+                               walker->stop = 1;
+                               return;
+                       }
+               walker->count++;
+       }
+}
+
+static const struct Qdisc_class_ops netem_class_ops = {
+       .graft          =       netem_graft,
+       .leaf           =       netem_leaf,
+       .get            =       netem_get,
+       .put            =       netem_put,
+       .walk           =       netem_walk,
+       .dump           =       netem_dump_class,
+};
+
 static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
        .id             =       "netem",
+       .cl_ops         =       &netem_class_ops,
        .priv_size      =       sizeof(struct netem_sched_data),
        .enqueue        =       netem_enqueue,
        .dequeue        =       netem_dequeue,
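
loss_gilb_ell(), added earlier in this file, keeps a two-state good/bad Markov chain: the a1/a2 fields (p and r) drive the state transitions while a4 and a3 give the loss probability in the good and bad state respectively. A user-space sketch of that chain, using floating-point probabilities instead of the kernel's scaled u32 fields in struct clgstate; the parameters in main() are arbitrary example values, and this is not a line-for-line port of the kernel function:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct ge_state {
        int bad;                /* 0 = good state, 1 = bad state */
        double p;               /* good -> bad transition probability */
        double r;               /* bad -> good transition probability */
        double loss_good;       /* loss probability while good (a4 field) */
        double loss_bad;        /* loss probability while bad (a3 field) */
};

static double rnd01(void)
{
        return (double)rand() / RAND_MAX;
}

/* decide whether the next packet is lost, updating the chain state */
static int ge_lose_packet(struct ge_state *g)
{
        if (!g->bad) {
                if (rnd01() < g->p)
                        g->bad = 1;
                return rnd01() < g->loss_good;
        }
        if (rnd01() < g->r)
                g->bad = 0;
        return rnd01() < g->loss_bad;
}

int main(void)
{
        struct ge_state g = { 0, 0.01, 0.30, 0.001, 0.50 };
        long lost = 0, i;

        srand((unsigned int)time(NULL));
        for (i = 0; i < 1000000; i++)
                lost += ge_lose_packet(&g);
        printf("dropped %ld of 1000000 packets\n", lost);
        return 0;
}

Because losses cluster while the chain sits in the bad state, the generator produces bursty loss rather than the independent drops of the legacy CLG_RANDOM model.
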
index 966158d..2a318f2 100644 (file)
@@ -22,8 +22,7 @@
 #include <net/pkt_sched.h>
 
 
-struct prio_sched_data
-{
+struct prio_sched_data {
        int bands;
        struct tcf_proto *filter_list;
        u8  prio2band[TC_PRIO_MAX+1];
@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                if (!q->filter_list || err < 0) {
                        if (TC_H_MAJ(band))
                                band = 0;
-                       return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+                       return q->queues[q->prio2band[band & TC_PRIO_MAX]];
                }
                band = res.classid;
        }
@@ -84,7 +83,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, qdisc);
        if (ret == NET_XMIT_SUCCESS) {
-               qdisc_bstats_update(sch, skb);
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
@@ -107,7 +105,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
        return NULL;
 }
 
-static struct sk_buff *prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;
@@ -116,6 +114,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
                struct Qdisc *qdisc = q->queues[prio];
                struct sk_buff *skb = qdisc->dequeue(qdisc);
                if (skb) {
+                       qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
 
 }
 
-static unsigned int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc *sch)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;
@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
 
 
 static void
-prio_reset(struct Qdisc* sch)
+prio_reset(struct Qdisc *sch)
 {
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);
 
-       for (prio=0; prio<q->bands; prio++)
+       for (prio = 0; prio < q->bands; prio++)
                qdisc_reset(q->queues[prio]);
        sch->q.qlen = 0;
 }
 
 static void
-prio_destroy(struct Qdisc* sch)
+prio_destroy(struct Qdisc *sch)
 {
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);
 
        tcf_destroy_chain(&q->filter_list);
-       for (prio=0; prio<q->bands; prio++)
+       for (prio = 0; prio < q->bands; prio++)
                qdisc_destroy(q->queues[prio]);
 }
 
@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
                return -EINVAL;
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
+       for (i = 0; i <= TC_PRIO_MAX; i++) {
                if (qopt->priomap[i] >= qopt->bands)
                        return -EINVAL;
        }
@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+       for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
                struct Qdisc *child = q->queues[i];
                q->queues[i] = &noop_qdisc;
                if (child != &noop_qdisc) {
@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        }
        sch_tree_unlock(sch);
 
-       for (i=0; i<q->bands; i++) {
+       for (i = 0; i < q->bands; i++) {
                if (q->queues[i] == &noop_qdisc) {
                        struct Qdisc *child, *old;
+
                        child = qdisc_create_dflt(sch->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  TC_H_MAKE(sch->handle, i + 1));
@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
        struct prio_sched_data *q = qdisc_priv(sch);
        int i;
 
-       for (i=0; i<TCQ_PRIO_BANDS; i++)
+       for (i = 0; i < TCQ_PRIO_BANDS; i++)
                q->queues[i] = &noop_qdisc;
 
        if (opt == NULL) {
@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
        } else {
                int err;
 
-               if ((err= prio_tune(sch, opt)) != 0)
+               if ((err = prio_tune(sch, opt)) != 0)
                        return err;
        }
        return 0;
@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct tc_prio_qopt opt;
 
        opt.bands = q->bands;
-       memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
+       memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 
@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                        arg->count++;
                        continue;
                }
-               if (arg->fn(sch, prio+1, arg) < 0) {
+               if (arg->fn(sch, prio + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        }
 }
 
-static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
 
index a6009c5..6649463 100644 (file)
@@ -36,8 +36,7 @@
        if RED works correctly.
  */
 
-struct red_sched_data
-{
+struct red_sched_data {
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
        struct red_parms        parms;
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
        return q->flags & TC_RED_HARDDROP;
 }
 
-static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
@@ -67,34 +66,33 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                red_end_of_idle_period(&q->parms);
 
        switch (red_action(&q->parms, q->parms.qavg)) {
-               case RED_DONT_MARK:
-                       break;
-
-               case RED_PROB_MARK:
-                       sch->qstats.overlimits++;
-                       if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
-                               q->stats.prob_drop++;
-                               goto congestion_drop;
-                       }
-
-                       q->stats.prob_mark++;
-                       break;
-
-               case RED_HARD_MARK:
-                       sch->qstats.overlimits++;
-                       if (red_use_harddrop(q) || !red_use_ecn(q) ||
-                           !INET_ECN_set_ce(skb)) {
-                               q->stats.forced_drop++;
-                               goto congestion_drop;
-                       }
-
-                       q->stats.forced_mark++;
-                       break;
+       case RED_DONT_MARK:
+               break;
+
+       case RED_PROB_MARK:
+               sch->qstats.overlimits++;
+               if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+                       q->stats.prob_drop++;
+                       goto congestion_drop;
+               }
+
+               q->stats.prob_mark++;
+               break;
+
+       case RED_HARD_MARK:
+               sch->qstats.overlimits++;
+               if (red_use_harddrop(q) || !red_use_ecn(q) ||
+                   !INET_ECN_set_ce(skb)) {
+                       q->stats.forced_drop++;
+                       goto congestion_drop;
+               }
+
+               q->stats.forced_mark++;
+               break;
        }
 
        ret = qdisc_enqueue(skb, child);
        if (likely(ret == NET_XMIT_SUCCESS)) {
-               qdisc_bstats_update(sch, skb);
                sch->q.qlen++;
        } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
@@ -107,22 +105,24 @@ congestion_drop:
        return NET_XMIT_CN;
 }
 
-static struct sk_buff * red_dequeue(struct Qdisc* sch)
+static struct sk_buff *red_dequeue(struct Qdisc *sch)
 {
        struct sk_buff *skb;
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
 
        skb = child->dequeue(child);
-       if (skb)
+       if (skb) {
+               qdisc_bstats_update(sch, skb);
                sch->q.qlen--;
-       else if (!red_is_idling(&q->parms))
-               red_start_of_idle_period(&q->parms);
-
+       } else {
+               if (!red_is_idling(&q->parms))
+                       red_start_of_idle_period(&q->parms);
+       }
        return skb;
 }
 
-static struct sk_buff * red_peek(struct Qdisc* sch)
+static struct sk_buff *red_peek(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
@@ -130,7 +130,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
        return child->ops->peek(child);
 }
 
-static unsigned int red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
@@ -149,7 +149,7 @@ static unsigned int red_drop(struct Qdisc* sch)
        return 0;
 }
 
-static void red_reset(struct Qdisc* sch)
+static void red_reset(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
@@ -216,7 +216,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int red_init(struct Qdisc* sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
new file mode 100644 (file)
index 0000000..0a833d0
--- /dev/null
@@ -0,0 +1,709 @@
+/*
+ * net/sched/sch_sfb.c   Stochastic Fair Blue
+ *
+ * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
+ * A New Class of Active Queue Management Algorithms.
+ * U. Michigan CSE-TR-387-99, April 1999.
+ *
+ * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <net/ip.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/*
+ * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
+ * This implementation uses L = 8 and N = 16
+ * This permits us to split one 32bit hash (provided per packet by rxhash or
+ * external classifier) into 8 subhashes of 4 bits.
+ */
+#define SFB_BUCKET_SHIFT 4
+#define SFB_NUMBUCKETS (1 << SFB_BUCKET_SHIFT) /* N bins per Level */
+#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
+#define SFB_LEVELS     (32 / SFB_BUCKET_SHIFT) /* L */
+
+/* SFB algo uses a virtual queue, named "bin" */
+struct sfb_bucket {
+       u16             qlen; /* length of virtual queue */
+       u16             p_mark; /* marking probability */
+};
+
+/* We use a double buffering right before hash change
+ * (Section 4.4 of SFB reference : moving hash functions)
+ */
+struct sfb_bins {
+       u32               perturbation; /* jhash perturbation */
+       struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
+};
+
+struct sfb_sched_data {
+       struct Qdisc    *qdisc;
+       struct tcf_proto *filter_list;
+       unsigned long   rehash_interval;
+       unsigned long   warmup_time;    /* double buffering warmup time in jiffies */
+       u32             max;
+       u32             bin_size;       /* maximum queue length per bin */
+       u32             increment;      /* d1 */
+       u32             decrement;      /* d2 */
+       u32             limit;          /* HARD maximal queue length */
+       u32             penalty_rate;
+       u32             penalty_burst;
+       u32             tokens_avail;
+       unsigned long   rehash_time;
+       unsigned long   token_time;
+
+       u8              slot;           /* current active bins (0 or 1) */
+       bool            double_buffering;
+       struct sfb_bins bins[2];
+
+       struct {
+               u32     earlydrop;
+               u32     penaltydrop;
+               u32     bucketdrop;
+               u32     queuedrop;
+               u32     childdrop;      /* drops in child qdisc */
+               u32     marked;         /* ECN mark */
+       } stats;
+};
+
+/*
+ * Each queued skb might be hashed on one or two bins
+ * We store in skb_cb the two hash values.
+ * (A zero value means double buffering was not used)
+ */
+struct sfb_skb_cb {
+       u32 hashes[2];
+};
+
+static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) <
+               sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+       return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+/*
+ * If using 'internal' SFB flow classifier, hash comes from skb rxhash
+ * If using external classifier, hash comes from the classid.
+ */
+static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
+{
+       return sfb_skb_cb(skb)->hashes[slot];
+}
+
+/* Probabilities are coded as Q0.16 fixed-point values,
+ * with 0xFFFF representing 65535/65536 (almost 1.0)
+ * Addition and subtraction are saturating in [0, 65535]
+ */
+static u32 prob_plus(u32 p1, u32 p2)
+{
+       u32 res = p1 + p2;
+
+       return min_t(u32, res, SFB_MAX_PROB);
+}
+
+static u32 prob_minus(u32 p1, u32 p2)
+{
+       return p1 > p2 ? p1 - p2 : 0;
+}
+
+static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
+{
+       int i;
+       struct sfb_bucket *b = &q->bins[slot].bins[0][0];
+
+       for (i = 0; i < SFB_LEVELS; i++) {
+               u32 hash = sfbhash & SFB_BUCKET_MASK;
+
+               sfbhash >>= SFB_BUCKET_SHIFT;
+               if (b[hash].qlen < 0xFFFF)
+                       b[hash].qlen++;
+               b += SFB_NUMBUCKETS; /* next level */
+       }
+}
+
+static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+{
+       u32 sfbhash;
+
+       sfbhash = sfb_hash(skb, 0);
+       if (sfbhash)
+               increment_one_qlen(sfbhash, 0, q);
+
+       sfbhash = sfb_hash(skb, 1);
+       if (sfbhash)
+               increment_one_qlen(sfbhash, 1, q);
+}
+
+static void decrement_one_qlen(u32 sfbhash, u32 slot,
+                              struct sfb_sched_data *q)
+{
+       int i;
+       struct sfb_bucket *b = &q->bins[slot].bins[0][0];
+
+       for (i = 0; i < SFB_LEVELS; i++) {
+               u32 hash = sfbhash & SFB_BUCKET_MASK;
+
+               sfbhash >>= SFB_BUCKET_SHIFT;
+               if (b[hash].qlen > 0)
+                       b[hash].qlen--;
+               b += SFB_NUMBUCKETS; /* next level */
+       }
+}
+
+static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+{
+       u32 sfbhash;
+
+       sfbhash = sfb_hash(skb, 0);
+       if (sfbhash)
+               decrement_one_qlen(sfbhash, 0, q);
+
+       sfbhash = sfb_hash(skb, 1);
+       if (sfbhash)
+               decrement_one_qlen(sfbhash, 1, q);
+}
+
+static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
+{
+       b->p_mark = prob_minus(b->p_mark, q->decrement);
+}
+
+static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
+{
+       b->p_mark = prob_plus(b->p_mark, q->increment);
+}
+
+static void sfb_zero_all_buckets(struct sfb_sched_data *q)
+{
+       memset(&q->bins, 0, sizeof(q->bins));
+}
+
+/*
+ * compute max qlen, max p_mark, and avg p_mark
+ */
+static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
+{
+       int i;
+       u32 qlen = 0, prob = 0, totalpm = 0;
+       const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
+
+       for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
+               if (qlen < b->qlen)
+                       qlen = b->qlen;
+               totalpm += b->p_mark;
+               if (prob < b->p_mark)
+                       prob = b->p_mark;
+               b++;
+       }
+       *prob_r = prob;
+       *avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
+       return qlen;
+}
+
+
+static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
+{
+       q->bins[slot].perturbation = net_random();
+}
+
+static void sfb_swap_slot(struct sfb_sched_data *q)
+{
+       sfb_init_perturbation(q->slot, q);
+       q->slot ^= 1;
+       q->double_buffering = false;
+}
+
+/* Non elastic flows are allowed to use part of the bandwidth, expressed
+ * in "penalty_rate" packets per second, with "penalty_burst" burst
+ */
+static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
+{
+       if (q->penalty_rate == 0 || q->penalty_burst == 0)
+               return true;
+
+       if (q->tokens_avail < 1) {
+               unsigned long age = min(10UL * HZ, jiffies - q->token_time);
+
+               q->tokens_avail = (age * q->penalty_rate) / HZ;
+               if (q->tokens_avail > q->penalty_burst)
+                       q->tokens_avail = q->penalty_burst;
+               q->token_time = jiffies;
+               if (q->tokens_avail < 1)
+                       return true;
+       }
+
+       q->tokens_avail--;
+       return false;
+}
+
+static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
+                        int *qerr, u32 *salt)
+{
+       struct tcf_result res;
+       int result;
+
+       result = tc_classify(skb, q->filter_list, &res);
+       if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+               switch (result) {
+               case TC_ACT_STOLEN:
+               case TC_ACT_QUEUED:
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+               case TC_ACT_SHOT:
+                       return false;
+               }
+#endif
+               *salt = TC_H_MIN(res.classid);
+               return true;
+       }
+       return false;
+}
+
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
+       int i;
+       u32 p_min = ~0;
+       u32 minqlen = ~0;
+       u32 r, slot, salt, sfbhash;
+       int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+       if (q->rehash_interval > 0) {
+               unsigned long limit = q->rehash_time + q->rehash_interval;
+
+               if (unlikely(time_after(jiffies, limit))) {
+                       sfb_swap_slot(q);
+                       q->rehash_time = jiffies;
+               } else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
+                                   time_after(jiffies, limit - q->warmup_time))) {
+                       q->double_buffering = true;
+               }
+       }
+
+       if (q->filter_list) {
+               /* If using external classifiers, get result and record it. */
+               if (!sfb_classify(skb, q, &ret, &salt))
+                       goto other_drop;
+       } else {
+               salt = skb_get_rxhash(skb);
+       }
+
+       slot = q->slot;
+
+       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+       if (!sfbhash)
+               sfbhash = 1;
+       sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+
+       for (i = 0; i < SFB_LEVELS; i++) {
+               u32 hash = sfbhash & SFB_BUCKET_MASK;
+               struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
+
+               sfbhash >>= SFB_BUCKET_SHIFT;
+               if (b->qlen == 0)
+                       decrement_prob(b, q);
+               else if (b->qlen >= q->bin_size)
+                       increment_prob(b, q);
+               if (minqlen > b->qlen)
+                       minqlen = b->qlen;
+               if (p_min > b->p_mark)
+                       p_min = b->p_mark;
+       }
+
+       slot ^= 1;
+       sfb_skb_cb(skb)->hashes[slot] = 0;
+
+       if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+               sch->qstats.overlimits++;
+               if (minqlen >= q->max)
+                       q->stats.bucketdrop++;
+               else
+                       q->stats.queuedrop++;
+               goto drop;
+       }
+
+       if (unlikely(p_min >= SFB_MAX_PROB)) {
+               /* Inelastic flow */
+               if (q->double_buffering) {
+                       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+                       if (!sfbhash)
+                               sfbhash = 1;
+                       sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+
+                       for (i = 0; i < SFB_LEVELS; i++) {
+                               u32 hash = sfbhash & SFB_BUCKET_MASK;
+                               struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
+
+                               sfbhash >>= SFB_BUCKET_SHIFT;
+                               if (b->qlen == 0)
+                                       decrement_prob(b, q);
+                               else if (b->qlen >= q->bin_size)
+                                       increment_prob(b, q);
+                       }
+               }
+               if (sfb_rate_limit(skb, q)) {
+                       sch->qstats.overlimits++;
+                       q->stats.penaltydrop++;
+                       goto drop;
+               }
+               goto enqueue;
+       }
+
+       r = net_random() & SFB_MAX_PROB;
+
+       if (unlikely(r < p_min)) {
+               if (unlikely(p_min > SFB_MAX_PROB / 2)) {
+                       /* If we're marking that many packets, then either
+                        * this flow is unresponsive, or we're badly congested.
+                        * In either case, we want to start dropping packets.
+                        */
+                       if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
+                               q->stats.earlydrop++;
+                               goto drop;
+                       }
+               }
+               if (INET_ECN_set_ce(skb)) {
+                       q->stats.marked++;
+               } else {
+                       q->stats.earlydrop++;
+                       goto drop;
+               }
+       }
+
+enqueue:
+       ret = qdisc_enqueue(skb, child);
+       if (likely(ret == NET_XMIT_SUCCESS)) {
+               sch->q.qlen++;
+               increment_qlen(skb, q);
+       } else if (net_xmit_drop_count(ret)) {
+               q->stats.childdrop++;
+               sch->qstats.drops++;
+       }
+       return ret;
+
+drop:
+       qdisc_drop(skb, sch);
+       return NET_XMIT_CN;
+other_drop:
+       if (ret & __NET_XMIT_BYPASS)
+               sch->qstats.drops++;
+       kfree_skb(skb);
+       return ret;
+}
+
+static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
+       struct sk_buff *skb;
+
+       skb = child->dequeue(q->qdisc);
+
+       if (skb) {
+               qdisc_bstats_update(sch, skb);
+               sch->q.qlen--;
+               decrement_qlen(skb, q);
+       }
+
+       return skb;
+}
+
+static struct sk_buff *sfb_peek(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
+
+       return child->ops->peek(child);
+}
+
+/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */
+
+static void sfb_reset(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       qdisc_reset(q->qdisc);
+       sch->q.qlen = 0;
+       q->slot = 0;
+       q->double_buffering = false;
+       sfb_zero_all_buckets(q);
+       sfb_init_perturbation(0, q);
+}
+
+static void sfb_destroy(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       tcf_destroy_chain(&q->filter_list);
+       qdisc_destroy(q->qdisc);
+}
+
+static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
+       [TCA_SFB_PARMS] = { .len = sizeof(struct tc_sfb_qopt) },
+};
+
+static const struct tc_sfb_qopt sfb_default_ops = {
+       .rehash_interval = 600 * MSEC_PER_SEC,
+       .warmup_time = 60 * MSEC_PER_SEC,
+       .limit = 0,
+       .max = 25,
+       .bin_size = 20,
+       .increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
+       .decrement = (SFB_MAX_PROB + 3000) / 6000,
+       .penalty_rate = 10,
+       .penalty_burst = 20,
+};
+
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child;
+       struct nlattr *tb[TCA_SFB_MAX + 1];
+       const struct tc_sfb_qopt *ctl = &sfb_default_ops;
+       u32 limit;
+       int err;
+
+       if (opt) {
+               err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
+               if (err < 0)
+                       return -EINVAL;
+
+               if (tb[TCA_SFB_PARMS] == NULL)
+                       return -EINVAL;
+
+               ctl = nla_data(tb[TCA_SFB_PARMS]);
+       }
+
+       limit = ctl->limit;
+       if (limit == 0)
+               limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+
+       child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+       if (IS_ERR(child))
+               return PTR_ERR(child);
+
+       sch_tree_lock(sch);
+
+       qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+       qdisc_destroy(q->qdisc);
+       q->qdisc = child;
+
+       q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
+       q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
+       q->rehash_time = jiffies;
+       q->limit = limit;
+       q->increment = ctl->increment;
+       q->decrement = ctl->decrement;
+       q->max = ctl->max;
+       q->bin_size = ctl->bin_size;
+       q->penalty_rate = ctl->penalty_rate;
+       q->penalty_burst = ctl->penalty_burst;
+       q->tokens_avail = ctl->penalty_burst;
+       q->token_time = jiffies;
+
+       q->slot = 0;
+       q->double_buffering = false;
+       sfb_zero_all_buckets(q);
+       sfb_init_perturbation(0, q);
+       sfb_init_perturbation(1, q);
+
+       sch_tree_unlock(sch);
+
+       return 0;
+}
+
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       q->qdisc = &noop_qdisc;
+       return sfb_change(sch, opt);
+}
+
+static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts;
+       struct tc_sfb_qopt opt = {
+               .rehash_interval = jiffies_to_msecs(q->rehash_interval),
+               .warmup_time = jiffies_to_msecs(q->warmup_time),
+               .limit = q->limit,
+               .max = q->max,
+               .bin_size = q->bin_size,
+               .increment = q->increment,
+               .decrement = q->decrement,
+               .penalty_rate = q->penalty_rate,
+               .penalty_burst = q->penalty_burst,
+       };
+
+       sch->qstats.backlog = q->qdisc->qstats.backlog;
+       opts = nla_nest_start(skb, TCA_OPTIONS);
+       NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       nla_nest_cancel(skb, opts);
+       return -EMSGSIZE;
+}
+
+static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct tc_sfb_xstats st = {
+               .earlydrop = q->stats.earlydrop,
+               .penaltydrop = q->stats.penaltydrop,
+               .bucketdrop = q->stats.bucketdrop,
+               .queuedrop = q->stats.queuedrop,
+               .childdrop = q->stats.childdrop,
+               .marked = q->stats.marked,
+       };
+
+       st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);
+
+       return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
+                         struct sk_buff *skb, struct tcmsg *tcm)
+{
+       return -ENOSYS;
+}
+
+static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+                    struct Qdisc **old)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       if (new == NULL)
+               new = &noop_qdisc;
+
+       sch_tree_lock(sch);
+       *old = q->qdisc;
+       q->qdisc = new;
+       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+       qdisc_reset(*old);
+       sch_tree_unlock(sch);
+       return 0;
+}
+
+static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       return q->qdisc;
+}
+
+static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
+{
+       return 1;
+}
+
+static void sfb_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+                           struct nlattr **tca, unsigned long *arg)
+{
+       return -ENOSYS;
+}
+
+static int sfb_delete(struct Qdisc *sch, unsigned long cl)
+{
+       return -ENOSYS;
+}
+
+static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+       if (!walker->stop) {
+               if (walker->count >= walker->skip)
+                       if (walker->fn(sch, 1, walker) < 0) {
+                               walker->stop = 1;
+                               return;
+                       }
+               walker->count++;
+       }
+}
+
+static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       if (cl)
+               return NULL;
+       return &q->filter_list;
+}
+
+static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
+                             u32 classid)
+{
+       return 0;
+}
+
+
+static const struct Qdisc_class_ops sfb_class_ops = {
+       .graft          =       sfb_graft,
+       .leaf           =       sfb_leaf,
+       .get            =       sfb_get,
+       .put            =       sfb_put,
+       .change         =       sfb_change_class,
+       .delete         =       sfb_delete,
+       .walk           =       sfb_walk,
+       .tcf_chain      =       sfb_find_tcf,
+       .bind_tcf       =       sfb_bind,
+       .unbind_tcf     =       sfb_put,
+       .dump           =       sfb_dump_class,
+};
+
+static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
+       .id             =       "sfb",
+       .priv_size      =       sizeof(struct sfb_sched_data),
+       .cl_ops         =       &sfb_class_ops,
+       .enqueue        =       sfb_enqueue,
+       .dequeue        =       sfb_dequeue,
+       .peek           =       sfb_peek,
+       .init           =       sfb_init,
+       .reset          =       sfb_reset,
+       .destroy        =       sfb_destroy,
+       .change         =       sfb_change,
+       .dump           =       sfb_dump,
+       .dump_stats     =       sfb_dump_stats,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init sfb_module_init(void)
+{
+       return register_qdisc(&sfb_qdisc_ops);
+}
+
+static void __exit sfb_module_exit(void)
+{
+       unregister_qdisc(&sfb_qdisc_ops);
+}
+
+module_init(sfb_module_init)
+module_exit(sfb_module_exit)
+
+MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
+MODULE_AUTHOR("Juliusz Chroboczek");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
index 239ec53..c2e628d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -76,7 +77,8 @@
 #define SFQ_DEPTH              128 /* max number of packets per flow */
 #define SFQ_SLOTS              128 /* max number of flows */
 #define SFQ_EMPTY_SLOT         255
-#define SFQ_HASH_DIVISOR       1024
+#define SFQ_DEFAULT_HASH_DIVISOR 1024
+
 /* We use 16 bits to store allot, and want to handle packets up to 64K
  * Scale allot by 8 (1<<3) so that no overflow occurs.
  */
@@ -92,8 +94,7 @@ typedef unsigned char sfq_index;
  * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
  * are 'pointers' to dep[] array
  */
-struct sfq_head
-{
+struct sfq_head {
        sfq_index       next;
        sfq_index       prev;
 };
@@ -108,13 +109,12 @@ struct sfq_slot {
        short           allot; /* credit for this slot */
 };
 
-struct sfq_sched_data
-{
+struct sfq_sched_data {
 /* Parameters */
        int             perturb_period;
-       unsigned        quantum;        /* Allotment per round: MUST BE >= MTU */
+       unsigned int    quantum;        /* Allotment per round: MUST BE >= MTU */
        int             limit;
-
+       unsigned int    divisor;        /* number of slots in hash table */
 /* Variables */
        struct tcf_proto *filter_list;
        struct timer_list perturb_timer;
@@ -122,7 +122,7 @@ struct sfq_sched_data
        sfq_index       cur_depth;      /* depth of longest slot */
        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
        struct sfq_slot *tail;          /* current slot in round */
-       sfq_index       ht[SFQ_HASH_DIVISOR];   /* Hash table */
+       sfq_index       *ht;            /* Hash table (divisor slots) */
        struct sfq_slot slots[SFQ_SLOTS];
        struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
 };
@@ -137,12 +137,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
        return &q->dep[val - SFQ_SLOTS];
 }
 
-static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
 {
-       return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
+       return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
 }
 
-static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 {
        u32 h, h2;
 
@@ -157,13 +157,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                iph = ip_hdr(skb);
                h = (__force u32)iph->daddr;
                h2 = (__force u32)iph->saddr ^ iph->protocol;
-               if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+               if (iph->frag_off & htons(IP_MF | IP_OFFSET))
                        break;
                poff = proto_ports_offset(iph->protocol);
                if (poff >= 0 &&
                    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
                        iph = ip_hdr(skb);
-                       h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+                       h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
                }
                break;
        }
@@ -181,7 +181,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                if (poff >= 0 &&
                    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
                        iph = ipv6_hdr(skb);
-                       h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+                       h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
                }
                break;
        }
@@ -203,7 +203,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 
        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
-           TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+           TC_H_MIN(skb->priority) <= q->divisor)
                return TC_H_MIN(skb->priority);
 
        if (!q->filter_list)
@@ -221,7 +221,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                        return 0;
                }
 #endif
-               if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+               if (TC_H_MIN(res.classid) <= q->divisor)
                        return TC_H_MIN(res.classid);
        }
        return 0;
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                q->tail = slot;
                slot->allot = q->scaled_quantum;
        }
-       if (++sch->q.qlen <= q->limit) {
-               qdisc_bstats_update(sch, skb);
+       if (++sch->q.qlen <= q->limit)
                return NET_XMIT_SUCCESS;
-       }
 
        sfq_drop(sch);
        return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
        }
        skb = slot_dequeue_head(slot);
        sfq_dec(q, a);
+       qdisc_bstats_update(sch, skb);
        sch->q.qlen--;
        sch->qstats.backlog -= qdisc_pkt_len(skb);
 
@@ -492,13 +491,18 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
                return -EINVAL;
 
+       if (ctl->divisor &&
+           (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+               return -EINVAL;
+
        sch_tree_lock(sch);
        q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        q->perturb_period = ctl->perturb_period * HZ;
        if (ctl->limit)
                q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-
+       if (ctl->divisor)
+               q->divisor = ctl->divisor;
        qlen = sch->q.qlen;
        while (sch->q.qlen > q->limit)
                sfq_drop(sch);
@@ -516,15 +520,13 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
+       size_t sz;
        int i;
 
        q->perturb_timer.function = sfq_perturbation;
        q->perturb_timer.data = (unsigned long)sch;
        init_timer_deferrable(&q->perturb_timer);
 
-       for (i = 0; i < SFQ_HASH_DIVISOR; i++)
-               q->ht[i] = SFQ_EMPTY_SLOT;
-
        for (i = 0; i < SFQ_DEPTH; i++) {
                q->dep[i].next = i + SFQ_SLOTS;
                q->dep[i].prev = i + SFQ_SLOTS;
@@ -533,6 +535,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->limit = SFQ_DEPTH - 1;
        q->cur_depth = 0;
        q->tail = NULL;
+       q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
        if (opt == NULL) {
                q->quantum = psched_mtu(qdisc_dev(sch));
                q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
@@ -544,10 +547,23 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
                        return err;
        }
 
+       sz = sizeof(q->ht[0]) * q->divisor;
+       q->ht = kmalloc(sz, GFP_KERNEL);
+       if (!q->ht && sz > PAGE_SIZE)
+               q->ht = vmalloc(sz);
+       if (!q->ht)
+               return -ENOMEM;
+       for (i = 0; i < q->divisor; i++)
+               q->ht[i] = SFQ_EMPTY_SLOT;
+
        for (i = 0; i < SFQ_SLOTS; i++) {
                slot_queue_init(&q->slots[i]);
                sfq_link(q, i);
        }
+       if (q->limit >= 1)
+               sch->flags |= TCQ_F_CAN_BYPASS;
+       else
+               sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
@@ -558,6 +574,10 @@ static void sfq_destroy(struct Qdisc *sch)
        tcf_destroy_chain(&q->filter_list);
        q->perturb_period = 0;
        del_timer_sync(&q->perturb_timer);
+       if (is_vmalloc_addr(q->ht))
+               vfree(q->ht);
+       else
+               kfree(q->ht);
 }
 
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -570,7 +590,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
        opt.perturb_period = q->perturb_period / HZ;
 
        opt.limit = q->limit;
-       opt.divisor = SFQ_HASH_DIVISOR;
+       opt.divisor = q->divisor;
        opt.flows = q->limit;
 
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -595,6 +615,8 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
 static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
 {
+       /* we cannot bypass queue discipline anymore */
+       sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
@@ -648,7 +670,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        if (arg->stop)
                return;
 
-       for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+       for (i = 0; i < q->divisor; i++) {
                if (q->ht[i] == SFQ_EMPTY_SLOT ||
                    arg->count < arg->skip) {
                        arg->count++;
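
The sch_sfq.c changes make the hash-table divisor configurable but require a power of two no larger than 65536, because sfq_fold_hash() reduces the jhash with "& (divisor - 1)". A small user-space sketch of that validation and masking; check_divisor() and fold_hash() are illustrative names, not kernel functions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint32_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

static int check_divisor(uint32_t divisor)
{
        if (divisor && (!is_power_of_2(divisor) || divisor > 65536))
                return -1;                      /* -EINVAL in the patch */
        return 0;                               /* 0 keeps the default (1024) */
}

static uint32_t fold_hash(uint32_t h, uint32_t divisor)
{
        return h & (divisor - 1);               /* cheap "h % divisor" */
}

int main(void)
{
        printf("1024 accepted? %d\n", check_divisor(1024));     /* 0  */
        printf("1000 accepted? %d\n", check_divisor(1000));     /* -1 */
        printf("slot for hash 0x9e3779b9: %u\n",
               fold_hash(0x9e3779b9u, 1024));
        return 0;
}
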
index 77565e7..1dcfb52 100644 (file)
@@ -97,8 +97,7 @@
        changed the limit is not effective anymore.
 */
 
-struct tbf_sched_data
-{
+struct tbf_sched_data {
 /* Parameters */
        u32             limit;          /* Maximal length of backlog: bytes */
        u32             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -115,10 +114,10 @@ struct tbf_sched_data
        struct qdisc_watchdog watchdog; /* Watchdog timer */
 };
 
-#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
-#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
+#define L2T(q, L)   qdisc_l2t((q)->R_tab, L)
+#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
 
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        int ret;
@@ -134,11 +133,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
        }
 
        sch->q.qlen++;
-       qdisc_bstats_update(sch, skb);
        return NET_XMIT_SUCCESS;
 }
 
-static unsigned int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;
@@ -150,7 +148,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
        return len;
 }
 
-static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
@@ -186,7 +184,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
                        q->tokens = toks;
                        q->ptokens = ptoks;
                        sch->q.qlen--;
-                       sch->flags &= ~TCQ_F_THROTTLED;
+                       qdisc_unthrottled(sch);
+                       qdisc_bstats_update(sch, skb);
                        return skb;
                }
 
@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
        return NULL;
 }
 
-static void tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
        [TCA_TBF_PTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 };
 
-static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 {
        int err;
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
        struct qdisc_rate_table *rtab = NULL;
        struct qdisc_rate_table *ptab = NULL;
        struct Qdisc *child = NULL;
-       int max_size,n;
+       int max_size, n;
 
        err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
        if (err < 0)
@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
        }
 
        for (n = 0; n < 256; n++)
-               if (rtab->data[n] > qopt->buffer) break;
-       max_size = (n << qopt->rate.cell_log)-1;
+               if (rtab->data[n] > qopt->buffer)
+                       break;
+       max_size = (n << qopt->rate.cell_log) - 1;
        if (ptab) {
                int size;
 
                for (n = 0; n < 256; n++)
-                       if (ptab->data[n] > qopt->mtu) break;
-               size = (n << qopt->peakrate.cell_log)-1;
-               if (size < max_size) max_size = size;
+                       if (ptab->data[n] > qopt->mtu)
+                               break;
+               size = (n << qopt->peakrate.cell_log) - 1;
+               if (size < max_size)
+                       max_size = size;
        }
        if (max_size < 0)
                goto done;
@@ -310,7 +312,7 @@ done:
        return err;
 }
 
-static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
        }
 }
 
-static const struct Qdisc_class_ops tbf_class_ops =
-{
+static const struct Qdisc_class_ops tbf_class_ops = {
        .graft          =       tbf_graft,
        .leaf           =       tbf_leaf,
        .get            =       tbf_get,
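
The reformatted scan in tbf_change() walks the 256-entry rate table until the per-slot cost exceeds the configured buffer, then derives max_size from the slot index and cell_log. A user-space rendering of that loop; the linear synthetic rate table and the buffer/cell_log values here are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t data[256];             /* stand-in for rtab->data[] */
        uint32_t buffer = 10000;        /* token bucket depth, arbitrary units */
        int cell_log = 3;               /* 8-byte cells, assumed for the example */
        int n;

        for (n = 0; n < 256; n++)
                data[n] = (n + 1) * 100;        /* synthetic, monotonically rising */

        for (n = 0; n < 256; n++)
                if (data[n] > buffer)
                        break;
        printf("max_size = %d\n", (n << cell_log) - 1);
        return 0;
}
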
index 84ce48e..45cd300 100644 (file)
@@ -53,8 +53,7 @@
       which will not break load balancing, though native slave
       traffic will have the highest priority.  */
 
-struct teql_master
-{
+struct teql_master {
        struct Qdisc_ops qops;
        struct net_device *dev;
        struct Qdisc *slaves;
@@ -65,29 +64,27 @@ struct teql_master
        unsigned long   tx_dropped;
 };
 
-struct teql_sched_data
-{
+struct teql_sched_data {
        struct Qdisc *next;
        struct teql_master *m;
        struct neighbour *ncache;
        struct sk_buff_head q;
 };
 
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
 
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
 
 /* "teql*" qdisc routines */
 
 static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct teql_sched_data *q = qdisc_priv(sch);
 
        if (q->q.qlen < dev->tx_queue_len) {
                __skb_queue_tail(&q->q, skb);
-               qdisc_bstats_update(sch, skb);
                return NET_XMIT_SUCCESS;
        }
 
@@ -97,7 +94,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 }
 
 static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
 {
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct netdev_queue *dat_queue;
@@ -111,19 +108,21 @@ teql_dequeue(struct Qdisc* sch)
                        dat->m->slaves = sch;
                        netif_wake_queue(m);
                }
+       } else {
+               qdisc_bstats_update(sch, skb);
        }
        sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
        return skb;
 }
 
 static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
 {
        /* teql is meant to be used as root qdisc */
        return NULL;
 }
 
-static __inline__ void
+static inline void
 teql_neigh_release(struct neighbour *n)
 {
        if (n)
@@ -131,7 +130,7 @@ teql_neigh_release(struct neighbour *n)
 }
 
 static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
 {
        struct teql_sched_data *dat = qdisc_priv(sch);
 
@@ -141,13 +140,14 @@ teql_reset(struct Qdisc* sch)
 }
 
 static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
 {
        struct Qdisc *q, *prev;
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct teql_master *master = dat->m;
 
-       if ((prev = master->slaves) != NULL) {
+       prev = master->slaves;
+       if (prev) {
                do {
                        q = NEXT_SLAVE(prev);
                        if (q == sch) {
@@ -179,7 +179,7 @@ teql_destroy(struct Qdisc* sch)
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct net_device *dev = qdisc_dev(sch);
-       struct teql_master *m = (struct teql_master*)sch->ops;
+       struct teql_master *m = (struct teql_master *)sch->ops;
        struct teql_sched_data *q = qdisc_priv(sch);
 
        if (dev->hard_header_len > m->dev->hard_header_len)
@@ -290,7 +290,8 @@ restart:
        nores = 0;
        busy = 0;
 
-       if ((q = start) == NULL)
+       q = start;
+       if (!q)
                goto drop;
 
        do {
@@ -355,10 +356,10 @@ drop:
 
 static int teql_master_open(struct net_device *dev)
 {
-       struct Qdisc * q;
+       struct Qdisc *q;
        struct teql_master *m = netdev_priv(dev);
        int mtu = 0xFFFE;
-       unsigned flags = IFF_NOARP|IFF_MULTICAST;
+       unsigned int flags = IFF_NOARP | IFF_MULTICAST;
 
        if (m->slaves == NULL)
                return -EUNATCH;
@@ -426,7 +427,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
                do {
                        if (new_mtu > qdisc_dev(q)->mtu)
                                return -EINVAL;
-               } while ((q=NEXT_SLAVE(q)) != m->slaves);
+               } while ((q = NEXT_SLAVE(q)) != m->slaves);
        }
 
        dev->mtu = new_mtu;
index 5f1fb8b..6b04287 100644 (file)
@@ -1089,7 +1089,6 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                             base.inqueue.immediate);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
-       struct sock *sk;
        struct sctp_inq *inqueue;
        int state;
        sctp_subtype_t subtype;
@@ -1097,7 +1096,6 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
 
        /* The association should be held so we should be safe. */
        ep = asoc->ep;
-       sk = asoc->base.sk;
 
        inqueue = &asoc->base.inqueue;
        sctp_association_hold(asoc);
index ea21924..826661b 100644 (file)
@@ -948,14 +948,11 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
        union sctp_addr addr;
        union sctp_addr *paddr = &addr;
        struct sctphdr *sh = sctp_hdr(skb);
-       sctp_chunkhdr_t *ch;
        union sctp_params params;
        sctp_init_chunk_t *init;
        struct sctp_transport *transport;
        struct sctp_af *af;
 
-       ch = (sctp_chunkhdr_t *) skb->data;
-
        /*
         * This code will NOT touch anything inside the chunk--it is
         * strictly READ-ONLY.
index 95e0c8e..865ce7b 100644 (file)
@@ -201,40 +201,40 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 {
        struct sock *sk = skb->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct flowi fl;
+       struct flowi6 fl6;
 
-       memset(&fl, 0, sizeof(fl));
+       memset(&fl6, 0, sizeof(fl6));
 
-       fl.proto = sk->sk_protocol;
+       fl6.flowi6_proto = sk->sk_protocol;
 
        /* Fill in the dest address from the route entry passed with the skb
         * and the source address from the transport.
         */
-       ipv6_addr_copy(&fl.fl6_dst, &transport->ipaddr.v6.sin6_addr);
-       ipv6_addr_copy(&fl.fl6_src, &transport->saddr.v6.sin6_addr);
+       ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
+       ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
 
-       fl.fl6_flowlabel = np->flow_label;
-       IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
-       if (ipv6_addr_type(&fl.fl6_src) & IPV6_ADDR_LINKLOCAL)
-               fl.oif = transport->saddr.v6.sin6_scope_id;
+       fl6.flowlabel = np->flow_label;
+       IP6_ECN_flow_xmit(sk, fl6.flowlabel);
+       if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
+               fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
        else
-               fl.oif = sk->sk_bound_dev_if;
+               fl6.flowi6_oif = sk->sk_bound_dev_if;
 
        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-               ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+               ipv6_addr_copy(&fl6.daddr, rt0->addr);
        }
 
        SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
                          __func__, skb, skb->len,
-                         &fl.fl6_src, &fl.fl6_dst);
+                         &fl6.saddr, &fl6.daddr);
 
        SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
                skb->local_df = 1;
 
-       return ip6_xmit(sk, skb, &fl, np->opt);
+       return ip6_xmit(sk, skb, &fl6, np->opt);
 }
 
 /* Returns the dst cache entry for the given source and destination ip
@@ -245,22 +245,22 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
                                         union sctp_addr *saddr)
 {
        struct dst_entry *dst;
-       struct flowi fl;
+       struct flowi6 fl6;
 
-       memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &daddr->v6.sin6_addr);
+       memset(&fl6, 0, sizeof(fl6));
+       ipv6_addr_copy(&fl6.daddr, &daddr->v6.sin6_addr);
        if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
-               fl.oif = daddr->v6.sin6_scope_id;
+               fl6.flowi6_oif = daddr->v6.sin6_scope_id;
 
 
-       SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl.fl6_dst);
+       SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6.daddr);
 
        if (saddr) {
-               ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr);
-               SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl.fl6_src);
+               ipv6_addr_copy(&fl6.saddr, &saddr->v6.sin6_addr);
+               SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6.saddr);
        }
 
-       dst = ip6_route_output(&init_net, NULL, &fl);
+       dst = ip6_route_output(&init_net, NULL, &fl6);
        if (!dst->error) {
                struct rt6_info *rt;
                rt = (struct rt6_info *)dst;
index 8c6d379..26dc005 100644 (file)
@@ -545,13 +545,11 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
        struct sctp_transport *transport = pkt->transport;
        sctp_xmit_t status;
        struct sctp_chunk *chunk, *chunk1;
-       struct sctp_association *asoc;
        int fast_rtx;
        int error = 0;
        int timer = 0;
        int done = 0;
 
-       asoc = q->asoc;
        lqueue = &q->retransmit;
        fast_rtx = q->fast_rtx;
 
index e58f947..152976e 100644 (file)
@@ -468,32 +468,32 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
                                         union sctp_addr *saddr)
 {
        struct rtable *rt;
-       struct flowi fl;
+       struct flowi4 fl4;
        struct sctp_bind_addr *bp;
        struct sctp_sockaddr_entry *laddr;
        struct dst_entry *dst = NULL;
        union sctp_addr dst_saddr;
 
-       memset(&fl, 0x0, sizeof(struct flowi));
-       fl.fl4_dst  = daddr->v4.sin_addr.s_addr;
-       fl.fl_ip_dport = daddr->v4.sin_port;
-       fl.proto = IPPROTO_SCTP;
+       memset(&fl4, 0x0, sizeof(struct flowi4));
+       fl4.daddr  = daddr->v4.sin_addr.s_addr;
+       fl4.fl4_dport = daddr->v4.sin_port;
+       fl4.flowi4_proto = IPPROTO_SCTP;
        if (asoc) {
-               fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
-               fl.oif = asoc->base.sk->sk_bound_dev_if;
-               fl.fl_ip_sport = htons(asoc->base.bind_addr.port);
+               fl4.flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
+               fl4.flowi4_oif = asoc->base.sk->sk_bound_dev_if;
+               fl4.fl4_sport = htons(asoc->base.bind_addr.port);
        }
        if (saddr) {
-               fl.fl4_src = saddr->v4.sin_addr.s_addr;
-               fl.fl_ip_sport = saddr->v4.sin_port;
+               fl4.saddr = saddr->v4.sin_addr.s_addr;
+               fl4.fl4_sport = saddr->v4.sin_port;
        }
 
        SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
-                         __func__, &fl.fl4_dst, &fl.fl4_src);
+                         __func__, &fl4.daddr, &fl4.saddr);
 
-       if (!ip_route_output_key(&init_net, &rt, &fl)) {
+       rt = ip_route_output_key(&init_net, &fl4);
+       if (!IS_ERR(rt))
                dst = &rt->dst;
-       }
 
        /* If there is no association or if a source address is passed, no
         * more validation is required.
@@ -533,9 +533,10 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
                        continue;
                if ((laddr->state == SCTP_ADDR_SRC) &&
                    (AF_INET == laddr->a.sa.sa_family)) {
-                       fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
-                       fl.fl_ip_sport = laddr->a.v4.sin_port;
-                       if (!ip_route_output_key(&init_net, &rt, &fl)) {
+                       fl4.saddr = laddr->a.v4.sin_addr.s_addr;
+                       fl4.fl4_sport = laddr->a.v4.sin_port;
+                       rt = ip_route_output_key(&init_net, &fl4);
+                       if (!IS_ERR(rt)) {
                                dst = &rt->dst;
                                goto out_unlock;
                        }
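
The sctp_v4_get_dst() hunk switches from the old int-returning ip_route_output_key() call to the variant that returns either a routing entry or an encoded errno, tested with IS_ERR(). A simplified user-space sketch of the ERR_PTR/IS_ERR idiom those call sites rely on; the real macros live in the kernel's <linux/err.h>, and route_lookup() here is a stand-in.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct rtable { int dummy; };

static struct rtable *route_lookup(int fail)
{
        static struct rtable rt;

        return fail ? (struct rtable *)ERR_PTR(-ENETUNREACH) : &rt;
}

int main(void)
{
        struct rtable *rt = route_lookup(1);

        if (!IS_ERR(rt))
                printf("got a route\n");
        else
                printf("lookup failed: %ld\n", PTR_ERR(rt));
        return 0;
}
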
index 2cc46f0..de98665 100644 (file)
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
                        *errp = sctp_make_op_error_fixed(asoc, chunk);
 
                if (*errp) {
-                       sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       WORD_ROUND(ntohs(param.p->length)));
-                       sctp_addto_chunk_fixed(*errp,
-                                       WORD_ROUND(ntohs(param.p->length)),
-                                       param.v);
+                       if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+                                       WORD_ROUND(ntohs(param.p->length))))
+                               sctp_addto_chunk_fixed(*errp,
+                                               WORD_ROUND(ntohs(param.p->length)),
+                                               param.v);
                } else {
                        /* If there is no memory for generating the ERROR
                         * report as specified, an ABORT will be triggered
@@ -3375,7 +3375,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
                                    struct sctp_fwdtsn_skip *skiplist)
 {
        struct sctp_chunk *retval = NULL;
-       struct sctp_fwdtsn_chunk *ftsn_chunk;
        struct sctp_fwdtsn_hdr ftsn_hdr;
        struct sctp_fwdtsn_skip skip;
        size_t hint;
@@ -3388,8 +3387,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
        if (!retval)
                return NULL;
 
-       ftsn_chunk = (struct sctp_fwdtsn_chunk *)retval->subh.fwdtsn_hdr;
-
        ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
        retval->subh.fwdtsn_hdr =
                sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
index 8e02550..3951a10 100644 (file)
@@ -2928,7 +2928,6 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
                                             unsigned int optlen)
 {
        struct sctp_sock        *sp;
-       struct sctp_endpoint    *ep;
        struct sctp_association *asoc = NULL;
        struct sctp_setpeerprim prim;
        struct sctp_chunk       *chunk;
@@ -2936,7 +2935,6 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
        int                     err;
 
        sp = sctp_sk(sk);
-       ep = sp->ep;
 
        if (!sctp_addip_enable)
                return -EPERM;
@@ -6102,15 +6100,16 @@ static void __sctp_write_space(struct sctp_association *asoc)
                        wake_up_interruptible(&asoc->wait);
 
                if (sctp_writeable(sk)) {
-                       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-                               wake_up_interruptible(sk_sleep(sk));
+                       wait_queue_head_t *wq = sk_sleep(sk);
+
+                       if (wq && waitqueue_active(wq))
+                               wake_up_interruptible(wq);
 
                        /* Note that we try to include the Async I/O support
                         * here by modeling from the current TCP/UDP code.
                         * We have not tested with it yet.
                         */
-                       if (sock->wq->fasync_list &&
-                           !(sk->sk_shutdown & SEND_SHUTDOWN))
+                       if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                sock_wake_async(sock,
                                                SOCK_WAKE_SPACE, POLL_OUT);
                }
index 747d541..f1e40ce 100644 (file)
@@ -344,7 +344,7 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 
        /* Refresh the gap ack information. */
        if (sctp_tsnmap_has_gap(map)) {
-               __u16 start, end;
+               __u16 start = 0, end = 0;
                sctp_tsnmap_iter_init(map, &iter);
                while (sctp_tsnmap_next_gap_ack(map, &iter,
                                                &start,
index c7f7e49..1767818 100644 (file)
@@ -105,11 +105,8 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
 {
        struct sk_buff_head temp;
-       sctp_data_chunk_t *hdr;
        struct sctp_ulpevent *event;
 
-       hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
-
        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
@@ -743,11 +740,9 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
-       __u16 sid, csid;
-       __u16 ssn, cssn;
+       __u16 sid, csid, cssn;
 
        sid = event->stream;
-       ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;
 
        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
index ac2219f..937d0fc 100644 (file)
@@ -240,17 +240,19 @@ static struct kmem_cache *sock_inode_cachep __read_mostly;
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
        struct socket_alloc *ei;
+       struct socket_wq *wq;
 
        ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
-       ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
-       if (!ei->socket.wq) {
+       wq = kmalloc(sizeof(*wq), GFP_KERNEL);
+       if (!wq) {
                kmem_cache_free(sock_inode_cachep, ei);
                return NULL;
        }
-       init_waitqueue_head(&ei->socket.wq->wait);
-       ei->socket.wq->fasync_list = NULL;
+       init_waitqueue_head(&wq->wait);
+       wq->fasync_list = NULL;
+       RCU_INIT_POINTER(ei->socket.wq, wq);
 
        ei->socket.state = SS_UNCONNECTED;
        ei->socket.flags = 0;
@@ -273,9 +275,11 @@ static void wq_free_rcu(struct rcu_head *head)
 static void sock_destroy_inode(struct inode *inode)
 {
        struct socket_alloc *ei;
+       struct socket_wq *wq;
 
        ei = container_of(inode, struct socket_alloc, vfs_inode);
-       call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+       wq = rcu_dereference_protected(ei->socket.wq, 1);
+       call_rcu(&wq->rcu, wq_free_rcu);
        kmem_cache_free(sock_inode_cachep, ei);
 }
 
@@ -524,7 +528,7 @@ void sock_release(struct socket *sock)
                module_put(owner);
        }
 
-       if (sock->wq->fasync_list)
+       if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
                printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
        percpu_sub(sockets_in_use, 1);
@@ -1108,15 +1112,16 @@ static int sock_fasync(int fd, struct file *filp, int on)
 {
        struct socket *sock = filp->private_data;
        struct sock *sk = sock->sk;
+       struct socket_wq *wq;
 
        if (sk == NULL)
                return -EINVAL;
 
        lock_sock(sk);
+       wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+       fasync_helper(fd, filp, on, &wq->fasync_list);
 
-       fasync_helper(fd, filp, on, &sock->wq->fasync_list);
-
-       if (!sock->wq->fasync_list)
+       if (!wq->fasync_list)
                sock_reset_flag(sk, SOCK_FASYNC);
        else
                sock_set_flag(sk, SOCK_FASYNC);
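
With socket->wq now published via RCU_INIT_POINTER() and read back with rcu_dereference_protected() under lock_sock() or during inode teardown, the wait-queue structure follows the usual RCU-managed-pointer lifecycle. A hedged sketch of that lifecycle; the example_wq/example_sock names are made up for the sketch, not from the patch:

/* Sketch only: the RCU-managed pointer lifecycle that the socket.wq
 * conversion relies on.  Structure and function names are illustrative.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

struct example_wq {
	int fasync_users;
	struct rcu_head rcu;
};

struct example_sock {
	struct example_wq __rcu *wq;
};

static int example_attach_wq(struct example_sock *es)
{
	struct example_wq *wq = kmalloc(sizeof(*wq), GFP_KERNEL);

	if (!wq)
		return -ENOMEM;
	wq->fasync_users = 0;
	/* No reader can see 'es' yet, so an RCU barrier is not needed. */
	RCU_INIT_POINTER(es->wq, wq);
	return 0;
}

static void example_wq_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_wq, rcu));
}

static void example_detach_wq(struct example_sock *es)
{
	/* The destroying context is the sole owner, hence the '1'. */
	struct example_wq *wq = rcu_dereference_protected(es->wq, 1);

	call_rcu(&wq->rcu, example_wq_free_rcu);
}
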
@@ -2643,7 +2648,8 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
 
                old_fs = get_fs();
                set_fs(KERNEL_DS);
-               err = dev_ioctl(net, cmd, &kifr);
+               err = dev_ioctl(net, cmd,
+                               (struct ifreq __user __force *) &kifr);
                set_fs(old_fs);
 
                return err;
@@ -2752,7 +2758,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
 
        old_fs = get_fs();
        set_fs(KERNEL_DS);
-       err = dev_ioctl(net, cmd, (void __user *)&ifr);
+       err = dev_ioctl(net, cmd, (void  __user __force *)&ifr);
        set_fs(old_fs);
 
        if (cmd == SIOCGIFMAP && !err) {
@@ -2857,7 +2863,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
                ret |= __get_user(rtdev, &(ur4->rt_dev));
                if (rtdev) {
                        ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
-                       r4.rt_dev = devname; devname[15] = 0;
+                       r4.rt_dev = (char __user __force *)devname;
+                       devname[15] = 0;
                } else
                        r4.rt_dev = NULL;
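
The compat ioctl hunks above add `__force` to casts that hand a kernel buffer to an interface declared with a `__user` pointer while the thread temporarily runs under set_fs(KERNEL_DS); the annotation only silences sparse's address-space check and does not change code generation. A sketch of the pattern; the wrapper name is hypothetical, while dev_ioctl() and the set_fs() dance mirror the hunks:

/* Sketch of the set_fs(KERNEL_DS) + __force __user cast pattern used in
 * the compat ioctl paths above; the wrapper itself is illustrative.
 */
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int example_kernel_dev_ioctl(struct net *net, unsigned int cmd,
				    struct ifreq *kifr)
{
	mm_segment_t old_fs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	/* kifr lives in kernel space; __force tells sparse the
	 * address-space conversion is intentional, and KERNEL_DS makes
	 * the user-access helpers accept a kernel pointer.
	 */
	err = dev_ioctl(net, cmd, (struct ifreq __user __force *)kifr);
	set_fs(old_fs);
	return err;
}
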
 
index 7bd3bbb..b7d435c 100644 (file)
@@ -420,6 +420,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 static void svc_udp_data_ready(struct sock *sk, int count)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        if (svsk) {
                dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
@@ -428,8 +429,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
                set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible(sk_sleep(sk));
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible(wq);
 }
 
 /*
@@ -438,6 +439,7 @@ static void svc_udp_data_ready(struct sock *sk, int count)
 static void svc_write_space(struct sock *sk)
 {
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -445,10 +447,10 @@ static void svc_write_space(struct sock *sk)
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
 
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
+       if (wq && waitqueue_active(wq)) {
                dprintk("RPC svc_write_space: someone sleeping on %p\n",
                       svsk);
-               wake_up_interruptible(sk_sleep(sk));
+               wake_up_interruptible(wq);
        }
 }
 
@@ -739,6 +741,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq;
 
        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);
@@ -761,8 +764,9 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
                        printk("svc: socket %p: no user data\n", sk);
        }
 
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible_all(sk_sleep(sk));
+       wq = sk_sleep(sk);
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible_all(wq);
 }
 
 /*
@@ -771,6 +775,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 static void svc_tcp_state_change(struct sock *sk)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);
@@ -781,13 +786,14 @@ static void svc_tcp_state_change(struct sock *sk)
                set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible_all(sk_sleep(sk));
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible_all(wq);
 }
 
 static void svc_tcp_data_ready(struct sock *sk, int count)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
@@ -795,8 +801,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
                set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible(sk_sleep(sk));
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible(wq);
 }
 
 /*
@@ -1531,6 +1537,7 @@ static void svc_sock_detach(struct svc_xprt *xprt)
 {
        struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
        struct sock *sk = svsk->sk_sk;
+       wait_queue_head_t *wq;
 
        dprintk("svc: svc_sock_detach(%p)\n", svsk);
 
@@ -1539,8 +1546,9 @@ static void svc_sock_detach(struct svc_xprt *xprt)
        sk->sk_data_ready = svsk->sk_odata;
        sk->sk_write_space = svsk->sk_owspace;
 
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible(sk_sleep(sk));
+       wq = sk_sleep(sk);
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible(wq);
 }
 
 /*
@@ -1609,9 +1617,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
  */
 static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
-       if (xprt) {
-               kfree(xprt->xpt_bc_sid);
+       if (xprt)
                kfree(container_of(xprt, struct svc_sock, sk_xprt));
-       }
 }
 #endif /* CONFIG_NFS_V4_1 */
index 0436927..2c5954b 100644 (file)
@@ -29,18 +29,6 @@ config TIPC_ADVANCED
          Saying Y here will open some advanced configuration for TIPC.
          Most users do not need to bother; if unsure, just say N.
 
-config TIPC_NODES
-       int "Maximum number of nodes in a cluster"
-       depends on TIPC_ADVANCED
-       range 8 2047
-       default "255"
-       help
-         Specifies how many nodes can be supported in a TIPC cluster.
-         Can range from 8 to 2047 nodes; default is 255.
-
-         Setting this to a smaller value saves some memory;
-         setting it to higher allows for more nodes.
-
 config TIPC_PORTS
        int "Maximum number of ports in a node"
        depends on TIPC_ADVANCED
index 88463d9..a6fdab3 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/addr.c: TIPC address utility routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -41,7 +41,7 @@
  * tipc_addr_domain_valid - validates a network domain address
  *
  * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
- * where Z, C, and N are non-zero and do not exceed the configured limits.
+ * where Z, C, and N are non-zero.
  *
  * Returns 1 if domain address is valid, otherwise 0
  */
@@ -51,10 +51,6 @@ int tipc_addr_domain_valid(u32 addr)
        u32 n = tipc_node(addr);
        u32 c = tipc_cluster(addr);
        u32 z = tipc_zone(addr);
-       u32 max_nodes = tipc_max_nodes;
-
-       if (n > max_nodes)
-               return 0;
 
        if (n && (!z || !c))
                return 0;
@@ -66,8 +62,7 @@ int tipc_addr_domain_valid(u32 addr)
 /**
  * tipc_addr_node_valid - validates a proposed network address for this node
  *
- * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
- * the configured limits.
+ * Accepts <Z.C.N>, where Z, C, and N are non-zero.
  *
  * Returns 1 if address can be used, otherwise 0
  */
@@ -81,9 +76,9 @@ int tipc_in_scope(u32 domain, u32 addr)
 {
        if (!domain || (domain == addr))
                return 1;
-       if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+       if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
                return 1;
-       if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+       if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */
                return 1;
        return 0;
 }
index 2490fad..8971aba 100644 (file)
 #ifndef _TIPC_ADDR_H
 #define _TIPC_ADDR_H
 
+static inline u32 tipc_zone_mask(u32 addr)
+{
+       return addr & 0xff000000u;
+}
+
+static inline u32 tipc_cluster_mask(u32 addr)
+{
+       return addr & 0xfffff000u;
+}
+
 static inline int in_own_cluster(u32 addr)
 {
        return !((addr ^ tipc_own_addr) >> 12);
@@ -49,14 +59,13 @@ static inline int in_own_cluster(u32 addr)
  * after a network hop.
  */
 
-static inline int addr_domain(int sc)
+static inline u32 addr_domain(u32 sc)
 {
        if (likely(sc == TIPC_NODE_SCOPE))
                return tipc_own_addr;
        if (sc == TIPC_CLUSTER_SCOPE)
-               return tipc_addr(tipc_zone(tipc_own_addr),
-                                tipc_cluster(tipc_own_addr), 0);
-       return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
+               return tipc_cluster_mask(tipc_own_addr);
+       return tipc_zone_mask(tipc_own_addr);
 }
 
 int tipc_addr_domain_valid(u32);
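
tipc_zone_mask() and tipc_cluster_mask() encode the <Z.C.N> layout in which the zone occupies bits 31-24, the cluster bits 23-12 and the node bits 11-0, which is the packing the replaced tipc_addr()/tipc_zone()/tipc_cluster() calls expressed. A standalone illustration of that arithmetic, assuming only the layout implied by the masks above:

/* Standalone illustration (plain C) of the <Z.C.N> packing implied by
 * the masks above: zone in bits 31-24, cluster in bits 23-12, node in
 * bits 11-0.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t example_tipc_addr(uint32_t z, uint32_t c, uint32_t n)
{
	return (z << 24) | (c << 12) | n;
}

int main(void)
{
	uint32_t addr = example_tipc_addr(1, 1, 10);	/* <1.1.10> */

	printf("address       0x%08x\n", addr);
	printf("zone mask     0x%08x  (<1.0.0>)\n", addr & 0xff000000u);
	printf("cluster mask  0x%08x  (<1.1.0>)\n", addr & 0xfffff000u);
	return 0;
}
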
index 70ab5ef..7dc1dc7 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2004-2006, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,8 +61,8 @@
  */
 
 struct bcbearer_pair {
-       struct bearer *primary;
-       struct bearer *secondary;
+       struct tipc_bearer *primary;
+       struct tipc_bearer *secondary;
 };
 
 /**
@@ -81,7 +81,7 @@ struct bcbearer_pair {
  */
 
 struct bcbearer {
-       struct bearer bearer;
+       struct tipc_bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
@@ -93,6 +93,7 @@ struct bcbearer {
  * struct bclink - link used for broadcast messages
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
@@ -100,6 +101,7 @@ struct bcbearer {
 struct bclink {
        struct link link;
        struct tipc_node node;
+       struct tipc_node *retransmit_to;
 };
 
 
@@ -183,6 +185,17 @@ static int bclink_ack_allowed(u32 n)
 }
 
 
+/**
+ * tipc_bclink_retransmit_to - get most recent node to request retransmission
+ *
+ * Called with bc_lock locked
+ */
+
+struct tipc_node *tipc_bclink_retransmit_to(void)
+{
+       return bclink->retransmit_to;
+}
+
 /**
  * bclink_retransmit_pkt - retransmit broadcast packets
  * @after: sequence number of last packet to *not* retransmit
@@ -285,6 +298,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                         INT_H_SIZE, n_ptr->addr);
+               msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
@@ -405,8 +419,6 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
        else
                bclink_set_last_sent();
 
-       if (bcl->out_queue_size > bcl->stats.max_queue_sz)
-               bcl->stats.max_queue_sz = bcl->out_queue_size;
        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;
 
@@ -444,10 +456,9 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
-                       bcl->owner->next = node;   /* remember requestor */
+                       bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       bcl->owner->next = NULL;
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_bclink_peek_nack(msg_destnode(msg),
@@ -574,8 +585,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
        bcbearer->remains = tipc_bcast_nmap;
 
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
-               struct bearer *p = bcbearer->bpairs[bp_index].primary;
-               struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+               struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
+               struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
 
                if (!p)
                        break;  /* no more bearers to try */
@@ -584,11 +595,11 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */
 
-               if (p->publ.blocked ||
-                   p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+               if (p->blocked ||
+                   p->media->send_msg(buf, p, &p->media->bcast_addr)) {
                        /* unable to send on primary bearer */
-                       if (!s || s->publ.blocked ||
-                           s->media->send_msg(buf, &s->publ,
+                       if (!s || s->blocked ||
+                           s->media->send_msg(buf, s,
                                               &s->media->bcast_addr)) {
                                /* unable to send on either bearer */
                                continue;
@@ -633,7 +644,7 @@ void tipc_bcbearer_sort(void)
        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               struct bearer *b = &tipc_bearers[b_index];
+               struct tipc_bearer *b = &tipc_bearers[b_index];
 
                if (!b->active || !b->nodes.count)
                        continue;
@@ -682,12 +693,12 @@ void tipc_bcbearer_sort(void)
 
 void tipc_bcbearer_push(void)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
 
        spin_lock_bh(&bc_lock);
        b_ptr = &bcbearer->bearer;
-       if (b_ptr->publ.blocked) {
-               b_ptr->publ.blocked = 0;
+       if (b_ptr->blocked) {
+               b_ptr->blocked = 0;
                tipc_bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
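
tipc_bclink_retransmit_to() replaces the old trick of stashing the requesting node in bcl->owner->next: the pointer is written under bc_lock when a NACK arrives, and the accessor is documented as valid only with bc_lock held. A small sketch of that accessor-under-lock pattern; the example_* names are made up and are not TIPC code:

/* Illustrative accessor-under-lock pattern mirroring retransmit_to;
 * names are invented for the sketch.
 */
#include <linux/spinlock.h>

struct example_node;

static DEFINE_SPINLOCK(example_bc_lock);
static struct example_node *example_retransmit_to;

static void example_note_requestor(struct example_node *node)
{
	spin_lock_bh(&example_bc_lock);
	example_retransmit_to = node;		/* remember the requestor */
	spin_unlock_bh(&example_bc_lock);
}

/* Caller must hold example_bc_lock, as with tipc_bclink_retransmit_to(). */
static struct example_node *example_get_requestor(void)
{
	return example_retransmit_to;
}
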
index 51f8c53..500c97f 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/bcast.h: Include file for TIPC broadcast code
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,7 @@ void tipc_port_list_free(struct port_list *pl_ptr);
 
 int  tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
 int  tipc_bclink_send_msg(struct sk_buff *buf);
 void tipc_bclink_recv_pkt(struct sk_buff *buf);
index 837b7a4..411719f 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/bearer.c: TIPC bearer code
  *
  * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2004-2006, Wind River Systems
+ * Copyright (c) 2004-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,7 @@
 static struct media media_list[MAX_MEDIA];
 static u32 media_count;
 
-struct bearer tipc_bearers[MAX_BEARERS];
+struct tipc_bearer tipc_bearers[MAX_BEARERS];
 
 /**
  * media_name_valid - validate media name
@@ -158,7 +158,6 @@ int  tipc_register_media(u32 media_type,
        m_ptr->disable_bearer = disable;
        m_ptr->addr2str = addr2str;
        memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
-       m_ptr->bcast = 1;
        strcpy(m_ptr->name, name);
        m_ptr->priority = bearer_priority;
        m_ptr->tolerance = link_tolerance;
@@ -278,13 +277,13 @@ static int bearer_name_validate(const char *name,
  * bearer_find - locates bearer object with matching bearer name
  */
 
-static struct bearer *bearer_find(const char *name)
+static struct tipc_bearer *bearer_find(const char *name)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
-               if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+               if (b_ptr->active && (!strcmp(b_ptr->name, name)))
                        return b_ptr;
        }
        return NULL;
@@ -294,16 +293,16 @@ static struct bearer *bearer_find(const char *name)
  * tipc_bearer_find_interface - locates bearer object with matching interface name
  */
 
-struct bearer *tipc_bearer_find_interface(const char *if_name)
+struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        char *b_if_name;
        u32 i;
 
        for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
                if (!b_ptr->active)
                        continue;
-               b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+               b_if_name = strchr(b_ptr->name, ':') + 1;
                if (!strcmp(b_if_name, if_name))
                        return b_ptr;
        }
@@ -318,7 +317,7 @@ struct sk_buff *tipc_bearer_get_names(void)
 {
        struct sk_buff *buf;
        struct media *m_ptr;
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        int i, j;
 
        buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
@@ -331,8 +330,8 @@ struct sk_buff *tipc_bearer_get_names(void)
                        b_ptr = &tipc_bearers[j];
                        if (b_ptr->active && (b_ptr->media == m_ptr)) {
                                tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
-                                                   b_ptr->publ.name,
-                                                   strlen(b_ptr->publ.name) + 1);
+                                                   b_ptr->name,
+                                                   strlen(b_ptr->name) + 1);
                        }
                }
        }
@@ -340,14 +339,14 @@ struct sk_buff *tipc_bearer_get_names(void)
        return buf;
 }
 
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
 {
        tipc_nmap_add(&b_ptr->nodes, dest);
        tipc_disc_update_link_req(b_ptr->link_req);
        tipc_bcbearer_sort();
 }
 
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
 {
        tipc_nmap_remove(&b_ptr->nodes, dest);
        tipc_disc_update_link_req(b_ptr->link_req);
@@ -362,12 +361,12 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
  * bearer.lock must be taken before calling
  * Returns binary true(1) or false(0)
  */
-static int bearer_push(struct bearer *b_ptr)
+static int bearer_push(struct tipc_bearer *b_ptr)
 {
        u32 res = 0;
        struct link *ln, *tln;
 
-       if (b_ptr->publ.blocked)
+       if (b_ptr->blocked)
                return 0;
 
        while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
@@ -382,13 +381,13 @@ static int bearer_push(struct bearer *b_ptr)
        return list_empty(&b_ptr->cong_links);
 }
 
-void tipc_bearer_lock_push(struct bearer *b_ptr)
+void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
 {
        int res;
 
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        res = bearer_push(b_ptr);
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
        if (res)
                tipc_bcbearer_push();
 }
@@ -398,16 +397,14 @@ void tipc_bearer_lock_push(struct bearer *b_ptr)
  * Interrupt enabling new requests after bearer congestion or blocking:
  * See bearer_send().
  */
-void tipc_continue(struct tipc_bearer *tb_ptr)
+void tipc_continue(struct tipc_bearer *b_ptr)
 {
-       struct bearer *b_ptr = (struct bearer *)tb_ptr;
-
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        b_ptr->continue_count++;
        if (!list_empty(&b_ptr->cong_links))
                tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
-       b_ptr->publ.blocked = 0;
-       spin_unlock_bh(&b_ptr->publ.lock);
+       b_ptr->blocked = 0;
+       spin_unlock_bh(&b_ptr->lock);
 }
 
 /*
@@ -418,7 +415,7 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
  * bearer.lock is busy
  */
 
-static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
        list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
 }
@@ -431,11 +428,11 @@ static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_p
  * bearer.lock is free
  */
 
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
 }
 
 
@@ -444,18 +441,18 @@ void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
  * and if there is, try to resolve it before returning.
  * 'tipc_net_lock' is read_locked when this function is called
  */
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
        int res = 1;
 
        if (list_empty(&b_ptr->cong_links))
                return 1;
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        if (!bearer_push(b_ptr)) {
                tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
                res = 0;
        }
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
        return res;
 }
 
@@ -463,9 +460,9 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
  * tipc_bearer_congested - determines if bearer is currently congested
  */
 
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
-       if (unlikely(b_ptr->publ.blocked))
+       if (unlikely(b_ptr->blocked))
                return 1;
        if (likely(list_empty(&b_ptr->cong_links)))
                return 0;
@@ -476,9 +473,9 @@ int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
  * tipc_enable_bearer - enable bearer with the given name
  */
 
-int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
+int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        struct media *m_ptr;
        struct bearer_name b_name;
        char addr_string[16];
@@ -496,9 +493,9 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
                warn("Bearer <%s> rejected, illegal name\n", name);
                return -EINVAL;
        }
-       if (!tipc_addr_domain_valid(bcast_scope) ||
-           !tipc_in_scope(bcast_scope, tipc_own_addr)) {
-               warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
+       if (!tipc_addr_domain_valid(disc_domain) ||
+           !tipc_in_scope(disc_domain, tipc_own_addr)) {
+               warn("Bearer <%s> rejected, illegal discovery domain\n", name);
                return -EINVAL;
        }
        if ((priority < TIPC_MIN_LINK_PRI ||
@@ -528,7 +525,7 @@ restart:
                        bearer_id = i;
                        continue;
                }
-               if (!strcmp(name, tipc_bearers[i].publ.name)) {
+               if (!strcmp(name, tipc_bearers[i].name)) {
                        warn("Bearer <%s> rejected, already enabled\n", name);
                        goto failed;
                }
@@ -551,8 +548,8 @@ restart:
        }
 
        b_ptr = &tipc_bearers[bearer_id];
-       strcpy(b_ptr->publ.name, name);
-       res = m_ptr->enable_bearer(&b_ptr->publ);
+       strcpy(b_ptr->name, name);
+       res = m_ptr->enable_bearer(b_ptr);
        if (res) {
                warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
                goto failed;
@@ -562,18 +559,15 @@ restart:
        b_ptr->media = m_ptr;
        b_ptr->net_plane = bearer_id + 'A';
        b_ptr->active = 1;
-       b_ptr->detect_scope = bcast_scope;
        b_ptr->priority = priority;
        INIT_LIST_HEAD(&b_ptr->cong_links);
        INIT_LIST_HEAD(&b_ptr->links);
-       if (m_ptr->bcast) {
-               b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
-                                                         bcast_scope, 2);
-       }
-       spin_lock_init(&b_ptr->publ.lock);
+       b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+                                                 disc_domain);
+       spin_lock_init(&b_ptr->lock);
        write_unlock_bh(&tipc_net_lock);
        info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
-            name, tipc_addr_string_fill(addr_string, bcast_scope), priority);
+            name, tipc_addr_string_fill(addr_string, disc_domain), priority);
        return 0;
 failed:
        write_unlock_bh(&tipc_net_lock);
@@ -587,7 +581,7 @@ failed:
 
 int tipc_block_bearer(const char *name)
 {
-       struct bearer *b_ptr = NULL;
+       struct tipc_bearer *b_ptr = NULL;
        struct link *l_ptr;
        struct link *temp_l_ptr;
 
@@ -600,8 +594,8 @@ int tipc_block_bearer(const char *name)
        }
 
        info("Blocking bearer <%s>\n", name);
-       spin_lock_bh(&b_ptr->publ.lock);
-       b_ptr->publ.blocked = 1;
+       spin_lock_bh(&b_ptr->lock);
+       b_ptr->blocked = 1;
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                struct tipc_node *n_ptr = l_ptr->owner;
 
@@ -609,7 +603,7 @@ int tipc_block_bearer(const char *name)
                tipc_link_reset(l_ptr);
                spin_unlock_bh(&n_ptr->lock);
        }
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
        read_unlock_bh(&tipc_net_lock);
        return 0;
 }
@@ -620,27 +614,27 @@ int tipc_block_bearer(const char *name)
  * Note: This routine assumes caller holds tipc_net_lock.
  */
 
-static void bearer_disable(struct bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr)
 {
        struct link *l_ptr;
        struct link *temp_l_ptr;
 
-       info("Disabling bearer <%s>\n", b_ptr->publ.name);
+       info("Disabling bearer <%s>\n", b_ptr->name);
        tipc_disc_stop_link_req(b_ptr->link_req);
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        b_ptr->link_req = NULL;
-       b_ptr->publ.blocked = 1;
-       b_ptr->media->disable_bearer(&b_ptr->publ);
+       b_ptr->blocked = 1;
+       b_ptr->media->disable_bearer(b_ptr);
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
-       spin_unlock_bh(&b_ptr->publ.lock);
-       memset(b_ptr, 0, sizeof(struct bearer));
+       spin_unlock_bh(&b_ptr->lock);
+       memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
 
 int tipc_disable_bearer(const char *name)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        int res;
 
        write_lock_bh(&tipc_net_lock);
index 85f451d..31d6172 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/bearer.h: Include file for TIPC bearer code
  *
  * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,26 +61,7 @@ struct tipc_media_addr {
        } dev_addr;
 };
 
-/**
- * struct tipc_bearer - TIPC bearer info available to media code
- * @usr_handle: pointer to additional media-specific information about bearer
- * @mtu: max packet size bearer can support
- * @blocked: non-zero if bearer is blocked
- * @lock: spinlock for controlling access to bearer
- * @addr: media-specific address associated with bearer
- * @name: bearer name (format = media:interface)
- *
- * Note: TIPC initializes "name" and "lock" fields; media code is responsible
- * for initialization all other fields when a bearer is enabled.
- */
-struct tipc_bearer {
-       void *usr_handle;
-       u32 mtu;
-       int blocked;
-       spinlock_t lock;
-       struct tipc_media_addr addr;
-       char name[TIPC_MAX_BEARER_NAME];
-};
+struct tipc_bearer;
 
 /**
  * struct media - TIPC media information available to internal users
@@ -89,7 +70,6 @@ struct tipc_bearer {
  * @disable_bearer: routine which disables a bearer
  * @addr2str: routine which converts bearer's address to string form
  * @bcast_addr: media address used in broadcasting
- * @bcast: non-zero if media supports broadcasting [currently mandatory]
  * @priority: default link (and bearer) priority
  * @tolerance: default time (in ms) before declaring link failure
  * @window: default window (in packets) before declaring link congestion
@@ -106,7 +86,6 @@ struct media {
        char *(*addr2str)(struct tipc_media_addr *a,
                          char *str_buf, int str_size);
        struct tipc_media_addr bcast_addr;
-       int bcast;
        u32 priority;
        u32 tolerance;
        u32 window;
@@ -115,11 +94,15 @@ struct media {
 };
 
 /**
- * struct bearer - TIPC bearer information available to internal users
- * @publ: bearer information available to privileged users
+ * struct tipc_bearer - TIPC bearer structure
+ * @usr_handle: pointer to additional media-specific information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
  * @priority: default link priority for bearer
- * @detect_scope: network address mask used during automatic link creation
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @links: list of non-congested links associated with bearer
@@ -128,13 +111,20 @@ struct media {
  * @active: non-zero if bearer structure represents a bearer
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
  * @nodes: indicates which nodes in cluster can be reached through bearer
+ *
+ * Note: media-specific code is responsible for initialization of the fields
+ * indicated below when a bearer is enabled; TIPC's generic bearer code takes
+ * care of initializing all other fields.
  */
-
-struct bearer {
-       struct tipc_bearer publ;
+struct tipc_bearer {
+       void *usr_handle;                       /* initialized by media */
+       u32 mtu;                                /* initialized by media */
+       int blocked;                            /* initialized by media */
+       struct tipc_media_addr addr;            /* initialized by media */
+       char name[TIPC_MAX_BEARER_NAME];
+       spinlock_t lock;
        struct media *media;
        u32 priority;
-       u32 detect_scope;
        u32 identity;
        struct link_req *link_req;
        struct list_head links;
@@ -152,7 +142,7 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer tipc_bearers[];
+extern struct tipc_bearer tipc_bearers[];
 
 /*
  * TIPC routines available to supported media types
@@ -173,7 +163,7 @@ void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
 int  tipc_block_bearer(const char *name);
 void tipc_continue(struct tipc_bearer *tb_ptr);
 
-int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
 int tipc_disable_bearer(const char *name);
 
 /*
@@ -186,14 +176,14 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
 
 struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
-struct bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr);
+struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr);
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr);
 void tipc_bearer_stop(void);
-void tipc_bearer_lock_push(struct bearer *b_ptr);
+void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
 
 
 /**
@@ -214,10 +204,11 @@ void tipc_bearer_lock_push(struct bearer *b_ptr);
  * and let TIPC's link code deal with the undelivered message.
  */
 
-static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
+                                  struct sk_buff *buf,
                                   struct tipc_media_addr *dest)
 {
-       return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+       return !b_ptr->media->send_msg(buf, b_ptr, dest);
 }
 
 #endif /* _TIPC_BEARER_H */
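
The reworked kernel-doc above splits responsibility for struct tipc_bearer: the media driver's enable routine fills usr_handle, mtu, blocked and addr, while bearer.c owns name, lock and the remaining fields. A hedged sketch of a hypothetical media enable callback honouring that contract; the example_* names, the 1500-byte MTU and the private state struct are assumptions, not part of the patch:

/* Hypothetical media enable routine; only the media-owned fields of
 * struct tipc_bearer are touched, per the note in the kernel-doc above.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "bearer.h"		/* struct tipc_bearer, struct tipc_media_addr */

struct example_media_state {
	int dummy;		/* stand-in for real per-bearer driver state */
};

static int example_enable_bearer(struct tipc_bearer *tb_ptr)
{
	struct example_media_state *st = kzalloc(sizeof(*st), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;

	tb_ptr->usr_handle = st;		/* media-private state */
	tb_ptr->mtu = 1500;			/* assumed link MTU */
	tb_ptr->blocked = 0;			/* ready to send */
	memset(&tb_ptr->addr, 0, sizeof(tb_ptr->addr));	/* real address goes here */
	return 0;
}
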
index e16750d..b25a396 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -148,7 +148,7 @@ static struct sk_buff *cfg_enable_bearer(void)
 
        args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
        if (tipc_enable_bearer(args->name,
-                              ntohl(args->detect_scope),
+                              ntohl(args->disc_domain),
                               ntohl(args->priority)))
                return tipc_cfg_reply_error_string("unable to enable bearer");
 
@@ -260,25 +260,6 @@ static struct sk_buff *cfg_set_max_ports(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_max_nodes(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value == tipc_max_nodes)
-               return tipc_cfg_reply_none();
-       if (value != delimit(value, 8, 2047))
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max nodes must be 8-2047)");
-       if (tipc_mode == TIPC_NET_MODE)
-               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                       " (cannot change max nodes once TIPC has joined a network)");
-       tipc_max_nodes = value;
-       return tipc_cfg_reply_none();
-}
-
 static struct sk_buff *cfg_set_netid(void)
 {
        u32 value;
@@ -397,9 +378,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_MAX_SUBSCR:
                rep_tlv_buf = cfg_set_max_subscriptions();
                break;
-       case TIPC_CMD_SET_MAX_NODES:
-               rep_tlv_buf = cfg_set_max_nodes();
-               break;
        case TIPC_CMD_SET_NETID:
                rep_tlv_buf = cfg_set_netid();
                break;
@@ -415,9 +393,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_SUBSCR:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
                break;
-       case TIPC_CMD_GET_MAX_NODES:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
-               break;
        case TIPC_CMD_GET_NETID:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
                break;
@@ -431,6 +406,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_SLAVES:
        case TIPC_CMD_SET_MAX_CLUSTERS:
        case TIPC_CMD_GET_MAX_CLUSTERS:
+       case TIPC_CMD_SET_MAX_NODES:
+       case TIPC_CMD_GET_MAX_NODES:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                          " (obsolete command)");
                break;
index e071579..c9a73e7 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/core.c: TIPC module code
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "config.h"
 
 
-#ifndef CONFIG_TIPC_NODES
-#define CONFIG_TIPC_NODES 255
-#endif
-
 #ifndef CONFIG_TIPC_PORTS
 #define CONFIG_TIPC_PORTS 8191
 #endif
@@ -57,7 +53,6 @@
 
 int tipc_mode = TIPC_NOT_RUNNING;
 int tipc_random;
-atomic_t tipc_user_count = ATOMIC_INIT(0);
 
 const char tipc_alphabet[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
@@ -65,7 +60,6 @@ const char tipc_alphabet[] =
 /* configurable TIPC parameters */
 
 u32 tipc_own_addr;
-int tipc_max_nodes;
 int tipc_max_ports;
 int tipc_max_subscriptions;
 int tipc_max_publications;
@@ -193,7 +187,6 @@ static int __init tipc_init(void)
        tipc_max_publications = 10000;
        tipc_max_subscriptions = 2000;
        tipc_max_ports = CONFIG_TIPC_PORTS;
-       tipc_max_nodes = CONFIG_TIPC_NODES;
        tipc_net_id = 4711;
 
        res = tipc_core_start();
index 9971585..436dda1 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/core.h: Include file for TIPC global declarations
  *
  * Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,6 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
  */
 
 extern u32 tipc_own_addr;
-extern int tipc_max_nodes;
 extern int tipc_max_ports;
 extern int tipc_max_subscriptions;
 extern int tipc_max_publications;
@@ -161,7 +160,6 @@ extern int tipc_remote_management;
 extern int tipc_mode;
 extern int tipc_random;
 extern const char tipc_alphabet[];
-extern atomic_t tipc_user_count;
 
 
 /*
index fa026bd..491eff5 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/discover.c
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@
  * @timer_intv: current interval between requests (in ms)
  */
 struct link_req {
-       struct bearer *bearer;
+       struct tipc_bearer *bearer;
        struct tipc_media_addr dest;
        struct sk_buff *buf;
        struct timer_list timer;
@@ -67,27 +67,24 @@ struct link_req {
 /**
  * tipc_disc_init_msg - initialize a link setup message
  * @type: message type (request or response)
- * @req_links: number of links associated with message
  * @dest_domain: network domain of node(s) which should respond to message
  * @b_ptr: ptr to bearer issuing message
  */
 
 static struct sk_buff *tipc_disc_init_msg(u32 type,
-                                         u32 req_links,
                                          u32 dest_domain,
-                                         struct bearer *b_ptr)
+                                         struct tipc_bearer *b_ptr)
 {
-       struct sk_buff *buf = tipc_buf_acquire(DSC_H_SIZE);
+       struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
        struct tipc_msg *msg;
 
        if (buf) {
                msg = buf_msg(buf);
-               tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
+               tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
                msg_set_non_seq(msg, 1);
-               msg_set_req_links(msg, req_links);
                msg_set_dest_domain(msg, dest_domain);
                msg_set_bc_netid(msg, tipc_net_id);
-               msg_set_media_addr(msg, &b_ptr->publ.addr);
+               msg_set_media_addr(msg, &b_ptr->addr);
        }
        return buf;
 }
@@ -99,7 +96,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
  * @media_addr: media address advertised by duplicated node
  */
 
-static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
+static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
                            struct tipc_media_addr *media_addr)
 {
        char node_addr_str[16];
@@ -111,7 +108,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
        tipc_media_addr_printf(&pb, media_addr);
        tipc_printbuf_validate(&pb);
        warn("Duplicate %s using %s seen on <%s>\n",
-            node_addr_str, media_addr_str, b_ptr->publ.name);
+            node_addr_str, media_addr_str, b_ptr->name);
 }
 
 /**
@@ -120,19 +117,23 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
  * @b_ptr: bearer that message arrived on
  */
 
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
+void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 {
+       struct tipc_node *n_ptr;
        struct link *link;
-       struct tipc_media_addr media_addr;
+       struct tipc_media_addr media_addr, *addr;
+       struct sk_buff *rbuf;
        struct tipc_msg *msg = buf_msg(buf);
        u32 dest = msg_dest_domain(msg);
        u32 orig = msg_prevnode(msg);
        u32 net_id = msg_bc_netid(msg);
        u32 type = msg_type(msg);
+       int link_fully_up;
 
        msg_get_media_addr(msg, &media_addr);
        buf_discard(buf);
 
+       /* Validate discovery message from requesting node */
        if (net_id != tipc_net_id)
                return;
        if (!tipc_addr_domain_valid(dest))
@@ -140,63 +141,76 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
        if (!tipc_addr_node_valid(orig))
                return;
        if (orig == tipc_own_addr) {
-               if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
+               if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
                        disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
                return;
        }
        if (!tipc_in_scope(dest, tipc_own_addr))
                return;
-       if (in_own_cluster(orig)) {
-               /* Always accept link here */
-               struct sk_buff *rbuf;
-               struct tipc_media_addr *addr;
-               struct tipc_node *n_ptr = tipc_node_find(orig);
-               int link_fully_up;
-
-               if (n_ptr == NULL) {
-                       n_ptr = tipc_node_create(orig);
-                       if (!n_ptr)
-                               return;
-               }
-               spin_lock_bh(&n_ptr->lock);
-
-               /* Don't talk to neighbor during cleanup after last session */
+       if (!in_own_cluster(orig))
+               return;
 
-               if (n_ptr->cleanup_required) {
-                       spin_unlock_bh(&n_ptr->lock);
+       /* Locate structure corresponding to requesting node */
+       n_ptr = tipc_node_find(orig);
+       if (!n_ptr) {
+               n_ptr = tipc_node_create(orig);
+               if (!n_ptr)
                        return;
-               }
+       }
+       tipc_node_lock(n_ptr);
+
+       /* Don't talk to neighbor during cleanup after last session */
+       if (n_ptr->cleanup_required) {
+               tipc_node_unlock(n_ptr);
+               return;
+       }
+
+       link = n_ptr->links[b_ptr->identity];
 
-               link = n_ptr->links[b_ptr->identity];
+       /* Create a link endpoint for this bearer, if necessary */
+       if (!link) {
+               link = tipc_link_create(n_ptr, b_ptr, &media_addr);
                if (!link) {
-                       link = tipc_link_create(b_ptr, orig, &media_addr);
-                       if (!link) {
-                               spin_unlock_bh(&n_ptr->lock);
-                               return;
-                       }
-               }
-               addr = &link->media_addr;
-               if (memcmp(addr, &media_addr, sizeof(*addr))) {
-                       if (tipc_link_is_up(link) || (!link->started)) {
-                               disc_dupl_alert(b_ptr, orig, &media_addr);
-                               spin_unlock_bh(&n_ptr->lock);
-                               return;
-                       }
-                       warn("Resetting link <%s>, peer interface address changed\n",
-                            link->name);
-                       memcpy(addr, &media_addr, sizeof(*addr));
-                       tipc_link_reset(link);
+                       tipc_node_unlock(n_ptr);
+                       return;
                }
-               link_fully_up = link_working_working(link);
-               spin_unlock_bh(&n_ptr->lock);
-               if ((type == DSC_RESP_MSG) || link_fully_up)
+       }
+
+       /*
+        * Ensure requesting node's media address is correct
+        *
+        * If media address doesn't match and the link is working, reject the
+        * request (must be from a duplicate node).
+        *
+        * If media address doesn't match and the link is not working, accept
+        * the new media address and reset the link to ensure it starts up
+        * cleanly.
+        */
+       addr = &link->media_addr;
+       if (memcmp(addr, &media_addr, sizeof(*addr))) {
+               if (tipc_link_is_up(link) || (!link->started)) {
+                       disc_dupl_alert(b_ptr, orig, &media_addr);
+                       tipc_node_unlock(n_ptr);
                        return;
-               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
-               if (rbuf != NULL) {
-                       b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+               }
+               warn("Resetting link <%s>, peer interface address changed\n",
+                    link->name);
+               memcpy(addr, &media_addr, sizeof(*addr));
+               tipc_link_reset(link);
+       }
+
+       /* Accept discovery message & send response, if necessary */
+       link_fully_up = link_working_working(link);
+
+       if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
+               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
+               if (rbuf) {
+                       b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
                        buf_discard(rbuf);
                }
        }
+
+       tipc_node_unlock(n_ptr);
 }
 
 /**
@@ -249,9 +263,9 @@ void tipc_disc_update_link_req(struct link_req *req)
 
 static void disc_timeout(struct link_req *req)
 {
-       spin_lock_bh(&req->bearer->publ.lock);
+       spin_lock_bh(&req->bearer->lock);
 
-       req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+       req->bearer->media->send_msg(req->buf, req->bearer, &req->dest);
 
        if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
            (req->timer_intv == TIPC_LINK_REQ_FAST)) {
@@ -266,7 +280,7 @@ static void disc_timeout(struct link_req *req)
        }
        k_start_timer(&req->timer, req->timer_intv);
 
-       spin_unlock_bh(&req->bearer->publ.lock);
+       spin_unlock_bh(&req->bearer->lock);
 }
 
 /**
@@ -274,15 +288,13 @@ static void disc_timeout(struct link_req *req)
  * @b_ptr: ptr to bearer issuing requests
  * @dest: destination address for request messages
  * @dest_domain: network domain of node(s) which should respond to message
- * @req_links: max number of desired links
  *
  * Returns pointer to link request structure, or NULL if unable to create.
  */
 
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
+struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr,
                                         const struct tipc_media_addr *dest,
-                                        u32 dest_domain,
-                                        u32 req_links)
+                                        u32 dest_domain)
 {
        struct link_req *req;
 
@@ -290,7 +302,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
        if (!req)
                return NULL;
 
-       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
        if (!req->buf) {
                kfree(req);
                return NULL;
index d2c3cff..e48a167 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/discover.h
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 struct link_req;
 
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
+struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr,
                                         const struct tipc_media_addr *dest,
-                                        u32 dest_domain,
-                                        u32 req_links);
+                                        u32 dest_domain);
 void tipc_disc_update_link_req(struct link_req *req);
 void tipc_disc_stop_link_req(struct link_req *req);
 
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
+void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
 
 #endif
index 18702f5..43639ff 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/link.c: TIPC link code
  *
  * Copyright (c) 1996-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,7 +90,7 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
-static int  link_send_sections_long(struct port *sender,
+static int  link_send_sections_long(struct tipc_port *sender,
                                    struct iovec const *msg_sect,
                                    u32 num_sect, u32 destnode);
 static void link_check_defragm_bufs(struct link *l_ptr);
@@ -113,7 +113,7 @@ static void link_init_max_pkt(struct link *l_ptr)
 {
        u32 max_pkt;
 
-       max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+       max_pkt = (l_ptr->b_ptr->mtu & ~3);
        if (max_pkt > MAX_MSG_SIZE)
                max_pkt = MAX_MSG_SIZE;
 
@@ -246,9 +246,6 @@ static void link_timeout(struct link *l_ptr)
        l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
        l_ptr->stats.queue_sz_counts++;
 
-       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
-               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
-
        if (l_ptr->first_out) {
                struct tipc_msg *msg = buf_msg(l_ptr->first_out);
                u32 length = msg_size(msg);
@@ -296,19 +293,35 @@ static void link_set_timer(struct link *l_ptr, u32 time)
 
 /**
  * tipc_link_create - create a new link
+ * @n_ptr: pointer to associated node
  * @b_ptr: pointer to associated bearer
- * @peer: network address of node at other end of link
  * @media_addr: media address to use when sending messages over link
  *
  * Returns pointer to link.
  */
 
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct link *tipc_link_create(struct tipc_node *n_ptr,
+                             struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr)
 {
        struct link *l_ptr;
        struct tipc_msg *msg;
        char *if_name;
+       char addr_string[16];
+       u32 peer = n_ptr->addr;
+
+       if (n_ptr->link_cnt >= 2) {
+               tipc_addr_string_fill(addr_string, n_ptr->addr);
+               err("Attempt to establish third link to %s\n", addr_string);
+               return NULL;
+       }
+
+       if (n_ptr->links[b_ptr->identity]) {
+               tipc_addr_string_fill(addr_string, n_ptr->addr);
+               err("Attempt to establish second link on <%s> to %s\n",
+                   b_ptr->name, addr_string);
+               return NULL;
+       }
 
        l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
        if (!l_ptr) {
@@ -317,7 +330,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
        }
 
        l_ptr->addr = peer;
-       if_name = strchr(b_ptr->publ.name, ':') + 1;
+       if_name = strchr(b_ptr->name, ':') + 1;
        sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
                tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
                tipc_node(tipc_own_addr),
@@ -325,6 +338,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
                tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
                /* note: peer i/f is appended to link name by reset/activate */
        memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
+       l_ptr->owner = n_ptr;
        l_ptr->checkpoint = 1;
        l_ptr->b_ptr = b_ptr;
        link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
@@ -348,11 +362,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
 
        link_reset_statistics(l_ptr);
 
-       l_ptr->owner = tipc_node_attach_link(l_ptr);
-       if (!l_ptr->owner) {
-               kfree(l_ptr);
-               return NULL;
-       }
+       tipc_node_attach_link(n_ptr, l_ptr);
 
        k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
        list_add_tail(&l_ptr->link_list, &b_ptr->links);
@@ -391,7 +401,9 @@ void tipc_link_delete(struct link *l_ptr)
 
 static void link_start(struct link *l_ptr)
 {
+       tipc_node_lock(l_ptr->owner);
        link_state_event(l_ptr, STARTING_EVT);
+       tipc_node_unlock(l_ptr->owner);
 }
 
 /**
@@ -406,7 +418,7 @@ static void link_start(struct link *l_ptr)
 
 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        spin_lock_bh(&tipc_port_list_lock);
        p_ptr = tipc_port_lock(origport);
@@ -415,7 +427,7 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
                        goto exit;
                if (!list_empty(&p_ptr->wait_list))
                        goto exit;
-               p_ptr->publ.congested = 1;
+               p_ptr->congested = 1;
                p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
                list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
                l_ptr->stats.link_congs++;
@@ -428,8 +440,8 @@ exit:
 
 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
 {
-       struct port *p_ptr;
-       struct port *temp_p_ptr;
+       struct tipc_port *p_ptr;
+       struct tipc_port *temp_p_ptr;
        int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 
        if (all)
@@ -445,11 +457,11 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
                if (win <= 0)
                        break;
                list_del_init(&p_ptr->wait_list);
-               spin_lock_bh(p_ptr->publ.lock);
-               p_ptr->publ.congested = 0;
-               p_ptr->wakeup(&p_ptr->publ);
+               spin_lock_bh(p_ptr->lock);
+               p_ptr->congested = 0;
+               p_ptr->wakeup(p_ptr);
                win -= p_ptr->waiting_pkts;
-               spin_unlock_bh(p_ptr->publ.lock);
+               spin_unlock_bh(p_ptr->lock);
        }
 
 exit:
@@ -549,7 +561,7 @@ void tipc_link_reset(struct link *l_ptr)
        tipc_node_link_down(l_ptr->owner, l_ptr);
        tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
 
-       if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
+       if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
            l_ptr->owner->permit_changeover) {
                l_ptr->reset_checkpoint = checkpoint;
                l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -824,7 +836,10 @@ static void link_add_to_outqueue(struct link *l_ptr,
                l_ptr->last_out = buf;
        } else
                l_ptr->first_out = l_ptr->last_out = buf;
+
        l_ptr->out_queue_size++;
+       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
 }
 
 /*
@@ -867,9 +882,6 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 
        /* Packet can be queued or sent: */
 
-       if (queue_size > l_ptr->stats.max_queue_sz)
-               l_ptr->stats.max_queue_sz = queue_size;
-
        if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
                   !link_congested(l_ptr))) {
                link_add_to_outqueue(l_ptr, buf, msg);
@@ -1027,12 +1039,12 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
  * except for total message length.
  * Returns user data length or errno.
  */
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
                                 const u32 num_sect,
                                 u32 destaddr)
 {
-       struct tipc_msg *hdr = &sender->publ.phdr;
+       struct tipc_msg *hdr = &sender->phdr;
        struct link *l_ptr;
        struct sk_buff *buf;
        struct tipc_node *node;
@@ -1045,7 +1057,7 @@ again:
         * (Must not hold any locks while building message.)
         */
 
-       res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
+       res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
                        !sender->user_port, &buf);
 
        read_lock_bh(&tipc_net_lock);
@@ -1056,7 +1068,7 @@ again:
                if (likely(l_ptr)) {
                        if (likely(buf)) {
                                res = link_send_buf_fast(l_ptr, buf,
-                                                        &sender->publ.max_pkt);
+                                                        &sender->max_pkt);
                                if (unlikely(res < 0))
                                        buf_discard(buf);
 exit:
@@ -1075,7 +1087,7 @@ exit:
                        if (link_congested(l_ptr) ||
                            !list_empty(&l_ptr->b_ptr->cong_links)) {
                                res = link_schedule_port(l_ptr,
-                                                        sender->publ.ref, res);
+                                                        sender->ref, res);
                                goto exit;
                        }
 
@@ -1084,12 +1096,12 @@ exit:
                         * then re-try fast path or fragment the message
                         */
 
-                       sender->publ.max_pkt = l_ptr->max_pkt;
+                       sender->max_pkt = l_ptr->max_pkt;
                        tipc_node_unlock(node);
                        read_unlock_bh(&tipc_net_lock);
 
 
-                       if ((msg_hdr_sz(hdr) + res) <= sender->publ.max_pkt)
+                       if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
                                goto again;
 
                        return link_send_sections_long(sender, msg_sect,
@@ -1123,14 +1135,14 @@ exit:
  *
  * Returns user data length or errno.
  */
-static int link_send_sections_long(struct port *sender,
+static int link_send_sections_long(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
                                   u32 num_sect,
                                   u32 destaddr)
 {
        struct link *l_ptr;
        struct tipc_node *node;
-       struct tipc_msg *hdr = &sender->publ.phdr;
+       struct tipc_msg *hdr = &sender->phdr;
        u32 dsz = msg_data_sz(hdr);
        u32 max_pkt, fragm_sz, rest;
        struct tipc_msg fragm_hdr;
@@ -1142,7 +1154,7 @@ static int link_send_sections_long(struct port *sender,
 
 again:
        fragm_no = 1;
-       max_pkt = sender->publ.max_pkt - INT_H_SIZE;
+       max_pkt = sender->max_pkt - INT_H_SIZE;
                /* leave room for tunnel header in case of link changeover */
        fragm_sz = max_pkt - INT_H_SIZE;
                /* leave room for fragmentation header in each fragment */
@@ -1157,7 +1169,7 @@ again:
 
        tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
                 INT_H_SIZE, msg_destnode(hdr));
-       msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+       msg_set_link_selector(&fragm_hdr, sender->ref);
        msg_set_size(&fragm_hdr, max_pkt);
        msg_set_fragm_no(&fragm_hdr, 1);
 
@@ -1238,13 +1250,13 @@ error:
        node = tipc_node_find(destaddr);
        if (likely(node)) {
                tipc_node_lock(node);
-               l_ptr = node->active_links[sender->publ.ref & 1];
+               l_ptr = node->active_links[sender->ref & 1];
                if (!l_ptr) {
                        tipc_node_unlock(node);
                        goto reject;
                }
                if (l_ptr->max_pkt < max_pkt) {
-                       sender->publ.max_pkt = l_ptr->max_pkt;
+                       sender->max_pkt = l_ptr->max_pkt;
                        tipc_node_unlock(node);
                        for (; buf_chain; buf_chain = buf) {
                                buf = buf_chain->next;
@@ -1441,7 +1453,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
                info("Outstanding acks: %lu\n",
                     (unsigned long) TIPC_SKB_CB(buf)->handle);
 
-               n_ptr = l_ptr->owner->next;
+               n_ptr = tipc_bclink_retransmit_to();
                tipc_node_lock(n_ptr);
 
                tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -1595,11 +1607,10 @@ static int link_recv_buf_validate(struct sk_buff *buf)
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
 
-void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
 {
        read_lock_bh(&tipc_net_lock);
        while (head) {
-               struct bearer *b_ptr = (struct bearer *)tb_ptr;
                struct tipc_node *n_ptr;
                struct link *l_ptr;
                struct sk_buff *crs;
@@ -1735,10 +1746,6 @@ deliver:
                                                tipc_node_unlock(n_ptr);
                                                tipc_link_recv_bundle(buf);
                                                continue;
-                                       case ROUTE_DISTRIBUTOR:
-                                               tipc_node_unlock(n_ptr);
-                                               buf_discard(buf);
-                                               continue;
                                        case NAME_DISTRIBUTOR:
                                                tipc_node_unlock(n_ptr);
                                                tipc_named_recv(buf);
@@ -1765,6 +1772,10 @@ deliver:
                                                        goto protocol_check;
                                                }
                                                break;
+                                       default:
+                                               buf_discard(buf);
+                                               buf = NULL;
+                                               break;
                                        }
                                }
                                tipc_node_unlock(n_ptr);
@@ -1900,6 +1911,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
        struct sk_buff *buf = NULL;
        struct tipc_msg *msg = l_ptr->pmsg;
        u32 msg_size = sizeof(l_ptr->proto_msg);
+       int r_flag;
 
        if (link_blocked(l_ptr))
                return;
@@ -1950,15 +1962,14 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
                msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
                msg_set_seq_gap(msg, 0);
                msg_set_next_sent(msg, 1);
+               msg_set_probe(msg, 0);
                msg_set_link_tolerance(msg, l_ptr->tolerance);
                msg_set_linkprio(msg, l_ptr->priority);
                msg_set_max_pkt(msg, l_ptr->max_pkt_target);
        }
 
-       if (tipc_node_has_redundant_links(l_ptr->owner))
-               msg_set_redundant_link(msg);
-       else
-               msg_clear_redundant_link(msg);
+       r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
+       msg_set_redundant_link(msg, r_flag);
        msg_set_linkprio(msg, l_ptr->priority);
 
        /* Ensure sequence number will not fit : */
@@ -1978,7 +1989,6 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
                skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
                return;
        }
-       msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
 
        /* Message can be sent */
 
@@ -2066,7 +2076,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
                l_ptr->peer_bearer_id = msg_bearer_id(msg);
 
                /* Synchronize broadcast sequence numbers */
-               if (!tipc_node_has_redundant_links(l_ptr->owner))
+               if (!tipc_node_redundant_links(l_ptr->owner))
                        l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
                break;
        case STATE_MSG:
@@ -2413,9 +2423,6 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
        else
                destaddr = msg_destnode(inmsg);
 
-       if (msg_routed(inmsg))
-               msg_set_prevnode(inmsg, tipc_own_addr);
-
        /* Prepare reusable fragment header: */
 
        tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
@@ -2618,6 +2625,9 @@ static void link_check_defragm_bufs(struct link *l_ptr)
 
 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
 {
+       if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
+               return;
+
        l_ptr->tolerance = tolerance;
        l_ptr->continuity_interval =
                ((tolerance / 4) > 500) ? 500 : tolerance / 4;
@@ -2658,7 +2668,7 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
 static struct link *link_find_link(const char *name, struct tipc_node **node)
 {
        struct link_name link_name_parts;
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        struct link *l_ptr;
 
        if (!link_name_validate(name, &link_name_parts))
@@ -2961,7 +2971,7 @@ static void link_print(struct link *l_ptr, const char *str)
 
        tipc_printf(buf, str);
        tipc_printf(buf, "Link %x<%s>:",
-                   l_ptr->addr, l_ptr->b_ptr->publ.name);
+                   l_ptr->addr, l_ptr->b_ptr->name);
 
 #ifdef CONFIG_TIPC_DEBUG
        if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
@@ -2981,9 +2991,9 @@ static void link_print(struct link *l_ptr, const char *str)
                     != (l_ptr->out_queue_size - 1)) ||
                    (l_ptr->last_out->next != NULL)) {
                        tipc_printf(buf, "\nSend queue inconsistency\n");
-                       tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
-                       tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
-                       tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
+                       tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
+                       tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
+                       tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
                }
        } else
                tipc_printf(buf, "[]");
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 70967e6..e6a30db 100644
@@ -2,7 +2,7 @@
  * net/tipc/link.h: Include file for TIPC link code
  *
  * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@ struct link {
        u32 checkpoint;
        u32 peer_session;
        u32 peer_bearer_id;
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        u32 tolerance;
        u32 continuity_interval;
        u32 abort_limit;
@@ -196,24 +196,19 @@ struct link {
                u32 bearer_congs;
                u32 deferred_recv;
                u32 duplicates;
-
-               /* for statistical profiling of send queue size */
-
-               u32 max_queue_sz;
-               u32 accu_queue_sz;
-               u32 queue_sz_counts;
-
-               /* for statistical profiling of message lengths */
-
-               u32 msg_length_counts;
-               u32 msg_lengths_total;
-               u32 msg_length_profile[7];
+               u32 max_queue_sz;       /* send queue size high water mark */
+               u32 accu_queue_sz;      /* used for send queue size profiling */
+               u32 queue_sz_counts;    /* used for send queue size profiling */
+               u32 msg_length_counts;  /* used for message length profiling */
+               u32 msg_lengths_total;  /* used for message length profiling */
+               u32 msg_length_profile[7]; /* used for msg. length profiling */
        } stats;
 };
 
-struct port;
+struct tipc_port;
 
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct link *tipc_link_create(struct tipc_node *n_ptr,
+                             struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
 void tipc_link_delete(struct link *l_ptr);
 void tipc_link_changeover(struct link *l_ptr);
@@ -230,7 +225,7 @@ void tipc_link_reset(struct link *l_ptr);
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
 int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
                                 const u32 num_sect,
                                 u32 destnode);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index bb6180c..6d92d17 100644
@@ -2,7 +2,7 @@
  * net/tipc/msg.c: TIPC message header routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -192,8 +192,6 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                default:
                        tipc_printf(buf, "UNKNOWN TYPE %u", msg_type(msg));
                }
-               if (msg_routed(msg) && !msg_non_seq(msg))
-                       tipc_printf(buf, "ROUT:");
                if (msg_reroute_cnt(msg))
                        tipc_printf(buf, "REROUTED(%u):",
                                    msg_reroute_cnt(msg));
@@ -210,8 +208,6 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                default:
                        tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
                }
-               if (msg_routed(msg))
-                       tipc_printf(buf, "ROUT:");
                if (msg_reroute_cnt(msg))
                        tipc_printf(buf, "REROUTED(%u):",
                                    msg_reroute_cnt(msg));
@@ -232,13 +228,10 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                default:
                        tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
                }
-               if (msg_routed(msg))
-                       tipc_printf(buf, "ROUT:");
                if (msg_reroute_cnt(msg))
                        tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg));
                break;
        case LINK_PROTOCOL:
-               tipc_printf(buf, "PROT:TIM(%u):", msg_timestamp(msg));
                switch (msg_type(msg)) {
                case STATE_MSG:
                        tipc_printf(buf, "STATE:");
@@ -275,33 +268,6 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                        tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
                }
                break;
-       case ROUTE_DISTRIBUTOR:
-               tipc_printf(buf, "ROUTING_MNG:");
-               switch (msg_type(msg)) {
-               case EXT_ROUTING_TABLE:
-                       tipc_printf(buf, "EXT_TBL:");
-                       tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
-                       break;
-               case LOCAL_ROUTING_TABLE:
-                       tipc_printf(buf, "LOCAL_TBL:");
-                       tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
-                       break;
-               case SLAVE_ROUTING_TABLE:
-                       tipc_printf(buf, "DP_TBL:");
-                       tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
-                       break;
-               case ROUTE_ADDITION:
-                       tipc_printf(buf, "ADD:");
-                       tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
-                       break;
-               case ROUTE_REMOVAL:
-                       tipc_printf(buf, "REMOVE:");
-                       tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
-                       break;
-               default:
-                       tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
-               }
-               break;
        case LINK_CONFIG:
                tipc_printf(buf, "CFG:");
                switch (msg_type(msg)) {
@@ -381,20 +347,15 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                        tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
                        tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
                }
-               if (msg_routed(msg) && !msg_non_seq(msg))
-                       tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
        }
        if (msg_user(msg) == NAME_DISTRIBUTOR) {
                tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
                tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
-               if (msg_routed(msg))
-                       tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
        }
 
        if (msg_user(msg) ==  LINK_CONFIG) {
                u32 *raw = (u32 *)msg;
                struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
-               tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
                tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
                tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
                tipc_media_addr_printf(buf, orig);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 92c4c4f..de02339 100644
@@ -2,7 +2,7 @@
  * net/tipc/msg.h: Include file for TIPC message header routines
  *
  * Copyright (c) 2000-2007, Ericsson AB
- * Copyright (c) 2005-2008, Wind River Systems
+ * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -421,13 +421,6 @@ static inline int msg_is_dest(struct tipc_msg *m, u32 d)
        return msg_short(m) || (msg_destnode(m) == d);
 }
 
-static inline u32 msg_routed(struct tipc_msg *m)
-{
-       if (likely(msg_short(m)))
-               return 0;
-       return (msg_destnode(m) ^ msg_orignode(m)) >> 11;
-}
-
 static inline u32 msg_nametype(struct tipc_msg *m)
 {
        return msg_word(m, 8);
@@ -438,26 +431,6 @@ static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
        msg_set_word(m, 8, n);
 }
 
-static inline u32 msg_transp_seqno(struct tipc_msg *m)
-{
-       return msg_word(m, 8);
-}
-
-static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
-{
-       msg_set_word(m, 8, n);
-}
-
-static inline u32 msg_timestamp(struct tipc_msg *m)
-{
-       return msg_word(m, 8);
-}
-
-static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
-{
-       msg_set_word(m, 8, n);
-}
-
 static inline u32 msg_nameinst(struct tipc_msg *m)
 {
        return msg_word(m, 9);
@@ -545,7 +518,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 #define  NAME_DISTRIBUTOR     11
 #define  MSG_FRAGMENTER       12
 #define  LINK_CONFIG          13
-#define  DSC_H_SIZE           40
 
 /*
  *  Connection management protocol messages
@@ -577,16 +549,6 @@ static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 1, 16, 0x1fff, n);
 }
 
-static inline u32 msg_req_links(struct tipc_msg *m)
-{
-       return msg_bits(m, 1, 16, 0xfff);
-}
-
-static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
-{
-       msg_set_bits(m, 1, 16, 0xfff, n);
-}
-
 
 /*
  * Word 2
@@ -749,14 +711,9 @@ static inline u32 msg_redundant_link(struct tipc_msg *m)
        return msg_bits(m, 5, 12, 0x1);
 }
 
-static inline void msg_set_redundant_link(struct tipc_msg *m)
+static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
 {
-       msg_set_bits(m, 5, 12, 0x1, 1);
-}
-
-static inline void msg_clear_redundant_link(struct tipc_msg *m)
-{
-       msg_set_bits(m, 5, 12, 0x1, 0);
+       msg_set_bits(m, 5, 12, 0x1, r);
 }
 
 
@@ -804,21 +761,6 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
-/*
- * Routing table message data
- */
-
-
-static inline u32 msg_remote_node(struct tipc_msg *m)
-{
-       return msg_word(m, msg_hdr_sz(m)/4);
-}
-
-static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
-{
-       msg_set_word(m, msg_hdr_sz(m)/4, a);
-}
-
 /*
  * Segmentation message types
  */
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 483c226..c9fa6df 100644
@@ -2,7 +2,7 @@
  * net/tipc/name_distr.c: TIPC name distribution code
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -109,11 +109,9 @@ static void named_cluster_distribute(struct sk_buff *buf)
 {
        struct sk_buff *buf_copy;
        struct tipc_node *n_ptr;
-       u32 n_num;
 
-       for (n_num = 1; n_num <= tipc_net.highest_node; n_num++) {
-               n_ptr = tipc_net.nodes[n_num];
-               if (n_ptr && tipc_node_has_active_links(n_ptr)) {
+       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+               if (tipc_node_active_links(n_ptr)) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (!buf_copy)
                                break;
@@ -214,17 +212,16 @@ exit:
 }
 
 /**
- * node_is_down - remove publication associated with a failed node
+ * named_purge_publ - remove publication associated with a failed node
  *
  * Invoked for each publication issued by a newly failed node.
  * Removes publication structure from name table & deletes it.
  * In rare cases the link may have come back up again when this
  * function is called, and we have two items representing the same
  * publication. Nudge this item's key to distinguish it from the other.
- * (Note: Publication's node subscription is already unsubscribed.)
  */
 
-static void node_is_down(struct publication *publ)
+static void named_purge_publ(struct publication *publ)
 {
        struct publication *p;
 
@@ -232,6 +229,8 @@ static void node_is_down(struct publication *publ)
        publ->key += 1222345;
        p = tipc_nametbl_remove_publ(publ->type, publ->lower,
                                     publ->node, publ->ref, publ->key);
+       if (p)
+               tipc_nodesub_unsubscribe(&p->subscr);
        write_unlock_bh(&tipc_nametbl_lock);
 
        if (p != publ) {
@@ -268,7 +267,8 @@ void tipc_named_recv(struct sk_buff *buf)
                                tipc_nodesub_subscribe(&publ->subscr,
                                                       msg_orignode(msg),
                                                       publ,
-                                                      (net_ev_handler)node_is_down);
+                                                      (net_ev_handler)
+                                                      named_purge_publ);
                        }
                } else if (msg_type(msg) == WITHDRAWAL) {
                        publ = tipc_nametbl_remove_publ(ntohl(item->type),
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 9bacfd0..68b3dd6 100644
@@ -2,7 +2,7 @@
  * net/tipc/net.c: TIPC network routing code
  *
  * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,7 @@
 #include "name_distr.h"
 #include "subscr.h"
 #include "port.h"
+#include "node.h"
 #include "config.h"
 
 /*
 */
 
 DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net;
-
-static int net_start(void)
-{
-       tipc_net.nodes = kcalloc(tipc_max_nodes + 1,
-                                sizeof(*tipc_net.nodes), GFP_ATOMIC);
-       tipc_net.highest_node = 0;
-
-       return tipc_net.nodes ? 0 : -ENOMEM;
-}
-
-static void net_stop(void)
-{
-       u32 n_num;
-
-       for (n_num = 1; n_num <= tipc_net.highest_node; n_num++)
-               tipc_node_delete(tipc_net.nodes[n_num]);
-       kfree(tipc_net.nodes);
-       tipc_net.nodes = NULL;
-}
 
 static void net_route_named_msg(struct sk_buff *buf)
 {
@@ -217,9 +198,6 @@ int tipc_net_start(u32 addr)
        tipc_named_reinit();
        tipc_port_reinit();
 
-       res = net_start();
-       if (res)
-               return res;
        res = tipc_bclink_init();
        if (res)
                return res;
@@ -235,14 +213,16 @@ int tipc_net_start(u32 addr)
 
 void tipc_net_stop(void)
 {
+       struct tipc_node *node, *t_node;
+
        if (tipc_mode != TIPC_NET_MODE)
                return;
        write_lock_bh(&tipc_net_lock);
        tipc_bearer_stop();
        tipc_mode = TIPC_NODE_MODE;
        tipc_bclink_stop();
-       net_stop();
+       list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+               tipc_node_delete(node);
        write_unlock_bh(&tipc_net_lock);
        info("Left network mode\n");
 }
-
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 4ae59ad..9eb4b9e 100644
@@ -2,7 +2,7 @@
  * net/tipc/net.h: Include file for TIPC network routing code
  *
  * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #ifndef _TIPC_NET_H
 #define _TIPC_NET_H
 
-struct tipc_node;
-
-/**
- * struct network - TIPC network structure
- * @nodes: array of pointers to all nodes within cluster
- * @highest_node: id of highest numbered node within cluster
- * @links: number of (unicast) links to cluster
- */
-
-struct network {
-       struct tipc_node **nodes;
-       u32 highest_node;
-       u32 links;
-};
-
-
-extern struct network tipc_net;
 extern rwlock_t tipc_net_lock;
 
 void tipc_net_route_msg(struct sk_buff *buf);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3af53e3..2d106ef 100644
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,8 +44,32 @@ static void node_established_contact(struct tipc_node *n_ptr);
 
 static DEFINE_SPINLOCK(node_create_lock);
 
+static struct hlist_head node_htable[NODE_HTABLE_SIZE];
+LIST_HEAD(tipc_node_list);
+static u32 tipc_num_nodes;
+
+static atomic_t tipc_num_links = ATOMIC_INIT(0);
 u32 tipc_own_tag;
 
+/**
+ * tipc_node_find - locate specified node object, if it exists
+ */
+
+struct tipc_node *tipc_node_find(u32 addr)
+{
+       struct tipc_node *node;
+       struct hlist_node *pos;
+
+       if (unlikely(!in_own_cluster(addr)))
+               return NULL;
+
+       hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
+               if (node->addr == addr)
+                       return node;
+       }
+       return NULL;
+}
+
 /**
  * tipc_node_create - create neighboring node
  *
@@ -58,8 +82,7 @@ u32 tipc_own_tag;
 
 struct tipc_node *tipc_node_create(u32 addr)
 {
-       struct tipc_node *n_ptr;
-       u32 n_num;
+       struct tipc_node *n_ptr, *temp_node;
 
        spin_lock_bh(&node_create_lock);
 
@@ -78,12 +101,19 @@ struct tipc_node *tipc_node_create(u32 addr)
 
        n_ptr->addr = addr;
        spin_lock_init(&n_ptr->lock);
+       INIT_HLIST_NODE(&n_ptr->hash);
+       INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->nsub);
 
-       n_num = tipc_node(addr);
-       tipc_net.nodes[n_num] = n_ptr;
-       if (n_num > tipc_net.highest_node)
-               tipc_net.highest_node = n_num;
+       hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+
+       list_for_each_entry(temp_node, &tipc_node_list, list) {
+               if (n_ptr->addr < temp_node->addr)
+                       break;
+       }
+       list_add_tail(&n_ptr->list, &temp_node->list);
+
+       tipc_num_nodes++;
 
        spin_unlock_bh(&node_create_lock);
        return n_ptr;
@@ -91,18 +121,11 @@ struct tipc_node *tipc_node_create(u32 addr)
 
 void tipc_node_delete(struct tipc_node *n_ptr)
 {
-       u32 n_num;
-
-       if (!n_ptr)
-               return;
-
-       n_num = tipc_node(n_ptr->addr);
-       tipc_net.nodes[n_num] = NULL;
+       list_del(&n_ptr->list);
+       hlist_del(&n_ptr->hash);
        kfree(n_ptr);
 
-       while (!tipc_net.nodes[tipc_net.highest_node])
-               if (--tipc_net.highest_node == 0)
-                       break;
+       tipc_num_nodes--;
 }
 
 
@@ -200,54 +223,32 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
                node_lost_contact(n_ptr);
 }
 
-int tipc_node_has_active_links(struct tipc_node *n_ptr)
+int tipc_node_active_links(struct tipc_node *n_ptr)
 {
        return n_ptr->active_links[0] != NULL;
 }
 
-int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
+int tipc_node_redundant_links(struct tipc_node *n_ptr)
 {
        return n_ptr->working_links > 1;
 }
 
 int tipc_node_is_up(struct tipc_node *n_ptr)
 {
-       return tipc_node_has_active_links(n_ptr);
+       return tipc_node_active_links(n_ptr);
 }
 
-struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr)
 {
-       struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);
-
-       if (!n_ptr)
-               n_ptr = tipc_node_create(l_ptr->addr);
-       if (n_ptr) {
-               u32 bearer_id = l_ptr->b_ptr->identity;
-               char addr_string[16];
-
-               if (n_ptr->link_cnt >= 2) {
-                       err("Attempt to create third link to %s\n",
-                           tipc_addr_string_fill(addr_string, n_ptr->addr));
-                       return NULL;
-               }
-
-               if (!n_ptr->links[bearer_id]) {
-                       n_ptr->links[bearer_id] = l_ptr;
-                       tipc_net.links++;
-                       n_ptr->link_cnt++;
-                       return n_ptr;
-               }
-               err("Attempt to establish second link on <%s> to %s\n",
-                   l_ptr->b_ptr->publ.name,
-                   tipc_addr_string_fill(addr_string, l_ptr->addr));
-       }
-       return NULL;
+       n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
+       atomic_inc(&tipc_num_links);
+       n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
 {
        n_ptr->links[l_ptr->b_ptr->identity] = NULL;
-       tipc_net.links--;
+       atomic_dec(&tipc_num_links);
        n_ptr->link_cnt--;
 }
 
@@ -327,7 +328,6 @@ static void node_cleanup_finished(unsigned long node_addr)
 
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
-       struct tipc_node_subscr *ns, *tns;
        char addr_string[16];
        u32 i;
 
@@ -365,12 +365,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
        }
 
        /* Notify subscribers */
-       list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
-               ns->node = NULL;
-               list_del_init(&ns->nodesub_list);
-               tipc_k_signal((Handler)ns->handle_node_down,
-                             (unsigned long)ns->usr_handle);
-       }
+       tipc_nodesub_notify(n_ptr);
 
        /* Prevent re-contact with node until all cleanup is done */
 
@@ -385,7 +380,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
        struct tipc_node *n_ptr;
        struct tipc_node_info node_info;
        u32 payload_size;
-       u32 n_num;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -396,15 +390,14 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
                                                   " (network address)");
 
        read_lock_bh(&tipc_net_lock);
-       if (!tipc_net.nodes) {
+       if (!tipc_num_nodes) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_none();
        }
 
        /* For now, get space for all other nodes */
 
-       payload_size = TLV_SPACE(sizeof(node_info)) *
-               (tipc_net.highest_node - 1);
+       payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
        if (payload_size > 32768u) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -418,9 +411,8 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 
        /* Add TLVs for all nodes in scope */
 
-       for (n_num = 1; n_num <= tipc_net.highest_node; n_num++) {
-               n_ptr = tipc_net.nodes[n_num];
-               if (!n_ptr || !tipc_in_scope(domain, n_ptr->addr))
+       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+               if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
                node_info.up = htonl(tipc_node_is_up(n_ptr));
@@ -439,7 +431,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
        struct tipc_node *n_ptr;
        struct tipc_link_info link_info;
        u32 payload_size;
-       u32 n_num;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -456,7 +447,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 
        /* Get space for all unicast links + multicast link */
 
-       payload_size = TLV_SPACE(sizeof(link_info)) * (tipc_net.links + 1);
+       payload_size = TLV_SPACE(sizeof(link_info)) *
+               (atomic_read(&tipc_num_links) + 1);
        if (payload_size > 32768u) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -470,18 +462,17 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 
        /* Add TLV for broadcast link */
 
-       link_info.dest = htonl(tipc_own_addr & 0xfffff00);
+       link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
        link_info.up = htonl(1);
        strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
        /* Add TLVs for any other links in scope */
 
-       for (n_num = 1; n_num <= tipc_net.highest_node; n_num++) {
+       list_for_each_entry(n_ptr, &tipc_node_list, list) {
                u32 i;
 
-               n_ptr = tipc_net.nodes[n_num];
-               if (!n_ptr || !tipc_in_scope(domain, n_ptr->addr))
+               if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 206a8ef..5c61afc 100644
@@ -2,7 +2,7 @@
  * net/tipc/node.h: Include file for TIPC node management routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,8 @@
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
  * @lock: spinlock governing access to structure
- * @next: pointer to next node in sorted list of cluster's nodes
+ * @hash: links to adjacent nodes in unsorted hash chain
+ * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @nsub: list of "node down" subscriptions monitoring node
  * @active_links: pointers to active links to node
  * @links: pointers to all links to node
@@ -69,7 +70,8 @@
 struct tipc_node {
        u32 addr;
        spinlock_t lock;
-       struct tipc_node *next;
+       struct hlist_node hash;
+       struct list_head list;
        struct list_head nsub;
        struct link *active_links[2];
        struct link *links[MAX_BEARERS];
@@ -90,27 +92,35 @@ struct tipc_node {
        } bclink;
 };
 
+#define NODE_HTABLE_SIZE 512
+extern struct list_head tipc_node_list;
+
+/*
+ * A trivial power-of-two bitmask technique is used for speed, since this
+ * operation is done for every incoming TIPC packet. The number of hash table
+ * entries has been chosen so that no hash chain exceeds 8 nodes and will
+ * usually be much smaller (typically only a single node).
+ */
+static inline unsigned int tipc_hashfn(u32 addr)
+{
+       return addr & (NODE_HTABLE_SIZE - 1);
+}
+
 extern u32 tipc_own_tag;
 
+struct tipc_node *tipc_node_find(u32 addr);
 struct tipc_node *tipc_node_create(u32 addr);
 void tipc_node_delete(struct tipc_node *n_ptr);
-struct tipc_node *tipc_node_attach_link(struct link *l_ptr);
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr);
 void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
 void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
-int tipc_node_has_active_links(struct tipc_node *n_ptr);
-int tipc_node_has_redundant_links(struct tipc_node *n_ptr);
+int tipc_node_active_links(struct tipc_node *n_ptr);
+int tipc_node_redundant_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
 struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
 
-static inline struct tipc_node *tipc_node_find(u32 addr)
-{
-       if (likely(in_own_cluster(addr)))
-               return tipc_net.nodes[tipc_node(addr)];
-       return NULL;
-}
-
 static inline void tipc_node_lock(struct tipc_node *n_ptr)
 {
        spin_lock_bh(&n_ptr->lock);
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 018a553..c3c2815 100644
@@ -2,7 +2,7 @@
  * net/tipc/node_subscr.c: TIPC "node down" subscription handling
  *
  * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -76,3 +76,22 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
        list_del_init(&node_sub->nodesub_list);
        tipc_node_unlock(node_sub->node);
 }
+
+/**
+ * tipc_nodesub_notify - notify subscribers that a node is unreachable
+ *
+ * Note: node is locked by caller
+ */
+
+void tipc_nodesub_notify(struct tipc_node *node)
+{
+       struct tipc_node_subscr *ns;
+
+       list_for_each_entry(ns, &node->nsub, nodesub_list) {
+               if (ns->handle_node_down) {
+                       tipc_k_signal((Handler)ns->handle_node_down,
+                                     (unsigned long)ns->usr_handle);
+                       ns->handle_node_down = NULL;
+               }
+       }
+}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 006ed73..4bc2ca0 100644
@@ -2,7 +2,7 @@
  * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
  *
  * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -59,5 +59,6 @@ struct tipc_node_subscr {
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
                            void *usr_handle, net_ev_handler handle_down);
 void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
+void tipc_nodesub_notify(struct tipc_node *node);
 
 #endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 067bab2..6ff78f9 100644
@@ -2,7 +2,7 @@
  * net/tipc/port.c: TIPC port code
  *
  * Copyright (c) 1992-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -54,33 +54,19 @@ static DEFINE_SPINLOCK(queue_lock);
 
 static LIST_HEAD(ports);
 static void port_handle_node_down(unsigned long ref);
-static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
-static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
 static void port_timeout(unsigned long ref);
 
 
-static u32 port_peernode(struct port *p_ptr)
+static u32 port_peernode(struct tipc_port *p_ptr)
 {
-       return msg_destnode(&p_ptr->publ.phdr);
+       return msg_destnode(&p_ptr->phdr);
 }
 
-static u32 port_peerport(struct port *p_ptr)
+static u32 port_peerport(struct tipc_port *p_ptr)
 {
-       return msg_destport(&p_ptr->publ.phdr);
-}
-
-static u32 port_out_seqno(struct port *p_ptr)
-{
-       return msg_transp_seqno(&p_ptr->publ.phdr);
-}
-
-static void port_incr_out_seqno(struct port *p_ptr)
-{
-       struct tipc_msg *m = &p_ptr->publ.phdr;
-
-       if (likely(!msg_routed(m)))
-               return;
-       msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+       return msg_destport(&p_ptr->phdr);
 }
 
 /**
@@ -94,7 +80,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
        struct sk_buff *buf;
        struct sk_buff *ibuf = NULL;
        struct port_list dports = {0, NULL, };
-       struct port *oport = tipc_port_deref(ref);
+       struct tipc_port *oport = tipc_port_deref(ref);
        int ext_targets;
        int res;
 
@@ -103,7 +89,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
 
        /* Create multicast message */
 
-       hdr = &oport->publ.phdr;
+       hdr = &oport->phdr;
        msg_set_type(hdr, TIPC_MCAST_MSG);
        msg_set_nametype(hdr, seq->type);
        msg_set_namelower(hdr, seq->lower);
@@ -211,7 +197,7 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
                        void (*wakeup)(struct tipc_port *),
                        const u32 importance)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        u32 ref;
 
@@ -220,21 +206,19 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
                warn("Port creation failed, no memory\n");
                return NULL;
        }
-       ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
+       ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
        if (!ref) {
                warn("Port creation failed, reference table exhausted\n");
                kfree(p_ptr);
                return NULL;
        }
 
-       p_ptr->publ.usr_handle = usr_handle;
-       p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
-       p_ptr->publ.ref = ref;
-       msg = &p_ptr->publ.phdr;
+       p_ptr->usr_handle = usr_handle;
+       p_ptr->max_pkt = MAX_PKT_DEFAULT;
+       p_ptr->ref = ref;
+       msg = &p_ptr->phdr;
        tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
        msg_set_origport(msg, ref);
-       p_ptr->last_in_seqno = 41;
-       p_ptr->sent = 1;
        INIT_LIST_HEAD(&p_ptr->wait_list);
        INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
        p_ptr->dispatcher = dispatcher;
@@ -246,12 +230,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
        INIT_LIST_HEAD(&p_ptr->port_list);
        list_add_tail(&p_ptr->port_list, &ports);
        spin_unlock_bh(&tipc_port_list_lock);
-       return &(p_ptr->publ);
+       return p_ptr;
 }
 
 int tipc_deleteport(u32 ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
        tipc_withdraw(ref, 0, NULL);
@@ -263,7 +247,7 @@ int tipc_deleteport(u32 ref)
        tipc_port_unlock(p_ptr);
 
        k_cancel_timer(&p_ptr->timer);
-       if (p_ptr->publ.connected) {
+       if (p_ptr->connected) {
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
        }
@@ -279,14 +263,14 @@ int tipc_deleteport(u32 ref)
        return 0;
 }
 
-static int port_unreliable(struct port *p_ptr)
+static int port_unreliable(struct tipc_port *p_ptr)
 {
-       return msg_src_droppable(&p_ptr->publ.phdr);
+       return msg_src_droppable(&p_ptr->phdr);
 }
 
 int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
@@ -298,24 +282,24 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
 
 int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
+       msg_set_src_droppable(&p_ptr->phdr, (isunreliable != 0));
        tipc_port_unlock(p_ptr);
        return 0;
 }
 
-static int port_unreturnable(struct port *p_ptr)
+static int port_unreturnable(struct tipc_port *p_ptr)
 {
-       return msg_dest_droppable(&p_ptr->publ.phdr);
+       return msg_dest_droppable(&p_ptr->phdr);
 }
 
 int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
@@ -327,12 +311,12 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
 
 int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
+       msg_set_dest_droppable(&p_ptr->phdr, (isunrejectable != 0));
        tipc_port_unlock(p_ptr);
        return 0;
 }
@@ -345,7 +329,7 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
 static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
                                            u32 origport, u32 orignode,
                                            u32 usr, u32 type, u32 err,
-                                           u32 seqno, u32 ack)
+                                           u32 ack)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -358,7 +342,6 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
                msg_set_destport(msg, destport);
                msg_set_origport(msg, origport);
                msg_set_orignode(msg, orignode);
-               msg_set_transp_seqno(msg, seqno);
                msg_set_msgcnt(msg, ack);
        }
        return buf;
@@ -413,10 +396,10 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        /* send self-abort message when rejecting on a connected port */
        if (msg_connected(msg)) {
                struct sk_buff *abuf = NULL;
-               struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+               struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
 
                if (p_ptr) {
-                       if (p_ptr->publ.connected)
+                       if (p_ptr->connected)
                                abuf = port_build_self_abort_msg(p_ptr, err);
                        tipc_port_unlock(p_ptr);
                }
@@ -429,7 +412,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        return data_sz;
 }
 
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
                              struct iovec const *msg_sect, u32 num_sect,
                              int err)
 {
@@ -446,13 +429,13 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
 
 static void port_timeout(unsigned long ref)
 {
-       struct port *p_ptr = tipc_port_lock(ref);
+       struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
 
        if (!p_ptr)
                return;
 
-       if (!p_ptr->publ.connected) {
+       if (!p_ptr->connected) {
                tipc_port_unlock(p_ptr);
                return;
        }
@@ -463,14 +446,12 @@ static void port_timeout(unsigned long ref)
        } else {
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
-                                          p_ptr->publ.ref,
+                                          p_ptr->ref,
                                           tipc_own_addr,
                                           CONN_MANAGER,
                                           CONN_PROBE,
                                           TIPC_OK,
-                                          port_out_seqno(p_ptr),
                                           0);
-               port_incr_out_seqno(p_ptr);
                p_ptr->probing_state = PROBING;
                k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
        }
@@ -481,7 +462,7 @@ static void port_timeout(unsigned long ref)
 
 static void port_handle_node_down(unsigned long ref)
 {
-       struct port *p_ptr = tipc_port_lock(ref);
+       struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
 
        if (!p_ptr)
@@ -492,73 +473,71 @@ static void port_handle_node_down(unsigned long ref)
 }
 
 
-static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
 {
-       u32 imp = msg_importance(&p_ptr->publ.phdr);
+       u32 imp = msg_importance(&p_ptr->phdr);
 
-       if (!p_ptr->publ.connected)
+       if (!p_ptr->connected)
                return NULL;
        if (imp < TIPC_CRITICAL_IMPORTANCE)
                imp++;
-       return port_build_proto_msg(p_ptr->publ.ref,
+       return port_build_proto_msg(p_ptr->ref,
                                    tipc_own_addr,
                                    port_peerport(p_ptr),
                                    port_peernode(p_ptr),
                                    imp,
                                    TIPC_CONN_MSG,
                                    err,
-                                   p_ptr->last_in_seqno + 1,
                                    0);
 }
 
 
-static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
 {
-       u32 imp = msg_importance(&p_ptr->publ.phdr);
+       u32 imp = msg_importance(&p_ptr->phdr);
 
-       if (!p_ptr->publ.connected)
+       if (!p_ptr->connected)
                return NULL;
        if (imp < TIPC_CRITICAL_IMPORTANCE)
                imp++;
        return port_build_proto_msg(port_peerport(p_ptr),
                                    port_peernode(p_ptr),
-                                   p_ptr->publ.ref,
+                                   p_ptr->ref,
                                    tipc_own_addr,
                                    imp,
                                    TIPC_CONN_MSG,
                                    err,
-                                   port_out_seqno(p_ptr),
                                    0);
 }
 
 void tipc_port_recv_proto_msg(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
-       struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+       struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
        u32 err = TIPC_OK;
        struct sk_buff *r_buf = NULL;
        struct sk_buff *abort_buf = NULL;
 
        if (!p_ptr) {
                err = TIPC_ERR_NO_PORT;
-       } else if (p_ptr->publ.connected) {
+       } else if (p_ptr->connected) {
                if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
                    (port_peerport(p_ptr) != msg_origport(msg))) {
                        err = TIPC_ERR_NO_PORT;
                } else if (msg_type(msg) == CONN_ACK) {
                        int wakeup = tipc_port_congested(p_ptr) &&
-                                    p_ptr->publ.congested &&
+                                    p_ptr->congested &&
                                     p_ptr->wakeup;
                        p_ptr->acked += msg_msgcnt(msg);
                        if (tipc_port_congested(p_ptr))
                                goto exit;
-                       p_ptr->publ.congested = 0;
+                       p_ptr->congested = 0;
                        if (!wakeup)
                                goto exit;
-                       p_ptr->wakeup(&p_ptr->publ);
+                       p_ptr->wakeup(p_ptr);
                        goto exit;
                }
-       } else if (p_ptr->publ.published) {
+       } else if (p_ptr->published) {
                err = TIPC_ERR_NO_PORT;
        }
        if (err) {
@@ -569,7 +548,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
                                             TIPC_HIGH_IMPORTANCE,
                                             TIPC_CONN_MSG,
                                             err,
-                                            0,
                                             0);
                goto exit;
        }
@@ -583,11 +561,9 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
                                             CONN_MANAGER,
                                             CONN_PROBE_REPLY,
                                             TIPC_OK,
-                                            port_out_seqno(p_ptr),
                                             0);
        }
        p_ptr->probing_state = CONFIRMED;
-       port_incr_out_seqno(p_ptr);
 exit:
        if (p_ptr)
                tipc_port_unlock(p_ptr);
@@ -596,29 +572,29 @@ exit:
        buf_discard(buf);
 }
 
-static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
 {
        struct publication *publ;
 
        if (full_id)
                tipc_printf(buf, "<%u.%u.%u:%u>:",
                            tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
-                           tipc_node(tipc_own_addr), p_ptr->publ.ref);
+                           tipc_node(tipc_own_addr), p_ptr->ref);
        else
-               tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
+               tipc_printf(buf, "%-10u:", p_ptr->ref);
 
-       if (p_ptr->publ.connected) {
+       if (p_ptr->connected) {
                u32 dport = port_peerport(p_ptr);
                u32 destnode = port_peernode(p_ptr);
 
                tipc_printf(buf, " connected to <%u.%u.%u:%u>",
                            tipc_zone(destnode), tipc_cluster(destnode),
                            tipc_node(destnode), dport);
-               if (p_ptr->publ.conn_type != 0)
+               if (p_ptr->conn_type != 0)
                        tipc_printf(buf, " via {%u,%u}",
-                                   p_ptr->publ.conn_type,
-                                   p_ptr->publ.conn_instance);
-       } else if (p_ptr->publ.published) {
+                                   p_ptr->conn_type,
+                                   p_ptr->conn_instance);
+       } else if (p_ptr->published) {
                tipc_printf(buf, " bound to");
                list_for_each_entry(publ, &p_ptr->publications, pport_list) {
                        if (publ->lower == publ->upper)
@@ -639,7 +615,7 @@ struct sk_buff *tipc_port_get_ports(void)
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        struct print_buf pb;
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        int str_len;
 
        buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
@@ -650,9 +626,9 @@ struct sk_buff *tipc_port_get_ports(void)
        tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
-               spin_lock_bh(p_ptr->publ.lock);
+               spin_lock_bh(p_ptr->lock);
                port_print(p_ptr, &pb, 0);
-               spin_unlock_bh(p_ptr->publ.lock);
+               spin_unlock_bh(p_ptr->lock);
        }
        spin_unlock_bh(&tipc_port_list_lock);
        str_len = tipc_printbuf_validate(&pb);
@@ -665,12 +641,12 @@ struct sk_buff *tipc_port_get_ports(void)
 
 void tipc_port_reinit(void)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
 
        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
-               msg = &p_ptr->publ.phdr;
+               msg = &p_ptr->phdr;
                if (msg_orignode(msg) == tipc_own_addr)
                        break;
                msg_set_prevnode(msg, tipc_own_addr);
@@ -695,7 +671,7 @@ static void port_dispatcher_sigh(void *dummy)
        spin_unlock_bh(&queue_lock);
 
        while (buf) {
-               struct port *p_ptr;
+               struct tipc_port *p_ptr;
                struct user_port *up_ptr;
                struct tipc_portid orig;
                struct tipc_name_seq dseq;
@@ -720,8 +696,8 @@ static void port_dispatcher_sigh(void *dummy)
                orig.node = msg_orignode(msg);
                up_ptr = p_ptr->user_port;
                usr_handle = up_ptr->usr_handle;
-               connected = p_ptr->publ.connected;
-               published = p_ptr->publ.published;
+               connected = p_ptr->connected;
+               published = p_ptr->published;
 
                if (unlikely(msg_errcode(msg)))
                        goto err;
@@ -732,6 +708,7 @@ static void port_dispatcher_sigh(void *dummy)
                                tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
                                u32 peer_port = port_peerport(p_ptr);
                                u32 peer_node = port_peernode(p_ptr);
+                               u32 dsz;
 
                                tipc_port_unlock(p_ptr);
                                if (unlikely(!cb))
@@ -742,13 +719,14 @@ static void port_dispatcher_sigh(void *dummy)
                                } else if ((msg_origport(msg) != peer_port) ||
                                           (msg_orignode(msg) != peer_node))
                                        goto reject;
-                               if (unlikely(++p_ptr->publ.conn_unacked >=
-                                            TIPC_FLOW_CONTROL_WIN))
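+                               /* empty messages are not counted against the connection's unacked window */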
+                               dsz = msg_data_sz(msg);
+                               if (unlikely(dsz &&
+                                            (++p_ptr->conn_unacked >=
+                                             TIPC_FLOW_CONTROL_WIN)))
                                        tipc_acknowledge(dref,
-                                                        p_ptr->publ.conn_unacked);
+                                                        p_ptr->conn_unacked);
                                skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg), dsz);
                                break;
                        }
                case TIPC_DIRECT_MSG:{
@@ -872,7 +850,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
 
 static void port_wakeup_sh(unsigned long ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct user_port *up_ptr;
        tipc_continue_event cb = NULL;
        void *uh = NULL;
@@ -898,14 +876,14 @@ static void port_wakeup(struct tipc_port *p_ptr)
 
 void tipc_acknowledge(u32 ref, u32 ack)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return;
-       if (p_ptr->publ.connected) {
-               p_ptr->publ.conn_unacked -= ack;
+       if (p_ptr->connected) {
+               p_ptr->conn_unacked -= ack;
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
                                           ref,
@@ -913,7 +891,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
                                           CONN_MANAGER,
                                           CONN_ACK,
                                           TIPC_OK,
-                                          port_out_seqno(p_ptr),
                                           ack);
        }
        tipc_port_unlock(p_ptr);
@@ -936,14 +913,14 @@ int tipc_createport(void *usr_handle,
                    u32 *portref)
 {
        struct user_port *up_ptr;
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
        if (!up_ptr) {
                warn("Port creation failed, no memory\n");
                return -ENOMEM;
        }
-       p_ptr = (struct port *)tipc_createport_raw(NULL, port_dispatcher,
+       p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher,
                                                   port_wakeup, importance);
        if (!p_ptr) {
                kfree(up_ptr);
@@ -952,7 +929,7 @@ int tipc_createport(void *usr_handle,
 
        p_ptr->user_port = up_ptr;
        up_ptr->usr_handle = usr_handle;
-       up_ptr->ref = p_ptr->publ.ref;
+       up_ptr->ref = p_ptr->ref;
        up_ptr->err_cb = error_cb;
        up_ptr->named_err_cb = named_error_cb;
        up_ptr->conn_err_cb = conn_error_cb;
@@ -960,26 +937,26 @@ int tipc_createport(void *usr_handle,
        up_ptr->named_msg_cb = named_msg_cb;
        up_ptr->conn_msg_cb = conn_msg_cb;
        up_ptr->continue_event_cb = continue_event_cb;
-       *portref = p_ptr->publ.ref;
+       *portref = p_ptr->ref;
        tipc_port_unlock(p_ptr);
        return 0;
 }
 
 int tipc_portimportance(u32 ref, unsigned int *importance)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
+       *importance = (unsigned int)msg_importance(&p_ptr->phdr);
        tipc_port_unlock(p_ptr);
        return 0;
 }
 
 int tipc_set_portimportance(u32 ref, unsigned int imp)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        if (imp > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;
@@ -987,7 +964,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
+       msg_set_importance(&p_ptr->phdr, (u32)imp);
        tipc_port_unlock(p_ptr);
        return 0;
 }
@@ -995,7 +972,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
 
 int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct publication *publ;
        u32 key;
        int res = -EINVAL;
@@ -1004,7 +981,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        if (!p_ptr)
                return -EINVAL;
 
-       if (p_ptr->publ.connected)
+       if (p_ptr->connected)
                goto exit;
        if (seq->lower > seq->upper)
                goto exit;
@@ -1016,11 +993,11 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                goto exit;
        }
        publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
-                                   scope, p_ptr->publ.ref, key);
+                                   scope, p_ptr->ref, key);
        if (publ) {
                list_add(&publ->pport_list, &p_ptr->publications);
                p_ptr->pub_count++;
-               p_ptr->publ.published = 1;
+               p_ptr->published = 1;
                res = 0;
        }
 exit:
@@ -1030,7 +1007,7 @@ exit:
 
 int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct publication *publ;
        struct publication *tpubl;
        int res = -EINVAL;
@@ -1063,37 +1040,36 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                }
        }
        if (list_empty(&p_ptr->publications))
-               p_ptr->publ.published = 0;
+               p_ptr->published = 0;
        tipc_port_unlock(p_ptr);
        return res;
 }
 
 int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res = -EINVAL;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       if (p_ptr->publ.published || p_ptr->publ.connected)
+       if (p_ptr->published || p_ptr->connected)
                goto exit;
        if (!peer->ref)
                goto exit;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_destnode(msg, peer->node);
        msg_set_destport(msg, peer->ref);
        msg_set_orignode(msg, tipc_own_addr);
-       msg_set_origport(msg, p_ptr->publ.ref);
-       msg_set_transp_seqno(msg, 42);
+       msg_set_origport(msg, p_ptr->ref);
        msg_set_type(msg, TIPC_CONN_MSG);
        msg_set_hdr_sz(msg, SHORT_H_SIZE);
 
        p_ptr->probing_interval = PROBING_INTERVAL;
        p_ptr->probing_state = CONFIRMED;
-       p_ptr->publ.connected = 1;
+       p_ptr->connected = 1;
        k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
 
        tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
@@ -1102,7 +1078,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
        res = 0;
 exit:
        tipc_port_unlock(p_ptr);
-       p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
+       p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
        return res;
 }
 
@@ -1120,7 +1096,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
                tp_ptr->connected = 0;
                /* let timer expire on its own to avoid deadlock! */
                tipc_nodesub_unsubscribe(
-                       &((struct port *)tp_ptr)->subscription);
+                       &((struct tipc_port *)tp_ptr)->subscription);
                res = 0;
        } else {
                res = -ENOTCONN;
@@ -1135,7 +1111,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
 
 int tipc_disconnect(u32 ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        int res;
 
        p_ptr = tipc_port_lock(ref);
@@ -1151,15 +1127,15 @@ int tipc_disconnect(u32 ref)
  */
 int tipc_shutdown(u32 ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
 
-       if (p_ptr->publ.connected) {
-               u32 imp = msg_importance(&p_ptr->publ.phdr);
+       if (p_ptr->connected) {
+               u32 imp = msg_importance(&p_ptr->phdr);
                if (imp < TIPC_CRITICAL_IMPORTANCE)
                        imp++;
                buf = port_build_proto_msg(port_peerport(p_ptr),
@@ -1169,7 +1145,6 @@ int tipc_shutdown(u32 ref)
                                           imp,
                                           TIPC_CONN_MSG,
                                           TIPC_CONN_SHUTDOWN,
-                                          port_out_seqno(p_ptr),
                                           0);
        }
        tipc_port_unlock(p_ptr);
@@ -1182,13 +1157,13 @@ int tipc_shutdown(u32 ref)
  *                        message for this node.
  */
 
-static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
                                   struct iovec const *msg_sect)
 {
        struct sk_buff *buf;
        int res;
 
-       res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect,
+       res = tipc_msg_build(&sender->phdr, msg_sect, num_sect,
                        MAX_MSG_SIZE, !sender->user_port, &buf);
        if (likely(buf))
                tipc_port_recv_msg(buf);
@@ -1201,15 +1176,15 @@ static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
 
 int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        u32 destnode;
        int res;
 
        p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || !p_ptr->publ.connected)
+       if (!p_ptr || !p_ptr->connected)
                return -EINVAL;
 
-       p_ptr->publ.congested = 1;
+       p_ptr->congested = 1;
        if (!tipc_port_congested(p_ptr)) {
                destnode = port_peernode(p_ptr);
                if (likely(destnode != tipc_own_addr))
@@ -1219,14 +1194,14 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
                        res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
 
                if (likely(res != -ELINKCONG)) {
-                       port_incr_out_seqno(p_ptr);
-                       p_ptr->publ.congested = 0;
-                       p_ptr->sent++;
+                       p_ptr->congested = 0;
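+                       /* count only messages that were actually delivered (res > 0) */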
+                       if (res > 0)
+                               p_ptr->sent++;
                        return res;
                }
        }
        if (port_unreliable(p_ptr)) {
-               p_ptr->publ.congested = 0;
+               p_ptr->congested = 0;
                /* Just calculate msg length and return */
                return tipc_msg_calc_data_size(msg_sect, num_sect);
        }
@@ -1240,17 +1215,17 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
 int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
           unsigned int num_sect, struct iovec const *msg_sect)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        u32 destnode = domain;
        u32 destport;
        int res;
 
        p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
+       if (!p_ptr || p_ptr->connected)
                return -EINVAL;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_type(msg, TIPC_NAMED_MSG);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
@@ -1263,13 +1238,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
        msg_set_destport(msg, destport);
 
        if (likely(destport)) {
-               p_ptr->sent++;
                if (likely(destnode == tipc_own_addr))
-                       return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
-               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                  destnode);
-               if (likely(res != -ELINKCONG))
+                       res = tipc_port_recv_sections(p_ptr, num_sect,
+                                                     msg_sect);
+               else
+                       res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+                                                          num_sect, destnode);
+               if (likely(res != -ELINKCONG)) {
+                       if (res > 0)
+                               p_ptr->sent++;
                        return res;
+               }
                if (port_unreliable(p_ptr)) {
                        /* Just calculate msg length and return */
                        return tipc_msg_calc_data_size(msg_sect, num_sect);
@@ -1287,27 +1266,32 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 int tipc_send2port(u32 ref, struct tipc_portid const *dest,
           unsigned int num_sect, struct iovec const *msg_sect)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res;
 
        p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
+       if (!p_ptr || p_ptr->connected)
                return -EINVAL;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_type(msg, TIPC_DIRECT_MSG);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
        msg_set_destnode(msg, dest->node);
        msg_set_destport(msg, dest->ref);
        msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
-       p_ptr->sent++;
+
        if (dest->node == tipc_own_addr)
-               return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
-       res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
-       if (likely(res != -ELINKCONG))
+               res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
+       else
+               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
+                                                  dest->node);
+       if (likely(res != -ELINKCONG)) {
+               if (res > 0)
+                       p_ptr->sent++;
                return res;
+       }
        if (port_unreliable(p_ptr)) {
                /* Just calculate msg length and return */
                return tipc_msg_calc_data_size(msg_sect, num_sect);
@@ -1322,15 +1306,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
 int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
               struct sk_buff *buf, unsigned int dsz)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res;
 
-       p_ptr = (struct port *)tipc_ref_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
+       p_ptr = (struct tipc_port *)tipc_ref_deref(ref);
+       if (!p_ptr || p_ptr->connected)
                return -EINVAL;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_type(msg, TIPC_DIRECT_MSG);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
@@ -1343,12 +1327,16 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
 
        skb_push(buf, DIR_MSG_H_SIZE);
        skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
-       p_ptr->sent++;
+
        if (dest->node == tipc_own_addr)
-               return tipc_port_recv_msg(buf);
-       res = tipc_send_buf_fast(buf, dest->node);
-       if (likely(res != -ELINKCONG))
+               res = tipc_port_recv_msg(buf);
+       else
+               res = tipc_send_buf_fast(buf, dest->node);
+       if (likely(res != -ELINKCONG)) {
+               if (res > 0)
+                       p_ptr->sent++;
                return res;
+       }
        if (port_unreliable(p_ptr))
                return dsz;
        return -ELINKCONG;
index 8e84b98..87b9424 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/port.h: Include file for TIPC port code
  *
  * Copyright (c) 1994-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -95,7 +95,7 @@ struct user_port {
 };
 
 /**
- * struct tipc_port - TIPC port info available to socket API
+ * struct tipc_port - TIPC port structure
  * @usr_handle: pointer to additional user-defined information about port
  * @lock: pointer to spinlock for controlling access to port
  * @connected: non-zero if port is currently connected to a peer port
@@ -107,43 +107,33 @@ struct user_port {
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
  * @ref: unique reference to port in TIPC object registry
  * @phdr: preformatted message header used when sending messages
- */
-struct tipc_port {
-       void *usr_handle;
-       spinlock_t *lock;
-       int connected;
-       u32 conn_type;
-       u32 conn_instance;
-       u32 conn_unacked;
-       int published;
-       u32 congested;
-       u32 max_pkt;
-       u32 ref;
-       struct tipc_msg phdr;
-};
-
-/**
- * struct port - TIPC port structure
- * @publ: TIPC port info available to privileged users
  * @port_list: adjacent ports in TIPC's global list of ports
  * @dispatcher: ptr to routine which handles received messages
  * @wakeup: ptr to routine to call when port is no longer congested
  * @user_port: ptr to user port associated with port (if any)
  * @wait_list: adjacent ports in list of ports waiting on link congestion
  * @waiting_pkts:
- * @sent:
- * @acked:
+ * @sent: # of non-empty messages sent by port
+ * @acked: # of non-empty message acknowledgements from connected port's peer
  * @publications: list of publications for port
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
  * @probing_interval:
- * @last_in_seqno:
  * @timer_ref:
  * @subscription: "node down" subscription used to terminate failed connections
  */
-
-struct port {
-       struct tipc_port publ;
+struct tipc_port {
+       void *usr_handle;
+       spinlock_t *lock;
+       int connected;
+       u32 conn_type;
+       u32 conn_instance;
+       u32 conn_unacked;
+       int published;
+       u32 congested;
+       u32 max_pkt;
+       u32 ref;
+       struct tipc_msg phdr;
        struct list_head port_list;
        u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
        void (*wakeup)(struct tipc_port *);
@@ -156,7 +146,6 @@ struct port {
        u32 pub_count;
        u32 probing_state;
        u32 probing_interval;
-       u32 last_in_seqno;
        struct timer_list timer;
        struct tipc_node_subscr subscription;
 };
@@ -230,7 +219,7 @@ int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
 int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
                unsigned int section_count, struct iovec const *msg);
 
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
                              struct iovec const *msg_sect, u32 num_sect,
                              int err);
 struct sk_buff *tipc_port_get_ports(void);
@@ -242,9 +231,9 @@ void tipc_port_reinit(void);
  * tipc_port_lock - lock port instance referred to and return its pointer
  */
 
-static inline struct port *tipc_port_lock(u32 ref)
+static inline struct tipc_port *tipc_port_lock(u32 ref)
 {
-       return (struct port *)tipc_ref_lock(ref);
+       return (struct tipc_port *)tipc_ref_lock(ref);
 }
 
 /**
@@ -253,27 +242,27 @@ static inline struct port *tipc_port_lock(u32 ref)
  * Can use pointer instead of tipc_ref_unlock() since port is already locked.
  */
 
-static inline void tipc_port_unlock(struct port *p_ptr)
+static inline void tipc_port_unlock(struct tipc_port *p_ptr)
 {
-       spin_unlock_bh(p_ptr->publ.lock);
+       spin_unlock_bh(p_ptr->lock);
 }
 
-static inline struct port *tipc_port_deref(u32 ref)
+static inline struct tipc_port *tipc_port_deref(u32 ref)
 {
-       return (struct port *)tipc_ref_deref(ref);
+       return (struct tipc_port *)tipc_ref_deref(ref);
 }
 
-static inline u32 tipc_peer_port(struct port *p_ptr)
+static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
 {
-       return msg_destport(&p_ptr->publ.phdr);
+       return msg_destport(&p_ptr->phdr);
 }
 
-static inline u32 tipc_peer_node(struct port *p_ptr)
+static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
 {
-       return msg_destnode(&p_ptr->publ.phdr);
+       return msg_destnode(&p_ptr->phdr);
 }
 
-static inline int tipc_port_congested(struct port *p_ptr)
+static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
        return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
 }
@@ -284,7 +273,7 @@ static inline int tipc_port_congested(struct port *p_ptr)
 
 static inline int tipc_port_recv_msg(struct sk_buff *buf)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg = buf_msg(buf);
        u32 destport = msg_destport(msg);
        u32 dsz = msg_data_sz(msg);
@@ -299,7 +288,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
        /* validate destination & pass to port, otherwise reject message */
        p_ptr = tipc_port_lock(destport);
        if (likely(p_ptr)) {
-               if (likely(p_ptr->publ.connected)) {
+               if (likely(p_ptr->connected)) {
                        if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
                            (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
                            (unlikely(!msg_connected(msg)))) {
@@ -308,7 +297,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
                                goto reject;
                        }
                }
-               err = p_ptr->dispatcher(&p_ptr->publ, buf);
+               err = p_ptr->dispatcher(p_ptr, buf);
                tipc_port_unlock(p_ptr);
                if (likely(!err))
                        return dsz;
index 2b02a3a..29d94d5 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -58,6 +58,9 @@ struct tipc_sock {
 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
 #define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
 
+#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
+                       (sock->state == SS_DISCONNECTING))
+
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
@@ -241,7 +244,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
                        tipc_set_portunreliable(tp_ptr->ref, 1);
        }
 
-       atomic_inc(&tipc_user_count);
        return 0;
 }
 
@@ -290,7 +292,7 @@ static int release(struct socket *sock)
                if (buf == NULL)
                        break;
                atomic_dec(&tipc_queue_size);
-               if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
+               if (TIPC_SKB_CB(buf)->handle != 0)
                        buf_discard(buf);
                else {
                        if ((sock->state == SS_CONNECTING) ||
@@ -321,7 +323,6 @@ static int release(struct socket *sock)
        sock_put(sk);
        sock->sk = NULL;
 
-       atomic_dec(&tipc_user_count);
        return res;
 }
 
@@ -495,6 +496,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
        if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
                return -EACCES;
 
+       if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
+               return -EMSGSIZE;
        if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
                return -EFAULT;
        if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
@@ -911,15 +914,13 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
        struct tipc_port *tport = tipc_sk_port(sk);
        struct sk_buff *buf;
        struct tipc_msg *msg;
+       long timeout;
        unsigned int sz;
        u32 err;
        int res;
 
        /* Catch invalid receive requests */
 
-       if (m->msg_iovlen != 1)
-               return -EOPNOTSUPP;   /* Don't do multiple iovec entries yet */
-
        if (unlikely(!buf_len))
                return -EINVAL;
 
@@ -930,6 +931,7 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
                goto exit;
        }
 
+       timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
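+       /* how long a blocking receive may wait: 0 if MSG_DONTWAIT, else the socket's SO_RCVTIMEO */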
 restart:
 
        /* Look for a message in receive queue; wait if necessary */
@@ -939,17 +941,15 @@ restart:
                        res = -ENOTCONN;
                        goto exit;
                }
-               if (flags & MSG_DONTWAIT) {
-                       res = -EWOULDBLOCK;
+               if (timeout <= 0L) {
+                       res = timeout ? timeout : -EWOULDBLOCK;
                        goto exit;
                }
                release_sock(sk);
-               res = wait_event_interruptible(*sk_sleep(sk),
-                       (!skb_queue_empty(&sk->sk_receive_queue) ||
-                        (sock->state == SS_DISCONNECTING)));
+               timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
+                                                          tipc_rx_ready(sock),
+                                                          timeout);
                lock_sock(sk);
-               if (res)
-                       goto exit;
        }
 
        /* Look at first message in receive queue */
@@ -991,11 +991,10 @@ restart:
                        sz = buf_len;
                        m->msg_flags |= MSG_TRUNC;
                }
-               if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
-                                         sz))) {
-                       res = -EFAULT;
+               res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
+                                             m->msg_iov, sz);
+               if (res)
                        goto exit;
-               }
                res = sz;
        } else {
                if ((sock->state == SS_READY) ||
@@ -1038,19 +1037,15 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
        struct tipc_port *tport = tipc_sk_port(sk);
        struct sk_buff *buf;
        struct tipc_msg *msg;
+       long timeout;
        unsigned int sz;
        int sz_to_copy, target, needed;
        int sz_copied = 0;
-       char __user *crs = m->msg_iov->iov_base;
-       unsigned char *buf_crs;
        u32 err;
        int res = 0;
 
        /* Catch invalid receive attempts */
 
-       if (m->msg_iovlen != 1)
-               return -EOPNOTSUPP;   /* Don't do multiple iovec entries yet */
-
        if (unlikely(!buf_len))
                return -EINVAL;
 
@@ -1063,7 +1058,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
        }
 
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
-
+       timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
        /* Look for a message in receive queue; wait if necessary */
@@ -1073,17 +1068,15 @@ restart:
                        res = -ENOTCONN;
                        goto exit;
                }
-               if (flags & MSG_DONTWAIT) {
-                       res = -EWOULDBLOCK;
+               if (timeout <= 0L) {
+                       res = timeout ? timeout : -EWOULDBLOCK;
                        goto exit;
                }
                release_sock(sk);
-               res = wait_event_interruptible(*sk_sleep(sk),
-                       (!skb_queue_empty(&sk->sk_receive_queue) ||
-                        (sock->state == SS_DISCONNECTING)));
+               timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
+                                                          tipc_rx_ready(sock),
+                                                          timeout);
                lock_sock(sk);
-               if (res)
-                       goto exit;
        }
 
        /* Look at first message in receive queue */
@@ -1112,24 +1105,25 @@ restart:
        /* Capture message data (if valid) & compute return value (always) */
 
        if (!err) {
-               buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
-               sz = (unsigned char *)msg + msg_size(msg) - buf_crs;
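+               /* handle holds the number of payload bytes already copied to the caller */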
+               u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
 
+               sz -= offset;
                needed = (buf_len - sz_copied);
                sz_to_copy = (sz <= needed) ? sz : needed;
-               if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
-                       res = -EFAULT;
+
+               res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
+                                             m->msg_iov, sz_to_copy);
+               if (res)
                        goto exit;
-               }
+
                sz_copied += sz_to_copy;
 
                if (sz_to_copy < sz) {
                        if (!(flags & MSG_PEEK))
-                               TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
+                               TIPC_SKB_CB(buf)->handle =
+                               (void *)(unsigned long)(offset + sz_to_copy);
                        goto exit;
                }
-
-               crs += sz_to_copy;
        } else {
                if (sz_copied != 0)
                        goto exit; /* can't add error msg to valid data */
@@ -1256,7 +1250,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 
        /* Enqueue message (finally!) */
 
-       TIPC_SKB_CB(buf)->handle = msg_data(msg);
+       TIPC_SKB_CB(buf)->handle = 0;
        atomic_inc(&tipc_queue_size);
        __skb_queue_tail(&sk->sk_receive_queue, buf);
 
@@ -1608,7 +1602,7 @@ restart:
                buf = __skb_dequeue(&sk->sk_receive_queue);
                if (buf) {
                        atomic_dec(&tipc_queue_size);
-                       if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
+                       if (TIPC_SKB_CB(buf)->handle != 0) {
                                buf_discard(buf);
                                goto restart;
                        }
index ca04479..aae9eae 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/subscr.c: TIPC network topology service
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -160,7 +160,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
 
 static void subscr_timeout(struct subscription *sub)
 {
-       struct port *server_port;
+       struct tipc_port *server_port;
 
        /* Validate server port reference (in case subscriber is terminating) */
 
@@ -472,8 +472,6 @@ static void subscr_named_msg_event(void *usr_handle,
                                   struct tipc_portid const *orig,
                                   struct tipc_name_seq const *dest)
 {
-       static struct iovec msg_sect = {NULL, 0};
-
        struct subscriber *subscriber;
        u32 server_port_ref;
 
@@ -508,7 +506,7 @@ static void subscr_named_msg_event(void *usr_handle,
 
        /* Lock server port (& save lock address for future use) */
 
-       subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
+       subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
 
        /* Add subscriber to topology server's subscriber list */
 
@@ -523,7 +521,7 @@ static void subscr_named_msg_event(void *usr_handle,
 
        /* Send an ACK- to complete connection handshaking */
 
-       tipc_send(server_port_ref, 1, &msg_sect);
+       tipc_send(server_port_ref, 0, NULL);
 
        /* Handle optional subscription request */
 
@@ -542,7 +540,6 @@ int tipc_subscr_start(void)
        spin_lock_init(&topsrv.lock);
        INIT_LIST_HEAD(&topsrv.subscriber_list);
 
-       spin_lock_bh(&topsrv.lock);
        res = tipc_createport(NULL,
                              TIPC_CRITICAL_IMPORTANCE,
                              NULL,
@@ -563,12 +560,10 @@ int tipc_subscr_start(void)
                goto failed;
        }
 
-       spin_unlock_bh(&topsrv.lock);
        return 0;
 
 failed:
        err("Failed to create subscription service\n");
-       spin_unlock_bh(&topsrv.lock);
        return res;
 }
 
index dd419d2..de87018 100644 (file)
@@ -1171,7 +1171,7 @@ restart:
        newsk->sk_type          = sk->sk_type;
        init_peercred(newsk);
        newu = unix_sk(newsk);
-       newsk->sk_wq            = &newu->peer_wq;
+       RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);
 
        /* copy address information from listening to new sock*/
@@ -1475,6 +1475,12 @@ restart:
                        goto out_free;
        }
 
+       if (sk_filter(other, skb) < 0) {
+               /* Toss the packet but do not return any error to the sender */
+               err = len;
+               goto out_free;
+       }
+
        unix_state_lock(other);
        err = -EPERM;
        if (!unix_may_send(sk, other))
@@ -1561,7 +1567,6 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct sock *other = NULL;
-       struct sockaddr_un *sunaddr = msg->msg_name;
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
@@ -1584,7 +1589,6 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out_err;
        } else {
-               sunaddr = NULL;
                err = -ENOTCONN;
                other = unix_peer(sk);
                if (!other)
@@ -1724,7 +1728,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        msg->msg_namelen = 0;
 
-       mutex_lock(&u->readlock);
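+       /* bail out with a signal-appropriate error if interrupted while waiting for the lock */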
+       err = mutex_lock_interruptible(&u->readlock);
+       if (err) {
+               err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+               goto out;
+       }
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
@@ -1864,7 +1872,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
 
-       mutex_lock(&u->readlock);
+       err = mutex_lock_interruptible(&u->readlock);
+       if (err) {
+               err = sock_intr_errno(timeo);
+               goto out;
+       }
 
        do {
                int chunk;
@@ -1895,11 +1907,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                        timeo = unix_stream_data_wait(sk, timeo);
 
-                       if (signal_pending(current)) {
+                       if (signal_pending(current)
+                       if (signal_pending(current) ||
+                           mutex_lock_interruptible(&u->readlock)) {
                                goto out;
                        }
-                       mutex_lock(&u->readlock);
+
                        continue;
  unlock:
                        unix_state_unlock(sk);
@@ -1978,36 +1991,38 @@ static int unix_shutdown(struct socket *sock, int mode)
 
        mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
 
-       if (mode) {
-               unix_state_lock(sk);
-               sk->sk_shutdown |= mode;
-               other = unix_peer(sk);
-               if (other)
-                       sock_hold(other);
-               unix_state_unlock(sk);
-               sk->sk_state_change(sk);
-
-               if (other &&
-                       (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
-
-                       int peer_mode = 0;
-
-                       if (mode&RCV_SHUTDOWN)
-                               peer_mode |= SEND_SHUTDOWN;
-                       if (mode&SEND_SHUTDOWN)
-                               peer_mode |= RCV_SHUTDOWN;
-                       unix_state_lock(other);
-                       other->sk_shutdown |= peer_mode;
-                       unix_state_unlock(other);
-                       other->sk_state_change(other);
-                       if (peer_mode == SHUTDOWN_MASK)
-                               sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
-                       else if (peer_mode & RCV_SHUTDOWN)
-                               sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-               }
-               if (other)
-                       sock_put(other);
+       if (!mode)
+               return 0;
+
+       unix_state_lock(sk);
+       sk->sk_shutdown |= mode;
+       other = unix_peer(sk);
+       if (other)
+               sock_hold(other);
+       unix_state_unlock(sk);
+       sk->sk_state_change(sk);
+
+       if (other &&
+               (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+
+               int peer_mode = 0;
+
+               if (mode&RCV_SHUTDOWN)
+                       peer_mode |= SEND_SHUTDOWN;
+               if (mode&SEND_SHUTDOWN)
+                       peer_mode |= RCV_SHUTDOWN;
+               unix_state_lock(other);
+               other->sk_shutdown |= peer_mode;
+               unix_state_unlock(other);
+               other->sk_state_change(other);
+               if (peer_mode == SHUTDOWN_MASK)
+                       sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+               else if (peer_mode & RCV_SHUTDOWN)
+                       sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
        }
+       if (other)
+               sock_put(other);
+
        return 0;
 }
 
index 74944a2..788a12c 100644 (file)
@@ -59,8 +59,6 @@
 #include <asm/uaccess.h>        /* copy_to/from_user */
 #include <linux/init.h>         /* __initfunc et al. */
 
-#define KMEM_SAFETYZONE 8
-
 #define DEV_TO_SLAVE(dev)      (*((struct net_device **)netdev_priv(dev)))
 
 /*
index e9a5f8c..fe01de2 100644 (file)
@@ -718,13 +718,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                        wdev->ps = false;
                /* allow mac80211 to determine the timeout */
                wdev->ps_timeout = -1;
-               if (rdev->ops->set_power_mgmt)
-                       if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
-                                                     wdev->ps,
-                                                     wdev->ps_timeout)) {
-                               /* assume this means it's off */
-                               wdev->ps = false;
-                       }
 
                if (!dev->ethtool_ops)
                        dev->ethtool_ops = &cfg80211_ethtool_ops;
@@ -813,6 +806,19 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                rdev->opencount++;
                mutex_unlock(&rdev->devlist_mtx);
                cfg80211_unlock_rdev(rdev);
+
+               /*
+                * Configure power management in the driver here so that it is
+                * correctly set also after interface type changes etc.
+                */
+               if (wdev->iftype == NL80211_IFTYPE_STATION &&
+                   rdev->ops->set_power_mgmt)
+                       if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
+                                                     wdev->ps,
+                                                     wdev->ps_timeout)) {
+                               /* assume this means it's off */
+                               wdev->ps = false;
+                       }
                break;
        case NETDEV_UNREGISTER:
                /*
index ca4c825..9bde4d1 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/utsname.h>
 #include <net/cfg80211.h>
+#include "core.h"
 #include "ethtool.h"
 
 static void cfg80211_get_drvinfo(struct net_device *dev,
@@ -37,9 +38,41 @@ static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        regs->len = 0;
 }
 
+static void cfg80211_get_ringparam(struct net_device *dev,
+                                  struct ethtool_ringparam *rp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+       memset(rp, 0, sizeof(*rp));
+
+       if (rdev->ops->get_ringparam)
+               rdev->ops->get_ringparam(wdev->wiphy,
+                                        &rp->tx_pending, &rp->tx_max_pending,
+                                        &rp->rx_pending, &rp->rx_max_pending);
+}
+
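+/* mini/jumbo rings are unsupported; tx/rx sizes are handed to the driver's set_ringparam (if any) */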
+static int cfg80211_set_ringparam(struct net_device *dev,
+                                 struct ethtool_ringparam *rp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+       if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
+               return -EINVAL;
+
+       if (rdev->ops->set_ringparam)
+               return rdev->ops->set_ringparam(wdev->wiphy,
+                                               rp->tx_pending, rp->rx_pending);
+
+       return -ENOTSUPP;
+}
+
 const struct ethtool_ops cfg80211_ethtool_ops = {
        .get_drvinfo = cfg80211_get_drvinfo,
        .get_regs_len = cfg80211_get_regs_len,
        .get_regs = cfg80211_get_regs,
        .get_link = ethtool_op_get_link,
+       .get_ringparam = cfg80211_get_ringparam,
+       .set_ringparam = cfg80211_set_ringparam,
 };
index 9b62710..4ebce42 100644 (file)
@@ -1968,13 +1968,41 @@ static int parse_station_flags(struct genl_info *info,
        return 0;
 }
 
+static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
+                                int attr)
+{
+       struct nlattr *rate;
+       u16 bitrate;
+
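+       /* nest bitrate, MCS and HT flags under 'attr'; false means the skb ran out of room */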
+       rate = nla_nest_start(msg, attr);
+       if (!rate)
+               goto nla_put_failure;
+
+       /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+       bitrate = cfg80211_calculate_bitrate(info);
+       if (bitrate > 0)
+               NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
+
+       if (info->flags & RATE_INFO_FLAGS_MCS)
+               NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
+       if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
+               NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
+       if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
+               NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+
+       nla_nest_end(msg, rate);
+       return true;
+
+nla_put_failure:
+       return false;
+}
+
 static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                                int flags, struct net_device *dev,
                                const u8 *mac_addr, struct station_info *sinfo)
 {
        void *hdr;
-       struct nlattr *sinfoattr, *txrate;
-       u16 bitrate;
+       struct nlattr *sinfoattr;
 
        hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
        if (!hdr)
@@ -2013,24 +2041,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
                           sinfo->signal_avg);
        if (sinfo->filled & STATION_INFO_TX_BITRATE) {
-               txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE);
-               if (!txrate)
+               if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
+                                         NL80211_STA_INFO_TX_BITRATE))
+                       goto nla_put_failure;
+       }
+       if (sinfo->filled & STATION_INFO_RX_BITRATE) {
+               if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
+                                         NL80211_STA_INFO_RX_BITRATE))
                        goto nla_put_failure;
-
-               /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
-               bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
-               if (bitrate > 0)
-                       NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
-               if (sinfo->txrate.flags & RATE_INFO_FLAGS_MCS)
-                       NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS,
-                                   sinfo->txrate.mcs);
-               if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
-                       NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
-               if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI)
-                       NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
-
-               nla_nest_end(msg, txrate);
        }
        if (sinfo->filled & STATION_INFO_RX_PACKETS)
                NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
@@ -2718,7 +2736,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
        hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
                             NL80211_CMD_GET_MESH_CONFIG);
        if (!hdr)
-               goto nla_put_failure;
+               goto out;
        pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
        if (!pinfoattr)
                goto nla_put_failure;
@@ -2759,6 +2777,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
+ out:
        nlmsg_free(msg);
        return -ENOBUFS;
 }
@@ -2954,7 +2973,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
        hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
                             NL80211_CMD_GET_REG);
        if (!hdr)
-               goto nla_put_failure;
+               goto put_failure;
 
        NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
                cfg80211_regdomain->alpha2);
@@ -3001,6 +3020,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
+put_failure:
        nlmsg_free(msg);
        err = -EMSGSIZE;
 out:
index 37693b6..3332d5b 100644 (file)
@@ -63,6 +63,10 @@ static struct regulatory_request *last_request;
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
 
+static struct device_type reg_device_type = {
+       .uevent = reg_device_uevent,
+};
+
 /*
  * Central wireless core regulatory domains, we only need two,
  * the current one and a world regulatory domain in case we have no
@@ -362,16 +366,11 @@ static inline void reg_regdb_query(const char *alpha2) {}
 
 /*
  * This lets us keep regulatory code which is updated on a regulatory
- * basis in userspace.
+ * basis in userspace. Country information is filled in by
+ * reg_device_uevent.
  */
 static int call_crda(const char *alpha2)
 {
-       char country_env[9 + 2] = "COUNTRY=";
-       char *envp[] = {
-               country_env,
-               NULL
-       };
-
        if (!is_world_regdom((char *) alpha2))
                pr_info("Calling CRDA for country: %c%c\n",
                        alpha2[0], alpha2[1]);
@@ -381,10 +380,7 @@ static int call_crda(const char *alpha2)
        /* query internal regulatory database (if it exists) */
        reg_regdb_query(alpha2);
 
-       country_env[8] = alpha2[0];
-       country_env[9] = alpha2[1];
-
-       return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, envp);
+       return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE);
 }
 
 /* Used by nl80211 before kmalloc'ing our regulatory domain */
@@ -1801,9 +1797,9 @@ void regulatory_hint_disconnect(void)
 
 static bool freq_is_chan_12_13_14(u16 freq)
 {
-       if (freq == ieee80211_channel_to_frequency(12) ||
-           freq == ieee80211_channel_to_frequency(13) ||
-           freq == ieee80211_channel_to_frequency(14))
+       if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
+           freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
+           freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
                return true;
        return false;
 }
@@ -2087,6 +2083,25 @@ int set_regdom(const struct ieee80211_regdomain *rd)
        return r;
 }
 
+#ifdef CONFIG_HOTPLUG
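+/* uevent hook for the regulatory platform device: exports the country code of
+ * the pending request as COUNTRY= so that user space (CRDA) can read it
+ */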
+int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       if (last_request && !last_request->processed) {
+               if (add_uevent_var(env, "COUNTRY=%c%c",
+                                  last_request->alpha2[0],
+                                  last_request->alpha2[1]))
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+#else
+int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_HOTPLUG */
+
 /* Caller must hold cfg80211_mutex */
 void reg_device_remove(struct wiphy *wiphy)
 {
@@ -2118,6 +2133,8 @@ int __init regulatory_init(void)
        if (IS_ERR(reg_pdev))
                return PTR_ERR(reg_pdev);
 
+       reg_pdev->dev.type = &reg_device_type;
+
        spin_lock_init(&reg_requests_lock);
        spin_lock_init(&reg_pending_beacons_lock);
 
index c4695d0..b67d1c3 100644 (file)
@@ -8,6 +8,7 @@ bool reg_is_valid_request(const char *alpha2);
 
 int regulatory_hint_user(const char *alpha2);
 
+int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 void reg_device_remove(struct wiphy *wiphy);
 
 int __init regulatory_init(void);
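
reg_device_uevent(), declared here and wired up through reg_device_type in reg.c above, appends the COUNTRY variable at the moment the uevent is emitted instead of having call_crda() pass an environment array to kobject_uevent_env(). A minimal sketch of the same hook for a hypothetical driver (all names below are illustrative, not part of this patch):

    #include <linux/device.h>
    #include <linux/kobject.h>

    /* Hypothetical example: attach per-event variables via a device_type. */
    static int foo_uevent(struct device *dev, struct kobj_uevent_env *env)
    {
            /* Anything added here appears in the emitted uevent. */
            if (add_uevent_var(env, "FOO_MODE=%s", "normal"))
                    return -ENOMEM;
            return 0;
    }

    static struct device_type foo_device_type = {
            .uevent = foo_uevent,
    };

    /* After registering the device:
     *      foo_pdev->dev.type = &foo_device_type;
     *      kobject_uevent(&foo_pdev->dev.kobj, KOBJ_CHANGE);
     */
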
index 7620ae2..6a750bc 100644 (file)
@@ -29,29 +29,37 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
 }
 EXPORT_SYMBOL(ieee80211_get_response_rate);
 
-int ieee80211_channel_to_frequency(int chan)
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
 {
-       if (chan < 14)
-               return 2407 + chan * 5;
-
-       if (chan == 14)
-               return 2484;
-
-       /* FIXME: 802.11j 17.3.8.3.2 */
-       return (chan + 1000) * 5;
+       /* see 802.11 17.3.8.3.2 and Annex J
+        * there are overlapping channel numbers in 5GHz and 2GHz bands */
+       if (band == IEEE80211_BAND_5GHZ) {
+               if (chan >= 182 && chan <= 196)
+                       return 4000 + chan * 5;
+               else
+                       return 5000 + chan * 5;
+       } else { /* IEEE80211_BAND_2GHZ */
+               if (chan == 14)
+                       return 2484;
+               else if (chan < 14)
+                       return 2407 + chan * 5;
+               else
+                       return 0; /* not supported */
+       }
 }
 EXPORT_SYMBOL(ieee80211_channel_to_frequency);
 
 int ieee80211_frequency_to_channel(int freq)
 {
+       /* see 802.11 17.3.8.3.2 and Annex J */
        if (freq == 2484)
                return 14;
-
-       if (freq < 2484)
+       else if (freq < 2484)
                return (freq - 2407) / 5;
-
-       /* FIXME: 802.11j 17.3.8.3.2 */
-       return freq/5 - 1000;
+       else if (freq >= 4910 && freq <= 4980)
+               return (freq - 4000) / 5;
+       else
+               return (freq - 5000) / 5;
 }
 EXPORT_SYMBOL(ieee80211_frequency_to_channel);
 
@@ -159,12 +167,15 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 
        /*
         * Disallow pairwise keys with non-zero index unless it's WEP
-        * (because current deployments use pairwise WEP keys with
-        * non-zero indizes but 802.11i clearly specifies to use zero)
+        * or a vendor specific cipher (because current deployments use
+        * pairwise WEP keys with non-zero indices and for vendor specific
+        * ciphers this should be validated at the driver or hardware level
+        * - but 802.11i clearly specifies to use zero)
         */
        if (pairwise && key_idx &&
-           params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
-           params->cipher != WLAN_CIPHER_SUITE_WEP104)
+           ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
+            (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
+            (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
                return -EINVAL;
 
        switch (params->cipher) {
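
The rewritten test rejects a non-zero key index only for the standard pairwise ciphers; WEP and vendor-specific ciphers fall through to the per-cipher checks that follow. The rule as a standalone predicate (the enum is an illustrative stand-in for the WLAN_CIPHER_SUITE_* selectors):

    #include <stdbool.h>

    enum cipher { WEP40, WEP104, TKIP, CCMP, AES_CMAC, VENDOR };

    /* 802.11i mandates index 0 for pairwise keys; only the standard
     * ciphers are policed here, everything else is left to the driver. */
    static bool pairwise_key_idx_ok(bool pairwise, int key_idx, enum cipher c)
    {
            if (pairwise && key_idx &&
                (c == TKIP || c == CCMP || c == AES_CMAC))
                    return false;
            return true;
    }
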
index 3e5dbd4..0bf169b 100644 (file)
@@ -267,9 +267,12 @@ int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
         * -EINVAL for impossible things.
         */
        if (freq->e == 0) {
+               enum ieee80211_band band = IEEE80211_BAND_2GHZ;
                if (freq->m < 0)
                        return 0;
-               return ieee80211_channel_to_frequency(freq->m);
+               if (freq->m > 14)
+                       band = IEEE80211_BAND_5GHZ;
+               return ieee80211_channel_to_frequency(freq->m, band);
        } else {
                int i, div = 1000000;
                for (i = 0; i < freq->e; i++)
@@ -802,11 +805,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
                        return freq;
                if (freq == 0)
                        return -EINVAL;
-               wdev_lock(wdev);
                mutex_lock(&rdev->devlist_mtx);
+               wdev_lock(wdev);
                err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
-               mutex_unlock(&rdev->devlist_mtx);
                wdev_unlock(wdev);
+               mutex_unlock(&rdev->devlist_mtx);
                return err;
        default:
                return -EOPNOTSUPP;
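
Reordering the two lock operations makes this path take rdev->devlist_mtx before the wdev lock and release them in reverse, presumably matching the order used on other paths; taking the same pair of locks in different orders on different paths is what opens an ABBA deadlock. The rule in a standalone pthread sketch (names are illustrative):

    #include <pthread.h>

    static pthread_mutex_t devlist_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t wdev_mtx    = PTHREAD_MUTEX_INITIALIZER;

    /* Every caller acquires devlist_mtx first, then wdev_mtx, and
     * releases them in the opposite order. */
    static void with_both_locks(void (*op)(void))
    {
            pthread_mutex_lock(&devlist_mtx);
            pthread_mutex_lock(&wdev_mtx);
            op();
            pthread_mutex_unlock(&wdev_mtx);
            pthread_mutex_unlock(&devlist_mtx);
    }
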
index 55187c8..4062075 100644 (file)
 #include <net/sock.h>
 #include <net/x25.h>
 
-/*
- * Parse a set of facilities into the facilities structures. Unrecognised
- *     facilities are written to the debug log file.
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilities, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ *  -1 - Parsing error, caller should drop call and clean up
+ *   0 - Parse OK, this skb has no facilities
+ *  >0 - Parse OK, returns the length of the facilities header
+ *
  */
 int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                switch (*p & X25_FAC_CLASS_MASK) {
                case X25_FAC_CLASS_A:
                        if (len < 2)
-                               return 0;
+                               return -1;
                        switch (*p) {
                        case X25_FAC_REVERSE:
                                if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                        break;
                case X25_FAC_CLASS_B:
                        if (len < 3)
-                               return 0;
+                               return -1;
                        switch (*p) {
                        case X25_FAC_PACKET_SIZE:
                                facilities->pacsize_in  = p[1];
@@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                        break;
                case X25_FAC_CLASS_C:
                        if (len < 4)
-                               return 0;
+                               return -1;
                        printk(KERN_DEBUG "X.25: unknown facility %02X, "
                               "values %02X, %02X, %02X\n",
                               p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                        break;
                case X25_FAC_CLASS_D:
                        if (len < p[1] + 2)
-                               return 0;
+                               return -1;
                        switch (*p) {
                        case X25_FAC_CALLING_AE:
                                if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-                                       return 0;
+                                       return -1;
                                dte_facs->calling_len = p[2];
                                memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
                                *vc_fac_mask |= X25_MASK_CALLING_AE;
                                break;
                        case X25_FAC_CALLED_AE:
                                if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-                                       return 0;
+                                       return -1;
                                dte_facs->called_len = p[2];
                                memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
                                *vc_fac_mask |= X25_MASK_CALLED_AE;
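
With the parser now returning -1 for a malformed facilities field, 0 for an absent one and a positive header length otherwise, callers can drop a corrupt call instead of carrying on; the x25_state1_machine() hunk below does exactly that. A compact sketch of the intended three-way handling:

    #include <net/x25.h>

    /* Sketch only: how a caller is expected to treat the return value. */
    static int handle_facilities(struct sk_buff *skb, struct x25_sock *x25)
    {
            int len = x25_parse_facilities(skb, &x25->facilities,
                                           &x25->dte_facilities,
                                           &x25->vc_facil_mask);
            if (len < 0)
                    return -1;              /* malformed: clear the call */
            if (len > 0)
                    skb_pull(skb, len);     /* consume the facilities header */
            return 0;                       /* len == 0: nothing present */
    }
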
index f729f02..15de65f 100644 (file)
@@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 {
        struct x25_address source_addr, dest_addr;
        int len;
+       struct x25_sock *x25 = x25_sk(sk);
 
        switch (frametype) {
                case X25_CALL_ACCEPTED: {
-                       struct x25_sock *x25 = x25_sk(sk);
 
                        x25_stop_timer(sk);
                        x25->condition = 0x00;
@@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                                                &dest_addr);
                        if (len > 0)
                                skb_pull(skb, len);
+                       else if (len < 0)
+                               goto out_clear;
 
                        len = x25_parse_facilities(skb, &x25->facilities,
                                                &x25->dte_facilities,
                                                &x25->vc_facil_mask);
                        if (len > 0)
                                skb_pull(skb, len);
-                       else
-                               return -1;
+                       else if (len < 0)
+                               goto out_clear;
                        /*
                         *      Copy any Call User Data.
                         */
@@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
        }
 
        return 0;
+
+out_clear:
+       x25_write_internal(sk, X25_CLEAR_REQUEST);
+       x25->state = X25_STATE_2;
+       x25_start_t23timer(sk);
+       return 0;
 }
 
 /*
index 4cbc942..2130692 100644 (file)
@@ -396,9 +396,12 @@ void __exit x25_link_free(void)
        write_lock_bh(&x25_neigh_list_lock);
 
        list_for_each_safe(entry, tmp, &x25_neigh_list) {
+               struct net_device *dev;
+
                nb = list_entry(entry, struct x25_neigh, node);
+               dev = nb->dev;
                __x25_remove_neigh(nb);
-               dev_put(nb->dev);
+               dev_put(dev);
        }
        write_unlock_bh(&x25_neigh_list_lock);
 }
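
The fix copies nb->dev into a local before __x25_remove_neigh() drops the neighbour, so dev_put() never reads through a pointer that may already have been freed. The same save-before-release pattern in a standalone form (types and helpers are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int refcnt; };
    struct neigh  { struct device *dev; };

    static void put_device(struct device *d)   { d->refcnt--; }
    static void remove_neigh(struct neigh *nb) { free(nb); }  /* nb->dev is gone after this */

    static void release(struct neigh *nb)
    {
            struct device *dev = nb->dev;   /* save what we still need */

            remove_neigh(nb);               /* may free the container */
            put_device(dev);                /* safe: uses the saved pointer */
    }

    int main(void)
    {
            struct device d = { .refcnt = 1 };
            struct neigh *nb = malloc(sizeof(*nb));

            nb->dev = &d;
            release(nb);
            printf("refcnt = %d\n", d.refcnt);      /* 0 */
            return 0;
    }
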
index c631047..aa429ee 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
                      xfrm_input.o xfrm_output.o xfrm_algo.o \
-                     xfrm_sysctl.o
+                     xfrm_sysctl.o xfrm_replay.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
 obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
index 8b4d6e3..58064d9 100644 (file)
@@ -618,21 +618,21 @@ static int xfrm_alg_name_match(const struct xfrm_algo_desc *entry,
                        (entry->compat && !strcmp(name, entry->compat)));
 }
 
-struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe)
 {
        return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_name_match, name,
                              probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
 
-struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe)
 {
        return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_name_match, name,
                              probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
 
-struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe)
 {
        return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_name_match, name,
                              probe);
@@ -654,7 +654,7 @@ static int xfrm_aead_name_match(const struct xfrm_algo_desc *entry,
               !strcmp(name, entry->name);
 }
 
-struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len, int probe)
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, int probe)
 {
        struct xfrm_aead_name data = {
                .name = name,
index 8e69533..7199d78 100644 (file)
@@ -4,29 +4,32 @@
 #include <linux/xfrm.h>
 #include <linux/socket.h>
 
-static inline unsigned int __xfrm4_addr_hash(xfrm_address_t *addr)
+static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr)
 {
        return ntohl(addr->a4);
 }
 
-static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
+static inline unsigned int __xfrm6_addr_hash(const xfrm_address_t *addr)
 {
        return ntohl(addr->a6[2] ^ addr->a6[3]);
 }
 
-static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
+static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
+                                                   const xfrm_address_t *saddr)
 {
        u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
        return ntohl((__force __be32)sum);
 }
 
-static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
+static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr,
+                                                   const xfrm_address_t *saddr)
 {
        return ntohl(daddr->a6[2] ^ daddr->a6[3] ^
                     saddr->a6[2] ^ saddr->a6[3]);
 }
 
-static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t *saddr,
+static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
+                                          const xfrm_address_t *saddr,
                                           u32 reqid, unsigned short family,
                                           unsigned int hmask)
 {
@@ -42,8 +45,8 @@ static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t
        return (h ^ (h >> 16)) & hmask;
 }
 
-static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
-                                      xfrm_address_t *saddr,
+static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr,
+                                      const xfrm_address_t *saddr,
                                       unsigned short family,
                                       unsigned int hmask)
 {
@@ -60,8 +63,8 @@ static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
 }
 
 static inline unsigned int
-__xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family,
-               unsigned int hmask)
+__xfrm_spi_hash(const xfrm_address_t *daddr, __be32 spi, u8 proto,
+               unsigned short family, unsigned int hmask)
 {
        unsigned int h = (__force u32)spi ^ proto;
        switch (family) {
@@ -80,10 +83,11 @@ static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
        return (index ^ (index >> 8)) & hmask;
 }
 
-static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short family, unsigned int hmask)
+static inline unsigned int __sel_hash(const struct xfrm_selector *sel,
+                                     unsigned short family, unsigned int hmask)
 {
-       xfrm_address_t *daddr = &sel->daddr;
-       xfrm_address_t *saddr = &sel->saddr;
+       const xfrm_address_t *daddr = &sel->daddr;
+       const xfrm_address_t *saddr = &sel->saddr;
        unsigned int h = 0;
 
        switch (family) {
@@ -107,7 +111,9 @@ static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short
        return h & hmask;
 }
 
-static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, unsigned int hmask)
+static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
+                                      const xfrm_address_t *saddr,
+                                      unsigned short family, unsigned int hmask)
 {
        unsigned int h = 0;
 
index 45f1c98..872065c 100644 (file)
@@ -107,6 +107,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        struct net *net = dev_net(skb->dev);
        int err;
        __be32 seq;
+       __be32 seq_hi;
        struct xfrm_state *x;
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
@@ -118,7 +119,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        if (encap_type < 0) {
                async = 1;
                x = xfrm_input_state(skb);
-               seq = XFRM_SKB_CB(skb)->seq.input;
+               seq = XFRM_SKB_CB(skb)->seq.input.low;
                goto resume;
        }
 
@@ -172,7 +173,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        goto drop_unlock;
                }
 
-               if (x->props.replay_window && xfrm_replay_check(x, skb, seq)) {
+               if (x->props.replay_window && x->repl->check(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
@@ -184,7 +185,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                spin_unlock(&x->lock);
 
-               XFRM_SKB_CB(skb)->seq.input = seq;
+               seq_hi = htonl(xfrm_replay_seqhi(x, seq));
+
+               XFRM_SKB_CB(skb)->seq.input.low = seq;
+               XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
                nexthdr = x->type->input(x, skb);
 
@@ -206,8 +210,7 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (x->props.replay_window)
-                       xfrm_replay_advance(x, seq);
+               x->repl->advance(x, seq);
 
                x->curlft.bytes += skb->len;
                x->curlft.packets++;
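
Keeping both halves in the skb control block lets later stages rebuild the full 64-bit extended sequence number; only the low 32 bits travel in the packet, while the high 32 bits are reconstructed locally (and, for ESP with ESN, are covered by the integrity check). A tiny standalone sketch of the reassembly, assuming the high half has already been inferred from the replay window:

    #include <stdint.h>
    #include <stdio.h>

    /* The extended sequence number is simply hi << 32 | low. */
    static uint64_t esn_full(uint32_t hi, uint32_t low)
    {
            return ((uint64_t)hi << 32) | low;
    }

    int main(void)
    {
            printf("0x%llx\n", (unsigned long long)esn_full(0x1, 0x5)); /* 0x100000005 */
            return 0;
    }
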
index 64f2ae1..1aba03f 100644 (file)
@@ -67,17 +67,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                        goto error;
                }
 
-               if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
-                       XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;
-                       if (unlikely(x->replay.oseq == 0)) {
-                               XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
-                               x->replay.oseq--;
-                               xfrm_audit_state_replay_overflow(x, skb);
-                               err = -EOVERFLOW;
-                               goto error;
-                       }
-                       if (xfrm_aevent_is_on(net))
-                               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+               err = x->repl->overflow(x, skb);
+               if (err) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
+                       goto error;
                }
 
                x->curlft.bytes += skb->len;
index 8b3ef40..1ba0258 100644 (file)
@@ -50,37 +50,40 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
-                         struct flowi *fl, int family, int strict);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
 
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir);
 
 static inline int
-__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
-       return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
-               addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
-               !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
-               !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
-               (fl->proto == sel->proto || !sel->proto) &&
-               (fl->oif == sel->ifindex || !sel->ifindex);
+       const struct flowi4 *fl4 = &fl->u.ip4;
+
+       return  addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
+               addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
+               !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
+               !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
+               (fl4->flowi4_proto == sel->proto || !sel->proto) &&
+               (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
 }
 
 static inline int
-__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
-       return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
-               addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
-               !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
-               !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
-               (fl->proto == sel->proto || !sel->proto) &&
-               (fl->oif == sel->ifindex || !sel->ifindex);
+       const struct flowi6 *fl6 = &fl->u.ip6;
+
+       return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
+               addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
+               !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
+               !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
+               (fl6->flowi6_proto == sel->proto || !sel->proto) &&
+               (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
 }
 
-int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
-                   unsigned short family)
+int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+                       unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -92,8 +95,8 @@ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
 }
 
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
-                                                 xfrm_address_t *saddr,
-                                                 xfrm_address_t *daddr,
+                                                 const xfrm_address_t *saddr,
+                                                 const xfrm_address_t *daddr,
                                                  int family)
 {
        struct xfrm_policy_afinfo *afinfo;
@@ -311,7 +314,9 @@ static inline unsigned int idx_hash(struct net *net, u32 index)
        return __idx_hash(index, net->xfrm.policy_idx_hmask);
 }
 
-static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
+static struct hlist_head *policy_hash_bysel(struct net *net,
+                                           const struct xfrm_selector *sel,
+                                           unsigned short family, int dir)
 {
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int hash = __sel_hash(sel, family, hmask);
@@ -321,7 +326,10 @@ static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selecto
                net->xfrm.policy_bydst[dir].table + hash);
 }
 
-static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
+static struct hlist_head *policy_hash_direct(struct net *net,
+                                            const xfrm_address_t *daddr,
+                                            const xfrm_address_t *saddr,
+                                            unsigned short family, int dir)
 {
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
@@ -864,32 +872,33 @@ EXPORT_SYMBOL(xfrm_policy_walk_done);
  *
  * Returns 0 if policy found, else an -errno.
  */
-static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
+static int xfrm_policy_match(const struct xfrm_policy *pol,
+                            const struct flowi *fl,
                             u8 type, u16 family, int dir)
 {
-       struct xfrm_selector *sel = &pol->selector;
+       const struct xfrm_selector *sel = &pol->selector;
        int match, ret = -ESRCH;
 
        if (pol->family != family ||
-           (fl->mark & pol->mark.m) != pol->mark.v ||
+           (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
            pol->type != type)
                return ret;
 
        match = xfrm_selector_match(sel, fl, family);
        if (match)
-               ret = security_xfrm_policy_lookup(pol->security, fl->secid,
+               ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
                                                  dir);
 
        return ret;
 }
 
 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
-                                                    struct flowi *fl,
+                                                    const struct flowi *fl,
                                                     u16 family, u8 dir)
 {
        int err;
        struct xfrm_policy *pol, *ret;
-       xfrm_address_t *daddr, *saddr;
+       const xfrm_address_t *daddr, *saddr;
        struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;
@@ -941,7 +950,7 @@ fail:
 }
 
 static struct xfrm_policy *
-__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
+__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_policy *pol;
@@ -954,7 +963,7 @@ __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
 }
 
 static struct flow_cache_object *
-xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
+xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
                   u8 dir, struct flow_cache_object *old_obj, void *ctx)
 {
        struct xfrm_policy *pol;
@@ -990,7 +999,8 @@ static inline int policy_to_flow_dir(int dir)
        }
 }
 
-static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
+static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
+                                                const struct flowi *fl)
 {
        struct xfrm_policy *pol;
 
@@ -1006,7 +1016,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
                                goto out;
                        }
                        err = security_xfrm_policy_lookup(pol->security,
-                                                     fl->secid,
+                                                     fl->flowi_secid,
                                                      policy_to_flow_dir(dir));
                        if (!err)
                                xfrm_pol_hold(pol);
@@ -1098,7 +1108,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
        return 0;
 }
 
-static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
+static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
 {
        struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
 
@@ -1157,9 +1167,8 @@ xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
 /* Resolve list of templates for the flow, given policy. */
 
 static int
-xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
-                     struct xfrm_state **xfrm,
-                     unsigned short family)
+xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
+                     struct xfrm_state **xfrm, unsigned short family)
 {
        struct net *net = xp_net(policy);
        int nx;
@@ -1214,9 +1223,8 @@ fail:
 }
 
 static int
-xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
-                 struct xfrm_state **xfrm,
-                 unsigned short family)
+xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
+                 struct xfrm_state **xfrm, unsigned short family)
 {
        struct xfrm_state *tp[XFRM_MAX_DEPTH];
        struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
@@ -1256,7 +1264,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
  * still valid.
  */
 
-static inline int xfrm_get_tos(struct flowi *fl, int family)
+static inline int xfrm_get_tos(const struct flowi *fl, int family)
 {
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int tos;
@@ -1340,10 +1348,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        default:
                BUG();
        }
-       xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+       xdst = dst_alloc(dst_ops, 0);
        xfrm_policy_put_afinfo(afinfo);
 
-       xdst->flo.ops = &xfrm_bundle_fc_ops;
+       if (likely(xdst))
+               xdst->flo.ops = &xfrm_bundle_fc_ops;
+       else
+               xdst = ERR_PTR(-ENOBUFS);
 
        return xdst;
 }
@@ -1366,7 +1377,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 }
 
 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
-                               struct flowi *fl)
+                               const struct flowi *fl)
 {
        struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1389,7 +1400,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 
 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                                            struct xfrm_state **xfrm, int nx,
-                                           struct flowi *fl,
+                                           const struct flowi *fl,
                                            struct dst_entry *dst)
 {
        struct net *net = xp_net(policy);
@@ -1505,7 +1516,7 @@ free_dst:
 }
 
 static int inline
-xfrm_dst_alloc_copy(void **target, void *src, int size)
+xfrm_dst_alloc_copy(void **target, const void *src, int size)
 {
        if (!*target) {
                *target = kmalloc(size, GFP_ATOMIC);
@@ -1517,7 +1528,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
 }
 
 static int inline
-xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
+xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1529,7 +1540,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
 }
 
 static int inline
-xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
+xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1539,7 +1550,7 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
 #endif
 }
 
-static int xfrm_expand_policies(struct flowi *fl, u16 family,
+static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                                struct xfrm_policy **pols,
                                int *num_pols, int *num_xfrms)
 {
@@ -1585,7 +1596,7 @@ static int xfrm_expand_policies(struct flowi *fl, u16 family,
 
 static struct xfrm_dst *
 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
-                              struct flowi *fl, u16 family,
+                              const struct flowi *fl, u16 family,
                               struct dst_entry *dst_orig)
 {
        struct net *net = xp_net(pols[0]);
@@ -1628,7 +1639,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 }
 
 static struct flow_cache_object *
-xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
+xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
 {
        struct dst_entry *dst_orig = (struct dst_entry *)ctx;
@@ -1727,18 +1738,36 @@ error:
        return ERR_PTR(err);
 }
 
+static struct dst_entry *make_blackhole(struct net *net, u16 family,
+                                       struct dst_entry *dst_orig)
+{
+       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       struct dst_entry *ret;
+
+       if (!afinfo) {
+               dst_release(dst_orig);
+               ret = ERR_PTR(-EINVAL);
+       } else {
+               ret = afinfo->blackhole_route(net, dst_orig);
+       }
+       xfrm_policy_put_afinfo(afinfo);
+
+       return ret;
+}
+
 /* Main function: finds/creates a bundle for given flow.
  *
  * At the moment we eat a raw IP route. Mostly to speed up lookups
  * on interfaces with disabled IPsec.
  */
-int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
-                 struct sock *sk, int flags)
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                             const struct flowi *fl,
+                             struct sock *sk, int flags)
 {
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct flow_cache_object *flo;
        struct xfrm_dst *xdst;
-       struct dst_entry *dst, *dst_orig = *dst_p, *route;
+       struct dst_entry *dst, *route;
        u16 family = dst_orig->ops->family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
        int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
@@ -1820,9 +1849,10 @@ restart:
                        dst_release(dst);
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-                       return -EREMOTE;
+
+                       return make_blackhole(net, family, dst_orig);
                }
-               if (flags & XFRM_LOOKUP_WAIT) {
+               if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
                        DECLARE_WAITQUEUE(wait, current);
 
                        add_wait_queue(&net->xfrm.km_waitq, &wait);
@@ -1864,47 +1894,33 @@ no_transform:
                goto error;
        } else if (num_xfrms > 0) {
                /* Flow transformed */
-               *dst_p = dst;
                dst_release(dst_orig);
        } else {
                /* Flow passes untransformed */
                dst_release(dst);
+               dst = dst_orig;
        }
 ok:
        xfrm_pols_put(pols, drop_pols);
-       return 0;
+       return dst;
 
 nopol:
-       if (!(flags & XFRM_LOOKUP_ICMP))
+       if (!(flags & XFRM_LOOKUP_ICMP)) {
+               dst = dst_orig;
                goto ok;
+       }
        err = -ENOENT;
 error:
        dst_release(dst);
 dropdst:
        dst_release(dst_orig);
-       *dst_p = NULL;
        xfrm_pols_put(pols, drop_pols);
-       return err;
-}
-EXPORT_SYMBOL(__xfrm_lookup);
-
-int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
-               struct sock *sk, int flags)
-{
-       int err = __xfrm_lookup(net, dst_p, fl, sk, flags);
-
-       if (err == -EREMOTE) {
-               dst_release(*dst_p);
-               *dst_p = NULL;
-               err = -EAGAIN;
-       }
-
-       return err;
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL(xfrm_lookup);
 
 static inline int
-xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
+xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 {
        struct xfrm_state *x;
 
@@ -1923,7 +1939,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
  */
 
 static inline int
-xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
+xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
              unsigned short family)
 {
        if (xfrm_state_kern(x))
@@ -1946,7 +1962,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
  * Otherwise "-2 - errored_index" is returned.
  */
 static inline int
-xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
+xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
               unsigned short family)
 {
        int idx = start;
@@ -1978,13 +1994,13 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
                return -EAFNOSUPPORT;
 
        afinfo->decode_session(skb, fl, reverse);
-       err = security_xfrm_decode_session(skb, &fl->secid);
+       err = security_xfrm_decode_session(skb, &fl->flowi_secid);
        xfrm_policy_put_afinfo(afinfo);
        return err;
 }
 EXPORT_SYMBOL(__xfrm_decode_session);
 
-static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
+static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
 {
        for (; k < sp->len; k++) {
                if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
@@ -2159,7 +2175,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        struct net *net = dev_net(skb->dev);
        struct flowi fl;
        struct dst_entry *dst;
-       int res;
+       int res = 0;
 
        if (xfrm_decode_session(skb, &fl, family) < 0) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
@@ -2167,9 +2183,12 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        }
 
        skb_dst_force(skb);
-       dst = skb_dst(skb);
 
-       res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
+       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+       if (IS_ERR(dst)) {
+               res = 1;
+               dst = NULL;
+       }
        skb_dst_set(skb, dst);
        return res;
 }
@@ -2207,7 +2226,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-       return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
+       return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2279,8 +2298,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  * still valid.
  */
 
-static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
-               struct flowi *fl, int family, int strict)
+static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
 {
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
@@ -2289,26 +2307,12 @@ static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
        if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
            (dst->dev && !netif_running(dst->dev)))
                return 0;
-#ifdef CONFIG_XFRM_SUB_POLICY
-       if (fl) {
-               if (first->origin && !flow_cache_uli_match(first->origin, fl))
-                       return 0;
-               if (first->partner &&
-                   !xfrm_selector_match(first->partner, fl, family))
-                       return 0;
-       }
-#endif
 
        last = NULL;
 
        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
-               if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
-                       return 0;
-               if (fl && pol &&
-                   !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
-                       return 0;
                if (dst->xfrm->km.state != XFRM_STATE_VALID)
                        return 0;
                if (xdst->xfrm_genid != dst->xfrm->genid)
@@ -2317,11 +2321,6 @@ static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
                    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
                        return 0;
 
-               if (strict && fl &&
-                   !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
-                   !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
-                       return 0;
-
                mtu = dst_mtu(dst->child);
                if (xdst->child_mtu_cached != mtu) {
                        last = xdst;
@@ -2732,8 +2731,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
 #endif
 
 #ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
-                                      struct xfrm_selector *sel_tgt)
+static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+                                      const struct xfrm_selector *sel_tgt)
 {
        if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
                if (sel_tgt->family == sel_cmp->family &&
@@ -2753,7 +2752,7 @@ static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
        return 0;
 }
 
-static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
+static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
                                                     u8 dir, u8 type)
 {
        struct xfrm_policy *pol, *ret = NULL;
@@ -2789,7 +2788,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
        return ret;
 }
 
-static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
+static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
 {
        int match = 0;
 
@@ -2859,7 +2858,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
        return 0;
 }
 
-static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
+static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
 {
        int i, j;
 
@@ -2893,7 +2892,7 @@ static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
        return 0;
 }
 
-int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                 struct xfrm_migrate *m, int num_migrate,
                 struct xfrm_kmaddress *k)
 {
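
With xfrm_lookup() now returning the resulting dst_entry (or an ERR_PTR, or a blackhole route when resolution must wait) instead of filling in a struct dst_entry **, callers follow the shape used in __xfrm_route_forward() above. A minimal sketch of a caller under the new convention; reference counting details are glossed over:

    #include <net/xfrm.h>

    static struct dst_entry *resolve_bundle(struct net *net,
                                            struct dst_entry *dst_orig,
                                            const struct flowi *fl,
                                            struct sock *sk)
    {
            struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 0);

            if (IS_ERR(dst))
                    return NULL;    /* dst_orig has already been released */

            /* dst is either dst_orig itself (no transform) or an xfrm bundle. */
            return dst;
    }
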
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
new file mode 100644 (file)
index 0000000..2f5be5b
--- /dev/null
@@ -0,0 +1,534 @@
+/*
+ * xfrm_replay.c - xfrm replay detection, derived from xfrm_state.c.
+ *
+ * Copyright (C) 2010 secunet Security Networks AG
+ * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <net/xfrm.h>
+
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
+{
+       u32 seq, seq_hi, bottom;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+
+       if (!(x->props.flags & XFRM_STATE_ESN))
+               return 0;
+
+       seq = ntohl(net_seq);
+       seq_hi = replay_esn->seq_hi;
+       bottom = replay_esn->seq - replay_esn->replay_window + 1;
+
+       if (likely(replay_esn->seq >= replay_esn->replay_window - 1)) {
+               /* A. same subspace */
+               if (unlikely(seq < bottom))
+                       seq_hi++;
+       } else {
+               /* B. window spans two subspaces */
+               if (unlikely(seq >= bottom))
+                       seq_hi--;
+       }
+
+       return seq_hi;
+}
+
+static void xfrm_replay_notify(struct xfrm_state *x, int event)
+{
+       struct km_event c;
+       /* we send notify messages in case
+        *  1. we updated one of the sequence numbers, and the seqno difference
+        *     is at least x->replay_maxdiff, in this case we also update the
+        *     timeout of our timer function
+        *  2. if x->replay_maxage has elapsed since last update,
+        *     and there were changes
+        *
+        *  The state structure must be locked!
+        */
+
+       switch (event) {
+       case XFRM_REPLAY_UPDATE:
+               if (x->replay_maxdiff &&
+                   (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
+                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
+                       if (x->xflags & XFRM_TIME_DEFER)
+                               event = XFRM_REPLAY_TIMEOUT;
+                       else
+                               return;
+               }
+
+               break;
+
+       case XFRM_REPLAY_TIMEOUT:
+               if (memcmp(&x->replay, &x->preplay,
+                          sizeof(struct xfrm_replay_state)) == 0) {
+                       x->xflags |= XFRM_TIME_DEFER;
+                       return;
+               }
+
+               break;
+       }
+
+       memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
+       c.event = XFRM_MSG_NEWAE;
+       c.data.aevent = event;
+       km_state_notify(x, &c);
+
+       if (x->replay_maxage &&
+           !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+               x->xflags &= ~XFRM_TIME_DEFER;
+}
+
+static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+{
+       int err = 0;
+       struct net *net = xs_net(x);
+
+       if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+               XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
+               if (unlikely(x->replay.oseq == 0)) {
+                       x->replay.oseq--;
+                       xfrm_audit_state_replay_overflow(x, skb);
+                       err = -EOVERFLOW;
+
+                       return err;
+               }
+               if (xfrm_aevent_is_on(net))
+                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+       }
+
+       return err;
+}
+
+static int xfrm_replay_check(struct xfrm_state *x,
+                     struct sk_buff *skb, __be32 net_seq)
+{
+       u32 diff;
+       u32 seq = ntohl(net_seq);
+
+       if (unlikely(seq == 0))
+               goto err;
+
+       if (likely(seq > x->replay.seq))
+               return 0;
+
+       diff = x->replay.seq - seq;
+       if (diff >= min_t(unsigned int, x->props.replay_window,
+                         sizeof(x->replay.bitmap) * 8)) {
+               x->stats.replay_window++;
+               goto err;
+       }
+
+       if (x->replay.bitmap & (1U << diff)) {
+               x->stats.replay++;
+               goto err;
+       }
+       return 0;
+
+err:
+       xfrm_audit_state_replay(x, skb, net_seq);
+       return -EINVAL;
+}
+
+static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
+{
+       u32 diff;
+       u32 seq = ntohl(net_seq);
+
+       if (!x->props.replay_window)
+               return;
+
+       if (seq > x->replay.seq) {
+               diff = seq - x->replay.seq;
+               if (diff < x->props.replay_window)
+                       x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
+               else
+                       x->replay.bitmap = 1;
+               x->replay.seq = seq;
+       } else {
+               diff = x->replay.seq - seq;
+               x->replay.bitmap |= (1U << diff);
+       }
+
+       if (xfrm_aevent_is_on(xs_net(x)))
+               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+}
+
+static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
+{
+       int err = 0;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       struct net *net = xs_net(x);
+
+       if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+               XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+               if (unlikely(replay_esn->oseq == 0)) {
+                       replay_esn->oseq--;
+                       xfrm_audit_state_replay_overflow(x, skb);
+                       err = -EOVERFLOW;
+
+                       return err;
+               }
+               if (xfrm_aevent_is_on(net))
+                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+       }
+
+       return err;
+}
+
+static int xfrm_replay_check_bmp(struct xfrm_state *x,
+                                struct sk_buff *skb, __be32 net_seq)
+{
+       unsigned int bitnr, nr;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       u32 seq = ntohl(net_seq);
+       u32 diff =  replay_esn->seq - seq;
+       u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+       if (unlikely(seq == 0))
+               goto err;
+
+       if (likely(seq > replay_esn->seq))
+               return 0;
+
+       if (diff >= replay_esn->replay_window) {
+               x->stats.replay_window++;
+               goto err;
+       }
+
+       if (pos >= diff) {
+               bitnr = (pos - diff) % replay_esn->replay_window;
+               nr = bitnr >> 5;
+               bitnr = bitnr & 0x1F;
+               if (replay_esn->bmp[nr] & (1U << bitnr))
+                       goto err_replay;
+       } else {
+               bitnr = replay_esn->replay_window - (diff - pos);
+               nr = bitnr >> 5;
+               bitnr = bitnr & 0x1F;
+               if (replay_esn->bmp[nr] & (1U << bitnr))
+                       goto err_replay;
+       }
+       return 0;
+
+err_replay:
+       x->stats.replay++;
+err:
+       xfrm_audit_state_replay(x, skb, net_seq);
+       return -EINVAL;
+}
+
+static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
+{
+       unsigned int bitnr, nr, i;
+       u32 diff;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       u32 seq = ntohl(net_seq);
+       u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+       if (!replay_esn->replay_window)
+               return;
+
+       if (seq > replay_esn->seq) {
+               diff = seq - replay_esn->seq;
+
+               if (diff < replay_esn->replay_window) {
+                       for (i = 1; i < diff; i++) {
+                               bitnr = (pos + i) % replay_esn->replay_window;
+                               nr = bitnr >> 5;
+                               bitnr = bitnr & 0x1F;
+                               replay_esn->bmp[nr] &=  ~(1U << bitnr);
+                       }
+
+                       bitnr = (pos + diff) % replay_esn->replay_window;
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               } else {
+                       nr = replay_esn->replay_window >> 5;
+                       for (i = 0; i <= nr; i++)
+                               replay_esn->bmp[i] = 0;
+
+                       bitnr = (pos + diff) % replay_esn->replay_window;
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               }
+
+               replay_esn->seq = seq;
+       } else {
+               diff = replay_esn->seq - seq;
+
+               if (pos >= diff) {
+                       bitnr = (pos - diff) % replay_esn->replay_window;
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               } else {
+                       bitnr = replay_esn->replay_window - (diff - pos);
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               }
+       }
+
+       if (xfrm_aevent_is_on(xs_net(x)))
+               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+}
+
+static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
+{
+       struct km_event c;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
+
+       /* we send notify messages in case
+        *  1. we updated one of the sequence numbers, and the seqno difference
+        *     is at least x->replay_maxdiff, in this case we also update the
+        *     timeout of our timer function
+        *  2. if x->replay_maxage has elapsed since last update,
+        *     and there were changes
+        *
+        *  The state structure must be locked!
+        */
+
+       switch (event) {
+       case XFRM_REPLAY_UPDATE:
+               if (x->replay_maxdiff &&
+                   (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
+                   (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) {
+                       if (x->xflags & XFRM_TIME_DEFER)
+                               event = XFRM_REPLAY_TIMEOUT;
+                       else
+                               return;
+               }
+
+               break;
+
+       case XFRM_REPLAY_TIMEOUT:
+               if (memcmp(x->replay_esn, x->preplay_esn,
+                          xfrm_replay_state_esn_len(replay_esn)) == 0) {
+                       x->xflags |= XFRM_TIME_DEFER;
+                       return;
+               }
+
+               break;
+       }
+
+       memcpy(x->preplay_esn, x->replay_esn,
+              xfrm_replay_state_esn_len(replay_esn));
+       c.event = XFRM_MSG_NEWAE;
+       c.data.aevent = event;
+       km_state_notify(x, &c);
+
+       if (x->replay_maxage &&
+           !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+               x->xflags &= ~XFRM_TIME_DEFER;
+}
+
+static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
+{
+       int err = 0;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       struct net *net = xs_net(x);
+
+       if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+               XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi;
+
+               if (unlikely(replay_esn->oseq == 0)) {
+                       XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi;
+
+                       if (replay_esn->oseq_hi == 0) {
+                               replay_esn->oseq--;
+                               replay_esn->oseq_hi--;
+                               xfrm_audit_state_replay_overflow(x, skb);
+                               err = -EOVERFLOW;
+
+                               return err;
+                       }
+               }
+               if (xfrm_aevent_is_on(net))
+                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+       }
+
+       return err;
+}
+
+static int xfrm_replay_check_esn(struct xfrm_state *x,
+                                struct sk_buff *skb, __be32 net_seq)
+{
+       unsigned int bitnr, nr;
+       u32 diff;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       u32 seq = ntohl(net_seq);
+       u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+       u32 wsize = replay_esn->replay_window;
+       u32 top = replay_esn->seq;
+       u32 bottom = top - wsize + 1;
+
+       if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
+                    (replay_esn->seq < replay_esn->replay_window - 1)))
+               goto err;
+
+       diff = top - seq;
+
+       if (likely(top >= wsize - 1)) {
+               /* A. same subspace */
+               if (likely(seq > top) || seq < bottom)
+                       return 0;
+       } else {
+               /* B. window spans two subspaces */
+               if (likely(seq > top && seq < bottom))
+                       return 0;
+               if (seq >= bottom)
+                       diff = ~seq + top + 1;
+       }
+
+       if (diff >= replay_esn->replay_window) {
+               x->stats.replay_window++;
+               goto err;
+       }
+
+       if (pos >= diff) {
+               bitnr = (pos - diff) % replay_esn->replay_window;
+               nr = bitnr >> 5;
+               bitnr = bitnr & 0x1F;
+               if (replay_esn->bmp[nr] & (1U << bitnr))
+                       goto err_replay;
+       } else {
+               bitnr = replay_esn->replay_window - (diff - pos);
+               nr = bitnr >> 5;
+               bitnr = bitnr & 0x1F;
+               if (replay_esn->bmp[nr] & (1U << bitnr))
+                       goto err_replay;
+       }
+       return 0;
+
+err_replay:
+       x->stats.replay++;
+err:
+       xfrm_audit_state_replay(x, skb, net_seq);
+       return -EINVAL;
+}
+
+static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+{
+       unsigned int bitnr, nr, i;
+       int wrap;
+       u32 diff, pos, seq, seq_hi;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+
+       if (!replay_esn->replay_window)
+               return;
+
+       seq = ntohl(net_seq);
+       pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+       seq_hi = xfrm_replay_seqhi(x, net_seq);
+       wrap = seq_hi - replay_esn->seq_hi;
+
+       if ((!wrap && seq > replay_esn->seq) || wrap > 0) {
+               if (likely(!wrap))
+                       diff = seq - replay_esn->seq;
+               else
+                       diff = ~replay_esn->seq + seq + 1;
+
+               if (diff < replay_esn->replay_window) {
+                       for (i = 1; i < diff; i++) {
+                               bitnr = (pos + i) % replay_esn->replay_window;
+                               nr = bitnr >> 5;
+                               bitnr = bitnr & 0x1F;
+                               replay_esn->bmp[nr] &=  ~(1U << bitnr);
+                       }
+
+                       bitnr = (pos + diff) % replay_esn->replay_window;
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               } else {
+                       nr = replay_esn->replay_window >> 5;
+                       for (i = 0; i <= nr; i++)
+                               replay_esn->bmp[i] = 0;
+
+                       bitnr = (pos + diff) % replay_esn->replay_window;
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               }
+
+               replay_esn->seq = seq;
+
+               if (unlikely(wrap > 0))
+                       replay_esn->seq_hi++;
+       } else {
+               diff = replay_esn->seq - seq;
+
+               if (pos >= diff) {
+                       bitnr = (pos - diff) % replay_esn->replay_window;
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               } else {
+                       bitnr = replay_esn->replay_window - (diff - pos);
+                       nr = bitnr >> 5;
+                       bitnr = bitnr & 0x1F;
+                       replay_esn->bmp[nr] |= (1U << bitnr);
+               }
+       }
+
+       if (xfrm_aevent_is_on(xs_net(x)))
+               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+}
+
+static struct xfrm_replay xfrm_replay_legacy = {
+       .advance        = xfrm_replay_advance,
+       .check          = xfrm_replay_check,
+       .notify         = xfrm_replay_notify,
+       .overflow       = xfrm_replay_overflow,
+};
+
+static struct xfrm_replay xfrm_replay_bmp = {
+       .advance        = xfrm_replay_advance_bmp,
+       .check          = xfrm_replay_check_bmp,
+       .notify         = xfrm_replay_notify_bmp,
+       .overflow       = xfrm_replay_overflow_bmp,
+};
+
+static struct xfrm_replay xfrm_replay_esn = {
+       .advance        = xfrm_replay_advance_esn,
+       .check          = xfrm_replay_check_esn,
+       .notify         = xfrm_replay_notify_bmp,
+       .overflow       = xfrm_replay_overflow_esn,
+};
+
+int xfrm_init_replay(struct xfrm_state *x)
+{
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+
+       if (replay_esn) {
+               if (replay_esn->replay_window >
+                   replay_esn->bmp_len * sizeof(__u32))
+                       return -EINVAL;
+
+               if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
+                       x->repl = &xfrm_replay_esn;
+               else
+                       x->repl = &xfrm_replay_bmp;
+       } else
+               x->repl = &xfrm_replay_legacy;
+
+       return 0;
+}
+EXPORT_SYMBOL(xfrm_init_replay);
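
The three method tables above follow the usual kernel ops-table pattern: the state picks one table at setup time and every caller dispatches through x->repl without caring which replay scheme is active. A hedged sketch of the same pattern with illustrative names (not the kernel structures):

#include <stdint.h>

struct replay_ops {
	int (*check)(uint32_t seq);
};

static int bmp_check(uint32_t seq) { return seq != 0; }
static int esn_check(uint32_t seq) { return 1; /* 0 is legal once seq_hi > 0 */ }

static const struct replay_ops bmp_ops = { .check = bmp_check };
static const struct replay_ops esn_ops = { .check = esn_check };

/* Mirrors xfrm_init_replay(): choose the table once, then only use the pointer. */
static const struct replay_ops *pick_replay_ops(int want_esn)
{
	return want_esn ? &esn_ops : &bmp_ops;
}
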
index 220ebc0..d575f05 100644 (file)
@@ -42,16 +42,9 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
-#ifdef CONFIG_AUDITSYSCALL
-static void xfrm_audit_state_replay(struct xfrm_state *x,
-                                   struct sk_buff *skb, __be32 net_seq);
-#else
-#define xfrm_audit_state_replay(x, s, sq)      do { ; } while (0)
-#endif /* CONFIG_AUDITSYSCALL */
-
 static inline unsigned int xfrm_dst_hash(struct net *net,
-                                        xfrm_address_t *daddr,
-                                        xfrm_address_t *saddr,
+                                        const xfrm_address_t *daddr,
+                                        const xfrm_address_t *saddr,
                                         u32 reqid,
                                         unsigned short family)
 {
@@ -59,15 +52,16 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
 }
 
 static inline unsigned int xfrm_src_hash(struct net *net,
-                                        xfrm_address_t *daddr,
-                                        xfrm_address_t *saddr,
+                                        const xfrm_address_t *daddr,
+                                        const xfrm_address_t *saddr,
                                         unsigned short family)
 {
        return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
 }
 
 static inline unsigned int
-xfrm_spi_hash(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
+             __be32 spi, u8 proto, unsigned short family)
 {
        return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
 }
@@ -362,6 +356,8 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
        kfree(x->calg);
        kfree(x->encap);
        kfree(x->coaddr);
+       kfree(x->replay_esn);
+       kfree(x->preplay_esn);
        if (x->inner_mode)
                xfrm_put_mode(x->inner_mode);
        if (x->inner_mode_iaf)
@@ -656,9 +652,9 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 EXPORT_SYMBOL(xfrm_sad_getinfo);
 
 static int
-xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
-                   struct xfrm_tmpl *tmpl,
-                   xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
+                   const struct xfrm_tmpl *tmpl,
+                   const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                    unsigned short family)
 {
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
@@ -677,7 +673,10 @@ xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
        return 0;
 }
 
-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+                                             const xfrm_address_t *daddr,
+                                             __be32 spi, u8 proto,
+                                             unsigned short family)
 {
        unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
        struct xfrm_state *x;
@@ -699,7 +698,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_ad
        return NULL;
 }
 
-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+                                                    const xfrm_address_t *daddr,
+                                                    const xfrm_address_t *saddr,
+                                                    u8 proto, unsigned short family)
 {
        unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
        struct xfrm_state *x;
@@ -746,8 +748,7 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
 }
 
 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
-                              struct flowi *fl, unsigned short family,
-                              xfrm_address_t *daddr, xfrm_address_t *saddr,
+                              const struct flowi *fl, unsigned short family,
                               struct xfrm_state **best, int *acq_in_progress,
                               int *error)
 {
@@ -784,8 +785,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
 }
 
 struct xfrm_state *
-xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
-               struct flowi *fl, struct xfrm_tmpl *tmpl,
+xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+               const struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
 {
@@ -813,7 +814,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family,
                                           &best, &acquire_in_progress, &error);
        }
        if (best)
@@ -829,7 +830,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family,
                                           &best, &acquire_in_progress, &error);
        }
 
@@ -853,7 +854,7 @@ found:
                xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
                memcpy(&x->mark, &pol->mark, sizeof(x->mark));
 
-               error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
+               error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
                if (error) {
                        x->km.state = XFRM_STATE_DEAD;
                        to_put = x;
@@ -991,7 +992,11 @@ void xfrm_state_insert(struct xfrm_state *x)
 EXPORT_SYMBOL(xfrm_state_insert);
 
 /* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
+static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+                                         unsigned short family, u8 mode,
+                                         u32 reqid, u8 proto,
+                                         const xfrm_address_t *daddr,
+                                         const xfrm_address_t *saddr, int create)
 {
        unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
        struct hlist_node *entry;
@@ -1369,7 +1374,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
 EXPORT_SYMBOL(xfrm_state_check_expire);
 
 struct xfrm_state *
-xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
+xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
                  u8 proto, unsigned short family)
 {
        struct xfrm_state *x;
@@ -1383,7 +1388,7 @@ EXPORT_SYMBOL(xfrm_state_lookup);
 
 struct xfrm_state *
 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-                        xfrm_address_t *daddr, xfrm_address_t *saddr,
+                        const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
 {
        struct xfrm_state *x;
@@ -1397,7 +1402,7 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
 
 struct xfrm_state *
 xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
-             xfrm_address_t *daddr, xfrm_address_t *saddr,
+             const xfrm_address_t *daddr, const xfrm_address_t *saddr,
              int create, unsigned short family)
 {
        struct xfrm_state *x;
@@ -1609,54 +1614,6 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk)
 }
 EXPORT_SYMBOL(xfrm_state_walk_done);
 
-
-void xfrm_replay_notify(struct xfrm_state *x, int event)
-{
-       struct km_event c;
-       /* we send notify messages in case
-        *  1. we updated on of the sequence numbers, and the seqno difference
-        *     is at least x->replay_maxdiff, in this case we also update the
-        *     timeout of our timer function
-        *  2. if x->replay_maxage has elapsed since last update,
-        *     and there were changes
-        *
-        *  The state structure must be locked!
-        */
-
-       switch (event) {
-       case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
-                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
-                       if (x->xflags & XFRM_TIME_DEFER)
-                               event = XFRM_REPLAY_TIMEOUT;
-                       else
-                               return;
-               }
-
-               break;
-
-       case XFRM_REPLAY_TIMEOUT:
-               if ((x->replay.seq == x->preplay.seq) &&
-                   (x->replay.bitmap == x->preplay.bitmap) &&
-                   (x->replay.oseq == x->preplay.oseq)) {
-                       x->xflags |= XFRM_TIME_DEFER;
-                       return;
-               }
-
-               break;
-       }
-
-       memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
-       c.event = XFRM_MSG_NEWAE;
-       c.data.aevent = event;
-       km_state_notify(x, &c);
-
-       if (x->replay_maxage &&
-           !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
-               x->xflags &= ~XFRM_TIME_DEFER;
-}
-
 static void xfrm_replay_timer_handler(unsigned long data)
 {
        struct xfrm_state *x = (struct xfrm_state*)data;
@@ -1665,7 +1622,7 @@ static void xfrm_replay_timer_handler(unsigned long data)
 
        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on(xs_net(x)))
-                       xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
+                       x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }
@@ -1673,61 +1630,10 @@ static void xfrm_replay_timer_handler(unsigned long data)
        spin_unlock(&x->lock);
 }
 
-int xfrm_replay_check(struct xfrm_state *x,
-                     struct sk_buff *skb, __be32 net_seq)
-{
-       u32 diff;
-       u32 seq = ntohl(net_seq);
-
-       if (unlikely(seq == 0))
-               goto err;
-
-       if (likely(seq > x->replay.seq))
-               return 0;
-
-       diff = x->replay.seq - seq;
-       if (diff >= min_t(unsigned int, x->props.replay_window,
-                         sizeof(x->replay.bitmap) * 8)) {
-               x->stats.replay_window++;
-               goto err;
-       }
-
-       if (x->replay.bitmap & (1U << diff)) {
-               x->stats.replay++;
-               goto err;
-       }
-       return 0;
-
-err:
-       xfrm_audit_state_replay(x, skb, net_seq);
-       return -EINVAL;
-}
-
-void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
-{
-       u32 diff;
-       u32 seq = ntohl(net_seq);
-
-       if (seq > x->replay.seq) {
-               diff = seq - x->replay.seq;
-               if (diff < x->props.replay_window)
-                       x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
-               else
-                       x->replay.bitmap = 1;
-               x->replay.seq = seq;
-       } else {
-               diff = x->replay.seq - seq;
-               x->replay.bitmap |= (1U << diff);
-       }
-
-       if (xfrm_aevent_is_on(xs_net(x)))
-               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
-}
-
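
For reference, the legacy logic being removed here (and relocated into the new replay code above) is the classic 32-bit shift-register window: newer sequence numbers shift the bitmap left, older ones inside the window set their bit. A hedged standalone restatement of that behaviour:

#include <stdint.h>

struct legacy_win {
	uint32_t seq;		/* highest sequence number seen */
	uint32_t bitmap;	/* bit 0 == seq, bit n == seq - n */
};

static void legacy_advance(struct legacy_win *w, uint32_t seq, uint32_t window)
{
	if (seq > w->seq) {
		uint32_t diff = seq - w->seq;

		w->bitmap = (diff < window) ? (w->bitmap << diff) | 1 : 1;
		w->seq = seq;
	} else {
		w->bitmap |= 1U << (w->seq - seq);
	}
}
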
 static LIST_HEAD(xfrm_km_list);
 static DEFINE_RWLOCK(xfrm_km_lock);
 
-void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct xfrm_mgr *km;
 
@@ -1738,7 +1644,7 @@ void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
        read_unlock(&xfrm_km_lock);
 }
 
-void km_state_notify(struct xfrm_state *x, struct km_event *c)
+void km_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
@@ -1819,9 +1725,9 @@ void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
 EXPORT_SYMBOL(km_policy_expired);
 
 #ifdef CONFIG_XFRM_MIGRATE
-int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-              struct xfrm_migrate *m, int num_migrate,
-              struct xfrm_kmaddress *k)
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+              const struct xfrm_migrate *m, int num_migrate,
+              const struct xfrm_kmaddress *k)
 {
        int err = -EINVAL;
        int ret;
@@ -2236,7 +2142,7 @@ void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
 }
 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
 
-static void xfrm_audit_state_replay(struct xfrm_state *x,
+void xfrm_audit_state_replay(struct xfrm_state *x,
                             struct sk_buff *skb, __be32 net_seq)
 {
        struct audit_buffer *audit_buf;
@@ -2251,6 +2157,7 @@ static void xfrm_audit_state_replay(struct xfrm_state *x,
                         spi, spi, ntohl(net_seq));
        audit_log_end(audit_buf);
 }
+EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
 
 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
 {
index 6129196..706385a 100644 (file)
@@ -119,6 +119,19 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
        return 0;
 }
 
+static inline int verify_replay(struct xfrm_usersa_info *p,
+                               struct nlattr **attrs)
+{
+       struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+
+       if (!rt)
+               return 0;
+
+       if (p->replay_window != 0)
+               return -EINVAL;
+
+       return 0;
+}
 
 static int verify_newsa_info(struct xfrm_usersa_info *p,
                             struct nlattr **attrs)
@@ -214,6 +227,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                goto out;
        if ((err = verify_sec_ctx_len(attrs)))
                goto out;
+       if ((err = verify_replay(p, attrs)))
+               goto out;
 
        err = -EINVAL;
        switch (p->mode) {
@@ -234,7 +249,7 @@ out:
 }
 
 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
-                          struct xfrm_algo_desc *(*get_byname)(char *, int),
+                          struct xfrm_algo_desc *(*get_byname)(const char *, int),
                           struct nlattr *rta)
 {
        struct xfrm_algo *p, *ualg;
@@ -345,6 +360,33 @@ static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
        return 0;
 }
 
+static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
+                                      struct xfrm_replay_state_esn **preplay_esn,
+                                      struct nlattr *rta)
+{
+       struct xfrm_replay_state_esn *p, *pp, *up;
+
+       if (!rta)
+               return 0;
+
+       up = nla_data(rta);
+
+       p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+       if (!pp) {
+               kfree(p);
+               return -ENOMEM;
+       }
+
+       *replay_esn = p;
+       *preplay_esn = pp;
+
+       return 0;
+}
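
The helper above follows the duplicate-both-or-free-everything pattern: two copies of the same netlink payload are made (the working state and its "preplay" snapshot), and a failure on the second copy unwinds the first. A hedged user-space analogue with illustrative names:

#include <stdlib.h>
#include <string.h>

static int dup_pair(const void *src, size_t len, void **a, void **b)
{
	void *p = malloc(len);
	void *pp;

	if (!p)
		return -1;
	memcpy(p, src, len);

	pp = malloc(len);
	if (!pp) {
		free(p);	/* unwind the first copy on partial failure */
		return -1;
	}
	memcpy(pp, src, len);

	*a = p;
	*b = pp;
	return 0;
}
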
+
 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
 {
        int len = 0;
@@ -380,10 +422,20 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
 {
        struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
+       struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
        struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
        struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
        struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
 
+       if (re) {
+               struct xfrm_replay_state_esn *replay_esn;
+               replay_esn = nla_data(re);
+               memcpy(x->replay_esn, replay_esn,
+                      xfrm_replay_state_esn_len(replay_esn));
+               memcpy(x->preplay_esn, replay_esn,
+                      xfrm_replay_state_esn_len(replay_esn));
+       }
+
        if (rp) {
                struct xfrm_replay_state *replay;
                replay = nla_data(rp);
@@ -467,16 +519,19 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
            security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
                goto error;
 
+       if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
+                                              attrs[XFRMA_REPLAY_ESN_VAL])))
+               goto error;
+
        x->km.seq = p->seq;
        x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
        /* sysctl_xfrm_aevent_etime is in 100ms units */
        x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
-       x->preplay.bitmap = 0;
-       x->preplay.seq = x->replay.seq+x->replay_maxdiff;
-       x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
 
-       /* override default values from above */
+       if ((err = xfrm_init_replay(x)))
+               goto error;
 
+       /* override default values from above */
        xfrm_update_ae_params(x, attrs);
 
        return x;
@@ -497,9 +552,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_state *x;
        int err;
        struct km_event c;
-       uid_t loginuid = NETLINK_CB(skb).loginuid;
-       u32 sessionid = NETLINK_CB(skb).sessionid;
-       u32 sid = NETLINK_CB(skb).sid;
+       uid_t loginuid = audit_get_loginuid(current);
+       u32 sessionid = audit_get_sessionid(current);
+       u32 sid;
 
        err = verify_newsa_info(p, attrs);
        if (err)
@@ -515,6 +570,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        else
                err = xfrm_state_update(x);
 
+       security_task_getsecid(current, &sid);
        xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
 
        if (err < 0) {
@@ -575,9 +631,9 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
-       uid_t loginuid = NETLINK_CB(skb).loginuid;
-       u32 sessionid = NETLINK_CB(skb).sessionid;
-       u32 sid = NETLINK_CB(skb).sid;
+       uid_t loginuid = audit_get_loginuid(current);
+       u32 sessionid = audit_get_sessionid(current);
+       u32 sid;
 
        x = xfrm_user_state_lookup(net, p, attrs, &err);
        if (x == NULL)
@@ -602,6 +658,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_notify(x, &c);
 
 out:
+       security_task_getsecid(current, &sid);
        xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
        xfrm_state_put(x);
        return err;
@@ -705,6 +762,10 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
        if (xfrm_mark_put(skb, &x->mark))
                goto nla_put_failure;
 
+       if (x->replay_esn)
+               NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
+                       xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
+
        if (x->security && copy_sec_ctx(x->security, skb) < 0)
                goto nla_put_failure;
 
@@ -1265,9 +1326,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        int err;
        int excl;
-       uid_t loginuid = NETLINK_CB(skb).loginuid;
-       u32 sessionid = NETLINK_CB(skb).sessionid;
-       u32 sid = NETLINK_CB(skb).sid;
+       uid_t loginuid = audit_get_loginuid(current);
+       u32 sessionid = audit_get_sessionid(current);
+       u32 sid;
 
        err = verify_newpolicy_info(p);
        if (err)
@@ -1286,6 +1347,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
         * a type XFRM_MSG_UPDPOLICY - JHS */
        excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
        err = xfrm_policy_insert(p->dir, xp, excl);
+       security_task_getsecid(current, &sid);
        xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
 
        if (err) {
@@ -1522,10 +1584,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                                            NETLINK_CB(skb).pid);
                }
        } else {
-               uid_t loginuid = NETLINK_CB(skb).loginuid;
-               u32 sessionid = NETLINK_CB(skb).sessionid;
-               u32 sid = NETLINK_CB(skb).sid;
+               uid_t loginuid = audit_get_loginuid(current);
+               u32 sessionid = audit_get_sessionid(current);
+               u32 sid;
 
+               security_task_getsecid(current, &sid);
                xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
                                         sid);
 
@@ -1553,9 +1616,9 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_audit audit_info;
        int err;
 
-       audit_info.loginuid = NETLINK_CB(skb).loginuid;
-       audit_info.sessionid = NETLINK_CB(skb).sessionid;
-       audit_info.secid = NETLINK_CB(skb).sid;
+       audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
+       security_task_getsecid(current, &audit_info.secid);
        err = xfrm_state_flush(net, p->proto, &audit_info);
        if (err) {
                if (err == -ESRCH) /* empty table */
@@ -1572,17 +1635,21 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        return 0;
 }
 
-static inline size_t xfrm_aevent_msgsize(void)
+static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
 {
+       size_t replay_size = x->replay_esn ?
+                             xfrm_replay_state_esn_len(x->replay_esn) :
+                             sizeof(struct xfrm_replay_state);
+
        return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
-              + nla_total_size(sizeof(struct xfrm_replay_state))
+              + nla_total_size(replay_size)
               + nla_total_size(sizeof(struct xfrm_lifetime_cur))
               + nla_total_size(sizeof(struct xfrm_mark))
               + nla_total_size(4) /* XFRM_AE_RTHR */
               + nla_total_size(4); /* XFRM_AE_ETHR */
 }
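
The sizing arithmetic above adds one nla_total_size() term per attribute that build_aevent() may emit; each netlink attribute is a 4-byte header plus payload, both rounded up to 4-byte alignment. A hedged standalone restatement of that rounding (the macro values mirror the standard netlink ones, repeated here only for illustration):

#include <stddef.h>

#define MY_NLA_ALIGNTO	4
#define MY_NLA_ALIGN(l)	(((l) + MY_NLA_ALIGNTO - 1) & ~(MY_NLA_ALIGNTO - 1))
#define MY_NLA_HDRLEN	MY_NLA_ALIGN(4)		/* sizeof(struct nlattr) == 4 */

/* Space one attribute of 'payload' bytes consumes in the message. */
static size_t my_nla_total_size(size_t payload)
{
	return MY_NLA_ALIGN(MY_NLA_HDRLEN + payload);
}
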
 
-static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
+static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
 {
        struct xfrm_aevent_id *id;
        struct nlmsghdr *nlh;
@@ -1600,7 +1667,13 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
        id->reqid = x->props.reqid;
        id->flags = c->data.aevent;
 
-       NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
+       if (x->replay_esn)
+               NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
+                       xfrm_replay_state_esn_len(x->replay_esn),
+                       x->replay_esn);
+       else
+               NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
+
        NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
 
        if (id->flags & XFRM_AE_RTHR)
@@ -1633,16 +1706,16 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_aevent_id *p = nlmsg_data(nlh);
        struct xfrm_usersa_id *id = &p->sa_id;
 
-       r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
-       if (r_skb == NULL)
-               return -ENOMEM;
-
        mark = xfrm_mark_get(attrs, &m);
 
        x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
-       if (x == NULL) {
-               kfree_skb(r_skb);
+       if (x == NULL)
                return -ESRCH;
+
+       r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
+       if (r_skb == NULL) {
+               xfrm_state_put(x);
+               return -ENOMEM;
        }
 
        /*
@@ -1674,9 +1747,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_mark m;
        struct xfrm_aevent_id *p = nlmsg_data(nlh);
        struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
+       struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
        struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
 
-       if (!lt && !rp)
+       if (!lt && !rp && !re)
                return err;
 
        /* pedantic mode - thou shalt sayeth replaceth */
@@ -1720,9 +1794,9 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (err)
                return err;
 
-       audit_info.loginuid = NETLINK_CB(skb).loginuid;
-       audit_info.sessionid = NETLINK_CB(skb).sessionid;
-       audit_info.secid = NETLINK_CB(skb).sid;
+       audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
+       security_task_getsecid(current, &audit_info.secid);
        err = xfrm_policy_flush(net, type, &audit_info);
        if (err) {
                if (err == -ESRCH) /* empty table */
@@ -1789,9 +1863,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = 0;
        if (up->hard) {
-               uid_t loginuid = NETLINK_CB(skb).loginuid;
-               uid_t sessionid = NETLINK_CB(skb).sessionid;
-               u32 sid = NETLINK_CB(skb).sid;
+               uid_t loginuid = audit_get_loginuid(current);
+               u32 sessionid = audit_get_sessionid(current);
+               u32 sid;
+
+               security_task_getsecid(current, &sid);
                xfrm_policy_delete(xp, p->dir);
                xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
 
@@ -1830,9 +1906,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_expired(x, ue->hard, current->pid);
 
        if (ue->hard) {
-               uid_t loginuid = NETLINK_CB(skb).loginuid;
-               uid_t sessionid = NETLINK_CB(skb).sessionid;
-               u32 sid = NETLINK_CB(skb).sid;
+               uid_t loginuid = audit_get_loginuid(current);
+               u32 sessionid = audit_get_sessionid(current);
+               u32 sid;
+
+               security_task_getsecid(current, &sid);
                __xfrm_state_delete(x);
                xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
        }
@@ -1986,7 +2064,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
 #endif
 
 #ifdef CONFIG_XFRM_MIGRATE
-static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
+static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
 {
        struct xfrm_user_migrate um;
 
@@ -2004,7 +2082,7 @@ static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
        return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
 }
 
-static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
+static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
 {
        struct xfrm_user_kmaddress uk;
 
@@ -2025,11 +2103,11 @@ static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
              + userpolicy_type_attrsize();
 }
 
-static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
-                        int num_migrate, struct xfrm_kmaddress *k,
-                        struct xfrm_selector *sel, u8 dir, u8 type)
+static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
+                        int num_migrate, const struct xfrm_kmaddress *k,
+                        const struct xfrm_selector *sel, u8 dir, u8 type)
 {
-       struct xfrm_migrate *mp;
+       const struct xfrm_migrate *mp;
        struct xfrm_userpolicy_id *pol_id;
        struct nlmsghdr *nlh;
        int i;
@@ -2061,9 +2139,9 @@ nlmsg_failure:
        return -EMSGSIZE;
 }
 
-static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                            struct xfrm_migrate *m, int num_migrate,
-                            struct xfrm_kmaddress *k)
+static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                            const struct xfrm_migrate *m, int num_migrate,
+                            const struct xfrm_kmaddress *k)
 {
        struct net *net = &init_net;
        struct sk_buff *skb;
@@ -2079,9 +2157,9 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
 }
 #else
-static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                            struct xfrm_migrate *m, int num_migrate,
-                            struct xfrm_kmaddress *k)
+static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                            const struct xfrm_migrate *m, int num_migrate,
+                            const struct xfrm_kmaddress *k)
 {
        return -ENOPROTOOPT;
 }
@@ -2137,6 +2215,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
        [XFRMA_KMADDRESS]       = { .len = sizeof(struct xfrm_user_kmaddress) },
        [XFRMA_MARK]            = { .len = sizeof(struct xfrm_mark) },
        [XFRMA_TFCPAD]          = { .type = NLA_U32 },
+       [XFRMA_REPLAY_ESN_VAL]  = { .len = sizeof(struct xfrm_replay_state_esn) },
 };
 
 static struct xfrm_link {
@@ -2220,7 +2299,7 @@ static inline size_t xfrm_expire_msgsize(void)
               + nla_total_size(sizeof(struct xfrm_mark));
 }
 
-static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
+static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
 {
        struct xfrm_user_expire *ue;
        struct nlmsghdr *nlh;
@@ -2242,7 +2321,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
@@ -2259,12 +2338,12 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
 }
 
-static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
 
-       skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
+       skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;
 
@@ -2274,7 +2353,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
 }
 
-static int xfrm_notify_sa_flush(struct km_event *c)
+static int xfrm_notify_sa_flush(const struct km_event *c)
 {
        struct net *net = c->net;
        struct xfrm_usersa_flush *p;
@@ -2318,6 +2397,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
                l += nla_total_size(sizeof(*x->encap));
        if (x->tfcpad)
                l += nla_total_size(sizeof(x->tfcpad));
+       if (x->replay_esn)
+               l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
        if (x->security)
                l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
                                    x->security->ctx_len);
@@ -2330,7 +2411,7 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
        return l;
 }
 
-static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
+static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = xs_net(x);
        struct xfrm_usersa_info *p;
@@ -2387,7 +2468,7 @@ nla_put_failure:
        return -1;
 }
 
-static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
 
        switch (c->event) {
@@ -2546,7 +2627,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
 }
 
 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
-                          int dir, struct km_event *c)
+                          int dir, const struct km_event *c)
 {
        struct xfrm_user_polexpire *upe;
        struct nlmsghdr *nlh;
@@ -2576,7 +2657,7 @@ nlmsg_failure:
        return -EMSGSIZE;
 }
 
-static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct net *net = xp_net(xp);
        struct sk_buff *skb;
@@ -2591,7 +2672,7 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_eve
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
 }
 
-static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct net *net = xp_net(xp);
        struct xfrm_userpolicy_info *p;
@@ -2656,7 +2737,7 @@ nlmsg_failure:
        return -1;
 }
 
-static int xfrm_notify_policy_flush(struct km_event *c)
+static int xfrm_notify_policy_flush(const struct km_event *c)
 {
        struct net *net = c->net;
        struct nlmsghdr *nlh;
@@ -2681,7 +2762,7 @@ nlmsg_failure:
        return -1;
 }
 
-static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
 
        switch (c->event) {
index c9a16ab..6c94c6c 100644 (file)
@@ -315,6 +315,7 @@ static void parse_dep_file(void *map, size_t len)
        char *end = m + len;
        char *p;
        char s[PATH_MAX];
+       int first;
 
        p = strchr(m, ':');
        if (!p) {
@@ -327,6 +328,7 @@ static void parse_dep_file(void *map, size_t len)
 
        clear_config();
 
+       first = 1;
        while (m < end) {
                while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
                        m++;
@@ -340,9 +342,17 @@ static void parse_dep_file(void *map, size_t len)
                if (strrcmp(s, "include/generated/autoconf.h") &&
                    strrcmp(s, "arch/um/include/uml-config.h") &&
                    strrcmp(s, ".ver")) {
-                       printf("  %s \\\n", s);
+                       /*
+                        * Do not output the first dependency (the
+                        * source file), so that kbuild is not confused
+                        * if a .c file is rewritten into .S or vice
+                        * versa.
+                        */
+                       if (!first)
+                               printf("  %s \\\n", s);
                        do_config_file(s);
                }
+               first = 0;
                m = p + 1;
        }
        printf("\n%s: $(deps_%s)\n\n", target, target);
index b0b2357..f6cbc3d 100644 (file)
@@ -238,12 +238,12 @@ EOF
 fi
 
 # Build header package
-find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$
-find arch/x86/include include scripts -type f >> /tmp/files$$
+(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
+(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
 (cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
-tar -c -f - -T /tmp/files$$ | (cd $destdir; tar -xf -)
+(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
 (cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
 rm -f /tmp/files$$ /tmp/objfiles$$
 arch=$(dpkg --print-architecture)
index 2a5df2b..b8eeaee 100644 (file)
@@ -760,7 +760,7 @@ static int cap_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 sk_sid, u8 dir)
 
 static int cap_xfrm_state_pol_flow_match(struct xfrm_state *x,
                                         struct xfrm_policy *xp,
-                                        struct flowi *fl)
+                                        const struct flowi *fl)
 {
        return 1;
 }
index 64c2ed9..a83e607 100644 (file)
@@ -52,13 +52,12 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
 
 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-       NETLINK_CB(skb).eff_cap = current_cap();
        return 0;
 }
 
 int cap_netlink_recv(struct sk_buff *skb, int cap)
 {
-       if (!cap_raised(NETLINK_CB(skb).eff_cap, cap))
+       if (!cap_raised(current_cap(), cap))
                return -EPERM;
        return 0;
 }
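
The change above stops trusting a capability set recorded in the skb and instead tests the current task's capabilities directly; the cap_raised() check is, conceptually, a bit test against a capability mask. A hedged illustration of that test, simplified to a single 64-bit mask:

#include <stdint.h>

/* Is capability number 'cap' raised in the mask? */
static int cap_is_raised(uint64_t caps, int cap)
{
	return (caps >> cap) & 1;
}
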
index 6c94105..1bf090a 100644 (file)
@@ -13,8 +13,8 @@ obj-y := \
        request_key_auth.o \
        user_defined.o
 
-obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
similarity index 99%
rename from security/keys/encrypted_defined.c
rename to security/keys/encrypted.c
index 28791a6..9e7e4ce 100644 (file)
@@ -30,7 +30,7 @@
 #include <crypto/sha.h>
 #include <crypto/aes.h>
 
-#include "encrypted_defined.h"
+#include "encrypted.h"
 
 static const char KEY_TRUSTED_PREFIX[] = "trusted:";
 static const char KEY_USER_PREFIX[] = "user:";
@@ -888,6 +888,7 @@ static int __init init_encrypted(void)
 out:
        encrypted_shash_release();
        return ret;
+
 }
 
 static void __exit cleanup_encrypted(void)
index edfa50d..a52aa7c 100644 (file)
@@ -87,13 +87,13 @@ extern void key_type_put(struct key_type *ktype);
 extern int __key_link_begin(struct key *keyring,
                            const struct key_type *type,
                            const char *description,
-                           struct keyring_list **_prealloc);
+                           unsigned long *_prealloc);
 extern int __key_link_check_live_key(struct key *keyring, struct key *key);
 extern void __key_link(struct key *keyring, struct key *key,
-                      struct keyring_list **_prealloc);
+                      unsigned long *_prealloc);
 extern void __key_link_end(struct key *keyring,
                           struct key_type *type,
-                          struct keyring_list *prealloc);
+                          unsigned long prealloc);
 
 extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
                                      const struct key_type *type,
index 84d4eb5..1c2d43d 100644 (file)
@@ -415,7 +415,7 @@ static int __key_instantiate_and_link(struct key *key,
                                      size_t datalen,
                                      struct key *keyring,
                                      struct key *authkey,
-                                     struct keyring_list **_prealloc)
+                                     unsigned long *_prealloc)
 {
        int ret, awaken;
 
@@ -481,7 +481,7 @@ int key_instantiate_and_link(struct key *key,
                             struct key *keyring,
                             struct key *authkey)
 {
-       struct keyring_list *prealloc;
+       unsigned long prealloc;
        int ret;
 
        if (keyring) {
@@ -526,7 +526,7 @@ int key_negate_and_link(struct key *key,
                        struct key *keyring,
                        struct key *authkey)
 {
-       struct keyring_list *prealloc;
+       unsigned long prealloc;
        struct timespec now;
        int ret, awaken, link_ret = 0;
 
@@ -814,7 +814,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               key_perm_t perm,
                               unsigned long flags)
 {
-       struct keyring_list *prealloc;
+       unsigned long prealloc;
        const struct cred *cred = current_cred();
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
index 92024ed..5620f08 100644 (file)
@@ -25,6 +25,8 @@
                (keyring)->payload.subscriptions,                       \
                rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
 
+#define KEY_LINK_FIXQUOTA 1UL
+
 /*
  * When plumbing the depths of the key tree, this sets a hard limit
  * set on how deep we're willing to go.
@@ -699,11 +701,11 @@ static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
  * Preallocate memory so that a key can be linked into to a keyring.
  */
 int __key_link_begin(struct key *keyring, const struct key_type *type,
-                    const char *description,
-                    struct keyring_list **_prealloc)
+                    const char *description, unsigned long *_prealloc)
        __acquires(&keyring->sem)
 {
        struct keyring_list *klist, *nklist;
+       unsigned long prealloc;
        unsigned max;
        size_t size;
        int loop, ret;
@@ -746,6 +748,7 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
 
                                /* note replacement slot */
                                klist->delkey = nklist->delkey = loop;
+                               prealloc = (unsigned long)nklist;
                                goto done;
                        }
                }
@@ -760,6 +763,7 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
        if (klist && klist->nkeys < klist->maxkeys) {
                /* there's sufficient slack space to append directly */
                nklist = NULL;
+               prealloc = KEY_LINK_FIXQUOTA;
        } else {
                /* grow the key list */
                max = 4;
@@ -794,8 +798,9 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
                nklist->keys[nklist->delkey] = NULL;
        }
 
+       prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
 done:
-       *_prealloc = nklist;
+       *_prealloc = prealloc;
        kleave(" = 0");
        return 0;
 
@@ -836,12 +841,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key)
  * combination.
  */
 void __key_link(struct key *keyring, struct key *key,
-               struct keyring_list **_prealloc)
+               unsigned long *_prealloc)
 {
        struct keyring_list *klist, *nklist;
 
-       nklist = *_prealloc;
-       *_prealloc = NULL;
+       nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA);
+       *_prealloc = 0;
 
        kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
 
@@ -881,20 +886,22 @@ void __key_link(struct key *keyring, struct key *key,
  * Must be called with __key_link_begin() having being called.
  */
 void __key_link_end(struct key *keyring, struct key_type *type,
-                   struct keyring_list *prealloc)
+                   unsigned long prealloc)
        __releases(&keyring->sem)
 {
        BUG_ON(type == NULL);
        BUG_ON(type->name == NULL);
-       kenter("%d,%s,%p", keyring->serial, type->name, prealloc);
+       kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
 
        if (type == &key_type_keyring)
                up_write(&keyring_serialise_link_sem);
 
        if (prealloc) {
-               kfree(prealloc);
-               key_payload_reserve(keyring,
-                                   keyring->datalen - KEYQUOTA_LINK_BYTES);
+               if (prealloc & KEY_LINK_FIXQUOTA)
+                       key_payload_reserve(keyring,
+                                           keyring->datalen -
+                                           KEYQUOTA_LINK_BYTES);
+               kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
        }
        up_write(&keyring->sem);
 }
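
The keyring changes above replace a bare struct pointer with an unsigned long so the preallocation pointer and the KEY_LINK_FIXQUOTA flag can travel together: kmalloc'd pointers are at least word aligned, so bit 0 is free to carry the flag. A hedged sketch of that tagged-pointer packing:

#include <stdint.h>

#define FIXQUOTA_FLAG	1UL

static unsigned long pack_prealloc(void *p, int fixquota)
{
	/* assumes p is at least 2-byte aligned, so bit 0 is unused */
	return (unsigned long)p | (fixquota ? FIXQUOTA_FLAG : 0);
}

static void *unpack_prealloc(unsigned long v, int *fixquota)
{
	*fixquota = v & FIXQUOTA_FLAG;
	return (void *)(v & ~FIXQUOTA_FLAG);
}
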
@@ -921,7 +928,7 @@ void __key_link_end(struct key *keyring, struct key_type *type,
  */
 int key_link(struct key *keyring, struct key *key)
 {
-       struct keyring_list *prealloc;
+       unsigned long prealloc;
        int ret;
 
        key_check(keyring);
index 9a7fb39..a3dc0d4 100644 (file)
@@ -352,8 +352,8 @@ static int construct_alloc_key(struct key_type *type,
                               struct key_user *user,
                               struct key **_key)
 {
-       struct keyring_list *prealloc;
        const struct cred *cred = current_cred();
+       unsigned long prealloc;
        struct key *key;
        key_ref_t key_ref;
        int ret;
similarity index 99%
rename from security/keys/trusted_defined.c
rename to security/keys/trusted.c
index 2836c6d..83fc92e 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/tpm.h>
 #include <linux/tpm_command.h>
 
-#include "trusted_defined.h"
+#include "trusted.h"
 
 static const char hmac_alg[] = "hmac(sha1)";
 static const char hash_alg[] = "sha1";
@@ -1032,6 +1032,7 @@ static int trusted_update(struct key *key, const void *data, size_t datalen)
        ret = datablob_parse(datablob, new_p, new_o);
        if (ret != Opt_update) {
                ret = -EINVAL;
+               kfree(new_p);
                goto out;
        }
        /* copy old key values, and reseal with new pcrs */
index 739e403..bae843c 100644 (file)
@@ -154,10 +154,9 @@ int security_capset(struct cred *new, const struct cred *old,
                                    effective, inheritable, permitted);
 }
 
-int security_capable(int cap)
+int security_capable(const struct cred *cred, int cap)
 {
-       return security_ops->capable(current, current_cred(), cap,
-                                    SECURITY_CAP_AUDIT);
+       return security_ops->capable(current, cred, cap, SECURITY_CAP_AUDIT);
 }
 
 int security_real_capable(struct task_struct *tsk, int cap)
@@ -1101,7 +1100,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
 
 void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
 {
-       security_ops->sk_getsecid(sk, &fl->secid);
+       security_ops->sk_getsecid(sk, &fl->flowi_secid);
 }
 EXPORT_SYMBOL(security_sk_classify_flow);
 
@@ -1234,7 +1233,8 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
 }
 
 int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                                      struct xfrm_policy *xp, struct flowi *fl)
+                                      struct xfrm_policy *xp,
+                                      const struct flowi *fl)
 {
        return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
 }
@@ -1246,7 +1246,7 @@ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
 
 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
 {
-       int rc = security_ops->xfrm_decode_session(skb, &fl->secid, 0);
+       int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0);
 
        BUG_ON(rc);
 }
index e276eb4..c178494 100644 (file)
@@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred)
 {
        struct task_security_struct *tsec = cred->security;
 
-       BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+       /*
+        * cred->security == NULL if security_cred_alloc_blank() or
+        * security_prepare_creds() returned an error.
+        */
+       BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
        cred->security = (void *) 0x7UL;
        kfree(tsec);
 }
@@ -4302,7 +4306,7 @@ static void selinux_secmark_refcount_dec(void)
 static void selinux_req_classify_flow(const struct request_sock *req,
                                      struct flowi *fl)
 {
-       fl->secid = req->secid;
+       fl->flowi_secid = req->secid;
 }
 
 static int selinux_tun_dev_create(void)
@@ -4665,6 +4669,7 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
 {
        int err;
        struct common_audit_data ad;
+       u32 sid;
 
        err = cap_netlink_recv(skb, capability);
        if (err)
@@ -4673,8 +4678,9 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
        COMMON_AUDIT_DATA_INIT(&ad, CAP);
        ad.u.cap = capability;
 
-       return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid,
-                           SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad);
+       security_task_getsecid(current, &sid);
+       return avc_has_perm(sid, sid, SECCLASS_CAPABILITY,
+                           CAP_TO_MASK(capability), &ad);
 }
 
 static int ipc_alloc_security(struct task_struct *task,
index 13128f9..b43813c 100644 (file)
@@ -19,7 +19,7 @@ void selinux_xfrm_state_free(struct xfrm_state *x);
 int selinux_xfrm_state_delete(struct xfrm_state *x);
 int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                       struct xfrm_policy *xp, struct flowi *fl);
+                       struct xfrm_policy *xp, const struct flowi *fl);
 
 /*
  * Extract the security blob from the sock (it's actually on the socket)
index c3f845c..a533732 100644 (file)
@@ -178,7 +178,7 @@ int cond_init_bool_indexes(struct policydb *p)
        p->bool_val_to_struct = (struct cond_bool_datum **)
                kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
        if (!p->bool_val_to_struct)
-               return -1;
+               return -ENOMEM;
        return 0;
 }
 
index be9de38..5736356 100644 (file)
@@ -501,8 +501,8 @@ static int policydb_index(struct policydb *p)
        if (rc)
                goto out;
 
-       rc = -ENOMEM;
-       if (cond_init_bool_indexes(p))
+       rc = cond_init_bool_indexes(p);
+       if (rc)
                goto out;
 
        for (i = 0; i < SYM_NUM; i++) {
index fff78d3..510ec2c 100644 (file)
@@ -112,7 +112,7 @@ int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
  */
 
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
-                       struct flowi *fl)
+                       const struct flowi *fl)
 {
        u32 state_sid;
        int rc;
@@ -135,10 +135,10 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
 
        state_sid = x->security->ctx_sid;
 
-       if (fl->secid != state_sid)
+       if (fl->flowi_secid != state_sid)
                return 0;
 
-       rc = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
+       rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION,
                          ASSOCIATION__SENDTO,
                          NULL)? 0:1;
 
index 91acc9a..7c1fc64 100644 (file)
@@ -30,6 +30,8 @@
 
 #define DRIVER_NAME    "aaci-pl041"
 
+#define FRAME_PERIOD_US        21
+
 /*
  * PM support is not complete.  Turn it off.
  */
@@ -48,7 +50,11 @@ static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97)
        if (v & SLFR_1RXV)
                readl(aaci->base + AACI_SL1RX);
 
-       writel(maincr, aaci->base + AACI_MAINCR);
+       if (maincr != readl(aaci->base + AACI_MAINCR)) {
+               writel(maincr, aaci->base + AACI_MAINCR);
+               readl(aaci->base + AACI_MAINCR);
+               udelay(1);
+       }
 }
 
 /*
@@ -64,8 +70,8 @@ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
                            unsigned short val)
 {
        struct aaci *aaci = ac97->private_data;
+       int timeout;
        u32 v;
-       int timeout = 5000;
 
        if (ac97->num >= 4)
                return;
@@ -81,14 +87,17 @@ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
        writel(val << 4, aaci->base + AACI_SL2TX);
        writel(reg << 12, aaci->base + AACI_SL1TX);
 
-       /*
-        * Wait for the transmission of both slots to complete.
-        */
+       /* Initially, wait one frame period */
+       udelay(FRAME_PERIOD_US);
+
+       /* And then wait an additional eight frame periods for it to be sent */
+       timeout = FRAME_PERIOD_US * 8;
        do {
+               udelay(1);
                v = readl(aaci->base + AACI_SLFR);
        } while ((v & (SLFR_1TXB|SLFR_2TXB)) && --timeout);
 
-       if (!timeout)
+       if (v & (SLFR_1TXB|SLFR_2TXB))
                dev_err(&aaci->dev->dev,
                        "timeout waiting for write to complete\n");
 
@@ -101,9 +110,8 @@ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
 static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
 {
        struct aaci *aaci = ac97->private_data;
+       int timeout, retries = 10;
        u32 v;
-       int timeout = 5000;
-       int retries = 10;
 
        if (ac97->num >= 4)
                return ~0;
@@ -117,35 +125,34 @@ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
         */
        writel((reg << 12) | (1 << 19), aaci->base + AACI_SL1TX);
 
-       /*
-        * Wait for the transmission to complete.
-        */
+       /* Initially, wait one frame period */
+       udelay(FRAME_PERIOD_US);
+
+       /* And then wait an additional eight frame periods for it to be sent */
+       timeout = FRAME_PERIOD_US * 8;
        do {
+               udelay(1);
                v = readl(aaci->base + AACI_SLFR);
        } while ((v & SLFR_1TXB) && --timeout);
 
-       if (!timeout) {
+       if (v & SLFR_1TXB) {
                dev_err(&aaci->dev->dev, "timeout on slot 1 TX busy\n");
                v = ~0;
                goto out;
        }
 
-       /*
-        * Give the AC'97 codec more than enough time
-        * to respond. (42us = ~2 frames at 48kHz.)
-        */
-       udelay(42);
+       /* Now wait for the response frame */
+       udelay(FRAME_PERIOD_US);
 
-       /*
-        * Wait for slot 2 to indicate data.
-        */
-       timeout = 5000;
+       /* And then wait an additional eight frame periods for data */
+       timeout = FRAME_PERIOD_US * 8;
        do {
+               udelay(1);
                cond_resched();
                v = readl(aaci->base + AACI_SLFR) & (SLFR_1RXV|SLFR_2RXV);
        } while ((v != (SLFR_1RXV|SLFR_2RXV)) && --timeout);
 
-       if (!timeout) {
+       if (v != (SLFR_1RXV|SLFR_2RXV)) {
                dev_err(&aaci->dev->dev, "timeout on RX valid\n");
                v = ~0;
                goto out;
@@ -179,6 +186,7 @@ aaci_chan_wait_ready(struct aaci_runtime *aacirun, unsigned long mask)
        int timeout = 5000;
 
        do {
+               udelay(1);
                val = readl(aacirun->base + AACI_SR);
        } while (val & mask && timeout--);
 }
@@ -874,7 +882,7 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci)
         * Give the AC'97 codec more than enough time
         * to wake up. (42us = ~2 frames at 48kHz.)
         */
-       udelay(42);
+       udelay(FRAME_PERIOD_US * 2);
 
        ret = snd_ac97_bus(aaci->card, 0, &aaci_bus_ops, aaci, &ac97_bus);
        if (ret)
@@ -989,6 +997,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
         * disabling the channel doesn't clear the FIFO.
         */
        writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR);
+       readl(aaci->base + AACI_MAINCR);
+       udelay(1);
        writel(aaci->maincr, aaci->base + AACI_MAINCR);
 
        /*
index 10c3a87..b310702 100644 (file)
 #include <linux/dw_dmac.h>
 
 #include <mach/cpu.h>
-#include <mach/hardware.h>
 #include <mach/gpio.h>
 
+#ifdef CONFIG_ARCH_AT91
+#include <mach/hardware.h>
+#endif
+
 #include "ac97c.h"
 
 enum {
index 7730575..b8b31c4 100644 (file)
@@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
 {
        struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
        struct snd_timer *t = stime->timer;
+       unsigned long oruns;
 
        if (!atomic_read(&stime->running))
                return HRTIMER_NORESTART;
 
-       hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
-       snd_timer_interrupt(stime->timer, t->sticks);
+       oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+       snd_timer_interrupt(stime->timer, t->sticks * oruns);
 
        if (!atomic_read(&stime->running))
                return HRTIMER_NORESTART;
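
The change above multiplies the tick count by the value hrtimer_forward_now() returns, i.e. the number of timer periods that were skipped while the callback was delayed. A rough model with made-up numbers (a userspace sketch, not driver code):

	#include <stdio.h>

	/* Illustrative only: t->sticks = 4 ticks at a 1 ms resolution give a
	 * 4 ms period.  If the callback is serviced 9 ms late, the timer must
	 * be forwarded 3 periods, and the core is credited with 4 * 3 ticks. */
	int main(void)
	{
		const long sticks = 4, resolution_ns = 1000000L;
		const long period_ns = sticks * resolution_ns;
		long late_ns = 9 * 1000000L;
		long overruns = late_ns / period_ns + 1; /* rough model of the return value */

		printf("ticks credited: %ld (always %ld before the fix)\n",
		       sticks * overruns, sticks);
		return 0;
	}
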
@@ -104,7 +105,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
 }
 
 static struct snd_timer_hardware hrtimer_hw = {
-       .flags =        SNDRV_TIMER_HW_AUTO,
+       .flags =        SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET,
        .open =         snd_hrtimer_open,
        .close =        snd_hrtimer_close,
        .start =        snd_hrtimer_start,
index 4902ae5..53b53e9 100644 (file)
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
 
 fail_input:
        input_free_device(jack->input_dev);
+       kfree(jack->id);
        kfree(jack);
        return err;
 }
index da03597..5c426df 100644 (file)
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/ioport.h>
+#include <linux/io.h>
 #include <linux/moduleparam.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <linux/delay.h>
 
-#include <asm/io.h>
-
 /*
  *      globals
  */
index 96f14dc..90ffb99 100644 (file)
@@ -87,7 +87,7 @@ ifeq ($(CONFIG_PSS_HAVE_BOOT),y)
        $(obj)/bin2hex pss_synth < $< > $@
 else
     $(obj)/pss_boot.h:
-       (                                                       \
+       $(Q)(                                                   \
            echo 'static unsigned char * pss_synth = NULL;';    \
            echo 'static int pss_synthLen = 0;';                \
        ) > $@
@@ -102,7 +102,7 @@ ifeq ($(CONFIG_TRIX_HAVE_BOOT),y)
        $(obj)/hex2hex -i trix_boot < $< > $@
 else
     $(obj)/trix_boot.h:
-       (                                                       \
+       $(Q)(                                                   \
            echo 'static unsigned char * trix_boot = NULL;';    \
            echo 'static int trix_boot_len = 0;';               \
        ) > $@
index 23f49f3..16c0bdf 100644 (file)
@@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
 static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
 {
        stream_t *dma = &vortex->dma_adb[adbdma];
-       int temp;
+       int temp, page, delta;
 
        temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2));
-       temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1));
-       return temp;
+       page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
+       if (dma->nr_periods >= 4)
+               delta = (page - dma->period_real) & 3;
+       else {
+               delta = (page - dma->period_real);
+               if (delta < 0)
+                       delta += dma->nr_periods;
+       }
+       return (dma->period_virt + delta) * dma->period_bytes
+               + (temp & (dma->period_bytes - 1));
 }
 
 static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma)
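
The new position calculation above reads the hardware's current subbuffer page from the STAT register and wraps the distance to the driver's current period instead of assuming the two always agree. A small sketch of that wrap-around, using a hypothetical helper name and hard-coded values:

	#include <stdio.h>

	/* Illustrative only: distance from the driver's period to the page the
	 * hardware is on, wrapped like the new code (4 hardware subbuffers). */
	static int period_delta(int page, int period_real, int nr_periods)
	{
		int delta = page - period_real;

		if (nr_periods >= 4)		/* hardware cycles through 4 subbuffers */
			return delta & 3;
		if (delta < 0)
			delta += nr_periods;
		return delta;
	}

	int main(void)
	{
		printf("%d\n", period_delta(0, 3, 4));	/* wraps to 1 */
		printf("%d\n", period_delta(0, 2, 3));	/* wraps to 1 */
		return 0;
	}
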
index 6117595..573594b 100644 (file)
@@ -979,31 +979,25 @@ snd_azf3328_codec_setfmt(struct snd_azf3328_codec_data *codec,
 
        snd_azf3328_dbgcallenter();
        switch (bitrate) {
-#define AZF_FMT_XLATE(in_freq, out_bits) \
-       do { \
-               case AZF_FREQ_ ## in_freq: \
-                       freq = SOUNDFORMAT_FREQ_ ## out_bits; \
-                       break; \
-       } while (0);
-       AZF_FMT_XLATE(4000, SUSPECTED_4000)
-       AZF_FMT_XLATE(4800, SUSPECTED_4800)
-       /* the AZF3328 names it "5510" for some strange reason: */
-       AZF_FMT_XLATE(5512, 5510)
-       AZF_FMT_XLATE(6620, 6620)
-       AZF_FMT_XLATE(8000, 8000)
-       AZF_FMT_XLATE(9600, 9600)
-       AZF_FMT_XLATE(11025, 11025)
-       AZF_FMT_XLATE(13240, SUSPECTED_13240)
-       AZF_FMT_XLATE(16000, 16000)
-       AZF_FMT_XLATE(22050, 22050)
-       AZF_FMT_XLATE(32000, 32000)
+       case AZF_FREQ_4000:  freq = SOUNDFORMAT_FREQ_SUSPECTED_4000; break;
+       case AZF_FREQ_4800:  freq = SOUNDFORMAT_FREQ_SUSPECTED_4800; break;
+       case AZF_FREQ_5512:
+               /* the AZF3328 names it "5510" for some strange reason */
+                            freq = SOUNDFORMAT_FREQ_5510; break;
+       case AZF_FREQ_6620:  freq = SOUNDFORMAT_FREQ_6620; break;
+       case AZF_FREQ_8000:  freq = SOUNDFORMAT_FREQ_8000; break;
+       case AZF_FREQ_9600:  freq = SOUNDFORMAT_FREQ_9600; break;
+       case AZF_FREQ_11025: freq = SOUNDFORMAT_FREQ_11025; break;
+       case AZF_FREQ_13240: freq = SOUNDFORMAT_FREQ_SUSPECTED_13240; break;
+       case AZF_FREQ_16000: freq = SOUNDFORMAT_FREQ_16000; break;
+       case AZF_FREQ_22050: freq = SOUNDFORMAT_FREQ_22050; break;
+       case AZF_FREQ_32000: freq = SOUNDFORMAT_FREQ_32000; break;
        default:
                snd_printk(KERN_WARNING "unknown bitrate %d, assuming 44.1kHz!\n", bitrate);
                /* fall-through */
-       AZF_FMT_XLATE(44100, 44100)
-       AZF_FMT_XLATE(48000, 48000)
-       AZF_FMT_XLATE(66200, SUSPECTED_66200)
-#undef AZF_FMT_XLATE
+       case AZF_FREQ_44100: freq = SOUNDFORMAT_FREQ_44100; break;
+       case AZF_FREQ_48000: freq = SOUNDFORMAT_FREQ_48000; break;
+       case AZF_FREQ_66200: freq = SOUNDFORMAT_FREQ_SUSPECTED_66200; break;
        }
        /* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) hmm, 66120, 65967, 66123 */
        /* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) hmm, 13237.2Hz? */
index 4a66347..74b0560 100644 (file)
@@ -381,7 +381,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a)
        snd_print_pcm_rates(a->rates, buf, sizeof(buf));
 
        if (a->format == AUDIO_CODING_TYPE_LPCM)
-               snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
+               snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
        else if (a->max_bitrate)
                snprintf(buf2, sizeof(buf2),
                                ", max bitrate = %d", a->max_bitrate);
index 2e91a99..fcedad9 100644 (file)
@@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
@@ -2703,7 +2704,7 @@ static int __devinit azx_probe(struct pci_dev *pci,
        if (err < 0)
                goto out_free;
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
-       if (patch[dev]) {
+       if (patch[dev] && *patch[dev]) {
                snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
                           patch[dev]);
                err = snd_hda_load_patch(chip->bus, patch[dev]);
index a07b031..067982f 100644 (file)
@@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = {
        {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
        {0x11, AC_VERB_SET_PROC_STATE, 0x00},
 
+#if 0 /* Don't set to D3 as we are in the power-up sequence */
        {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
        {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
        /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+#endif
 
        {} /* terminator */
 };
index 9bb030a..4d5004e 100644 (file)
@@ -85,6 +85,7 @@ struct conexant_spec {
        unsigned int auto_mic;
        int auto_mic_ext;               /* autocfg.inputs[] index for ext mic */
        unsigned int need_dac_fix;
+       hda_nid_t slave_dig_outs[2];
 
        /* capture */
        unsigned int num_adc_nids;
@@ -127,6 +128,7 @@ struct conexant_spec {
        unsigned int ideapad:1;
        unsigned int thinkpad:1;
        unsigned int hp_laptop:1;
+       unsigned int asus:1;
 
        unsigned int ext_mic_present;
        unsigned int recording;
@@ -352,6 +354,8 @@ static int conexant_build_pcms(struct hda_codec *codec)
                        info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
                                spec->dig_in_nid;
                }
+               if (spec->slave_dig_outs[0])
+                       codec->slave_dig_outs = spec->slave_dig_outs;
        }
 
        return 0;
@@ -403,10 +407,16 @@ static int conexant_add_jack(struct hda_codec *codec,
        struct conexant_spec *spec;
        struct conexant_jack *jack;
        const char *name;
-       int err;
+       int i, err;
 
        spec = codec->spec;
        snd_array_init(&spec->jacks, sizeof(*jack), 32);
+
+       jack = spec->jacks.list;
+       for (i = 0; i < spec->jacks.used; i++, jack++)
+               if (jack->nid == nid)
+                       return 0 ; /* already present */
+
        jack = snd_array_new(&spec->jacks);
        name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
 
@@ -2100,7 +2110,7 @@ static int patch_cxt5051(struct hda_codec *codec)
 static hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
 static hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
 static hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
-#define CXT5066_SPDIF_OUT      0x21
+static hda_nid_t cxt5066_digout_pin_nids[2] = { 0x20, 0x22 };
 
 /* OLPC's microphone port is DC coupled for use with external sensors,
  * therefore we use a 50% mic bias in order to center the input signal with
@@ -2312,6 +2322,19 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
        }
 }
 
+
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_asus_automic(struct hda_codec *codec)
+{
+       unsigned int present;
+
+       present = snd_hda_jack_detect(codec, 0x1b);
+       snd_printdd("CXT5066: external microphone present=%d\n", present);
+       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+                           present ? 1 : 0);
+}
+
+
 /* toggle input of built-in digital mic and mic jack appropriately */
 static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
 {
@@ -2387,79 +2410,55 @@ static void cxt5066_hp_automute(struct hda_codec *codec)
        cxt5066_update_speaker(codec);
 }
 
-/* unsolicited event for jack sensing */
-static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
+/* Dispatch the right mic autoswitch function */
+static void cxt5066_automic(struct hda_codec *codec)
 {
        struct conexant_spec *spec = codec->spec;
-       snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
-       switch (res >> 26) {
-       case CONEXANT_HP_EVENT:
-               cxt5066_hp_automute(codec);
-               break;
-       case CONEXANT_MIC_EVENT:
-               /* ignore mic events in DC mode; we're always using the jack */
-               if (!spec->dc_enable)
-                       cxt5066_olpc_automic(codec);
-               break;
-       }
-}
 
-/* unsolicited event for jack sensing */
-static void cxt5066_vostro_event(struct hda_codec *codec, unsigned int res)
-{
-       snd_printdd("CXT5066_vostro: unsol event %x (%x)\n", res, res >> 26);
-       switch (res >> 26) {
-       case CONEXANT_HP_EVENT:
-               cxt5066_hp_automute(codec);
-               break;
-       case CONEXANT_MIC_EVENT:
+       if (spec->dell_vostro)
                cxt5066_vostro_automic(codec);
-               break;
-       }
-}
-
-/* unsolicited event for jack sensing */
-static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
-{
-       snd_printdd("CXT5066_ideapad: unsol event %x (%x)\n", res, res >> 26);
-       switch (res >> 26) {
-       case CONEXANT_HP_EVENT:
-               cxt5066_hp_automute(codec);
-               break;
-       case CONEXANT_MIC_EVENT:
+       else if (spec->ideapad)
                cxt5066_ideapad_automic(codec);
-               break;
-       }
+       else if (spec->thinkpad)
+               cxt5066_thinkpad_automic(codec);
+       else if (spec->hp_laptop)
+               cxt5066_hp_laptop_automic(codec);
+       else if (spec->asus)
+               cxt5066_asus_automic(codec);
 }
 
 /* unsolicited event for jack sensing */
-static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
 {
-       snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+       struct conexant_spec *spec = codec->spec;
+       snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
        switch (res >> 26) {
        case CONEXANT_HP_EVENT:
                cxt5066_hp_automute(codec);
                break;
        case CONEXANT_MIC_EVENT:
-               cxt5066_hp_laptop_automic(codec);
+               /* ignore mic events in DC mode; we're always using the jack */
+               if (!spec->dc_enable)
+                       cxt5066_olpc_automic(codec);
                break;
        }
 }
 
 /* unsolicited event for jack sensing */
-static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
+static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
 {
-       snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
+       snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
        switch (res >> 26) {
        case CONEXANT_HP_EVENT:
                cxt5066_hp_automute(codec);
                break;
        case CONEXANT_MIC_EVENT:
-               cxt5066_thinkpad_automic(codec);
+               cxt5066_automic(codec);
                break;
        }
 }
 
+
 static const struct hda_input_mux cxt5066_analog_mic_boost = {
        .num_items = 5,
        .items = {
@@ -2633,6 +2632,27 @@ static void cxt5066_olpc_capture_cleanup(struct hda_codec *codec)
        spec->recording = 0;
 }
 
+static void conexant_check_dig_outs(struct hda_codec *codec,
+                                   hda_nid_t *dig_pins,
+                                   int num_pins)
+{
+       struct conexant_spec *spec = codec->spec;
+       hda_nid_t *nid_loc = &spec->multiout.dig_out_nid;
+       int i;
+
+       for (i = 0; i < num_pins; i++, dig_pins++) {
+               unsigned int cfg = snd_hda_codec_get_pincfg(codec, *dig_pins);
+               if (get_defcfg_connect(cfg) == AC_JACK_PORT_NONE)
+                       continue;
+               if (snd_hda_get_connections(codec, *dig_pins, nid_loc, 1) != 1)
+                       continue;
+               if (spec->slave_dig_outs[0])
+                       nid_loc++;
+               else
+                       nid_loc = spec->slave_dig_outs;
+       }
+}
+
 static struct hda_input_mux cxt5066_capture_source = {
        .num_items = 4,
        .items = {
@@ -3039,20 +3059,11 @@ static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
-       struct conexant_spec *spec = codec->spec;
-
        snd_printdd("CXT5066: init\n");
        conexant_init(codec);
        if (codec->patch_ops.unsol_event) {
                cxt5066_hp_automute(codec);
-               if (spec->dell_vostro)
-                       cxt5066_vostro_automic(codec);
-               else if (spec->ideapad)
-                       cxt5066_ideapad_automic(codec);
-               else if (spec->thinkpad)
-                       cxt5066_thinkpad_automic(codec);
-               else if (spec->hp_laptop)
-                       cxt5066_hp_laptop_automic(codec);
+               cxt5066_automic(codec);
        }
        cxt5066_set_mic_boost(codec);
        return 0;
@@ -3080,6 +3091,7 @@ enum {
        CXT5066_DELL_VOSTRO,    /* Dell Vostro 1015i */
        CXT5066_IDEAPAD,        /* Lenovo IdeaPad U150 */
        CXT5066_THINKPAD,       /* Lenovo ThinkPad T410s, others? */
+       CXT5066_ASUS,           /* Asus K52JU, Lenovo G560 - Int mic at 0x1a and Ext mic at 0x1b */
        CXT5066_HP_LAPTOP,      /* HP Laptop */
        CXT5066_MODELS
 };
@@ -3091,6 +3103,7 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
        [CXT5066_DELL_VOSTRO]   = "dell-vostro",
        [CXT5066_IDEAPAD]       = "ideapad",
        [CXT5066_THINKPAD]      = "thinkpad",
+       [CXT5066_ASUS]          = "asus",
        [CXT5066_HP_LAPTOP]     = "hp-laptop",
 };
 
@@ -3101,8 +3114,12 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
-       SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
+       SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
+       SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
+       SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
        SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
        SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
@@ -3111,7 +3128,9 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
        {}
 };
@@ -3133,7 +3152,8 @@ static int patch_cxt5066(struct hda_codec *codec)
        spec->multiout.max_channels = 2;
        spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
        spec->multiout.dac_nids = cxt5066_dac_nids;
-       spec->multiout.dig_out_nid = CXT5066_SPDIF_OUT;
+       conexant_check_dig_outs(codec, cxt5066_digout_pin_nids,
+           ARRAY_SIZE(cxt5066_digout_pin_nids));
        spec->num_adc_nids = 1;
        spec->adc_nids = cxt5066_adc_nids;
        spec->capsrc_nids = cxt5066_capsrc_nids;
@@ -3167,17 +3187,20 @@ static int patch_cxt5066(struct hda_codec *codec)
                spec->num_init_verbs++;
                spec->dell_automute = 1;
                break;
+       case CXT5066_ASUS:
        case CXT5066_HP_LAPTOP:
                codec->patch_ops.init = cxt5066_init;
-               codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+               codec->patch_ops.unsol_event = cxt5066_unsol_event;
                spec->init_verbs[spec->num_init_verbs] =
                        cxt5066_init_verbs_hp_laptop;
                spec->num_init_verbs++;
-               spec->hp_laptop = 1;
+               spec->hp_laptop = board_config == CXT5066_HP_LAPTOP;
+               spec->asus = board_config == CXT5066_ASUS;
                spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
                spec->mixers[spec->num_mixers++] = cxt5066_mixers;
                /* no S/PDIF out */
-               spec->multiout.dig_out_nid = 0;
+               if (board_config == CXT5066_HP_LAPTOP)
+                       spec->multiout.dig_out_nid = 0;
                /* input source automatically selected */
                spec->input_mux = NULL;
                spec->port_d_mode = 0;
@@ -3207,7 +3230,7 @@ static int patch_cxt5066(struct hda_codec *codec)
                break;
        case CXT5066_DELL_VOSTRO:
                codec->patch_ops.init = cxt5066_init;
-               codec->patch_ops.unsol_event = cxt5066_vostro_event;
+               codec->patch_ops.unsol_event = cxt5066_unsol_event;
                spec->init_verbs[0] = cxt5066_init_verbs_vostro;
                spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
                spec->mixers[spec->num_mixers++] = cxt5066_mixers;
@@ -3224,7 +3247,7 @@ static int patch_cxt5066(struct hda_codec *codec)
                break;
        case CXT5066_IDEAPAD:
                codec->patch_ops.init = cxt5066_init;
-               codec->patch_ops.unsol_event = cxt5066_ideapad_event;
+               codec->patch_ops.unsol_event = cxt5066_unsol_event;
                spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
                spec->mixers[spec->num_mixers++] = cxt5066_mixers;
                spec->init_verbs[0] = cxt5066_init_verbs_ideapad;
@@ -3240,7 +3263,7 @@ static int patch_cxt5066(struct hda_codec *codec)
                break;
        case CXT5066_THINKPAD:
                codec->patch_ops.init = cxt5066_init;
-               codec->patch_ops.unsol_event = cxt5066_thinkpad_event;
+               codec->patch_ops.unsol_event = cxt5066_unsol_event;
                spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
                spec->mixers[spec->num_mixers++] = cxt5066_mixers;
                spec->init_verbs[0] = cxt5066_init_verbs_thinkpad;
@@ -3389,7 +3412,7 @@ static void cx_auto_parse_output(struct hda_codec *codec)
                }
        }
        spec->multiout.dac_nids = spec->private_dac_nids;
-       spec->multiout.max_channels = nums * 2;
+       spec->multiout.max_channels = spec->multiout.num_dacs * 2;
 
        if (cfg->hp_outs > 0)
                spec->auto_mute = 1;
@@ -3708,9 +3731,9 @@ static int cx_auto_init(struct hda_codec *codec)
        return 0;
 }
 
-static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
+static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
                              const char *dir, int cidx,
-                             hda_nid_t nid, int hda_dir)
+                             hda_nid_t nid, int hda_dir, int amp_idx)
 {
        static char name[32];
        static struct snd_kcontrol_new knew[] = {
@@ -3722,7 +3745,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
 
        for (i = 0; i < 2; i++) {
                struct snd_kcontrol *kctl;
-               knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir);
+               knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
+                                                           hda_dir);
                knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
                knew[i].index = cidx;
                snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]);
@@ -3738,6 +3762,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
        return 0;
 }
 
+#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir)                \
+       cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
+
 #define cx_auto_add_pb_volume(codec, nid, str, idx)                    \
        cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
 
@@ -3787,29 +3814,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
        struct conexant_spec *spec = codec->spec;
        struct auto_pin_cfg *cfg = &spec->autocfg;
        static const char *prev_label;
-       int i, err, cidx;
+       int i, err, cidx, conn_len;
+       hda_nid_t conn[HDA_MAX_CONNECTIONS];
+
+       int multi_adc_volume = 0; /* If the ADC nid has several input volumes */
+       int adc_nid = spec->adc_nids[0];
+
+       conn_len = snd_hda_get_connections(codec, adc_nid, conn,
+                                          HDA_MAX_CONNECTIONS);
+       if (conn_len < 0)
+               return conn_len;
+
+       multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1;
+       if (!multi_adc_volume) {
+               err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid,
+                                        HDA_INPUT);
+               if (err < 0)
+                       return err;
+       }
 
-       err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0],
-                                HDA_INPUT);
-       if (err < 0)
-               return err;
        prev_label = NULL;
        cidx = 0;
        for (i = 0; i < cfg->num_inputs; i++) {
                hda_nid_t nid = cfg->inputs[i].pin;
                const char *label;
-               if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP))
+               int j;
+               int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP;
+               if (!pin_amp && !multi_adc_volume)
                        continue;
+
                label = hda_get_autocfg_input_label(codec, cfg, i);
                if (label == prev_label)
                        cidx++;
                else
                        cidx = 0;
                prev_label = label;
-               err = cx_auto_add_volume(codec, label, " Capture", cidx,
-                                        nid, HDA_INPUT);
-               if (err < 0)
-                       return err;
+
+               if (pin_amp) {
+                       err = cx_auto_add_volume(codec, label, " Boost", cidx,
+                                                nid, HDA_INPUT);
+                       if (err < 0)
+                               return err;
+               }
+
+               if (!multi_adc_volume)
+                       continue;
+               for (j = 0; j < conn_len; j++) {
+                       if (conn[j] == nid) {
+                               err = cx_auto_add_volume_idx(codec, label,
+                                   " Capture", cidx, adc_nid, HDA_INPUT, j);
+                               if (err < 0)
+                                       return err;
+                               break;
+                       }
+               }
        }
        return 0;
 }
@@ -3881,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_cxt5066 },
        { .id = 0x14f15069, .name = "CX20585",
          .patch = patch_cxt5066 },
+       { .id = 0x14f1506e, .name = "CX20590",
+         .patch = patch_cxt5066 },
        { .id = 0x14f15097, .name = "CX20631",
          .patch = patch_conexant_auto },
        { .id = 0x14f15098, .name = "CX20632",
@@ -3907,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
 MODULE_ALIAS("snd-hda-codec-id:14f15067");
 MODULE_ALIAS("snd-hda-codec-id:14f15068");
 MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506e");
 MODULE_ALIAS("snd-hda-codec-id:14f15097");
 MODULE_ALIAS("snd-hda-codec-id:14f15098");
 MODULE_ALIAS("snd-hda-codec-id:14f150a1");
index 2d5b83f..ec0fa2d 100644 (file)
@@ -642,6 +642,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
                        hdmi_ai->ver            = 0x01;
                        hdmi_ai->len            = 0x0a;
                        hdmi_ai->CC02_CT47      = channels - 1;
+                       hdmi_ai->CA             = ca;
                        hdmi_checksum_audio_infoframe(hdmi_ai);
                } else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */
                        struct dp_audio_infoframe *dp_ai;
@@ -651,6 +652,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
                        dp_ai->len              = 0x1b;
                        dp_ai->ver              = 0x11 << 2;
                        dp_ai->CC02_CT47        = channels - 1;
+                       dp_ai->CA               = ca;
                } else {
                        snd_printd("HDMI: unknown connection type at pin %d\n",
                                   pin_nid);
@@ -1632,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+/* 17 is known to be absent */
 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
@@ -1674,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011");
 MODULE_ALIAS("snd-hda-codec-id:10de0012");
 MODULE_ALIAS("snd-hda-codec-id:10de0013");
 MODULE_ALIAS("snd-hda-codec-id:10de0014");
+MODULE_ALIAS("snd-hda-codec-id:10de0015");
+MODULE_ALIAS("snd-hda-codec-id:10de0016");
 MODULE_ALIAS("snd-hda-codec-id:10de0018");
 MODULE_ALIAS("snd-hda-codec-id:10de0019");
 MODULE_ALIAS("snd-hda-codec-id:10de001a");
index be4df4c..4261bb8 100644 (file)
@@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl)
                nid = spec->autocfg.hp_pins[i];
                if (!nid)
                        break;
-               if (snd_hda_jack_detect(codec, nid)) {
-                       spec->jack_present = 1;
-                       break;
-               }
-               alc_report_jack(codec, spec->autocfg.hp_pins[i]);
+               alc_report_jack(codec, nid);
+               spec->jack_present |= snd_hda_jack_detect(codec, nid);
        }
 
        mute = spec->jack_present ? HDA_AMP_MUTE : 0;
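
The rewritten loop above reports every headphone pin and ORs the detection results together, instead of stopping at the first plugged jack (which also skipped alc_report_jack() for it). A toy illustration of the accumulation, with made-up pin states:

	#include <stdio.h>

	int main(void)
	{
		int detected[3] = { 0, 1, 0 };	/* hypothetical per-pin jack states */
		int present = 0;
		int i;

		for (i = 0; i < 3; i++) {
			/* each pin would also be reported here, plugged or not */
			present |= detected[i];
		}
		printf("jack_present = %d\n", present);
		return 0;
	}
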
@@ -2290,6 +2287,29 @@ static struct snd_kcontrol_new alc888_base_mixer[] = {
        { } /* end */
 };
 
+static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = {
+       HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+       HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
+       HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0f, 2, 0x0,
+               HDA_OUTPUT),
+       HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0f, 2, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT),
+       HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME("Side Playback Volume", 0x0e, 0x0, HDA_OUTPUT),
+       HDA_BIND_MUTE("Side Playback Switch", 0x0e, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
+       HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
+       HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+       HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+       HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+       HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+       HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+       { } /* end */
+};
+
+
 static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = {
        HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
        HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -10359,7 +10379,7 @@ static struct alc_config_preset alc882_presets[] = {
                .init_hook = alc_automute_amp,
        },
        [ALC888_ACER_ASPIRE_4930G] = {
-               .mixers = { alc888_base_mixer,
+               .mixers = { alc888_acer_aspire_4930g_mixer,
                                alc883_chmode_mixer },
                .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
                                alc888_acer_aspire_4930g_verbs },
@@ -14954,9 +14974,11 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
-       SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
-       SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
        {}
@@ -14990,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
-       SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC),
+       SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
        SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
        SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
@@ -18800,6 +18822,7 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
                                        ALC662_3ST_6ch_DIG),
        SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x",
                           ALC663_ASUS_H13),
+       SND_PCI_QUIRK(0x1991, 0x5628, "Ordissimo EVE", ALC662_LENOVO_101E),
        {}
 };
 
@@ -19492,6 +19515,7 @@ static const struct alc_fixup alc662_fixups[] = {
 };
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
index 9ea48b4..bd7b123 100644 (file)
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = {
        0x0f, 0x10, 0x11, 0x1f, 0x20,
 };
 
-static hda_nid_t stac92hd88xxx_pin_nids[10] = {
+static hda_nid_t stac92hd87xxx_pin_nids[6] = {
+       0x0a, 0x0b, 0x0c, 0x0d,
+       0x0f, 0x11,
+};
+
+static hda_nid_t stac92hd88xxx_pin_nids[8] = {
        0x0a, 0x0b, 0x0c, 0x0d,
        0x0f, 0x11, 0x1f, 0x20,
 };
@@ -5430,12 +5435,13 @@ again:
        switch (codec->vendor_id) {
        case 0x111d76d1:
        case 0x111d76d9:
+       case 0x111d76e5:
                spec->dmic_nids = stac92hd87b_dmic_nids;
                spec->num_dmics = stac92xx_connected_ports(codec,
                                stac92hd87b_dmic_nids,
                                STAC92HD87B_NUM_DMICS);
-               spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
-               spec->pin_nids = stac92hd88xxx_pin_nids;
+               spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
+               spec->pin_nids = stac92hd87xxx_pin_nids;
                spec->mono_nid = 0;
                spec->num_pwrs = 0;
                break;
@@ -5443,6 +5449,7 @@ again:
        case 0x111d7667:
        case 0x111d7668:
        case 0x111d7669:
+       case 0x111d76e3:
                spec->num_dmics = stac92xx_connected_ports(codec,
                                stac92hd88xxx_dmic_nids,
                                STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
        { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
        { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
        {} /* terminator */
 };
index a76c326..63b0054 100644 (file)
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
                hda_nid_t nid = cfg->inputs[i].pin;
                if (spec->smart51_enabled && is_smart51_pins(spec, nid))
                        ctl = PIN_OUT;
-               else if (i == AUTO_PIN_MIC)
+               else if (cfg->inputs[i].type == AUTO_PIN_MIC)
                        ctl = PIN_VREF50;
                else
                        ctl = PIN_IN;
index c2ae63d..f53897a 100644 (file)
@@ -92,6 +92,8 @@ struct oxygen_model {
        void (*update_dac_volume)(struct oxygen *chip);
        void (*update_dac_mute)(struct oxygen *chip);
        void (*update_center_lfe_mix)(struct oxygen *chip, bool mixed);
+       unsigned int (*adjust_dac_routing)(struct oxygen *chip,
+                                          unsigned int play_routing);
        void (*gpio_changed)(struct oxygen *chip);
        void (*uart_input)(struct oxygen *chip);
        void (*ac97_switch)(struct oxygen *chip,
index 9bff14d..26c7e8b 100644 (file)
@@ -180,6 +180,8 @@ void oxygen_update_dac_routing(struct oxygen *chip)
                            (1 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
                            (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
                            (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
+       if (chip->model.adjust_dac_routing)
+               reg_value = chip->model.adjust_dac_routing(chip, reg_value);
        oxygen_write16_masked(chip, OXYGEN_PLAY_ROUTING, reg_value,
                              OXYGEN_PLAY_DAC0_SOURCE_MASK |
                              OXYGEN_PLAY_DAC1_SOURCE_MASK |
index 9f72d42..2527191 100644 (file)
@@ -392,7 +392,7 @@ static void dump_d1_registers(struct oxygen *chip,
        unsigned int i;
 
        snd_iprintf(buffer, "\nCS4398: 7?");
-       for (i = 2; i <= 8; ++i)
+       for (i = 2; i < 8; ++i)
                snd_iprintf(buffer, " %02x", data->cs4398_regs[i]);
        snd_iprintf(buffer, "\n");
        dump_cs4362a_registers(data, buffer);
index e1fa602..bc6eb58 100644 (file)
  *
  *   SPI 0 -> CS4245
  *
+ *   I²S 1 -> CS4245
+ *   I²S 2 -> CS4361 (center/LFE)
+ *   I²S 3 -> CS4361 (surround)
+ *   I²S 4 -> CS4361 (front)
+ *
  *   GPIO 3 <- ?
  *   GPIO 4 <- headphone detect
  *   GPIO 5 -> route input jack to line-in (0) or mic-in (1)
@@ -36,6 +41,7 @@
  *   input 1 <- aux
  *   input 2 <- front mic
  *   input 4 <- line/mic
+ *   DAC out -> headphones
  *   aux out -> front panel headphones
  */
 
@@ -207,6 +213,35 @@ static void set_cs4245_adc_params(struct oxygen *chip,
        cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
 }
 
+static inline unsigned int shift_bits(unsigned int value,
+                                     unsigned int shift_from,
+                                     unsigned int shift_to,
+                                     unsigned int mask)
+{
+       if (shift_from < shift_to)
+               return (value << (shift_to - shift_from)) & mask;
+       else
+               return (value >> (shift_from - shift_to)) & mask;
+}
+
+static unsigned int adjust_dg_dac_routing(struct oxygen *chip,
+                                         unsigned int play_routing)
+{
+       return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
+              shift_bits(play_routing,
+                         OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+                         OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+                         OXYGEN_PLAY_DAC1_SOURCE_MASK) |
+              shift_bits(play_routing,
+                         OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+                         OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+                         OXYGEN_PLAY_DAC2_SOURCE_MASK) |
+              shift_bits(play_routing,
+                         OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
+                         OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
+                         OXYGEN_PLAY_DAC3_SOURCE_MASK);
+}
+
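
shift_bits() above moves one DAC source field of the play-routing value to another position; adjust_dg_dac_routing() uses it to match the Xonar DG wiring listed in the header comment (I²S 2 feeds center/LFE, I²S 3 surround, I²S 4 front). A simplified sketch of the field move, using stand-in shifts and masks rather than the real OXYGEN_* constants:

	#include <stdio.h>

	/* Illustrative only: relocate a 2-bit source field within a register value. */
	static unsigned int shift_bits(unsigned int value, unsigned int from,
				       unsigned int to, unsigned int mask)
	{
		return (from < to) ? (value << (to - from)) & mask
				   : (value >> (from - to)) & mask;
	}

	int main(void)
	{
		unsigned int routing = 0x2 << 4;	/* source 2 in a slot at bits 4..5 */

		/* move it to a slot at bits 2..3 (mask 0xc): prints 0x8 */
		printf("0x%x\n", shift_bits(routing, 4, 2, 0xc));
		return 0;
	}
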
 static int output_switch_info(struct snd_kcontrol *ctl,
                              struct snd_ctl_elem_info *info)
 {
@@ -557,6 +592,7 @@ struct oxygen_model model_xonar_dg = {
        .resume = dg_resume,
        .set_dac_params = set_cs4245_dac_params,
        .set_adc_params = set_cs4245_adc_params,
+       .adjust_dac_routing = adjust_dg_dac_routing,
        .dump_registers = dump_cs4245_registers,
        .model_data_size = sizeof(struct dg),
        .device_config = PLAYBACK_0_TO_I2S |
index bd26e09..6ce9ad7 100644 (file)
@@ -22,7 +22,7 @@
 #define __PDAUDIOCF_H
 
 #include <sound/pcm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/interrupt.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ds.h>
index 989e04a..fe33e12 100644 (file)
@@ -23,8 +23,8 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
+#include <linux/io.h>
 #include <sound/core.h>
-#include <asm/io.h>
 #include "vxpocket.h"
 
 
index da2208e..5e4d499 100644 (file)
@@ -129,7 +129,7 @@ static struct snd_soc_dai_link afeb9260_dai = {
        .cpu_dai_name = "atmel-ssc-dai.0",
        .codec_dai_name = "tlv320aic23-hifi",
        .platform_name = "atmel_pcm-audio",
-       .codec_name = "tlv320aic23-codec.0-0x1a",
+       .codec_name = "tlv320aic23-codec.0-001a",
        .init = afeb9260_tlv320aic23_init,
        .ops = &afeb9260_ops,
 };
index e902b24..ad28663 100644 (file)
@@ -119,7 +119,7 @@ static struct snd_soc_dai_link bf5xx_ssm2602_dai = {
        .cpu_dai_name = "bf5xx-i2s",
        .codec_dai_name = "ssm2602-hifi",
        .platform_name = "bf5xx-pcm-audio",
-       .codec_name = "ssm2602-codec.0-0x1b",
+       .codec_name = "ssm2602-codec.0-001b",
        .ops = &bf5xx_ssm2602_ops,
 };
 
index 46dbfd0..347a567 100644 (file)
@@ -153,7 +153,7 @@ static int cq93vc_resume(struct snd_soc_codec *codec)
 
 static int cq93vc_probe(struct snd_soc_codec *codec)
 {
-       struct davinci_vc *davinci_vc = codec->dev->platform_data;
+       struct davinci_vc *davinci_vc = snd_soc_codec_get_drvdata(codec);
 
        davinci_vc->cq93vc.codec = codec;
        codec->control_data = davinci_vc;
index 03d1e86..0bb424a 100644 (file)
@@ -367,9 +367,12 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
        return 0;
 }
 
+static const u8 cx20442_reg;
+
 static struct snd_soc_codec_driver cx20442_codec_dev = {
        .probe =        cx20442_codec_probe,
        .remove =       cx20442_codec_remove,
+       .reg_cache_default = &cx20442_reg,
        .reg_cache_size = 1,
        .reg_word_size = sizeof(u8),
        .read = cx20442_read_reg_cache,
index 987476a..017d99c 100644 (file)
@@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
                            WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
                            irq_mask);
 
-       if (det && shrt) {
+       if (det || shrt) {
                /* Enable mic detection, this may not have been set through
                 * platform data (eg, if the defaults are OK). */
                snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
index e8490f3..e3ec243 100644 (file)
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
 
 #define WM8903_VMID_RES_50K                          2
 #define WM8903_VMID_RES_250K                         3
-#define WM8903_VMID_RES_5K                           4
+#define WM8903_VMID_RES_5K                           6
 
 /*
  * R8 (0x08) - Analogue DAC 0
index 247a6a9..4afbe3b 100644 (file)
@@ -107,6 +107,12 @@ struct wm8994_priv {
 
        int revision;
        struct wm8994_pdata *pdata;
+
+       unsigned int aif1clk_enable:1;
+       unsigned int aif2clk_enable:1;
+
+       unsigned int aif1clk_disable:1;
+       unsigned int aif2clk_disable:1;
 };
 
 static int wm8994_readable(unsigned int reg)
@@ -1004,6 +1010,110 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
        }
 }
 
+static int late_enable_ev(struct snd_soc_dapm_widget *w,
+                         struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               if (wm8994->aif1clk_enable) {
+                       snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+                                           WM8994_AIF1CLK_ENA_MASK,
+                                           WM8994_AIF1CLK_ENA);
+                       wm8994->aif1clk_enable = 0;
+               }
+               if (wm8994->aif2clk_enable) {
+                       snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+                                           WM8994_AIF2CLK_ENA_MASK,
+                                           WM8994_AIF2CLK_ENA);
+                       wm8994->aif2clk_enable = 0;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static int late_disable_ev(struct snd_soc_dapm_widget *w,
+                          struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMD:
+               if (wm8994->aif1clk_disable) {
+                       snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+                                           WM8994_AIF1CLK_ENA_MASK, 0);
+                       wm8994->aif1clk_disable = 0;
+               }
+               if (wm8994->aif2clk_disable) {
+                       snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+                                           WM8994_AIF2CLK_ENA_MASK, 0);
+                       wm8994->aif2clk_disable = 0;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+                     struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               wm8994->aif1clk_enable = 1;
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               wm8994->aif1clk_disable = 1;
+               break;
+       }
+
+       return 0;
+}
+
+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+                     struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               wm8994->aif2clk_enable = 1;
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               wm8994->aif2clk_disable = 1;
+               break;
+       }
+
+       return 0;
+}
+
+static int adc_mux_ev(struct snd_soc_dapm_widget *w,
+                     struct snd_kcontrol *kcontrol, int event)
+{
+       late_enable_ev(w, kcontrol, event);
+       return 0;
+}
+
+static int dac_ev(struct snd_soc_dapm_widget *w,
+                 struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       unsigned int mask = 1 << w->shift;
+
+       snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+                           mask, mask);
+       return 0;
+}
+
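
The aif1clk_ev/aif2clk_ev handlers above only record that a clock change is wanted; the actual AIFnCLK_ENA register writes happen later, in late_enable_ev() once the downstream widgets power up and in late_disable_ev() after they power down. A compact model of that deferred-write pattern (standalone sketch, not the driver's code):

	#include <stdio.h>

	struct state { int enable_pending, disable_pending, clk_on; };

	static void clk_ev(struct state *s, int power_up)
	{
		if (power_up)
			s->enable_pending = 1;	/* just note the request */
		else
			s->disable_pending = 1;
	}

	static void late_enable(struct state *s)
	{
		if (s->enable_pending) {
			s->clk_on = 1;		/* the real clock-enable write */
			s->enable_pending = 0;
		}
	}

	static void late_disable(struct state *s)
	{
		if (s->disable_pending) {
			s->clk_on = 0;		/* the real clock-disable write */
			s->disable_pending = 0;
		}
	}

	int main(void)
	{
		struct state s = { 0, 0, 0 };

		clk_ev(&s, 1);		/* pre-power-up: request only */
		late_enable(&s);	/* late widget powers up: clock turned on */
		printf("clock on: %d\n", s.clk_on);

		clk_ev(&s, 0);		/* power-down: request only */
		late_disable(&s);	/* after power-down: clock turned off */
		printf("clock on: %d\n", s.clk_on);
		return 0;
	}
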
 static const char *hp_mux_text[] = {
        "Mixer",
        "DAC",
@@ -1272,6 +1382,59 @@ static const struct soc_enum aif2dacr_src_enum =
 static const struct snd_kcontrol_new aif2dacr_src_mux =
        SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
 
+static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
+       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
+       SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+       late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+
+SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
+};
+
+static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
+SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
+       dac_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
+SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
+SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
+SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
+SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
+SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
+SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+};
+
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("DMIC1DAT"),
 SND_SOC_DAPM_INPUT("DMIC2DAT"),
@@ -1284,12 +1447,9 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
 
-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
-
-SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
                     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
                     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
                      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
@@ -1298,9 +1458,9 @@ SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
                      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
                      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
                     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
                     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
                      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
@@ -1345,6 +1505,7 @@ SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
 
 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
 
 SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
@@ -1368,14 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
 
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
-
-SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
-SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
-SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
-SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
-
 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
 
@@ -1515,14 +1668,12 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" },
 
        /* DAC1 inputs */
-       { "DAC1L", NULL, "DAC1L Mixer" },
        { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" },
        { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
        { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
        { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
        { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
-       { "DAC1R", NULL, "DAC1R Mixer" },
        { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" },
        { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
        { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1531,7 +1682,6 @@ static const struct snd_soc_dapm_route intercon[] = {
 
        /* DAC2/AIF2 outputs  */
        { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" },
-       { "DAC2L", NULL, "AIF2DAC2L Mixer" },
        { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" },
        { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
        { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
@@ -1539,13 +1689,17 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
        { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" },
-       { "DAC2R", NULL, "AIF2DAC2R Mixer" },
        { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" },
        { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
        { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
        { "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" },
        { "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
+       { "AIF1ADCDAT", NULL, "AIF1ADC1L" },
+       { "AIF1ADCDAT", NULL, "AIF1ADC1R" },
+       { "AIF1ADCDAT", NULL, "AIF1ADC2L" },
+       { "AIF1ADCDAT", NULL, "AIF1ADC2R" },
+
        { "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
 
        /* AIF3 output */
@@ -1578,6 +1732,31 @@ static const struct snd_soc_dapm_route intercon[] = {
        { "Right Headphone Mux", "DAC", "DAC1R" },
 };
 
+static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = {
+       { "DAC1L", NULL, "Late DAC1L Enable PGA" },
+       { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" },
+       { "DAC1R", NULL, "Late DAC1R Enable PGA" },
+       { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" },
+       { "DAC2L", NULL, "Late DAC2L Enable PGA" },
+       { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" },
+       { "DAC2R", NULL, "Late DAC2R Enable PGA" },
+       { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" }
+};
+
+static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = {
+       { "DAC1L", NULL, "DAC1L Mixer" },
+       { "DAC1R", NULL, "DAC1R Mixer" },
+       { "DAC2L", NULL, "AIF2DAC2L Mixer" },
+       { "DAC2R", NULL, "AIF2DAC2R Mixer" },
+};
+
+static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
+       { "AIF1DACDAT", NULL, "AIF2DACDAT" },
+       { "AIF2DACDAT", NULL, "AIF1DACDAT" },
+       { "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
+       { "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
+};
+
 static const struct snd_soc_dapm_route wm8994_intercon[] = {
        { "AIF2DACL", NULL, "AIF2DAC Mux" },
        { "AIF2DACR", NULL, "AIF2DAC Mux" },
@@ -2386,7 +2565,7 @@ static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
        else
                val = 0;
 
-       return snd_soc_update_bits(codec, reg, mask, reg);
+       return snd_soc_update_bits(codec, reg, mask, val);
 }
 
 #define WM8994_RATES SNDRV_PCM_RATE_8000_96000
@@ -2501,6 +2680,22 @@ static int wm8994_resume(struct snd_soc_codec *codec)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        int i, ret;
+       unsigned int val, mask;
+
+       if (wm8994->revision < 4) {
+               /* force a HW read */
+               val = wm8994_reg_read(codec->control_data,
+                                     WM8994_POWER_MANAGEMENT_5);
+
+               /* modify the cache only */
+               codec->cache_only = 1;
+               mask =  WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
+                       WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
+               val &= mask;
+               snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+                                   mask, val);
+               codec->cache_only = 0;
+       }
 
        /* Restore the registers */
        ret = snd_soc_cache_sync(codec);
@@ -2834,11 +3029,10 @@ static void wm8958_default_micdet(u16 status, void *data)
                report |= SND_JACK_BTN_5;
 
 done:
-       snd_soc_jack_report(wm8994->micdet[0].jack,
+       snd_soc_jack_report(wm8994->micdet[0].jack, report,
                            SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
                            SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
-                           SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
-                           report);
+                           SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
 }
 
 /**
@@ -3112,6 +3306,21 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
        case WM8994:
                snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
                                          ARRAY_SIZE(wm8994_specific_dapm_widgets));
+               if (wm8994->revision < 4) {
+                       snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
+                                                 ARRAY_SIZE(wm8994_lateclk_revd_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
+                                                 ARRAY_SIZE(wm8994_adc_revd_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
+                                                 ARRAY_SIZE(wm8994_dac_revd_widgets));
+               } else {
+                       snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+                                                 ARRAY_SIZE(wm8994_lateclk_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+                                                 ARRAY_SIZE(wm8994_adc_widgets));
+                       snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+                                                 ARRAY_SIZE(wm8994_dac_widgets));
+               }
                break;
        case WM8958:
                snd_soc_add_controls(codec, wm8958_snd_controls,
@@ -3129,6 +3338,16 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
        case WM8994:
                snd_soc_dapm_add_routes(dapm, wm8994_intercon,
                                        ARRAY_SIZE(wm8994_intercon));
+
+               if (wm8994->revision < 4) {
+                       snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
+                                               ARRAY_SIZE(wm8994_revd_intercon));
+                       snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
+                                               ARRAY_SIZE(wm8994_lateclk_revd_intercon));
+               } else {
+                       snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+                                               ARRAY_SIZE(wm8994_lateclk_intercon));
+               }
                break;
        case WM8958:
                snd_soc_dapm_add_routes(dapm, wm8958_intercon,
index 6045cbd..608c84c 100644 (file)
@@ -1223,7 +1223,7 @@ static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
        else
                val = 0;
 
-       return snd_soc_update_bits(codec, reg, mask, reg);
+       return snd_soc_update_bits(codec, reg, mask, val);
 }
 
 /* The size in bits of the FLL divide multiplied by 10
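
Both set_tristate() fixes above (wm8994 and wm8995) replace the accidental fourth argument `reg` with `val`, so the masked field is written with the intended value rather than the register address. For readers unfamiliar with the update_bits idiom, here is a minimal userspace model of its read-modify-write behaviour; the array-backed cache, register number and bit position are made up for illustration and are not taken from the ASoC code:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t regs[256];              /* hypothetical register cache */

    /* Model of an update_bits helper: replace only the masked field with val.
     * Returns 1 if the register content actually changed, 0 otherwise. */
    static int update_bits(unsigned int reg, uint16_t mask, uint16_t val)
    {
            uint16_t old = regs[reg];
            uint16_t new = (old & ~mask) | (val & mask);

            if (old == new)
                    return 0;
            regs[reg] = new;
            return 1;
    }

    int main(void)
    {
            unsigned int reg = 0x2a;               /* made-up tristate register */
            uint16_t mask = 0x0004, val = 0x0004;  /* enable one field bit */

            /* Passing "reg" (0x2a) here instead of "val" would set unrelated
             * bits inside the mask, which is exactly what the fix avoids. */
            int changed = update_bits(reg, mask, val);

            printf("changed=%d reg=0x%04x\n", changed, regs[reg]);
            return 0;
    }
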
index 43825b2..cce704c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
        wm9081->control_type = SND_SOC_I2C;
        wm9081->control_data = i2c;
 
+       if (dev_get_platdata(&i2c->dev))
+               memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev),
+                      sizeof(wm9081->retune));
+
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_wm9081, &wm9081_dai, 1);
        if (ret < 0)
index c466982..5168927 100644 (file)
@@ -91,6 +91,7 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op)
 static void calibrate_dc_servo(struct snd_soc_codec *codec)
 {
        struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
+       s8 offset;
        u16 reg, reg_l, reg_r, dcs_cfg;
 
        /* If we're using a digital only path and have a previously
@@ -149,16 +150,14 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
                        hubs->dcs_codes);
 
                /* HPOUT1L */
-               if (reg_l + hubs->dcs_codes > 0 &&
-                   reg_l + hubs->dcs_codes < 0xff)
-                       reg_l += hubs->dcs_codes;
-               dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+               offset = reg_l;
+               offset += hubs->dcs_codes;
+               dcs_cfg = (u8)offset << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
 
                /* HPOUT1R */
-               if (reg_r + hubs->dcs_codes > 0 &&
-                   reg_r + hubs->dcs_codes < 0xff)
-                       reg_r += hubs->dcs_codes;
-               dcs_cfg |= reg_r;
+               offset = reg_r;
+               offset += hubs->dcs_codes;
+               dcs_cfg |= (u8)offset;
 
                dev_dbg(codec->dev, "DCS result: %x\n", dcs_cfg);
 
@@ -675,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
 };
 
 static const struct snd_soc_dapm_route analogue_routes[] = {
+       { "MICBIAS1", NULL, "CLK_SYS" },
+       { "MICBIAS2", NULL, "CLK_SYS" },
+
        { "IN1L PGA", "IN1LP Switch", "IN1LP" },
        { "IN1L PGA", "IN1LN Switch", "IN1LN" },
 
index 0c2d6ba..fe79842 100644 (file)
@@ -218,12 +218,24 @@ static struct snd_soc_dai_link dm6467_evm_dai[] = {
                .ops = &evm_spdif_ops,
        },
 };
-static struct snd_soc_dai_link da8xx_evm_dai = {
+
+static struct snd_soc_dai_link da830_evm_dai = {
+       .name = "TLV320AIC3X",
+       .stream_name = "AIC3X",
+       .cpu_dai_name = "davinci-mcasp.1",
+       .codec_dai_name = "tlv320aic3x-hifi",
+       .codec_name = "tlv320aic3x-codec.1-0018",
+       .platform_name = "davinci-pcm-audio",
+       .init = evm_aic3x_init,
+       .ops = &evm_ops,
+};
+
+static struct snd_soc_dai_link da850_evm_dai = {
        .name = "TLV320AIC3X",
        .stream_name = "AIC3X",
        .cpu_dai_name= "davinci-mcasp.0",
        .codec_dai_name = "tlv320aic3x-hifi",
-       .codec_name = "tlv320aic3x-codec.0-001a",
+       .codec_name = "tlv320aic3x-codec.1-0018",
        .platform_name = "davinci-pcm-audio",
        .init = evm_aic3x_init,
        .ops = &evm_ops,
@@ -259,13 +271,13 @@ static struct snd_soc_card dm6467_snd_soc_card_evm = {
 
 static struct snd_soc_card da830_snd_soc_card = {
        .name = "DA830/OMAP-L137 EVM",
-       .dai_link = &da8xx_evm_dai,
+       .dai_link = &da830_evm_dai,
        .num_links = 1,
 };
 
 static struct snd_soc_card da850_snd_soc_card = {
        .name = "DA850/OMAP-L138 EVM",
-       .dai_link = &da8xx_evm_dai,
+       .dai_link = &da850_evm_dai,
        .num_links = 1,
 };
 
index e20c9e1..1e9bcca 100644 (file)
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
        .name           = "tlv320aic23",
        .stream_name    = "TLV320AIC23",
        .codec_dai_name = "tlv320aic23-hifi",
-       .platform_name  = "imx-pcm-audio.0",
+       .platform_name  = "imx-fiq-pcm-audio.0",
        .codec_name     = "tlv320aic23-codec.0-001a",
        .cpu_dai_name   = "imx-ssi.0",
        .ops            = &eukrea_tlv320_snd_ops,
index 2101bdc..3167be6 100644 (file)
@@ -507,8 +507,6 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
        /* Set up digital mute if not provided by the codec */
        if (!codec_dai->driver->ops) {
                codec_dai->driver->ops = &ams_delta_dai_ops;
-       } else if (!codec_dai->driver->ops->digital_mute) {
-               codec_dai->driver->ops->digital_mute = ams_delta_digital_mute;
        } else {
                ams_delta_ops.startup = ams_delta_startup;
                ams_delta_ops.shutdown = ams_delta_shutdown;
index fc592f0..784cff5 100644 (file)
@@ -307,10 +307,10 @@ static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 static struct snd_soc_dai_link corgi_dai = {
        .name = "WM8731",
        .stream_name = "WM8731",
-       .cpu_dai_name = "pxa-is2-dai",
+       .cpu_dai_name = "pxa2xx-i2s",
        .codec_dai_name = "wm8731-hifi",
        .platform_name = "pxa-pcm-audio",
-       .codec_name = "wm8731-codec-0.001a",
+       .codec_name = "wm8731-codec-0.001b",
        .init = corgi_wm8731_init,
        .ops = &corgi_ops,
 };
index 28333e7..dc65650 100644 (file)
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9705-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name = "wm9705-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
index 01bf316..51897fc 100644 (file)
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9705-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9705-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9705-codec",
index c6a37c6..053ed20 100644 (file)
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9712-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9712-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
index fc22e6e..b13a425 100644 (file)
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9712-hifi",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9712-aux",
                .platform_name = "pxa-pcm-audio",
                .codec_name = "wm9712-codec",
index 0d70fc8..38ca675 100644 (file)
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
        {
                .name = "AC97",
                .stream_name = "AC97 HiFi",
-               .cpu_dai_name = "pxa-ac97.0",
+               .cpu_dai_name = "pxa2xx-ac97",
                .codec_dai_name = "wm9713-hifi",
                .codec_name = "wm9713-codec",
                .init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
        {
                .name = "AC97 Aux",
                .stream_name = "AC97 Aux",
-               .cpu_dai_name = "pxa-ac97.1",
+               .cpu_dai_name = "pxa2xx-ac97-aux",
                .codec_dai_name ="wm9713-aux",
                .codec_name = "wm9713-codec",
                .platform_name = "pxa-pcm-audio",
index 857db96..504e400 100644 (file)
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
 {
        .name = "AC97 HiFi",
        .stream_name = "AC97 HiFi",
-       .cpu_dai_name = "pxa-ac97.0",
+       .cpu_dai_name = "pxa2xx-ac97",
        .codec_dai_name =  "wm9712-hifi",
        .codec_name = "wm9712-codec",
        .platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
 {
        .name = "AC97 Aux",
        .stream_name = "AC97 Aux",
-       .cpu_dai_name = "pxa-ac97.1",
+       .cpu_dai_name = "pxa2xx-ac97-aux",
        .codec_dai_name = "wm9712-aux",
        .codec_name = "wm9712-codec",
        .platform_name = "pxa-pcm-audio",
index 6298ee1..a7d4999 100644 (file)
@@ -276,7 +276,7 @@ static struct snd_soc_dai_link poodle_dai = {
        .cpu_dai_name = "pxa2xx-i2s",
        .codec_dai_name = "wm8731-hifi",
        .platform_name = "pxa-pcm-audio",
-       .codec_name = "wm8731-codec.0-001a",
+       .codec_name = "wm8731-codec.0-001b",
        .init = poodle_wm8731_init,
        .ops = &poodle_ops,
 };
index c2acb69..8e15713 100644 (file)
@@ -315,10 +315,10 @@ static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 static struct snd_soc_dai_link spitz_dai = {
        .name = "wm8750",
        .stream_name = "WM8750",
-       .cpu_dai_name = "pxa-is2",
+       .cpu_dai_name = "pxa2xx-i2s",
        .codec_dai_name = "wm8750-hifi",
        .platform_name = "pxa-pcm-audio",
-       .codec_name = "wm8750-codec.0-001a",
+       .codec_name = "wm8750-codec.0-001b",
        .init = spitz_wm8750_init,
        .ops = &spitz_ops,
 };
index f75804e..4b6e5d6 100644 (file)
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
 {
        .name = "AC97",
        .stream_name = "AC97 HiFi",
-       .cpu_dai_name = "pxa-ac97.0",
+       .cpu_dai_name = "pxa2xx-ac97",
        .codec_dai_name = "wm9712-hifi",
        .platform_name = "pxa-pcm-audio",
        .codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
 {
        .name = "AC97 Aux",
        .stream_name = "AC97 Aux",
-       .cpu_dai_name = "pxa-ac97.1",
+       .cpu_dai_name = "pxa2xx-ac97-aux",
        .codec_dai_name = "wm9712-aux",
        .platform_name = "pxa-pcm-audio",
        .codec_name = "wm9712-codec",
index b222a7d..25bba10 100644 (file)
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
        .stream_name = "AC97 HiFi",
        .codec_name = "wm9713-codec",
        .platform_name = "pxa-pcm-audio",
-       .cpu_dai_name = "pxa-ac97.0",
+       .cpu_dai_name = "pxa2xx-ac97",
        .codec_name = "wm9713-hifi",
        .init = zylonite_wm9713_init,
 },
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
        .stream_name = "AC97 Aux",
        .codec_name = "wm9713-codec",
        .platform_name = "pxa-pcm-audio",
-       .cpu_dai_name = "pxa-ac97.1",
+       .cpu_dai_name = "pxa2xx-ac97-aux",
        .codec_name = "wm9713-aux",
 },
 {
index 3eec610..0d0ae2b 100644 (file)
@@ -397,11 +397,11 @@ static struct snd_soc_dai_link neo1973_gta02_dai[] = {
 { /* Hifi Playback - for simultaneous use with voice below */
        .name = "WM8753",
        .stream_name = "WM8753 HiFi",
-       .cpu_dai_name = "s3c24xx-i2s",
+       .cpu_dai_name = "s3c24xx-iis",
        .codec_dai_name = "wm8753-hifi",
        .init = neo1973_gta02_wm8753_init,
        .platform_name = "samsung-audio",
-       .codec_name = "wm8753-codec.0-0x1a",
+       .codec_name = "wm8753-codec.0-001a",
        .ops = &neo1973_gta02_hifi_ops,
 },
 { /* Voice via BT */
@@ -410,7 +410,7 @@ static struct snd_soc_dai_link neo1973_gta02_dai[] = {
        .cpu_dai_name = "bluetooth-dai",
        .codec_dai_name = "wm8753-voice",
        .ops = &neo1973_gta02_voice_ops,
-       .codec_name = "wm8753-codec.0-0x1a",
+       .codec_name = "wm8753-codec.0-001a",
        .platform_name = "samsung-audio",
 },
 };
index c7a2451..d20815d 100644 (file)
@@ -559,9 +559,9 @@ static struct snd_soc_dai_link neo1973_dai[] = {
        .name = "WM8753",
        .stream_name = "WM8753 HiFi",
        .platform_name = "samsung-audio",
-       .cpu_dai_name = "s3c24xx-i2s",
+       .cpu_dai_name = "s3c24xx-iis",
        .codec_dai_name = "wm8753-hifi",
-       .codec_name = "wm8753-codec.0-0x1a",
+       .codec_name = "wm8753-codec.0-001a",
        .init = neo1973_wm8753_init,
        .ops = &neo1973_hifi_ops,
 },
@@ -571,7 +571,7 @@ static struct snd_soc_dai_link neo1973_dai[] = {
        .platform_name = "samsung-audio",
        .cpu_dai_name = "bluetooth-dai",
        .codec_dai_name = "wm8753-voice",
-       .codec_name = "wm8753-codec.0-0x1a",
+       .codec_name = "wm8753-codec.0-001a",
        .ops = &neo1973_voice_ops,
 },
 };
index bb4292e..08fcaaa 100644 (file)
@@ -94,8 +94,8 @@ static int simtec_hermes_init(struct snd_soc_pcm_runtime *rtd)
 static struct snd_soc_dai_link simtec_dai_aic33 = {
        .name           = "tlv320aic33",
        .stream_name    = "TLV320AIC33",
-       .codec_name     = "tlv320aic3x-codec.0-0x1a",
-       .cpu_dai_name   = "s3c24xx-i2s",
+       .codec_name     = "tlv320aic3x-codec.0-001a",
+       .cpu_dai_name   = "s3c24xx-iis",
        .codec_dai_name = "tlv320aic3x-hifi",
        .platform_name  = "samsung-audio",
        .init           = simtec_hermes_init,
index fbba4e3..116e3e6 100644 (file)
@@ -85,8 +85,8 @@ static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 static struct snd_soc_dai_link simtec_dai_aic23 = {
        .name           = "tlv320aic23",
        .stream_name    = "TLV320AIC23",
-       .codec_name     = "tlv320aic3x-codec.0-0x1a",
-       .cpu_dai_name   = "s3c24xx-i2s",
+       .codec_name     = "tlv320aic3x-codec.0-001a",
+       .cpu_dai_name   = "s3c24xx-iis",
        .codec_dai_name = "tlv320aic3x-hifi",
        .platform_name  = "samsung-audio",
        .init           = simtec_tlv320aic23_init,
index cdc8ecb..2c09e93 100644 (file)
@@ -228,7 +228,7 @@ static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
        .stream_name = "UDA134X",
        .codec_name = "uda134x-hifi",
        .codec_dai_name = "uda134x-hifi",
-       .cpu_dai_name = "s3c24xx-i2s",
+       .cpu_dai_name = "s3c24xx-iis",
        .ops = &s3c24xx_uda134x_ops,
        .platform_name  = "samsung-audio",
 };
index bac7291..c3f6f1e 100644 (file)
@@ -1449,6 +1449,7 @@ static int soc_post_component_init(struct snd_soc_card *card,
                rtd = &card->rtd_aux[num];
                name = aux_dev->name;
        }
+       rtd->card = card;
 
        /* machine controls, routes and widgets are not prefixed */
        temp = codec->name_prefix;
@@ -1471,7 +1472,6 @@ static int soc_post_component_init(struct snd_soc_card *card,
 
        /* register the rtd device */
        rtd->codec = codec;
-       rtd->card = card;
        rtd->dev.parent = card->dev;
        rtd->dev.release = rtd_release;
        rtd->dev.init_name = name;
@@ -1664,9 +1664,6 @@ static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
        goto out;
 
 found:
-       if (!try_module_get(codec->dev->driver->owner))
-               return -ENODEV;
-
        ret = soc_probe_codec(card, codec);
        if (ret < 0)
                return ret;
index 499730a..25e5423 100644 (file)
@@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
                    !path->connected(path->source, path->sink))
                        continue;
 
-               if (path->sink && path->sink->power_check &&
+               if (!path->sink)
+                       continue;
+
+               if (path->sink->force) {
+                       power = 1;
+                       break;
+               }
+
+               if (path->sink->power_check &&
                    path->sink->power_check(path->sink)) {
                        power = 1;
                        break;
@@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);
 int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 {
        struct snd_soc_dapm_widget *w;
+       unsigned int val;
 
        list_for_each_entry(w, &dapm->card->widgets, list)
        {
@@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
                case snd_soc_dapm_post:
                        break;
                }
+
+               /* Read the initial power state from the device */
+               if (w->reg >= 0) {
+                       val = snd_soc_read(w->codec, w->reg);
+                       val &= 1 << w->shift;
+                       if (w->invert)
+                               val = !val;
+
+                       if (val)
+                               w->power = 1;
+               }
+
                w->new = 1;
        }
 
@@ -1742,7 +1763,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
-       unsigned int val, val_mask;
+       unsigned int val;
        int connect, change;
        struct snd_soc_dapm_update update;
 
@@ -1750,13 +1771,13 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
 
        if (invert)
                val = max - val;
-       val_mask = mask << shift;
+       mask = mask << shift;
        val = val << shift;
 
        mutex_lock(&widget->codec->mutex);
        widget->value = val;
 
-       change = snd_soc_test_bits(widget->codec, reg, val_mask, val);
+       change = snd_soc_test_bits(widget->codec, reg, mask, val);
        if (change) {
                if (val)
                        /* new connection */
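
Among the soc-dapm.c changes above, snd_soc_dapm_new_widgets() now seeds each widget's power state from the hardware register instead of assuming everything starts powered off. The bit extraction it performs is the usual shift-and-invert pattern; a small userspace illustration, where fake_read() and the register/shift values are invented stand-ins for snd_soc_read() and real widget fields:

    #include <stdio.h>

    /* Hypothetical register image standing in for snd_soc_read(). */
    static unsigned int fake_read(int reg)
    {
            (void)reg;
            return 0x0208;          /* bit 9 happens to be set */
    }

    int main(void)
    {
            int reg = 0x01, shift = 9, invert = 0;
            unsigned int val = fake_read(reg);

            val &= 1u << shift;     /* isolate the widget's enable bit */
            if (invert)
                    val = !val;

            printf("initial power: %s\n", val ? "on" : "off");
            return 0;
    }
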
index 68b9747..66eabaf 100644 (file)
@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
        }
 
        dev->pcm->private_data = dev;
-       strcpy(dev->pcm->name, dev->product_name);
+       strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
 
        memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
        memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
index 2f218c7..a1a4708 100644 (file)
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
        if (ret < 0)
                return ret;
 
-       strcpy(rmidi->name, device->product_name);
+       strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
 
        rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
        rmidi->private_data = device;
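
The two caiaq hunks above swap unbounded strcpy() for strlcpy() so an over-long product_name cannot overrun the fixed-size ALSA name fields. strlcpy() is not part of glibc, so a portable userspace equivalent of the same bounded-copy idea can be written with snprintf(); the buffer size and string below are only examples:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[16];
            const char *product = "A device name longer than the buffer";

            /* Bounded copy: always NUL-terminated, silently truncated. */
            snprintf(name, sizeof(name), "%s", product);

            printf("%zu bytes kept: %s\n", strlen(name), name);
            return 0;
    }
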
index 800f7cb..c0f8270 100644 (file)
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
                return -ENOMEM;
        }
 
+       mutex_init(&chip->shutdown_mutex);
        chip->index = idx;
        chip->dev = dev;
        chip->card = card;
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
        chip = ptr;
        card = chip->card;
        mutex_lock(&register_mutex);
+       mutex_lock(&chip->shutdown_mutex);
        chip->shutdown = 1;
        chip->num_interfaces--;
        if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
                        snd_usb_mixer_disconnect(p);
                }
                usb_chip[chip->index] = NULL;
+               mutex_unlock(&chip->shutdown_mutex);
                mutex_unlock(&register_mutex);
                snd_card_free_when_closed(card);
        } else {
+               mutex_unlock(&chip->shutdown_mutex);
                mutex_unlock(&register_mutex);
        }
 }
index 7df89b3..85af605 100644 (file)
@@ -95,7 +95,7 @@ enum {
 };
 
 
-/*E-mu 0202(0404) eXtension Unit(XU) control*/
+/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
 enum {
        USB_XU_CLOCK_RATE               = 0xe301,
        USB_XU_CLOCK_SOURCE             = 0xe302,
@@ -1566,7 +1566,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw
                        cval->initialized = 1;
                } else {
                        if (type == USB_XU_CLOCK_RATE) {
-                               /* E-Mu USB 0404/0202/TrackerPre
+                               /* E-Mu USB 0404/0202/TrackerPre/0204
                                 * samplerate control quirk
                                 */
                                cval->min = 0;
index 4132522..e3f6805 100644 (file)
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
        }
 
        if (changed) {
+               mutex_lock(&subs->stream->chip->shutdown_mutex);
                /* format changed */
                snd_usb_release_substream_urbs(subs, 0);
                /* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                                                  params_rate(hw_params),
                                                  snd_pcm_format_physical_width(params_format(hw_params)) *
                                                        params_channels(hw_params));
+               mutex_unlock(&subs->stream->chip->shutdown_mutex);
        }
 
        return ret;
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        subs->cur_audiofmt = NULL;
        subs->cur_rate = 0;
        subs->period_bytes = 0;
-       if (!subs->stream->chip->shutdown)
-               snd_usb_release_substream_urbs(subs, 0);
+       mutex_lock(&subs->stream->chip->shutdown_mutex);
+       snd_usb_release_substream_urbs(subs, 0);
+       mutex_unlock(&subs->stream->chip->shutdown_mutex);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
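
The usbaudio changes above introduce chip->shutdown_mutex so that URB release in hw_params()/hw_free() cannot race against disconnect tearing the device down. The general shape of that locking, modelled loosely here with POSIX threads rather than the kernel mutex API (the function names and the malloc'd stand-in resource are illustrative only):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t shutdown_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int *urbs;                       /* stands in for the substream URBs */

    static void release_urbs(void)          /* hw_params()/hw_free() path */
    {
            pthread_mutex_lock(&shutdown_mutex);
            free(urbs);                     /* free(NULL) is a no-op, so this   */
            urbs = NULL;                    /* stays safe if disconnect ran 1st */
            pthread_mutex_unlock(&shutdown_mutex);
    }

    static void disconnect(void)            /* device unplug path */
    {
            pthread_mutex_lock(&shutdown_mutex);
            free(urbs);
            urbs = NULL;
            pthread_mutex_unlock(&shutdown_mutex);
    }

    int main(void)
    {
            urbs = malloc(64);
            release_urbs();
            disconnect();
            puts("no double free, no use-after-free");
            return 0;
    }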
 
index 3599987..921a86f 100644 (file)
        .idProduct = 0x3f0a,
        .bInterfaceClass = USB_CLASS_AUDIO,
 },
+{
+       /* E-Mu 0204 USB */
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor = 0x041e,
+       .idProduct = 0x3f19,
+       .bInterfaceClass = USB_CLASS_AUDIO,
+},
 
 /*
  * Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface
index cf8bf08..e314cdb 100644 (file)
@@ -532,7 +532,7 @@ int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat
 }
 
 /*
- * For E-Mu 0404USB/0202USB/TrackerPre sample rate should be set for device,
+ * For E-Mu 0404USB/0202USB/TrackerPre/0204 sample rate should be set for device,
  * not for interface.
  */
 
@@ -589,6 +589,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
        case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
        case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
+       case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
                set_format_emu_quirk(subs, fmt);
                break;
        }
index db3eb21..6e66fff 100644 (file)
@@ -36,6 +36,7 @@ struct snd_usb_audio {
        struct snd_card *card;
        u32 usb_id;
        int shutdown;
+       struct mutex shutdown_mutex;
        unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
        int num_interfaces;
        int num_suspended_intf;
index 2b5387d..7141c42 100644 (file)
@@ -204,13 +204,11 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
@@ -294,6 +292,13 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
        CFLAGS := $(CFLAGS) -fstack-protector-all
 endif
 
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y)
+       CFLAGS := $(CFLAGS) -Wstack-protector
+endif
+
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y)
+       CFLAGS := $(CFLAGS) -Wvolatile-register-var
+endif
 
 ### --- END CONFIGURATION SECTION ---
 
index c056cdc..8879463 100644 (file)
@@ -212,7 +212,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename)
                        continue;
 
                offset = start + i;
-               sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
+               sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
                fp = popen(cmd, "r");
                if (!fp)
                        continue;
@@ -270,9 +270,9 @@ static void hist_entry__print_hits(struct hist_entry *self)
 
        for (offset = 0; offset < len; ++offset)
                if (h->ip[offset] != 0)
-                       printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
+                       printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
                               sym->start + offset, h->ip[offset]);
-       printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+       printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
 }
 
 static int hist_entry__tty_annotate(struct hist_entry *he)
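
The long series of perf changes above and below replaces the glibc-specific %Lu/%Lx conversions with the <inttypes.h> PRIu64/PRIx64 macros, which expand to the correct length modifier whether uint64_t is long or long long on the build host. A minimal standalone example of the idiom:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t start = 0xffffffff81000000ULL;
            uint64_t count = 1234567890123ULL;

            /* The macros splice the right conversion into the format string. */
            printf("start=%#" PRIx64 " count=%" PRIu64 "\n", start, count);
            return 0;
    }
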
index def7ddc..d97256d 100644 (file)
@@ -371,10 +371,10 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
                        addr = data->ptr;
 
                if (sym != NULL)
-                       snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
+                       snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
                                 addr - map->unmap_ip(map, sym->start));
                else
-                       snprintf(buf, sizeof(buf), "%#Lx", addr);
+                       snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
                printf(" %-34s |", buf);
 
                printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
index b9c6e54..2b36def 100644 (file)
@@ -782,9 +782,9 @@ static void print_result(void)
                pr_info("%10u ", st->nr_acquired);
                pr_info("%10u ", st->nr_contended);
 
-               pr_info("%15llu ", st->wait_time_total);
-               pr_info("%15llu ", st->wait_time_max);
-               pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
+               pr_info("%15" PRIu64 " ", st->wait_time_total);
+               pr_info("%15" PRIu64 " ", st->wait_time_max);
+               pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
                       0 : st->wait_time_min);
                pr_info("\n");
        }
index fcd29e8..60cac6f 100644 (file)
@@ -759,8 +759,8 @@ static int __cmd_record(int argc, const char **argv)
                perf_session__process_machines(session, event__synthesize_guest_os);
 
        if (!system_wide)
-               event__synthesize_thread(target_tid, process_synthesized_event,
-                                        session);
+               event__synthesize_thread_map(threads, process_synthesized_event,
+                                            session);
        else
                event__synthesize_threads(process_synthesized_event, session);
 
@@ -817,7 +817,7 @@ static int __cmd_record(int argc, const char **argv)
         * Approximate RIP event size: 24 bytes.
         */
        fprintf(stderr,
-               "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
+               "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
                (double)bytes_written / 1024.0 / 1024.0,
                output_name,
                bytes_written / 24);
index 75183a4..c27e31f 100644 (file)
@@ -197,7 +197,7 @@ static int process_read_event(event_t *event, struct sample_data *sample __used,
                                           event->read.value);
        }
 
-       dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid,
+       dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
                    attr ? __event_name(attr->type, attr->config) : "FAIL",
                    event->read.value);
 
index 29e7ffd..29acb89 100644 (file)
@@ -193,7 +193,7 @@ static void calibrate_run_measurement_overhead(void)
        }
        run_measurement_overhead = min_delta;
 
-       printf("run measurement overhead: %Ld nsecs\n", min_delta);
+       printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
 static void calibrate_sleep_measurement_overhead(void)
@@ -211,7 +211,7 @@ static void calibrate_sleep_measurement_overhead(void)
        min_delta -= 10000;
        sleep_measurement_overhead = min_delta;
 
-       printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
+       printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
 static struct sched_atom *
@@ -617,13 +617,13 @@ static void test_calibrations(void)
        burn_nsecs(1e6);
        T1 = get_nsecs();
 
-       printf("the run test took %Ld nsecs\n", T1-T0);
+       printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
 
        T0 = get_nsecs();
        sleep_nsecs(1e6);
        T1 = get_nsecs();
 
-       printf("the sleep test took %Ld nsecs\n", T1-T0);
+       printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
 }
 
 #define FILL_FIELD(ptr, field, event, data)    \
@@ -816,10 +816,10 @@ replay_switch_event(struct trace_switch_event *switch_event,
                delta = 0;
 
        if (delta < 0)
-               die("hm, delta: %Ld < 0 ?\n", delta);
+               die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
        if (verbose) {
-               printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
+               printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
                        switch_event->prev_comm, switch_event->prev_pid,
                        switch_event->next_comm, switch_event->next_pid,
                        delta);
@@ -1048,7 +1048,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
                delta = 0;
 
        if (delta < 0)
-               die("hm, delta: %Ld < 0 ?\n", delta);
+               die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
        sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1221,7 +1221,7 @@ static void output_lat_thread(struct work_atoms *work_list)
 
        avg = work_list->total_lat / work_list->nb_atoms;
 
-       printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+       printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
              (double)work_list->total_runtime / 1e6,
                 work_list->nb_atoms, (double)avg / 1e6,
                 (double)work_list->max_lat / 1e6,
@@ -1423,7 +1423,7 @@ map_switch_event(struct trace_switch_event *switch_event,
                delta = 0;
 
        if (delta < 0)
-               die("hm, delta: %Ld < 0 ?\n", delta);
+               die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
        sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1713,7 +1713,7 @@ static void __cmd_lat(void)
        }
 
        printf(" -----------------------------------------------------------------------------------------\n");
-       printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
+       printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
                (double)all_runtime/1e6, all_count);
 
        printf(" ---------------------------------------------------\n");
index 150a606..b766c2a 100644 (file)
@@ -77,8 +77,8 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
        if (session->sample_type & PERF_SAMPLE_RAW) {
                if (debug_mode) {
                        if (sample->time < last_timestamp) {
-                               pr_err("Samples misordered, previous: %llu "
-                                       "this: %llu\n", last_timestamp,
+                               pr_err("Samples misordered, previous: %" PRIu64
+                                       " this: %" PRIu64 "\n", last_timestamp,
                                        sample->time);
                                nr_unordered++;
                        }
@@ -126,7 +126,7 @@ static int __cmd_script(struct perf_session *session)
        ret = perf_session__process_events(session, &event_ops);
 
        if (debug_mode)
-               pr_err("Misordered timestamps: %llu\n", nr_unordered);
+               pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
 
        return ret;
 }
index 0ff11d9..a482a19 100644 (file)
@@ -206,8 +206,8 @@ static int read_counter_aggr(struct perf_evsel *counter)
                update_stats(&ps->res_stats[i], count[i]);
 
        if (verbose) {
-               fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
-                               count[0], count[1], count[2]);
+               fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+                       event_name(counter), count[0], count[1], count[2]);
        }
 
        /*
index ed56961..5dcdba6 100644 (file)
@@ -146,7 +146,7 @@ next_pair:
                                if (llabs(skew) < page_size)
                                        continue;
 
-                               pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+                               pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
                                         sym->start, sym->name, sym->end, pair->end);
                        } else {
                                struct rb_node *nnd;
@@ -168,11 +168,11 @@ detour:
                                        goto detour;
                                }
 
-                               pr_debug("%#Lx: diff name v: %s k: %s\n",
+                               pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
                                         sym->start, sym->name, pair->name);
                        }
                } else
-                       pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+                       pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
 
                err = -1;
        }
@@ -211,10 +211,10 @@ detour:
 
                if (pair->start == pos->start) {
                        pair->priv = 1;
-                       pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+                       pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
                                pos->start, pos->end, pos->pgoff, pos->dso->name);
                        if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-                               pr_info(": \n*%Lx-%Lx %Lx",
+                               pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
                                        pair->start, pair->end, pair->pgoff);
                        pr_info(" %s\n", pair->dso->name);
                        pair->priv = 1;
@@ -307,7 +307,7 @@ static int test__open_syscall_event(void)
        }
 
        if (evsel->counts->cpu[0].val != nr_open_calls) {
-               pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
+               pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);
                goto out_close_fd;
        }
@@ -332,8 +332,7 @@ static int test__open_syscall_event_on_all_cpus(void)
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
-       cpu_set_t *cpu_set;
-       size_t cpu_set_size;
+       cpu_set_t cpu_set;
        int id = trace_event__id("sys_enter_open");
 
        if (id < 0) {
@@ -353,13 +352,8 @@ static int test__open_syscall_event_on_all_cpus(void)
                return -1;
        }
 
-       cpu_set = CPU_ALLOC(cpus->nr);
 
-       if (cpu_set == NULL)
-               goto out_thread_map_delete;
-
-       cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
-       CPU_ZERO_S(cpu_set_size, cpu_set);
+       CPU_ZERO(&cpu_set);
 
        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
@@ -367,7 +361,7 @@ static int test__open_syscall_event_on_all_cpus(void)
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
-               goto out_cpu_free;
+               goto out_thread_map_delete;
        }
 
        if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -379,14 +373,29 @@ static int test__open_syscall_event_on_all_cpus(void)
 
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_open_calls + cpu;
+               /*
+                * XXX eventually lift this restriction in a way that
+                * keeps perf building on older glibc installations
+                * without CPU_ALLOC. 1024 cpus in 2010 still seems
+                * a reasonable upper limit tho :-)
+                */
+               if (cpus->map[cpu] >= CPU_SETSIZE) {
+                       pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+                       continue;
+               }
 
-               CPU_SET(cpu, cpu_set);
-               sched_setaffinity(0, cpu_set_size, cpu_set);
+               CPU_SET(cpus->map[cpu], &cpu_set);
+               if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+                       pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+                                cpus->map[cpu],
+                                strerror(errno));
+                       goto out_close_fd;
+               }
                for (i = 0; i < ncalls; ++i) {
                        fd = open("/etc/passwd", O_RDONLY);
                        close(fd);
                }
-		CPU_CLR(cpu, cpu_set);
+               CPU_CLR(cpus->map[cpu], &cpu_set);
        }
 
        /*
@@ -402,6 +411,9 @@ static int test__open_syscall_event_on_all_cpus(void)
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;
 
+               if (cpus->map[cpu] >= CPU_SETSIZE)
+                       continue;
+
                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__open_read_on_cpu\n");
                        goto out_close_fd;
@@ -409,8 +421,8 @@ static int test__open_syscall_event_on_all_cpus(void)
 
                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
-                       pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
-                                expected, cpu, evsel->counts->cpu[cpu].val);
+                       pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+                                expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
                        goto out_close_fd;
                }
        }
@@ -420,8 +432,6 @@ out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
        perf_evsel__delete(evsel);
-out_cpu_free:
-       CPU_FREE(cpu_set);
 out_thread_map_delete:
        thread_map__delete(threads);
        return err;
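
The builtin-test.c change above moves from CPU_ALLOC() to a plain cpu_set_t on the stack, skipping any CPU number at or beyond CPU_SETSIZE and checking the sched_setaffinity() return value. A self-contained sketch of that affinity pattern, pinning to CPU 0 purely as an example:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            cpu_set_t cpu_set;
            int cpu = 0;                    /* example CPU to pin to */

            if (cpu >= CPU_SETSIZE)         /* same guard the hunk adds */
                    return 1;

            CPU_ZERO(&cpu_set);
            CPU_SET(cpu, &cpu_set);
            if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                    fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));
                    return 1;
            }
            /* ... per-CPU work would run here ... */
            CPU_CLR(cpu, &cpu_set);
            return 0;
    }
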
index 746cf03..0ace786 100644 (file)
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
                c->start_time = start;
        if (p->start_time == 0 || p->start_time > start)
                p->start_time = start;
-
-       if (cpu > numcpus)
-               numcpus = cpu;
 }
 
 #define MAX_CPUS 4096
@@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used,
                if (!event_str)
                        return 0;
 
+               if (sample->cpu > numcpus)
+                       numcpus = sample->cpu;
+
                if (strcmp(event_str, "power:cpu_idle") == 0) {
                        struct power_processor_entry *ppe = (void *)te;
                        if (ppe->state == (u32)PWR_EVENT_EXIT)
index 05344c6..5a29d9c 100644 (file)
@@ -40,6 +40,7 @@
 #include <stdio.h>
 #include <termios.h>
 #include <unistd.h>
+#include <inttypes.h>
 
 #include <errno.h>
 #include <time.h>
@@ -214,7 +215,7 @@ static int parse_source(struct sym_entry *syme)
        len = sym->end - sym->start;
 
        sprintf(command,
-               "objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+               "objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
                BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
                BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
 
@@ -308,7 +309,7 @@ static void lookup_sym_source(struct sym_entry *syme)
        struct source_line *line;
        char pattern[PATTERN_LEN + 1];
 
-       sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+       sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
                map__rip_2objdump(syme->map, symbol->start));
 
        pthread_mutex_lock(&syme->src->lock);
@@ -537,7 +538,7 @@ static void print_sym_table(void)
        if (nr_counters == 1 || !display_weighted) {
                struct perf_evsel *first;
                first = list_entry(evsel_list.next, struct perf_evsel, node);
-               printf("%Ld", first->attr.sample_period);
+               printf("%" PRIu64, (uint64_t)first->attr.sample_period);
                if (freq)
                        printf("Hz ");
                else
@@ -640,7 +641,7 @@ static void print_sym_table(void)
 
                percent_color_fprintf(stdout, "%4.1f%%", pcnt);
                if (verbose)
-                       printf(" %016llx", sym->start);
+                       printf(" %016" PRIx64, sym->start);
                printf(" %-*.*s", sym_width, sym_width, sym->name);
                printf(" %-*.*s\n", dso_width, dso_width,
                       dso_width >= syme->map->dso->long_name_len ?
@@ -1305,7 +1306,7 @@ static int __cmd_top(void)
                return -ENOMEM;
 
        if (target_tid != -1)
-               event__synthesize_thread(target_tid, event__process, session);
+               event__synthesize_thread_map(threads, event__process, session);
        else
                event__synthesize_threads(event__process, session);
 
index 2302ec0..50d0a93 100644 (file)
@@ -263,11 +263,12 @@ static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
                                             process, session);
 }
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-                            struct perf_session *session)
+int event__synthesize_thread_map(struct thread_map *threads,
+                                event__handler_t process,
+                                struct perf_session *session)
 {
        event_t *comm_event, *mmap_event;
-       int err = -1;
+       int err = -1, thread;
 
        comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
        if (comm_event == NULL)
@@ -277,8 +278,15 @@ int event__synthesize_thread(pid_t pid, event__handler_t process,
        if (mmap_event == NULL)
                goto out_free_comm;
 
-       err = __event__synthesize_thread(comm_event, mmap_event, pid,
-                                        process, session);
+       err = 0;
+       for (thread = 0; thread < threads->nr; ++thread) {
+               if (__event__synthesize_thread(comm_event, mmap_event,
+                                              threads->map[thread],
+                                              process, session)) {
+                       err = -1;
+                       break;
+               }
+       }
        free(mmap_event);
 out_free_comm:
        free(comm_event);
@@ -459,7 +467,8 @@ int event__process_comm(event_t *self, struct sample_data *sample __used,
 int event__process_lost(event_t *self, struct sample_data *sample __used,
                        struct perf_session *session)
 {
-       dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
+       dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
+                   self->lost.id, self->lost.lost);
        session->hists.stats.total_lost += self->lost.lost;
        return 0;
 }
@@ -575,7 +584,7 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used,
        u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        int ret = 0;
 
-       dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+       dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
                        self->mmap.pid, self->mmap.tid, self->mmap.start,
                        self->mmap.len, self->mmap.pgoff, self->mmap.filename);
 
index 2b7e919..cc7b52f 100644 (file)
@@ -135,14 +135,16 @@ typedef union event_union {
 void event__print_totals(void);
 
 struct perf_session;
+struct thread_map;
 
 typedef int (*event__handler_synth_t)(event_t *event, 
                                      struct perf_session *session);
 typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
                                struct perf_session *session);
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-                            struct perf_session *session);
+int event__synthesize_thread_map(struct thread_map *threads,
+                                event__handler_t process,
+                                struct perf_session *session);
 int event__synthesize_threads(event__handler_t process,
                              struct perf_session *session);
 int event__synthesize_kernel_mmap(event__handler_t process,
index f5cfed6..d8575d3 100644 (file)
@@ -90,7 +90,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;
 
-       aggr->val = 0;
+       aggr->val = aggr->ena = aggr->run = 0;
 
        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
index 989fa2d..0866bcd 100644 (file)
@@ -270,11 +270,15 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
                          const char *name, bool is_kallsyms)
 {
        const size_t size = PATH_MAX;
-       char *realname = realpath(name, NULL),
-            *filename = malloc(size),
+       char *realname, *filename = malloc(size),
             *linkname = malloc(size), *targetname;
        int len, err = -1;
 
+       if (is_kallsyms)
+               realname = (char *)name;
+       else
+               realname = realpath(name, NULL);
+
        if (realname == NULL || filename == NULL || linkname == NULL)
                goto out_free;
 
@@ -306,7 +310,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
        if (symlink(targetname, linkname) == 0)
                err = 0;
 out_free:
-       free(realname);
+       if (!is_kallsyms)
+               free(realname);
        free(filename);
        free(linkname);
        return err;
@@ -798,8 +803,8 @@ static int perf_file_section__process(struct perf_file_section *self,
                                      int feat, int fd)
 {
        if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
-               pr_debug("Failed to lseek to %Ld offset for feature %d, "
-                        "continuing...\n", self->offset, feat);
+               pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+                         "%d, continuing...\n", self->offset, feat);
                return 0;
        }
 
index c749ba6..df51560 100644 (file)
@@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 {
        struct sort_entry *se;
        u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+       u64 nr_events;
        const char *sep = symbol_conf.field_sep;
        int ret;
 
@@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
 
        if (pair_hists) {
                period = self->pair ? self->pair->period : 0;
+               nr_events = self->pair ? self->pair->nr_events : 0;
                total = pair_hists->stats.total_period;
                period_sys = self->pair ? self->pair->period_sys : 0;
                period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
                period_guest_us = self->pair ? self->pair->period_guest_us : 0;
        } else {
                period = self->period;
+               nr_events = self->nr_events;
                total = session_total;
                period_sys = self->period_sys;
                period_us = self->period_us;
@@ -636,13 +639,13 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
                        }
                }
        } else
-               ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
+               ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
 
        if (symbol_conf.show_nr_samples) {
                if (sep)
-                       ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
+                       ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
                else
-                       ret += snprintf(s + ret, size - ret, "%11lld", period);
+                       ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
        }
 
        if (pair_hists) {
@@ -971,7 +974,7 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
        sym_size = sym->end - sym->start;
        offset = ip - sym->start;
 
-       pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
+       pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
 
        if (offset >= sym_size)
                return 0;
@@ -980,8 +983,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
        h->sum++;
        h->ip[offset]++;
 
-       pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
-                 self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
+       pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
+                 "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
+                 ip, ip - self->ms.sym->start, h->ip[offset]);
        return 0;
 }
 
@@ -1132,7 +1136,7 @@ fallback:
                goto out_free_filename;
        }
 
-       pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
+       pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
                 filename, sym->name, map->unmap_ip(map, sym->start),
                 map->unmap_ip(map, sym->end));
 
@@ -1142,7 +1146,7 @@ fallback:
                 dso, dso->long_name, sym, sym->name);
 
        snprintf(command, sizeof(command),
-                "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
+                "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
                 map__rip_2objdump(map, sym->start),
                 map__rip_2objdump(map, sym->end),
                 symfs_filename, filename);
index 8be0b96..305c848 100644 (file)
@@ -2,6 +2,7 @@
 #define _PERF_LINUX_BITOPS_H_
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <asm/hweight.h>
 
 #define BITS_PER_LONG __WORDSIZE
index 3a7eb6e..a16ecab 100644 (file)
@@ -1,5 +1,6 @@
 #include "symbol.h"
 #include <errno.h>
+#include <inttypes.h>
 #include <limits.h>
 #include <stdlib.h>
 #include <string.h>
@@ -195,7 +196,7 @@ int map__overlap(struct map *l, struct map *r)
 
 size_t map__fprintf(struct map *self, FILE *fp)
 {
-       return fprintf(fp, " %Lx-%Lx %Lx %s\n",
+       return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
                       self->start, self->end, self->pgoff, self->dso->name);
 }
 
index bc2732e..135f69b 100644 (file)
@@ -279,7 +279,7 @@ const char *__event_name(int type, u64 config)
        static char buf[32];
 
        if (type == PERF_TYPE_RAW) {
-               sprintf(buf, "raw 0x%llx", config);
+               sprintf(buf, "raw 0x%" PRIx64, config);
                return buf;
        }
 
index b82cafb..458e3ec 100644 (file)
@@ -23,7 +23,7 @@ struct tracepoint_path {
 };
 
 extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
-extern bool have_tracepoints(struct list_head *evsel_list);
+extern bool have_tracepoints(struct list_head *evlist);
 
 extern int                     nr_counters;
 
index 128aaab..6e29d9c 100644 (file)
@@ -172,7 +172,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
        sym = __find_kernel_function_by_name(tp->symbol, &map);
        if (sym) {
                addr = map->unmap_ip(map, sym->start + tp->offset);
-               pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
+               pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
                         tp->offset, addr);
                ret = find_perf_probe_point((unsigned long)addr, pp);
        }
index 313dac2..105f00b 100644 (file)
@@ -652,10 +652,11 @@ static void callchain__printf(struct sample_data *sample)
 {
        unsigned int i;
 
-       printf("... chain: nr:%Lu\n", sample->callchain->nr);
+       printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
 
        for (i = 0; i < sample->callchain->nr; i++)
-               printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
+               printf("..... %2d: %016" PRIx64 "\n",
+                      i, sample->callchain->ips[i]);
 }
 
 static void perf_session__print_tstamp(struct perf_session *session,
@@ -672,7 +673,7 @@ static void perf_session__print_tstamp(struct perf_session *session,
                printf("%u ", sample->cpu);
 
        if (session->sample_type & PERF_SAMPLE_TIME)
-               printf("%Lu ", sample->time);
+               printf("%" PRIu64 " ", sample->time);
 }
 
 static void dump_event(struct perf_session *session, event_t *event,
@@ -681,16 +682,16 @@ static void dump_event(struct perf_session *session, event_t *event,
        if (!dump_trace)
                return;
 
-       printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size,
-              event->header.type);
+       printf("\n%#" PRIx64 " [%#x]: event: %d\n",
+              file_offset, event->header.size, event->header.type);
 
        trace_event(event);
 
        if (sample)
                perf_session__print_tstamp(session, event, sample);
 
-       printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size,
-              event__get_event_name(event->header.type));
+       printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
+              event->header.size, event__get_event_name(event->header.type));
 }
 
 static void dump_sample(struct perf_session *session, event_t *event,
@@ -699,8 +700,9 @@ static void dump_sample(struct perf_session *session, event_t *event,
        if (!dump_trace)
                return;
 
-       printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
-              sample->pid, sample->tid, sample->ip, sample->period);
+       printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
+              event->header.misc, sample->pid, sample->tid, sample->ip,
+              sample->period);
 
        if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);
@@ -843,8 +845,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
 {
        if (ops->lost == event__process_lost &&
            session->hists.stats.total_lost != 0) {
-               ui__warning("Processed %Lu events and LOST %Lu!\n\n"
-                           "Check IO/CPU overload!\n\n",
+               ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
+                           "!\n\nCheck IO/CPU overload!\n\n",
                            session->hists.stats.total_period,
                            session->hists.stats.total_lost);
        }
@@ -918,7 +920,7 @@ more:
 
        if (size == 0 ||
            (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
-               dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+               dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            head, event.header.size, event.header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
@@ -1023,7 +1025,7 @@ more:
 
        if (size == 0 ||
            perf_session__process_event(session, event, ops, file_pos) < 0) {
-               dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+               dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            file_offset + head, event->header.size,
                            event->header.type);
                /*
index b3637db..96c8660 100644 (file)
@@ -12,6 +12,7 @@
  * of the License.
  */
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -43,11 +44,11 @@ static double cpu2y(int cpu)
        return cpu2slot(cpu) * SLOT_MULT;
 }
 
-static double time2pixels(u64 time)
+static double time2pixels(u64 __time)
 {
        double X;
 
-       X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+       X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
        return X;
 }
 
@@ -94,7 +95,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
 
        total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
        fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
-       fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+       fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
 
        fprintf(svgfile, "<defs>\n  <style type=\"text/css\">\n    <![CDATA[\n");
 
@@ -455,9 +456,9 @@ void svg_legenda(void)
                return;
 
        svg_legenda_box(0,      "Running", "sample");
-       svg_legenda_box(100,    "Idle","rect.c1");
-       svg_legenda_box(200,    "Deeper Idle", "rect.c3");
-       svg_legenda_box(350,    "Deepest Idle", "rect.c6");
+       svg_legenda_box(100,    "Idle","c1");
+       svg_legenda_box(200,    "Deeper Idle", "c3");
+       svg_legenda_box(350,    "Deepest Idle", "c6");
        svg_legenda_box(550,    "Sleeping", "process2");
        svg_legenda_box(650,    "Waiting for cpu", "waiting");
        svg_legenda_box(800,    "Blocked on IO", "blocked");
@@ -483,7 +484,7 @@ void svg_time_grid(void)
                        color = 128;
                }
 
-               fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+               fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
                        time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
 
                i += 10000000;
index 15ccfba..b1bf490 100644 (file)
@@ -11,6 +11,7 @@
 #include <sys/param.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <inttypes.h>
 #include "build-id.h"
 #include "debug.h"
 #include "symbol.h"
@@ -153,7 +154,7 @@ static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
        self->binding = binding;
        self->namelen = namelen - 1;
 
-       pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+       pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end);
 
        memcpy(self->name, name, namelen);
 
@@ -167,7 +168,7 @@ void symbol__delete(struct symbol *self)
 
 static size_t symbol__fprintf(struct symbol *self, FILE *fp)
 {
-       return fprintf(fp, " %llx-%llx %c %s\n",
+       return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
                       self->start, self->end,
                       self->binding == STB_GLOBAL ? 'g' :
                       self->binding == STB_LOCAL  ? 'l' : 'w',
@@ -1161,6 +1162,13 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
 
                section_name = elf_sec__name(&shdr, secstrs);
 
+               /* On ARM, symbols for thumb functions have 1 added to
+                * the symbol address as a flag - remove it */
+               if ((ehdr.e_machine == EM_ARM) &&
+                   (map->type == MAP__FUNCTION) &&
+                   (sym.st_value & 1))
+                       --sym.st_value;
+
                if (self->kernel != DSO_TYPE_USER || kmodule) {
                        char dso_name[PATH_MAX];
 
@@ -1208,8 +1216,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
                }
 
                if (curr_dso->adjust_symbols) {
-                       pr_debug4("%s: adjusting symbol: st_value: %#Lx "
-                                 "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
+                       pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+                                 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
                                  (u64)sym.st_value, (u64)shdr.sh_addr,
                                  (u64)shdr.sh_offset);
                        sym.st_value -= shdr.sh_addr - shdr.sh_offset;
@@ -1828,7 +1836,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map,
        int err = -1, fd;
        char symfs_vmlinux[PATH_MAX];
 
-       snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s",
+       snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
                 symbol_conf.symfs, vmlinux);
        fd = open(symfs_vmlinux, O_RDONLY);
        if (fd < 0)
index 7d6b833..5f3689a 100644 (file)
@@ -1,12 +1,14 @@
 #ifndef __PERF_TYPES_H
 #define __PERF_TYPES_H
 
+#include <stdint.h>
+
 /*
- * We define u64 as unsigned long long for every architecture
- * so that we can print it with %Lx without getting warnings.
+ * We define u64 as uint64_t for every architecture
+ * so that we can print it with "%"PRIx64 without getting warnings.
  */
-typedef unsigned long long u64;
-typedef signed long long   s64;
+typedef uint64_t          u64;
+typedef int64_t                   s64;
 typedef unsigned int      u32;
 typedef signed int        s32;
 typedef unsigned short    u16;
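
The comment above states the rationale behind the format-string hunks throughout this merge: with u64 defined as uint64_t, the old "%Lx"/"%Lu" conversions no longer match the type on every libc (on 64-bit glibc uint64_t is plain unsigned long, not unsigned long long), so gcc -Wformat warns, while the <inttypes.h> PRI* macros expand to the correct conversion everywhere. A minimal standalone illustration, not taken from the tree:

/* Illustrative only: portable printing of a 64-bit value. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ip = 0xffffffff81000000ULL;    /* hypothetical sample address */

        /* printf("ip=%#Lx\n", ip); old style, warns where uint64_t is unsigned long */
        printf("ip=%#" PRIx64 "\n", ip);        /* expands to the right conversion */
        return 0;
}
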
index ebda8c3..60c463c 100644 (file)
@@ -350,7 +350,7 @@ static char *callchain_list__sym_name(struct callchain_list *self,
        if (self->ms.sym)
                return self->ms.sym->name;
 
-       snprintf(bf, bfsize, "%#Lx", self->ip);
+       snprintf(bf, bfsize, "%#" PRIx64, self->ip);
        return bf;
 }
 
index e35437d..e515836 100644 (file)
@@ -1,5 +1,6 @@
 #include "../libslang.h"
 #include <elf.h>
+#include <inttypes.h>
 #include <sys/ttydefaults.h>
 #include <ctype.h>
 #include <string.h>
@@ -57,7 +58,7 @@ static void map_browser__write(struct ui_browser *self, void *nd, int row)
        int width;
 
        ui_browser__set_percent_color(self, 0, current_entry);
-       slsmg_printf("%*llx %*llx %c ",
+       slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ",
                     mb->addrlen, sym->start, mb->addrlen, sym->end,
                     sym->binding == STB_GLOBAL ? 'g' :
                     sym->binding == STB_LOCAL  ? 'l' : 'w');
@@ -150,6 +151,6 @@ int map__browse(struct map *self)
                ++mb.b.nr_entries;
        }
 
-       mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
+       mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr);
        return map_browser__run(&mb);
 }
index cfa55d6..bdd3347 100644 (file)
@@ -150,7 +150,7 @@ static void perf_read_values__display_pretty(FILE *fp,
                if (width > tidwidth)
                        tidwidth = width;
                for (j = 0; j < values->counters; j++) {
-                       width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+                       width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
                        if (width > counterwidth[j])
                                counterwidth[j] = width;
                }
@@ -165,7 +165,7 @@ static void perf_read_values__display_pretty(FILE *fp,
                fprintf(fp, "  %*d  %*d", pidwidth, values->pid[i],
                        tidwidth, values->tid[i]);
                for (j = 0; j < values->counters; j++)
-                       fprintf(fp, "  %*Lu",
+                       fprintf(fp, "  %*" PRIu64,
                                counterwidth[j], values->value[i][j]);
                fprintf(fp, "\n");
        }
@@ -196,13 +196,13 @@ static void perf_read_values__display_raw(FILE *fp,
                width = strlen(values->countername[j]);
                if (width > namewidth)
                        namewidth = width;
-               width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+               width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
                if (width > rawwidth)
                        rawwidth = width;
        }
        for (i = 0; i < values->threads; i++) {
                for (j = 0; j < values->counters; j++) {
-                       width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+                       width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
                        if (width > countwidth)
                                countwidth = width;
                }
@@ -214,7 +214,7 @@ static void perf_read_values__display_raw(FILE *fp,
                countwidth, "Count");
        for (i = 0; i < values->threads; i++)
                for (j = 0; j < values->counters; j++)
-                       fprintf(fp, "  %*d  %*d  %*s  %*llx  %*Lu\n",
+                       fprintf(fp, "  %*d  %*d  %*s  %*" PRIx64 "  %*" PRIu64 "\n",
                                pidwidth, values->pid[i],
                                tidwidth, values->tid[i],
                                namewidth, values->countername[j],
index 4c6983d..362a0cb 100644 (file)
@@ -72,7 +72,7 @@ int need_reinitialize;
 
 int num_cpus;
 
-typedef struct per_cpu_counters {
+struct counters {
        unsigned long long tsc;         /* per thread */
        unsigned long long aperf;       /* per thread */
        unsigned long long mperf;       /* per thread */
@@ -88,13 +88,13 @@ typedef struct per_cpu_counters {
        int pkg;
        int core;
        int cpu;
-       struct per_cpu_counters *next;
-} PCC;
+       struct counters *next;
+};
 
-PCC *pcc_even;
-PCC *pcc_odd;
-PCC *pcc_delta;
-PCC *pcc_average;
+struct counters *cnt_even;
+struct counters *cnt_odd;
+struct counters *cnt_delta;
+struct counters *cnt_average;
 struct timeval tv_even;
 struct timeval tv_odd;
 struct timeval tv_delta;
@@ -125,7 +125,7 @@ unsigned long long get_msr(int cpu, off_t offset)
        return msr;
 }
 
-void print_header()
+void print_header(void)
 {
        if (show_pkg)
                fprintf(stderr, "pkg ");
@@ -160,39 +160,39 @@ void print_header()
        putc('\n', stderr);
 }
 
-void dump_pcc(PCC *pcc)
+void dump_cnt(struct counters *cnt)
 {
-       fprintf(stderr, "package: %d ", pcc->pkg);
-       fprintf(stderr, "core:: %d ", pcc->core);
-       fprintf(stderr, "CPU: %d ", pcc->cpu);
-       fprintf(stderr, "TSC: %016llX\n", pcc->tsc);
-       fprintf(stderr, "c3: %016llX\n", pcc->c3);
-       fprintf(stderr, "c6: %016llX\n", pcc->c6);
-       fprintf(stderr, "c7: %016llX\n", pcc->c7);
-       fprintf(stderr, "aperf: %016llX\n", pcc->aperf);
-       fprintf(stderr, "pc2: %016llX\n", pcc->pc2);
-       fprintf(stderr, "pc3: %016llX\n", pcc->pc3);
-       fprintf(stderr, "pc6: %016llX\n", pcc->pc6);
-       fprintf(stderr, "pc7: %016llX\n", pcc->pc7);
-       fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr);
+       fprintf(stderr, "package: %d ", cnt->pkg);
+       fprintf(stderr, "core:: %d ", cnt->core);
+       fprintf(stderr, "CPU: %d ", cnt->cpu);
+       fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
+       fprintf(stderr, "c3: %016llX\n", cnt->c3);
+       fprintf(stderr, "c6: %016llX\n", cnt->c6);
+       fprintf(stderr, "c7: %016llX\n", cnt->c7);
+       fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
+       fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
+       fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
+       fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
+       fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
+       fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
 }
 
-void dump_list(PCC *pcc)
+void dump_list(struct counters *cnt)
 {
-       printf("dump_list 0x%p\n", pcc);
+       printf("dump_list 0x%p\n", cnt);
 
-       for (; pcc; pcc = pcc->next)
-               dump_pcc(pcc);
+       for (; cnt; cnt = cnt->next)
+               dump_cnt(cnt);
 }
 
-void print_pcc(PCC *p)
+void print_cnt(struct counters *p)
 {
        double interval_float;
 
        interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
 
        /* topology columns, print blanks on 1st (average) line */
-       if (p == pcc_average) {
+       if (p == cnt_average) {
                if (show_pkg)
                        fprintf(stderr, "    ");
                if (show_core)
@@ -262,24 +262,24 @@ void print_pcc(PCC *p)
        putc('\n', stderr);
 }
 
-void print_counters(PCC *cnt)
+void print_counters(struct counters *counters)
 {
-       PCC *pcc;
+       struct counters *cnt;
 
        print_header();
 
        if (num_cpus > 1)
-               print_pcc(pcc_average);
+               print_cnt(cnt_average);
 
-       for (pcc = cnt; pcc != NULL; pcc = pcc->next)
-               print_pcc(pcc);
+       for (cnt = counters; cnt != NULL; cnt = cnt->next)
+               print_cnt(cnt);
 
 }
 
 #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
 
-
-int compute_delta(PCC *after, PCC *before, PCC *delta)
+int compute_delta(struct counters *after,
+       struct counters *before, struct counters *delta)
 {
        int errors = 0;
        int perf_err = 0;
@@ -391,20 +391,20 @@ int compute_delta(PCC *after, PCC *before, PCC *delta)
                delta->extra_msr = after->extra_msr;
                if (errors) {
                        fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
-                       dump_pcc(before);
+                       dump_cnt(before);
                        fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
-                       dump_pcc(after);
+                       dump_cnt(after);
                        errors = 0;
                }
        }
        return 0;
 }
 
-void compute_average(PCC *delta, PCC *avg)
+void compute_average(struct counters *delta, struct counters *avg)
 {
-       PCC *sum;
+       struct counters *sum;
 
-       sum = calloc(1, sizeof(PCC));
+       sum = calloc(1, sizeof(struct counters));
        if (sum == NULL) {
                perror("calloc sum");
                exit(1);
@@ -438,35 +438,34 @@ void compute_average(PCC *delta, PCC *avg)
        free(sum);
 }
 
-void get_counters(PCC *pcc)
+void get_counters(struct counters *cnt)
 {
-       for ( ; pcc; pcc = pcc->next) {
-               pcc->tsc = get_msr(pcc->cpu, MSR_TSC);
+       for ( ; cnt; cnt = cnt->next) {
+               cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
                if (do_nhm_cstates)
-                       pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY);
+                       cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
                if (do_nhm_cstates)
-                       pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY);
+                       cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
                if (do_snb_cstates)
-                       pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY);
+                       cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
                if (has_aperf)
-                       pcc->aperf = get_msr(pcc->cpu, MSR_APERF);
+                       cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
                if (has_aperf)
-                       pcc->mperf = get_msr(pcc->cpu, MSR_MPERF);
+                       cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
                if (do_snb_cstates)
-                       pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY);
+                       cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
                if (do_nhm_cstates)
-                       pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY);
+                       cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
                if (do_nhm_cstates)
-                       pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY);
+                       cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
                if (do_snb_cstates)
-                       pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY);
+                       cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
                if (extra_msr_offset)
-                       pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset);
+                       cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
        }
 }
 
-
-void print_nehalem_info()
+void print_nehalem_info(void)
 {
        unsigned long long msr;
        unsigned int ratio;
@@ -514,38 +513,38 @@ void print_nehalem_info()
 
 }
 
-void free_counter_list(PCC *list)
+void free_counter_list(struct counters *list)
 {
-       PCC *p;
+       struct counters *p;
 
        for (p = list; p; ) {
-               PCC *free_me;
+               struct counters *free_me;
 
                free_me = p;
                p = p->next;
                free(free_me);
        }
-       return;
 }
 
 void free_all_counters(void)
 {
-       free_counter_list(pcc_even);
-       pcc_even = NULL;
+       free_counter_list(cnt_even);
+       cnt_even = NULL;
 
-       free_counter_list(pcc_odd);
-       pcc_odd = NULL;
+       free_counter_list(cnt_odd);
+       cnt_odd = NULL;
 
-       free_counter_list(pcc_delta);
-       pcc_delta = NULL;
+       free_counter_list(cnt_delta);
+       cnt_delta = NULL;
 
-       free_counter_list(pcc_average);
-       pcc_average = NULL;
+       free_counter_list(cnt_average);
+       cnt_average = NULL;
 }
 
-void insert_cpu_counters(PCC **list, PCC *new)
+void insert_counters(struct counters **list,
+       struct counters *new)
 {
-       PCC *prev;
+       struct counters *prev;
 
        /*
         * list was empty
@@ -594,18 +593,16 @@ void insert_cpu_counters(PCC **list, PCC *new)
         */
        new->next = prev->next;
        prev->next = new;
-
-       return;
 }
 
-void alloc_new_cpu_counters(int pkg, int core, int cpu)
+void alloc_new_counters(int pkg, int core, int cpu)
 {
-       PCC *new;
+       struct counters *new;
 
        if (verbose > 1)
                printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
 
-       new = (PCC *)calloc(1, sizeof(PCC));
+       new = (struct counters *)calloc(1, sizeof(struct counters));
        if (new == NULL) {
                perror("calloc");
                exit(1);
@@ -613,9 +610,10 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
        new->pkg = pkg;
        new->core = core;
        new->cpu = cpu;
-       insert_cpu_counters(&pcc_odd, new);
+       insert_counters(&cnt_odd, new);
 
-       new = (PCC *)calloc(1, sizeof(PCC));
+       new = (struct counters *)calloc(1,
+               sizeof(struct counters));
        if (new == NULL) {
                perror("calloc");
                exit(1);
@@ -623,9 +621,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
        new->pkg = pkg;
        new->core = core;
        new->cpu = cpu;
-       insert_cpu_counters(&pcc_even, new);
+       insert_counters(&cnt_even, new);
 
-       new = (PCC *)calloc(1, sizeof(PCC));
+       new = (struct counters *)calloc(1, sizeof(struct counters));
        if (new == NULL) {
                perror("calloc");
                exit(1);
@@ -633,9 +631,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
        new->pkg = pkg;
        new->core = core;
        new->cpu = cpu;
-       insert_cpu_counters(&pcc_delta, new);
+       insert_counters(&cnt_delta, new);
 
-       new = (PCC *)calloc(1, sizeof(PCC));
+       new = (struct counters *)calloc(1, sizeof(struct counters));
        if (new == NULL) {
                perror("calloc");
                exit(1);
@@ -643,7 +641,7 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
        new->pkg = pkg;
        new->core = core;
        new->cpu = cpu;
-       pcc_average = new;
+       cnt_average = new;
 }
 
 int get_physical_package_id(int cpu)
@@ -719,7 +717,7 @@ void re_initialize(void)
 {
        printf("turbostat: topology changed, re-initializing.\n");
        free_all_counters();
-       num_cpus = for_all_cpus(alloc_new_cpu_counters);
+       num_cpus = for_all_cpus(alloc_new_counters);
        need_reinitialize = 0;
        printf("num_cpus is now %d\n", num_cpus);
 }
@@ -728,7 +726,7 @@ void dummy(int pkg, int core, int cpu) { return; }
 /*
  * check to see if a cpu came on-line
  */
-void verify_num_cpus()
+void verify_num_cpus(void)
 {
        int new_num_cpus;
 
@@ -740,14 +738,12 @@ void verify_num_cpus()
                                num_cpus, new_num_cpus);
                need_reinitialize = 1;
        }
-
-       return;
 }
 
 void turbostat_loop()
 {
 restart:
-       get_counters(pcc_even);
+       get_counters(cnt_even);
        gettimeofday(&tv_even, (struct timezone *)NULL);
 
        while (1) {
@@ -757,24 +753,24 @@ restart:
                        goto restart;
                }
                sleep(interval_sec);
-               get_counters(pcc_odd);
+               get_counters(cnt_odd);
                gettimeofday(&tv_odd, (struct timezone *)NULL);
 
-               compute_delta(pcc_odd, pcc_even, pcc_delta);
+               compute_delta(cnt_odd, cnt_even, cnt_delta);
                timersub(&tv_odd, &tv_even, &tv_delta);
-               compute_average(pcc_delta, pcc_average);
-               print_counters(pcc_delta);
+               compute_average(cnt_delta, cnt_average);
+               print_counters(cnt_delta);
                if (need_reinitialize) {
                        re_initialize();
                        goto restart;
                }
                sleep(interval_sec);
-               get_counters(pcc_even);
+               get_counters(cnt_even);
                gettimeofday(&tv_even, (struct timezone *)NULL);
-               compute_delta(pcc_even, pcc_odd, pcc_delta);
+               compute_delta(cnt_even, cnt_odd, cnt_delta);
                timersub(&tv_even, &tv_odd, &tv_delta);
-               compute_average(pcc_delta, pcc_average);
-               print_counters(pcc_delta);
+               compute_average(cnt_delta, cnt_average);
+               print_counters(cnt_delta);
        }
 }
 
@@ -892,7 +888,7 @@ void check_cpuid()
         * this check is valid for both Intel and AMD
         */
        asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
-       has_invariant_tsc = edx && (1 << 8);
+       has_invariant_tsc = edx & (1 << 8);
 
        if (!has_invariant_tsc) {
                fprintf(stderr, "No invariant TSC\n");
@@ -905,7 +901,7 @@ void check_cpuid()
         */
 
        asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
-       has_aperf = ecx && (1 << 0);
+       has_aperf = ecx & (1 << 0);
        if (!has_aperf) {
                fprintf(stderr, "No APERF MSR\n");
                exit(1);
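
The two hunks above replace a logical AND with a bitwise AND in the CPUID feature tests: with "&&", any non-zero register value made the expression true, so the invariant-TSC and APERF checks could never fail. A standalone illustration (the sample edx value is made up):

/* Illustrative only: why "&&" always reported the feature as present. */
#include <stdio.h>

int main(void)
{
        unsigned int edx = 0xef;        /* hypothetical CPUID.80000007H edx, bit 8 clear */

        printf("edx && (1 << 8) = %d\n", edx && (1 << 8));      /* 1: wrong, always "present" */
        printf("edx &  (1 << 8) = %u\n", edx & (1 << 8));       /* 0: bit 8 really is clear */
        return 0;
}
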
@@ -952,7 +948,7 @@ void turbostat_init()
        check_dev_msr();
        check_super_user();
 
-       num_cpus = for_all_cpus(alloc_new_cpu_counters);
+       num_cpus = for_all_cpus(alloc_new_counters);
 
        if (verbose)
                print_nehalem_info();
@@ -962,7 +958,7 @@ int fork_it(char **argv)
 {
        int retval;
        pid_t child_pid;
-       get_counters(pcc_even);
+       get_counters(cnt_even);
        gettimeofday(&tv_even, (struct timezone *)NULL);
 
        child_pid = fork();
@@ -985,14 +981,14 @@ int fork_it(char **argv)
                        exit(1);
                }
        }
-       get_counters(pcc_odd);
+       get_counters(cnt_odd);
        gettimeofday(&tv_odd, (struct timezone *)NULL);
-       retval = compute_delta(pcc_odd, pcc_even, pcc_delta);
+       retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
 
        timersub(&tv_odd, &tv_even, &tv_delta);
-       compute_average(pcc_delta, pcc_average);
+       compute_average(cnt_delta, cnt_average);
        if (!retval)
-               print_counters(pcc_delta);
+               print_counters(cnt_delta);
 
        fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);;